Merge branches 'btc', 'dma', 'entry', 'fixes', 'linker-layout', 'misc', 'mmci', ...
author     Russell King <rmk+kernel@arm.linux.org.uk>
           Fri, 22 Jul 2011 22:08:48 +0000 (23:08 +0100)
committer  Russell King <rmk+kernel@arm.linux.org.uk>
           Fri, 22 Jul 2011 22:08:48 +0000 (23:08 +0100)
938 files changed:
CREDITS
Documentation/Changes
Documentation/CodingStyle
Documentation/arm/Booting
Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt [new file with mode: 0644]
Documentation/cgroups/blkio-controller.txt
Documentation/devicetree/bindings/arm/pmu.txt [new file with mode: 0644]
Documentation/feature-removal-schedule.txt
Documentation/filesystems/caching/netfs-api.txt
Documentation/filesystems/nilfs2.txt
Documentation/filesystems/proc.txt
Documentation/hwmon/f71882fg
Documentation/hwmon/k10temp
Documentation/kernel-parameters.txt
Documentation/laptops/thinkpad-acpi.txt
Documentation/networking/ip-sysctl.txt
Documentation/power/devices.txt
Documentation/power/runtime_pm.txt
Documentation/spinlocks.txt
Documentation/usb/error-codes.txt
Documentation/x86/boot.txt
MAINTAINERS
Makefile
README
arch/alpha/include/asm/mmzone.h
arch/arm/Kconfig
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head-shmobile.S
arch/arm/boot/compressed/head.S
arch/arm/boot/compressed/mmcif-sh7372.c
arch/arm/boot/compressed/sdhi-sh7372.c [new file with mode: 0644]
arch/arm/boot/compressed/sdhi-shmobile.c [new file with mode: 0644]
arch/arm/boot/compressed/sdhi-shmobile.h [new file with mode: 0644]
arch/arm/boot/compressed/vmlinux.lds.in
arch/arm/common/dmabounce.c
arch/arm/common/gic.c
arch/arm/common/it8152.c
arch/arm/common/sa1111.c
arch/arm/include/asm/assembler.h
arch/arm/include/asm/bitops.h
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/entry-macro-multi.S
arch/arm/include/asm/memory.h
arch/arm/include/asm/pmu.h
arch/arm/include/asm/proc-fns.h
arch/arm/include/asm/scatterlist.h
arch/arm/include/asm/setup.h
arch/arm/include/asm/suspend.h [new file with mode: 0644]
arch/arm/include/asm/tcm.h
arch/arm/include/asm/tlbflush.h
arch/arm/include/asm/traps.h
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/head-nommu.S
arch/arm/kernel/head.S
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/irq.c
arch/arm/kernel/module.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/pmu.c
arch/arm/kernel/setup.c
arch/arm/kernel/sleep.S
arch/arm/kernel/smp.c
arch/arm/kernel/smp_scu.c
arch/arm/kernel/smp_twd.c
arch/arm/kernel/tcm.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mach-at91/at91cap9.c
arch/arm/mach-at91/at91cap9_devices.c
arch/arm/mach-at91/at91rm9200.c
arch/arm/mach-at91/at91rm9200_devices.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/at91sam9rl.c
arch/arm/mach-at91/at91sam9rl_devices.c
arch/arm/mach-at91/board-cap9adk.c
arch/arm/mach-at91/board-sam9260ek.c
arch/arm/mach-at91/board-sam9261ek.c
arch/arm/mach-at91/board-sam9263ek.c
arch/arm/mach-at91/board-sam9g20ek.c
arch/arm/mach-at91/board-sam9m10g45ek.c
arch/arm/mach-at91/include/mach/system_rev.h
arch/arm/mach-bcmring/include/mach/entry-macro.S
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/gpio.c
arch/arm/mach-davinci/include/mach/entry-macro.S
arch/arm/mach-davinci/irq.c
arch/arm/mach-ep93xx/core.c
arch/arm/mach-exynos4/cpu.c
arch/arm/mach-exynos4/dev-audio.c
arch/arm/mach-exynos4/headsmp.S
arch/arm/mach-exynos4/init.c
arch/arm/mach-exynos4/mach-smdkv310.c
arch/arm/mach-exynos4/platsmp.c
arch/arm/mach-exynos4/pm.c
arch/arm/mach-exynos4/sleep.S
arch/arm/mach-h720x/Kconfig
arch/arm/mach-h720x/include/mach/entry-macro.S
arch/arm/mach-ixp4xx/common-pci.c
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-lpc32xx/include/mach/entry-macro.S
arch/arm/mach-mmp/pxa168.c
arch/arm/mach-mmp/pxa910.c
arch/arm/mach-msm/platsmp.c
arch/arm/mach-msm/timer.c
arch/arm/mach-omap1/Makefile
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/gpio15xx.c
arch/arm/mach-omap1/gpio16xx.c
arch/arm/mach-omap1/gpio7xx.c
arch/arm/mach-omap1/pm_bus.c
arch/arm/mach-omap2/board-omap3pandora.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/control.c
arch/arm/mach-omap2/control.h
arch/arm/mach-omap2/include/mach/entry-macro.S
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/pm-debug.c
arch/arm/mach-omap2/pm.h
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/sleep34xx.S
arch/arm/mach-pnx4008/include/mach/entry-macro.S
arch/arm/mach-pxa/include/mach/pm.h
arch/arm/mach-pxa/mfp-pxa2xx.c
arch/arm/mach-pxa/palmz72.c
arch/arm/mach-pxa/pm.c
arch/arm/mach-pxa/pxa25x.c
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-pxa/pxa3xx.c
arch/arm/mach-pxa/raumfeld.c
arch/arm/mach-pxa/sleep.S
arch/arm/mach-pxa/spitz_pm.c
arch/arm/mach-pxa/zeus.c
arch/arm/mach-realview/Kconfig
arch/arm/mach-realview/platsmp.c
arch/arm/mach-s3c2412/pm.c
arch/arm/mach-s3c2416/pm.c
arch/arm/mach-s3c2440/mach-mini2440.c
arch/arm/mach-s3c64xx/dev-spi.c
arch/arm/mach-s3c64xx/dma.c
arch/arm/mach-s3c64xx/pm.c
arch/arm/mach-s3c64xx/sleep.S
arch/arm/mach-s5p64x0/dev-spi.c
arch/arm/mach-s5pc100/dev-spi.c
arch/arm/mach-s5pv210/dev-spi.c
arch/arm/mach-s5pv210/pm.c
arch/arm/mach-s5pv210/sleep.S
arch/arm/mach-sa1100/pm.c
arch/arm/mach-sa1100/sleep.S
arch/arm/mach-shark/include/mach/entry-macro.S
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h [new file with mode: 0644]
arch/arm/mach-shmobile/include/mach/sdhi.h [new file with mode: 0644]
arch/arm/mach-shmobile/platsmp.c
arch/arm/mach-tegra/platsmp.c
arch/arm/mach-ux500/board-mop500-pins.c
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/platsmp.c
arch/arm/mach-vexpress/ct-ca9x4.c
arch/arm/mach-vt8500/irq.c
arch/arm/mm/abort-ev4.S
arch/arm/mm/abort-ev4t.S
arch/arm/mm/abort-ev5t.S
arch/arm/mm/abort-ev5tj.S
arch/arm/mm/abort-ev6.S
arch/arm/mm/abort-ev7.S
arch/arm/mm/abort-lv4t.S
arch/arm/mm/abort-macro.S
arch/arm/mm/abort-nommu.S
arch/arm/mm/alignment.c
arch/arm/mm/cache-l2x0.c
arch/arm/mm/copypage-v6.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault.c
arch/arm/mm/init.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/mm/pabort-legacy.S
arch/arm/mm/pabort-v6.S
arch/arm/mm/pabort-v7.S
arch/arm/mm/proc-arm6_7.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/proc-v7.S
arch/arm/mm/tlb-fa.S
arch/arm/mm/tlb-v6.S
arch/arm/mm/tlb-v7.S
arch/arm/plat-iop/cp6.c
arch/arm/plat-mxc/include/mach/entry-macro.S
arch/arm/plat-omap/omap_device.c
arch/arm/plat-omap/sram.c
arch/arm/plat-orion/gpio.c
arch/arm/plat-pxa/gpio.c
arch/arm/plat-s3c24xx/dma.c
arch/arm/plat-s3c24xx/sleep.S
arch/arm/plat-s5p/irq-gpioint.c
arch/arm/plat-s5p/s5p-time.c
arch/arm/plat-samsung/dma.c
arch/arm/plat-samsung/include/plat/devs.h
arch/arm/plat-samsung/include/plat/dma.h
arch/arm/plat-samsung/include/plat/pm.h
arch/arm/plat-samsung/include/plat/regs-serial.h
arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
arch/arm/plat-samsung/irq-uart.c
arch/arm/plat-samsung/irq-vic-timer.c
arch/arm/plat-samsung/pm.c
arch/arm/vfp/vfphw.S
arch/arm/vfp/vfpmodule.c
arch/m32r/include/asm/mmzone.h
arch/mips/kernel/i8259.c
arch/mn10300/include/asm/uaccess.h
arch/parisc/include/asm/mmzone.h
arch/powerpc/boot/dts/p1022ds.dts
arch/powerpc/configs/pseries_defconfig
arch/powerpc/include/asm/mmzone.h
arch/powerpc/kernel/rtas-rtc.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/traps.c
arch/powerpc/mm/fault.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/sysdev/mpic.c
arch/s390/Kconfig
arch/s390/kernel/smp.c
arch/s390/oprofile/init.c
arch/sh/Kconfig
arch/sh/configs/sh7757lcr_defconfig
arch/sh/include/asm/mmzone.h
arch/sh/kernel/cpu/sh4a/setup-sh7757.c
arch/sh/kernel/irq.c
arch/sh/mm/alignment.c
arch/sparc/include/asm/irqflags_32.h
arch/sparc/include/asm/irqflags_64.h
arch/sparc/include/asm/mmzone.h
arch/sparc/kernel/entry.S
arch/sparc/mm/leon_mm.c
arch/tile/include/asm/mmzone.h
arch/um/include/asm/percpu.h [new file with mode: 0644]
arch/x86/Kconfig
arch/x86/include/asm/apb_timer.h
arch/x86/include/asm/memblock.h
arch/x86/include/asm/mmzone_32.h
arch/x86/include/asm/mmzone_64.h
arch/x86/include/asm/pvclock.h
arch/x86/kernel/acpi/realmode/wakeup.S
arch/x86/kernel/acpi/realmode/wakeup.h
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/reboot.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/vmx.c
arch/x86/mm/init_64.c
arch/x86/mm/memblock.c
arch/x86/oprofile/nmi_int.c
arch/x86/pci/acpi.c
arch/x86/pci/xen.c
arch/x86/platform/efi/efi.c
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
block/blk-throttle.c
block/cfq-iosched.c
block/genhd.c
crypto/deflate.c
crypto/zlib.c
drivers/acpi/apei/hest.c
drivers/acpi/osl.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/pata_marvell.c
drivers/ata/sata_dwc_460ex.c
drivers/base/memory.c
drivers/base/platform.c
drivers/base/power/clock_ops.c
drivers/base/power/main.c
drivers/base/syscore.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_worker.c
drivers/bluetooth/btmrvl_debugfs.c
drivers/char/agp/intel-agp.h
drivers/connector/connector.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/powernow-k8.c
drivers/crypto/caam/caamalg.c
drivers/firewire/ohci.c
drivers/firmware/google/Kconfig
drivers/gpio/gpio-omap.c
drivers/gpio/langwell_gpio.c
drivers/gpio/tps65910-gpio.c
drivers/gpio/wm831x-gpio.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_perf.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_reg.h
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/Kconfig
drivers/hwmon/adm1275.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/coretemp.c
drivers/hwmon/emc6w201.c
drivers/hwmon/f71882fg.c
drivers/hwmon/hwmon-vid.c
drivers/hwmon/ibmaem.c
drivers/hwmon/ibmpex.c
drivers/hwmon/it87.c
drivers/hwmon/lm95241.c
drivers/hwmon/max1111.c
drivers/hwmon/pmbus.c
drivers/hwmon/pmbus_core.c
drivers/hwmon/s3c-hwmon.c
drivers/hwmon/sch5627.c
drivers/i2c/busses/i2c-bfin-twi.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-taos-evm.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/muxes/pca954x.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_intr.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/keyboard/omap-keypad.c
drivers/input/keyboard/pmic8xxx-keypad.c
drivers/input/keyboard/sh_keysc.c
drivers/input/misc/pmic8xxx-pwrkey.c
drivers/input/mousedev.c
drivers/isdn/gigaset/interface.c
drivers/leds/leds-lp5521.c
drivers/leds/leds-lp5523.c
drivers/leds/leds-pca9532.c
drivers/md/md.c
drivers/media/dvb/dvb-core/dvb_frontend.c
drivers/media/radio/Kconfig
drivers/media/radio/si4713-i2c.c
drivers/media/rc/fintek-cir.c
drivers/media/rc/imon.c
drivers/media/rc/ir-raw.c
drivers/media/rc/ite-cir.c
drivers/media/rc/ite-cir.h
drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
drivers/media/rc/lirc_dev.c
drivers/media/rc/mceusb.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/nuvoton-cir.h
drivers/media/rc/rc-main.c
drivers/media/video/bt8xx/bttv-driver.c
drivers/media/video/cx18/cx18-ioctl.c
drivers/media/video/cx23885/cx23885-core.c
drivers/media/video/ivtv/ivtv-ioctl.c
drivers/media/video/m5mols/m5mols.h
drivers/media/video/m5mols/m5mols_capture.c
drivers/media/video/m5mols/m5mols_controls.c
drivers/media/video/m5mols/m5mols_core.c
drivers/media/video/m5mols/m5mols_reg.h
drivers/media/video/msp3400-driver.c
drivers/media/video/mx1_camera.c
drivers/media/video/omap/omap_vout.c
drivers/media/video/omap/omap_voutlib.c
drivers/media/video/omap3isp/isp.c
drivers/media/video/pvrusb2/pvrusb2-hdw.c
drivers/media/video/pwc/pwc-ctrl.c
drivers/media/video/pwc/pwc-if.c
drivers/media/video/pwc/pwc.h
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-core.h
drivers/media/video/saa7134/saa7134-input.c
drivers/media/video/tuner-core.c
drivers/media/video/uvc/uvc_entity.c
drivers/media/video/uvc/uvc_queue.c
drivers/media/video/uvc/uvc_video.c
drivers/media/video/v4l2-dev.c
drivers/media/video/v4l2-ioctl.c
drivers/media/video/videobuf2-core.c
drivers/media/video/videobuf2-dma-sg.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/asic3.c
drivers/mfd/htc-pasic3.c
drivers/mfd/omap-usb-host.c
drivers/mfd/tps65911-comparator.c
drivers/misc/cb710/sgbuf2.c
drivers/misc/ioc4.c
drivers/misc/lkdtm.c
drivers/misc/pti.c
drivers/misc/sgi-xp/xpnet.c
drivers/misc/ti-st/st_core.c
drivers/misc/ti-st/st_kim.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
drivers/mmc/card/queue.h
drivers/mmc/core/core.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_bus.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mmci.h
drivers/mmc/host/of_mmc_spi.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mmc/host/vub300.c
drivers/mtd/nand/fsl_elbc_nand.c
drivers/net/3c503.c
drivers/net/8139too.c
drivers/net/Kconfig
drivers/net/bfin_mac.c
drivers/net/bna/bnad.c
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bonding/bond_main.c
drivers/net/can/Kconfig
drivers/net/cxgb3/sge.c
drivers/net/fs_enet/mac-fcc.c
drivers/net/gianfar.c
drivers/net/gianfar.h
drivers/net/greth.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/mkiss.c
drivers/net/hp100.c
drivers/net/hplance.c
drivers/net/natsemi.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/phy/Kconfig
drivers/net/phy/dp83640.c
drivers/net/ppp_async.c
drivers/net/ppp_deflate.c
drivers/net/pppoe.c
drivers/net/pxa168_eth.c
drivers/net/qlge/qlge.h
drivers/net/qlge/qlge_main.c
drivers/net/r6040.c
drivers/net/r8169.c
drivers/net/rionet.c
drivers/net/sh_eth.c
drivers/net/slip.c
drivers/net/tulip/dmfe.c
drivers/net/tun.c
drivers/net/usb/Kconfig
drivers/net/usb/Makefile
drivers/net/usb/hso.c
drivers/net/usb/kalmia.c [new file with mode: 0644]
drivers/net/usb/zaurus.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wan/farsync.c
drivers/net/wireless/ath/ath5k/desc.c
drivers/net/wireless/ath/ath5k/eeprom.c
drivers/net/wireless/ath/ath5k/pci.c
drivers/net/wireless/ath/ath5k/sysfs.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/pci/pci-driver.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/pcmcia/pxa2xx_vpac270.c
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/compal-laptop.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/intel_oaktrail.c
drivers/platform/x86/thinkpad_acpi.c
drivers/regulator/db8500-prcmu.c
drivers/regulator/max8952.c
drivers/regulator/max8997.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-vt8500.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/isci/Makefile [new file with mode: 0644]
drivers/scsi/isci/firmware/Makefile [new file with mode: 0644]
drivers/scsi/isci/firmware/README [new file with mode: 0644]
drivers/scsi/isci/firmware/create_fw.c [new file with mode: 0644]
drivers/scsi/isci/firmware/create_fw.h [new file with mode: 0644]
drivers/scsi/isci/host.c [new file with mode: 0644]
drivers/scsi/isci/host.h [new file with mode: 0644]
drivers/scsi/isci/init.c [new file with mode: 0644]
drivers/scsi/isci/isci.h [new file with mode: 0644]
drivers/scsi/isci/phy.c [new file with mode: 0644]
drivers/scsi/isci/phy.h [new file with mode: 0644]
drivers/scsi/isci/port.c [new file with mode: 0644]
drivers/scsi/isci/port.h [new file with mode: 0644]
drivers/scsi/isci/port_config.c [new file with mode: 0644]
drivers/scsi/isci/probe_roms.c [new file with mode: 0644]
drivers/scsi/isci/probe_roms.h [new file with mode: 0644]
drivers/scsi/isci/registers.h [new file with mode: 0644]
drivers/scsi/isci/remote_device.c [new file with mode: 0644]
drivers/scsi/isci/remote_device.h [new file with mode: 0644]
drivers/scsi/isci/remote_node_context.c [new file with mode: 0644]
drivers/scsi/isci/remote_node_context.h [new file with mode: 0644]
drivers/scsi/isci/remote_node_table.c [new file with mode: 0644]
drivers/scsi/isci/remote_node_table.h [new file with mode: 0644]
drivers/scsi/isci/request.c [new file with mode: 0644]
drivers/scsi/isci/request.h [new file with mode: 0644]
drivers/scsi/isci/sas.h [new file with mode: 0644]
drivers/scsi/isci/scu_completion_codes.h [new file with mode: 0644]
drivers/scsi/isci/scu_event_codes.h [new file with mode: 0644]
drivers/scsi/isci/scu_remote_node_context.h [new file with mode: 0644]
drivers/scsi/isci/scu_task_context.h [new file with mode: 0644]
drivers/scsi/isci/task.c [new file with mode: 0644]
drivers/scsi/isci/task.h [new file with mode: 0644]
drivers/scsi/isci/unsolicited_frame_control.c [new file with mode: 0644]
drivers/scsi/isci/unsolicited_frame_control.h [new file with mode: 0644]
drivers/spi/spi_bfin5xx.c
drivers/spi/spi_s3c64xx.c
drivers/ssb/driver_pcicore.c
drivers/staging/brcm80211/Kconfig
drivers/staging/comedi/Kconfig
drivers/staging/iio/Kconfig
drivers/staging/iio/accel/adis16204.h
drivers/staging/iio/accel/adis16209.h
drivers/staging/iio/gyro/adis16260.h
drivers/staging/iio/imu/adis16400.h
drivers/staging/lirc/lirc_imon.c
drivers/staging/lirc/lirc_serial.c
drivers/staging/lirc/lirc_sir.c
drivers/staging/lirc/lirc_zilog.c
drivers/staging/mei/init.c
drivers/staging/mei/wd.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_pr.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tcm_fc.h
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_io.c
drivers/target/tcm_fc/tfc_sess.c
drivers/tty/n_gsm.c
drivers/tty/n_tty.c
drivers/tty/serial/8250.c
drivers/tty/serial/8250_pci.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/jsm/jsm_driver.c
drivers/tty/serial/mrst_max3110.c
drivers/tty/serial/s5pv210.c
drivers/tty/tty_ldisc.c
drivers/usb/core/driver.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/host/ehci-ath79.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio.h
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/ti_usb_3410_5052.c
drivers/video/amba-clcd.c
drivers/video/fsl-diu-fb.c
drivers/video/geode/gx1fb_core.c
drivers/video/hecubafb.c
drivers/video/sh_mobile_meram.c
drivers/video/sm501fb.c
drivers/video/udlfb.c
drivers/video/vesafb.c
drivers/w1/masters/ds1wm.c
drivers/watchdog/Kconfig
drivers/watchdog/at32ap700x_wdt.c
drivers/watchdog/gef_wdt.c
drivers/watchdog/intel_scu_watchdog.c
drivers/watchdog/mtx-1_wdt.c
drivers/watchdog/wm831x_wdt.c
drivers/xen/events.c
firmware/Makefile
firmware/isci/isci_firmware.bin.ihex [new file with mode: 0644]
fs/bad_inode.c
fs/binfmt_elf_fdpic.c
fs/block_dev.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/file.c
fs/ceph/mds_client.c
fs/cifs/Kconfig
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/fscache.c
fs/cifs/sess.c
fs/cifs/smbencrypt.c
fs/coda/pioctl.c
fs/cramfs/inode.c
fs/dcache.c
fs/exec.c
fs/exofs/super.c
fs/ext4/ext4_extents.h
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/move_extent.c
fs/ext4/super.c
fs/fscache/page.c
fs/gfs2/aops.c
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/log.c
fs/gfs2/ops_fstype.c
fs/gfs2/super.c
fs/gfs2/sys.c
fs/hfsplus/super.c
fs/hfsplus/wrapper.c
fs/hppfs/hppfs.c
fs/inode.c
fs/isofs/inode.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/jfs/file.c
fs/jfs/jfs_imap.c
fs/jfs/jfs_incore.h
fs/jfs/resize.c
fs/libfs.c
fs/lockd/clntproc.c
fs/locks.c
fs/logfs/dir.c
fs/namei.c
fs/nfs/fscache.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/objlayout/objlayout.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/pnfs_dev.c
fs/nfs/write.c
fs/nfsd/Kconfig
fs/nfsd/nfsctl.c
fs/nfsd/vfs.c
fs/nilfs2/inode.c
fs/omfs/file.c
fs/proc/base.c
fs/proc/proc_sysctl.c
fs/reiserfs/xattr.c
fs/romfs/mmap-nommu.c
fs/timerfd.c
fs/ubifs/super.c
fs/ufs/namei.c
fs/xfs/linux-2.6/xfs_file.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_iget.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_log.c
fs/xfs/xfs_trans.c
fs/xfs/xfs_vnodeops.c
include/acpi/acpi_bus.h
include/acpi/acpiosxf.h
include/acpi/platform/aclinux.h
include/asm-generic/gpio.h
include/drm/drm_pciids.h
include/linux/amba/serial.h
include/linux/blk_types.h
include/linux/blktrace_api.h
include/linux/clocksource.h
include/linux/compat.h
include/linux/connector.h
include/linux/device.h
include/linux/device_cgroup.h
include/linux/drbd_limits.h
include/linux/fs.h
include/linux/fscache.h
include/linux/fsl-diu-fb.h
include/linux/gpio.h
include/linux/hrtimer.h
include/linux/input/sh_keysc.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/jbd2.h
include/linux/kmod.h
include/linux/memory.h
include/linux/mfd/ds1wm.h
include/linux/mmc/card.h
include/linux/mmzone.h
include/linux/netdevice.h
include/linux/nfs_page.h
include/linux/nfs_xdr.h
include/linux/pci_ids.h
include/linux/pm.h
include/linux/sched.h
include/linux/sdla.h
include/linux/shmem_fs.h
include/linux/smp.h
include/linux/sunrpc/gss_krb5_enctypes.h [new file with mode: 0644]
include/linux/sunrpc/sched.h
include/linux/swap.h
include/media/lirc_dev.h
include/media/m5mols.h
include/media/v4l2-subdev.h
include/net/cfg80211.h
include/net/dst.h
include/net/netfilter/nf_conntrack.h
include/net/sctp/command.h
include/net/sctp/ulpevent.h
include/net/sock.h
include/sound/sb16_csp.h
include/sound/soc.h
include/trace/events/ext4.h
include/trace/events/irq.h
init/calibrate.c
init/main.c
kernel/irq/generic-chip.c
kernel/jump_label.c
kernel/kmod.c
kernel/power/snapshot.c
kernel/power/user.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/rcutree_trace.c
kernel/resource.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_features.h
kernel/signal.c
kernel/smp.c
kernel/softirq.c
kernel/taskstats.c
kernel/time/alarmtimer.c
kernel/time/clocksource.c
kernel/trace/trace_printk.c
lib/debugobjects.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/nommu.c
mm/rmap.c
mm/shmem.c
mm/swapfile.c
mm/truncate.c
mm/vmscan.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/core.c
net/bluetooth/hidp/hidp.h
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bridge/br_device.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/caif/cfmuxl.c
net/ceph/ceph_fs.c
net/ceph/osd_client.c
net/core/dst.c
net/ieee802154/nl-phy.c
net/ipv4/af_inet.c
net/ipv4/inet_diag.c
net/ipv4/ip_output.c
net/ipv4/netfilter.c
net/ipv4/netfilter/ip_queue.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/ipt_ecn.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv4/xfrm4_output.c
net/ipv6/af_inet6.c
net/ipv6/netfilter/ip6_queue.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/scan.c
net/mac80211/wpa.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/ulpevent.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/clnt.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/sched.c
net/wireless/core.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/scan.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
scripts/depmod.sh
security/device_cgroup.c
security/keys/request_key.c
sound/atmel/abdac.c
sound/atmel/ac97c.c
sound/firewire/isight.c
sound/pci/asihpi/asihpi.c
sound/pci/cs5535audio/cs5535audio_pcm.c
sound/pci/emu10k1/emu10k1_main.c
sound/pci/hda/hda_beep.h
sound/pci/hda/hda_eld.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/pci/lola/lola.c
sound/pci/rme9652/hdspm.c
sound/soc/blackfin/bf5xx-i2s-pcm.c
sound/soc/codecs/ak4642.c
sound/soc/codecs/tlv320aic26.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm8731.c
sound/soc/codecs/wm8991.c
sound/soc/codecs/wm8994.c
sound/soc/imx/Kconfig
sound/soc/imx/imx-pcm-dma-mx2.c
sound/soc/imx/imx-ssi.c
sound/soc/pxa/pxa2xx-pcm.c
sound/soc/sh/fsi-ak4642.c
sound/soc/sh/fsi-da7210.c
sound/soc/sh/fsi-hdmi.c
sound/soc/soc-cache.c
sound/soc/soc-core.c
sound/soc/tegra/tegra_i2s.c
sound/spi/at73c213.c
sound/usb/6fire/firmware.c
sound/usb/6fire/pcm.c
tools/perf/Makefile
tools/perf/util/trace-event-parse.c

diff --git a/CREDITS b/CREDITS
index d78359f5f64d7e581110166f9a2a7ecd410abc46..1deb331d96edbe60f3026ce4175dc0efe3bffaa4 100644
--- a/CREDITS
+++ b/CREDITS
@@ -518,7 +518,7 @@ N: Zach Brown
 E: zab@zabbo.net
 D: maestro pci sound
 
-M: David Brownell
+N: David Brownell
 D: Kernel engineer, mentor, and friend.  Maintained USB EHCI and
 D: gadget layers, SPI subsystem, GPIO subsystem, and more than a few
 D: device drivers.  His encouragement also helped many engineers get
diff --git a/Documentation/Changes b/Documentation/Changes
index 5f4828a034e3235f21d3e6050f4a4092dc4bd23e..b1758088527339466c5d5eb8be6151eabe02952d 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -2,13 +2,7 @@ Intro
 =====
 
 This document is designed to provide a list of the minimum levels of
-software necessary to run the 2.6 kernels, as well as provide brief
-instructions regarding any other "Gotchas" users may encounter when
-trying life on the Bleeding Edge.  If upgrading from a pre-2.4.x
-kernel, please consult the Changes file included with 2.4.x kernels for
-additional information; most of that information will not be repeated
-here.  Basically, this document assumes that your system is already
-functional and running at least 2.4.x kernels.
+software necessary to run the 3.0 kernels.
 
 This document is originally based on my "Changes" file for 2.0.x kernels
 and therefore owes credit to the same people as that file (Jared Mauch,
@@ -22,11 +16,10 @@ Upgrade to at *least* these software revisions before thinking you've
 encountered a bug!  If you're unsure what version you're currently
 running, the suggested command should tell you.
 
-Again, keep in mind that this list assumes you are already
-functionally running a Linux 2.4 kernel.  Also, not all tools are
-necessary on all systems; obviously, if you don't have any ISDN
-hardware, for example, you probably needn't concern yourself with
-isdn4k-utils.
+Again, keep in mind that this list assumes you are already functionally
+running a Linux kernel.  Also, not all tools are necessary on all
+systems; obviously, if you don't have any ISDN hardware, for example,
+you probably needn't concern yourself with isdn4k-utils.
 
 o  Gnu C                  3.2                     # gcc --version
 o  Gnu make               3.80                    # make --version
@@ -114,12 +107,12 @@ Ksymoops
 
 If the unthinkable happens and your kernel oopses, you may need the
 ksymoops tool to decode it, but in most cases you don't.
-In the 2.6 kernel it is generally preferred to build the kernel with
-CONFIG_KALLSYMS so that it produces readable dumps that can be used as-is
-(this also produces better output than ksymoops).
-If for some reason your kernel is not build with CONFIG_KALLSYMS and
-you have no way to rebuild and reproduce the Oops with that option, then
-you can still decode that Oops with ksymoops.
+It is generally preferred to build the kernel with CONFIG_KALLSYMS so
+that it produces readable dumps that can be used as-is (this also
+produces better output than ksymoops).  If for some reason your kernel
+is not built with CONFIG_KALLSYMS and you have no way to rebuild and
+reproduce the Oops with that option, then you can still decode that Oops
+with ksymoops.
 
 Module-Init-Tools
 -----------------
@@ -261,8 +254,8 @@ needs to be recompiled or (preferably) upgraded.
 NFS-utils
 ---------
 
-In 2.4 and earlier kernels, the nfs server needed to know about any
-client that expected to be able to access files via NFS.  This
+In ancient (2.4 and earlier) kernels, the nfs server needed to know
+about any client that expected to be able to access files via NFS.  This
 information would be given to the kernel by "mountd" when the client
 mounted the filesystem, or by "exportfs" at system startup.  exportfs
 would take information about active clients from /var/lib/nfs/rmtab.
@@ -272,11 +265,11 @@ which is not always easy, particularly when trying to implement
 fail-over.  Even when the system is working well, rmtab suffers from
 getting lots of old entries that never get removed.
 
-With 2.6 we have the option of having the kernel tell mountd when it
-gets a request from an unknown host, and mountd can give appropriate
-export information to the kernel.  This removes the dependency on
-rmtab and means that the kernel only needs to know about currently
-active clients.
+With modern kernels we have the option of having the kernel tell mountd
+when it gets a request from an unknown host, and mountd can give
+appropriate export information to the kernel.  This removes the
+dependency on rmtab and means that the kernel only needs to know about
+currently active clients.
 
 To enable this new functionality, you need to:
 
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 58b0bf9178349c435fd53674d96c29d7ecea0414..fa6e25b94a54bfe1d39450cd9bc568c4043ddfa3 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -680,8 +680,8 @@ ones already enabled by DEBUG.
                Chapter 14: Allocating memory
 
 The kernel provides the following general purpose memory allocators:
-kmalloc(), kzalloc(), kcalloc(), and vmalloc().  Please refer to the API
-documentation for further information about them.
+kmalloc(), kzalloc(), kcalloc(), vmalloc(), and vzalloc().  Please refer to
+the API documentation for further information about them.
 
 The preferred form for passing a size of a struct is the following:
 
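For reference, the form the chapter goes on to show is the sizeof(*p)
idiom. A minimal sketch using one of the allocators listed above (the
struct and variable names are illustrative):

        struct foo *p;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

Sizing the allocation from the pointer (sizeof(*p)) rather than the type
name keeps the size correct if p's type ever changes.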
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index 4e686a2ed91e48b6f2e0d968f8f1352fe907b2cc..a341d87d276eb1bf3904aec38896f87174b36561 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -164,3 +164,8 @@ In either case, the following conditions must be met:
 - The boot loader is expected to call the kernel image by jumping
   directly to the first instruction of the kernel image.
 
+  On CPUs supporting the ARM instruction set, the entry must be
+  made in ARM state, even for a Thumb-2 kernel.
+
+  On CPUs supporting only the Thumb instruction set, such as
+  Cortex-M class CPUs, the entry must be made in Thumb state.
diff --git a/Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt b/Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt
new file mode 100644
index 0000000..4419598
--- /dev/null
+++ b/Documentation/arm/SH-Mobile/zboot-rom-sdhi.txt
@@ -0,0 +1,42 @@
+ROM-able zImage boot from eSD
+-----------------------------
+
+A ROM-able zImage compiled with ZBOOT_ROM_SDHI may be written to eSD and
+SuperH Mobile ARM will boot directly from the SDHI hardware block.
+
+This is achieved by the mask ROM loading the first portion of the image into
+MERAM and then jumping to it. This portion contains loader code which
+copies the entire image to SDRAM and jumps to it. From there the zImage
+boot code proceeds as normal, uncompressing the image into its final
+location and then jumping to it.
+
+This code has been tested on a mackerel board using the developer 1A eSD
+boot mode which is configured using the following jumper settings.
+
+   8 7 6 5 4 3 2 1
+   x|x|x|x| |x|x|
+S4 -+-+-+-+-+-+-+-
+    | | | |x| | |x on
+
+The eSD card needs to be present in SDHI slot 1 (CN7).
+As such S1 and S33 also need to be configured as per
+the notes in arch/arm/mach-shmobile/board-mackerel.c.
+
+A partial zImage must be written to physical partition #1 (boot)
+of the eSD at sector 0 in vrl4 format. A utility vrl4 is supplied to
+accomplish this.
+
+e.g.
+       vrl4 < zImage | dd of=/dev/sdX bs=512 count=17
+
+A full copy of _the same_ zImage should be written to physical partition #1
+(boot) of the eSD at sector 0. This should _not_ be in vrl4 format.
+
+       dd if=zImage of=/dev/sdX bs=512
+
+Note: The commands above assume that the physical partition has been
+switched. No such facility currently exists in the Linux Kernel.
+
+Physical partitions are described in the eSD specification.  At the time of
+writing they are not the same as partitions that are typically configured
+using fdisk and visible through /proc/partitions.
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index cd45c8ea7463f71eccee9d82ce6a82b47bcee467..84f0a15fc210aec69648309a7676717d889ef6e5 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -77,7 +77,7 @@ Throttling/Upper Limit policy
 - Specify a bandwidth rate on particular device for root group. The format
   for policy is "<major>:<minor>  <byes_per_second>".
 
-        echo "8:16  1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device
+        echo "8:16  1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
 
   Above will put a limit of 1MB/second on reads happening for root group
   on device having major/minor number 8:16.
@@ -90,7 +90,7 @@ Throttling/Upper Limit policy
         1024+0 records out
         4194304 bytes (4.2 MB) copied, 4.0001 s, 1.0 MB/s
 
- Limits for writes can be put using blkio.write_bps_device file.
+ Limits for writes can be put using blkio.throttle.write_bps_device file.
 
 Hierarchical Cgroups
 ====================
@@ -286,28 +286,28 @@ Throttling/Upper limit policy files
          specified in bytes per second. Rules are per deivce. Following is
          the format.
 
-  echo "<major>:<minor>  <rate_bytes_per_second>" > /cgrp/blkio.read_bps_device
+  echo "<major>:<minor>  <rate_bytes_per_second>" > /cgrp/blkio.throttle.read_bps_device
 
 - blkio.throttle.write_bps_device
        - Specifies upper limit on WRITE rate to the device. IO rate is
          specified in bytes per second. Rules are per deivce. Following is
          the format.
 
-  echo "<major>:<minor>  <rate_bytes_per_second>" > /cgrp/blkio.write_bps_device
+  echo "<major>:<minor>  <rate_bytes_per_second>" > /cgrp/blkio.throttle.write_bps_device
 
 - blkio.throttle.read_iops_device
        - Specifies upper limit on READ rate from the device. IO rate is
          specified in IO per second. Rules are per deivce. Following is
          the format.
 
-  echo "<major>:<minor>  <rate_io_per_second>" > /cgrp/blkio.read_iops_device
+  echo "<major>:<minor>  <rate_io_per_second>" > /cgrp/blkio.throttle.read_iops_device
 
 - blkio.throttle.write_iops_device
        - Specifies upper limit on WRITE rate to the device. IO rate is
          specified in io per second. Rules are per deivce. Following is
          the format.
 
-  echo "<major>:<minor>  <rate_io_per_second>" > /cgrp/blkio.write_iops_device
+  echo "<major>:<minor>  <rate_io_per_second>" > /cgrp/blkio.throttle.write_iops_device
 
 Note: If both BW and IOPS rules are specified for a device, then IO is
       subjectd to both the constraints.
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
new file mode 100644
index 0000000..1c044eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -0,0 +1,21 @@
+* ARM Performance Monitor Units
+
+ARM cores often have a PMU for counting CPU and cache events like cache misses
+and hits. The interface to the PMU is part of the ARM ARM. The ARM PMU
+representation in the device tree should be done as follows:
+
+Required properties:
+
+- compatible : should be one of
+       "arm,cortex-a9-pmu"
+       "arm,cortex-a8-pmu"
+       "arm,arm1176-pmu"
+       "arm,arm1136-pmu"
+- interrupts : 1 combined interrupt or 1 per core.
+
+Example:
+
+pmu {
+        compatible = "arm,cortex-a9-pmu";
+        interrupts = <100 101>;
+};
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 72e238465b0b6ca452ee4905dc097007b9ff3dc0..b1c921c27519a0953591b6a64159e46856e7558d 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -583,3 +583,25 @@ Why:       Superseded by the UVCIOC_CTRL_QUERY ioctl.
 Who:   Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 
 ----------------------------
+
+What:  For VIDIOC_S_FREQUENCY the type field must match the device node's type.
+       If not, return -EINVAL.
+When:  3.2
+Why:   It makes no sense to switch the tuner to radio mode by calling
+       VIDIOC_S_FREQUENCY on a video node, or to switch the tuner to tv mode by
+       calling VIDIOC_S_FREQUENCY on a radio node. This is the first step of a
+       move to more consistent handling of tv and radio tuners.
+Who:   Hans Verkuil <hans.verkuil@cisco.com>
+
+----------------------------
+
+What:  Opening a radio device node will no longer automatically switch the
+       tuner mode from tv to radio.
+When:  3.3
+Why:   Just opening a V4L device should not change the state of the hardware
+       like that. It's very unexpected and against the V4L spec. Instead, you
+       switch to radio mode by calling VIDIOC_S_FREQUENCY. This is the second
+       and last step of the move to consistent handling of tv and radio tuners.
+Who:   Hans Verkuil <hans.verkuil@cisco.com>
+
+----------------------------
diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt
index a167ab876c357111c8e5ba4e1a204bda29a7285e..7cc6bf2871ebfbae736436959a3d8a2e5153334d 100644
--- a/Documentation/filesystems/caching/netfs-api.txt
+++ b/Documentation/filesystems/caching/netfs-api.txt
@@ -673,6 +673,22 @@ storage request to complete, or it may attempt to cancel the storage request -
 in which case the page will not be stored in the cache this time.
 
 
+BULK INODE PAGE UNCACHE
+-----------------------
+
+A convenience routine is provided to perform an uncache on all the pages
+attached to an inode.  This assumes that the pages on the inode correspond on a
+1:1 basis with the pages in the cache.
+
+       void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+                                            struct inode *inode);
+
+This takes the netfs cookie that the pages were cached with and the inode that
+the pages are attached to.  This function will wait for pages to finish being
+written to the cache and for the cache to finish with the page generally.  No
+error is returned.
+
+
 ==========================
 INDEX AND DATA FILE UPDATE
 ==========================
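Returning to the bulk-uncache helper documented above, a minimal sketch
of how a netfs might call it while evicting an inode (the my_netfs_*
names and the location of the fscache cookie are illustrative; each
netfs keeps its cookie in its own inode structure):

        static void my_netfs_evict_inode(struct inode *inode)
        {
                struct my_netfs_inode *ni = MY_NETFS_I(inode);  /* hypothetical wrapper */

                /* Waits for writes to the cache to finish, then uncaches all pages. */
                fscache_uncache_all_inode_pages(ni->fscache_cookie, inode);
        }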
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index d5c0cef38a7122ed378371acba045f928f891c0b..873a2ab2e9f8801aee72a11833ac4bfa24fa3d84 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -40,7 +40,6 @@ Features which NILFS2 does not support yet:
        - POSIX ACLs
        - quotas
        - fsck
-       - resize
        - defragmentation
 
 Mount options
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f48178024067fd48fc3454806bdaa366c64dd966..db3b1aba32a3f9c0d80ce0cde2d8b6f1943f4dea 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
  TASKLET:          0          0          0        290
    SCHED:      27035      26983      26971      26746
  HRTIMER:          0          0          0          0
+     RCU:       1678       1769       2178       2250
 
 
 1.3 IDE devices in /proc/ide
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg
index 84d2623810f31ade6b45315297ee755c6985209d..de91c0db5846f5e5c10a1cff5039b8edf542f421 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg
@@ -22,6 +22,10 @@ Supported chips:
     Prefix: 'f71869'
     Addresses scanned: none, address read from Super I/O config space
     Datasheet: Available from the Fintek website
+  * Fintek F71869A
+    Prefix: 'f71869a'
+    Addresses scanned: none, address read from Super I/O config space
+    Datasheet: Not public
   * Fintek F71882FG and F71883FG
     Prefix: 'f71882fg'
     Addresses scanned: none, address read from Super I/O config space
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index 0393c89277c021b3e64921a7cda1007cb4f4d602..a10f73624ad3d8f65dd365ba1ca225dcf64c9b01 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -9,8 +9,8 @@ Supported chips:
   Socket S1G3: Athlon II, Sempron, Turion II
 * AMD Family 11h processors:
   Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
-* AMD Family 12h processors: "Llano"
-* AMD Family 14h processors: "Brazos" (C/E/G-Series)
+* AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
+* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
 * AMD Family 15h processors: "Bulldozer"
 
   Prefix: 'k10temp'
@@ -20,12 +20,16 @@ Supported chips:
     http://support.amd.com/us/Processor_TechDocs/31116.pdf
   BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41256.pdf
+  BIOS and Kernel Developer's Guide (BKDG) for AMD Family 12h Processors:
+    http://support.amd.com/us/Processor_TechDocs/41131.pdf
   BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
     http://support.amd.com/us/Processor_TechDocs/43170.pdf
   Revision Guide for AMD Family 10h Processors:
     http://support.amd.com/us/Processor_TechDocs/41322.pdf
   Revision Guide for AMD Family 11h Processors:
     http://support.amd.com/us/Processor_TechDocs/41788.pdf
+  Revision Guide for AMD Family 12h Processors:
+    http://support.amd.com/us/Processor_TechDocs/44739.pdf
   Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
     http://support.amd.com/us/Processor_TechDocs/47534.pdf
   AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fd248a318211a189163f2c42ba911a44284ea0bc..aa47be71df4c12ddeb15f7abead764279b20f113 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2015,6 +2015,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                the default.
                                off: Turn ECRC off
                                on: Turn ECRC on.
+               realloc         reallocate PCI resources if allocations done by BIOS
+                               are erroneous.
 
        pcie_aspm=      [PCIE] Forcibly enable or disable PCIe Active State Power
                        Management.
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 1565eefd6fd52a4c225fd19f4036164b62af2cbc..61815483efa34d9f7118d4aa91b321c66d42db44 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -534,6 +534,8 @@ Events that are never propagated by the driver:
 0x2404         System is waking up from hibernation to undock
 0x2405         System is waking up from hibernation to eject bay
 0x5010         Brightness level changed/control event
+0x6000         KEYBOARD: Numlock key pressed
+0x6005         KEYBOARD: Fn key pressed (TO BE VERIFIED)
 
 Events that are propagated by the driver to userspace:
 
@@ -545,6 +547,8 @@ Events that are propagated by the driver to userspace:
 0x3006         Bay hotplug request (hint to power up SATA link when
                the optical drive tray is ejected)
 0x4003         Undocked (see 0x2x04), can sleep again
+0x4010         Docked into hotplug port replicator (non-ACPI dock)
+0x4011         Undocked from hotplug port replicator (non-ACPI dock)
 0x500B         Tablet pen inserted into its storage bay
 0x500C         Tablet pen removed from its storage bay
 0x6011         ALARM: battery is too hot
@@ -552,6 +556,7 @@ Events that are propagated by the driver to userspace:
 0x6021         ALARM: a sensor is too hot
 0x6022         ALARM: a sensor is extremely hot
 0x6030         System thermal table changed
+0x6040         Nvidia Optimus/AC adapter related (TO BE VERIFIED)
 
 Battery nearly empty alarms are a last resort attempt to get the
 operating system to hibernate or shutdown cleanly (0x2313), or shutdown
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d3d653a5f9b923be1ab518cba040e9ccb3868f3e..bfe924217f246a8c0a10846e3a034201a9b9095a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -346,7 +346,7 @@ tcp_orphan_retries - INTEGER
        when RTO retransmissions remain unacknowledged.
        See tcp_retries2 for more details.
 
-       The default value is 7.
+       The default value is 8.
        If your machine is a loaded WEB server,
        you should think about lowering this value, such sockets
        may consume significant resources. Cf. tcp_max_orphans.
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 88880839ece4e84f8101b4cfd50e3e22b8024515..64565aac6e4009b1bca8f963cf03f900e319d538 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -520,59 +520,20 @@ Support for power domains is provided through the pwr_domain field of struct
 device.  This field is a pointer to an object of type struct dev_power_domain,
 defined in include/linux/pm.h, providing a set of power management callbacks
 analogous to the subsystem-level and device driver callbacks that are executed
-for the given device during all power transitions, in addition to the respective
-subsystem-level callbacks.  Specifically, the power domain "suspend" callbacks
-(i.e. ->runtime_suspend(), ->suspend(), ->freeze(), ->poweroff(), etc.) are
-executed after the analogous subsystem-level callbacks, while the power domain
-"resume" callbacks (i.e. ->runtime_resume(), ->resume(), ->thaw(), ->restore,
-etc.) are executed before the analogous subsystem-level callbacks.  Error codes
-returned by the "suspend" and "resume" power domain callbacks are ignored.
-
-Power domain ->runtime_idle() callback is executed before the subsystem-level
-->runtime_idle() callback and the result returned by it is not ignored.  Namely,
-if it returns error code, the subsystem-level ->runtime_idle() callback will not
-be called and the helper function rpm_idle() executing it will return error
-code.  This mechanism is intended to help platforms where saving device state
-is a time consuming operation and should only be carried out if all devices
-in the power domain are idle, before turning off the shared power resource(s).
-Namely, the power domain ->runtime_idle() callback may return error code until
-the pm_runtime_idle() helper (or its asychronous version) has been called for
-all devices in the power domain (it is recommended that the returned error code
-be -EBUSY in those cases), preventing the subsystem-level ->runtime_idle()
-callback from being run prematurely.
-
-The support for device power domains is only relevant to platforms needing to
-use the same subsystem-level (e.g. platform bus type) and device driver power
-management callbacks in many different power domain configurations and wanting
-to avoid incorporating the support for power domains into the subsystem-level
-callbacks.  The other platforms need not implement it or take it into account
-in any way.
-
-
-System Devices
---------------
-System devices (sysdevs) follow a slightly different API, which can be found in
-
-       include/linux/sysdev.h
-       drivers/base/sys.c
-
-System devices will be suspended with interrupts disabled, and after all other
-devices have been suspended.  On resume, they will be resumed before any other
-devices, and also with interrupts disabled.  These things occur in special
-"sysdev_driver" phases, which affect only system devices.
-
-Thus, after the suspend_noirq (or freeze_noirq or poweroff_noirq) phase, when
-the non-boot CPUs are all offline and IRQs are disabled on the remaining online
-CPU, then a sysdev_driver.suspend phase is carried out, and the system enters a
-sleep state (or a system image is created).  During resume (or after the image
-has been created or loaded) a sysdev_driver.resume phase is carried out, IRQs
-are enabled on the only online CPU, the non-boot CPUs are enabled, and the
-resume_noirq (or thaw_noirq or restore_noirq) phase begins.
-
-Code to actually enter and exit the system-wide low power state sometimes
-involves hardware details that are only known to the boot firmware, and
-may leave a CPU running software (from SRAM or flash memory) that monitors
-the system and manages its wakeup sequence.
+for the given device during all power transitions, instead of the respective
+subsystem-level callbacks.  Specifically, if a device's pm_domain pointer is
+not NULL, the ->suspend() callback from the object pointed to by it will be
+executed instead of its subsystem's (e.g. bus type's) ->suspend() callback and
+analogously for all of the remaining callbacks.  In other words, power management
+domain callbacks, if defined for the given device, always take precedence over
+the callbacks provided by the device's subsystem (e.g. bus type).
+
+The support for device power management domains is only relevant to platforms
+needing to use the same device driver power management callbacks in many
+different power domain configurations and wanting to avoid incorporating the
+support for power domains into subsystem-level callbacks, for example by
+modifying the platform bus type.  Other platforms need not implement it or take
+it into account in any way.
 
 
 Device Low Power (suspend) States
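To make the callback-precedence rule above concrete, a minimal sketch of
installing a power domain on a device, using the 3.0-era names quoted in
the surrounding text (struct dev_power_domain and the device's pwr_domain
field); the my_* callbacks are illustrative:

        static int my_domain_runtime_suspend(struct device *dev)
        {
                /* Runs in place of the subsystem's ->runtime_suspend():
                 * save device state, then gate the domain's shared resources. */
                return 0;
        }

        static int my_domain_runtime_resume(struct device *dev)
        {
                /* Ungate shared resources, then restore device state. */
                return 0;
        }

        static struct dev_power_domain my_power_domain = {
                .ops = {
                        .runtime_suspend = my_domain_runtime_suspend,
                        .runtime_resume  = my_domain_runtime_resume,
                        /* system sleep callbacks (->suspend() etc.) go here too */
                },
        };

        /* In platform code, before drivers bind to the device: */
        dev->pwr_domain = &my_power_domain;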
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 654097b130b46175c4c5165ded5f535718266cc7..b24875b1ced5f04ff7caff5a6f99f89ebe3a32ff 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -501,13 +501,29 @@ helper functions described in Section 4.  In that case, pm_runtime_resume()
 should be used.  Of course, for this purpose the device's run-time PM has to be
 enabled earlier by calling pm_runtime_enable().
 
-If the device bus type's or driver's ->probe() or ->remove() callback runs
+If the device bus type's or driver's ->probe() callback runs
 pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts,
 they will fail returning -EAGAIN, because the device's usage counter is
-incremented by the core before executing ->probe() and ->remove().  Still, it
-may be desirable to suspend the device as soon as ->probe() or ->remove() has
-finished, so the PM core uses pm_runtime_idle_sync() to invoke the
-subsystem-level idle callback for the device at that time.
+incremented by the driver core before executing ->probe().  Still, it may be
+desirable to suspend the device as soon as ->probe() has finished, so the driver
+core uses pm_runtime_put_sync() to invoke the subsystem-level idle callback for
+the device at that time.
+
+Moreover, the driver core prevents runtime PM callbacks from racing with the bus
+notifier callback in __device_release_driver(), which is necessary, because the
+notifier is used by some subsystems to carry out operations affecting the
+runtime PM functionality.  It does so by calling pm_runtime_get_sync() before
+driver_sysfs_remove() and the BUS_NOTIFY_UNBIND_DRIVER notifications.  This
+resumes the device if it's in the suspended state and prevents it from
+being suspended again while those routines are being executed.
+
+To allow bus types and drivers to put devices into the suspended state by
+calling pm_runtime_suspend() from their ->remove() routines, the driver core
+executes pm_runtime_put_sync() after running the BUS_NOTIFY_UNBIND_DRIVER
+notifications in __device_release_driver().  This requires bus types and
+drivers to make their ->remove() callbacks avoid races with runtime PM directly,
+but it also allows more flexibility in the handling of devices during the
+removal of their drivers.
 
 The user space can effectively disallow the driver of the device to power manage
 it at run time by changing the value of its /sys/devices/.../power/control
@@ -566,11 +582,6 @@ to do this is:
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
-The PM core always increments the run-time usage counter before calling the
-->prepare() callback and decrements it after calling the ->complete() callback.
-Hence disabling run-time PM temporarily like this will not cause any run-time
-suspend callbacks to be lost.
-
 7. Generic subsystem callbacks
 
 Subsystems may wish to conserve code space by using the set of generic power
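As a complement to the probe/remove rules above, a minimal sketch of a
platform driver pairing the helpers quoted earlier with
pm_runtime_disable() on removal (the my_* names are illustrative):

        #include <linux/platform_device.h>
        #include <linux/pm_runtime.h>

        static int my_probe(struct platform_device *pdev)
        {
                /* probe leaves the hardware powered up, so mark it active
                 * before allowing the core to power manage it */
                pm_runtime_set_active(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
                return 0;
        }

        static int my_remove(struct platform_device *pdev)
        {
                pm_runtime_disable(&pdev->dev);
                return 0;
        }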
diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt
index 2e3c64b1a6a5a64ede63c9e76377f79514e95088..9dbe885ecd8d130e8a1e0300f05686dea59440c6 100644
--- a/Documentation/spinlocks.txt
+++ b/Documentation/spinlocks.txt
@@ -13,18 +13,8 @@ static DEFINE_SPINLOCK(xxx_lock);
 The above is always safe. It will disable interrupts _locally_, but the
 spinlock itself will guarantee the global lock, so it will guarantee that
 there is only one thread-of-control within the region(s) protected by that
-lock. This works well even under UP. The above sequence under UP
-essentially is just the same as doing
-
-       unsigned long flags;
-
-       save_flags(flags); cli();
-        ... critical section ...
-       restore_flags(flags);
-
-so the code does _not_ need to worry about UP vs SMP issues: the spinlocks
-work correctly under both (and spinlocks are actually more efficient on
-architectures that allow doing the "save_flags + cli" in one operation).
+lock. This works well even under UP, so the code does _not_ need to
+worry about UP vs SMP issues: the spinlocks work correctly under both.
 
    NOTE! Implications of spin_locks for memory are further described in:
 
@@ -36,27 +26,7 @@ The above is usually pretty simple (you usually need and want only one
 spinlock for most things - using more than one spinlock can make things a
 lot more complex and even slower and is usually worth it only for
 sequences that you _know_ need to be split up: avoid it at all cost if you
-aren't sure). HOWEVER, it _does_ mean that if you have some code that does
-
-       cli();
-       .. critical section ..
-       sti();
-
-and another sequence that does
-
-       spin_lock_irqsave(flags);
-       .. critical section ..
-       spin_unlock_irqrestore(flags);
-
-then they are NOT mutually exclusive, and the critical regions can happen
-at the same time on two different CPU's. That's fine per se, but the
-critical regions had better be critical for different things (ie they
-can't stomp on each other).
-
-The above is a problem mainly if you end up mixing code - for example the
-routines in ll_rw_block() tend to use cli/sti to protect the atomicity of
-their actions, and if a driver uses spinlocks instead then you should
-think about issues like the above.
+aren't sure).
 
 This is really the only really hard part about spinlocks: once you start
 using spinlocks they tend to expand to areas you might not have noticed
@@ -120,11 +90,10 @@ Lesson 3: spinlocks revisited.
 
 The single spin-lock primitives above are by no means the only ones. They
 are the most safe ones, and the ones that work under all circumstances,
-but partly _because_ they are safe they are also fairly slow. They are
-much faster than a generic global cli/sti pair, but slower than they'd
-need to be, because they do have to disable interrupts (which is just a
-single instruction on a x86, but it's an expensive one - and on other
-architectures it can be worse).
+but partly _because_ they are safe they are also fairly slow. They are slower
+than they'd need to be, because they do have to disable interrupts
+(which is just a single instruction on an x86, but it's an expensive one -
+and on other architectures it can be worse).
 
 If you have a case where you have to protect a data structure across
 several CPU's and you want to use spinlocks you can potentially use
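To make Lesson 1 concrete, a minimal sketch of the irqsave pattern
discussed above, reusing the xxx_lock from the start of this document:

        static DEFINE_SPINLOCK(xxx_lock);

        static void frob_shared_state(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&xxx_lock, flags);    /* disables interrupts locally */
                /* ... critical section: correct on both UP and SMP ... */
                spin_unlock_irqrestore(&xxx_lock, flags);
        }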
diff --git a/Documentation/usb/error-codes.txt b/Documentation/usb/error-codes.txt
index d83703ea74b21285fbe6fea9eff1d00753eb51e0..b3f606b81a03926e287a13afa443ed8cd44d47a7 100644
--- a/Documentation/usb/error-codes.txt
+++ b/Documentation/usb/error-codes.txt
@@ -76,6 +76,13 @@ A transfer's actual_length may be positive even when an error has been
 reported.  That's because transfers often involve several packets, so that
 one or more packets could finish before an error stops further endpoint I/O.
 
+For isochronous URBs, the urb status value is non-zero only if the URB is
+unlinked, the device is removed, the host controller is disabled, or the total
+transferred length is less than the requested length and the URB_SHORT_NOT_OK
+flag is set.  Completion handlers for isochronous URBs should only see
+urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
+Individual frame descriptor status fields may report more status codes.
+
 
 0                      Transfer completed successfully
 
@@ -132,7 +139,7 @@ one or more packets could finish before an error stops further endpoint I/O.
                        device removal events immediately.
 
 -EXDEV                 ISO transfer only partially completed
-                       look at individual frame status for details
+                       (only set in iso_frame_desc[n].status, not urb->status)
 
 -EINVAL                        ISO madness, if this happens: Log off and go home
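
[Editor's note: the isochronous rules documented above translate into
completion-handler code along the following lines. This is a sketch only,
with a hypothetical handler name, using the standard urb fields:

	#include <linux/usb.h>

	static void xxx_iso_complete(struct urb *urb)
	{
		int i;

		/* Per the text above, urb->status here can only be 0,
		 * -ENOENT, -ECONNRESET, -ESHUTDOWN or -EREMOTEIO. */
		if (urb->status)
			return;

		for (i = 0; i < urb->number_of_packets; i++) {
			if (urb->iso_frame_desc[i].status)
				continue;	/* e.g. -EXDEV: this frame failed */
			/* consume urb->iso_frame_desc[i].actual_length bytes */
		}
	}
]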
 
index 9b7221a86df291e6c651e66a3bebc41c69256633..7c3a8801b7ce0c20b3395022d937427405f138dd 100644 (file)
@@ -674,7 +674,7 @@ Protocol:   2.10+
 
 Field name:    init_size
 Type:          read
-Offset/size:   0x25c/4
+Offset/size:   0x260/4
 
   This field indicates the amount of linear contiguous memory starting
   at the kernel runtime start address that the kernel needs before it
index 6c59eb90fdf4267a0629674f6546fb5747cc63ff..187282da92137a2ea147bcb0f664e0e99d90e681 100644 (file)
@@ -594,6 +594,16 @@ S: Maintained
 F:     arch/arm/lib/floppydma.S
 F:     arch/arm/include/asm/floppy.h
 
+ARM PMU PROFILING AND DEBUGGING
+M:     Will Deacon <will.deacon@arm.com>
+S:     Maintained
+F:     arch/arm/kernel/perf_event*
+F:     arch/arm/oprofile/common.c
+F:     arch/arm/kernel/pmu.c
+F:     arch/arm/include/asm/pmu.h
+F:     arch/arm/kernel/hw_breakpoint.c
+F:     arch/arm/include/asm/hw_breakpoint.h
+
 ARM PORT
 M:     Russell King <linux@arm.linux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1345,16 +1355,18 @@ F:      drivers/auxdisplay/
 F:     include/linux/cfag12864b.h
 
 AVR32 ARCHITECTURE
-M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
+M:     Haavard Skinnemoen <hskinnemoen@gmail.com>
+M:     Hans-Christian Egtvedt <egtvedt@samfundet.no>
 W:     http://www.atmel.com/products/AVR32/
 W:     http://avr32linux.org/
 W:     http://avrfreaks.net/
-S:     Supported
+S:     Maintained
 F:     arch/avr32/
 
 AVR32/AT32AP MACHINE SUPPORT
-M:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
-S:     Supported
+M:     Haavard Skinnemoen <hskinnemoen@gmail.com>
+M:     Hans-Christian Egtvedt <egtvedt@samfundet.no>
+S:     Maintained
 F:     arch/avr32/mach-at32ap/
 
 AX.25 NETWORK LAYER
@@ -1390,7 +1402,6 @@ F:        include/linux/backlight.h
 BATMAN ADVANCED
 M:     Marek Lindner <lindner_marek@yahoo.de>
 M:     Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-M:     Sven Eckelmann <sven@narfation.org>
 L:     b.a.t.m.a.n@lists.open-mesh.org
 W:     http://www.open-mesh.org/
 S:     Maintained
@@ -1423,7 +1434,6 @@ S:        Supported
 F:     arch/blackfin/
 
 BLACKFIN EMAC DRIVER
-M:     Michael Hennerich <michael.hennerich@analog.com>
 L:     uclinux-dist-devel@blackfin.uclinux.org
 W:     http://blackfin.uclinux.org
 S:     Supported
@@ -1639,7 +1649,7 @@ CAN NETWORK LAYER
 M:     Oliver Hartkopp <socketcan@hartkopp.net>
 M:     Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
 M:     Urs Thuermann <urs.thuermann@volkswagen.de>
-L:     socketcan-core@lists.berlios.de
+L:     socketcan-core@lists.berlios.de (subscribers-only)
 L:     netdev@vger.kernel.org
 W:     http://developer.berlios.de/projects/socketcan/
 S:     Maintained
@@ -1651,7 +1661,7 @@ F:        include/linux/can/raw.h
 
 CAN NETWORK DRIVERS
 M:     Wolfgang Grandegger <wg@grandegger.com>
-L:     socketcan-core@lists.berlios.de
+L:     socketcan-core@lists.berlios.de (subscribers-only)
 L:     netdev@vger.kernel.org
 W:     http://developer.berlios.de/projects/socketcan/
 S:     Maintained
@@ -2197,7 +2207,7 @@ F:        drivers/acpi/dock.c
 DOCUMENTATION
 M:     Randy Dunlap <rdunlap@xenotime.net>
 L:     linux-doc@vger.kernel.org
-T:     quilt oss.oracle.com/~rdunlap/kernel-doc-patches/current/
+T:     quilt http://userweb.kernel.org/~rdunlap/kernel-doc-patches/current/
 S:     Maintained
 F:     Documentation/
 
@@ -2291,8 +2301,7 @@ F:        drivers/scsi/eata_pio.*
 
 EBTABLES
 M:     Bart De Schuymer <bart.de.schuymer@pandora.be>
-L:     ebtables-user@lists.sourceforge.net
-L:     ebtables-devel@lists.sourceforge.net
+L:     netfilter-devel@vger.kernel.org
 W:     http://ebtables.sourceforge.net/
 S:     Maintained
 F:     include/linux/netfilter_bridge/ebt_*.h
@@ -4983,7 +4992,7 @@ F:        drivers/power/power_supply*
 
 PNP SUPPORT
 M:     Adam Belay <abelay@mit.edu>
-M:     Bjorn Helgaas <bjorn.helgaas@hp.com>
+M:     Bjorn Helgaas <bhelgaas@google.com>
 S:     Maintained
 F:     drivers/pnp/
 
@@ -5182,6 +5191,7 @@ S:        Supported
 F:     drivers/net/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
+M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:     Ron Mercer <ron.mercer@qlogic.com>
 M:     linux-driver@qlogic.com
 L:     netdev@vger.kernel.org
@@ -6435,8 +6445,9 @@ S:        Maintained
 F:     drivers/usb/misc/rio500*
 
 USB EHCI DRIVER
+M:     Alan Stern <stern@rowland.harvard.edu>
 L:     linux-usb@vger.kernel.org
-S:     Orphan
+S:     Maintained
 F:     Documentation/usb/ehci.txt
 F:     drivers/usb/host/ehci*
 
@@ -6463,9 +6474,15 @@ M:       Jiri Kosina <jkosina@suse.cz>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
 S:     Maintained
-F:     Documentation/usb/hiddev.txt
+F:     Documentation/hid/hiddev.txt
 F:     drivers/hid/usbhid/
 
+USB/IP DRIVERS
+M:     Matt Mooney <mfm@muteddisk.com>
+L:     linux-usb@vger.kernel.org
+S:     Maintained
+F:     drivers/staging/usbip/
+
 USB ISP116X DRIVER
 M:     Olav Kongas <ok@artecdesign.ee>
 L:     linux-usb@vger.kernel.org
@@ -6495,8 +6512,9 @@ S:        Maintained
 F:     sound/usb/midi.*
 
 USB OHCI DRIVER
+M:     Alan Stern <stern@rowland.harvard.edu>
 L:     linux-usb@vger.kernel.org
-S:     Orphan
+S:     Maintained
 F:     Documentation/usb/ohci.txt
 F:     drivers/usb/host/ohci*
 
@@ -6725,6 +6743,7 @@ F:        fs/fat/
 VIDEOBUF2 FRAMEWORK
 M:     Pawel Osciak <pawel@osciak.com>
 M:     Marek Szyprowski <m.szyprowski@samsung.com>
+M:     Kyungmin Park <kyungmin.park@samsung.com>
 L:     linux-media@vger.kernel.org
 S:     Maintained
 F:     drivers/media/video/videobuf2-*
@@ -7007,6 +7026,13 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:     Maintained
 F:     drivers/platform/x86
 
+X86 MCE INFRASTRUCTURE
+M:     Tony Luck <tony.luck@intel.com>
+M:     Borislav Petkov <bp@amd64.org>
+L:     linux-edac@vger.kernel.org
+S:     Maintained
+F:     arch/x86/kernel/cpu/mcheck/*
+
 XEN HYPERVISOR INTERFACE
 M:     Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
index 0499c2ee8541f7dfde385a74ea27270be75e45a0..60d91f76c2fd1c94af79dd994d79d46a131c6cbd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc7
 NAME = Sneaky Weasel
 
 # *DOCUMENTATION*
diff --git a/README b/README
index 8510017a357619eb2998ad423fb4580673abf086..0d5a7ddbe3ee8d108bf7079909ddcec30dfb4560 100644 (file)
--- a/README
+++ b/README
@@ -1,6 +1,6 @@
-       Linux kernel release 2.6.xx <http://kernel.org/>
+       Linux kernel release 3.x <http://kernel.org/>
 
-These are the release notes for Linux version 2.6.  Read them carefully,
+These are the release notes for Linux version 3.  Read them carefully,
 as they tell you what this is all about, explain how to install the
 kernel, and what to do if something goes wrong. 
 
@@ -62,10 +62,10 @@ INSTALLING the kernel source:
    directory where you have permissions (eg. your home directory) and
    unpack it:
 
-               gzip -cd linux-2.6.XX.tar.gz | tar xvf -
+               gzip -cd linux-3.X.tar.gz | tar xvf -
 
    or
-               bzip2 -dc linux-2.6.XX.tar.bz2 | tar xvf -
+               bzip2 -dc linux-3.X.tar.bz2 | tar xvf -
 
 
    Replace "XX" with the version number of the latest kernel.
@@ -75,15 +75,15 @@ INSTALLING the kernel source:
    files.  They should match the library, and not get messed up by
    whatever the kernel-du-jour happens to be.
 
- - You can also upgrade between 2.6.xx releases by patching.  Patches are
+ - You can also upgrade between 3.x releases by patching.  Patches are
    distributed in the traditional gzip and the newer bzip2 format.  To
    install by patching, get all the newer patch files, enter the
-   top level directory of the kernel source (linux-2.6.xx) and execute:
+   top level directory of the kernel source (linux-3.x) and execute:
 
-               gzip -cd ../patch-2.6.xx.gz | patch -p1
+               gzip -cd ../patch-3.x.gz | patch -p1
 
    or
-               bzip2 -dc ../patch-2.6.xx.bz2 | patch -p1
+               bzip2 -dc ../patch-3.x.bz2 | patch -p1
 
   (repeat x for all versions bigger than the version of your current
    source tree, _in_order_) and you should be ok.  You may want to remove
@@ -91,9 +91,9 @@ INSTALLING the kernel source:
   failed patches (xxx# or xxx.rej). If there are, either you or I have
   made a mistake.
 
-   Unlike patches for the 2.6.x kernels, patches for the 2.6.x.y kernels
+   Unlike patches for the 3.x kernels, patches for the 3.x.y kernels
    (also known as the -stable kernels) are not incremental but instead apply
-   directly to the base 2.6.x kernel.  Please read
+   directly to the base 3.x kernel.  Please read
    Documentation/applying-patches.txt for more information.
 
    Alternatively, the script patch-kernel can be used to automate this
@@ -107,14 +107,14 @@ INSTALLING the kernel source:
    an alternative directory can be specified as the second argument.
 
  - If you are upgrading between releases using the stable series patches
-   (for example, patch-2.6.xx.y), note that these "dot-releases" are
-   not incremental and must be applied to the 2.6.xx base tree. For
-   example, if your base kernel is 2.6.12 and you want to apply the
-   2.6.12.3 patch, you do not and indeed must not first apply the
-   2.6.12.1 and 2.6.12.2 patches. Similarly, if you are running kernel
-   version 2.6.12.2 and want to jump to 2.6.12.3, you must first
-   reverse the 2.6.12.2 patch (that is, patch -R) _before_ applying
-   the 2.6.12.3 patch.
+   (for example, patch-3.x.y), note that these "dot-releases" are
+   not incremental and must be applied to the 3.x base tree. For
+   example, if your base kernel is 3.0 and you want to apply the
+   3.0.3 patch, you do not and indeed must not first apply the
+   3.0.1 and 3.0.2 patches. Similarly, if you are running kernel
+   version 3.0.2 and want to jump to 3.0.3, you must first
+   reverse the 3.0.2 patch (that is, patch -R) _before_ applying
+   the 3.0.3 patch.
    You can read more on this in Documentation/applying-patches.txt
 
  - Make sure you have no stale .o files and dependencies lying around:
@@ -126,7 +126,7 @@ INSTALLING the kernel source:
 
 SOFTWARE REQUIREMENTS
 
-   Compiling and running the 2.6.xx kernels requires up-to-date
+   Compiling and running the 3.x kernels requires up-to-date
    versions of various software packages.  Consult
    Documentation/Changes for the minimum version numbers required
    and how to get updates for these packages.  Beware that using
@@ -142,11 +142,11 @@ BUILD directory for the kernel:
   Using the option "make O=output/dir" allows you to specify an alternate
    place for the output files (including .config).
    Example:
-     kernel source code:       /usr/src/linux-2.6.N
+     kernel source code:       /usr/src/linux-3.N
      build directory:          /home/name/build/kernel
 
    To configure and build the kernel use:
-   cd /usr/src/linux-2.6.N
+   cd /usr/src/linux-3.N
    make O=/home/name/build/kernel menuconfig
    make O=/home/name/build/kernel
    sudo make O=/home/name/build/kernel modules_install install
index 8af56ce346add9d2bebd26641f30656141464a03..445dc42e033495be2ac6b1ae37fe3a8221c483e8 100644 (file)
@@ -56,7 +56,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
  * Given a kernel address, find the home node of the underlying memory.
  */
 #define kvaddr_to_nid(kaddr)   pa_to_nid(__pa(kaddr))
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
 
 /*
  * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
index 9adc278a22abb36df1bdfdacd1db1fe240f8d0fb..84fda2bebd7aab75c8f77c6da0fc4b13cee99b4f 100644 (file)
@@ -37,6 +37,9 @@ config ARM
          Europe.  There is an ARM Linux project with a web page at
          <http://www.arm.linux.org.uk/>.
 
+config ARM_HAS_SG_CHAIN
+       bool
+
 config HAVE_PWM
        bool
 
@@ -1346,7 +1349,6 @@ config SMP_ON_UP
 
 config HAVE_ARM_SCU
        bool
-       depends on SMP
        help
          This option enables support for the ARM system coherency unit
 
@@ -1715,17 +1717,34 @@ config ZBOOT_ROM
          Say Y here if you intend to execute your compressed kernel image
          (zImage) directly from ROM or flash.  If unsure, say N.
 
+choice
+       prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)"
+       depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
+       default ZBOOT_ROM_NONE
+       help
+         Include experimental SD/MMC loading code in the ROM-able zImage.
+         With this enabled it is possible to write the ROM-able zImage
+         kernel image to an MMC or SD card and boot the kernel straight
+         from the reset vector. At reset the processor Mask ROM will load
+         the first part of the ROM-able zImage, which in turn loads the
+         rest of the kernel image to RAM.
+
+config ZBOOT_ROM_NONE
+       bool "No SD/MMC loader in zImage (EXPERIMENTAL)"
+       help
+         Do not load an image from SD or MMC.
+
 config ZBOOT_ROM_MMCIF
        bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
-       depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
        help
-         Say Y here to include experimental MMCIF loading code in the
-         ROM-able zImage. With this enabled it is possible to write the
-         the ROM-able zImage kernel image to an MMC card and boot the
-         kernel straight from the reset vector. At reset the processor
-         Mask ROM will load the first part of the the ROM-able zImage
-         which in turn loads the rest the kernel image to RAM using the
-         MMCIF hardware block.
+         Load image from MMCIF hardware block.
+
+config ZBOOT_ROM_SH_MOBILE_SDHI
+       bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)"
+       help
+         Load image from SDHI hardware block.
+
+endchoice
 
 config CMDLINE
        string "Default kernel command string"
index 23aad07223035a3fcfd9346158a808d2fe423b8e..0c74a6fab95278eee8cdbc077aa0800c40da428f 100644 (file)
@@ -6,13 +6,19 @@
 
 OBJS           =
 
-# Ensure that mmcif loader code appears early in the image
+# Ensure that MMCIF loader code appears early in the image
 # to minimise the number of blocks that have to be read in
 # order to load it.
 ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
-ifeq ($(CONFIG_ARCH_SH7372),y)
 OBJS           += mmcif-sh7372.o
 endif
+
+# Ensure that SDHI loader code appears early in the image
+# to minimise the number of blocks that have to be read in
+# order to load it.
+ifeq ($(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI),y)
+OBJS           += sdhi-shmobile.o
+OBJS           += sdhi-sh7372.o
 endif
 
 AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
index c943d2e7da9dddbad978d7ea60c45339ac76360f..fe3719b516fd47370ba089b624ddd123de6a5750 100644 (file)
        /* load board-specific initialization code */
 #include <mach/zboot.h>
 
-#ifdef CONFIG_ZBOOT_ROM_MMCIF
-       /* Load image from MMC */
-       adr     sp, __tmp_stack + 128
+#if defined(CONFIG_ZBOOT_ROM_MMCIF) || defined(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI)
+       /* Load image from MMC/SD */
+       adr     sp, __tmp_stack + 256
        ldr     r0, __image_start
        ldr     r1, __image_end
        subs    r1, r1, r0
        ldr     r0, __load_base
-       bl      mmcif_loader
+       bl      mmc_loader
 
        /* Jump to loaded code */
        ldr     r0, __loaded
@@ -51,9 +51,9 @@ __loaded:
        .long   __continue
        .align
 __tmp_stack:
-       .space  128
+       .space  256
 __continue:
-#endif /* CONFIG_ZBOOT_ROM_MMCIF */
+#endif /* CONFIG_ZBOOT_ROM_MMCIF || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */
 
        b       1f
 __atags:@ tag #1
index 942fad97e4472c8e27b98068efaafc1cc518787e..e95a5989602ae3fcf54f56257ce35739593d5f86 100644 (file)
@@ -353,7 +353,8 @@ not_relocated:      mov     r0, #0
                mov     r0, #0                  @ must be zero
                mov     r1, r7                  @ restore architecture number
                mov     r2, r8                  @ restore atags pointer
-               mov     pc, r4                  @ call kernel
+ ARM(          mov     pc, r4  )               @ call kernel
+ THUMB(                bx      r4      )               @ entry point is always ARM
 
                .align  2
                .type   LC0, #object
@@ -597,6 +598,8 @@ __common_mmu_cache_on:
                sub     pc, lr, r0, lsr #32     @ properly flush pipeline
 #endif
 
+#define PROC_ENTRY_SIZE (4*5)
+
 /*
  * Here follow the relocatable cache support functions for the
  * various processors.  This is a generic hook for locating an
@@ -624,7 +627,7 @@ call_cache_fn:      adr     r12, proc_types
  ARM(          addeq   pc, r12, r3             ) @ call cache function
  THUMB(                addeq   r12, r3                 )
  THUMB(                moveq   pc, r12                 ) @ call cache function
-               add     r12, r12, #4*5
+               add     r12, r12, #PROC_ENTRY_SIZE
                b       1b
 
 /*
@@ -794,6 +797,16 @@ proc_types:
 
                .size   proc_types, . - proc_types
 
+               /*
+                * If you get a "non-constant expression in ".if" statement"
+                * error from the assembler on this line, check that you have
+                * not accidentally written a "b" instruction where you should
+                * have written W(b).
+                */
+               .if (. - proc_types) % PROC_ENTRY_SIZE != 0
+               .error "The size of one or more proc_types entries is wrong."
+               .endif
+
 /*
  * Turn off the Cache and MMU.  ARMv3 does not support
  * reading the control register, but ARMv4 does.
index 7453c8337b83a278f84b83892f48e4e979804b70..b6f61d9a5a1b5279bf8576267b5a788f9f1cfe40 100644 (file)
@@ -40,7 +40,7 @@
  * to an MMC card
  * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
  */
-asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
+asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
 {
        mmc_init_progress();
        mmc_update_progress(MMC_PROGRESS_ENTER);
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c
new file mode 100644 (file)
index 0000000..d403a8b
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Kuninori Morimoto
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Parts inspired by u-boot
+ */
+
+#include <linux/io.h>
+#include <mach/mmc.h>
+#include <linux/mmc/boot.h>
+#include <linux/mmc/tmio.h>
+
+#include "sdhi-shmobile.h"
+
+#define PORT179CR       0xe60520b3
+#define PORT180CR       0xe60520b4
+#define PORT181CR       0xe60520b5
+#define PORT182CR       0xe60520b6
+#define PORT183CR       0xe60520b7
+#define PORT184CR       0xe60520b8
+
+#define SMSTPCR3        0xe615013c
+
+#define CR_INPUT_ENABLE 0x10
+#define CR_FUNCTION1    0x01
+
+#define SDHI1_BASE     (void __iomem *)0xe6860000
+#define SDHI_BASE      SDHI1_BASE
+
+/*  SuperH Mobile SDHI loader
+ *
+ * loads the zImage from an SD card starting from block 0
+ * on physical partition 1
+ *
+ * The image must start with a vrl4 header and
+ * the zImage must start at offset 512 of the image. That is,
+ * at block 1 (=byte 512) of physical partition 1
+ *
+ * Use the following line to write the vrl4 formatted zImage
+ * to an SD card
+ * # dd if=vrl4.out of=/dev/sdx bs=512
+ */
+asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
+{
+       int high_capacity;
+
+       mmc_init_progress();
+
+       mmc_update_progress(MMC_PROGRESS_ENTER);
+        /* Initialise SDHI1 */
+        /* PORT184CR: GPIO_FN_SDHICMD1 Control */
+        __raw_writeb(CR_FUNCTION1, PORT184CR);
+        /* PORT179CR: GPIO_FN_SDHICLK1 Control */
+        __raw_writeb(CR_INPUT_ENABLE|CR_FUNCTION1, PORT179CR);
+        /* PORT183CR: GPIO_FN_SDHID1_3 Control */
+        __raw_writeb(CR_FUNCTION1, PORT183CR);
+        /* PORT182CR: GPIO_FN_SDHID1_2 Control */
+        __raw_writeb(CR_FUNCTION1, PORT182CR);
+        /* PORT181CR: GPIO_FN_SDHID1_1 Control */
+        __raw_writeb(CR_FUNCTION1, PORT181CR);
+        /* PORT180CR: GPIO_FN_SDHID1_0 Control */
+        __raw_writeb(CR_FUNCTION1, PORT180CR);
+
+        /* Enable clock to SDHI1 hardware block */
+        __raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 13), SMSTPCR3);
+
+       /* setup SDHI hardware */
+       mmc_update_progress(MMC_PROGRESS_INIT);
+       high_capacity = sdhi_boot_init(SDHI_BASE);
+       if (high_capacity < 0)
+               goto err;
+
+       mmc_update_progress(MMC_PROGRESS_LOAD);
+       /* load kernel */
+       if (sdhi_boot_do_read(SDHI_BASE, high_capacity,
+                             0, /* Kernel is at block 1 */
+                             (len + TMIO_BBS - 1) / TMIO_BBS, buf))
+               goto err;
+
+        /* Disable clock to SDHI1 hardware block */
+        __raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3);
+
+       mmc_update_progress(MMC_PROGRESS_DONE);
+
+       return;
+err:
+       for(;;);
+}
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.c b/arch/arm/boot/compressed/sdhi-shmobile.c
new file mode 100644 (file)
index 0000000..bd3d469
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Kuninori Morimoto
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Parts inspired by u-boot
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/tmio.h>
+#include <mach/sdhi.h>
+
+#define OCR_FASTBOOT           (1<<29)
+#define OCR_HCS                        (1<<30)
+#define OCR_BUSY               (1<<31)
+
+#define RESP_CMD12             0x00000030
+
+static inline u16 sd_ctrl_read16(void __iomem *base, int addr)
+{
+        return __raw_readw(base + addr);
+}
+
+static inline u32 sd_ctrl_read32(void __iomem *base, int addr)
+{
+       return __raw_readw(base + addr) |
+              __raw_readw(base + addr + 2) << 16;
+}
+
+static inline void sd_ctrl_write16(void __iomem *base, int addr, u16 val)
+{
+       __raw_writew(val, base + addr);
+}
+
+static inline void sd_ctrl_write32(void __iomem *base, int addr, u32 val)
+{
+       __raw_writew(val, base + addr);
+       __raw_writew(val >> 16, base + addr + 2);
+}
+
+#define ALL_ERROR (TMIO_STAT_CMD_IDX_ERR | TMIO_STAT_CRCFAIL |         \
+                  TMIO_STAT_STOPBIT_ERR | TMIO_STAT_DATATIMEOUT |      \
+                  TMIO_STAT_RXOVERFLOW | TMIO_STAT_TXUNDERRUN |        \
+                  TMIO_STAT_CMDTIMEOUT | TMIO_STAT_ILL_ACCESS |        \
+                  TMIO_STAT_ILL_FUNC)
+
+static int sdhi_intr(void __iomem *base)
+{
+       unsigned long state = sd_ctrl_read32(base, CTL_STATUS);
+
+       if (state & ALL_ERROR) {
+               sd_ctrl_write32(base, CTL_STATUS, ~ALL_ERROR);
+               sd_ctrl_write32(base, CTL_IRQ_MASK,
+                               ALL_ERROR |
+                               sd_ctrl_read32(base, CTL_IRQ_MASK));
+               return -EINVAL;
+       }
+       if (state & TMIO_STAT_CMDRESPEND) {
+               sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);
+               sd_ctrl_write32(base, CTL_IRQ_MASK,
+                               TMIO_STAT_CMDRESPEND |
+                               sd_ctrl_read32(base, CTL_IRQ_MASK));
+               return 0;
+       }
+       if (state & TMIO_STAT_RXRDY) {
+               sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_RXRDY);
+               sd_ctrl_write32(base, CTL_IRQ_MASK,
+                               TMIO_STAT_RXRDY | TMIO_STAT_TXUNDERRUN |
+                               sd_ctrl_read32(base, CTL_IRQ_MASK));
+               return 0;
+       }
+       if (state & TMIO_STAT_DATAEND) {
+               sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_DATAEND);
+               sd_ctrl_write32(base, CTL_IRQ_MASK,
+                               TMIO_STAT_DATAEND |
+                               sd_ctrl_read32(base, CTL_IRQ_MASK));
+               return 0;
+       }
+
+       return -EAGAIN;
+}
+
+static int sdhi_boot_wait_resp_end(void __iomem *base)
+{
+       int err = -EAGAIN, timeout = 10000000;
+
+       while (timeout--) {
+               err = sdhi_intr(base);
+               if (err != -EAGAIN)
+                       break;
+               udelay(1);
+       }
+
+       return err;
+}
+
+/* SDHI_CLK_CTRL */
+#define CLK_MMC_ENABLE                 (1 << 8)
+#define CLK_MMC_INIT                   (1 << 6)        /* clk / 256 */
+
+static void sdhi_boot_mmc_clk_stop(void __iomem *base)
+{
+       sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, 0x0000);
+       msleep(10);
+       sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, ~CLK_MMC_ENABLE &
+               sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
+       msleep(10);
+}
+
+static void sdhi_boot_mmc_clk_start(void __iomem *base)
+{
+       sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, CLK_MMC_ENABLE |
+               sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
+       msleep(10);
+       sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, CLK_MMC_ENABLE);
+       msleep(10);
+}
+
+static void sdhi_boot_reset(void __iomem *base)
+{
+       sd_ctrl_write16(base, CTL_RESET_SD, 0x0000);
+       msleep(10);
+       sd_ctrl_write16(base, CTL_RESET_SD, 0x0001);
+       msleep(10);
+}
+
+/* Set MMC clock / power.
+ * Note: This controller uses a simple divider scheme, so it cannot
+ * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
+ * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
+ * slowest setting.
+ */
+static int sdhi_boot_mmc_set_ios(void __iomem *base, struct mmc_ios *ios)
+{
+       if (sd_ctrl_read32(base, CTL_STATUS) & TMIO_STAT_CMD_BUSY)
+               return -EBUSY;
+
+       if (ios->clock)
+               sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL,
+                               ios->clock | CLK_MMC_ENABLE);
+
+       /* Power sequence - OFF -> ON -> UP */
+       switch (ios->power_mode) {
+       case MMC_POWER_OFF: /* power down SD bus */
+               sdhi_boot_mmc_clk_stop(base);
+               break;
+       case MMC_POWER_ON: /* power up SD bus */
+               break;
+       case MMC_POWER_UP: /* start bus clock */
+               sdhi_boot_mmc_clk_start(base);
+               break;
+       }
+
+       switch (ios->bus_width) {
+       case MMC_BUS_WIDTH_1:
+               sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x80e0);
+       break;
+       case MMC_BUS_WIDTH_4:
+               sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x00e0);
+       break;
+       }
+
+       /* Let things settle. delay taken from winCE driver */
+       udelay(140);
+
+       return 0;
+}
+
+/* These are the bitmasks the tmio chip requires to implement the MMC response
+ * types. Note that R1 and R6 are the same in this scheme. */
+#define RESP_NONE      0x0300
+#define RESP_R1        0x0400
+#define RESP_R1B       0x0500
+#define RESP_R2        0x0600
+#define RESP_R3        0x0700
+#define DATA_PRESENT   0x0800
+#define TRANSFER_READ  0x1000
+
+static int sdhi_boot_request(void __iomem *base, struct mmc_command *cmd)
+{
+       int err, c = cmd->opcode;
+
+       switch (mmc_resp_type(cmd)) {
+       case MMC_RSP_NONE: c |= RESP_NONE; break;
+       case MMC_RSP_R1:   c |= RESP_R1;   break;
+       case MMC_RSP_R1B:  c |= RESP_R1B;  break;
+       case MMC_RSP_R2:   c |= RESP_R2;   break;
+       case MMC_RSP_R3:   c |= RESP_R3;   break;
+       default:
+               return -EINVAL;
+       }
+
+       /* No interrupts so this may not be cleared */
+       sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);
+
+       sd_ctrl_write32(base, CTL_IRQ_MASK, TMIO_STAT_CMDRESPEND |
+                       sd_ctrl_read32(base, CTL_IRQ_MASK));
+       sd_ctrl_write32(base, CTL_ARG_REG, cmd->arg);
+       sd_ctrl_write16(base, CTL_SD_CMD, c);
+
+
+       sd_ctrl_write32(base, CTL_IRQ_MASK,
+                       ~(TMIO_STAT_CMDRESPEND | ALL_ERROR) &
+                       sd_ctrl_read32(base, CTL_IRQ_MASK));
+
+       err = sdhi_boot_wait_resp_end(base);
+       if (err)
+               return err;
+
+       cmd->resp[0] = sd_ctrl_read32(base, CTL_RESPONSE);
+
+       return 0;
+}
+
+static int sdhi_boot_do_read_single(void __iomem *base, int high_capacity,
+                                   unsigned long block, unsigned short *buf)
+{
+       int err, i;
+
+       /* CMD17 - Read */
+       {
+               struct mmc_command cmd;
+
+               cmd.opcode = MMC_READ_SINGLE_BLOCK | \
+                            TRANSFER_READ | DATA_PRESENT;
+               if (high_capacity)
+                       cmd.arg = block;
+               else
+                       cmd.arg = block * TMIO_BBS;
+               cmd.flags = MMC_RSP_R1;
+               err = sdhi_boot_request(base, &cmd);
+               if (err)
+                       return err;
+       }
+
+       sd_ctrl_write32(base, CTL_IRQ_MASK,
+                       ~(TMIO_STAT_DATAEND | TMIO_STAT_RXRDY |
+                         TMIO_STAT_TXUNDERRUN) &
+                       sd_ctrl_read32(base, CTL_IRQ_MASK));
+       err = sdhi_boot_wait_resp_end(base);
+       if (err)
+               return err;
+
+       sd_ctrl_write16(base, CTL_SD_XFER_LEN, TMIO_BBS);
+       for (i = 0; i < TMIO_BBS / sizeof(*buf); i++)
+               *buf++ = sd_ctrl_read16(base, RESP_CMD12);
+
+       err = sdhi_boot_wait_resp_end(base);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+int sdhi_boot_do_read(void __iomem *base, int high_capacity,
+                     unsigned long offset, unsigned short count,
+                     unsigned short *buf)
+{
+       unsigned long i;
+       int err = 0;
+
+       for (i = 0; i < count; i++) {
+               err = sdhi_boot_do_read_single(base, high_capacity, offset + i,
+                                              buf + (i * TMIO_BBS /
+                                                     sizeof(*buf)));
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+#define VOLTAGES (MMC_VDD_32_33 | MMC_VDD_33_34)
+
+int sdhi_boot_init(void __iomem *base)
+{
+       bool sd_v2 = false, sd_v1_0 = false;
+       unsigned short rca;
+       int err, high_capacity = 0;
+
+       sdhi_boot_mmc_clk_stop(base);
+       sdhi_boot_reset(base);
+
+       /* mmc0: clock 400000Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0 */
+       {
+               struct mmc_ios ios;
+               ios.power_mode = MMC_POWER_ON;
+               ios.bus_width = MMC_BUS_WIDTH_1;
+               ios.clock = CLK_MMC_INIT;
+               err = sdhi_boot_mmc_set_ios(base, &ios);
+               if (err)
+                       return err;
+       }
+
+       /* CMD0 */
+       {
+               struct mmc_command cmd;
+               msleep(1);
+               cmd.opcode = MMC_GO_IDLE_STATE;
+               cmd.arg = 0;
+               cmd.flags = MMC_RSP_NONE;
+               err = sdhi_boot_request(base, &cmd);
+               if (err)
+                       return err;
+               msleep(2);
+       }
+
+       /* CMD8 - Test for SD version 2 */
+       {
+               struct mmc_command cmd;
+               cmd.opcode = SD_SEND_IF_COND;
+               cmd.arg = (VOLTAGES != 0) << 8 | 0xaa;
+               cmd.flags = MMC_RSP_R1;
+               err = sdhi_boot_request(base, &cmd); /* Ignore error */
+               if ((cmd.resp[0] & 0xff) == 0xaa)
+                       sd_v2 = true;
+       }
+
+       /* CMD55 - Get OCR (SD) */
+       {
+               int timeout = 1000;
+               struct mmc_command cmd;
+
+               cmd.arg = 0;
+
+               do {
+                       cmd.opcode = MMC_APP_CMD;
+                       cmd.flags = MMC_RSP_R1;
+                       cmd.arg = 0;
+                       err = sdhi_boot_request(base, &cmd);
+                       if (err)
+                               break;
+
+                       cmd.opcode = SD_APP_OP_COND;
+                       cmd.flags = MMC_RSP_R3;
+                       cmd.arg = (VOLTAGES & 0xff8000);
+                       if (sd_v2)
+                               cmd.arg |= OCR_HCS;
+                       cmd.arg |= OCR_FASTBOOT;
+                       err = sdhi_boot_request(base, &cmd);
+                       if (err)
+                               break;
+
+                       msleep(1);
+               } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout);
+
+               if (!err && timeout) {
+                       if (!sd_v2)
+                               sd_v1_0 = true;
+                       high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
+               }
+       }
+
+       /* CMD1 - Get OCR (MMC) */
+       if (!sd_v2 && !sd_v1_0) {
+               int timeout = 1000;
+               struct mmc_command cmd;
+
+               do {
+                       cmd.opcode = MMC_SEND_OP_COND;
+                       cmd.arg = VOLTAGES | OCR_HCS;
+                       cmd.flags = MMC_RSP_R3;
+                       err = sdhi_boot_request(base, &cmd);
+                       if (err)
+                               return err;
+
+                       msleep(1);
+               } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout);
+
+               if (!timeout)
+                       return -EAGAIN;
+
+               high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
+       }
+
+       /* CMD2 - Get CID */
+       {
+               struct mmc_command cmd;
+               cmd.opcode = MMC_ALL_SEND_CID;
+               cmd.arg = 0;
+               cmd.flags = MMC_RSP_R2;
+               err = sdhi_boot_request(base, &cmd);
+               if (err)
+                       return err;
+       }
+
+       /* CMD3
+        * MMC: Set the relative address
+        * SD:  Get the relative address
+        * Also puts the card into the standby state
+        */
+       {
+               struct mmc_command cmd;
+               cmd.opcode = MMC_SET_RELATIVE_ADDR;
+               cmd.arg = 0;
+               cmd.flags = MMC_RSP_R1;
+               err = sdhi_boot_request(base, &cmd);
+               if (err)
+                       return err;
+               rca = cmd.resp[0] >> 16;
+       }
+
+       /* CMD9 - Get CSD */
+       {
+               struct mmc_command cmd;
+               cmd.opcode = MMC_SEND_CSD;
+               cmd.arg = rca << 16;
+               cmd.flags = MMC_RSP_R2;
+               err = sdhi_boot_request(base, &cmd);
+               if (err)
+                       return err;
+       }
+
+       /* CMD7 - Select the card */
+       {
+               struct mmc_command cmd;
+               cmd.opcode = MMC_SELECT_CARD;
+               cmd.arg = rca << 16;
+               /* the SD spec asks for R1b on CMD7; plain R1 is used here */
+               cmd.flags = MMC_RSP_R1;
+               err = sdhi_boot_request(base, &cmd);
+               if (err)
+                       return err;
+       }
+
+       /* CMD16 - Set the block size */
+       {
+               struct mmc_command cmd;
+               cmd.opcode = MMC_SET_BLOCKLEN;
+               cmd.arg = TMIO_BBS;
+               cmd.flags = MMC_RSP_R1;
+               err = sdhi_boot_request(base, &cmd);
+               if (err)
+                       return err;
+       }
+
+       return high_capacity;
+}
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.h b/arch/arm/boot/compressed/sdhi-shmobile.h
new file mode 100644 (file)
index 0000000..92eaa09
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef SDHI_MOBILE_H
+#define SDHI_MOBILE_H
+
+#include <linux/compiler.h>
+
+int sdhi_boot_do_read(void __iomem *base, int high_capacity,
+                     unsigned long offset, unsigned short count,
+                     unsigned short *buf);
+int sdhi_boot_init(void __iomem *base);
+
+#endif
index ea80abe788449444a685339c01a128e04250ceb8..4e728834a1b9dea2d0d6a41076d9a3d572424321 100644 (file)
@@ -33,20 +33,24 @@ SECTIONS
     *(.text.*)
     *(.fixup)
     *(.gnu.warning)
+    *(.glue_7t)
+    *(.glue_7)
+  }
+  .rodata : {
     *(.rodata)
     *(.rodata.*)
-    *(.glue_7)
-    *(.glue_7t)
+  }
+  .piggydata : {
     *(.piggydata)
-    . = ALIGN(4);
   }
 
+  . = ALIGN(4);
   _etext = .;
 
+  .got.plt             : { *(.got.plt) }
   _got_start = .;
   .got                 : { *(.got) }
   _got_end = .;
-  .got.plt             : { *(.got.plt) }
   _edata = .;
 
   . = BSS_START;
index e5681636626f5fab71e38ac758a4409b2a127247..595ecd290ebf3462b974da4fcf73b69ed2e3682b 100644 (file)
@@ -79,6 +79,8 @@ struct dmabounce_device_info {
        struct dmabounce_pool   large;
 
        rwlock_t lock;
+
+       int (*needs_bounce)(struct device *, dma_addr_t, size_t);
 };
 
 #ifdef STATS
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
        if (!dev || !dev->archdata.dmabounce)
                return NULL;
        if (dma_mapping_error(dev, dma_addr)) {
-               if (dev)
-                       dev_err(dev, "Trying to %s invalid mapping\n", where);
-               else
-                       pr_err("unknown device: Trying to %s invalid mapping\n", where);
+               dev_err(dev, "Trying to %s invalid mapping\n", where);
                return NULL;
        }
        return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
 }
 
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-               enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
-       struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-       dma_addr_t dma_addr;
-       int needs_bounce = 0;
-
-       if (device_info)
-               DO_STATS ( device_info->map_op_count++ );
-
-       dma_addr = virt_to_dma(dev, ptr);
+       if (!dev || !dev->archdata.dmabounce)
+               return 0;
 
        if (dev->dma_mask) {
-               unsigned long mask = *dev->dma_mask;
-               unsigned long limit;
+               unsigned long limit, mask = *dev->dma_mask;
 
                limit = (mask + 1) & ~mask;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#x "
                                "mask %#Lx)\n", size, *dev->dma_mask);
-                       return ~0;
+                       return -E2BIG;
                }
 
-               /*
-                * Figure out if we need to bounce from the DMA mask.
-                */
-               needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+               /* Figure out if we need to bounce from the DMA mask. */
+               if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+                       return 1;
        }
 
-       if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
-               struct safe_buffer *buf;
+       return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
+}
 
-               buf = alloc_safe_buffer(device_info, ptr, size, dir);
-               if (buf == 0) {
-                       dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
-                              __func__, ptr);
-                       return 0;
-               }
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+               enum dma_data_direction dir)
+{
+       struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+       struct safe_buffer *buf;
 
-               dev_dbg(dev,
-                       "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-                       __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-                       buf->safe, buf->safe_dma_addr);
+       if (device_info)
+               DO_STATS ( device_info->map_op_count++ );
 
-               if ((dir == DMA_TO_DEVICE) ||
-                   (dir == DMA_BIDIRECTIONAL)) {
-                       dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
-                               __func__, ptr, buf->safe, size);
-                       memcpy(buf->safe, ptr, size);
-               }
-               ptr = buf->safe;
+       buf = alloc_safe_buffer(device_info, ptr, size, dir);
+       if (buf == NULL) {
+               dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+                      __func__, ptr);
+               return ~0;
+       }
 
-               dma_addr = buf->safe_dma_addr;
-       } else {
-               /*
-                * We don't need to sync the DMA buffer since
-                * it was allocated via the coherent allocators.
-                */
-               __dma_single_cpu_to_dev(ptr, size, dir);
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+               buf->safe, buf->safe_dma_addr);
+
+       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+               dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+                       __func__, ptr, buf->safe, size);
+               memcpy(buf->safe, ptr, size);
        }
 
-       return dma_addr;
+       return buf->safe_dma_addr;
 }
 
-static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
                size_t size, enum dma_data_direction dir)
 {
-       struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
-
-       if (buf) {
-               BUG_ON(buf->size != size);
-               BUG_ON(buf->direction != dir);
+       BUG_ON(buf->size != size);
+       BUG_ON(buf->direction != dir);
 
-               dev_dbg(dev,
-                       "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-                       __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-                       buf->safe, buf->safe_dma_addr);
+       dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+               __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+               buf->safe, buf->safe_dma_addr);
 
-               DO_STATS(dev->archdata.dmabounce->bounce_count++);
+       DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-               if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-                       void *ptr = buf->ptr;
+       if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+               void *ptr = buf->ptr;
 
-                       dev_dbg(dev,
-                               "%s: copy back safe %p to unsafe %p size %d\n",
-                               __func__, buf->safe, ptr, size);
-                       memcpy(ptr, buf->safe, size);
+               dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+                       __func__, buf->safe, ptr, size);
+               memcpy(ptr, buf->safe, size);
 
-                       /*
-                        * Since we may have written to a page cache page,
-                        * we need to ensure that the data will be coherent
-                        * with user mappings.
-                        */
-                       __cpuc_flush_dcache_area(ptr, size);
-               }
-               free_safe_buffer(dev->archdata.dmabounce, buf);
-       } else {
-               __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
+               /*
+                * Since we may have written to a page cache page,
+                * we need to ensure that the data will be coherent
+                * with user mappings.
+                */
+               __cpuc_flush_dcache_area(ptr, size);
        }
+       free_safe_buffer(dev->archdata.dmabounce, buf);
 }
 
 /* ************************************************** */
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
-               enum dma_data_direction dir)
-{
-       dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-               __func__, ptr, size, dir);
-
-       BUG_ON(!valid_dma_direction(dir));
-
-       return map_single(dev, ptr, size, dir);
-}
-EXPORT_SYMBOL(__dma_map_single);
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer.  (basically return things back to the way they
- * should be)
- */
-void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-               enum dma_data_direction dir)
-{
-       dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-               __func__, (void *) dma_addr, size, dir);
-
-       unmap_single(dev, dma_addr, size, dir);
-}
-EXPORT_SYMBOL(__dma_unmap_single);
-
 dma_addr_t __dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+       dma_addr_t dma_addr;
+       int ret;
+
        dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
                __func__, page, offset, size, dir);
 
-       BUG_ON(!valid_dma_direction(dir));
+       dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+       ret = needs_bounce(dev, dma_addr, size);
+       if (ret < 0)
+               return ~0;
+
+       if (ret == 0) {
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+               return dma_addr;
+       }
 
        if (PageHighMem(page)) {
-               dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
-                            "is not supported\n");
+               dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
                return ~0;
        }
 
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page);
 void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction dir)
 {
-       dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-               __func__, (void *) dma_addr, size, dir);
+       struct safe_buffer *buf;
+
+       dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+               __func__, dma_addr, size, dir);
+
+       buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+       if (!buf) {
+               __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+                       dma_addr & ~PAGE_MASK, size, dir);
+               return;
+       }
 
-       unmap_single(dev, dma_addr, size, dir);
+       unmap_single(dev, buf, size, dir);
 }
 EXPORT_SYMBOL(__dma_unmap_page);
 
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 }
 
 int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-               unsigned long large_buffer_size)
+               unsigned long large_buffer_size,
+               int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
 {
        struct dmabounce_device_info *device_info;
        int ret;
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
        rwlock_init(&device_info->lock);
+       device_info->needs_bounce = needs_bounce_fn;
 
 #ifdef STATS
        device_info->total_allocs = 0;
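
[Editor's note: with the needs_bounce callback now passed at registration
time, a converted platform ends up with something like the sketch below.
The xxx names and the 64MB limit are hypothetical; the it8152 and sa1111
hunks further down show the two real conversions in full:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int xxx_needs_bounce(struct device *dev, dma_addr_t addr,
				    size_t size)
	{
		/* bounce anything the (hypothetical) 64MB window cannot reach;
		 * SZ_64M comes from the platform's sizes.h */
		return (addr + size) > SZ_64M;
	}

	static int xxx_platform_notify(struct device *dev)
	{
		return dmabounce_register_dev(dev, 2048, 4096,
					      xxx_needs_bounce);
	}
]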
index 4ddd0a6ac7ff3d9db4f55b165c172ec144e90a51..7bdd91766d65fd2d6d777e19a8838dfb5b361f61 100644 (file)
@@ -179,22 +179,21 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 {
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
        unsigned int shift = (d->irq % 4) * 8;
-       unsigned int cpu = cpumask_first(mask_val);
+       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        u32 val, mask, bit;
 
-       if (cpu >= 8)
+       if (cpu >= 8 || cpu >= nr_cpu_ids)
                return -EINVAL;
 
        mask = 0xff << shift;
        bit = 1 << (cpu + shift);
 
        spin_lock(&irq_controller_lock);
-       d->node = cpu;
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
        spin_unlock(&irq_controller_lock);
 
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 #endif
 
index 7a21927c52e184024b71fa48bdfbb380d8b55f55..14ad62e16dd1f1de122ceb83e5b16a96d69cf1b6 100644 (file)
@@ -243,6 +243,12 @@ static struct resource it8152_mem = {
  * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+       dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+               __func__, dma_addr, size);
+       return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}
 
 /*
  * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev)
                if (dev->dma_mask)
                        *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
                dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
-               dmabounce_register_dev(dev, 2048, 4096);
+               dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
        }
        return 0;
 }
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
        return 0;
 }
 
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-       dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
-               __func__, dma_addr, size);
-       return (dev->bus == &pci_bus_type) &&
-               ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
-}
-
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
        if (mask >= PHYS_OFFSET + SZ_64M - 1)
index 9c49a46a2b7a5c24b6c85ed6b45fb6fa7e5d3008..0569de6acfba66752b53bf9dd34ba213b47e5d11 100644 (file)
@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
 
        sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
 }
+#endif
 
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller.  If an
+ * access is made to a region of memory above 1MB relative to the bank
+ * base, it is important that address bit 10 _NOT_ be asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+       /*
+        * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+        * User's Guide" mentions that jumpers R51 and R52 control the
+        * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+        * SDRAM bank 1 on Neponset). The default configuration selects
+        * Assabet, so any address in bank 1 is necessarily invalid.
+        */
+       return (machine_is_assabet() || machine_is_pfs168()) &&
+               (addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
 #endif
 
 static void sa1111_dev_release(struct device *_dev)
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
                dev->dev.dma_mask = &dev->dma_mask;
 
                if (dev->dma_mask != 0xffffffffUL) {
-                       ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+                       ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+                                       sa1111_needs_bounce);
                        if (ret) {
                                dev_err(&dev->dev, "SA1111: Failed to register"
                                        " with dmabounce\n");
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip)
        kfree(sachip);
 }
 
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller.  If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted. Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
-       /*
-        * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
-        * User's Guide" mentions that jumpers R51 and R52 control the
-        * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
-        * SDRAM bank 1 on Neponset). The default configuration selects
-        * Assabet, so any address in bank 1 is necessarily invalid.
-        */
-       return ((machine_is_assabet() || machine_is_pfs168()) &&
-               (addr >= 0xc8000000 || (addr + size) >= 0xc8000000));
-}
-
 struct sa1111_save_data {
        unsigned int    skcr;
        unsigned int    skpcr;
index bc2d2d75f7068998bb69a92b69c1216ec4dbd8fe..65c3f2474f5e3333142952b4dc7a1f445bcaedb4 100644 (file)
@@ -13,6 +13,9 @@
  *  Do not include any C declarations in this file - it is included by
  *  assembler source.
  */
+#ifndef __ASM_ASSEMBLER_H__
+#define __ASM_ASSEMBLER_H__
+
 #ifndef __ASSEMBLY__
 #error "Only include this from assembly code"
 #endif
        .macro  ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  ldr, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm
+#endif /* __ASM_ASSEMBLER_H__ */
index b4892a06442cea0a6b80ed1a99e465d0126db2d0..f4280593dfa3a61b9675bfbb35fd2a2b61eb2ed2 100644 (file)
@@ -26,8 +26,8 @@
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-#define smp_mb__before_clear_bit()     mb()
-#define smp_mb__after_clear_bit()      mb()
+#define smp_mb__before_clear_bit()     smp_mb()
+#define smp_mb__after_clear_bit()      smp_mb()
 
 /*
  * These functions are the basis of our bit ops.
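
[Editor's note: relaxing the macros from mb() to smp_mb() lets them compile
away on UP builds while still ordering on SMP. A sketch of the canonical use,
with a hypothetical XXX_BUSY flag, where clear_bit() acts as an unlock:

	/* order prior stores before dropping the XXX_BUSY "lock" bit */
	smp_mb__before_clear_bit();
	clear_bit(XXX_BUSY, &xxx->flags);
]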
index 4fff837363edd93249a0884d11f0fabc2ec6db30..7a21d0bf7134d2ec16b8afeb87ea1c184ee2f717 100644 (file)
@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
                ___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-       if (mask < ISA_DMA_THRESHOLD)
-               return 0;
-       return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
-       if (dev->archdata.dmabounce) {
-               if (dma_mask >= ISA_DMA_THRESHOLD)
-                       return 0;
-               else
-                       return -EIO;
-       }
-#endif
-       if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-               return -EIO;
-
-       *dev->dma_mask = dma_mask;
-
-       return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  * @dev: valid struct device pointer
  * @small_buf_size: size of buffers to use with small buffer pool
  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
  *
  * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
  * appropriate DMA pools for the device.
- *
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
-               unsigned long);
+               unsigned long, int (*)(struct device *, dma_addr_t, size_t));
 
 /**
  * dmabounce_unregister_dev
@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
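With the new fourth argument documented above, registration supplies the bounce test directly rather than relying on a global hook; a hedged sketch of the register/unregister pairing (pool sizes and the callback are illustrative):

    static int my_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
    {
            return (addr + size - 1) > 0x00ffffff;  /* e.g. a 24-bit bus master */
    }

    /* at probe time */
    ret = dmabounce_register_dev(dev, 1024, 4096, my_needs_bounce);

    /* at remove time */
    dmabounce_unregister_dev(dev);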
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
  */
-extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
-               enum dma_data_direction);
-extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
-               enum dma_data_direction);
 extern dma_addr_t __dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
 extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
-static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
-               size_t size, enum dma_data_direction dir)
-{
-       __dma_single_cpu_to_dev(cpu_addr, size, dir);
-       return virt_to_dma(dev, cpu_addr);
-}
-
 static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir)
 {
@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
-static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir)
-{
-       __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
 static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
 {
@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
 {
+       unsigned long offset;
+       struct page *page;
        dma_addr_t addr;
 
+       BUG_ON(!virt_addr_valid(cpu_addr));
+       BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
        BUG_ON(!valid_dma_direction(dir));
 
-       addr = __dma_map_single(dev, cpu_addr, size, dir);
-       debug_dma_map_page(dev, virt_to_page(cpu_addr),
-                       (unsigned long)cpu_addr & ~PAGE_MASK, size,
-                       dir, addr, true);
+       page = virt_to_page(cpu_addr);
+       offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+       addr = __dma_map_page(dev, page, offset, size, dir);
+       debug_dma_map_page(dev, page, offset, size, dir, addr, true);
 
        return addr;
 }
@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
 {
        debug_dma_unmap_page(dev, handle, size, dir, true);
-       __dma_unmap_single(dev, handle, size, dir);
+       __dma_unmap_page(dev, handle, size, dir);
 }
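Since dma_map_single() above now funnels through __dma_map_page(), driver usage is unchanged; a sketch assuming a kmalloc'd buffer (vmalloc or highmem pointers would trip the new virt_addr_valid() BUG_ONs):

    buf = kmalloc(len, GFP_KERNEL);                 /* lowmem, linearly mapped */
    handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    /* ... device performs the DMA ... */
    dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);  /* now __dma_unmap_page() */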
 
 /**
index ec0bbf79c71fd30d6c523773a4baf6bafffe5c6f..2f1e2098dfe778211e9209ea0285e089020e5745 100644 (file)
@@ -1,9 +1,11 @@
+#include <asm/assembler.h>
+
 /*
  * Interrupt handling.  Preserves r7, r8, r9
  */
        .macro  arch_irq_handler_default
-       get_irqnr_preamble r5, lr
-1:     get_irqnr_and_base r0, r6, r5, lr
+       get_irqnr_preamble r6, lr
+1:     get_irqnr_and_base r0, r2, r6, lr
        movne   r1, sp
        @
        @ routine called with r0 = irq number, r1 = struct pt_regs *
        /*
         * XXX
         *
-        * this macro assumes that irqstat (r6) and base (r5) are
+        * this macro assumes that irqstat (r2) and base (r6) are
         * preserved from get_irqnr_and_base above
         */
-       ALT_SMP(test_for_ipi r0, r6, r5, lr)
+       ALT_SMP(test_for_ipi r0, r2, r6, lr)
        ALT_UP_B(9997f)
        movne   r1, sp
        adrne   lr, BSYM(1b)
        bne     do_IPI
 
 #ifdef CONFIG_LOCAL_TIMERS
-       test_for_ltirq r0, r6, r5, lr
+       test_for_ltirq r0, r2, r6, lr
        movne   r0, sp
        adrne   lr, BSYM(1b)
        bne     do_local_timer
@@ -38,7 +40,7 @@
        .align  5
        .global \symbol_name
 \symbol_name:
-       mov     r4, lr
+       mov     r8, lr
        arch_irq_handler_default
-       mov     pc, r4
+       mov     pc, r8
        .endm
index af44a8fb34809fda134b10e28006d76fb5c3132b..b8de516e600e855e4ece64e3a654a1d451626603 100644 (file)
@@ -203,18 +203,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define PHYS_OFFSET    PLAT_PHYS_OFFSET
 #endif
 
-/*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA.  The default here places no restriction on DMA
- * allocations.  This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
- */
-#ifndef ARM_DMA_ZONE_SIZE
-#define ISA_DMA_THRESHOLD      (0xffffffffULL)
-#else
-#define ISA_DMA_THRESHOLD      (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
-#endif
-
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
index 7544ce6b481ac4ba4a369e0b2445cbc638a15078..67c70a31a1be33c34ecdd271c343767342a83dfe 100644 (file)
@@ -52,7 +52,7 @@ reserve_pmu(enum arm_pmu_type device);
  * a cookie.
  */
 extern int
-release_pmu(struct platform_device *pdev);
+release_pmu(enum arm_pmu_type type);
 
 /**
  * init_pmu() - Initialise the PMU.
index 8ec535e11fd73c81ebdc12500c1ad24d7a9add35..633d1cb84d87cbe356496b863e24f85a5f0402ea 100644 (file)
@@ -82,13 +82,13 @@ extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 #else
-#define cpu_proc_init()                        processor._proc_init()
-#define cpu_proc_fin()                 processor._proc_fin()
-#define cpu_reset(addr)                        processor.reset(addr)
-#define cpu_do_idle()                  processor._do_idle()
-#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
-#define cpu_set_pte_ext(ptep,pte,ext)  processor.set_pte_ext(ptep,pte,ext)
-#define cpu_do_switch_mm(pgd,mm)       processor.switch_mm(pgd,mm)
+#define cpu_proc_init                  processor._proc_init
+#define cpu_proc_fin                   processor._proc_fin
+#define cpu_reset                      processor.reset
+#define cpu_do_idle                    processor._do_idle
+#define cpu_dcache_clean_area          processor.dcache_clean_area
+#define cpu_set_pte_ext                        processor.set_pte_ext
+#define cpu_do_switch_mm               processor.switch_mm
 #endif
 
 extern void cpu_resume(void);
index 2f87870d93471d79d9913da8fadfcb9f1b3efe67..cefdb8f898a15fe48482b1d47636933a434ca222 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef _ASMARM_SCATTERLIST_H
 #define _ASMARM_SCATTERLIST_H
 
+#ifdef CONFIG_ARM_HAS_SG_CHAIN
+#define ARCH_HAS_SG_CHAIN
+#endif
+
 #include <asm/memory.h>
 #include <asm/types.h>
 #include <asm-generic/scatterlist.h>
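Mapping CONFIG_ARM_HAS_SG_CHAIN onto ARCH_HAS_SG_CHAIN lets the generic scatterlist code chain tables on ARM; a usage sketch:

    struct scatterlist head[4], tail[4];

    sg_init_table(head, 4);
    sg_init_table(tail, 4);
    sg_chain(head, 4, tail);    /* entry 3 of 'head' becomes a link to 'tail',
                                   leaving three usable entries in 'head' */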
index ee2ad8ae07af7d4006d82a474cb2282654cb986a..915696dd9c7c32d4d9702d54c933d50bbc4ca1e0 100644 (file)
@@ -187,12 +187,16 @@ struct tagtable {
 
 #define __tag __used __attribute__((__section__(".taglist.init")))
 #define __tagtable(tag, fn) \
-static struct tagtable __tagtable_##fn __tag = { tag, fn }
+static const struct tagtable __tagtable_##fn __tag = { tag, fn }
 
 /*
  * Memory map description
  */
-#define NR_BANKS 8
+#ifdef CONFIG_ARCH_EP93XX
+# define NR_BANKS 16
+#else
+# define NR_BANKS 8
+#endif
 
 struct membank {
        phys_addr_t start;
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
new file mode 100644 (file)
index 0000000..b0e4e1a
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef __ASM_ARM_SUSPEND_H
+#define __ASM_ARM_SUSPEND_H
+
+#include <asm/memory.h>
+#include <asm/tlbflush.h>
+
+extern void cpu_resume(void);
+
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+       extern int __cpu_suspend(int, long, unsigned long,
+                                int (*)(unsigned long));
+       int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
+       flush_tlb_all();
+       return ret;
+}
+
+#endif
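A usage sketch for the new wrapper (the finisher and its helper are hypothetical). Per the sleep.S rework later in this merge, cpu_suspend() returns 0 after a successful resume, or the finisher's return value if the suspend aborts:

    static int board_do_suspend(unsigned long arg)
    {
            soc_enter_sleep();      /* hypothetical: cuts power, no return on success */
            return 1;               /* reached only if the suspend aborted */
    }

    ret = cpu_suspend(0, board_do_suspend);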
index 5929ef5d927abedfc26454d975449554ab49454f..8578d726ad78ffb4537e79c4d7b2b433e1d321b4 100644 (file)
@@ -27,5 +27,7 @@
 
 void *tcm_alloc(size_t len);
 void tcm_free(void *addr, size_t len);
+bool tcm_dtcm_present(void);
+bool tcm_itcm_present(void);
 
 #endif
index d2005de383b8c105cf85368a5a7539bdaae0410b..8077145698ffff09f802644021ae9980b8b226a2 100644 (file)
 #define TLB_V6_D_ASID  (1 << 17)
 #define TLB_V6_I_ASID  (1 << 18)
 
-#define TLB_BTB                (1 << 28)
-
 /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
 #define TLB_V7_UIS_PAGE        (1 << 19)
 #define TLB_V7_UIS_FULL (1 << 20)
 #define TLB_V7_UIS_ASID (1 << 21)
 
-/* Inner Shareable BTB operation (ARMv7 MP extensions) */
-#define TLB_V7_IS_BTB  (1 << 22)
-
+#define TLB_BARRIER    (1 << 28)
 #define TLB_L2CLEAN_FR (1 << 29)               /* Feroceon */
 #define TLB_DCLEAN     (1 << 30)
 #define TLB_WB         (1 << 31)
@@ -58,7 +54,7 @@
  *       v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
  *       v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
  *       fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
- *       fa    - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB))
+ *       fa    - Faraday (v4 with write buffer with UTLB)
  *       v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
  *       v7wbi - identical to v6wbi
  */
@@ -99,7 +95,7 @@
 # define v4_always_flags       (-1UL)
 #endif
 
-#define fa_tlb_flags   (TLB_WB | TLB_BTB | TLB_DCLEAN | \
+#define fa_tlb_flags   (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V4_U_FULL | TLB_V4_U_PAGE)
 
 #ifdef CONFIG_CPU_TLB_FA
 # define v4wb_always_flags     (-1UL)
 #endif
 
-#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V6_I_FULL | TLB_V6_D_FULL | \
                         TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
                         TLB_V6_I_ASID | TLB_V6_D_ASID)
 # define v6wbi_always_flags    (-1UL)
 #endif
 
-#define v7wbi_tlb_flags_smp    (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
+#define v7wbi_tlb_flags_smp    (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
-#define v7wbi_tlb_flags_up     (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags_up     (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
 
 #ifdef CONFIG_CPU_TLB_V7
@@ -341,15 +337,7 @@ static inline void local_flush_tlb_all(void)
        if (tlb_flag(TLB_V7_UIS_FULL))
                asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
 
-       if (tlb_flag(TLB_BTB)) {
-               /* flush the branch target cache */
-               asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
-               dsb();
-               isb();
-       }
-       if (tlb_flag(TLB_V7_IS_BTB)) {
-               /* flush the branch target cache */
-               asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+       if (tlb_flag(TLB_BARRIER)) {
                dsb();
                isb();
        }
@@ -389,17 +377,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
                asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
 #endif
 
-       if (tlb_flag(TLB_BTB)) {
-               /* flush the branch target cache */
-               asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
-               dsb();
-       }
-       if (tlb_flag(TLB_V7_IS_BTB)) {
-               /* flush the branch target cache */
-               asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+       if (tlb_flag(TLB_BARRIER))
                dsb();
-               isb();
-       }
 }
 
 static inline void
@@ -439,17 +418,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
 #endif
 
-       if (tlb_flag(TLB_BTB)) {
-               /* flush the branch target cache */
-               asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
-               dsb();
-       }
-       if (tlb_flag(TLB_V7_IS_BTB)) {
-               /* flush the branch target cache */
-               asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+       if (tlb_flag(TLB_BARRIER))
                dsb();
-               isb();
-       }
 }
 
 static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -482,15 +452,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
        if (tlb_flag(TLB_V7_UIS_PAGE))
                asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");
 
-       if (tlb_flag(TLB_BTB)) {
-               /* flush the branch target cache */
-               asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
-               dsb();
-               isb();
-       }
-       if (tlb_flag(TLB_V7_IS_BTB)) {
-               /* flush the branch target cache */
-               asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+       if (tlb_flag(TLB_BARRIER)) {
                dsb();
                isb();
        }
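With the per-implementation BTB flags folded into TLB_BARRIER, every flush path above converges on the same synchronisation tail; annotated with what each barrier is for (standard ARM semantics):

    if (tlb_flag(TLB_BARRIER)) {
            dsb();  /* wait for the TLB maintenance operation to complete */
            isb();  /* refetch subsequent instructions under the new mapping */
    }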
index f90756dc16dc416f8a67bd9029abad857585788f..5b29a66736250e71d9a4ee24cfb9e0cd341ea14f 100644 (file)
@@ -3,6 +3,9 @@
 
 #include <linux/list.h>
 
+struct pt_regs;
+struct task_struct;
+
 struct undef_hook {
        struct list_head node;
        u32 instr_mask;
index 927522cfc12e3b547783175a40c19f98ef4d7b4a..16baba2e436961fa9f7128278368b8a5746182d8 100644 (file)
@@ -59,6 +59,9 @@ int main(void)
   DEFINE(TI_TP_VALUE,          offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,           offsetof(struct thread_info, fpstate));
   DEFINE(TI_VFPSTATE,          offsetof(struct thread_info, vfpstate));
+#ifdef CONFIG_SMP
+  DEFINE(VFP_CPU,              offsetof(union vfp_state, hard.cpu));
+#endif
 #ifdef CONFIG_ARM_THUMBEE
   DEFINE(TI_THUMBEE_STATE,     offsetof(struct thread_info, thumbee_state));
 #endif
index 90c62cd51ca9ffc891f7c95336b1c2017097d512..fa02a22a4c4b5868959a66adc6e94003f3e489f9 100644 (file)
 #include <asm/entry-macro-multi.S>
 
 /*
- * Interrupt handling.  Preserves r7, r8, r9
+ * Interrupt handling.
  */
        .macro  irq_handler
 #ifdef CONFIG_MULTI_IRQ_HANDLER
-       ldr     r5, =handle_arch_irq
+       ldr     r1, =handle_arch_irq
        mov     r0, sp
-       ldr     r5, [r5]
+       ldr     r1, [r1]
        adr     lr, BSYM(9997f)
-       teq     r5, #0
-       movne   pc, r5
+       teq     r1, #0
+       movne   pc, r1
 #endif
        arch_irq_handler_default
 9997:
        .endm
 
+       .macro  pabt_helper
+       @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+#ifdef MULTI_PABORT
+       ldr     ip, .LCprocfns
+       mov     lr, pc
+       ldr     pc, [ip, #PROCESSOR_PABT_FUNC]
+#else
+       bl      CPU_PABORT_HANDLER
+#endif
+       .endm
+
+       .macro  dabt_helper
+
+       @
+       @ Call the processor-specific abort handler:
+       @
+       @  r2 - pt_regs
+       @  r4 - aborted context pc
+       @  r5 - aborted context psr
+       @
+       @ The abort handler must return the aborted address in r0, and
+       @ the fault status register in r1.  r9 must be preserved.
+       @
+#ifdef MULTI_DABORT
+       ldr     ip, .LCprocfns
+       mov     lr, pc
+       ldr     pc, [ip, #PROCESSOR_DABT_FUNC]
+#else
+       bl      CPU_DABORT_HANDLER
+#endif
+       .endm
+
 #ifdef CONFIG_KPROBES
        .section        .kprobes.text,"ax",%progbits
 #else
@@ -126,106 +158,74 @@ ENDPROC(__und_invalid)
  SPFIX(        subeq   sp, sp, #4      )
        stmia   sp, {r1 - r12}
 
-       ldmia   r0, {r1 - r3}
-       add     r5, sp, #S_SP - 4       @ here for interlock avoidance
-       mov     r4, #-1                 @  ""  ""      ""       ""
-       add     r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
- SPFIX(        addeq   r0, r0, #4      )
-       str     r1, [sp, #-4]!          @ save the "real" r0 copied
+       ldmia   r0, {r3 - r5}
+       add     r7, sp, #S_SP - 4       @ here for interlock avoidance
+       mov     r6, #-1                 @  ""  ""      ""       ""
+       add     r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX(        addeq   r2, r2, #4      )
+       str     r3, [sp, #-4]!          @ save the "real" r0 copied
                                        @ from the exception stack
 
-       mov     r1, lr
+       mov     r3, lr
 
        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
-       @  r0 - sp_svc
-       @  r1 - lr_svc
-       @  r2 - lr_<exception>, already fixed up for correct return/restart
-       @  r3 - spsr_<exception>
-       @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+       @  r2 - sp_svc
+       @  r3 - lr_svc
+       @  r4 - lr_<exception>, already fixed up for correct return/restart
+       @  r5 - spsr_<exception>
+       @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
        @
-       stmia   r5, {r0 - r4}
+       stmia   r7, {r2 - r6}
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_off
+#endif
        .endm
 
        .align  5
 __dabt_svc:
        svc_entry
-
-       @
-       @ get ready to re-enable interrupts if appropriate
-       @
-       mrs     r9, cpsr
-       tst     r3, #PSR_I_BIT
-       biceq   r9, r9, #PSR_I_BIT
-
-       @
-       @ Call the processor-specific abort handler:
-       @
-       @  r2 - aborted context pc
-       @  r3 - aborted context cpsr
-       @
-       @ The abort handler must return the aborted address in r0, and
-       @ the fault status register in r1.  r9 must be preserved.
-       @
-#ifdef MULTI_DABORT
-       ldr     r4, .LCprocfns
-       mov     lr, pc
-       ldr     pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
-       bl      CPU_DABORT_HANDLER
-#endif
-
-       @
-       @ set desired IRQ state, then call main handler
-       @
-       debug_entry r1
-       msr     cpsr_c, r9
        mov     r2, sp
-       bl      do_DataAbort
+       dabt_helper
 
        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq_notrace
 
-       @
-       @ restore SPSR and restart the instruction
-       @
-       ldr     r2, [sp, #S_PSR]
-       svc_exit r2                             @ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+       tst     r5, #PSR_I_BIT
+       bleq    trace_hardirqs_on
+       tst     r5, #PSR_I_BIT
+       blne    trace_hardirqs_off
+#endif
+       svc_exit r5                             @ return from exception
  UNWIND(.fnend         )
 ENDPROC(__dabt_svc)
 
        .align  5
 __irq_svc:
        svc_entry
+       irq_handler
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_off
-#endif
 #ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
-       add     r7, r8, #1                      @ increment it
-       str     r7, [tsk, #TI_PREEMPT]
-#endif
-
-       irq_handler
-#ifdef CONFIG_PREEMPT
-       str     r8, [tsk, #TI_PREEMPT]          @ restore preempt count
        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
        teq     r8, #0                          @ if preempt count != 0
        movne   r0, #0                          @ force flags to 0
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
 #endif
-       ldr     r4, [sp, #S_PSR]                @ irqs are already disabled
+
 #ifdef CONFIG_TRACE_IRQFLAGS
-       tst     r4, #PSR_I_BIT
-       bleq    trace_hardirqs_on
+       @ The parent context IRQs must have been enabled to get here in
+       @ the first place, so there's no point checking the PSR I bit.
+       bl      trace_hardirqs_on
 #endif
-       svc_exit r4                             @ return from exception
+       svc_exit r5                             @ return from exception
  UNWIND(.fnend         )
 ENDPROC(__irq_svc)
 
@@ -251,7 +251,6 @@ __und_svc:
 #else
        svc_entry
 #endif
-
        @
        @ call emulation code, which returns using r9 if it has emulated
        @ the instruction, or the more conventional lr if we are to treat
@@ -260,15 +259,16 @@ __und_svc:
        @  r0 - instruction
        @
 #ifndef        CONFIG_THUMB2_KERNEL
-       ldr     r0, [r2, #-4]
+       ldr     r0, [r4, #-4]
 #else
-       ldrh    r0, [r2, #-2]                   @ Thumb instruction at LR - 2
+       ldrh    r0, [r4, #-2]                   @ Thumb instruction at LR - 2
        and     r9, r0, #0xf800
        cmp     r9, #0xe800                     @ 32-bit instruction if xx >= 0
-       ldrhhs  r9, [r2]                        @ bottom 16 bits
+       ldrhhs  r9, [r4]                        @ bottom 16 bits
        orrhs   r0, r9, r0, lsl #16
 #endif
        adr     r9, BSYM(1f)
+       mov     r2, r4
        bl      call_fpe
 
        mov     r0, sp                          @ struct pt_regs *regs
@@ -282,45 +282,35 @@ __und_svc:
        @
        @ restore SPSR and restart the instruction
        @
-       ldr     r2, [sp, #S_PSR]                @ Get SVC cpsr
-       svc_exit r2                             @ return from exception
+       ldr     r5, [sp, #S_PSR]                @ Get SVC cpsr
+#ifdef CONFIG_TRACE_IRQFLAGS
+       tst     r5, #PSR_I_BIT
+       bleq    trace_hardirqs_on
+       tst     r5, #PSR_I_BIT
+       blne    trace_hardirqs_off
+#endif
+       svc_exit r5                             @ return from exception
  UNWIND(.fnend         )
 ENDPROC(__und_svc)
 
        .align  5
 __pabt_svc:
        svc_entry
-
-       @
-       @ re-enable interrupts if appropriate
-       @
-       mrs     r9, cpsr
-       tst     r3, #PSR_I_BIT
-       biceq   r9, r9, #PSR_I_BIT
-
-       mov     r0, r2                  @ pass address of aborted instruction.
-#ifdef MULTI_PABORT
-       ldr     r4, .LCprocfns
-       mov     lr, pc
-       ldr     pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
-       bl      CPU_PABORT_HANDLER
-#endif
-       debug_entry r1
-       msr     cpsr_c, r9                      @ Maybe enable interrupts
        mov     r2, sp                          @ regs
-       bl      do_PrefetchAbort                @ call abort handler
+       pabt_helper
 
        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq_notrace
 
-       @
-       @ restore SPSR and restart the instruction
-       @
-       ldr     r2, [sp, #S_PSR]
-       svc_exit r2                             @ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+       tst     r5, #PSR_I_BIT
+       bleq    trace_hardirqs_on
+       tst     r5, #PSR_I_BIT
+       blne    trace_hardirqs_off
+#endif
+       svc_exit r5                             @ return from exception
  UNWIND(.fnend         )
 ENDPROC(__pabt_svc)
 
@@ -351,23 +341,23 @@ ENDPROC(__pabt_svc)
  ARM(  stmib   sp, {r1 - r12}  )
  THUMB(        stmia   sp, {r0 - r12}  )
 
-       ldmia   r0, {r1 - r3}
+       ldmia   r0, {r3 - r5}
        add     r0, sp, #S_PC           @ here for interlock avoidance
-       mov     r4, #-1                 @  ""  ""     ""        ""
+       mov     r6, #-1                 @  ""  ""     ""        ""
 
-       str     r1, [sp]                @ save the "real" r0 copied
+       str     r3, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack
 
        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
-       @  r2 - lr_<exception>, already fixed up for correct return/restart
-       @  r3 - spsr_<exception>
-       @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+       @  r4 - lr_<exception>, already fixed up for correct return/restart
+       @  r5 - spsr_<exception>
+       @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_usr and lr_usr
        @
-       stmia   r0, {r2 - r4}
+       stmia   r0, {r4 - r6}
  ARM(  stmdb   r0, {sp, lr}^                   )
  THUMB(        store_user_sp_lr r0, r1, S_SP - S_PC    )
 
@@ -380,6 +370,10 @@ ENDPROC(__pabt_svc)
        @ Clear FP to mark the first stack frame
        @
        zero_fp
+
+#ifdef CONFIG_IRQSOFF_TRACER
+       bl      trace_hardirqs_off
+#endif
        .endm
 
        .macro  kuser_cmpxchg_check
@@ -391,7 +385,7 @@ ENDPROC(__pabt_svc)
        @ if it was interrupted in a critical region.  Here we
        @ perform a quick test inline since it should be false
        @ 99.9999% of the time.  The rest is done out of line.
-       cmp     r2, #TASK_SIZE
+       cmp     r4, #TASK_SIZE
        blhs    kuser_cmpxchg_fixup
 #endif
 #endif
@@ -401,32 +395,9 @@ ENDPROC(__pabt_svc)
 __dabt_usr:
        usr_entry
        kuser_cmpxchg_check
-
-       @
-       @ Call the processor-specific abort handler:
-       @
-       @  r2 - aborted context pc
-       @  r3 - aborted context cpsr
-       @
-       @ The abort handler must return the aborted address in r0, and
-       @ the fault status register in r1.
-       @
-#ifdef MULTI_DABORT
-       ldr     r4, .LCprocfns
-       mov     lr, pc
-       ldr     pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
-       bl      CPU_DABORT_HANDLER
-#endif
-
-       @
-       @ IRQs on, then call the main handler
-       @
-       debug_entry r1
-       enable_irq
        mov     r2, sp
-       adr     lr, BSYM(ret_from_exception)
-       b       do_DataAbort
+       dabt_helper
+       b       ret_from_exception
  UNWIND(.fnend         )
 ENDPROC(__dabt_usr)
 
@@ -434,28 +405,8 @@ ENDPROC(__dabt_usr)
 __irq_usr:
        usr_entry
        kuser_cmpxchg_check
-
-#ifdef CONFIG_IRQSOFF_TRACER
-       bl      trace_hardirqs_off
-#endif
-
-       get_thread_info tsk
-#ifdef CONFIG_PREEMPT
-       ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
-       add     r7, r8, #1                      @ increment it
-       str     r7, [tsk, #TI_PREEMPT]
-#endif
-
        irq_handler
-#ifdef CONFIG_PREEMPT
-       ldr     r0, [tsk, #TI_PREEMPT]
-       str     r8, [tsk, #TI_PREEMPT]
-       teq     r0, r7
- ARM(  strne   r0, [r0, -r0]   )
- THUMB(        movne   r0, #0          )
- THUMB(        strne   r0, [r0]        )
-#endif
-
+       get_thread_info tsk
        mov     why, #0
        b       ret_to_user_from_irq
  UNWIND(.fnend         )
@@ -467,6 +418,9 @@ ENDPROC(__irq_usr)
 __und_usr:
        usr_entry
 
+       mov     r2, r4
+       mov     r3, r5
+
        @
        @ fall through to the emulation code, which returns using r9 if
        @ it has emulated the instruction, or the more conventional lr
@@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown)
        .align  5
 __pabt_usr:
        usr_entry
-
-       mov     r0, r2                  @ pass address of aborted instruction.
-#ifdef MULTI_PABORT
-       ldr     r4, .LCprocfns
-       mov     lr, pc
-       ldr     pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
-       bl      CPU_PABORT_HANDLER
-#endif
-       debug_entry r1
-       enable_irq                              @ Enable interrupts
        mov     r2, sp                          @ regs
-       bl      do_PrefetchAbort                @ call abort handler
+       pabt_helper
  UNWIND(.fnend         )
        /* fall through */
 /*
@@ -927,13 +870,13 @@ __kuser_cmpxchg:                          @ 0xffff0fc0
        .text
 kuser_cmpxchg_fixup:
        @ Called from kuser_cmpxchg_check macro.
-       @ r2 = address of interrupted insn (must be preserved).
+       @ r4 = address of interrupted insn (must be preserved).
        @ sp = saved regs. r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
-       @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+       @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
-       subs    r8, r2, r7
+       subs    r8, r4, r7
        rsbcss  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
        mov     pc, lr
index 051166c2a932cfed1620bb3a5612383ffff12149..4d6ad8348e892157fb6642e55a551f943d3eb338 100644 (file)
        .endm
 #endif /* !CONFIG_THUMB2_KERNEL */
 
-       @
-       @ Debug exceptions are taken as prefetch or data aborts.
-       @ We must disable preemption during the handler so that
-       @ we can access the debug registers safely.
-       @
-       .macro  debug_entry, fsr
-#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
-       ldr     r4, =0x40f              @ mask out fsr.fs
-       and     r5, r4, \fsr
-       cmp     r5, #2                  @ debug exception
-       bne     1f
-       get_thread_info r10
-       ldr     r6, [r10, #TI_PREEMPT]  @ get preempt count
-       add     r11, r6, #1             @ increment it
-       str     r11, [r10, #TI_PREEMPT]
-1:
-#endif
-       .endm
-
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
index 6b1e0ad9ec3b7494d8b3474dca0a88256c836959..d46f25968bec2ce04143daa785de6a63cf4b635b 100644 (file)
  * numbers for r1.
  *
  */
+       .arm
+
        __HEAD
 ENTRY(stext)
+
+ THUMB(        adr     r9, BSYM(1f)    )       @ Kernel is always entered in ARM.
+ THUMB(        bx      r9              )       @ If this is a Thumb-2 kernel,
+ THUMB(        .thumb                  )       @ switch to Thumb now.
+ THUMB(1:                      )
+
        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
                                                @ and irqs disabled
 #ifndef CONFIG_CPU_CP15
index 278c1b0ebb2ee340fcb0e4f4b010c80e535c9308..742b6108a00168b8ed8c391c6d8e8b1b9c438fc6 100644 (file)
  * crap here - that's what the boot loader (or in extreme, well justified
  * circumstances, zImage) is for.
  */
+       .arm
+
        __HEAD
 ENTRY(stext)
+
+ THUMB(        adr     r9, BSYM(1f)    )       @ Kernel is always entered in ARM.
+ THUMB(        bx      r9              )       @ If this is a Thumb-2 kernel,
+ THUMB(        .thumb                  )       @ switch to Thumb now.
+ THUMB(1:                      )
+
        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
                                                @ and irqs disabled
        mrc     p15, 0, r9, c0, c0              @ get processor id
index 87acc25d7a3e203646f2ee71f1d7c711304d52f2..a927ca1f5566ce67055296f9a45f9e8714dcda51 100644 (file)
@@ -796,7 +796,7 @@ unlock:
 
 /*
  * Called from either the Data Abort Handler [watchpoint] or the
- * Prefetch Abort Handler [breakpoint] with preemption disabled.
+ * Prefetch Abort Handler [breakpoint] with interrupts disabled.
  */
 static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
                                 struct pt_regs *regs)
@@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
        int ret = 0;
        u32 dscr;
 
-       /* We must be called with preemption disabled. */
-       WARN_ON(preemptible());
+       preempt_disable();
+
+       if (interrupts_enabled(regs))
+               local_irq_enable();
 
        /* We only handle watchpoints and hardware breakpoints. */
        ARM_DBG_READ(c1, 0, dscr);
@@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
                ret = 1; /* Unhandled fault. */
        }
 
-       /*
-        * Re-enable preemption after it was disabled in the
-        * low-level exception handling code.
-        */
        preempt_enable();
 
        return ret;
index 83bbad03fcc6642f7ab5dc0bc1f8bce5a790eac9..0f928a131af83ab8c1fa4370bab691a7f092534d 100644 (file)
@@ -131,54 +131,63 @@ int __init arch_probe_nr_irqs(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static bool migrate_one_irq(struct irq_data *d)
+static bool migrate_one_irq(struct irq_desc *desc)
 {
-       unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+       struct irq_data *d = irq_desc_get_irq_data(desc);
+       const struct cpumask *affinity = d->affinity;
+       struct irq_chip *c;
        bool ret = false;
 
-       if (cpu >= nr_cpu_ids) {
-               cpu = cpumask_any(cpu_online_mask);
+       /*
+        * If this is a per-CPU interrupt, or the affinity does not
+        * include this CPU, then we have nothing to do.
+        */
+       if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+               return false;
+
+       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+               affinity = cpu_online_mask;
                ret = true;
        }
 
-       pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
-
-       d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+       c = irq_data_get_irq_chip(d);
+       if (c->irq_set_affinity)
+               c->irq_set_affinity(d, affinity, true);
+       else
+               pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 
        return ret;
 }
 
 /*
- * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
- * the affinity settings do not allow other CPUs, force them onto any
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
  * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
  */
 void migrate_irqs(void)
 {
-       unsigned int i, cpu = smp_processor_id();
+       unsigned int i;
        struct irq_desc *desc;
        unsigned long flags;
 
        local_irq_save(flags);
 
        for_each_irq_desc(i, desc) {
-               struct irq_data *d = &desc->irq_data;
                bool affinity_broken = false;
 
-               raw_spin_lock(&desc->lock);
-               do {
-                       if (desc->action == NULL)
-                               break;
-
-                       if (d->node != cpu)
-                               break;
+               if (!desc)
+                       continue;
 
-                       affinity_broken = migrate_one_irq(d);
-               } while (0);
+               raw_spin_lock(&desc->lock);
+               affinity_broken = migrate_one_irq(desc);
                raw_spin_unlock(&desc->lock);
 
                if (affinity_broken && printk_ratelimit())
-                       pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
+                       pr_warning("IRQ%u no longer affine to CPU%u\n", i,
+                               smp_processor_id());
        }
 
        local_irq_restore(flags);
index fee7c36349eb6c137e7c578946968e20caf35910..016d6a0830a3e202bc42df7c40108a0875916d4b 100644 (file)
@@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
                                offset -= 0x02000000;
                        offset += sym->st_value - loc;
 
-                       /* only Thumb addresses allowed (no interworking) */
-                       if (!(offset & 1) ||
+                       /*
+                        * For function symbols, only Thumb addresses are
+                        * allowed (no interworking).
+                        *
+                        * For non-function symbols, the destination
+                        * has no specific ARM/Thumb disposition, so
+                        * the branch is resolved under the assumption
+                        * that interworking is not required.
+                        */
+                       if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+                               !(offset & 1)) ||
                            offset <= (s32)0xff000000 ||
                            offset >= (s32)0x01000000) {
                                pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
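A worked illustration of the relaxed check (addresses illustrative): a Thumb-2 branch relocation against a Thumb function resolves to an odd offset (bit 0 set) and is accepted; an even offset against an STT_FUNC symbol would require ARM interworking that the branch encoding cannot express, so it is rejected through the range error above; and a branch against an untyped symbol (for example a local label on data in .text) no longer has its low bit tested and is resolved on the assumption that no interworking is needed.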
index d53c0abc4dd3aaa375595fcd4d54527f013b41f8..8d8507858e5c8431ced9f6a3bff385514cf1c1ad 100644 (file)
@@ -435,7 +435,7 @@ armpmu_reserve_hardware(void)
                        if (irq >= 0)
                                free_irq(irq, NULL);
                }
-               release_pmu(pmu_device);
+               release_pmu(ARM_PMU_DEVICE_CPU);
                pmu_device = NULL;
        }
 
@@ -454,7 +454,7 @@ armpmu_release_hardware(void)
        }
        armpmu->stop();
 
-       release_pmu(pmu_device);
+       release_pmu(ARM_PMU_DEVICE_CPU);
        pmu_device = NULL;
 }
 
@@ -583,7 +583,7 @@ static int armpmu_event_init(struct perf_event *event)
 static void armpmu_enable(struct pmu *pmu)
 {
        /* Enable all of the perf events on hardware. */
-       int idx;
+       int idx, enabled = 0;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
        if (!armpmu)
@@ -596,9 +596,11 @@ static void armpmu_enable(struct pmu *pmu)
                        continue;
 
                armpmu->enable(&event->hw, idx);
+               enabled = 1;
        }
 
-       armpmu->start();
+       if (enabled)
+               armpmu->start();
 }
 
 static void armpmu_disable(struct pmu *pmu)
index 2c79eec192629b9b3e2a91914adcfbf3f8312723..2b70709376c3271e1007b3e6bd7d829db997375e 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #include <asm/pmu.h>
@@ -25,36 +26,88 @@ static volatile long pmu_lock;
 
 static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
 
-static int __devinit pmu_device_probe(struct platform_device *pdev)
+static int __devinit pmu_register(struct platform_device *pdev,
+                                       enum arm_pmu_type type)
 {
-
-       if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
+       if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
                pr_warning("received registration request for unknown "
-                               "device %d\n", pdev->id);
+                               "device %d\n", type);
                return -EINVAL;
        }
 
-       if (pmu_devices[pdev->id])
-               pr_warning("registering new PMU device type %d overwrites "
-                               "previous registration!\n", pdev->id);
-       else
-               pr_info("registered new PMU device of type %d\n",
-                               pdev->id);
+       if (pmu_devices[type]) {
+               pr_warning("rejecting duplicate registration of PMU device "
+                       "type %d.\n", type);
+               return -ENOSPC;
+       }
 
-       pmu_devices[pdev->id] = pdev;
+       pr_info("registered new PMU device of type %d\n", type);
+       pmu_devices[type] = pdev;
        return 0;
 }
 
-static struct platform_driver pmu_driver = {
+#define OF_MATCH_PMU(_name, _type) {   \
+       .compatible = _name,            \
+       .data = (void *)_type,          \
+}
+
+#define OF_MATCH_CPU(name)     OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
+
+static struct of_device_id armpmu_of_device_ids[] = {
+       OF_MATCH_CPU("arm,cortex-a9-pmu"),
+       OF_MATCH_CPU("arm,cortex-a8-pmu"),
+       OF_MATCH_CPU("arm,arm1136-pmu"),
+       OF_MATCH_CPU("arm,arm1176-pmu"),
+       {},
+};
+
+#define PLAT_MATCH_PMU(_name, _type) { \
+       .name           = _name,        \
+       .driver_data    = _type,        \
+}
+
+#define PLAT_MATCH_CPU(_name)  PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+       PLAT_MATCH_CPU("arm-pmu"),
+       {},
+};
+
+enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
+{
+       const struct of_device_id       *of_id;
+       const struct platform_device_id *pdev_id;
+
+       /* provided by of_device_id table */
+       if (pdev->dev.of_node) {
+               of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
+               BUG_ON(!of_id);
+               return (enum arm_pmu_type)of_id->data;
+       }
+
+       /* Provided by platform_device_id table */
+       pdev_id = platform_get_device_id(pdev);
+       BUG_ON(!pdev_id);
+       return pdev_id->driver_data;
+}
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+       return pmu_register(pdev, armpmu_device_type(pdev));
+}
+
+static struct platform_driver armpmu_driver = {
        .driver         = {
                .name   = "arm-pmu",
+               .of_match_table = armpmu_of_device_ids,
        },
-       .probe          = pmu_device_probe,
+       .probe          = armpmu_device_probe,
+       .id_table       = armpmu_plat_device_ids,
 };
 
 static int __init register_pmu_driver(void)
 {
-       return platform_driver_register(&pmu_driver);
+       return platform_driver_register(&armpmu_driver);
 }
 device_initcall(register_pmu_driver);
 
@@ -77,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device)
 EXPORT_SYMBOL_GPL(reserve_pmu);
 
 int
-release_pmu(struct platform_device *pdev)
+release_pmu(enum arm_pmu_type device)
 {
-       if (WARN_ON(pdev != pmu_devices[pdev->id]))
+       if (WARN_ON(!pmu_devices[device]))
                return -EINVAL;
-       clear_bit_unlock(pdev->id, &pmu_lock);
+       clear_bit_unlock(device, &pmu_lock);
        return 0;
 }
 EXPORT_SYMBOL_GPL(release_pmu);
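With both match tables in place, the PMU can bind from a device tree node (the "arm,*-pmu" compatibles above) or from board code; a hedged sketch of the platform-device route (the IRQ number is hypothetical):

    static struct resource pmu_resource = {
            .start  = IRQ_PMU,              /* hypothetical PMU interrupt */
            .end    = IRQ_PMU,
            .flags  = IORESOURCE_IRQ,
    };

    static struct platform_device pmu_device = {
            .name           = "arm-pmu",    /* matches armpmu_plat_device_ids */
            .id             = -1,
            .resource       = &pmu_resource,
            .num_resources  = 1,
    };

    platform_device_register(&pmu_device);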
index ed11fb08b05a5b2d3fd022d981970c01c9fdc088..9c3278f37796f6184a9f5986bcfe98686fcba4e7 100644 (file)
@@ -73,6 +73,7 @@ __setup("fpe=", fpe_setup);
 #endif
 
 extern void paging_init(struct machine_desc *desc);
+extern void sanity_check_meminfo(void);
 extern void reboot_setup(char *str);
 
 unsigned int processor_id;
@@ -342,54 +343,6 @@ static void __init feat_v6_fixup(void)
                elf_hwcap &= ~HWCAP_TLS;
 }
 
-static void __init setup_processor(void)
-{
-       struct proc_info_list *list;
-
-       /*
-        * locate processor in the list of supported processor
-        * types.  The linker builds this table for us from the
-        * entries in arch/arm/mm/proc-*.S
-        */
-       list = lookup_processor_type(read_cpuid_id());
-       if (!list) {
-               printk("CPU configuration botched (ID %08x), unable "
-                      "to continue.\n", read_cpuid_id());
-               while (1);
-       }
-
-       cpu_name = list->cpu_name;
-
-#ifdef MULTI_CPU
-       processor = *list->proc;
-#endif
-#ifdef MULTI_TLB
-       cpu_tlb = *list->tlb;
-#endif
-#ifdef MULTI_USER
-       cpu_user = *list->user;
-#endif
-#ifdef MULTI_CACHE
-       cpu_cache = *list->cache;
-#endif
-
-       printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-              cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
-              proc_arch[cpu_architecture()], cr_alignment);
-
-       sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
-       sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
-       elf_hwcap = list->elf_hwcap;
-#ifndef CONFIG_ARM_THUMB
-       elf_hwcap &= ~HWCAP_THUMB;
-#endif
-
-       feat_v6_fixup();
-
-       cacheid_init();
-       cpu_proc_init();
-}
-
 /*
  * cpu_init - initialise one CPU.
  *
@@ -405,6 +358,8 @@ void cpu_init(void)
                BUG();
        }
 
+       cpu_proc_init();
+
        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
@@ -441,6 +396,54 @@ void cpu_init(void)
            : "r14");
 }
 
+static void __init setup_processor(void)
+{
+       struct proc_info_list *list;
+
+       /*
+        * locate processor in the list of supported processor
+        * types.  The linker builds this table for us from the
+        * entries in arch/arm/mm/proc-*.S
+        */
+       list = lookup_processor_type(read_cpuid_id());
+       if (!list) {
+               printk("CPU configuration botched (ID %08x), unable "
+                      "to continue.\n", read_cpuid_id());
+               while (1);
+       }
+
+       cpu_name = list->cpu_name;
+
+#ifdef MULTI_CPU
+       processor = *list->proc;
+#endif
+#ifdef MULTI_TLB
+       cpu_tlb = *list->tlb;
+#endif
+#ifdef MULTI_USER
+       cpu_user = *list->user;
+#endif
+#ifdef MULTI_CACHE
+       cpu_cache = *list->cache;
+#endif
+
+       printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+              cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+              proc_arch[cpu_architecture()], cr_alignment);
+
+       sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
+       sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
+       elf_hwcap = list->elf_hwcap;
+#ifndef CONFIG_ARM_THUMB
+       elf_hwcap &= ~HWCAP_THUMB;
+#endif
+
+       feat_v6_fixup();
+
+       cacheid_init();
+       cpu_init();
+}
+
 void __init dump_machine_table(void)
 {
        struct machine_desc *p;
@@ -900,6 +903,7 @@ void __init setup_arch(char **cmdline_p)
 
        parse_early_param();
 
+       sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);
 
        paging_init(mdesc);
@@ -913,7 +917,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
        reserve_crashkernel();
 
-       cpu_init();
        tcm_init();
 
 #ifdef CONFIG_MULTI_IRQ_HANDLER
index 6398ead9d1c08da1774dba4568a04e22fe4125ae..dc902f2c68457b0f3277b8112a7a1e867106cde8 100644 (file)
 /*
  * Save CPU state for a suspend
  *  r1 = v:p offset
- *  r3 = virtual return function
- * Note: sp is decremented to allocate space for CPU state on stack
- * r0-r3,r9,r10,lr corrupted
+ *  r2 = suspend function arg0
+ *  r3 = suspend function
  */
-ENTRY(cpu_suspend)
-       mov     r9, lr
+ENTRY(__cpu_suspend)
+       stmfd   sp!, {r4 - r11, lr}
 #ifdef MULTI_CPU
        ldr     r10, =processor
-       mov     r2, sp                  @ current virtual SP
-       ldr     r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+       ldr     r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
        ldr     ip, [r10, #CPU_DO_RESUME] @ virtual resume function
-       sub     sp, sp, r0              @ allocate CPU state on stack
-       mov     r0, sp                  @ save pointer
+#else
+       ldr     r5, =cpu_suspend_size
+       ldr     ip, =cpu_do_resume
+#endif
+       mov     r6, sp                  @ current virtual SP
+       sub     sp, sp, r5              @ allocate CPU state on stack
+       mov     r0, sp                  @ save pointer to CPU save block
        add     ip, ip, r1              @ convert resume fn to phys
-       stmfd   sp!, {r1, r2, r3, ip}   @ save v:p, virt SP, retfn, phys resume fn
-       ldr     r3, =sleep_save_sp
-       add     r2, sp, r1              @ convert SP to phys
+       stmfd   sp!, {r1, r6, ip}       @ save v:p, virt SP, phys resume fn
+       ldr     r5, =sleep_save_sp
+       add     r6, sp, r1              @ convert SP to phys
+       stmfd   sp!, {r2, r3}           @ save suspend func arg and pointer
 #ifdef CONFIG_SMP
        ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
        ALT_UP(mov lr, #0)
        and     lr, lr, #15
-       str     r2, [r3, lr, lsl #2]    @ save phys SP
+       str     r6, [r5, lr, lsl #2]    @ save phys SP
 #else
-       str     r2, [r3]                @ save phys SP
+       str     r6, [r5]                @ save phys SP
 #endif
+#ifdef MULTI_CPU
        mov     lr, pc
        ldr     pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
 #else
-       mov     r2, sp                  @ current virtual SP
-       ldr     r0, =cpu_suspend_size
-       sub     sp, sp, r0              @ allocate CPU state on stack
-       mov     r0, sp                  @ save pointer
-       stmfd   sp!, {r1, r2, r3}       @ save v:p, virt SP, return fn
-       ldr     r3, =sleep_save_sp
-       add     r2, sp, r1              @ convert SP to phys
-#ifdef CONFIG_SMP
-       ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
-       ALT_UP(mov lr, #0)
-       and     lr, lr, #15
-       str     r2, [r3, lr, lsl #2]    @ save phys SP
-#else
-       str     r2, [r3]                @ save phys SP
-#endif
        bl      cpu_do_suspend
 #endif
 
        @ flush data cache
 #ifdef MULTI_CACHE
        ldr     r10, =cpu_cache
-       mov     lr, r9
+       mov     lr, pc
        ldr     pc, [r10, #CACHE_FLUSH_KERN_ALL]
 #else
-       mov     lr, r9
-       b       __cpuc_flush_kern_all
+       bl      __cpuc_flush_kern_all
 #endif
-ENDPROC(cpu_suspend)
+       adr     lr, BSYM(cpu_suspend_abort)
+       ldmfd   sp!, {r0, pc}           @ call suspend fn
+ENDPROC(__cpu_suspend)
        .ltorg
 
+cpu_suspend_abort:
+       ldmia   sp!, {r1 - r3}          @ pop v:p, virt SP, phys resume fn
+       mov     sp, r2
+       ldmfd   sp!, {r4 - r11, pc}
+ENDPROC(cpu_suspend_abort)
+
 /*
  * r0 = control register value
  * r1 = v:p offset (preserved by cpu_do_resume)
@@ -97,7 +94,9 @@ ENDPROC(cpu_resume_turn_mmu_on)
 cpu_resume_after_mmu:
        str     r5, [r2, r4, lsl #2]    @ restore old mapping
        mcr     p15, 0, r0, c1, c0, 0   @ turn on D-cache
-       mov     pc, lr
+       bl      cpu_init                @ restore the und/abt/irq banked regs
+       mov     r0, #0                  @ return zero on success
+       ldmfd   sp!, {r4 - r11, pc}
 ENDPROC(cpu_resume_after_mmu)
 
 /*
@@ -120,20 +119,11 @@ ENTRY(cpu_resume)
        ldr     r0, sleep_save_sp       @ stack phys addr
 #endif
        setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
-#ifdef MULTI_CPU
-       @ load v:p, stack, return fn, resume fn
-  ARM( ldmia   r0!, {r1, sp, lr, pc}   )
-THUMB( ldmia   r0!, {r1, r2, r3, r4}   )
+       @ load v:p, stack, resume fn
+  ARM( ldmia   r0!, {r1, sp, pc}       )
+THUMB( ldmia   r0!, {r1, r2, r3}       )
 THUMB( mov     sp, r2                  )
-THUMB( mov     lr, r3                  )
-THUMB( bx      r4                      )
-#else
-       @ load v:p, stack, return fn
-  ARM( ldmia   r0!, {r1, sp, lr}       )
-THUMB( ldmia   r0!, {r1, r2, lr}       )
-THUMB( mov     sp, r2                  )
-       b       cpu_do_resume
-#endif
+THUMB( bx      r3                      )
 ENDPROC(cpu_resume)
 
 sleep_save_sp:
index 344e52b16c8cd4234b523547d07a8b6ff9106bd6..167e3cbe1f2fc815367c955c29d0a842b6059f62 100644 (file)
@@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        smp_store_cpu_info(cpu);
 
        /*
-        * OK, now it's safe to let the boot CPU continue
+        * OK, now it's safe to let the boot CPU continue.  Wait for
+        * the CPU migration code to notice that the CPU is online
+        * before we continue.
         */
        set_cpu_online(cpu, true);
+       while (!cpu_active(cpu))
+               cpu_relax();
 
        /*
         * OK, it's off to the idle thread for us
@@ -361,14 +365,21 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
-
-       if (max_cpus > 1) {
+       if (ncores > 1 && max_cpus) {
                /*
                 * Enable the local timer or broadcast device for the
                 * boot CPU, but only if we have more than one CPU.
                 */
                percpu_timer_setup();
 
+               /*
+                * Initialise the present map, which describes the set of CPUs
+                * actually populated at the present time. A platform should
+                * re-initialize the map in platform_smp_prepare_cpus() if
+                * present != possible (e.g. physical hotplug).
+                */
+               init_cpu_present(&cpu_possible_map);
+
                /*
                 * Initialise the SCU if there are more than one CPU
                 * and let them know where to start.
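The new comment above invites platforms with physical hotplug to rebuild the present map; a hedged sketch of such a platform_smp_prepare_cpus() hook ('populated_cores' is hypothetical):

    void __init platform_smp_prepare_cpus(unsigned int max_cpus)
    {
            unsigned int i;

            init_cpu_present(cpu_none_mask);        /* drop the default copy */
            for (i = 0; i < populated_cores; i++)   /* cores actually fitted */
                    set_cpu_present(i, true);
    }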
index a1e757c3439bcb8155f5f93f940788854fd364d0..79ed5e7f204a9a32fb0ac11f8cc9ee840438dc70 100644 (file)
@@ -20,6 +20,7 @@
 #define SCU_INVALIDATE         0x0c
 #define SCU_FPGA_REVISION      0x10
 
+#ifdef CONFIG_SMP
 /*
  * Get the number of CPU cores from the SCU configuration
  */
@@ -50,6 +51,7 @@ void __init scu_enable(void __iomem *scu_base)
         */
        flush_cache_all();
 }
+#endif
 
 /*
  * Set the executing CPUs power mode as defined.  This will be in
index 60636f499cb3eafd5318413dd4c83a97949abe91..2c277d40cee681ccc496239b3ab6724259a35756 100644 (file)
@@ -115,7 +115,7 @@ static void __cpuinit twd_calibrate_rate(void)
                twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
 
                printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
-                       (twd_timer_rate / 1000000) % 100);
+                       (twd_timer_rate / 10000) % 100);
        }
 }
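A worked check of the fraction fix (rate illustrative): with twd_timer_rate = 123456789 Hz, the old expression (rate / 1000000) % 100 yields 23, merely the integer MHz value modulo 100, so the kernel printed "123.23MHz"; the corrected (rate / 10000) % 100 yields 45, giving the intended "123.45MHz".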
 
index f5cf660eefccda61515b0f33a56db0bf692d3b51..30e302d33e0add9d51e5aeb1261ad350d935c9d4 100644 (file)
@@ -19,6 +19,8 @@
 #include "tcm.h"
 
 static struct gen_pool *tcm_pool;
+static bool dtcm_present;
+static bool itcm_present;
 
 /* TCM section definitions from the linker */
 extern char __itcm_start, __sitcm_text, __eitcm_text;
@@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len)
 }
 EXPORT_SYMBOL(tcm_free);
 
+bool tcm_dtcm_present(void)
+{
+       return dtcm_present;
+}
+EXPORT_SYMBOL(tcm_dtcm_present);
+
+bool tcm_itcm_present(void)
+{
+       return itcm_present;
+}
+EXPORT_SYMBOL(tcm_itcm_present);
+
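With the presence queries exported above, code can probe for TCM before touching the allocator; a usage sketch (size illustrative, assuming tcm_alloc() hands out space from the pool set up later in this file):

    if (tcm_dtcm_present()) {
            void *buf = tcm_alloc(64);
            if (buf) {
                    /* ... use the low-latency memory ... */
                    tcm_free(buf, 64);
            }
    }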
 static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
                                  u32 *offset)
 {
@@ -134,6 +148,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
                        (tcm_region & 1) ? "" : "not ");
        }
 
+       /* Not much fun you can do with a size 0 bank */
+       if (tcm_size == 0)
+               return 0;
+
        /* Force move the TCM bank to where we want it, enable */
        tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1;
 
@@ -165,12 +183,20 @@ void __init tcm_init(void)
        u32 tcm_status = read_cpuid_tcmstatus();
        u8 dtcm_banks = (tcm_status >> 16) & 0x03;
        u8 itcm_banks = (tcm_status & 0x03);
+       size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
+       size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
        char *start;
        char *end;
        char *ram;
        int ret;
        int i;
 
+       /* Values greater than 2 for D/ITCM banks are "reserved" */
+       if (dtcm_banks > 2)
+               dtcm_banks = 0;
+       if (itcm_banks > 2)
+               itcm_banks = 0;
+
        /* Setup DTCM if present */
        if (dtcm_banks > 0) {
                for (i = 0; i < dtcm_banks; i++) {
@@ -178,6 +204,13 @@ void __init tcm_init(void)
                        if (ret)
                                return;
                }
+               /* This means you compiled more code than fits into DTCM */
+               if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
+                       pr_info("CPU DTCM: %u bytes of code compiled to "
+                               "DTCM but only %lu bytes of DTCM present\n",
+                               dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
+                       goto no_dtcm;
+               }
                dtcm_res.end = dtcm_end - 1;
                request_resource(&iomem_resource, &dtcm_res);
                dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
@@ -186,12 +219,16 @@ void __init tcm_init(void)
                start = &__sdtcm_data;
                end   = &__edtcm_data;
                ram   = &__dtcm_start;
-               /* This means you compiled more code than fits into DTCM */
-               BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET));
-               memcpy(start, ram, (end-start));
-               pr_debug("CPU DTCM: copied data from %p - %p\n", start, end);
+               memcpy(start, ram, dtcm_code_sz);
+               pr_debug("CPU DTCM: copied data from %p - %p\n",
+                        start, end);
+               dtcm_present = true;
+       } else if (dtcm_code_sz) {
+               pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no "
+                       "DTCM banks present in CPU\n", dtcm_code_sz);
        }
 
+no_dtcm:
        /* Setup ITCM if present */
        if (itcm_banks > 0) {
                for (i = 0; i < itcm_banks; i++) {
@@ -199,6 +236,13 @@ void __init tcm_init(void)
                        if (ret)
                                return;
                }
+               /* This means you compiled more code than fits into ITCM */
+               if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
+                       pr_info("CPU ITCM: %u bytes of code compiled to "
+                               "ITCM but only %lu bytes of ITCM present\n",
+                               itcm_code_sz, (itcm_end - ITCM_OFFSET));
+                       return;
+               }
                itcm_res.end = itcm_end - 1;
                request_resource(&iomem_resource, &itcm_res);
                itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
@@ -207,10 +251,13 @@ void __init tcm_init(void)
                start = &__sitcm_text;
                end   = &__eitcm_text;
                ram   = &__itcm_start;
-               /* This means you compiled more code than fits into ITCM */
-               BUG_ON((end - start) > (itcm_end - ITCM_OFFSET));
-               memcpy(start, ram, (end-start));
-               pr_debug("CPU ITCM: copied code from %p - %p\n", start, end);
+               memcpy(start, ram, itcm_code_sz);
+               pr_debug("CPU ITCM: copied code from %p - %p\n",
+                        start, end);
+               itcm_present = true;
+       } else if (itcm_code_sz) {
+               pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no "
+                       "ITCM banks present in CPU\n", itcm_code_sz);
        }
 }
 
@@ -221,7 +268,6 @@ void __init tcm_init(void)
  */
 static int __init setup_tcm_pool(void)
 {
-       u32 tcm_status = read_cpuid_tcmstatus();
        u32 dtcm_pool_start = (u32) &__edtcm_data;
        u32 itcm_pool_start = (u32) &__eitcm_text;
        int ret;
@@ -236,7 +282,7 @@ static int __init setup_tcm_pool(void)
        pr_debug("Setting up TCM memory pool\n");
 
        /* Add the rest of DTCM to the TCM pool */
-       if (tcm_status & (0x03 << 16)) {
+       if (dtcm_present) {
                if (dtcm_pool_start < dtcm_end) {
                        ret = gen_pool_add(tcm_pool, dtcm_pool_start,
                                           dtcm_end - dtcm_pool_start, -1);
@@ -253,7 +299,7 @@ static int __init setup_tcm_pool(void)
        }
 
        /* Add the rest of ITCM to the TCM pool */
-       if (tcm_status & 0x03) {
+       if (itcm_present) {
                if (itcm_pool_start < itcm_end) {
                        ret = gen_pool_add(tcm_pool, itcm_pool_start,
                                           itcm_end - itcm_pool_start, -1);
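
With the new accessors exported, a driver can probe for TCM before committing to it. A minimal usage sketch against the API in this file:

    #include <asm/tcm.h>

    static int __init tcm_user_init(void)
    {
            void *buf;

            if (!tcm_dtcm_present())
                    return -ENODEV;

            buf = tcm_alloc(64);            /* carve 64 bytes from the pool */
            if (buf) {
                    /* ... keep latency-critical data here ... */
                    tcm_free(buf, 64);
            }
            return 0;
    }
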
index e5287f21badc7e87c3c2e7a2d8953d5fb270ec86..bf977f8514f60191e478e8e25a696c93d2802b87 100644 (file)
@@ -38,57 +38,6 @@ jiffies = jiffies_64 + 4;
 
 SECTIONS
 {
-#ifdef CONFIG_XIP_KERNEL
-       . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
-#else
-       . = PAGE_OFFSET + TEXT_OFFSET;
-#endif
-
-       .init : {                       /* Init code and data           */
-               _stext = .;
-               _sinittext = .;
-                       HEAD_TEXT
-                       INIT_TEXT
-                       ARM_EXIT_KEEP(EXIT_TEXT)
-               _einittext = .;
-               ARM_CPU_DISCARD(PROC_INFO)
-               __arch_info_begin = .;
-                       *(.arch.info.init)
-               __arch_info_end = .;
-               __tagtable_begin = .;
-                       *(.taglist.init)
-               __tagtable_end = .;
-#ifdef CONFIG_SMP_ON_UP
-               __smpalt_begin = .;
-                       *(.alt.smp.init)
-               __smpalt_end = .;
-#endif
-
-               __pv_table_begin = .;
-                       *(.pv_table)
-               __pv_table_end = .;
-
-               INIT_SETUP(16)
-
-               INIT_CALLS
-               CON_INITCALL
-               SECURITY_INITCALL
-               INIT_RAM_FS
-
-#ifndef CONFIG_XIP_KERNEL
-               __init_begin = _stext;
-               INIT_DATA
-               ARM_EXIT_KEEP(EXIT_DATA)
-#endif
-       }
-
-       PERCPU_SECTION(32)
-
-#ifndef CONFIG_XIP_KERNEL
-       . = ALIGN(PAGE_SIZE);
-       __init_end = .;
-#endif
-
        /*
         * unwind exit sections must be discarded before the rest of the
         * unwind sections get included.
@@ -105,11 +54,23 @@ SECTIONS
 #ifndef CONFIG_MMU
                *(.fixup)
                *(__ex_table)
+#endif
+#ifndef CONFIG_SMP_ON_UP
+               *(.alt.smp.init)
 #endif
        }
 
+#ifdef CONFIG_XIP_KERNEL
+       . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
+#else
+       . = PAGE_OFFSET + TEXT_OFFSET;
+#endif
+       .head.text : {
+               _text = .;
+               HEAD_TEXT
+       }
        .text : {                       /* Real text segment            */
-               _text = .;              /* Text and read-only data      */
+               _stext = .;             /* Text and read-only data      */
                        __exception_text_start = .;
                        *(.exception.text)
                        __exception_text_end = .;
@@ -122,8 +83,6 @@ SECTIONS
                        *(.fixup)
 #endif
                        *(.gnu.warning)
-                       *(.rodata)
-                       *(.rodata.*)
                        *(.glue_7)
                        *(.glue_7t)
                . = ALIGN(4);
@@ -152,10 +111,63 @@ SECTIONS
 
        _etext = .;                     /* End of text and rodata section */
 
+#ifndef CONFIG_XIP_KERNEL
+       . = ALIGN(PAGE_SIZE);
+       __init_begin = .;
+#endif
+
+       INIT_TEXT_SECTION(8)
+       .exit.text : {
+               ARM_EXIT_KEEP(EXIT_TEXT)
+       }
+       .init.proc.info : {
+               ARM_CPU_DISCARD(PROC_INFO)
+       }
+       .init.arch.info : {
+               __arch_info_begin = .;
+               *(.arch.info.init)
+               __arch_info_end = .;
+       }
+       .init.tagtable : {
+               __tagtable_begin = .;
+               *(.taglist.init)
+               __tagtable_end = .;
+       }
+#ifdef CONFIG_SMP_ON_UP
+       .init.smpalt : {
+               __smpalt_begin = .;
+               *(.alt.smp.init)
+               __smpalt_end = .;
+       }
+#endif
+       .init.pv_table : {
+               __pv_table_begin = .;
+               *(.pv_table)
+               __pv_table_end = .;
+       }
+       .init.data : {
+#ifndef CONFIG_XIP_KERNEL
+               INIT_DATA
+#endif
+               INIT_SETUP(16)
+               INIT_CALLS
+               CON_INITCALL
+               SECURITY_INITCALL
+               INIT_RAM_FS
+       }
+#ifndef CONFIG_XIP_KERNEL
+       .exit.data : {
+               ARM_EXIT_KEEP(EXIT_DATA)
+       }
+#endif
+
+       PERCPU_SECTION(32)
+
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
        . = PAGE_OFFSET + TEXT_OFFSET;
 #else
+       __init_end = .;
        . = ALIGN(THREAD_SIZE);
        __data_loc = .;
 #endif
@@ -270,12 +282,6 @@ SECTIONS
 
        /* Default discards */
        DISCARDS
-
-#ifndef CONFIG_SMP_ON_UP
-       /DISCARD/ : {
-               *(.alt.smp.init)
-       }
-#endif
 }
 
 /*
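
The relocated symbols (_text, _stext, __init_begin, __init_end) keep their C-side consumers working unchanged; e.g. the generic init-memory teardown measures the init span through linker-provided externs, roughly:

    extern char __init_begin[], __init_end[];

    pr_info("Freeing unused kernel memory: %ldK\n",
            (long)(__init_end - __init_begin) >> 10);
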
index 17fae4a42ab587f413025780f5228b7f0bfc7553..f1013d08bb5738b7670264841b6e8723c4c64f70 100644 (file)
@@ -223,15 +223,15 @@ static struct clk *periph_clocks[] __initdata = {
 };
 
 static struct clk_lookup periph_clocks_lookups[] = {
-       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
-       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
        CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
        CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
 };
 
 static struct clk_lookup usart_clocks_lookups[] = {
index cd850ed6f33542b5a70a7d7bfcbe0080f5d05619..dba0d8d8a4bd185dd96054cbc466b9a2a6c825d7 100644 (file)
@@ -1220,7 +1220,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91cap9_set_console_clock(portnr);
+               at91cap9_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
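The portnr argument is the console's index into the board's at91_uarts[] table, while the *_set_console_clock() helpers are keyed by the USART device id; boards may register e.g. USART1 as port 0, so the two can differ. An illustrative sketch (device name and id are hypothetical):

    /* board code: USART1 is the debug port, registered as console port 0 */
    at91_uarts[0] = &at91_usart1_device;            /* ->id == 2, say */

    /* hence the clock must follow the device id, not the port number: */
    at91cap9_set_console_clock(at91_uarts[0]->id);  /* 2, not 0 */
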
index b228ce9e21a14b9e6c20c57e5b87d8bcc5b769bf..83a1a3fee5549be3f7cb92a8bb50335e9150509e 100644 (file)
@@ -199,9 +199,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk),
-       CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+       CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
 };
 
 static struct clk_lookup usart_clocks_lookups[] = {
index a0ba475be04cd23a7aeba30d933c8eead4fb3d1f..7227755ffec643fae52b92ebd71844581fb1ed92 100644 (file)
@@ -1135,7 +1135,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91rm9200_set_console_clock(portnr);
+               at91rm9200_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 1fdeb9058a760a865ba0463d6fdbca89cd448bfb..39f81f47b4ba4d4fe12c02e852cae1afb97b661b 100644 (file)
@@ -1173,7 +1173,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9260_set_console_clock(portnr);
+               at91sam9260_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 3eb4538fceebeb243aba31185861d3639d05c9c3..5004bf0a05f2dc0c9e1eea84708ae9bd7accf408 100644 (file)
@@ -1013,7 +1013,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9261_set_console_clock(portnr);
+               at91sam9261_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index ffe081b77ed0f5bfafc97116919f0c381b06be98..a050f41fc860d7698b2a8ba83b29221f14805d22 100644 (file)
@@ -1395,7 +1395,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9263_set_console_clock(portnr);
+               at91sam9263_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 2bb6ff9af1c7fede95a7879c3381dc0a1c424d8c..11e214121b23cd45d413f2b19273b61ce039f940 100644 (file)
@@ -217,11 +217,11 @@ static struct clk *periph_clocks[] __initdata = {
 static struct clk_lookup periph_clocks_lookups[] = {
        /* One additional fake clock for ohci */
        CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
-       CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk),
-       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
-       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
-       CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
-       CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk),
+       CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk),
+       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
+       CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.0", &mmc0_clk),
+       CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.1", &mmc1_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
        CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk),
index 05674865bc214d5642885038c514b5c4b067e7a5..600bffb01edb9b056f0e45cdb0bbf5960f1cea6a 100644 (file)
@@ -1550,7 +1550,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9g45_set_console_clock(portnr);
+               at91sam9g45_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 1a40f16b66c849dd71a14dd238e8df60df284876..29dff18ed1309bb7442e4078c5e01a60ff056134 100644 (file)
@@ -191,8 +191,8 @@ static struct clk *periph_clocks[] __initdata = {
 };
 
 static struct clk_lookup periph_clocks_lookups[] = {
-       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk),
-       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk),
+       CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
+       CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
index c296045f2b6aa2099cca149b520192dc0ab89034..aacb19dc9225a7c90554ee6e03e7610d90df57cb 100644 (file)
@@ -1168,7 +1168,7 @@ void __init at91_set_serial_console(unsigned portnr)
 {
        if (portnr < ATMEL_MAX_UART) {
                atmel_default_console_device = at91_uarts[portnr];
-               at91sam9rl_set_console_clock(portnr);
+               at91sam9rl_set_console_clock(at91_uarts[portnr]->id);
        }
 }
 
index 1904fdf87613d9803ffcae12cfd1618111f4aee5..cdb65d483250f8b8acf5a91495a878710f716cec 100644 (file)
@@ -215,7 +215,7 @@ static void __init cap9adk_add_device_nand(void)
        csa = at91_sys_read(AT91_MATRIX_EBICSA);
        at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V);
 
-       cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit();
+       cap9adk_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (cap9adk_nand_data.bus_width_16)
                cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16;
index d600dc123227f04dad183eb1606f798258eb2ba1..5c240743c5b7ebec2650c8871ac4ba1a45b418c0 100644 (file)
@@ -214,7 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index f897f84d43dc490d393768c3c9ed6296687e5f44..b60c22b6e2411ad35455825b2ff4f916b62e66e4 100644 (file)
@@ -220,7 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index 605b26f40a4ce6e947f0a322c2597e88cf3514de..9bbdc92ea194b5a5f898c727e65fce60fae5bcf2 100644 (file)
@@ -221,7 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index 7624cf0d006b9030639d1a7d2a84d9a3b8f5893a..1325a50101a812065aa617f245987a50a6198c41 100644 (file)
@@ -198,7 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index 063c95d0e8f02879c35792b548da42452056e183..33eaa135f2480a4fc58df6db1eaeb82133fbd1d6 100644 (file)
@@ -178,7 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
 
 static void __init ek_add_device_nand(void)
 {
-       ek_nand_data.bus_width_16 = !board_have_nand_8bit();
+       ek_nand_data.bus_width_16 = board_have_nand_16bit();
        /* setup bus-width (8 or 16) */
        if (ek_nand_data.bus_width_16)
                ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
index b855ee75f72cec84a63650a9de3eb3515d8c890e..8f4866045b41e213172c761f522ffca720a5802e 100644 (file)
  * the 16-31 bit are reserved for at91 generic information
  *
  * bit 31:
- *     0 => nand 16 bit
- *     1 => nand 8 bit
+ *     0 => nand 8 bit
+ *     1 => nand 16 bit
  */
-#define BOARD_HAVE_NAND_8BIT   (1 << 31)
-static int inline board_have_nand_8bit(void)
+#define BOARD_HAVE_NAND_16BIT  (1 << 31)
+static inline int board_have_nand_16bit(void)
 {
-       return system_rev & BOARD_HAVE_NAND_8BIT;
+       return system_rev & BOARD_HAVE_NAND_16BIT;
 }
 
 #endif /* __ARCH_SYSTEM_REV_H__ */
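
With the inverted sense, the flag finally names what it tests, and the unset default (8-bit) matches the common case. system_rev normally arrives from the bootloader via ATAG_REVISION; a board fixup could also set the bit itself, sketched here:

    /* advertise a 16-bit NAND part on this board revision */
    system_rev |= BOARD_HAVE_NAND_16BIT;

    /* consumers, as in the board files below: */
    ek_nand_data.bus_width_16 = board_have_nand_16bit();
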
index 7d393ca010acc2bf7e79fc17e515a973f066a43f..94c950d783babdf6f162f80755919f5aa88b6c11 100644 (file)
@@ -80,7 +80,3 @@
 
                .macro  arch_ret_to_user, tmp1, tmp2
                .endm
-
-               .macro  irq_prio_table
-               .endm
-
index c67f684ee3e58e219a182c55624e902e069e666b..09a87e61ffcf451dfe76ab10e6fd1ba58c5a3834 100644 (file)
@@ -520,7 +520,7 @@ fail:
         */
        if (have_imager()) {
                label = "HD imager";
-               mux |= 1;
+               mux |= 2;
 
                /* externally mux MMC1/ENET/AIC33 to imager */
                mux |= BIT(6) | BIT(5) | BIT(3);
@@ -540,7 +540,7 @@ fail:
                resets &= ~BIT(1);
 
                if (have_tvp7002()) {
-                       mux |= 2;
+                       mux |= 1;
                        resets &= ~BIT(2);
                        label = "tvp7002 HD";
                } else {
index e7221398e5af9c751d1dc8f85c1d294b2afb0f9b..cafbe13a82a5c5bc56e24bf60fd37fae1d324c4c 100644 (file)
@@ -254,8 +254,10 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
        struct davinci_gpio_regs __iomem *g;
        u32 mask = 0xffff;
+       struct davinci_gpio_controller *d;
 
-       g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
+       d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
+       g = (struct davinci_gpio_regs __iomem *)d->regs;
 
        /* we only care about one bank */
        if (irq & 1)
@@ -274,11 +276,14 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
                if (!status)
                        break;
                __raw_writel(status, &g->intstat);
-               if (irq & 1)
-                       status >>= 16;
 
                /* now demux them to the right lowlevel handler */
-               n = (int)irq_get_handler_data(irq);
+               n = d->irq_base;
+               if (irq & 1) {
+                       n += 16;
+                       status >>= 16;
+               }
+
                while (status) {
                        res = ffs(status);
                        n += res;
@@ -424,7 +429,13 @@ static int __init davinci_gpio_irq_setup(void)
 
                /* set up all irqs in this bank */
                irq_set_chained_handler(bank_irq, gpio_irq_handler);
-               irq_set_handler_data(bank_irq, (__force void *)g);
+
+               /*
+                * Each chip handles 32 gpios, and each irq bank consists of 16
+                * gpio irqs. Pass the irq bank's corresponding controller to
+                * the chained irq handler.
+                */
+               irq_set_handler_data(bank_irq, &chips[gpio / 32]);
 
                for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
                        irq_set_chip(irq, &gpio_irqchip);
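
Putting the two halves of the rework together: the controller is found by dividing the GPIO number by 32, and the odd/even parity of the bank IRQ selects the upper or lower 16 lines, so the demux arithmetic reduces to:

    struct davinci_gpio_controller *d = &chips[gpio / 32]; /* 32 gpios/chip */
    unsigned n = d->irq_base;

    if (bank_irq & 1) {             /* odd bank irq => upper 16 lines */
            n += 16;
            status >>= 16;
    }
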
index fbdebc7cb409828dddaf0d8f6e575309ca07be0f..e14c0dc0e12c06923d41e8cb9ac874eccb96e4bc 100644 (file)
@@ -46,6 +46,3 @@
 #endif
 1002:
                .endm
-
-               .macro  irq_prio_table
-               .endm
index bfe68ec4e1a67bc1c95f2ae617bfa1bfe2912e58..952dc126c390cca25e1b33e838cbc282ab501ccc 100644 (file)
@@ -52,8 +52,14 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
        struct irq_chip_type *ct;
 
        gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
+       if (!gc) {
+               pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
+                      __func__, irq_start);
+               return;
+       }
+
        ct = gc->chip_types;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask = irq_gc_mask_clr_bit;
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
 
index 1d4b65fd673eb23e9d2ac2fd9dbded0f2e8ba6d9..6659a0d137a32f246895a011195d1ead66657841 100644 (file)
@@ -251,9 +251,9 @@ static void ep93xx_uart_set_mctrl(struct amba_device *dev,
        unsigned int mcr;
 
        mcr = 0;
-       if (!(mctrl & TIOCM_RTS))
+       if (mctrl & TIOCM_RTS)
                mcr |= 2;
-       if (!(mctrl & TIOCM_DTR))
+       if (mctrl & TIOCM_DTR)
                mcr |= 1;
 
        __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET);
index 9babe4473e8893614e2cddef1ebfd1855f9e94c2..bfd621460abfa6d5322024a9412d28c4b891a4d7 100644 (file)
@@ -23,6 +23,7 @@
 #include <plat/sdhci.h>
 #include <plat/devs.h>
 #include <plat/fimc-core.h>
+#include <plat/iic-core.h>
 
 #include <mach/regs-irq.h>
 
@@ -132,6 +133,11 @@ void __init exynos4_map_io(void)
        s3c_fimc_setname(1, "exynos4-fimc");
        s3c_fimc_setname(2, "exynos4-fimc");
        s3c_fimc_setname(3, "exynos4-fimc");
+
+       /* The I2C bus controllers are directly compatible with s3c2440 */
+       s3c_i2c0_setname("s3c2440-i2c");
+       s3c_i2c1_setname("s3c2440-i2c");
+       s3c_i2c2_setname("s3c2440-i2c");
 }
 
 void __init exynos4_init_clocks(int xtal)
index 1eed5f9f7bd318bee8be3270239d684bd7b69824..983069a5323912f6b7f0775681166db200089b29 100644 (file)
@@ -330,7 +330,7 @@ struct platform_device exynos4_device_ac97 = {
 
 static int exynos4_spdif_cfg_gpio(struct platform_device *pdev)
 {
-       s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(3));
+       s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4));
 
        return 0;
 }
index 6c6cfc50c46b6bd6ef37b1c213c2e9fb4e5f6131..3cdeb3647542592a4a62f98e0c6b647da358b0d4 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-       __INIT
+       __CPUINIT
 
 /*
  * exynos4 specific entry point for secondary CPUs.  This provides
index cf91f50e43ab6434962b0ed9f3457e32a1cc12ec..a8a83e3881a4e9442a15a06a46210355e65ddebd 100644 (file)
@@ -35,6 +35,7 @@ void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
                        tcfg->clocks = exynos4_serial_clocks;
                        tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
                }
+               tcfg->flags |= NO_NEED_CHECK_CLKSRC;
        }
 
        s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
index 152676471b67d4fb7f219c3bedf1eaa5de560ca5..edd814110da86efcf451e1c1c0585651a447011b 100644 (file)
@@ -78,9 +78,7 @@ static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = {
 };
 
 static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = {
-       .cd_type                = S3C_SDHCI_CD_GPIO,
-       .ext_cd_gpio            = EXYNOS4_GPK0(2),
-       .ext_cd_gpio_invert     = 1,
+       .cd_type                = S3C_SDHCI_CD_INTERNAL,
        .clk_type               = S3C_SDHCI_CLK_DIV_EXTERNAL,
 #ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT
        .max_width              = 8,
@@ -96,9 +94,7 @@ static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = {
 };
 
 static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = {
-       .cd_type                = S3C_SDHCI_CD_GPIO,
-       .ext_cd_gpio            = EXYNOS4_GPK2(2),
-       .ext_cd_gpio_invert     = 1,
+       .cd_type                = S3C_SDHCI_CD_INTERNAL,
        .clk_type               = S3C_SDHCI_CLK_DIV_EXTERNAL,
 #ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT
        .max_width              = 8,
index c5e65a02be8d4e2e4303f1333825e141c5988b66..b68d5bdf04cf57e52691dd0778a9bf6f664662a0 100644 (file)
@@ -154,14 +154,6 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-       int i;
-
-       /*
-        * Initialise the present map, which describes the set of CPUs
-        * actually populated at the present time.
-        */
-       for (i = 0; i < max_cpus; i++)
-               set_cpu_present(i, true);
 
        scu_enable(scu_base_addr());
 
index 8755ca8dd48d2157df921a7fb5150deceb804ef1..533c28f758ca437ba8f6ca2570a4ba006dca6e45 100644 (file)
@@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = {
        SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL),
 };
 
-void exynos4_cpu_suspend(void)
+static int exynos4_cpu_suspend(unsigned long arg)
 {
        unsigned long tmp;
        unsigned long mask = 0xFFFFFFFF;
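
The new prototype follows the generic ARM suspend convention used throughout this series: the finisher takes an opaque unsigned long, returns an int, and is entered through cpu_suspend(). A minimal sketch of the shape (the body here is illustrative, not the real exynos4 sequence):

    static int exynos4_cpu_suspend(unsigned long arg)
    {
            /* save SoC-specific state, then enter the low power mode */
            cpu_do_idle();
            return 0;       /* reached only if power was never actually cut */
    }

    /* caller side, as in omap_sram_idle() further down: */
    cpu_suspend(0, exynos4_cpu_suspend);
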
index 6b62425417a6cd77be7e82aa99f631063d9d23c4..0984078f1ebae22c4ab59c2b09db45211bcf3a7e 100644 (file)
 
        .text
 
-       /*
-        * s3c_cpu_save
-        *
-        * entry:
-        *      r1 = v:p offset
-        */
-
-ENTRY(s3c_cpu_save)
-
-       stmfd   sp!, { r3 - r12, lr }
-       ldr     r3, =resume_with_mmu
-       bl      cpu_suspend
-
-       ldr     r0, =pm_cpu_sleep
-       ldr     r0, [ r0 ]
-       mov     pc, r0
-
-resume_with_mmu:
-       ldmfd   sp!, { r3 - r12, pc }
-
-       .ltorg
-
        /*
         * sleep magic, to allow the bootloader to check for a valid
         * image to resume to. Must be the first word before the
index 9b6982efbd22014e59b56e33b7c46bd3c20ccd56..abf356c02343fd144fc305f1c2c7f45af38b3a60 100644 (file)
@@ -6,12 +6,14 @@ config ARCH_H7201
        bool "gms30c7201"
        depends on ARCH_H720X
        select CPU_H7201
+       select ZONE_DMA
        help
          Say Y here if you are using the Hynix GMS30C7201 Reference Board
 
 config ARCH_H7202
        bool "hms30c7202"
        select CPU_H7202
+       select ZONE_DMA
        depends on ARCH_H720X
        help
          Say Y here if you are using the Hynix HMS30C7202 Reference Board
index 6d3b917c4a18e7cbd9f441d9e73d4a904f4b16e5..c3948e5ba4a09b04224f8fefd974d758ef91c202 100644 (file)
@@ -57,9 +57,6 @@
                tst     \irqstat, #1                   @ bit 0 should be set
                .endm
 
-               .macro  irq_prio_table
-               .endm
-
 #else
#error hynix processor selection mismatch
 #endif
index e9a589395723dafd59e01f9c6d3458a1b1689b4c..e2e98bbb641341403eb7e02e5644e79d7f72bb60 100644 (file)
@@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r
 }
 
 
+static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+       return (dma_addr + size) >= SZ_64M;
+}
+
 /*
  * Setup DMA mask to 64MB on PCI devices. Ignore all other devices.
  */
@@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev)
        if(dev->bus == &pci_bus_type) {
                *dev->dma_mask =  SZ_64M - 1;
                dev->coherent_dma_mask = SZ_64M - 1;
-               dmabounce_register_dev(dev, 2048, 4096);
+               dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
        }
        return 0;
 }
@@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev)
        return 0;
 }
 
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-       return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
-}
-
 void __init ixp4xx_pci_preinit(void)
 {
        unsigned long cpuid = read_cpuid_id();
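
dma_needs_bounce() used to be a single global hook; it is now a per-device callback handed to dmabounce_register_dev(), so the bus check disappears (registration already implies a PCI device here). Consolidated usage sketch:

    static int ixp4xx_needs_bounce(struct device *dev,
                                   dma_addr_t dma_addr, size_t size)
    {
            return (dma_addr + size) >= SZ_64M; /* only 64MB is DMA-visible */
    }

    /* small/large bounce-pool sizes, then the per-device predicate: */
    dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
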
index 74ed81a3cb1a7ae0ca7ca97d7ba3f41e4509c150..07772575d7ab22d3205238003322487cd9fead2b 100644 (file)
@@ -419,14 +419,20 @@ static void notrace ixp4xx_update_sched_clock(void)
 /*
  * clocksource
  */
+
+static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
+{
+       return *IXP4XX_OSTS;
+}
+
 unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
 EXPORT_SYMBOL(ixp4xx_timer_freq);
 static void __init ixp4xx_clocksource_init(void)
 {
        init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
 
-       clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32,
-                       clocksource_mmio_readl_up);
+       clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
+                       ixp4xx_clocksource_read);
 }
 
 /*
index 870227c96602e824a2f39c52c355dde390e9ca5c..b725f6c93975d9b459d0a96878fa026ff88beaa1 100644 (file)
@@ -41,7 +41,3 @@
        rsb     \irqnr, \irqnr, #31
        teq     \irqstat, #0
        .endm
-
-       .macro  irq_prio_table
-       .endm
-
index 72b4e76315830e91b9dad19dfdedcdf829021559..ab9f999106c765131d00421f22fa8e16cc75ab7f 100644 (file)
@@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
 static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
 static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
 
-static APMU_CLK(nand, NAND, 0x01db, 208000000);
+static APMU_CLK(nand, NAND, 0x19b, 156000000);
 static APMU_CLK(lcd, LCD, 0x7f, 312000000);
 
 /* device and clock bindings */
index 8f92ccd26edf9f77457d34f7429fb9182dd5e62d..1464607aa60db76632ce72b0008cfa4333178b13 100644 (file)
@@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
 static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
 static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
 
-static APMU_CLK(nand, NAND, 0x01db, 208000000);
+static APMU_CLK(nand, NAND, 0x19b, 156000000);
 static APMU_CLK(u2o, USB, 0x1b, 480000000);
 
 /* device and clock bindings */
index 2034098cf0150eaa45487a2020796d809bc265dc..315b9f365329a459bed0140f3d96f524cdfe655b 100644 (file)
@@ -157,12 +157,4 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-       int i;
-
-       /*
-        * Initialise the present map, which describes the set of CPUs
-        * actually populated at the present time.
-        */
-       for (i = 0; i < max_cpus; i++)
-               set_cpu_present(i, true);
 }
index 38b95e949d13b0a87d83a3274b0369e98c8321ed..63621f152c989c0a2041208cf7ff1ab58bb9ee52 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/io.h>
 
 #include <asm/mach/time.h>
+#include <asm/hardware/gic.h>
+
 #include <mach/msm_iomap.h>
 #include <mach/cpu.h>
 
@@ -55,10 +57,12 @@ enum timer_location {
 #if defined(CONFIG_ARCH_QSD8X50)
 #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
 #define MSM_DGT_SHIFT (0)
-#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \
-                                     defined(CONFIG_ARCH_MSM8960)
+#elif defined(CONFIG_ARCH_MSM7X30)
 #define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
 #define MSM_DGT_SHIFT (0)
+#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
+#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */
+#define MSM_DGT_SHIFT (0)
 #else
 #define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
 #define MSM_DGT_SHIFT (5)
@@ -100,7 +104,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs)
 {
        struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
 
-       return readl(clk->global_counter);
+       /*
+        * Shift timer count down by a constant due to unreliable lower bits
+        * on some targets.
+        */
+       return readl(clk->global_counter) >> clk->shift;
 }
 
 static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
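
Worked numbers for the timer rates involved, including the legacy shift case that the new read hook now honours:

    19200000 / 4  == 4800000    /* QSD8x50: 4.8 MHz, shift 0 */
    24576000 / 4  == 6144000    /* MSM7x30: 6.144 MHz (LPXO) */
    27000000 / 4  == 6750000    /* 8x60/8960: 6.75 MHz (PXO) */
    19200000 >> 5 ==  600000    /* default: 600 KHz after the 5-bit shift */
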
index af98117043d214f8e088e8fd1429656410300a14..5b114d1558c83f41fe8acc524b1aa86f2ffa4a54 100644 (file)
@@ -4,14 +4,14 @@
 
 # Common support
 obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
-obj-y += clock.o clock_data.o opp_data.o reset.o
+obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
 obj-$(CONFIG_OMAP_32K_TIMER)   += timer32k.o
 
 # Power Management
-obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o
+obj-$(CONFIG_PM) += pm.o sleep.o
 
 # DSP
 obj-$(CONFIG_OMAP_MBOX_FWK)    += mailbox_mach.o
index de88c9297b68bc4f014076bf444196549d7893ef..f49ce85d2448eab2a65b8875045504d5b9c44d98 100644 (file)
@@ -215,7 +215,7 @@ static struct omap_kp_platform_data ams_delta_kp_data __initdata = {
        .delay          = 9,
 };
 
-static struct platform_device ams_delta_kp_device __initdata = {
+static struct platform_device ams_delta_kp_device = {
        .name           = "omap-keypad",
        .id             = -1,
        .dev            = {
@@ -225,12 +225,12 @@ static struct platform_device ams_delta_kp_device __initdata = {
        .resource       = ams_delta_kp_resources,
 };
 
-static struct platform_device ams_delta_lcd_device __initdata = {
+static struct platform_device ams_delta_lcd_device = {
        .name   = "lcd_ams_delta",
        .id     = -1,
 };
 
-static struct platform_device ams_delta_led_device __initdata = {
+static struct platform_device ams_delta_led_device = {
        .name   = "ams-delta-led",
        .id     = -1
 };
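
A registered struct platform_device must outlive initcalls: the driver core keeps pointers into it, but __initdata memory is discarded once boot finishes. A sketch of the bug class being fixed here and in the OMAP GPIO hunks below (names hypothetical):

    static struct platform_device foo_device __initdata = {  /* WRONG */
            .name = "foo",
            .id   = -1,
    };

    static int __init foo_init(void)
    {
            /* the device core retains &foo_device after this returns,
             * yet the object is freed with the init sections */
            return platform_device_register(&foo_device);
    }
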
@@ -267,7 +267,7 @@ static struct soc_camera_link ams_delta_iclink = {
        .power          = ams_delta_camera_power,
 };
 
-static struct platform_device ams_delta_camera_device __initdata = {
+static struct platform_device ams_delta_camera_device = {
        .name   = "soc-camera-pdrv",
        .id     = 0,
        .dev    = {
index 04c4b04cf54eba1a72c08a42d3bc96e53dbb5a1f..364137c2042c71a93f64ceab30ba65ee3fbadaa8 100644 (file)
@@ -41,7 +41,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
        .bank_stride            = 1,
 };
 
-static struct __initdata platform_device omap15xx_mpu_gpio = {
+static struct platform_device omap15xx_mpu_gpio = {
        .name           = "omap_gpio",
        .id             = 0,
        .dev            = {
@@ -70,7 +70,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
        .bank_width             = 16,
 };
 
-static struct __initdata platform_device omap15xx_gpio = {
+static struct platform_device omap15xx_gpio = {
        .name           = "omap_gpio",
        .id             = 1,
        .dev            = {
index 5dd0d4c82b247601fd8ceafbb690df17d236f600..293a246e2824a4c1c4d47971e5cd713b530c2e7c 100644 (file)
@@ -44,7 +44,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
        .bank_stride            = 1,
 };
 
-static struct __initdata platform_device omap16xx_mpu_gpio = {
+static struct platform_device omap16xx_mpu_gpio = {
        .name           = "omap_gpio",
        .id             = 0,
        .dev            = {
@@ -73,7 +73,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
        .bank_width             = 16,
 };
 
-static struct __initdata platform_device omap16xx_gpio1 = {
+static struct platform_device omap16xx_gpio1 = {
        .name           = "omap_gpio",
        .id             = 1,
        .dev            = {
@@ -102,7 +102,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
        .bank_width             = 16,
 };
 
-static struct __initdata platform_device omap16xx_gpio2 = {
+static struct platform_device omap16xx_gpio2 = {
        .name           = "omap_gpio",
        .id             = 2,
        .dev            = {
@@ -131,7 +131,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
        .bank_width             = 16,
 };
 
-static struct __initdata platform_device omap16xx_gpio3 = {
+static struct platform_device omap16xx_gpio3 = {
        .name           = "omap_gpio",
        .id             = 3,
        .dev            = {
@@ -160,7 +160,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
        .bank_width             = 16,
 };
 
-static struct __initdata platform_device omap16xx_gpio4 = {
+static struct platform_device omap16xx_gpio4 = {
        .name           = "omap_gpio",
        .id             = 4,
        .dev            = {
index 1204c8b871af505bd2ccc001271bb15f1d87585f..c6ad248d63a6f24524cb9b78709cbf1fc7172ea8 100644 (file)
@@ -46,7 +46,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
        .bank_stride            = 2,
 };
 
-static struct __initdata platform_device omap7xx_mpu_gpio = {
+static struct platform_device omap7xx_mpu_gpio = {
        .name           = "omap_gpio",
        .id             = 0,
        .dev            = {
@@ -75,7 +75,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
        .bank_width             = 32,
 };
 
-static struct __initdata platform_device omap7xx_gpio1 = {
+static struct platform_device omap7xx_gpio1 = {
        .name           = "omap_gpio",
        .id             = 1,
        .dev            = {
@@ -104,7 +104,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
        .bank_width             = 32,
 };
 
-static struct __initdata platform_device omap7xx_gpio2 = {
+static struct platform_device omap7xx_gpio2 = {
        .name           = "omap_gpio",
        .id             = 2,
        .dev            = {
@@ -133,7 +133,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
        .bank_width             = 32,
 };
 
-static struct __initdata platform_device omap7xx_gpio3 = {
+static struct platform_device omap7xx_gpio3 = {
        .name           = "omap_gpio",
        .id             = 3,
        .dev            = {
@@ -162,7 +162,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
        .bank_width             = 32,
 };
 
-static struct __initdata platform_device omap7xx_gpio4 = {
+static struct platform_device omap7xx_gpio4 = {
        .name           = "omap_gpio",
        .id             = 4,
        .dev            = {
@@ -191,7 +191,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
        .bank_width             = 32,
 };
 
-static struct __initdata platform_device omap7xx_gpio5 = {
+static struct platform_device omap7xx_gpio5 = {
        .name           = "omap_gpio",
        .id             = 5,
        .dev            = {
@@ -220,7 +220,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
        .bank_width             = 32,
 };
 
-static struct __initdata platform_device omap7xx_gpio6 = {
+static struct platform_device omap7xx_gpio6 = {
        .name           = "omap_gpio",
        .id             = 6,
        .dev            = {
index fe31d933f0edee78106d52d0c82fb95ef70a3a58..334fb8871bc319348f9edb152e5d611fb539aa6f 100644 (file)
@@ -56,9 +56,13 @@ static struct dev_power_domain default_power_domain = {
                USE_PLATFORM_PM_SLEEP_OPS
        },
 };
+#define OMAP1_PWR_DOMAIN (&default_power_domain)
+#else
+#define OMAP1_PWR_DOMAIN NULL
+#endif /* CONFIG_PM_RUNTIME */
 
 static struct pm_clk_notifier_block platform_bus_notifier = {
-       .pwr_domain = &default_power_domain,
+       .pwr_domain = OMAP1_PWR_DOMAIN,
        .con_ids = { "ick", "fck", NULL, },
 };
 
@@ -72,4 +76,4 @@ static int __init omap1_pm_runtime_init(void)
        return 0;
 }
 core_initcall(omap1_pm_runtime_init);
-#endif /* CONFIG_PM_RUNTIME */
+
index 2a0bb4818caef6a99a99e068946f970f8adcee1e..23f71d40883ea1fafbd2fd331d33fd9c581731dd 100644 (file)
@@ -84,7 +84,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = {
 
 static struct omap_nand_platform_data pandora_nand_data = {
        .cs             = 0,
-       .devsize        = 1,    /* '0' for 8-bit, '1' for 16-bit device */
+       .devsize        = NAND_BUSWIDTH_16,
+       .xfer_type      = NAND_OMAP_PREFETCH_DMA,
        .parts          = omap3pandora_nand_partitions,
        .nr_parts       = ARRAY_SIZE(omap3pandora_nand_partitions),
 };
index 990366726c58f2c2614e43536dc28c5f36b70cbf..88bd6f7705f0317808575cd8ecda856e4e36ec87 100644 (file)
@@ -558,7 +558,7 @@ static struct radio_si4713_platform_data rx51_si4713_data __initdata_or_module =
        .subdev_board_info = &rx51_si4713_board_info,
 };
 
-static struct platform_device rx51_si4713_dev __initdata_or_module = {
+static struct platform_device rx51_si4713_dev = {
        .name   = "radio-si4713",
        .id     = -1,
        .dev    = {
index da53ba3917cae5df365b34bcb8f314e4142a2471..aab884fecc55c673aa28407666676756e870c47e 100644 (file)
@@ -286,14 +286,15 @@ void omap3_save_scratchpad_contents(void)
        scratchpad_contents.boot_config_ptr = 0x0;
        if (cpu_is_omap3630())
                scratchpad_contents.public_restore_ptr =
-                       virt_to_phys(get_omap3630_restore_pointer());
+                       virt_to_phys(omap3_restore_3630);
        else if (omap_rev() != OMAP3430_REV_ES3_0 &&
                                        omap_rev() != OMAP3430_REV_ES3_1)
                scratchpad_contents.public_restore_ptr =
-                       virt_to_phys(get_restore_pointer());
+                       virt_to_phys(omap3_restore);
        else
                scratchpad_contents.public_restore_ptr =
-                       virt_to_phys(get_es3_restore_pointer());
+                       virt_to_phys(omap3_restore_es3);
+
        if (omap_type() == OMAP2_DEVICE_TYPE_GP)
                scratchpad_contents.secure_ram_restore_ptr = 0x0;
        else
index a016c8b59e0063f4294d1b63d2d4ba47669fb498..d4ef75d5a3823d0f85336e8f416e8d861553f83a 100644 (file)
@@ -386,9 +386,9 @@ extern void omap4_ctrl_pad_writel(u32 val, u16 offset);
 
 extern void omap3_save_scratchpad_contents(void);
 extern void omap3_clear_scratchpad_contents(void);
-extern u32 *get_restore_pointer(void);
-extern u32 *get_es3_restore_pointer(void);
-extern u32 *get_omap3630_restore_pointer(void);
+extern void omap3_restore(void);
+extern void omap3_restore_es3(void);
+extern void omap3_restore_3630(void);
 extern u32 omap3_arm_context[128];
 extern void omap3_control_save_context(void);
 extern void omap3_control_restore_context(void);
index a48690b90990b25e3608b9049433bf0ceea37270..ceb8b7e593d7f162285d043c0106944b311fbff5 100644 (file)
 #endif
 
 #endif /* MULTI_OMAP2 */
-
-               .macro  irq_prio_table
-               .endm
index ecfe93c4b5859d25c333f6f01b2507d882ff3895..ce65e9329c7b7ac77c179e7802aa673a6c82328a 100644 (file)
@@ -125,14 +125,6 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-       int i;
-
-       /*
-        * Initialise the present map, which describes the set of CPUs
-        * actually populated at the present time.
-        */
-       for (i = 0; i < max_cpus; i++)
-               set_cpu_present(i, true);
 
        /*
         * Initialise the SCU and wake up the secondary core using
index a5a83b358ddd89724de71eace26e11d8289c8e65..e01da45c053756f62ac08956620dc16dd09d3407 100644 (file)
@@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir;
 
 static int pm_dbg_init_done;
 
-static int __init pm_dbg_init(void);
+static int pm_dbg_init(void);
 
 enum {
        DEBUG_FILE_COUNTERS = 0,
@@ -595,7 +595,7 @@ static int option_set(void *data, u64 val)
 
 DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
 
-static int __init pm_dbg_init(void)
+static int pm_dbg_init(void)
 {
        int i;
        struct dentry *d;
index 45bcfce7735248732a609f647870cee35d19f8a9..04ee5664612613a0c833131d589dd0910fc20dcc 100644 (file)
@@ -88,18 +88,28 @@ extern int pm_dbg_regset_init(int reg_set);
 #define pm_dbg_regset_init(reg_set) do {} while (0);
 #endif /* CONFIG_PM_DEBUG */
 
+/* 24xx */
 extern void omap24xx_idle_loop_suspend(void);
+extern unsigned int omap24xx_idle_loop_suspend_sz;
 
 extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl,
                                        void __iomem *sdrc_power);
-extern void omap34xx_cpu_suspend(u32 *addr, int save_state);
-extern int save_secure_ram_context(u32 *addr);
-extern void omap3_save_scratchpad_contents(void);
+extern unsigned int omap24xx_cpu_suspend_sz;
 
-extern unsigned int omap24xx_idle_loop_suspend_sz;
+/* 3xxx */
+extern void omap34xx_cpu_suspend(int save_state);
+
+/* omap3_do_wfi function pointer and size, for copy to SRAM */
+extern void omap3_do_wfi(void);
+extern unsigned int omap3_do_wfi_sz;
+/* ... and its pointer from SRAM after copy */
+extern void (*omap3_do_wfi_sram)(void);
+
+/* save_secure_ram_context function pointer and size, for copy to SRAM */
+extern int save_secure_ram_context(u32 *addr);
 extern unsigned int save_secure_ram_context_sz;
-extern unsigned int omap24xx_cpu_suspend_sz;
-extern unsigned int omap34xx_cpu_suspend_sz;
+
+extern void omap3_save_scratchpad_contents(void);
 
 #define PM_RTA_ERRATUM_i608            (1 << 0)
 #define PM_SDRC_WAKEUP_ERRATUM_i583    (1 << 1)
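
Only the minimal WFI routine is copied into SRAM now; everything else runs from SDRAM and reaches the SRAM copy through the pointer published here. Usage sketch matching the pm34xx.c hunks below:

    /* once, at init (omap_push_sram_idle): */
    omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);

    /* later, to idle from SRAM (erratum i581 path): */
    omap3_do_wfi_sram();
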
index c155c9d1c82cbe3b7868e545b02033e530f0afca..b77d82665abb51a18b0dd663d51f731f81fd2354 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/console.h>
 #include <trace/events/power.h>
 
+#include <asm/suspend.h>
+
 #include <plat/sram.h>
 #include "clockdomain.h"
 #include "powerdomain.h"
@@ -40,8 +42,6 @@
 #include <plat/gpmc.h>
 #include <plat/dma.h>
 
-#include <asm/tlbflush.h>
-
 #include "cm2xxx_3xxx.h"
 #include "cm-regbits-34xx.h"
 #include "prm-regbits-34xx.h"
@@ -64,11 +64,6 @@ static inline bool is_suspending(void)
 }
 #endif
 
-/* Scratchpad offsets */
-#define OMAP343X_TABLE_ADDRESS_OFFSET     0xc4
-#define OMAP343X_TABLE_VALUE_OFFSET       0xc0
-#define OMAP343X_CONTROL_REG_VALUE_OFFSET  0xc8
-
 /* pm34xx errata defined in pm.h */
 u16 pm34xx_errata;
 
@@ -83,9 +78,8 @@ struct power_state {
 
 static LIST_HEAD(pwrst_list);
 
-static void (*_omap_sram_idle)(u32 *addr, int save_state);
-
 static int (*_omap_save_secure_sram)(u32 *addr);
+void (*omap3_do_wfi_sram)(void);
 
 static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
 static struct powerdomain *core_pwrdm, *per_pwrdm;
@@ -312,28 +306,25 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/* Function to restore the table entry that was modified for enabling MMU */
-static void restore_table_entry(void)
+static void omap34xx_save_context(u32 *save)
 {
-       void __iomem *scratchpad_address;
-       u32 previous_value, control_reg_value;
-       u32 *address;
+       u32 val;
 
-       scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
+       /* Read Auxiliary Control Register */
+       asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
+       *save++ = 1;
+       *save++ = val;
 
-       /* Get address of entry that was modified */
-       address = (u32 *)__raw_readl(scratchpad_address +
-                                    OMAP343X_TABLE_ADDRESS_OFFSET);
-       /* Get the previous value which needs to be restored */
-       previous_value = __raw_readl(scratchpad_address +
-                                    OMAP343X_TABLE_VALUE_OFFSET);
-       address = __va(address);
-       *address = previous_value;
-       flush_tlb_all();
-       control_reg_value = __raw_readl(scratchpad_address
-                                       + OMAP343X_CONTROL_REG_VALUE_OFFSET);
-       /* This will enable caches and prediction */
-       set_cr(control_reg_value);
+       /* Read L2 AUX ctrl register */
+       asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
+       *save++ = 1;
+       *save++ = val;
+}
+
+static int omap34xx_do_sram_idle(unsigned long save_state)
+{
+       omap34xx_cpu_suspend(save_state);
+       return 0;
 }
 
 void omap_sram_idle(void)
@@ -352,9 +343,6 @@ void omap_sram_idle(void)
        int core_prev_state, per_prev_state;
        u32 sdrc_pwr = 0;
 
-       if (!_omap_sram_idle)
-               return;
-
        pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
        pwrdm_clear_all_prev_pwrst(neon_pwrdm);
        pwrdm_clear_all_prev_pwrst(core_pwrdm);
@@ -432,12 +420,16 @@ void omap_sram_idle(void)
                sdrc_pwr = sdrc_read_reg(SDRC_POWER);
 
        /*
-        * omap3_arm_context is the location where ARM registers
-        * get saved. The restore path then reads from this
-        * location and restores them back.
+        * omap3_arm_context is the location where some ARM context
+        * gets saved. The rest is placed on the stack, and restored
+        * from there before resuming.
         */
-       _omap_sram_idle(omap3_arm_context, save_state);
-       cpu_init();
+       if (save_state)
+               omap34xx_save_context(omap3_arm_context);
+       if (save_state == 1 || save_state == 3)
+               cpu_suspend(save_state, omap34xx_do_sram_idle);
+       else
+               omap34xx_do_sram_idle(save_state);
 
        /* Restore normal SDRC POWER settings */
        if (omap_rev() >= OMAP3430_REV_ES3_0 &&
@@ -445,10 +437,6 @@ void omap_sram_idle(void)
            core_next_state == PWRDM_POWER_OFF)
                sdrc_write_reg(sdrc_pwr, SDRC_POWER);
 
-       /* Restore table entry modified during MMU restoration */
-       if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
-               restore_table_entry();
-
        /* CORE */
        if (core_next_state < PWRDM_POWER_ON) {
                core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
@@ -852,10 +840,17 @@ static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
        return 0;
 }
 
+/*
+ * Push functions to SRAM
+ *
+ * The minimum set of functions is pushed to SRAM for execution:
+ * - omap3_do_wfi for erratum i581 WA,
+ * - save_secure_ram_context for security extensions.
+ */
 void omap_push_sram_idle(void)
 {
-       _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
-                                       omap34xx_cpu_suspend_sz);
+       omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
+
        if (omap_type() != OMAP2_DEVICE_TYPE_GP)
                _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
                                save_secure_ram_context_sz);
@@ -920,7 +915,6 @@ static int __init omap3_pm_init(void)
        per_clkdm = clkdm_lookup("per_clkdm");
        core_clkdm = clkdm_lookup("core_clkdm");
 
-       omap_push_sram_idle();
 #ifdef CONFIG_SUSPEND
        suspend_set_ops(&omap_pm_ops);
 #endif /* CONFIG_SUSPEND */
index 63f10669571ad262c7ca0e105b3f779484c1a9a0..f2ea1bd1c6918d72079a029fb00fc8725f4fafa4 100644 (file)
  * API functions
  */
 
-/*
- * The "get_*restore_pointer" functions are used to provide a
- * physical restore address where the ROM code jumps while waking
- * up from MPU OFF/OSWR state.
- * The restore pointer is stored into the scratchpad.
- */
-
-       .text
-/* Function call to get the restore pointer for resume from OFF */
-ENTRY(get_restore_pointer)
-       stmfd   sp!, {lr}       @ save registers on stack
-       adr     r0, restore
-       ldmfd   sp!, {pc}       @ restore regs and return
-ENDPROC(get_restore_pointer)
-       .align
-ENTRY(get_restore_pointer_sz)
-       .word   . - get_restore_pointer
-
-       .text
-/* Function call to get the restore pointer for 3630 resume from OFF */
-ENTRY(get_omap3630_restore_pointer)
-       stmfd   sp!, {lr}       @ save registers on stack
-       adr     r0, restore_3630
-       ldmfd   sp!, {pc}       @ restore regs and return
-ENDPROC(get_omap3630_restore_pointer)
-       .align
-ENTRY(get_omap3630_restore_pointer_sz)
-       .word   . - get_omap3630_restore_pointer
-
-       .text
-/* Function call to get the restore pointer for ES3 to resume from OFF */
-ENTRY(get_es3_restore_pointer)
-       stmfd   sp!, {lr}       @ save registers on stack
-       adr     r0, restore_es3
-       ldmfd   sp!, {pc}       @ restore regs and return
-ENDPROC(get_es3_restore_pointer)
-       .align
-ENTRY(get_es3_restore_pointer_sz)
-       .word   . - get_es3_restore_pointer
-
        .text
 /*
  * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
@@ -133,7 +93,7 @@ ENDPROC(enable_omap3630_toggle_l2_on_restore)
 /* Function to call rom code to save secure ram context */
        .align  3
 ENTRY(save_secure_ram_context)
-       stmfd   sp!, {r1-r12, lr}       @ save registers on stack
+       stmfd   sp!, {r4 - r11, lr}     @ save registers on stack
        adr     r3, api_params          @ r3 points to parameters
        str     r0, [r3,#0x4]           @ r0 has sdram address
        ldr     r12, high_mask
@@ -152,7 +112,7 @@ ENTRY(save_secure_ram_context)
        nop
        nop
        nop
-       ldmfd   sp!, {r1-r12, pc}
+       ldmfd   sp!, {r4 - r11, pc}
        .align
 sram_phy_addr_mask:
        .word   SRAM_BASE_P
@@ -179,69 +139,38 @@ ENTRY(save_secure_ram_context_sz)
  *
  *
  * Notes:
- * - this code gets copied to internal SRAM at boot and after wake-up
- *   from OFF mode. The execution pointer in SRAM is _omap_sram_idle.
+ * - only the minimum set of functions gets copied to internal SRAM at boot
+ *   and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
+ *   pointers in SDRAM or SRAM are called depending on the desired low power
+ *   target state.
  * - when the OMAP wakes up it continues at different execution points
  *   depending on the low power mode (non-OFF vs OFF modes),
  *   cf. 'Resume path for xxx mode' comments.
  */
        .align  3
 ENTRY(omap34xx_cpu_suspend)
-       stmfd   sp!, {r0-r12, lr}       @ save registers on stack
+       stmfd   sp!, {r4 - r11, lr}     @ save registers on stack
 
        /*
-        * r0 contains CPU context save/restore pointer in sdram
-        * r1 contains information about saving context:
+        * r0 contains information about saving context:
         *   0 - No context lost
         *   1 - Only L1 and logic lost
         *   2 - Only L2 lost (Even L1 is retained we clean it along with L2)
         *   3 - Both L1 and L2 lost and logic lost
         */
 
-       /* Directly jump to WFI is the context save is not required */
-       cmp     r1, #0x0
-       beq     omap3_do_wfi
+       /*
+        * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
+        * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
+        */
+       ldr     r4, omap3_do_wfi_sram_addr
+       ldr     r5, [r4]
+       cmp     r0, #0x0                @ If no context save required,
+       bxeq    r5                      @  jump to the WFI code in SRAM
+
 
        /* Otherwise fall through to the save context code */
 save_context_wfi:
-       mov     r8, r0                  @ Store SDRAM address in r8
-       mrc     p15, 0, r5, c1, c0, 1   @ Read Auxiliary Control Register
-       mov     r4, #0x1                @ Number of parameters for restore call
-       stmia   r8!, {r4-r5}            @ Push parameters for restore call
-       mrc     p15, 1, r5, c9, c0, 2   @ Read L2 AUX ctrl register
-       stmia   r8!, {r4-r5}            @ Push parameters for restore call
-
-        /* Check what the target sleep state is from r1 */
-       cmp     r1, #0x2                @ Only L2 lost, no need to save context
-       beq     clean_caches
-
-l1_logic_lost:
-       mov     r4, sp                  @ Store sp
-       mrs     r5, spsr                @ Store spsr
-       mov     r6, lr                  @ Store lr
-       stmia   r8!, {r4-r6}
-
-       mrc     p15, 0, r4, c1, c0, 2   @ Coprocessor access control register
-       mrc     p15, 0, r5, c2, c0, 0   @ TTBR0
-       mrc     p15, 0, r6, c2, c0, 1   @ TTBR1
-       mrc     p15, 0, r7, c2, c0, 2   @ TTBCR
-       stmia   r8!, {r4-r7}
-
-       mrc     p15, 0, r4, c3, c0, 0   @ Domain access Control Register
-       mrc     p15, 0, r5, c10, c2, 0  @ PRRR
-       mrc     p15, 0, r6, c10, c2, 1  @ NMRR
-       stmia   r8!,{r4-r6}
-
-       mrc     p15, 0, r4, c13, c0, 1  @ Context ID
-       mrc     p15, 0, r5, c13, c0, 2  @ User r/w thread and process ID
-       mrc     p15, 0, r6, c12, c0, 0  @ Secure or NS vector base address
-       mrs     r7, cpsr                @ Store current cpsr
-       stmia   r8!, {r4-r7}
-
-       mrc     p15, 0, r4, c1, c0, 0   @ save control register
-       stmia   r8!, {r4}
-
-clean_caches:
        /*
         * jump out to kernel flush routine
         *  - reusing that code is better
@@ -284,7 +213,32 @@ clean_caches:
  THUMB(        nop             )
        .arm
 
-omap3_do_wfi:
+       b       omap3_do_wfi
+
+/*
+ * Local variables
+ */
+omap3_do_wfi_sram_addr:
+       .word omap3_do_wfi_sram
+kernel_flush:
+       .word v7_flush_dcache_all
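
For reference, a minimal C-side sketch of how the omap3_do_wfi_sram_addr literal above gets its value, per the omap_push_sram_idle reference in the notes: the WFI routine is copied into internal SRAM and its new address recorded. The pointer variable name is an assumption; omap_sram_push() is the existing OMAP helper that performs the copy.

        /* sketch, assuming this shape for the SRAM publishing code */
        extern void omap3_do_wfi(void);
        extern unsigned long omap3_do_wfi_sz;   /* size word emitted below */

        static void (*omap3_do_wfi_sram)(void);

        void omap_push_sram_idle(void)
        {
                /* copy the WFI routine to SRAM, remember where it landed */
                omap3_do_wfi_sram = (void (*)(void))
                        omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
        }
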
+
+/* ===================================
+ * == WFI instruction => Enter idle ==
+ * ===================================
+ */
+
+/*
+ * Do WFI instruction
+ * Includes the resume path for non-OFF modes
+ *
+ * This code gets copied to internal SRAM and is accessible
+ * from both SDRAM and SRAM:
+ * - executed from SRAM for non-off modes (omap3_do_wfi_sram),
+ * - executed from SDRAM for OFF mode (omap3_do_wfi).
+ */
+       .align  3
+ENTRY(omap3_do_wfi)
        ldr     r4, sdrc_power          @ read the SDRC_POWER register
        ldr     r5, [r4]                @ read the contents of SDRC_POWER
        orr     r5, r5, #0x40           @ enable self refresh on idle req
@@ -316,8 +270,86 @@ omap3_do_wfi:
        nop
        nop
        nop
-       bl wait_sdrc_ok
 
+/*
+ * This function implements the erratum ID i581 WA:
+ *  SDRC state restore before accessing the SDRAM
+ *
+ * Only used at return from non-OFF mode. For OFF
+ * mode the ROM code configures the SDRC and
+ * the DPLL before calling the restore code directly
+ * from DDR.
+ */
+
+/* Make sure SDRC accesses are ok */
+wait_sdrc_ok:
+
+/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
+       ldr     r4, cm_idlest_ckgen
+wait_dpll3_lock:
+       ldr     r5, [r4]
+       tst     r5, #1
+       beq     wait_dpll3_lock
+
+       ldr     r4, cm_idlest1_core
+wait_sdrc_ready:
+       ldr     r5, [r4]
+       tst     r5, #0x2
+       bne     wait_sdrc_ready
+       /* allow DLL powerdown upon hw idle req */
+       ldr     r4, sdrc_power
+       ldr     r5, [r4]
+       bic     r5, r5, #0x40
+       str     r5, [r4]
+
+/*
+ * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
+ * base instead.
+ * Be careful not to clobber r7 when maintaining this code.
+ */
+
+is_dll_in_lock_mode:
+       /* Is dll in lock mode? */
+       ldr     r4, sdrc_dlla_ctrl
+       ldr     r5, [r4]
+       tst     r5, #0x4
+       bne     exit_nonoff_modes       @ Return if locked
+       /* wait till dll locks */
+       adr     r7, kick_counter
+wait_dll_lock_timed:
+       ldr     r4, wait_dll_lock_counter
+       add     r4, r4, #1
+       str     r4, [r7, #wait_dll_lock_counter - kick_counter]
+       ldr     r4, sdrc_dlla_status
+       /* Wait 20us for lock */
+       mov     r6, #8
+wait_dll_lock:
+       subs    r6, r6, #0x1
+       beq     kick_dll
+       ldr     r5, [r4]
+       and     r5, r5, #0x4
+       cmp     r5, #0x4
+       bne     wait_dll_lock
+       b       exit_nonoff_modes       @ Return when locked
+
+       /* disable/reenable DLL if not locked */
+kick_dll:
+       ldr     r4, sdrc_dlla_ctrl
+       ldr     r5, [r4]
+       mov     r6, r5
+       bic     r6, #(1<<3)             @ disable dll
+       str     r6, [r4]
+       dsb
+       orr     r6, r6, #(1<<3)         @ enable dll
+       str     r6, [r4]
+       dsb
+       ldr     r4, kick_counter
+       add     r4, r4, #1
+       str     r4, [r7]                @ kick_counter
+       b       wait_dll_lock_timed
+
+exit_nonoff_modes:
+       /* Re-enable C-bit if needed */
        mrc     p15, 0, r0, c1, c0, 0
        tst     r0, #(1 << 2)           @ Check C bit enabled?
        orreq   r0, r0, #(1 << 2)       @ Enable the C bit if cleared
@@ -329,7 +361,32 @@ omap3_do_wfi:
  * == Exit point from non-OFF modes ==
  * ===================================
  */
-       ldmfd   sp!, {r0-r12, pc}       @ restore regs and return
+       ldmfd   sp!, {r4 - r11, pc}     @ restore regs and return
+
+/*
+ * Local variables
+ */
+sdrc_power:
+       .word   SDRC_POWER_V
+cm_idlest1_core:
+       .word   CM_IDLEST1_CORE_V
+cm_idlest_ckgen:
+       .word   CM_IDLEST_CKGEN_V
+sdrc_dlla_status:
+       .word   SDRC_DLLA_STATUS_V
+sdrc_dlla_ctrl:
+       .word   SDRC_DLLA_CTRL_V
+       /*
+        * When exporting to userspace while the counters are in SRAM,
+        * these 2 words need to be at the end to facilitate retrieval!
+        */
+kick_counter:
+       .word   0
+wait_dll_lock_counter:
+       .word   0
+
+ENTRY(omap3_do_wfi_sz)
+       .word   . - omap3_do_wfi
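
A hedged sketch of why the two counters must stay at the very end of this block: export code can then locate them from the block's size alone, wherever omap_sram_push() placed the copy, without knowing the interior layout. The helper name is an assumption.

        extern unsigned long omap3_do_wfi_sz;

        /* sram_copy: address the block was pushed to */
        static void omap3_read_sdrc_counters(void __iomem *sram_copy,
                                             u32 *kick, u32 *wait_dll_lock)
        {
                /* last two words: kick_counter, then wait_dll_lock_counter */
                *kick          = readl(sram_copy + omap3_do_wfi_sz - 8);
                *wait_dll_lock = readl(sram_copy + omap3_do_wfi_sz - 4);
        }
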
 
 
 /*
@@ -346,13 +403,17 @@ omap3_do_wfi:
  *  restore_es3: applies to 34xx >= ES3.0
  *  restore_3630: applies to 36xx
  *  restore: common code for 3xxx
+ *
+ * Note: when back from CORE and MPU OFF mode we are running
+ *  from SDRAM, without MMU, without the caches and prediction.
+ *  Also the SRAM content has been cleared.
  */
-restore_es3:
+ENTRY(omap3_restore_es3)
        ldr     r5, pm_prepwstst_core_p
        ldr     r4, [r5]
        and     r4, r4, #0x3
        cmp     r4, #0x0        @ Check if previous power state of CORE is OFF
-       bne     restore
+       bne     omap3_restore   @ Fall through to OMAP3 common code
        adr     r0, es3_sdrc_fix
        ldr     r1, sram_base
        ldr     r2, es3_sdrc_fix_sz
@@ -364,35 +425,32 @@ copy_to_sram:
        bne     copy_to_sram
        ldr     r1, sram_base
        blx     r1
-       b       restore
+       b       omap3_restore   @ Fall through to OMAP3 common code
+ENDPROC(omap3_restore_es3)
 
-restore_3630:
+ENTRY(omap3_restore_3630)
        ldr     r1, pm_prepwstst_core_p
        ldr     r2, [r1]
        and     r2, r2, #0x3
        cmp     r2, #0x0        @ Check if previous power state of CORE is OFF
-       bne     restore
+       bne     omap3_restore   @ Fall through to OMAP3 common code
        /* Disable RTA before giving control */
        ldr     r1, control_mem_rta
        mov     r2, #OMAP36XX_RTA_DISABLE
        str     r2, [r1]
+ENDPROC(omap3_restore_3630)
 
        /* Fall through to common code for the remaining logic */
 
-restore:
+ENTRY(omap3_restore)
        /*
-        * Check what was the reason for mpu reset and store the reason in r9:
-        *  0 - No context lost
-        *  1 - Only L1 and logic lost
-        *  2 - Only L2 lost - In this case, we won't be here
-        *  3 - Both L1 and L2 lost
+        * Read the pwstctrl register to check the reason for mpu reset.
+        * This tells us what was lost.
         */
        ldr     r1, pm_pwstctrl_mpu
        ldr     r2, [r1]
        and     r2, r2, #0x3
        cmp     r2, #0x0        @ Check if target power state was OFF or RET
-       moveq   r9, #0x3        @ MPU OFF => L1 and L2 lost
-       movne   r9, #0x1        @ Only L1 and L2 lost => avoid L2 invalidation
        bne     logic_l1_restore
 
        ldr     r0, l2dis_3630
@@ -471,115 +529,39 @@ logic_l1_restore:
        orr     r1, r1, #2              @ re-enable L2 cache
        mcr     p15, 0, r1, c1, c0, 1
 skipl2reen:
-       mov     r1, #0
-       /*
-        * Invalidate all instruction caches to PoU
-        * and flush branch target cache
-        */
-       mcr     p15, 0, r1, c7, c5, 0
 
-       ldr     r4, scratchpad_base
-       ldr     r3, [r4,#0xBC]
-       adds    r3, r3, #16
-
-       ldmia   r3!, {r4-r6}
-       mov     sp, r4                  @ Restore sp
-       msr     spsr_cxsf, r5           @ Restore spsr
-       mov     lr, r6                  @ Restore lr
-
-       ldmia   r3!, {r4-r7}
-       mcr     p15, 0, r4, c1, c0, 2   @ Coprocessor access Control Register
-       mcr     p15, 0, r5, c2, c0, 0   @ TTBR0
-       mcr     p15, 0, r6, c2, c0, 1   @ TTBR1
-       mcr     p15, 0, r7, c2, c0, 2   @ TTBCR
-
-       ldmia   r3!,{r4-r6}
-       mcr     p15, 0, r4, c3, c0, 0   @ Domain access Control Register
-       mcr     p15, 0, r5, c10, c2, 0  @ PRRR
-       mcr     p15, 0, r6, c10, c2, 1  @ NMRR
-
-
-       ldmia   r3!,{r4-r7}
-       mcr     p15, 0, r4, c13, c0, 1  @ Context ID
-       mcr     p15, 0, r5, c13, c0, 2  @ User r/w thread and process ID
-       mrc     p15, 0, r6, c12, c0, 0  @ Secure or NS vector base address
-       msr     cpsr, r7                @ store cpsr
-
-       /* Enabling MMU here */
-       mrc     p15, 0, r7, c2, c0, 2   @ Read TTBRControl
-       /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
-       and     r7, #0x7
-       cmp     r7, #0x0
-       beq     usettbr0
-ttbr_error:
-       /*
-        * More work needs to be done to support N[0:2] value other than 0
-        * So looping here so that the error can be detected
-        */
-       b       ttbr_error
-usettbr0:
-       mrc     p15, 0, r2, c2, c0, 0
-       ldr     r5, ttbrbit_mask
-       and     r2, r5
-       mov     r4, pc
-       ldr     r5, table_index_mask
-       and     r4, r5                  @ r4 = 31 to 20 bits of pc
-       /* Extract the value to be written to table entry */
-       ldr     r1, table_entry
-       /* r1 has the value to be written to table entry*/
-       add     r1, r1, r4
-       /* Getting the address of table entry to modify */
-       lsr     r4, #18
-       /* r2 has the location which needs to be modified */
-       add     r2, r4
-       /* Storing previous entry of location being modified */
-       ldr     r5, scratchpad_base
-       ldr     r4, [r2]
-       str     r4, [r5, #0xC0]
-       /* Modify the table entry */
-       str     r1, [r2]
-       /*
-        * Storing address of entry being modified
-        * - will be restored after enabling MMU
-        */
-       ldr     r5, scratchpad_base
-       str     r2, [r5, #0xC4]
-
-       mov     r0, #0
-       mcr     p15, 0, r0, c7, c5, 4   @ Flush prefetch buffer
-       mcr     p15, 0, r0, c7, c5, 6   @ Invalidate branch predictor array
-       mcr     p15, 0, r0, c8, c5, 0   @ Invalidate instruction TLB
-       mcr     p15, 0, r0, c8, c6, 0   @ Invalidate data TLB
-       /*
-        * Restore control register. This enables the MMU.
-        * The caches and prediction are not enabled here, they
-        * will be enabled after restoring the MMU table entry.
-        */
-       ldmia   r3!, {r4}
-       /* Store previous value of control register in scratchpad */
-       str     r4, [r5, #0xC8]
-       ldr     r2, cache_pred_disable_mask
-       and     r4, r2
-       mcr     p15, 0, r4, c1, c0, 0
-       dsb
-       isb
-       ldr     r0, =restoremmu_on
-       bx      r0
+       /* Now branch to the common CPU resume function */
+       b       cpu_resume
+ENDPROC(omap3_restore)
+
+       .ltorg
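
With the restore paths now exported as proper symbols (replacing the removed get_*_restore_pointer helpers at the top of this file), C code can take their addresses directly when programming the wakeup entry. A sketch, with the selection logic assumed from the comments above:

        extern void omap3_restore(void);
        extern void omap3_restore_es3(void);
        extern void omap3_restore_3630(void);

        /* sketch: pick the ROM-visible wakeup entry for the scratchpad */
        static u32 omap3_restore_entry_phys(void)
        {
                if (cpu_is_omap3630())
                        return virt_to_phys(omap3_restore_3630);
                else if (omap_rev() >= OMAP3430_REV_ES3_0)
                        return virt_to_phys(omap3_restore_es3);
                else
                        return virt_to_phys(omap3_restore);
        }
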
 
 /*
- * ==============================
- * == Exit point from OFF mode ==
- * ==============================
+ * Local variables
  */
-restoremmu_on:
-       ldmfd   sp!, {r0-r12, pc}       @ restore regs and return
-
+pm_prepwstst_core_p:
+       .word   PM_PREPWSTST_CORE_P
+pm_pwstctrl_mpu:
+       .word   PM_PWSTCTRL_MPU_P
+scratchpad_base:
+       .word   SCRATCHPAD_BASE_P
+sram_base:
+       .word   SRAM_BASE_P + 0x8000
+control_stat:
+       .word   CONTROL_STAT
+control_mem_rta:
+       .word   CONTROL_MEM_RTA_CTRL
+l2dis_3630:
+       .word   0
 
 /*
  * Internal functions
  */
 
-/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
+/*
+ * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0
+ * Copied to and run from SRAM in order to reconfigure the SDRC parameters.
+ */
        .text
        .align  3
 ENTRY(es3_sdrc_fix)
@@ -609,6 +591,9 @@ ENTRY(es3_sdrc_fix)
        str     r5, [r4]                @ kick off refreshes
        bx      lr
 
+/*
+ * Local variables
+ */
        .align
 sdrc_syscfg:
        .word   SDRC_SYSCONFIG_P
@@ -627,128 +612,3 @@ sdrc_manual_1:
 ENDPROC(es3_sdrc_fix)
 ENTRY(es3_sdrc_fix_sz)
        .word   . - es3_sdrc_fix
-
-/*
- * This function implements the erratum ID i581 WA:
- *  SDRC state restore before accessing the SDRAM
- *
- * Only used at return from non-OFF mode. For OFF
- * mode the ROM code configures the SDRC and
- * the DPLL before calling the restore code directly
- * from DDR.
- */
-
-/* Make sure SDRC accesses are ok */
-wait_sdrc_ok:
-
-/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
-       ldr     r4, cm_idlest_ckgen
-wait_dpll3_lock:
-       ldr     r5, [r4]
-       tst     r5, #1
-       beq     wait_dpll3_lock
-
-       ldr     r4, cm_idlest1_core
-wait_sdrc_ready:
-       ldr     r5, [r4]
-       tst     r5, #0x2
-       bne     wait_sdrc_ready
-       /* allow DLL powerdown upon hw idle req */
-       ldr     r4, sdrc_power
-       ldr     r5, [r4]
-       bic     r5, r5, #0x40
-       str     r5, [r4]
-
-/*
- * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
- * base instead.
- * Be careful not to clobber r7 when maintaining this code.
- */
-
-is_dll_in_lock_mode:
-       /* Is dll in lock mode? */
-       ldr     r4, sdrc_dlla_ctrl
-       ldr     r5, [r4]
-       tst     r5, #0x4
-       bxne    lr                      @ Return if locked
-       /* wait till dll locks */
-       adr     r7, kick_counter
-wait_dll_lock_timed:
-       ldr     r4, wait_dll_lock_counter
-       add     r4, r4, #1
-       str     r4, [r7, #wait_dll_lock_counter - kick_counter]
-       ldr     r4, sdrc_dlla_status
-       /* Wait 20us for lock */
-       mov     r6, #8
-wait_dll_lock:
-       subs    r6, r6, #0x1
-       beq     kick_dll
-       ldr     r5, [r4]
-       and     r5, r5, #0x4
-       cmp     r5, #0x4
-       bne     wait_dll_lock
-       bx      lr                      @ Return when locked
-
-       /* disable/reenable DLL if not locked */
-kick_dll:
-       ldr     r4, sdrc_dlla_ctrl
-       ldr     r5, [r4]
-       mov     r6, r5
-       bic     r6, #(1<<3)             @ disable dll
-       str     r6, [r4]
-       dsb
-       orr     r6, r6, #(1<<3)         @ enable dll
-       str     r6, [r4]
-       dsb
-       ldr     r4, kick_counter
-       add     r4, r4, #1
-       str     r4, [r7]                @ kick_counter
-       b       wait_dll_lock_timed
-
-       .align
-cm_idlest1_core:
-       .word   CM_IDLEST1_CORE_V
-cm_idlest_ckgen:
-       .word   CM_IDLEST_CKGEN_V
-sdrc_dlla_status:
-       .word   SDRC_DLLA_STATUS_V
-sdrc_dlla_ctrl:
-       .word   SDRC_DLLA_CTRL_V
-pm_prepwstst_core_p:
-       .word   PM_PREPWSTST_CORE_P
-pm_pwstctrl_mpu:
-       .word   PM_PWSTCTRL_MPU_P
-scratchpad_base:
-       .word   SCRATCHPAD_BASE_P
-sram_base:
-       .word   SRAM_BASE_P + 0x8000
-sdrc_power:
-       .word   SDRC_POWER_V
-ttbrbit_mask:
-       .word   0xFFFFC000
-table_index_mask:
-       .word   0xFFF00000
-table_entry:
-       .word   0x00000C02
-cache_pred_disable_mask:
-       .word   0xFFFFE7FB
-control_stat:
-       .word   CONTROL_STAT
-control_mem_rta:
-       .word   CONTROL_MEM_RTA_CTRL
-kernel_flush:
-       .word   v7_flush_dcache_all
-l2dis_3630:
-       .word   0
-       /*
-        * When exporting to userspace while the counters are in SRAM,
-        * these 2 words need to be at the end to facilitate retrieval!
-        */
-kick_counter:
-       .word   0
-wait_dll_lock_counter:
-       .word   0
-ENDPROC(omap34xx_cpu_suspend)
-
-ENTRY(omap34xx_cpu_suspend_sz)
-       .word   . - omap34xx_cpu_suspend
index 8003037578ed89c4d8807933ed7052e3f011097c..db7eeebf30d75c260241569b98850ef36706d79e 100644 (file)
 1003:
                .endm
 
-
-               .macro  irq_prio_table
-               .endm
-
-
index f15afe012995d386f8d9046a1dbfd48ce1267b9e..51558bcee999e949e32500e661b0a016a34da283 100644 (file)
@@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns {
 extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns;
 
 /* sleep.S */
-extern void pxa25x_cpu_suspend(unsigned int, long);
-extern void pxa27x_cpu_suspend(unsigned int, long);
+extern int pxa25x_finish_suspend(unsigned long);
+extern int pxa27x_finish_suspend(unsigned long);
 
 extern int pxa_pm_enter(suspend_state_t state);
 extern int pxa_pm_prepare(void);
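
These prototypes reflect the recurring conversion in this merge: platform code no longer calls a self-contained xxx_cpu_suspend(mode, v:p offset) routine but passes a "finish" function to the common cpu_suspend() helper, which saves the CPU context first. A minimal sketch of the contract, with hypothetical SoC names:

        #include <linux/suspend.h>
        #include <asm/suspend.h>

        /* fn(arg) runs after the context save, with the MMU still on; it
         * must enter the sleep state and normally never returns. Wakeup
         * comes back through cpu_resume. */
        static int mysoc_finish_suspend(unsigned long arg)  /* hypothetical */
        {
                /* write SoC-specific power-mode registers here, then idle */
                for (;;)
                        cpu_do_idle();
        }

        static int mysoc_pm_enter(suspend_state_t state)    /* hypothetical */
        {
                cpu_suspend(0, mysoc_finish_suspend);
                return 0;
        }
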
index 87ae3129f4f702353e7348442f9ae29df4379d47..b27544bcafcb5c68b384d850ea522da8f8712afe 100644 (file)
@@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void)
                if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
                    (GPDR(i) & GPIO_bit(i))) {
                        if (GPLR(i) & GPIO_bit(i))
-                               PGSR(i) |= GPIO_bit(i);
+                               PGSR(gpio_to_bank(i)) |= GPIO_bit(i);
                        else
-                               PGSR(i) &= ~GPIO_bit(i);
+                               PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i);
                }
        }
 
index 65f24f0b77e85673d7908aec8683debc6110249e..5a5329bc33f1dda89db167d669be1d2ec554ecd8 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/i2c-gpio.h>
 
 #include <asm/mach-types.h>
+#include <asm/suspend.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
index 51e1583265b26e6c834dbd02c914ed2fdb2d24a9..37178a8559b15d9efaaaf885e8ce1d5c5755ef7a 100644 (file)
@@ -42,7 +42,6 @@ int pxa_pm_enter(suspend_state_t state)
 
        /* *** go zzz *** */
        pxa_cpu_pm_fns->enter(state);
-       cpu_init();
 
        if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
                /* after sleeping, validate the checksum */
index fed363cec9c635feb64cdaa70a79a375c92c1a0d..9c434d21a271a9d5e49cf1e0776ac84cd27c72c2 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/irq.h>
 
 #include <asm/mach/map.h>
+#include <asm/suspend.h>
 #include <mach/hardware.h>
 #include <mach/irqs.h>
 #include <mach/gpio.h>
@@ -244,7 +245,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state)
 
        switch (state) {
        case PM_SUSPEND_MEM:
-               pxa25x_cpu_suspend(PWRMODE_SLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+               cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend);
                break;
        }
 }
index 2fecbec58d8821e720066b0e20583d18b69dc3f7..9d2400b5f503f617ff29e849cc49f1a7f2a33dc6 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
 #include <asm/irq.h>
+#include <asm/suspend.h>
 #include <mach/irqs.h>
 #include <mach/gpio.h>
 #include <mach/pxa27x.h>
@@ -284,6 +285,11 @@ void pxa27x_cpu_pm_restore(unsigned long *sleep_save)
 void pxa27x_cpu_pm_enter(suspend_state_t state)
 {
        extern void pxa_cpu_standby(void);
+#ifndef CONFIG_IWMMXT
+       u64 acc0;
+
+       asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
+#endif
 
        /* ensure voltage-change sequencer not initiated, which hangs */
        PCFR &= ~PCFR_FVC;
@@ -299,7 +305,10 @@ void pxa27x_cpu_pm_enter(suspend_state_t state)
                pxa_cpu_standby();
                break;
        case PM_SUSPEND_MEM:
-               pxa27x_cpu_suspend(pwrmode, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+               cpu_suspend(pwrmode, pxa27x_finish_suspend);
+#ifndef CONFIG_IWMMXT
+               asm volatile("mar acc0, %Q0, %R0" : : "r" (acc0));
+#endif
                break;
        }
 }
index 8521d7d6f1dab39402ef5c36ed3abece4cf17f89..ef1c56a67afcbd0ebcdfc70ab7c078e7b256ea11 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/i2c/pxa-i2c.h>
 
 #include <asm/mach/map.h>
+#include <asm/suspend.h>
 #include <mach/hardware.h>
 #include <mach/gpio.h>
 #include <mach/pxa3xx-regs.h>
@@ -141,8 +142,13 @@ static void pxa3xx_cpu_pm_suspend(void)
 {
        volatile unsigned long *p = (volatile void *)0xc0000000;
        unsigned long saved_data = *p;
+#ifndef CONFIG_IWMMXT
+       u64 acc0;
 
-       extern void pxa3xx_cpu_suspend(long);
+       asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
+#endif
+
+       extern int pxa3xx_finish_suspend(unsigned long);
 
        /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
        CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
@@ -162,11 +168,15 @@ static void pxa3xx_cpu_pm_suspend(void)
        /* overwrite with the resume address */
        *p = virt_to_phys(cpu_resume);
 
-       pxa3xx_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);
+       cpu_suspend(0, pxa3xx_finish_suspend);
 
        *p = saved_data;
 
        AD3ER = 0;
+
+#ifndef CONFIG_IWMMXT
+       asm volatile("mar acc0, %Q0, %R0" : : "r" (acc0));
+#endif
 }
 
 static void pxa3xx_cpu_pm_enter(suspend_state_t state)
index d130f77b6d11c8dfd6317cf793dcdcabd1b354af..2f37d43f51b66519f2492f444e7607938d3bc404 100644 (file)
@@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = {
        .xres           = 480,
        .yres           = 272,
        .bpp            = 16,
-       .hsync_len      = 4,
+       .hsync_len      = 41,
        .left_margin    = 2,
        .right_margin   = 1,
-       .vsync_len      = 1,
+       .vsync_len      = 10,
        .upper_margin   = 3,
        .lower_margin   = 1,
        .sync           = 0,
@@ -596,29 +596,31 @@ static void __init raumfeld_lcd_init(void)
 {
        int ret;
 
-       pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
-
-       /* Earlier devices had the backlight regulator controlled
-        * via PWM, later versions use another controller for that */
-       if ((system_rev & 0xff) < 2) {
-               mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
-               pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
-               platform_device_register(&raumfeld_pwm_backlight_device);
-       } else
-               platform_device_register(&raumfeld_lt3593_device);
-
        ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
        if (ret < 0)
                pr_warning("Unable to request GPIO_TFT_VA_EN\n");
        else
                gpio_direction_output(GPIO_TFT_VA_EN, 1);
 
+       msleep(100);
+
        ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
        if (ret < 0)
                pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n");
        else
                gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
 
+       /* Hardware revision 2 has the backlight regulator controlled
+        * by an LT3593; earlier and later devices use PWM for that. */
+       if ((system_rev & 0xff) == 2) {
+               platform_device_register(&raumfeld_lt3593_device);
+       } else {
+               mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
+               pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
+               platform_device_register(&raumfeld_pwm_backlight_device);
+       }
+
+       pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
        platform_device_register(&pxa3xx_device_gcu);
 }
 
@@ -657,10 +659,10 @@ static struct lis3lv02d_platform_data lis3_pdata = {
 
 #define SPI_AK4104     \
 {                      \
-       .modalias       = "ak4104",     \
-       .max_speed_hz   = 10000,        \
-       .bus_num        = 0,            \
-       .chip_select    = 0,            \
+       .modalias       = "ak4104-codec",       \
+       .max_speed_hz   = 10000,                \
+       .bus_num        = 0,                    \
+       .chip_select    = 0,                    \
        .controller_data = (void *) GPIO_SPDIF_CS,      \
 }
 
index 6f5368899d84f595d052b4bf9068ad1cf9662889..1e544be9905dc11abb9182b46e95ce73d8fca43b 100644 (file)
 
 #ifdef CONFIG_PXA3xx
 /*
- * pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4)
- *
- * r0 = v:p offset
+ * pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4)
  */
-ENTRY(pxa3xx_cpu_suspend)
-
-#ifndef CONFIG_IWMMXT
-       mra     r2, r3, acc0
-#endif
-       stmfd   sp!, {r2 - r12, lr}     @ save registers on stack
-       mov     r1, r0
-       ldr     r3, =pxa_cpu_resume     @ resume function
-       bl      cpu_suspend
-
+ENTRY(pxa3xx_finish_suspend)
        mov     r0, #0x06               @ S2D3C4 mode
        mcr     p14, 0, r0, c7, c0, 0   @ enter sleep
 
@@ -46,28 +35,18 @@ ENTRY(pxa3xx_cpu_suspend)
 
 #ifdef CONFIG_PXA27x
 /*
- * pxa27x_cpu_suspend()
+ * pxa27x_finish_suspend()
  *
  * Forces CPU into sleep state.
  *
  * r0 = value for PWRMODE M field for desired sleep state
- * r1 = v:p offset
  */
-ENTRY(pxa27x_cpu_suspend)
-
-#ifndef CONFIG_IWMMXT
-       mra     r2, r3, acc0
-#endif
-       stmfd   sp!, {r2 - r12, lr}             @ save registers on stack
-       mov     r4, r0                          @ save sleep mode
-       ldr     r3, =pxa_cpu_resume             @ resume function
-       bl      cpu_suspend
-
+ENTRY(pxa27x_finish_suspend)
        @ Put the processor to sleep
        @ (also workaround for sighting 28071)
 
        @ prepare value for sleep mode
-       mov     r1, r4                          @ sleep mode
+       mov     r1, r0                          @ sleep mode
 
        @ prepare pointer to physical address 0 (virtual mapping in generic.c)
        mov     r2, #UNCACHED_PHYS_0
@@ -99,21 +78,16 @@ ENTRY(pxa27x_cpu_suspend)
 
 #ifdef CONFIG_PXA25x
 /*
- * pxa25x_cpu_suspend()
+ * pxa25x_finish_suspend()
  *
  * Forces CPU into sleep state.
  *
  * r0 = value for PWRMODE M field for desired sleep state
- * r1 = v:p offset
  */
 
-ENTRY(pxa25x_cpu_suspend)
-       stmfd   sp!, {r2 - r12, lr}             @ save registers on stack
-       mov     r4, r0                          @ save sleep mode
-       ldr     r3, =pxa_cpu_resume             @ resume function
-       bl      cpu_suspend
+ENTRY(pxa25x_finish_suspend)
        @ prepare value for sleep mode
-       mov     r1, r4                          @ sleep mode
+       mov     r1, r0                          @ sleep mode
 
        @ prepare pointer to physical address 0 (virtual mapping in generic.c)
        mov     r2, #UNCACHED_PHYS_0
@@ -195,16 +169,3 @@ pxa_cpu_do_suspend:
        mcr     p14, 0, r1, c7, c0, 0           @ PWRMODE
 
 20:    b       20b                             @ loop waiting for sleep
-
-/*
- * pxa_cpu_resume()
- *
- * entry point from bootloader into kernel during resume
- */
-       .align 5
-pxa_cpu_resume:
-       ldmfd   sp!, {r2, r3}
-#ifndef CONFIG_IWMMXT
-       mar     acc0, r2, r3
-#endif
-       ldmfd   sp!, {r4 - r12, pc}             @ return to caller
index 7fe74067d85fc7e2aeb1469878f28c8cd73b3456..094279aefe9c6fd5d6a86dd88fbbc44b6d55c07b 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
+#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/apm-emulation.h>
index 00363c7ac1828c58e45035f7d37a5ea0bcbcfcf5..9b99cc164de533b8c7d209765275fd36d4a891ba 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/can/platform/mcp251x.h>
 
 #include <asm/mach-types.h>
+#include <asm/suspend.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
@@ -676,7 +677,7 @@ static struct pxa2xx_udc_mach_info zeus_udc_info = {
 static void zeus_power_off(void)
 {
        local_irq_disable();
-       pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+       cpu_suspend(PWRMODE_DEEPSLEEP, pxa27x_finish_suspend);
 }
 #else
 #define zeus_power_off   NULL
index b9a9805e4828623a8114ea04f034fe9492d43ebb..dba6d0c1fc170d22aa869b088323481a1aa18829 100644 (file)
@@ -50,6 +50,7 @@ config MACH_REALVIEW_PB1176
        bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
        select CPU_V6
        select ARM_GIC
+       select HAVE_TCM
        help
          Include support for the ARM(R) RealView(R) Platform Baseboard for
          ARM1176JZF-S.
index 963bf0d8119aafb17ac3281f843820ff454d3886..4ae943bafa9299d530629d53707487f87c9a83fb 100644 (file)
@@ -68,14 +68,6 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-       int i;
-
-       /*
-        * Initialise the present map, which describes the set of CPUs
-        * actually populated at the present time.
-        */
-       for (i = 0; i < max_cpus; i++)
-               set_cpu_present(i, true);
 
        scu_enable(scu_base_addr());
 
index 752b13a7b3dbf15f9348d874bf928bde5ddbd018..f4077efa51fa4d4b4a2f7550c8298cc392cd9010 100644 (file)
 
 extern void s3c2412_sleep_enter(void);
 
-static void s3c2412_cpu_suspend(void)
+static int s3c2412_cpu_suspend(unsigned long arg)
 {
        unsigned long tmp;
 
-       flush_cache_all();
-
        /* set our standby method to sleep */
 
        tmp = __raw_readl(S3C2412_PWRCFG);
@@ -50,6 +48,8 @@ static void s3c2412_cpu_suspend(void)
        __raw_writel(tmp, S3C2412_PWRCFG);
 
        s3c2412_sleep_enter();
+
+       panic("sleep resumed to originator?");
 }
 
 static void s3c2412_pm_prepare(void)
index 41db2b21e213368e823dbafb68c5672a31bc2a30..9ec54f1d8e75349a665f5cd1dcd40742da7d1808 100644 (file)
 
 extern void s3c2412_sleep_enter(void);
 
-static void s3c2416_cpu_suspend(void)
+static int s3c2416_cpu_suspend(unsigned long arg)
 {
-       flush_cache_all();
-
        /* enable wakeup sources regardless of battery state */
        __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG);
 
@@ -35,6 +33,8 @@ static void s3c2416_cpu_suspend(void)
        __raw_writel(0x2BED, S3C2443_PWRMODE);
 
        s3c2412_sleep_enter();
+
+       panic("sleep resumed to originator?");
 }
 
 static void s3c2416_pm_prepare(void)
index dd3120df09fe3299ca78f74945d0ff446ada8db8..fc2dc0b3d4feb4dd5599101db123b78de309f2ea 100644 (file)
@@ -552,7 +552,7 @@ struct mini2440_features_t {
        struct platform_device *optional[8];
 };
 
-static void mini2440_parse_features(
+static void __init mini2440_parse_features(
                struct mini2440_features_t * features,
                const char * features_str )
 {
index 82db072cb836b78899e917f4691bacdc3857557b..5e6b42089eb44d7048b39cc2855403cd44cc0ddc 100644 (file)
@@ -88,6 +88,7 @@ static struct s3c64xx_spi_info s3c64xx_spi0_pdata = {
        .cfg_gpio = s3c64xx_spi_cfg_gpio,
        .fifo_lvl_mask = 0x7f,
        .rx_lvl_offset = 13,
+       .tx_st_done = 21,
 };
 
 static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -132,6 +133,7 @@ static struct s3c64xx_spi_info s3c64xx_spi1_pdata = {
        .cfg_gpio = s3c64xx_spi_cfg_gpio,
        .fifo_lvl_mask = 0x7f,
        .rx_lvl_offset = 13,
+       .tx_st_done = 21,
 };
 
 struct platform_device s3c64xx_device_spi1 = {
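
tx_st_done, added to every s3c64xx_spi_info in this merge, is the bit position of the transmit-done flag in the SPI status register: bit 21 on S3C64xx/S5PC100-class SoCs, bit 25 on S5P64x0/S5PV210. A sketch of how the spi-s3c64xx driver would consume it; the status register offset name is assumed:

        /* sketch: poll until the controller reports TX complete */
        static void wait_for_tx_done(void __iomem *regs,
                                     struct s3c64xx_spi_info *info)
        {
                u32 status;

                do {
                        status = readl(regs + S3C64XX_SPI_STATUS);
                } while (!(status & (1 << info->tx_st_done)));
        }
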
index b197171e7d03c8219c34a126e90c78f13501a6d1..204bfafe4bfc214677e60fedbef98e4f43a5dd93 100644 (file)
@@ -113,7 +113,7 @@ found:
        return chan;
 }
 
-int s3c2410_dma_config(unsigned int channel, int xferunit)
+int s3c2410_dma_config(enum dma_ch channel, int xferunit)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -297,7 +297,7 @@ static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
        return 0;
 }
 
-int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
+int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
  *
  */
 
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                        dma_addr_t data, int size)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -415,7 +415,7 @@ err_buff:
 EXPORT_SYMBOL(s3c2410_dma_enqueue);
 
 
-int s3c2410_dma_devconfig(unsigned int channel,
+int s3c2410_dma_devconfig(enum dma_ch channel,
                          enum s3c2410_dmasrc source,
                          unsigned long devaddr)
 {
@@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(unsigned int channel,
 EXPORT_SYMBOL(s3c2410_dma_devconfig);
 
 
-int s3c2410_dma_getposition(unsigned int channel,
+int s3c2410_dma_getposition(enum dma_ch channel,
                            dma_addr_t *src, dma_addr_t *dst)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
  * get control of an dma channel
 */
 
-int s3c2410_dma_request(unsigned int channel,
+int s3c2410_dma_request(enum dma_ch channel,
                        struct s3c2410_dma_client *client,
                        void *dev)
 {
@@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
  * allowed to go through.
 */
 
-int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
+int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        unsigned long flags;
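
The prototype changes in this file are pure type hardening: the channel parameter becomes the existing enum dma_ch instead of a bare unsigned int, so callers passing arbitrary integers now draw a compiler warning. A sketch of the enum's shape; the entry names here are illustrative only:

        enum dma_ch {
                DMACH_XD0,      /* illustrative request-line entries */
                DMACH_XD1,
                DMACH_SDI,
                /* ... one entry per peripheral request line ... */
                DMACH_MAX,
        };
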
index bc1c470b7de69becd4a096e4551b3c2f74e374cd..8bad64370689b4163501631e3b7fc85f7e184510 100644 (file)
@@ -112,7 +112,7 @@ void s3c_pm_save_core(void)
  * this.
  */
 
-static void s3c64xx_cpu_suspend(void)
+static int s3c64xx_cpu_suspend(unsigned long arg)
 {
        unsigned long tmp;
 
index 1f87732b23206daa964f4af30eaf62c94dd73617..34313f9c8792888fd12ac329e932e9d91337f358 100644 (file)
 
        .text
 
-       /* s3c_cpu_save
-        *
-        * Save enough processor state to allow the restart of the pm.c
-        * code after resume.
-        *
-        * entry:
-        *      r1 = v:p offset
-       */
-
-ENTRY(s3c_cpu_save)
-       stmfd   sp!, { r4 - r12, lr }
-       ldr     r3, =resume_with_mmu
-       bl      cpu_suspend
-
-       @@ call final suspend code
-       ldr     r0, =pm_cpu_sleep
-       ldr     pc, [r0]
-       
-       @@ return to the caller, after the MMU is turned on.
-       @@ restore the last bits of the stack and return.
-resume_with_mmu:
-       ldmfd   sp!, { r4 - r12, pc }   @ return, from sp from s3c_cpu_save
-
        /* Sleep magic, the word before the resume entry point so that the
         * bootloader can check for a resumable image. */
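
A sketch of the bootloader-side counterpart this comment implies; the magic value is purely illustrative:

        #define S3C_SLEEP_MAGIC  0x2bedf00d     /* assumed value */

        static int image_is_resumable(const u32 *resume_entry)
        {
                /* the magic word sits immediately before the entry point */
                return resume_entry[-1] == S3C_SLEEP_MAGIC;
        }
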
 
index e78ee18c76e321bf3a8b43862b3fa0dd0e5e6bc1..ac825e82632645c0a79300aad63ab85776cfded6 100644 (file)
@@ -112,12 +112,14 @@ static struct s3c64xx_spi_info s5p6440_spi0_pdata = {
        .cfg_gpio       = s5p6440_spi_cfg_gpio,
        .fifo_lvl_mask  = 0x1ff,
        .rx_lvl_offset  = 15,
+       .tx_st_done     = 25,
 };
 
 static struct s3c64xx_spi_info s5p6450_spi0_pdata = {
        .cfg_gpio       = s5p6450_spi_cfg_gpio,
        .fifo_lvl_mask  = 0x1ff,
        .rx_lvl_offset  = 15,
+       .tx_st_done     = 25,
 };
 
 static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -160,12 +162,14 @@ static struct s3c64xx_spi_info s5p6440_spi1_pdata = {
        .cfg_gpio       = s5p6440_spi_cfg_gpio,
        .fifo_lvl_mask  = 0x7f,
        .rx_lvl_offset  = 15,
+       .tx_st_done     = 25,
 };
 
 static struct s3c64xx_spi_info s5p6450_spi1_pdata = {
        .cfg_gpio       = s5p6450_spi_cfg_gpio,
        .fifo_lvl_mask  = 0x7f,
        .rx_lvl_offset  = 15,
+       .tx_st_done     = 25,
 };
 
 struct platform_device s5p64x0_device_spi1 = {
index 57b19794d9bbb50a5013023ed3d1b7e5902f27d0..e5d6c4dceb566db4f77d616eff07550e74ae0b28 100644 (file)
@@ -15,6 +15,7 @@
 #include <mach/dma.h>
 #include <mach/map.h>
 #include <mach/spi-clocks.h>
+#include <mach/irqs.h>
 
 #include <plat/s3c64xx-spi.h>
 #include <plat/gpio-cfg.h>
@@ -90,6 +91,7 @@ static struct s3c64xx_spi_info s5pc100_spi0_pdata = {
        .fifo_lvl_mask = 0x7f,
        .rx_lvl_offset = 13,
        .high_speed = 1,
+       .tx_st_done = 21,
 };
 
 static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -134,6 +136,7 @@ static struct s3c64xx_spi_info s5pc100_spi1_pdata = {
        .fifo_lvl_mask = 0x7f,
        .rx_lvl_offset = 13,
        .high_speed = 1,
+       .tx_st_done = 21,
 };
 
 struct platform_device s5pc100_device_spi1 = {
@@ -176,6 +179,7 @@ static struct s3c64xx_spi_info s5pc100_spi2_pdata = {
        .fifo_lvl_mask = 0x7f,
        .rx_lvl_offset = 13,
        .high_speed = 1,
+       .tx_st_done = 21,
 };
 
 struct platform_device s5pc100_device_spi2 = {
index e3249a47e3b1de90bc8ed5e5345debb75de29260..eaf9a7bff7a0bb6facc39dd27ba12592ec1dfb2b 100644 (file)
@@ -85,6 +85,7 @@ static struct s3c64xx_spi_info s5pv210_spi0_pdata = {
        .fifo_lvl_mask = 0x1ff,
        .rx_lvl_offset = 15,
        .high_speed = 1,
+       .tx_st_done = 25,
 };
 
 static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -129,6 +130,7 @@ static struct s3c64xx_spi_info s5pv210_spi1_pdata = {
        .fifo_lvl_mask = 0x7f,
        .rx_lvl_offset = 15,
        .high_speed = 1,
+       .tx_st_done = 25,
 };
 
 struct platform_device s5pv210_device_spi1 = {
index 24febae3d4c0241bf89069898aec3a7a37bc6f12..309e388a8a83388d711b1956f71aa74874a2cab0 100644 (file)
@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = {
        SAVE_ITEM(S3C2410_TCNTO(0)),
 };
 
-void s5pv210_cpu_suspend(void)
+void s5pv210_cpu_suspend(unsigned long arg)
 {
        unsigned long tmp;
 
index a3d649466fb1b8b89086f55939011612e3e8b64e..e3452ccd4b085e75bfc719efe5ddfa0d1c902932 100644 (file)
 
        .text
 
-       /* s3c_cpu_save
-        *
-        * entry:
-        *      r1 = v:p offset
-       */
-
-ENTRY(s3c_cpu_save)
-
-       stmfd   sp!, { r3 - r12, lr }
-       ldr     r3, =resume_with_mmu
-       bl      cpu_suspend
-
-       ldr     r0, =pm_cpu_sleep
-       ldr     r0, [ r0 ]
-       mov     pc, r0
-
-resume_with_mmu:
-       ldmfd   sp!, { r3 - r12, pc }
-
-       .ltorg
-
        /* sleep magic, to allow the bootloader to check for a valid
         * image to resume to. Must be the first word before the
         * s3c_cpu_resume entry.
index c4661aab22fb5475200b22f584c80755d425b0ca..bf85b8b259d5b08a1f2b8e61f9a8792a800298f3 100644 (file)
 
 #include <mach/hardware.h>
 #include <asm/memory.h>
+#include <asm/suspend.h>
 #include <asm/system.h>
 #include <asm/mach/time.h>
 
-extern void sa1100_cpu_suspend(long);
+extern int sa1100_finish_suspend(unsigned long);
 
 #define SAVE(x)                sleep_save[SLEEP_SAVE_##x] = x
 #define RESTORE(x)     x = sleep_save[SLEEP_SAVE_##x]
@@ -75,9 +76,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
        PSPR = virt_to_phys(cpu_resume);
 
        /* go zzz */
-       sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);
-
-       cpu_init();
+       cpu_suspend(0, sa1100_finish_suspend);
 
        /*
         * Ensure not to come back here if it wasn't intended
index 04f2a618d4ef11b20976ecc3eb141e10835f4aa9..e8223315b44271ede96dc94a31d993edda913d6a 100644 (file)
 
                .text
 /*
- * sa1100_cpu_suspend()
+ * sa1100_finish_suspend()
  *
  * Causes sa11x0 to enter sleep state
  *
  */
 
-ENTRY(sa1100_cpu_suspend)
-       stmfd   sp!, {r4 - r12, lr}             @ save registers on stack
-       mov     r1, r0
-       ldr     r3, =sa1100_cpu_resume          @ return function
-       bl      cpu_suspend
-
+ENTRY(sa1100_finish_suspend)
        @ disable clock switching
        mcr     p15, 0, r1, c15, c2, 2
 
@@ -139,13 +134,3 @@ sa1110_sdram_controller_fix:
        str     r13, [r12]
 
 20:    b       20b                     @ loop waiting for sleep
-
-/*
- * cpu_sa1100_resume()
- *
- * entry point from bootloader into kernel during resume
- */
-       .align 5
-sa1100_cpu_resume:
-       mcr     p15, 0, r1, c15, c1, 2          @ enable clock switching
-       ldmfd   sp!, {r4 - r12, pc}             @ return to caller
index e2853c0a3333bc403749a231196d06b4f3ea56c1..0bb6cc626eb71d78b184905020ded7a769e230c3 100644 (file)
                .endm
 
                .macro  get_irqnr_preamble, base, tmp
+               mov     \base, #0xe0000000
                .endm
 
                .macro  arch_ret_to_user, tmp1, tmp2
                .endm
 
                .macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
-               mov     r4, #0xe0000000
 
                mov     \irqstat, #0x0C
-               strb    \irqstat, [r4, #0x20]           @outb(0x0C, 0x20) /* Poll command */
-               ldrb    \irqnr, [r4, #0x20]             @irq = inb(0x20) & 7
+               strb    \irqstat, [\base, #0x20]        @outb(0x0C, 0x20) /* Poll command */
+               ldrb    \irqnr, [\base, #0x20]          @irq = inb(0x20) & 7
                and     \irqstat, \irqnr, #0x80
                teq     \irqstat, #0
                beq     43f
@@ -29,8 +29,8 @@
                teq     \irqnr, #2
                bne     44f
 43:            mov     \irqstat, #0x0C
-               strb    \irqstat, [r4, #0xa0]           @outb(0x0C, 0xA0) /* Poll command */
-               ldrb    \irqnr, [r4, #0xa0]             @irq = (inb(0xA0) & 7) + 8
+               strb    \irqstat, [\base, #0xa0]        @outb(0x0C, 0xA0) /* Poll command */
+               ldrb    \irqnr, [\base, #0xa0]          @irq = (inb(0xA0) & 7) + 8
                and     \irqstat, \irqnr, #0x80
                teq     \irqstat, #0
                beq     44f
index 1e2aba23e0d6d56ea6ddcd696937344027493e8b..ce5c2513c6ce93f62f37356d55f2835fbf3a6332 100644 (file)
@@ -381,7 +381,7 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
        gpio_set_value(GPIO_PORT114, state);
 }
 
-static struct sh_mobile_sdhi_info sh_sdhi1_platdata = {
+static struct sh_mobile_sdhi_info sh_sdhi1_info = {
        .tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE,
        .tmio_caps      = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
        .tmio_ocr_mask  = MMC_VDD_32_33 | MMC_VDD_33_34,
@@ -413,7 +413,7 @@ static struct platform_device sdhi1_device = {
        .name           = "sh_mobile_sdhi",
        .id             = 1,
        .dev            = {
-               .platform_data  = &sh_sdhi1_platdata,
+               .platform_data  = &sh_sdhi1_info,
        },
        .num_resources  = ARRAY_SIZE(sdhi1_resources),
        .resource       = sdhi1_resources,
index f6b687f61c28ebe4def01c748e0a576a53a362d9..803bc6edfca477a6c67634f0ad9c435338b89502 100644 (file)
@@ -913,7 +913,7 @@ static struct i2c_board_info imx074_info = {
        I2C_BOARD_INFO("imx074", 0x1a),
 };
 
-struct soc_camera_link imx074_link = {
+static struct soc_camera_link imx074_link = {
        .bus_id         = 0,
        .board_info     = &imx074_info,
        .i2c_adapter_id = 0,
index 7e1d375843211e52944720a562a5c580dbe20149..3802f2afabef28f320b52fdb7a238b5306a07585 100644 (file)
@@ -1287,9 +1287,9 @@ static struct platform_device *mackerel_devices[] __initdata = {
        &nor_flash_device,
        &smc911x_device,
        &lcdc_device,
-       &usbhs0_device,
        &usb1_host_device,
        &usbhs1_device,
+       &usbhs0_device,
        &leds_device,
        &fsi_device,
        &fsi_ak4643_device,
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h
new file mode 100644 (file)
index 0000000..4a81b01
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef SDHI_SH7372_H
+#define SDHI_SH7372_H
+
+#define SDGENCNTA       0xfe40009c
+
+/* The countdown of SDGENCNTA is controlled by
+ * ZB3D2CLK, which runs at 149.5 MHz.
+ * That is 149.5 ticks/us; approximate this as 150 ticks/us.
+ */
+static void udelay(int us)
+{
+       __raw_writel(us * 150, SDGENCNTA);
+       while (__raw_readl(SDGENCNTA)) ;
+}
+
+static void msleep(int ms)
+{
+       udelay(ms * 1000);
+}
+
+#endif
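
A quick check of the approximation above: udelay(us) loads us * 150 ticks while the counter decrements at 149.5 MHz, so the realized delay is us * 150 / 149.5, about 0.33% longer than requested, which errs on the safe side for a delay loop. An illustrative computation (not part of the patch):

        /* illustrative only: realized delay in ns for a requested us */
        static unsigned long sdgencnta_actual_ns(unsigned long us)
        {
                /* 150 ticks per us at 149.5 MHz; scale by 2 to stay integral */
                return us * 150 * 2000 / 299;   /* ~= us * 1003 ns */
        }
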
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi.h b/arch/arm/mach-shmobile/include/mach/sdhi.h
new file mode 100644 (file)
index 0000000..0ec9e69
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef SDHI_H
+#define SDHI_H
+
+/**************************************************
+ *
+ *             CPU specific settings
+ *
+ **************************************************/
+
+#ifdef CONFIG_ARCH_SH7372
+#include "mach/sdhi-sh7372.h"
+#else
+#error "unsupported CPU."
+#endif
+
+#endif /* SDHI_H */
index f3888feb1c684de1911b20cbad2fc7208cf1aee6..66f980625a33e1e05fed0d142abd3fc0090d6c4e 100644 (file)
@@ -64,10 +64,5 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-       int i;
-
-       for (i = 0; i < max_cpus; i++)
-               set_cpu_present(i, true);
-
        shmobile_smp_prepare_cpus();
 }
index b8ae3c978dee30a26ce8ca4df3828714c3551f87..1a594dce8fbc1a8817f11545722c0786c7ffab93 100644 (file)
@@ -129,14 +129,6 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-       int i;
-
-       /*
-        * Initialise the present map, which describes the set of CPUs
-        * actually populated at the present time.
-        */
-       for (i = 0; i < max_cpus; i++)
-               set_cpu_present(i, true);
 
        scu_enable(scu_base);
 }
index fd4cf1ca5efd06c4a6367edc1c6cae332a6ea785..70cdbd60596a906d3c6264a4610c3151f4aaa14d 100644 (file)
@@ -110,10 +110,18 @@ static pin_cfg_t mop500_pins_common[] = {
        GPIO168_KP_O0,
 
        /* UART */
-       GPIO0_U0_CTSn   | PIN_INPUT_PULLUP,
-       GPIO1_U0_RTSn   | PIN_OUTPUT_HIGH,
-       GPIO2_U0_RXD    | PIN_INPUT_PULLUP,
-       GPIO3_U0_TXD    | PIN_OUTPUT_HIGH,
+       /* The UART0 pins' GPIO configuration must be
+        * kept intact to prevent a glitch on the TX line
+        * when the tty device is opened. These pins are
+        * later switched to UART mode via mop500_pins_uart0.
+        *
+        * This will be replaced with a plain UART pin
+        * configuration once the issue is solved.
+        */
+       GPIO0_GPIO      | PIN_INPUT_PULLUP,
+       GPIO1_GPIO      | PIN_OUTPUT_HIGH,
+       GPIO2_GPIO      | PIN_INPUT_PULLUP,
+       GPIO3_GPIO      | PIN_OUTPUT_HIGH,
 
        GPIO29_U2_RXD   | PIN_INPUT_PULLUP,
        GPIO30_U2_TXD   | PIN_OUTPUT_HIGH,
index bb26f40493e697ffe9a6805ae65acd896b9e12e5..2a08c07dec6dfd3c9d44cb837f4f43e01ea75e77 100644 (file)
 #include <linux/leds-lp5521.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
+#include <linux/delay.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
 #include <plat/i2c.h>
 #include <plat/ste_dma40.h>
+#include <plat/pincfg.h>
 
 #include <mach/hardware.h>
 #include <mach/setup.h>
 #include <mach/devices.h>
 #include <mach/irqs.h>
 
+#include "pins-db8500.h"
 #include "ste-dma40-db8500.h"
 #include "devices-db8500.h"
 #include "board-mop500.h"
@@ -393,12 +396,63 @@ static struct stedma40_chan_cfg uart2_dma_cfg_tx = {
 };
 #endif
 
+
+static pin_cfg_t mop500_pins_uart0[] = {
+       GPIO0_U0_CTSn   | PIN_INPUT_PULLUP,
+       GPIO1_U0_RTSn   | PIN_OUTPUT_HIGH,
+       GPIO2_U0_RXD    | PIN_INPUT_PULLUP,
+       GPIO3_U0_TXD    | PIN_OUTPUT_HIGH,
+};
+
+#define PRCC_K_SOFTRST_SET      0x18
+#define PRCC_K_SOFTRST_CLEAR    0x1C
+static void ux500_uart0_reset(void)
+{
+       void __iomem *prcc_rst_set, *prcc_rst_clr;
+
+       prcc_rst_set = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE +
+                       PRCC_K_SOFTRST_SET);
+       prcc_rst_clr = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE +
+                       PRCC_K_SOFTRST_CLEAR);
+
+       /* Activate soft reset PRCC_K_SOFTRST_CLEAR */
+       writel((readl(prcc_rst_clr) | 0x1), prcc_rst_clr);
+       udelay(1);
+
+       /* Release soft reset PRCC_K_SOFTRST_SET */
+       writel((readl(prcc_rst_set) | 0x1), prcc_rst_set);
+       udelay(1);
+}
+
+static void ux500_uart0_init(void)
+{
+       int ret;
+
+       ret = nmk_config_pins(mop500_pins_uart0,
+                       ARRAY_SIZE(mop500_pins_uart0));
+       if (ret < 0)
+               pr_err("pl011: uart pins_enable failed\n");
+}
+
+static void ux500_uart0_exit(void)
+{
+       int ret;
+
+       ret = nmk_config_pins_sleep(mop500_pins_uart0,
+                       ARRAY_SIZE(mop500_pins_uart0));
+       if (ret < 0)
+               pr_err("pl011: uart pins_disable failed\n");
+}
+
 static struct amba_pl011_data uart0_plat = {
 #ifdef CONFIG_STE_DMA40
        .dma_filter = stedma40_filter,
        .dma_rx_param = &uart0_dma_cfg_rx,
        .dma_tx_param = &uart0_dma_cfg_tx,
 #endif
+       .init = ux500_uart0_init,
+       .exit = ux500_uart0_exit,
+       .reset = ux500_uart0_reset,
 };
 
 static struct amba_pl011_data uart1_plat = {
index 0c527fe2cebb1dd7c93a0f651323eaf7559fd77c..a33df5f4c27a1d33ca534e5b8071ad2677edb7fa 100644 (file)
@@ -172,14 +172,6 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-       int i;
-
-       /*
-        * Initialise the present map, which describes the set of CPUs
-        * actually populated at the present time.
-        */
-       for (i = 0; i < max_cpus; i++)
-               set_cpu_present(i, true);
 
        scu_enable(scu_base_addr());
        wakeup_secondary();
index 765a71ff7f3b5d6ef95fc76cdd95b2e9836367d8..bfd32f52c2dbebf0dbe175ee343dcc37ad78d6f7 100644 (file)
@@ -229,10 +229,6 @@ static void ct_ca9x4_init_cpu_map(void)
 
 static void ct_ca9x4_smp_enable(unsigned int max_cpus)
 {
-       int i;
-       for (i = 0; i < max_cpus; i++)
-               set_cpu_present(i, true);
-
        scu_enable(MMIO_P2V(A9_MPCORE_SCU));
 }
 #endif
index 245140c0df107d5b00830032f3351fe6cc82ff88..642de0408f25731eb6081958cafa67beb793c4ab 100644 (file)
 static void __iomem *ic_regbase;
 static void __iomem *sic_regbase;
 
-static void vt8500_irq_mask(unsigned int irq)
+static void vt8500_irq_mask(struct irq_data *d)
 {
        void __iomem *base = ic_regbase;
+       unsigned irq = d->irq;
        u8 edge;
 
        if (irq >= 64) {
@@ -64,9 +65,10 @@ static void vt8500_irq_mask(unsigned int irq)
        }
 }
 
-static void vt8500_irq_unmask(unsigned int irq)
+static void vt8500_irq_unmask(struct irq_data *d)
 {
        void __iomem *base = ic_regbase;
+       unsigned irq = d->irq;
        u8 dctr;
 
        if (irq >= 64) {
@@ -78,10 +80,11 @@ static void vt8500_irq_unmask(unsigned int irq)
        writeb(dctr, base + VT8500_IC_DCTR + irq);
 }
 
-static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
        void __iomem *base = ic_regbase;
-       unsigned int orig_irq = irq;
+       unsigned irq = d->irq;
+       unsigned orig_irq = irq;
        u8 dctr;
 
        if (irq >= 64) {
@@ -114,11 +117,11 @@ static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type)
 }
 
 static struct irq_chip vt8500_irq_chip = {
-       .name      = "vt8500",
-       .ack       = vt8500_irq_mask,
-       .mask      = vt8500_irq_mask,
-       .unmask    = vt8500_irq_unmask,
-       .set_type  = vt8500_irq_set_type,
+       .name = "vt8500",
+       .irq_ack = vt8500_irq_mask,
+       .irq_mask = vt8500_irq_mask,
+       .irq_unmask = vt8500_irq_unmask,
+       .irq_set_type = vt8500_irq_set_type,
 };
 
 void __init vt8500_init_irq(void)
index 4f18f9e87bae9058700287a9fce5e529e8b90b31..54473cd4aba951c793f25df8f3e9fb17be7f0160 100644 (file)
@@ -3,14 +3,11 @@
 /*
  * Function: v4_early_abort
  *
- * Params  : r2 = address of aborted instruction
- *         : r3 = saved SPSR
+ * Params  : r2 = pt_regs
+ *        : r4 = aborted context pc
+ *        : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- *        : r1 = FSR, bit 11 = write
- *        : r2-r8 = corrupted
- *        : r9 = preserved
- *        : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space.  This means we might cause a data
 ENTRY(v4_early_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
-       ldr     r3, [r2]                        @ read aborted ARM instruction
+       ldr     r3, [r4]                        @ read aborted ARM instruction
        bic     r1, r1, #1 << 11 | 1 << 10      @ clear bits 11 and 10 of FSR
        tst     r3, #1 << 20                    @ L = 1 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
-       mov     pc, lr
-
-
+       b       do_DataAbort
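
Each converted abort helper now ends with a branch to the common C fault handler instead of returning to an assembly trampoline; r0, r1 and r2 carry its three arguments. For reference, the handler's shape as declared in the ARM fault-handling code:

        /* arch/arm/mm/fault.c */
        asmlinkage void
        do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
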
index b6282548f922eb6d2c9653f6c61e301915bacef5..9da704e7b86e64669a3ee42fffce8d045ac010b3 100644 (file)
@@ -4,14 +4,11 @@
 /*
  * Function: v4t_early_abort
  *
- * Params  : r2 = address of aborted instruction
- *         : r3 = saved SPSR
+ * Params  : r2 = pt_regs
+ *        : r4 = aborted context pc
+ *        : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- *        : r1 = FSR, bit 11 = write
- *        : r2-r8 = corrupted
- *        : r9 = preserved
- *        : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space.  This means we might cause a data
@@ -22,9 +19,9 @@
 ENTRY(v4t_early_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
-       do_thumb_abort
-       ldreq   r3, [r2]                        @ read aborted ARM instruction
+       do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+       ldreq   r3, [r4]                        @ read aborted ARM instruction
        bic     r1, r1, #1 << 11 | 1 << 10      @ clear bits 11 and 10 of FSR
        tst     r3, #1 << 20                    @ check write
        orreq   r1, r1, #1 << 11
-       mov     pc, lr
+       b       do_DataAbort
index 02251b526c0dfaf182138c71b27b0455b516ab41..a0908d4653a34a2241d95c58af16dcdde4dafa9a 100644 (file)
@@ -4,14 +4,11 @@
 /*
  * Function: v5t_early_abort
  *
- * Params  : r2 = address of aborted instruction
- *         : r3 = saved SPSR
+ * Params  : r2 = pt_regs
+ *        : r4 = aborted context pc
+ *        : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- *        : r1 = FSR, bit 11 = write
- *        : r2-r8 = corrupted
- *        : r9 = preserved
- *        : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space.  This means we might cause a data
 ENTRY(v5t_early_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
-       do_thumb_abort
-       ldreq   r3, [r2]                        @ read aborted ARM instruction
+       do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+       ldreq   r3, [r4]                        @ read aborted ARM instruction
        bic     r1, r1, #1 << 11                @ clear bits 11 of FSR
-       do_ldrd_abort
+       do_ldrd_abort tmp=ip, insn=r3
        tst     r3, #1 << 20                    @ check write
        orreq   r1, r1, #1 << 11
-       mov     pc, lr
+       b       do_DataAbort
index bce68d601c8be5aa4fb8496ca78e56ddb98a0a46..4006b7a612642b7fa4ec36b5a995ccc9bc1e3a40 100644 (file)
@@ -4,14 +4,11 @@
 /*
  * Function: v5tj_early_abort
  *
- * Params  : r2 = address of aborted instruction
- *         : r3 = saved SPSR
+ * Params  : r2 = pt_regs
+ *        : r4 = aborted context pc
+ *        : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- *        : r1 = FSR, bit 11 = write
- *        : r2-r8 = corrupted
- *        : r9 = preserved
- *        : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space.  This means we might cause a data
@@ -23,13 +20,11 @@ ENTRY(v5tj_early_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
        bic     r1, r1, #1 << 11 | 1 << 10      @ clear bits 11 and 10 of FSR
-       tst     r3, #PSR_J_BIT                  @ Java?
-       movne   pc, lr
-       do_thumb_abort
-       ldreq   r3, [r2]                        @ read aborted ARM instruction
-       do_ldrd_abort
+       tst     r5, #PSR_J_BIT                  @ Java?
+       bne     do_DataAbort
+       do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+       ldreq   r3, [r4]                        @ read aborted ARM instruction
+       do_ldrd_abort tmp=ip, insn=r3
        tst     r3, #1 << 20                    @ L = 0 -> write
        orreq   r1, r1, #1 << 11                @ yes.
-       mov     pc, lr
-
-
+       b       do_DataAbort
index 1478aa5221449af05fddbae38440f1858afa8299..ff1f7cc11f87bdee1509757c7cd0c3cf7c105754 100644 (file)
@@ -4,14 +4,11 @@
 /*
  * Function: v6_early_abort
  *
- * Params  : r2 = address of aborted instruction
- *         : r3 = saved SPSR
+ * Params  : r2 = pt_regs
+ *        : r4 = aborted context pc
+ *        : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- *        : r1 = FSR, bit 11 = write
- *        : r2-r8 = corrupted
- *        : r9 = preserved
- *        : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space.  This means we might cause a data
@@ -33,16 +30,14 @@ ENTRY(v6_early_abort)
  * The test below covers all the write situations, including Java bytecodes
  */
        bic     r1, r1, #1 << 11                @ clear bit 11 of FSR
-       tst     r3, #PSR_J_BIT                  @ Java?
-       movne   pc, lr
-       do_thumb_abort
-       ldreq   r3, [r2]                        @ read aborted ARM instruction
+       tst     r5, #PSR_J_BIT                  @ Java?
+       bne     do_DataAbort
+       do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+       ldreq   r3, [r4]                        @ read aborted ARM instruction
 #ifdef CONFIG_CPU_ENDIAN_BE8
        reveq   r3, r3
 #endif
-       do_ldrd_abort
+       do_ldrd_abort tmp=ip, insn=r3
        tst     r3, #1 << 20                    @ L = 0 -> write
        orreq   r1, r1, #1 << 11                @ yes.
-       mov     pc, lr
-
-
+       b       do_DataAbort
index ec88b157d3bb6893df82209b9ccc80302dbdc77b..703375277ba6d3dcdae7f93404d2d19e531aad68 100644 (file)
@@ -3,14 +3,11 @@
 /*
  * Function: v7_early_abort
  *
- * Params  : r2 = address of aborted instruction
- *         : r3 = saved SPSR
+ * Params  : r2 = pt_regs
+ *        : r4 = aborted context pc
+ *        : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- *        : r1 = FSR, bit 11 = write
- *        : r2-r8 = corrupted
- *        : r9 = preserved
- *        : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  */
@@ -37,18 +34,18 @@ ENTRY(v7_early_abort)
        ldr     r3, =0x40d                      @ On permission fault
        and     r3, r1, r3
        cmp     r3, #0x0d
-       movne   pc, lr
+       bne     do_DataAbort
 
        mcr     p15, 0, r0, c7, c8, 0           @ Retranslate FAR
        isb
-       mrc     p15, 0, r2, c7, c4, 0           @ Read the PAR
-       and     r3, r2, #0x7b                   @ On translation fault
+       mrc     p15, 0, ip, c7, c4, 0           @ Read the PAR
+       and     r3, ip, #0x7b                   @ On translation fault
        cmp     r3, #0x0b
-       movne   pc, lr
+       bne     do_DataAbort
        bic     r1, r1, #0xf                    @ Fix up FSR FS[5:0]
-       and     r2, r2, #0x7e
-       orr     r1, r1, r2, LSR #1
+       and     ip, ip, #0x7e
+       orr     r1, r1, ip, LSR #1
 #endif
 
-       mov     pc, lr
+       b       do_DataAbort
 ENDPROC(v7_early_abort)
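
The hunk above rewrites the FSR only when both tests pass: the original FSR
encodes a permission fault, and the retranslated PAR still reports a
translation fault. A rough C rendering of that fixup, with hypothetical names
(fixup_v7_fsr, par) standing in for the register values:

	/* Illustrative sketch only, not code from this commit. */
	static u32 fixup_v7_fsr(u32 fsr, u32 par)
	{
		if ((fsr & 0x40d) != 0x00d)	/* not a permission fault */
			return fsr;
		if ((par & 0x7b) != 0x0b)	/* PAR: not a translation fault */
			return fsr;
		/* Substitute the fault status from PAR[6:1] into FS[3:0]. */
		return (fsr & ~0xfu) | ((par & 0x7e) >> 1);
	}
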
index 9fb7b0e25ea1094cec42e47b76c69e45ddf13f44..f3982580c273057b89a1c025cb52f0f54093014f 100644 (file)
@@ -3,14 +3,11 @@
 /*
  * Function: v4t_late_abort
  *
- * Params  : r2 = address of aborted instruction
- *         : r3 = saved SPSR
+ * Params  : r2 = pt_regs
+ *        : r4 = aborted context pc
+ *        : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- *        : r1 = FSR, bit 11 = write
- *        : r2-r8 = corrupted
- *        : r9 = preserved
- *        : sp = pointer to registers
+ * Returns : r4-r5, r10-r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space.  This means we might cause a data
@@ -18,7 +15,7 @@
  * picture.  Unfortunately, this does happen.  We live with it.
  */
 ENTRY(v4t_late_abort)
-       tst     r3, #PSR_T_BIT                  @ check for thumb mode
+       tst     r5, #PSR_T_BIT                  @ check for thumb mode
 #ifdef CONFIG_CPU_CP15_MMU
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
@@ -28,7 +25,7 @@ ENTRY(v4t_late_abort)
        mov     r1, #0
 #endif
        bne     .data_thumb_abort
-       ldr     r8, [r2]                        @ read arm instruction
+       ldr     r8, [r4]                        @ read arm instruction
        tst     r8, #1 << 20                    @ L = 1 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
        and     r7, r8, #15 << 24
@@ -47,86 +44,84 @@ ENTRY(v4t_late_abort)
 /* 9 */        b       .data_arm_ldmstm                @ ldm*b rn, <rlist>
 /* a */        b       .data_unknown
 /* b */        b       .data_unknown
-/* c */        mov     pc, lr                          @ ldc   rd, [rn], #m    @ Same as ldr   rd, [rn], #m
-/* d */        mov     pc, lr                          @ ldc   rd, [rn, #m]
+/* c */        b       do_DataAbort                    @ ldc   rd, [rn], #m    @ Same as ldr   rd, [rn], #m
+/* d */        b       do_DataAbort                    @ ldc   rd, [rn, #m]
 /* e */        b       .data_unknown
 /* f */
 .data_unknown: @ Part of jumptable
-       mov     r0, r2
+       mov     r0, r4
        mov     r1, r8
-       mov     r2, sp
-       bl      baddataabort
-       b       ret_from_exception
+       b       baddataabort
 
 .data_arm_ldmstm:
        tst     r8, #1 << 21                    @ check writeback bit
-       moveq   pc, lr                          @ no writeback -> no fixup
+       beq     do_DataAbort                    @ no writeback -> no fixup
        mov     r7, #0x11
        orr     r7, r7, #0x1100
        and     r6, r8, r7
-       and     r2, r8, r7, lsl #1
-       add     r6, r6, r2, lsr #1
-       and     r2, r8, r7, lsl #2
-       add     r6, r6, r2, lsr #2
-       and     r2, r8, r7, lsl #3
-       add     r6, r6, r2, lsr #3
+       and     r9, r8, r7, lsl #1
+       add     r6, r6, r9, lsr #1
+       and     r9, r8, r7, lsl #2
+       add     r6, r6, r9, lsr #2
+       and     r9, r8, r7, lsl #3
+       add     r6, r6, r9, lsr #3
        add     r6, r6, r6, lsr #8
        add     r6, r6, r6, lsr #4
        and     r6, r6, #15                     @ r6 = no. of registers to transfer.
-       and     r5, r8, #15 << 16               @ Extract 'n' from instruction
-       ldr     r7, [sp, r5, lsr #14]           @ Get register 'Rn'
+       and     r9, r8, #15 << 16               @ Extract 'n' from instruction
+       ldr     r7, [r2, r9, lsr #14]           @ Get register 'Rn'
        tst     r8, #1 << 23                    @ Check U bit
        subne   r7, r7, r6, lsl #2              @ Undo increment
        addeq   r7, r7, r6, lsl #2              @ Undo decrement
-       str     r7, [sp, r5, lsr #14]           @ Put register 'Rn'
-       mov     pc, lr
+       str     r7, [r2, r9, lsr #14]           @ Put register 'Rn'
+       b       do_DataAbort
 
 .data_arm_lateldrhpre:
        tst     r8, #1 << 21                    @ Check writeback bit
-       moveq   pc, lr                          @ No writeback -> no fixup
+       beq     do_DataAbort                    @ No writeback -> no fixup
 .data_arm_lateldrhpost:
-       and     r5, r8, #0x00f                  @ get Rm / low nibble of immediate value
+       and     r9, r8, #0x00f                  @ get Rm / low nibble of immediate value
        tst     r8, #1 << 22                    @ if (immediate offset)
        andne   r6, r8, #0xf00                  @ { immediate high nibble
-       orrne   r6, r5, r6, lsr #4              @   combine nibbles } else
-       ldreq   r6, [sp, r5, lsl #2]            @ { load Rm value }
+       orrne   r6, r9, r6, lsr #4              @   combine nibbles } else
+       ldreq   r6, [r2, r9, lsl #2]            @ { load Rm value }
 .data_arm_apply_r6_and_rn:
-       and     r5, r8, #15 << 16               @ Extract 'n' from instruction
-       ldr     r7, [sp, r5, lsr #14]           @ Get register 'Rn'
+       and     r9, r8, #15 << 16               @ Extract 'n' from instruction
+       ldr     r7, [r2, r9, lsr #14]           @ Get register 'Rn'
        tst     r8, #1 << 23                    @ Check U bit
        subne   r7, r7, r6                      @ Undo increment
        addeq   r7, r7, r6                      @ Undo decrement
-       str     r7, [sp, r5, lsr #14]           @ Put register 'Rn'
-       mov     pc, lr
+       str     r7, [r2, r9, lsr #14]           @ Put register 'Rn'
+       b       do_DataAbort
 
 .data_arm_lateldrpreconst:
        tst     r8, #1 << 21                    @ check writeback bit
-       moveq   pc, lr                          @ no writeback -> no fixup
+       beq     do_DataAbort                    @ no writeback -> no fixup
 .data_arm_lateldrpostconst:
-       movs    r2, r8, lsl #20                 @ Get offset
-       moveq   pc, lr                          @ zero -> no fixup
-       and     r5, r8, #15 << 16               @ Extract 'n' from instruction
-       ldr     r7, [sp, r5, lsr #14]           @ Get register 'Rn'
+       movs    r6, r8, lsl #20                 @ Get offset
+       beq     do_DataAbort                    @ zero -> no fixup
+       and     r9, r8, #15 << 16               @ Extract 'n' from instruction
+       ldr     r7, [r2, r9, lsr #14]           @ Get register 'Rn'
        tst     r8, #1 << 23                    @ Check U bit
-       subne   r7, r7, r2, lsr #20             @ Undo increment
-       addeq   r7, r7, r2, lsr #20             @ Undo decrement
-       str     r7, [sp, r5, lsr #14]           @ Put register 'Rn'
-       mov     pc, lr
+       subne   r7, r7, r6, lsr #20             @ Undo increment
+       addeq   r7, r7, r6, lsr #20             @ Undo decrement
+       str     r7, [r2, r9, lsr #14]           @ Put register 'Rn'
+       b       do_DataAbort
 
 .data_arm_lateldrprereg:
        tst     r8, #1 << 21                    @ check writeback bit
-       moveq   pc, lr                          @ no writeback -> no fixup
+       beq     do_DataAbort                    @ no writeback -> no fixup
 .data_arm_lateldrpostreg:
        and     r7, r8, #15                     @ Extract 'm' from instruction
-       ldr     r6, [sp, r7, lsl #2]            @ Get register 'Rm'
-       mov     r5, r8, lsr #7                  @ get shift count
-       ands    r5, r5, #31
+       ldr     r6, [r2, r7, lsl #2]            @ Get register 'Rm'
+       mov     r9, r8, lsr #7                  @ get shift count
+       ands    r9, r9, #31
        and     r7, r8, #0x70                   @ get shift type
        orreq   r7, r7, #8                      @ shift count = 0
        add     pc, pc, r7
        nop
 
-       mov     r6, r6, lsl r5                  @ 0: LSL #!0
+       mov     r6, r6, lsl r9                  @ 0: LSL #!0
        b       .data_arm_apply_r6_and_rn
        b       .data_arm_apply_r6_and_rn       @ 1: LSL #0
        nop
@@ -134,7 +129,7 @@ ENTRY(v4t_late_abort)
        nop
        b       .data_unknown                   @ 3: MUL?
        nop
-       mov     r6, r6, lsr r5                  @ 4: LSR #!0
+       mov     r6, r6, lsr r9                  @ 4: LSR #!0
        b       .data_arm_apply_r6_and_rn
        mov     r6, r6, lsr #32                 @ 5: LSR #32
        b       .data_arm_apply_r6_and_rn
@@ -142,7 +137,7 @@ ENTRY(v4t_late_abort)
        nop
        b       .data_unknown                   @ 7: MUL?
        nop
-       mov     r6, r6, asr r5                  @ 8: ASR #!0
+       mov     r6, r6, asr r9                  @ 8: ASR #!0
        b       .data_arm_apply_r6_and_rn
        mov     r6, r6, asr #32                 @ 9: ASR #32
        b       .data_arm_apply_r6_and_rn
@@ -150,7 +145,7 @@ ENTRY(v4t_late_abort)
        nop
        b       .data_unknown                   @ B: MUL?
        nop
-       mov     r6, r6, ror r5                  @ C: ROR #!0
+       mov     r6, r6, ror r9                  @ C: ROR #!0
        b       .data_arm_apply_r6_and_rn
        mov     r6, r6, rrx                     @ D: RRX
        b       .data_arm_apply_r6_and_rn
@@ -159,7 +154,7 @@ ENTRY(v4t_late_abort)
        b       .data_unknown                   @ F: MUL?
 
 .data_thumb_abort:
-       ldrh    r8, [r2]                        @ read instruction
+       ldrh    r8, [r4]                        @ read instruction
        tst     r8, #1 << 11                    @ L = 1 -> write?
        orreq   r1, r1, #1 << 8                 @ yes
        and     r7, r8, #15 << 12
@@ -172,10 +167,10 @@ ENTRY(v4t_late_abort)
 /* 3 */        b       .data_unknown
 /* 4 */        b       .data_unknown
 /* 5 */        b       .data_thumb_reg
-/* 6 */        mov     pc, lr
-/* 7 */        mov     pc, lr
-/* 8 */        mov     pc, lr
-/* 9 */        mov     pc, lr
+/* 6 */        b       do_DataAbort
+/* 7 */        b       do_DataAbort
+/* 8 */        b       do_DataAbort
+/* 9 */        b       do_DataAbort
 /* A */        b       .data_unknown
 /* B */        b       .data_thumb_pushpop
 /* C */        b       .data_thumb_ldmstm
@@ -185,41 +180,41 @@ ENTRY(v4t_late_abort)
 
 .data_thumb_reg:
        tst     r8, #1 << 9
-       moveq   pc, lr
+       beq     do_DataAbort
        tst     r8, #1 << 10                    @ If 'S' (signed) bit is set
        movne   r1, #0                          @ it must be a load instr
-       mov     pc, lr
+       b       do_DataAbort
 
 .data_thumb_pushpop:
        tst     r8, #1 << 10
        beq     .data_unknown
        and     r6, r8, #0x55                   @ hweight8(r8) + R bit
-       and     r2, r8, #0xaa
-       add     r6, r6, r2, lsr #1
-       and     r2, r6, #0xcc
+       and     r9, r8, #0xaa
+       add     r6, r6, r9, lsr #1
+       and     r9, r6, #0xcc
        and     r6, r6, #0x33
-       add     r6, r6, r2, lsr #2
+       add     r6, r6, r9, lsr #2
        movs    r7, r8, lsr #9                  @ C = r8 bit 8 (R bit)
        adc     r6, r6, r6, lsr #4              @ high + low nibble + R bit
        and     r6, r6, #15                     @ number of regs to transfer
-       ldr     r7, [sp, #13 << 2]
+       ldr     r7, [r2, #13 << 2]
        tst     r8, #1 << 11
        addeq   r7, r7, r6, lsl #2              @ increment SP if PUSH
        subne   r7, r7, r6, lsl #2              @ decrement SP if POP
-       str     r7, [sp, #13 << 2]
-       mov     pc, lr
+       str     r7, [r2, #13 << 2]
+       b       do_DataAbort
 
 .data_thumb_ldmstm:
        and     r6, r8, #0x55                   @ hweight8(r8)
-       and     r2, r8, #0xaa
-       add     r6, r6, r2, lsr #1
-       and     r2, r6, #0xcc
+       and     r9, r8, #0xaa
+       add     r6, r6, r9, lsr #1
+       and     r9, r6, #0xcc
        and     r6, r6, #0x33
-       add     r6, r6, r2, lsr #2
+       add     r6, r6, r9, lsr #2
        add     r6, r6, r6, lsr #4
-       and     r5, r8, #7 << 8
-       ldr     r7, [sp, r5, lsr #6]
+       and     r9, r8, #7 << 8
+       ldr     r7, [r2, r9, lsr #6]
        and     r6, r6, #15                     @ number of regs to transfer
        sub     r7, r7, r6, lsl #2              @ always decrement
-       str     r7, [sp, r5, lsr #6]
-       mov     pc, lr
+       str     r7, [r2, r9, lsr #6]
+       b       do_DataAbort
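
The .data_thumb_pushpop and .data_thumb_ldmstm paths above count the registers
in the Thumb register list with a masked population count; the r2 -> r9
scratch rename does not change the algorithm. The same hweight8 reduction,
sketched in C for readability:

	/* Sketch of the bit-pair/nibble reduction used above (hweight8).
	 * The push/pop variant additionally folds in the R bit via carry. */
	static unsigned int count_regs(unsigned int rlist)
	{
		unsigned int n = (rlist & 0x55) + ((rlist & 0xaa) >> 1);
		n = (n & 0x33) + ((n & 0xcc) >> 2);
		return (n + (n >> 4)) & 15;	/* regs to transfer */
	}
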
index d7cb1bfa51a4c48f9d6cde1c32cf07abec36481d..52162d59407a4679eee5fce0f4d848a62b2ea182 100644 (file)
@@ -9,34 +9,32 @@
  *
  */
 
-       .macro  do_thumb_abort
-       tst     r3, #PSR_T_BIT
+       .macro  do_thumb_abort, fsr, pc, psr, tmp
+       tst     \psr, #PSR_T_BIT
        beq     not_thumb
-       ldrh    r3, [r2]                        @ Read aborted Thumb instruction
-       and     r3, r3, # 0xfe00                @ Mask opcode field
-       cmp     r3, # 0x5600                    @ Is it ldrsb?
-       orreq   r3, r3, #1 << 11                @ Set L-bit if yes
-       tst     r3, #1 << 11                    @ L = 0 -> write
-       orreq   r1, r1, #1 << 11                @ yes.
-       mov     pc, lr
+       ldrh    \tmp, [\pc]                     @ Read aborted Thumb instruction
+       and     \tmp, \tmp, # 0xfe00            @ Mask opcode field
+       cmp     \tmp, # 0x5600                  @ Is it ldrsb?
+       orreq   \tmp, \tmp, #1 << 11            @ Set L-bit if yes
+       tst     \tmp, #1 << 11                  @ L = 0 -> write
+       orreq   \fsr, \fsr, #1 << 11            @ yes.
+       b       do_DataAbort
 not_thumb:
        .endm
 
 /*
- * We check for the following insturction encoding for LDRD.
+ * We check for the following instruction encoding for LDRD.
  *
- * [27:25] == 0
+ * [27:25] == 000
  *   [7:4] == 1101
  *    [20] == 0
  */
-       .macro  do_ldrd_abort
-       tst     r3, #0x0e000000                 @ [27:25] == 0
+       .macro  do_ldrd_abort, tmp, insn
+       tst     \insn, #0x0e100000              @ [27:25,20] == 0
        bne     not_ldrd
-       and     r2, r3, #0x000000f0             @ [7:4] == 1101
-       cmp     r2, #0x000000d0
-       bne     not_ldrd
-       tst     r3, #1 << 20                    @ [20] == 0
-       moveq   pc, lr
+       and     \tmp, \insn, #0x000000f0        @ [7:4] == 1101
+       cmp     \tmp, #0x000000d0
+       beq     do_DataAbort
 not_ldrd:
        .endm
 
index 625e580945b51580af02a3663cd2666ff74893db..119cb479c2aba0a8ff1b19a5e9e30964a0e892c6 100644 (file)
@@ -3,11 +3,11 @@
 /*
  * Function: nommu_early_abort
  *
- * Params  : r2 = address of aborted instruction
- *         : r3 = saved SPSR
+ * Params  : r2 = pt_regs
+ *        : r4 = aborted context pc
+ *        : r5 = aborted context psr
  *
- * Returns : r0 = 0 (abort address)
- *        : r1 = 0 (FSR)
+ * Returns : r4 - r11, r13 preserved
  *
  * Note: There is no FSR/FAR on !CPU_CP15_MMU cores.
  *       Just fill zero into the registers.
@@ -16,5 +16,5 @@
 ENTRY(nommu_early_abort)
        mov     r0, #0                          @ clear r0, r1 (no FSR/FAR)
        mov     r1, #0
-       mov     pc, lr
+       b       do_DataAbort
 ENDPROC(nommu_early_abort)
index 724ba3bce72c952ff44645d2a50e5566b568c943..be7c638b648bb9771665719adbb2c7e74b5aafef 100644 (file)
@@ -727,6 +727,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        int isize = 4;
        int thumb2_32b = 0;
 
+       if (interrupts_enabled(regs))
+               local_irq_enable();
+
        instrptr = instruction_pointer(regs);
 
        fs = get_fs();
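
The local_irq_enable() here (and the matching hunk in the page-fault path
below) is gated on the state of the interrupted context. On ARM,
interrupts_enabled() is the usual <asm/ptrace.h> test on the saved CPSR,
roughly:

	/* IRQs were enabled in the aborted context iff PSR_I_BIT is clear
	 * in the saved CPSR; only then may the handler re-enable them. */
	#define interrupts_enabled(regs) \
		(!((regs)->ARM_cpsr & PSR_I_BIT))
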
index ef59099a5463e5bbb409caf7e5a97ffbaf065d24..44c086710d2ba5d21a40741290ef8c40de35928c 100644 (file)
@@ -120,17 +120,22 @@ static void l2x0_cache_sync(void)
        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static void l2x0_flush_all(void)
+static void __l2x0_flush_all(void)
 {
-       unsigned long flags;
-
-       /* clean all ways */
-       spin_lock_irqsave(&l2x0_lock, flags);
        debug_writel(0x03);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
        cache_sync();
        debug_writel(0x00);
+}
+
+static void l2x0_flush_all(void)
+{
+       unsigned long flags;
+
+       /* clean all ways */
+       spin_lock_irqsave(&l2x0_lock, flags);
+       __l2x0_flush_all();
        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
@@ -266,7 +271,9 @@ static void l2x0_disable(void)
        unsigned long flags;
 
        spin_lock_irqsave(&l2x0_lock, flags);
-       writel(0, l2x0_base + L2X0_CTRL);
+       __l2x0_flush_all();
+       writel_relaxed(0, l2x0_base + L2X0_CTRL);
+       dsb();
        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
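
The disable path now performs the flush and the control-register write under
a single acquisition of l2x0_lock, which is why the flush body was split into
an unlocked __l2x0_flush_all() helper. The shape of the refactor, sketched
with generic names:

	/* Pattern sketch: __do_work() assumes the caller holds the lock,
	 * so l2x0_disable()-style paths can reuse it inside their own
	 * critical section. */
	static DEFINE_SPINLOCK(lock);

	static void __do_work(void)
	{
		/* ... hardware sequence ... */
	}

	static void do_work(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&lock, flags);
		__do_work();
		spin_unlock_irqrestore(&lock, flags);
	}
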
 
index bdba6c65c901a1c682f1aaf0fce3875ad8fa448b..63cca0097130c9250c1838a8358b27354a9ea8e0 100644 (file)
@@ -41,7 +41,6 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
        kfrom = kmap_atomic(from, KM_USER0);
        kto = kmap_atomic(to, KM_USER1);
        copy_page(kto, kfrom);
-       __cpuc_flush_dcache_area(kto, PAGE_SIZE);
        kunmap_atomic(kto, KM_USER1);
        kunmap_atomic(kfrom, KM_USER0);
 }
index 82a093cee09a781677ebf639a6450bcd9f4e59fd..0a0a1e7c20d2b2b7f459197be96575f757171cfd 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 
+#include "mm.h"
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-       u64 mask = ISA_DMA_THRESHOLD;
+       u64 mask = (u64)arm_dma_limit;
 
        if (dev) {
                mask = dev->coherent_dma_mask;
@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev)
                        return 0;
                }
 
-               if ((~mask) & ISA_DMA_THRESHOLD) {
+               if ((~mask) & (u64)arm_dma_limit) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
-                                mask, (unsigned long long)ISA_DMA_THRESHOLD);
+                                mask, (u64)arm_dma_limit);
                        return 0;
                }
        }
@@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+       if (mask < (u64)arm_dma_limit)
+               return 0;
+       return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+       if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+               return -EIO;
+
+#ifndef CONFIG_DMABOUNCE
+       *dev->dma_mask = dma_mask;
+#endif
+
+       return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
 static int __init dma_debug_do_init(void)
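
With dma_supported() and dma_set_mask() defined, the 24-bit example from the
comment above would be exercised by a driver roughly like this (hypothetical
probe code, not part of this commit):

	/* If arm_dma_limit lies above what a 24-bit master can address,
	 * dma_set_mask() fails and the driver must bail out. */
	static int example_probe(struct platform_device *pdev)
	{
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(24)))
			return -EIO;
		/* ... safe to allocate/map buffers for bus mastering ... */
		return 0;
	}
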
index bc0e1d88fd3ba8b7863edfc9eca9cb11d90413dd..55657c222d7c770425138242c248ee767e4d31e6 100644 (file)
@@ -94,7 +94,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 
                pud = pud_offset(pgd, addr);
                if (PTRS_PER_PUD != 1)
-                       printk(", *pud=%08lx", pud_val(*pud));
+                       printk(", *pud=%08llx", (long long)pud_val(*pud));
 
                if (pud_none(*pud))
                        break;
@@ -285,6 +285,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        tsk = current;
        mm  = tsk->mm;
 
+       /* Enable interrupts if they were enabled in the parent context. */
+       if (interrupts_enabled(regs))
+               local_irq_enable();
+
        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
index c19571c40a21ca62902d65cd01dcb8db9ea4f178..e5ab4362322fcf2cdef2b598e9425579e28690ef 100644 (file)
@@ -212,6 +212,14 @@ static void __init arm_bootmem_init(unsigned long start_pfn,
 }
 
 #ifdef CONFIG_ZONE_DMA
+/*
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA.  The default here places no restriction on DMA
+ * allocations.  This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+u32 arm_dma_limit;
+
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
 {
@@ -278,6 +286,8 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
         */
        arm_adjust_dma_zone(zone_size, zhole_size,
                ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
+
+       arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1;
 #endif
 
        free_area_init_node(0, zone_size, min, zhole_size);
@@ -422,6 +432,17 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
        return pages;
 }
 
+/*
+ * Poison init memory with an undefined instruction (ARM) or a branch to an
+ * undefined instruction (Thumb).
+ */
+static inline void poison_init_mem(void *s, size_t count)
+{
+       u32 *p = (u32 *)s;
+       while ((count = count - 4))
+               *p++ = 0xe7fddef0;
+}
+
 static inline void
 free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -639,8 +660,8 @@ void __init mem_init(void)
                        "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
 #endif
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-                       "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
+                       "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
 
@@ -662,8 +683,8 @@ void __init mem_init(void)
 #endif
                        MLM(MODULES_VADDR, MODULES_END),
 
-                       MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_text, _etext),
+                       MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_sdata, _edata),
                        MLK_ROUNDUP(__bss_start, __bss_stop));
 
@@ -704,11 +725,13 @@ void free_initmem(void)
 #ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;
 
+       poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
                                    __phys_to_pfn(__pa(&__tcm_end)),
                                    "TCM link");
 #endif
 
+       poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                            __phys_to_pfn(__pa(__init_end)),
@@ -721,10 +744,12 @@ static int keep_initrd;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-       if (!keep_initrd)
+       if (!keep_initrd) {
+               poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
+       }
 }
 
 static int __init keepinitrd_setup(char *__unused)
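
For a concrete feel of the new limit (illustrative platform values, not from
this commit): with PHYS_OFFSET = 0x80000000 and a 16MB ARM_DMA_ZONE_SIZE,

	/* arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1
	 *               = 0x80000000 + 0x01000000 - 1
	 *               = 0x80ffffff
	 * dma_supported() then rejects any mask below 0x80ffffff, since
	 * such a device could not reach all GFP_DMA memory. */
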
index 5b3d7d543659154b36d3a568be429ebc096819e0..010566799c80c27680390154ae85a0a7b5124567 100644 (file)
@@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 
 #endif
 
+#ifdef CONFIG_ZONE_DMA
+extern u32 arm_dma_limit;
+#else
+#define arm_dma_limit ((u32)~0)
+#endif
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
index 9d9e736c2b4f4afe2a90190d8b583c8e1f0c656a..594d677b92c883c25c4d5051bcc635394984b6f6 100644 (file)
@@ -759,7 +759,7 @@ early_param("vmalloc", early_vmalloc);
 
 static phys_addr_t lowmem_limit __initdata = 0;
 
-static void __init sanity_check_meminfo(void)
+void __init sanity_check_meminfo(void)
 {
        int i, j, highmem = 0;
 
@@ -1032,8 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc)
 {
        void *zero_page;
 
+       memblock_set_current_limit(lowmem_limit);
+
        build_mem_type_table();
-       sanity_check_meminfo();
        prepare_page_table();
        map_lowmem();
        devicemaps_init(mdesc);
index 687d02319a41ea68afcfe65698204a4db84d8400..941a98c9e8aaf327b5131227ccaddb1d697ad2e5 100644 (file)
@@ -27,6 +27,10 @@ void __init arm_mm_memblock_reserve(void)
        memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
 }
 
+void __init sanity_check_meminfo(void)
+{
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
index 87970eba88ea2fe8f54c29fe2caf05b1b846f4e3..8bbff025269a26b7d436f70bc8806434c605f202 100644 (file)
@@ -4,16 +4,18 @@
 /*
  * Function: legacy_pabort
  *
- * Params  : r0 = address of aborted instruction
+ * Params  : r2 = pt_regs
+ *        : r4 = address of aborted instruction
+ *        : r5 = psr for parent context
  *
- * Returns : r0 = address of abort
- *        : r1 = Simulated IFSR with section translation fault status
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current prefetch abort.
  */
 
        .align  5
 ENTRY(legacy_pabort)
+       mov     r0, r4
        mov     r1, #5
-       mov     pc, lr
+       b       do_PrefetchAbort
 ENDPROC(legacy_pabort)
index 06e3d1ef2115a04b6695ca1cc7276fff41088079..9627646ce7832adc9cc8026e9a8dccd101f297fc 100644 (file)
@@ -4,16 +4,18 @@
 /*
  * Function: v6_pabort
  *
- * Params  : r0 = address of aborted instruction
+ * Params  : r2 = pt_regs
+ *        : r4 = address of aborted instruction
+ *        : r5 = psr for parent context
  *
- * Returns : r0 = address of abort
- *        : r1 = IFSR
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current prefetch abort.
  */
 
        .align  5
 ENTRY(v6_pabort)
+       mov     r0, r4
        mrc     p15, 0, r1, c5, c0, 1           @ get IFSR
-       mov     pc, lr
+       b       do_PrefetchAbort
 ENDPROC(v6_pabort)
index a8b3b300a18dd88055582cba24441ba6f8f4ce97..875761f44f3bbb7d5bb064ab0e6a06144de22a96 100644 (file)
@@ -2,12 +2,13 @@
 #include <asm/assembler.h>
 
 /*
- * Function: v6_pabort
+ * Function: v7_pabort
  *
- * Params  : r0 = address of aborted instruction
+ * Params  : r2 = pt_regs
+ *        : r4 = address of aborted instruction
+ *        : r5 = psr for parent context
  *
- * Returns : r0 = address of abort
- *        : r1 = IFSR
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current prefetch abort.
  */
@@ -16,5 +17,5 @@
 ENTRY(v7_pabort)
        mrc     p15, 0, r0, c6, c0, 2           @ get IFAR
        mrc     p15, 0, r1, c5, c0, 1           @ get IFSR
-       mov     pc, lr
+       b       do_PrefetchAbort
 ENDPROC(v7_pabort)
index 5f79dc4ce3fbc98d76d32423b7b0dcb619ffe778..50e3543d03bfa3fa9d6a89fb191cbe339ccafd00 100644 (file)
@@ -29,19 +29,19 @@ ENTRY(cpu_arm7_dcache_clean_area)
 /*
  * Function: arm6_7_data_abort ()
  *
- * Params  : r2 = address of aborted instruction
- *        : sp = pointer to registers
+ * Params  : r2 = pt_regs
+ *        : r4 = aborted context pc
+ *        : r5 = aborted context psr
  *
  * Purpose : obtain information about current aborted instruction
  *
- * Returns : r0 = address of abort
- *        : r1 = FSR
+ * Returns : r4-r5, r10-r11, r13 preserved
  */
 
 ENTRY(cpu_arm7_data_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
-       ldr     r8, [r2]                        @ read arm instruction
+       ldr     r8, [r4]                        @ read arm instruction
        tst     r8, #1 << 20                    @ L = 0 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
        and     r7, r8, #15 << 24
@@ -49,7 +49,7 @@ ENTRY(cpu_arm7_data_abort)
        nop
 
 /* 0 */        b       .data_unknown
-/* 1 */        mov     pc, lr                          @ swp
+/* 1 */        b       do_DataAbort                    @ swp
 /* 2 */        b       .data_unknown
 /* 3 */        b       .data_unknown
 /* 4 */        b       .data_arm_lateldrpostconst      @ ldr   rd, [rn], #m
@@ -60,87 +60,85 @@ ENTRY(cpu_arm7_data_abort)
 /* 9 */        b       .data_arm_ldmstm                @ ldm*b rn, <rlist>
 /* a */        b       .data_unknown
 /* b */        b       .data_unknown
-/* c */        mov     pc, lr                          @ ldc   rd, [rn], #m    @ Same as ldr   rd, [rn], #m
-/* d */        mov     pc, lr                          @ ldc   rd, [rn, #m]
+/* c */        b       do_DataAbort                    @ ldc   rd, [rn], #m    @ Same as ldr   rd, [rn], #m
+/* d */        b       do_DataAbort                    @ ldc   rd, [rn, #m]
 /* e */        b       .data_unknown
 /* f */
 .data_unknown: @ Part of jumptable
-       mov     r0, r2
+       mov     r0, r4
        mov     r1, r8
-       mov     r2, sp
-       bl      baddataabort
-       b       ret_from_exception
+       b       baddataabort
 
 ENTRY(cpu_arm6_data_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
-       ldr     r8, [r2]                        @ read arm instruction
+       ldr     r8, [r4]                        @ read arm instruction
        tst     r8, #1 << 20                    @ L = 0 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
        and     r7, r8, #14 << 24
        teq     r7, #8 << 24                    @ was it ldm/stm
-       movne   pc, lr
+       bne     do_DataAbort
 
 .data_arm_ldmstm:
        tst     r8, #1 << 21                    @ check writeback bit
-       moveq   pc, lr                          @ no writeback -> no fixup
+       beq     do_DataAbort                    @ no writeback -> no fixup
        mov     r7, #0x11
        orr     r7, r7, #0x1100
        and     r6, r8, r7
-       and     r2, r8, r7, lsl #1
-       add     r6, r6, r2, lsr #1
-       and     r2, r8, r7, lsl #2
-       add     r6, r6, r2, lsr #2
-       and     r2, r8, r7, lsl #3
-       add     r6, r6, r2, lsr #3
+       and     r9, r8, r7, lsl #1
+       add     r6, r6, r9, lsr #1
+       and     r9, r8, r7, lsl #2
+       add     r6, r6, r9, lsr #2
+       and     r9, r8, r7, lsl #3
+       add     r6, r6, r9, lsr #3
        add     r6, r6, r6, lsr #8
        add     r6, r6, r6, lsr #4
        and     r6, r6, #15                     @ r6 = no. of registers to transfer.
-       and     r5, r8, #15 << 16               @ Extract 'n' from instruction
-       ldr     r7, [sp, r5, lsr #14]           @ Get register 'Rn'
+       and     r9, r8, #15 << 16               @ Extract 'n' from instruction
+       ldr     r7, [r2, r9, lsr #14]           @ Get register 'Rn'
        tst     r8, #1 << 23                    @ Check U bit
        subne   r7, r7, r6, lsl #2              @ Undo increment
        addeq   r7, r7, r6, lsl #2              @ Undo decrement
-       str     r7, [sp, r5, lsr #14]           @ Put register 'Rn'
-       mov     pc, lr
+       str     r7, [r2, r9, lsr #14]           @ Put register 'Rn'
+       b       do_DataAbort
 
 .data_arm_apply_r6_and_rn:
-       and     r5, r8, #15 << 16               @ Extract 'n' from instruction
-       ldr     r7, [sp, r5, lsr #14]           @ Get register 'Rn'
+       and     r9, r8, #15 << 16               @ Extract 'n' from instruction
+       ldr     r7, [r2, r9, lsr #14]           @ Get register 'Rn'
        tst     r8, #1 << 23                    @ Check U bit
        subne   r7, r7, r6                      @ Undo increment
        addeq   r7, r7, r6                      @ Undo decrement
-       str     r7, [sp, r5, lsr #14]           @ Put register 'Rn'
-       mov     pc, lr
+       str     r7, [r2, r9, lsr #14]           @ Put register 'Rn'
+       b       do_DataAbort
 
 .data_arm_lateldrpreconst:
        tst     r8, #1 << 21                    @ check writeback bit
-       moveq   pc, lr                          @ no writeback -> no fixup
+       beq     do_DataAbort                    @ no writeback -> no fixup
 .data_arm_lateldrpostconst:
-       movs    r2, r8, lsl #20                 @ Get offset
-       moveq   pc, lr                          @ zero -> no fixup
-       and     r5, r8, #15 << 16               @ Extract 'n' from instruction
-       ldr     r7, [sp, r5, lsr #14]           @ Get register 'Rn'
+       movs    r6, r8, lsl #20                 @ Get offset
+       beq     do_DataAbort                    @ zero -> no fixup
+       and     r9, r8, #15 << 16               @ Extract 'n' from instruction
+       ldr     r7, [r2, r9, lsr #14]           @ Get register 'Rn'
        tst     r8, #1 << 23                    @ Check U bit
-       subne   r7, r7, r2, lsr #20             @ Undo increment
-       addeq   r7, r7, r2, lsr #20             @ Undo decrement
-       str     r7, [sp, r5, lsr #14]           @ Put register 'Rn'
-       mov     pc, lr
+       subne   r7, r7, r6, lsr #20             @ Undo increment
+       addeq   r7, r7, r6, lsr #20             @ Undo decrement
+       str     r7, [r2, r9, lsr #14]           @ Put register 'Rn'
+       b       do_DataAbort
 
 .data_arm_lateldrprereg:
        tst     r8, #1 << 21                    @ check writeback bit
-       moveq   pc, lr                          @ no writeback -> no fixup
+       beq     do_DataAbort                    @ no writeback -> no fixup
 .data_arm_lateldrpostreg:
        and     r7, r8, #15                     @ Extract 'm' from instruction
-       ldr     r6, [sp, r7, lsl #2]            @ Get register 'Rm'
-       mov     r5, r8, lsr #7                  @ get shift count
-       ands    r5, r5, #31
+       ldr     r6, [r2, r7, lsl #2]            @ Get register 'Rm'
+       mov     r9, r8, lsr #7                  @ get shift count
+       ands    r9, r9, #31
        and     r7, r8, #0x70                   @ get shift type
        orreq   r7, r7, #8                      @ shift count = 0
        add     pc, pc, r7
        nop
 
-       mov     r6, r6, lsl r5                  @ 0: LSL #!0
+       mov     r6, r6, lsl r9                  @ 0: LSL #!0
        b       .data_arm_apply_r6_and_rn
        b       .data_arm_apply_r6_and_rn       @ 1: LSL #0
        nop
@@ -148,7 +146,7 @@ ENTRY(cpu_arm6_data_abort)
        nop
        b       .data_unknown                   @ 3: MUL?
        nop
-       mov     r6, r6, lsr r5                  @ 4: LSR #!0
+       mov     r6, r6, lsr r9                  @ 4: LSR #!0
        b       .data_arm_apply_r6_and_rn
        mov     r6, r6, lsr #32                 @ 5: LSR #32
        b       .data_arm_apply_r6_and_rn
@@ -156,7 +154,7 @@ ENTRY(cpu_arm6_data_abort)
        nop
        b       .data_unknown                   @ 7: MUL?
        nop
-       mov     r6, r6, asr r5                  @ 8: ASR #!0
+       mov     r6, r6, asr r9                  @ 8: ASR #!0
        b       .data_arm_apply_r6_and_rn
        mov     r6, r6, asr #32                 @ 9: ASR #32
        b       .data_arm_apply_r6_and_rn
@@ -164,7 +162,7 @@ ENTRY(cpu_arm6_data_abort)
        nop
        b       .data_unknown                   @ B: MUL?
        nop
-       mov     r6, r6, ror r5                  @ C: ROR #!0
+       mov     r6, r6, ror r9                  @ C: ROR #!0
        b       .data_arm_apply_r6_and_rn
        mov     r6, r6, rrx                     @ D: RRX
        b       .data_arm_apply_r6_and_rn
index 184a9c997e36616dcc6819418d380f9d9e439291..e9c47271732ddcc3687e31b7c1c7b2795d35c95a 100644 (file)
@@ -34,7 +34,7 @@
  */
 #define DCACHELINESIZE 32
 
-       __INIT
+       .section .text
 
 /*
  * cpu_sa1100_proc_init()
@@ -45,8 +45,6 @@ ENTRY(cpu_sa1100_proc_init)
        mcr     p15, 0, r0, c9, c0, 5           @ Allow read-buffer operations from userland
        mov     pc, lr
 
-       .section .text
-
 /*
  * cpu_sa1100_proc_fin()
  *
index 3c3867850a3011d1e163d347a25ffbabc4aa4dc7..089c0b5e454fccbdbc63a4d4e9d72d15baa7075a 100644 (file)
@@ -210,19 +210,21 @@ cpu_v7_name:
 
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl cpu_v7_suspend_size
-.equ   cpu_v7_suspend_size, 4 * 8
+.equ   cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v7_do_suspend)
        stmfd   sp!, {r4 - r11, lr}
        mrc     p15, 0, r4, c13, c0, 0  @ FCSE/PID
        mrc     p15, 0, r5, c13, c0, 1  @ Context ID
+       mrc     p15, 0, r6, c13, c0, 3  @ User r/o thread ID
+       stmia   r0!, {r4 - r6}
        mrc     p15, 0, r6, c3, c0, 0   @ Domain ID
        mrc     p15, 0, r7, c2, c0, 0   @ TTB 0
        mrc     p15, 0, r8, c2, c0, 1   @ TTB 1
        mrc     p15, 0, r9, c1, c0, 0   @ Control register
        mrc     p15, 0, r10, c1, c0, 1  @ Auxiliary control register
        mrc     p15, 0, r11, c1, c0, 2  @ Co-processor access control
-       stmia   r0, {r4 - r11}
+       stmia   r0, {r6 - r11}
        ldmfd   sp!, {r4 - r11, pc}
 ENDPROC(cpu_v7_do_suspend)
 
@@ -230,9 +232,11 @@ ENTRY(cpu_v7_do_resume)
        mov     ip, #0
        mcr     p15, 0, ip, c8, c7, 0   @ invalidate TLBs
        mcr     p15, 0, ip, c7, c5, 0   @ invalidate I cache
-       ldmia   r0, {r4 - r11}
+       ldmia   r0!, {r4 - r6}
        mcr     p15, 0, r4, c13, c0, 0  @ FCSE/PID
        mcr     p15, 0, r5, c13, c0, 1  @ Context ID
+       mcr     p15, 0, r6, c13, c0, 3  @ User r/o thread ID
+       ldmia   r0, {r6 - r11}
        mcr     p15, 0, r6, c3, c0, 0   @ Domain ID
        mcr     p15, 0, r7, c2, c0, 0   @ TTB 0
        mcr     p15, 0, r8, c2, c0, 1   @ TTB 1
@@ -418,9 +422,9 @@ ENTRY(v7_processor_functions)
        .word   cpu_v7_dcache_clean_area
        .word   cpu_v7_switch_mm
        .word   cpu_v7_set_pte_ext
-       .word   0
-       .word   0
-       .word   0
+       .word   cpu_v7_suspend_size
+       .word   cpu_v7_do_suspend
+       .word   cpu_v7_do_resume
        .size   v7_processor_functions, . - v7_processor_functions
 
        .section ".rodata"
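
The suspend blob grows from eight words to nine, saved in two bursts; a C
view of the layout (field names are descriptive, not from the source):

	/* Sketch: cpu_v7 suspend area, 4 * 9 bytes.
	 * The first stmia stores words 0-2, the second words 3-8. */
	struct v7_suspend_blob {
		u32 fcse_pid;	/* c13, c0, 0  FCSE/PID */
		u32 context_id;	/* c13, c0, 1  Context ID */
		u32 tpidruro;	/* c13, c0, 3  user r/o thread ID (new) */
		u32 domain;	/* c3,  c0, 0  Domain ID */
		u32 ttb0, ttb1;	/* c2,  c0, 0 / c2, c0, 1 */
		u32 sctlr;	/* c1,  c0, 0  Control register */
		u32 actlr;	/* c1,  c0, 1  Auxiliary control */
		u32 cpacr;	/* c1,  c0, 2  Co-processor access */
	};
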
index 9694f1f6f4854a7261853f5d6329684f9dd98ba4..d887a31faaae7d1d93fa6265add0b84c7184e2f3 100644 (file)
@@ -46,7 +46,6 @@ ENTRY(fa_flush_user_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       mcr     p15, 0, r3, c7, c5, 6           @ invalidate BTB
        mcr     p15, 0, r3, c7, c10, 4          @ data write barrier
        mov     pc, lr
 
@@ -60,9 +59,8 @@ ENTRY(fa_flush_kern_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       mcr     p15, 0, r3, c7, c5, 6           @ invalidate BTB
        mcr     p15, 0, r3, c7, c10, 4          @ data write barrier
-       mcr     p15, 0, r3, c7, c5, 4           @ prefetch flush
+       mcr     p15, 0, r3, c7, c5, 4           @ prefetch flush (isb)
        mov     pc, lr
 
        __INITDATA
index 73d7d89b04c48a93c13d3f1e204853065fbcdc7c..ffe06a69a6e558d905628d235bd255362ae099ec 100644 (file)
@@ -54,7 +54,6 @@ ENTRY(v6wbi_flush_user_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       mcr     p15, 0, ip, c7, c5, 6           @ flush BTAC/BTB
        mcr     p15, 0, ip, c7, c10, 4          @ data synchronization barrier
        mov     pc, lr
 
@@ -83,9 +82,8 @@ ENTRY(v6wbi_flush_kern_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
        mcr     p15, 0, r2, c7, c10, 4          @ data synchronization barrier
-       mcr     p15, 0, r2, c7, c5, 4           @ prefetch flush
+       mcr     p15, 0, r2, c7, c5, 4           @ prefetch flush (isb)
        mov     pc, lr
 
        __INIT
index 53cd5b45467318e7fae97127bcdead01e3f4e8eb..86bb7166450830a272a2c79fddbf11d181468ac3 100644 (file)
@@ -48,9 +48,6 @@ ENTRY(v7wbi_flush_user_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       mov     ip, #0
-       ALT_SMP(mcr     p15, 0, ip, c7, c1, 6)  @ flush BTAC/BTB Inner Shareable
-       ALT_UP(mcr      p15, 0, ip, c7, c5, 6)  @ flush BTAC/BTB
        dsb
        mov     pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
@@ -75,9 +72,6 @@ ENTRY(v7wbi_flush_kern_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       mov     r2, #0
-       ALT_SMP(mcr     p15, 0, r2, c7, c1, 6)  @ flush BTAC/BTB Inner Shareable
-       ALT_UP(mcr      p15, 0, r2, c7, c5, 6)  @ flush BTAC/BTB
        dsb
        isb
        mov     pc, lr
index 9612a87e2a881df2f49126e02ed0b6ed60982977..bab73e2c79db9653701e32a1babc63f770fff3d7 100644 (file)
@@ -18,6 +18,7 @@
  */
 #include <linux/init.h>
 #include <asm/traps.h>
+#include <asm/ptrace.h>
 
 static int cp6_trap(struct pt_regs *regs, unsigned int instr)
 {
index 2e49e71b1b98bfaeb754d79d5905ac98df08cf05..066d464d322d7617025cd0adf6346802ec687a8f 100644 (file)
@@ -78,7 +78,3 @@
        movs \irqnr, \irqnr
 #endif
        .endm
-
-       @ irq priority table (not used)
-       .macro  irq_prio_table
-       .endm
index a37b8eb65b76a6c2b13a019545852b3c9b48a7cc..49fc0df0c21f58be364d6da00e14d73eb8c32344 100644 (file)
@@ -84,6 +84,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/pm_runtime.h>
 
 #include <plat/omap_device.h>
 #include <plat/omap_hwmod.h>
@@ -539,20 +540,34 @@ int omap_early_device_register(struct omap_device *od)
 static int _od_runtime_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
+       int ret;
+
+       ret = pm_generic_runtime_suspend(dev);
+
+       if (!ret)
+               omap_device_idle(pdev);
+
+       return ret;
+}
 
-       return omap_device_idle(pdev);
+static int _od_runtime_idle(struct device *dev)
+{
+       return pm_generic_runtime_idle(dev);
 }
 
 static int _od_runtime_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
 
-       return omap_device_enable(pdev);
+       omap_device_enable(pdev);
+
+       return pm_generic_runtime_resume(dev);
 }
 
 static struct dev_power_domain omap_device_power_domain = {
        .ops = {
                .runtime_suspend = _od_runtime_suspend,
+               .runtime_idle = _od_runtime_idle,
                .runtime_resume = _od_runtime_resume,
                USE_PLATFORM_PM_SLEEP_OPS
        }
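
The ordering is the point of this hunk: on suspend the driver callback runs
before the hardware is idled (and the idling happens only if it succeeded),
while on resume the hardware comes back before the driver callback.
Generically (hw_enable/hw_disable are hypothetical stand-ins for the
omap_device calls):

	static int my_runtime_suspend(struct device *dev)
	{
		int ret = pm_generic_runtime_suspend(dev);

		if (!ret)
			hw_disable(dev);	/* cf. omap_device_idle() */
		return ret;
	}

	static int my_runtime_resume(struct device *dev)
	{
		hw_enable(dev);			/* cf. omap_device_enable() */
		return pm_generic_runtime_resume(dev);
	}
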
index 6af3d0b1f8d058e8387697b3c560bce32f7e31ef..363c91e44efb3fb5909c9465872b9fc968601d9e 100644 (file)
@@ -394,20 +394,15 @@ void omap3_sram_restore_context(void)
 }
 #endif /* CONFIG_PM */
 
-static int __init omap34xx_sram_init(void)
-{
-       _omap3_sram_configure_core_dpll =
-               omap_sram_push(omap3_sram_configure_core_dpll,
-                              omap3_sram_configure_core_dpll_sz);
-       omap_push_sram_idle();
-       return 0;
-}
-#else
+#endif /* CONFIG_ARCH_OMAP3 */
+
 static inline int omap34xx_sram_init(void)
 {
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
+       omap3_sram_restore_context();
+#endif
        return 0;
 }
-#endif
 
 int __init omap_sram_init(void)
 {
index 5b4fffab1eb4647f9712e61141cc6fb2f08068af..41ab97ebe4cfc8877fc58cd09f446c02636a030d 100644 (file)
@@ -432,7 +432,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
        ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
        ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
        ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_clr_bit;
        ct->chip.irq_mask = irq_gc_mask_clr_bit;
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
        ct->chip.irq_set_type = gpio_irq_set_type;
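
irq_gc_ack() was split because ack registers come in both polarities:
write-1-to-ack hardware takes irq_gc_ack_set_bit(), while Orion's edge-cause
register is acked by writing 0, hence irq_gc_ack_clr_bit(). Roughly what the
clr-bit helper does (condensed from kernel/irq/generic-chip.c of this era):

	void irq_gc_ack_clr_bit(struct irq_data *d)
	{
		struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
		u32 mask = ~(1 << (d->irq - gc->irq_base));

		irq_gc_lock(gc);
		irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
		irq_gc_unlock(gc);
	}
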
index 48ebb9479b619708b37d385a75ce752d719d835f..a11dc36705051956d992e30085f41e12dd5f68d7 100644 (file)
@@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
        return container_of(c, struct pxa_gpio_chip, chip)->regbase;
 }
 
-static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio)
+static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
 {
        return &pxa_gpio_chips[gpio_to_bank(gpio)];
 }
@@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
        int gpio = irq_to_gpio(d->irq);
        unsigned long gpdr, mask = GPIO_bit(gpio);
 
-       c = gpio_to_chip(gpio);
+       c = gpio_to_pxachip(gpio);
 
        if (type == IRQ_TYPE_PROBE) {
                /* Don't mess with enabled GPIOs using preconfigured edges or
@@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
 static void pxa_ack_muxed_gpio(struct irq_data *d)
 {
        int gpio = irq_to_gpio(d->irq);
-       struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+       struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
 
        __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
 }
@@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d)
 static void pxa_mask_muxed_gpio(struct irq_data *d)
 {
        int gpio = irq_to_gpio(d->irq);
-       struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+       struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
        uint32_t grer, gfer;
 
        c->irq_mask &= ~GPIO_bit(gpio);
@@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
 static void pxa_unmask_muxed_gpio(struct irq_data *d)
 {
        int gpio = irq_to_gpio(d->irq);
-       struct pxa_gpio_chip *c = gpio_to_chip(gpio);
+       struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
 
        c->irq_mask |= GPIO_bit(gpio);
        update_edge_detect(c);
index 2abf9660bc6cc6eaa9879b82281bf79d3c83225d..539bd0e3defdc2ab9be00294a997a80fd060f6d6 100644 (file)
@@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel);
  * get control of a dma channel
 */
 
-int s3c2410_dma_request(unsigned int channel,
+int s3c2410_dma_request(enum dma_ch channel,
                        struct s3c2410_dma_client *client,
                        void *dev)
 {
@@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
  * allowed to go through.
 */
 
-int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
+int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        unsigned long flags;
@@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan)
 }
 
 int
-s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
+s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -1021,23 +1021,19 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
  * xfersize:     size of unit in bytes (1,2,4)
 */
 
-int s3c2410_dma_config(unsigned int channel,
+int s3c2410_dma_config(enum dma_ch channel,
                       int xferunit)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
        unsigned int dcon;
 
-       pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n",
-                __func__, channel, xferunit, dcon);
+       pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit);
 
        if (chan == NULL)
                return -EINVAL;
 
-       pr_debug("%s: Initial dcon is %08x\n", __func__, dcon);
-
        dcon = chan->dcon & dma_sel.dcon_mask;
-
-       pr_debug("%s: New dcon is %08x\n", __func__, dcon);
+       pr_debug("%s: dcon is %08x\n", __func__, dcon);
 
        switch (chan->req_ch) {
        case DMACH_I2S_IN:
@@ -1104,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config);
  * devaddr:   physical address of the source
 */
 
-int s3c2410_dma_devconfig(unsigned int channel,
+int s3c2410_dma_devconfig(enum dma_ch channel,
                          enum s3c2410_dmasrc source,
                          unsigned long devaddr)
 {
@@ -1177,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig);
  * returns the current transfer points for the dma source and destination
 */
 
-int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst)
+int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -1235,7 +1231,7 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
        /* restore channel's hardware configuration */
 
        if (!cp->in_use)
-               return 0;
+               return;
 
        printk(KERN_INFO "dma%d: restoring configuration\n", cp->number);
 
@@ -1246,8 +1242,6 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
 
        if (cp->map != NULL)
                dma_sel.select(cp, cp->map);
-
-       return 0;
 }
 
 static void s3c2410_dma_resume(void)
index fd7032f84ae7623198f97a28fc40854941be61b3..c56612569b40e315a096ed6eea40aa09e477d107 100644 (file)
 
        .text
 
-       /* s3c_cpu_save
-        *
-        * entry:
-        *      r1 = v:p offset
-       */
-
-ENTRY(s3c_cpu_save)
-       stmfd   sp!, { r4 - r12, lr }
-       ldr     r3, =resume_with_mmu
-       bl      cpu_suspend
-
-       @@ jump to final code to send system to sleep
-       ldr     r0, =pm_cpu_sleep
-       @@ldr   pc, [ r0 ]
-       ldr     r0, [ r0 ]
-       mov     pc, r0
-       
-       @@ return to the caller, after having the MMU
-       @@ turned on, this restores the last bits from the
-       @@ stack
-resume_with_mmu:
-       ldmfd   sp!, { r4 - r12, pc }
-
-       .ltorg
-
        /* sleep magic, to allow the bootloader to check for a valid
         * image to resume to. Must be the first word before the
         * s3c_cpu_resume entry.
index 135abda31c9adfd9f1681f607bc92c2f27b9d92c..327ab9f662e8bca3019489817bb649b0eff68389 100644 (file)
@@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
        if (!gc)
                return -ENOMEM;
        ct = gc->chip_types;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask = irq_gc_mask_set_bit;
        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
        ct->chip.irq_set_type = s5p_gpioint_set_type;
index 899a8cc011fffc6cc98ff8bd87e07b4f54e97f1c..612934c48b0d25fc3d2bacbb1eed22e2806f191d 100644 (file)
@@ -370,11 +370,11 @@ static void __init s5p_clocksource_init(void)
 
        clock_rate = clk_get_rate(tin_source);
 
-       init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
-
        s5p_time_setup(timer_source.source_id, TCNT_MAX);
        s5p_time_start(timer_source.source_id, PERIODIC);
 
+       init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
+
        if (clocksource_register_hz(&time_clocksource, clock_rate))
                panic("%s: can't register clocksource\n", time_clocksource.name);
 }
index cb459dd9545957cde60a7f61a2f80bd0820493ff..6143aa1476880a0a37b4328fe6e699caa022daa3 100644 (file)
@@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel)
  * irq?
 */
 
-int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
+int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
 }
 EXPORT_SYMBOL(s3c2410_dma_set_opfn);
 
-int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
+int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
 }
 EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
 
-int s3c2410_dma_setflags(unsigned int channel, unsigned int flags)
+int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags)
 {
        struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
index 4af108ff41121da579b41e0717802daf6c56285b..e3b31c26ac3eaa7fc1737daec9725e09990b9a21 100644 (file)
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
 */
+
+#ifndef __PLAT_DEVS_H
+#define __PLAT_DEVS_H __FILE__
+
 #include <linux/platform_device.h>
 
 struct s3c24xx_uart_resources {
@@ -159,3 +163,5 @@ extern struct platform_device s3c_device_ac97;
  */
 extern void *s3c_set_platdata(void *pd, size_t pdsize,
                              struct platform_device *pdev);
+
+#endif /* __PLAT_DEVS_H */
index 2e8f8c6560d72128c92cdfa16b4f908565865216..8c273b7a6f56593015ccec86869670bedc1b5a3d 100644 (file)
@@ -42,6 +42,7 @@ struct s3c2410_dma_client {
 };
 
 struct s3c2410_dma_chan;
+enum dma_ch;
 
 /* s3c2410_dma_cbfn_t
  *
@@ -62,7 +63,7 @@ typedef int  (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *,
  * request a dma channel exclusively
 */
 
-extern int s3c2410_dma_request(unsigned int channel,
+extern int s3c2410_dma_request(enum dma_ch channel,
                               struct s3c2410_dma_client *, void *dev);
 
 
@@ -71,14 +72,14 @@ extern int s3c2410_dma_request(unsigned int channel,
  * change the state of the dma channel
 */
 
-extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op);
+extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op);
 
 /* s3c2410_dma_setflags
  *
  * set the channel's flags to a given state
 */
 
-extern int s3c2410_dma_setflags(unsigned int channel,
+extern int s3c2410_dma_setflags(enum dma_ch channel,
                                unsigned int flags);
 
 /* s3c2410_dma_free
@@ -86,7 +87,7 @@ extern int s3c2410_dma_setflags(unsigned int channel,
  * free the dma channel (will also abort any outstanding operations)
 */
 
-extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
+extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *);
 
 /* s3c2410_dma_enqueue
  *
@@ -95,7 +96,7 @@ extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
  * drained before the buffer is given to the DMA system.
 */
 
-extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
+extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
                               dma_addr_t data, int size);
 
 /* s3c2410_dma_config
@@ -103,14 +104,14 @@ extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
  * configure the dma channel
 */
 
-extern int s3c2410_dma_config(unsigned int channel, int xferunit);
+extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
 
 /* s3c2410_dma_devconfig
  *
  * configure the device we're talking to
 */
 
-extern int s3c2410_dma_devconfig(unsigned int channel,
+extern int s3c2410_dma_devconfig(enum dma_ch channel,
                enum s3c2410_dmasrc source, unsigned long devaddr);
 
 /* s3c2410_dma_getposition
@@ -118,10 +119,10 @@ extern int s3c2410_dma_devconfig(unsigned int channel,
  * get the position that the dma transfer is currently at
 */
 
-extern int s3c2410_dma_getposition(unsigned int channel,
+extern int s3c2410_dma_getposition(enum dma_ch channel,
                                   dma_addr_t *src, dma_addr_t *dest);
 
-extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn);
-extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn);
+extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
+extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
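
Retyping every channel argument from unsigned int to enum dma_ch lets the compiler reject arbitrary integers at the call sites; the bare 'enum dma_ch;' forward declaration above is enough here because the header only uses the type in prototypes. A hedged sketch of a client of the retyped API (the handler, client, and wrapper names are hypothetical; only the s3c2410_dma_* calls come from this header):

static void example_buffdone(struct s3c2410_dma_chan *chan, void *buf,
                             int size, enum s3c2410_dma_buffresult result)
{
        /* hypothetical buffer-completion handler */
}

static struct s3c2410_dma_client example_client = {
        .name = "example",
};

static int example_start(enum dma_ch ch, dma_addr_t buf, int size)
{
        int ret;

        ret = s3c2410_dma_request(ch, &example_client, NULL);
        if (ret < 0)
                return ret;

        s3c2410_dma_config(ch, 4);              /* 4-byte transfer unit */
        s3c2410_dma_set_buffdone_fn(ch, example_buffdone);

        return s3c2410_dma_enqueue(ch, NULL, buf, size);
}
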
 
 
index 7fb6f6be8c81e6e14b74a800b1a865f8e1225d7e..f6749916d194b8d1e2a2e0a59cc35ccfa44ded16 100644 (file)
@@ -42,7 +42,7 @@ extern unsigned long s3c_irqwake_eintallow;
 /* per-cpu sleep functions */
 
 extern void (*pm_cpu_prep)(void);
-extern void (*pm_cpu_sleep)(void);
+extern int (*pm_cpu_sleep)(unsigned long);
 
 /* Flags for PM Control */
 
@@ -52,10 +52,9 @@ extern unsigned char pm_uart_udivslot;  /* true to save UART UDIVSLOT */
 
 /* from sleep.S */
 
-extern int  s3c_cpu_save(unsigned long *saveblk, long);
 extern void s3c_cpu_resume(void);
 
-extern void s3c2410_cpu_suspend(void);
+extern int s3c2410_cpu_suspend(unsigned long);
 
 /* sleep save info */
 
index c151c5f94a87b0e647863b5bb48016a6f27f55a8..116edfe120b972612aad96fa9217ad2fdc279c84 100644 (file)
 #define S5PV210_UFSTAT_RXMASK  (255<<0)
 #define S5PV210_UFSTAT_RXSHIFT (0)
 
+#define NO_NEED_CHECK_CLKSRC   1
+
 #ifndef __ASSEMBLY__
 
 /* struct s3c24xx_uart_clksrc
index 0ffe34a215544b1008dc0d0ff6dbf0bc852da0cf..4c16fa3621bb813f1dbd80f25e9a987dbd5e04ca 100644 (file)
@@ -39,6 +39,7 @@ struct s3c64xx_spi_csinfo {
  * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6
  * @rx_lvl_offset: Depends on tx fifo_lvl field and bus number
  * @high_speed: If the controller supports HIGH_SPEED_EN bit
+ * @tx_st_done: Depends on tx fifo_lvl field
  */
 struct s3c64xx_spi_info {
        int src_clk_nr;
@@ -53,6 +54,7 @@ struct s3c64xx_spi_info {
        int fifo_lvl_mask;
        int rx_lvl_offset;
        int high_speed;
+       int tx_st_done;
 };
 
 /**
index 32582c0958e3676c74818bbcee40388953d099a9..657405c481d04bed1dc310e245c182192b25e83d 100644 (file)
@@ -54,8 +54,15 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
 
        gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
                                    handle_level_irq);
+
+       if (!gc) {
+               pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
+                      __func__, uirq->base_irq);
+               return;
+       }
+
        ct = gc->chip_types;
-       ct->chip.irq_ack = irq_gc_ack;
+       ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask = irq_gc_mask_set_bit;
        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
        ct->regs.ack = S3C64XX_UINTP;
index a607546ddbd0c25dd9c71f1517b51455324bd11f..f714d060370d6f1647e29e2367591dafa242b4b6 100644 (file)
@@ -54,6 +54,13 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq)
 
        s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq,
                                         S3C64XX_TINT_CSTAT, handle_level_irq);
+
+       if (!s3c_tgc) {
+               pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n",
+                      __func__, timer_irq);
+               return;
+       }
+
        ct = s3c_tgc->chip_types;
        ct->chip.irq_mask = irq_gc_mask_clr_bit;
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
index 5c0a440d6e16741f34f547b7aa6583a16997b5c4..5fa1742d019bed872067426b1e1a16452eb32217 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 
 #include <asm/cacheflush.h>
+#include <asm/suspend.h>
 #include <mach/hardware.h>
 #include <mach/map.h>
 
@@ -231,7 +232,7 @@ static void __maybe_unused s3c_pm_show_resume_irqs(int start,
 
 
 void (*pm_cpu_prep)(void);
-void (*pm_cpu_sleep)(void);
+int (*pm_cpu_sleep)(unsigned long);
 
 #define any_allowed(mask, allow) (((mask) & (allow)) != (allow))
 
@@ -294,15 +295,11 @@ static int s3c_pm_enter(suspend_state_t state)
 
        s3c_pm_arch_stop_clocks();
 
-       /* s3c_cpu_save will also act as our return point from when
+       /* this will also act as our return point from when
         * we resume as it saves its own register state and restores it
         * during the resume.  */
 
-       s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET);
-
-       /* restore the cpu state using the kernel's cpu init code. */
-
-       cpu_init();
+       cpu_suspend(0, pm_cpu_sleep);
 
        /* restore the system state */
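
With the two hunks above, the platform sleep hook now has the generic cpu_suspend() finisher signature: it is entered with the CPU context already saved, must put the core to sleep, and if it ever returns, cpu_suspend() treats the attempt as aborted and unwinds. A hedged sketch of what a pm_cpu_sleep implementation looks like under this contract (the register-poke helper is hypothetical):

static void example_write_sleep_mode(void)
{
        /* machine-specific sleep-mode register pokes would go here */
}

static int example_cpu_sleep(unsigned long arg)
{
        example_write_sleep_mode();

        cpu_do_idle();          /* stop the core; resume re-enters elsewhere */

        /* reached only if entering sleep failed; cpu_suspend() then
         * unwinds back into s3c_pm_enter() */
        return 1;
}

A board would assign pm_cpu_sleep = example_cpu_sleep at init time, and s3c_pm_enter() invokes it through cpu_suspend(0, pm_cpu_sleep) as above.
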
 
index 9897dcfc16d67a0e104552814adaad8ec1230e32..2d30c7f6edd32ddd5b93e6b8356ae679ce983041 100644 (file)
@@ -77,27 +77,27 @@ ENTRY(vfp_support_entry)
        bne     look_for_VFP_exceptions @ VFP is already enabled
 
        DBGSTR1 "enable %x", r10
-       ldr     r3, last_VFP_context_address
+       ldr     r3, vfp_current_hw_state_address
        orr     r1, r1, #FPEXC_EN       @ user FPEXC has the enable bit set
-       ldr     r4, [r3, r11, lsl #2]   @ last_VFP_context pointer
+       ldr     r4, [r3, r11, lsl #2]   @ vfp_current_hw_state pointer
        bic     r5, r1, #FPEXC_EX       @ make sure exceptions are disabled
-       cmp     r4, r10
-       beq     check_for_exception     @ we are returning to the same
-                                       @ process, so the registers are
-                                       @ still there.  In this case, we do
-                                       @ not want to drop a pending exception.
+       cmp     r4, r10                 @ this thread owns the hw context?
+#ifndef CONFIG_SMP
+       @ For UP, checking that this thread owns the hw context is
+       @ sufficient to determine that the hardware state is valid.
+       beq     vfp_hw_state_valid
+
+       @ On UP, we lazily save the VFP context.  As a different
+       @ thread wants ownership of the VFP hardware, save the old
+       @ state if there was a previous (valid) owner.
 
        VFPFMXR FPEXC, r5               @ enable VFP, disable any pending
                                        @ exceptions, so we can get at the
                                        @ rest of it
 
-#ifndef CONFIG_SMP
-       @ Save out the current registers to the old thread state
-       @ No need for SMP since this is not done lazily
-
        DBGSTR1 "save old state %p", r4
-       cmp     r4, #0
-       beq     no_old_VFP_process
+       cmp     r4, #0                  @ if the vfp_current_hw_state is NULL
+       beq     vfp_reload_hw           @ then the hw state needs reloading
        VFPFSTMIA r4, r5                @ save the working registers
        VFPFMRX r5, FPSCR               @ current status
 #ifndef CONFIG_CPU_FEROCEON
@@ -110,13 +110,35 @@ ENTRY(vfp_support_entry)
 1:
 #endif
        stmia   r4, {r1, r5, r6, r8}    @ save FPEXC, FPSCR, FPINST, FPINST2
-                                       @ and point r4 at the word at the
-                                       @ start of the register dump
+vfp_reload_hw:
+
+#else
+       @ For SMP, if this thread does not own the hw context, then we
+       @ need to reload it.  No need to save the old state as on SMP,
+       @ we always save the state when we switch away from a thread.
+       bne     vfp_reload_hw
+
+       @ This thread has ownership of the current hardware context.
+       @ However, it may have been migrated to another CPU, in which
+       @ case the saved state is newer than the hardware context.
+       @ Check this by looking at the CPU number which the state was
+       @ last loaded onto.
+       ldr     ip, [r10, #VFP_CPU]
+       teq     ip, r11
+       beq     vfp_hw_state_valid
+
+vfp_reload_hw:
+       @ We're loading this thread's state into the VFP hardware. Update
+       @ the CPU number which contains the most up-to-date VFP context.
+       str     r11, [r10, #VFP_CPU]
+
+       VFPFMXR FPEXC, r5               @ enable VFP, disable any pending
+                                       @ exceptions, so we can get at the
+                                       @ rest of it
 #endif
 
-no_old_VFP_process:
        DBGSTR1 "load state %p", r10
-       str     r10, [r3, r11, lsl #2]  @ update the last_VFP_context pointer
+       str     r10, [r3, r11, lsl #2]  @ update the vfp_current_hw_state pointer
                                        @ Load the saved state back into the VFP
        VFPFLDMIA r10, r5               @ reload the working registers while
                                        @ FPEXC is in a safe state
@@ -132,7 +154,8 @@ no_old_VFP_process:
 #endif
        VFPFMXR FPSCR, r5               @ restore status
 
-check_for_exception:
+@ The context stored in the VFP hardware is up to date with this thread
+vfp_hw_state_valid:
        tst     r1, #FPEXC_EX
        bne     process_exception       @ might as well handle the pending
                                        @ exception before retrying branch
@@ -207,8 +230,8 @@ ENTRY(vfp_save_state)
 ENDPROC(vfp_save_state)
 
        .align
-last_VFP_context_address:
-       .word   last_VFP_context
+vfp_current_hw_state_address:
+       .word   vfp_current_hw_state
 
        .macro  tbl_branch, base, tmp, shift
 #ifdef CONFIG_THUMB2_KERNEL
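
For readers not fluent in the assembler, the validity test the new labels implement can be sketched in C. It mirrors the vfp_state_in_hw() helper added in the vfpmodule.c hunk below; this version is illustrative only, not code from the patch:

/* is the VFP hardware context valid for 'thread' on this cpu? */
static bool sketch_hw_state_valid(unsigned int cpu,
                                  struct thread_info *thread)
{
        if (vfp_current_hw_state[cpu] != &thread->vfpstate)
                return false;           /* another thread owns the hw */
#ifdef CONFIG_SMP
        /* owner, but possibly migrated: the saved copy is then newer */
        if (thread->vfpstate.hard.cpu != cpu)
                return false;
#endif
        return true;                    /* on UP, ownership alone suffices */
}
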
index f25e7ec8941696f3bd10b765d387b6ea7936d4c0..0a96f71f0abd3394d292737e65c832d7c2f1b926 100644 (file)
@@ -33,7 +33,6 @@ void vfp_support_entry(void);
 void vfp_null_entry(void);
 
 void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -42,6 +41,46 @@ union vfp_state *last_VFP_context[NR_CPUS];
  */
 unsigned int VFP_arch;
 
+/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ *
+ * For UP, this is sufficient to tell which thread owns the VFP context.
+ * However, for SMP, we also need to check the CPU number stored in the
+ * saved state too to catch migrations.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+/*
+ * Is 'thread's most up-to-date state stored in this CPU's hardware?
+ * Must be called from non-preemptible context.
+ */
+static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
+{
+#ifdef CONFIG_SMP
+       if (thread->vfpstate.hard.cpu != cpu)
+               return false;
+#endif
+       return vfp_current_hw_state[cpu] == &thread->vfpstate;
+}
+
+/*
+ * Force a reload of the VFP context from the thread structure.  We do
+ * this by ensuring that access to the VFP hardware is disabled and
+ * clearing vfp_current_hw_state.  Must be called from non-preemptible context.
+ */
+static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
+{
+       if (vfp_state_in_hw(cpu, thread)) {
+               fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+               vfp_current_hw_state[cpu] = NULL;
+       }
+#ifdef CONFIG_SMP
+       thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
 /*
  * Per-thread VFP initialization.
  */
@@ -50,21 +89,27 @@ static void vfp_thread_flush(struct thread_info *thread)
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu;
 
-       memset(vfp, 0, sizeof(union vfp_state));
-
-       vfp->hard.fpexc = FPEXC_EN;
-       vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
        /*
         * Disable VFP to ensure we initialize it first.  We must ensure
-        * that the modification of last_VFP_context[] and hardware disable
-        * are done for the same CPU and without preemption.
+        * that the modification of vfp_current_hw_state[] and hardware
+        * disable are done for the same CPU and without preemption.
+        *
+        * Do this first to ensure that preemption won't overwrite our
+        * state saving should access to the VFP be enabled at this point.
         */
        cpu = get_cpu();
-       if (last_VFP_context[cpu] == vfp)
-               last_VFP_context[cpu] = NULL;
+       if (vfp_current_hw_state[cpu] == vfp)
+               vfp_current_hw_state[cpu] = NULL;
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        put_cpu();
+
+       memset(vfp, 0, sizeof(union vfp_state));
+
+       vfp->hard.fpexc = FPEXC_EN;
+       vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+#ifdef CONFIG_SMP
+       vfp->hard.cpu = NR_CPUS;
+#endif
 }
 
 static void vfp_thread_exit(struct thread_info *thread)
@@ -73,8 +118,8 @@ static void vfp_thread_exit(struct thread_info *thread)
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu = get_cpu();
 
-       if (last_VFP_context[cpu] == vfp)
-               last_VFP_context[cpu] = NULL;
+       if (vfp_current_hw_state[cpu] == vfp)
+               vfp_current_hw_state[cpu] = NULL;
        put_cpu();
 }
 
@@ -84,6 +129,9 @@ static void vfp_thread_copy(struct thread_info *thread)
 
        vfp_sync_hwstate(parent);
        thread->vfpstate = parent->vfpstate;
+#ifdef CONFIG_SMP
+       thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
 }
 
 /*
@@ -129,17 +177,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                 * case the thread migrates to a different CPU. The
                 * restoring is done lazily.
                 */
-               if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
-                       vfp_save_state(last_VFP_context[cpu], fpexc);
-                       last_VFP_context[cpu]->hard.cpu = cpu;
-               }
-               /*
-                * Thread migration, just force the reloading of the
-                * state on the new CPU in case the VFP registers
-                * contain stale data.
-                */
-               if (thread->vfpstate.hard.cpu != cpu)
-                       last_VFP_context[cpu] = NULL;
+               if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+                       vfp_save_state(vfp_current_hw_state[cpu], fpexc);
 #endif
 
                /*
@@ -415,7 +454,7 @@ static int vfp_pm_suspend(void)
        }
 
        /* clear any information we had about last context state */
-       memset(last_VFP_context, 0, sizeof(last_VFP_context));
+       memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
 
        return 0;
 }
@@ -443,15 +482,15 @@ static void vfp_pm_init(void)
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
+/*
+ * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
+ * with the hardware state.
+ */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
        unsigned int cpu = get_cpu();
 
-       /*
-        * If the thread we're interested in is the current owner of the
-        * hardware VFP state, then we need to save its state.
-        */
-       if (last_VFP_context[cpu] == &thread->vfpstate) {
+       if (vfp_state_in_hw(cpu, thread)) {
                u32 fpexc = fmrx(FPEXC);
 
                /*
@@ -465,36 +504,13 @@ void vfp_sync_hwstate(struct thread_info *thread)
        put_cpu();
 }
 
+/* Ensure that the thread reloads the hardware VFP state on the next use. */
 void vfp_flush_hwstate(struct thread_info *thread)
 {
        unsigned int cpu = get_cpu();
 
-       /*
-        * If the thread we're interested in is the current owner of the
-        * hardware VFP state, then we need to save its state.
-        */
-       if (last_VFP_context[cpu] == &thread->vfpstate) {
-               u32 fpexc = fmrx(FPEXC);
-
-               fmxr(FPEXC, fpexc & ~FPEXC_EN);
-
-               /*
-                * Set the context to NULL to force a reload the next time
-                * the thread uses the VFP.
-                */
-               last_VFP_context[cpu] = NULL;
-       }
+       vfp_force_reload(cpu, thread);
 
-#ifdef CONFIG_SMP
-       /*
-        * For SMP we still have to take care of the case where the thread
-        * migrates to another CPU and then back to the original CPU on which
-        * the last VFP user is still the same thread. Mark the thread VFP
-        * state as belonging to a non-existent CPU so that the saved one will
-        * be reloaded in the above case.
-        */
-       thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
        put_cpu();
 }
 
@@ -513,8 +529,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
        void *hcpu)
 {
        if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
-               unsigned int cpu = (long)hcpu;
-               last_VFP_context[cpu] = NULL;
+               vfp_force_reload((long)hcpu, current_thread_info());
        } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                vfp_enable(NULL);
        return NOTIFY_OK;
index 9f3b5accda88a0b9d9c275b6c1cd9b69ce9e50f7..115ced33febdbe59de005c23f07abbf01371423a 100644 (file)
@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)         (node_data[nid])
 
 #define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)                                              \
-({                                                                     \
-       pg_data_t *__pgdat = NODE_DATA(nid);                            \
-       __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1;      \
-})
 
 #define pmd_page(pmd)          (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 /*
@@ -44,7 +38,7 @@ static __inline__ int pfn_to_nid(unsigned long pfn)
        int node;
 
        for (node = 0 ; node < MAX_NUMNODES ; node++)
-               if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node))
+               if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node))
                        break;
 
        return node;
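
The '<=' to '<' change is the point of deleting the local macros: the Alpha version subtracted 1 to make node_end_pfn() inclusive, while the generic helper this file now picks up is exclusive, one past the last spanned page. The generic definitions, quoted from memory from the common mmzone header added in this series (treat as illustrative):

#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)                                               \
({                                                                      \
        pg_data_t *__pgdat = NODE_DATA(nid);                            \
        __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
})

/* so a pfn belongs to nid iff:
 *      node_start_pfn(nid) <= pfn && pfn < node_end_pfn(nid)
 */
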
index c018696765d4c86d5a474049f5a13023387f437d..5c74eb797f08f1f4a6cc24df2d1c9567e5a198ed 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/irq.h>
 
 #include <asm/i8259.h>
@@ -215,14 +215,13 @@ spurious_8259A_irq:
        }
 }
 
-static int i8259A_resume(struct sys_device *dev)
+static void i8259A_resume(void)
 {
        if (i8259A_auto_eoi >= 0)
                init_8259A(i8259A_auto_eoi);
-       return 0;
 }
 
-static int i8259A_shutdown(struct sys_device *dev)
+static void i8259A_shutdown(void)
 {
        /* Put the i8259A into a quiescent state that
         * the kernel initialization code can get it
@@ -232,26 +231,17 @@ static int i8259A_shutdown(struct sys_device *dev)
                outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
                outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-2 */
        }
-       return 0;
 }
 
-static struct sysdev_class i8259_sysdev_class = {
-       .name = "i8259",
+static struct syscore_ops i8259_syscore_ops = {
        .resume = i8259A_resume,
        .shutdown = i8259A_shutdown,
 };
 
-static struct sys_device device_i8259A = {
-       .id     = 0,
-       .cls    = &i8259_sysdev_class,
-};
-
 static int __init i8259A_init_sysfs(void)
 {
-       int error = sysdev_class_register(&i8259_sysdev_class);
-       if (!error)
-               error = sysdev_register(&device_i8259A);
-       return error;
+       register_syscore_ops(&i8259_syscore_ops);
+       return 0;
 }
 
 device_initcall(i8259A_init_sysfs);
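
The conversion collapses a sysdev class/device pair into one syscore_ops. Syscore callbacks run late in suspend/shutdown with interrupts disabled, take no device argument, and resume/shutdown return void, which is why i8259A_resume() and i8259A_shutdown() lose their return values. The bare pattern, with hypothetical callbacks:

#include <linux/syscore_ops.h>

static void mydev_resume(void)
{
        /* reprogram hardware state lost across suspend */
}

static void mydev_shutdown(void)
{
        /* quiesce the hardware for the next kernel */
}

static struct syscore_ops mydev_syscore_ops = {
        .resume   = mydev_resume,
        .shutdown = mydev_shutdown,
};

static int __init mydev_syscore_init(void)
{
        register_syscore_ops(&mydev_syscore_ops);
        return 0;
}
device_initcall(mydev_syscore_init);
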
index 3d6e60dad9d98a2b99a44029658638c48f300d50..780560b330d9ef9109a76b0210185f3bc178ca8e 100644 (file)
@@ -15,6 +15,7 @@
  * User space memory access functions
  */
 #include <linux/thread_info.h>
+#include <linux/kernel.h>
 #include <asm/page.h>
 #include <asm/errno.h>
 
index 9608d2cf214ac7d33d76661b6bff10cc9dad00c1..e67eb9c3d1bfb47769d2de11e3532a22250b308d 100644 (file)
@@ -14,13 +14,6 @@ extern struct node_map_data node_data[];
 
 #define NODE_DATA(nid)          (&node_data[nid].pg_data)
 
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)                                              \
-({                                                                     \
-       pg_data_t *__pgdat = NODE_DATA(nid);                            \
-       __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
-})
-
 /* We have these possible memory map layouts:
  * Astro: 0-3.75, 67.75-68, 4-64
  * zx1: 0-1, 257-260, 4-256
index 4f685a779f4cd591ee3f941f1bea0b6dc3b6e717..98d9426d4b85429d511192aba3b7a4bb5b2dd3dd 100644 (file)
                        wm8776:codec@1a {
                                compatible = "wlf,wm8776";
                                reg = <0x1a>;
-                               /* MCLK source is a stand-alone oscillator */
-                               clock-frequency = <12288000>;
+                               /*
+                                * clock-frequency will be set by U-Boot if
+                                * the clock is enabled.
+                                */
                        };
                };
 
                        codec-handle = <&wm8776>;
                        fsl,playback-dma = <&dma00>;
                        fsl,capture-dma = <&dma01>;
-                       fsl,fifo-depth = <16>;
+                       fsl,fifo-depth = <15>;
+                       fsl,ssi-asynchronous;
                };
 
                dma@c300 {
index c9f212b5f3ded98122423e13754ad40fe202e839..80bc5de7ee1d3c87d3b627c49c4eb0ae8e3d6c66 100644 (file)
@@ -148,7 +148,6 @@ CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_SCSI_CXGB3_ISCSI=m
 CONFIG_SCSI_CXGB4_ISCSI=m
 CONFIG_SCSI_BNX2_ISCSI=m
-CONFIG_SCSI_BNX2_ISCSI=m
 CONFIG_BE2ISCSI=m
 CONFIG_SCSI_IBMVSCSI=y
 CONFIG_SCSI_IBMVFC=m
index fd3fd58bad845df6f2fbb1753257de994cd7cc1d..7b589178be46b293519bf2244c0e4dd6251fe9a1 100644 (file)
@@ -38,13 +38,6 @@ u64 memory_hotplug_max(void);
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)      (NODE_DATA(nid)->node_end_pfn)
-
 #else
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
index 77578c093ddafc6b57936f9e9715d725ec740829..c57c19358a263941e1053465c25539e48fec8cdb 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/init.h>
 #include <linux/rtc.h>
 #include <linux/delay.h>
+#include <linux/ratelimit.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/time.h>
@@ -29,9 +30,10 @@ unsigned long __init rtas_get_boot_time(void)
                }
        } while (wait_time && (get_tb() < max_wait_tb));
 
-       if (error != 0 && printk_ratelimit()) {
-               printk(KERN_WARNING "error: reading the clock failed (%d)\n",
-                       error);
+       if (error != 0) {
+               printk_ratelimited(KERN_WARNING
+                                  "error: reading the clock failed (%d)\n",
+                                  error);
                return 0;
        }
 
@@ -55,19 +57,21 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm)
 
                wait_time = rtas_busy_delay_time(error);
                if (wait_time) {
-                       if (in_interrupt() && printk_ratelimit()) {
+                       if (in_interrupt()) {
                                memset(rtc_tm, 0, sizeof(struct rtc_time));
-                               printk(KERN_WARNING "error: reading clock"
-                                      " would delay interrupt\n");
+                               printk_ratelimited(KERN_WARNING
+                                                  "error: reading clock "
+                                                  "would delay interrupt\n");
                                return; /* delay not allowed */
                        }
                        msleep(wait_time);
                }
        } while (wait_time && (get_tb() < max_wait_tb));
 
-        if (error != 0 && printk_ratelimit()) {
-                printk(KERN_WARNING "error: reading the clock failed (%d)\n",
-                      error);
+       if (error != 0) {
+               printk_ratelimited(KERN_WARNING
+                                  "error: reading the clock failed (%d)\n",
+                                  error);
                return;
         }
 
@@ -99,9 +103,10 @@ int rtas_set_rtc_time(struct rtc_time *tm)
                }
        } while (wait_time && (get_tb() < max_wait_tb));
 
-        if (error != 0 && printk_ratelimit())
-                printk(KERN_WARNING "error: setting the clock failed (%d)\n",
-                      error);
+       if (error != 0)
+               printk_ratelimited(KERN_WARNING
+                                  "error: setting the clock failed (%d)\n",
+                                  error);
 
         return 0;
 }
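
This file and the signal, traps, fault, irq and mpic hunks that follow all apply one mechanical conversion. printk_ratelimit() consults a single global ratelimit state shared by every caller, so one noisy path can suppress unrelated messages; printk_ratelimited() (from the newly included linux/ratelimit.h) expands to a static per-callsite ratelimit state plus the printk. The shape of the change, distilled from the hunk above:

        /* before: global, shared ratelimit state */
        if (error != 0 && printk_ratelimit())
                printk(KERN_WARNING "error: reading the clock failed (%d)\n",
                       error);

        /* after: per-callsite ratelimit state, one call */
        if (error != 0)
                printk_ratelimited(KERN_WARNING
                                   "error: reading the clock failed (%d)\n",
                                   error);
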
index b96a3a010c26859ab93f8cca5ec74cd4905dd16b..78b76dc54dfb27847a24228e1bac2e2ef804354a 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/errno.h>
 #include <linux/elf.h>
 #include <linux/ptrace.h>
+#include <linux/ratelimit.h>
 #ifdef CONFIG_PPC64
 #include <linux/syscalls.h>
 #include <linux/compat.h>
@@ -892,11 +893,12 @@ badframe:
        printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
               regs, frame, newsp);
 #endif
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: "
-                       "%p nip %08lx lr %08lx\n",
-                       current->comm, current->pid,
-                       addr, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(KERN_INFO
+                                  "%s[%d]: bad frame in handle_rt_signal32: "
+                                  "%p nip %08lx lr %08lx\n",
+                                  current->comm, current->pid,
+                                  addr, regs->nip, regs->link);
 
        force_sigsegv(sig, current);
        return 0;
@@ -1058,11 +1060,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
        return 0;
 
  bad:
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: "
-                       "%p nip %08lx lr %08lx\n",
-                       current->comm, current->pid,
-                       rt_sf, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(KERN_INFO
+                                  "%s[%d]: bad frame in sys_rt_sigreturn: "
+                                  "%p nip %08lx lr %08lx\n",
+                                  current->comm, current->pid,
+                                  rt_sf, regs->nip, regs->link);
 
        force_sig(SIGSEGV, current);
        return 0;
@@ -1149,12 +1152,12 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
         * We kill the task with a SIGSEGV in this situation.
         */
        if (do_setcontext(ctx, regs, 1)) {
-               if (show_unhandled_signals && printk_ratelimit())
-                       printk(KERN_INFO "%s[%d]: bad frame in "
-                               "sys_debug_setcontext: %p nip %08lx "
-                               "lr %08lx\n",
-                               current->comm, current->pid,
-                               ctx, regs->nip, regs->link);
+               if (show_unhandled_signals)
+                       printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
+                                          "sys_debug_setcontext: %p nip %08lx "
+                                          "lr %08lx\n",
+                                          current->comm, current->pid,
+                                          ctx, regs->nip, regs->link);
 
                force_sig(SIGSEGV, current);
                goto out;
@@ -1236,11 +1239,12 @@ badframe:
        printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
               regs, frame, newsp);
 #endif
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: "
-                       "%p nip %08lx lr %08lx\n",
-                       current->comm, current->pid,
-                       frame, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(KERN_INFO
+                                  "%s[%d]: bad frame in handle_signal32: "
+                                  "%p nip %08lx lr %08lx\n",
+                                  current->comm, current->pid,
+                                  frame, regs->nip, regs->link);
 
        force_sigsegv(sig, current);
        return 0;
@@ -1288,11 +1292,12 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
        return 0;
 
 badframe:
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: "
-                       "%p nip %08lx lr %08lx\n",
-                       current->comm, current->pid,
-                       addr, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(KERN_INFO
+                                  "%s[%d]: bad frame in sys_sigreturn: "
+                                  "%p nip %08lx lr %08lx\n",
+                                  current->comm, current->pid,
+                                  addr, regs->nip, regs->link);
 
        force_sig(SIGSEGV, current);
        return 0;
index da989fff19ccb4eeb5663785bfb99fa73754d28e..e91c736cc8428a596818957c517bfe71eaf74e76 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/elf.h>
 #include <linux/ptrace.h>
 #include <linux/module.h>
+#include <linux/ratelimit.h>
 
 #include <asm/sigcontext.h>
 #include <asm/ucontext.h>
@@ -380,10 +381,10 @@ badframe:
        printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
               regs, uc, &uc->uc_mcontext);
 #endif
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
-                       current->comm, current->pid, "rt_sigreturn",
-                       (long)uc, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
+                                  current->comm, current->pid, "rt_sigreturn",
+                                  (long)uc, regs->nip, regs->link);
 
        force_sig(SIGSEGV, current);
        return 0;
@@ -468,10 +469,10 @@ badframe:
        printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
               regs, frame, newsp);
 #endif
-       if (show_unhandled_signals && printk_ratelimit())
-               printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
-                       current->comm, current->pid, "setup_rt_frame",
-                       (long)frame, regs->nip, regs->link);
+       if (show_unhandled_signals)
+               printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
+                                  current->comm, current->pid, "setup_rt_frame",
+                                  (long)frame, regs->nip, regs->link);
 
        force_sigsegv(signr, current);
        return 0;
index 0ff4ab98d50ca713f1c8a983349aa2369fcad22e..1a0141426cda8ba86bc64840197617f9cc099b89 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/bug.h>
 #include <linux/kdebug.h>
 #include <linux/debugfs.h>
+#include <linux/ratelimit.h>
 
 #include <asm/emulated_ops.h>
 #include <asm/pgtable.h>
@@ -197,12 +198,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
                if (die("Exception in kernel mode", regs, signr))
                        return;
        } else if (show_unhandled_signals &&
-                   unhandled_signal(current, signr) &&
-                   printk_ratelimit()) {
-                       printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
-                               current->comm, current->pid, signr,
-                               addr, regs->nip, regs->link, code);
-               }
+                  unhandled_signal(current, signr)) {
+               printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
+                                  current->comm, current->pid, signr,
+                                  addr, regs->nip, regs->link, code);
+       }
 
        memset(&info, 0, sizeof(info));
        info.si_signo = signr;
@@ -425,7 +425,7 @@ int machine_check_e500mc(struct pt_regs *regs)
        unsigned long reason = mcsr;
        int recoverable = 1;
 
-       if (reason & MCSR_BUS_RBERR) {
+       if (reason & MCSR_LD) {
                recoverable = fsl_rio_mcheck_exception(regs);
                if (recoverable == 1)
                        goto silent_out;
@@ -1342,9 +1342,8 @@ void altivec_assist_exception(struct pt_regs *regs)
        } else {
                /* didn't recognize the instruction */
                /* XXX quick hack for now: set the non-Java bit in the VSCR */
-               if (printk_ratelimit())
-                       printk(KERN_ERR "Unrecognized altivec instruction "
-                              "in %s at %lx\n", current->comm, regs->nip);
+               printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
+                                  "in %s at %lx\n", current->comm, regs->nip);
                current->thread.vscr.u[3] |= 0x10000;
        }
 }
@@ -1548,9 +1547,8 @@ u32 ppc_warn_emulated;
 
 void ppc_warn_emulated_print(const char *type)
 {
-       if (printk_ratelimit())
-               pr_warning("%s used emulated %s instruction\n", current->comm,
-                          type);
+       pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
+                           type);
 }
 
 static int __init ppc_warn_emulated_init(void)
index 54f4fb994e99aae549ccb38b4ebcd490d0df0e0d..ad35f66c69e867893e17172c470a11904c2eac9d 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>
 #include <linux/magic.h>
+#include <linux/ratelimit.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -346,11 +347,10 @@ bad_area_nosemaphore:
                return 0;
        }
 
-       if (is_exec && (error_code & DSISR_PROTFAULT)
-           && printk_ratelimit())
-               printk(KERN_CRIT "kernel tried to execute NX-protected"
-                      " page (%lx) - exploit attempt? (uid: %d)\n",
-                      address, current_uid());
+       if (is_exec && (error_code & DSISR_PROTFAULT))
+               printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
+                                  " page (%lx) - exploit attempt? (uid: %d)\n",
+                                  address, current_uid());
 
        return SIGSEGV;
 
index 33867ec4a234086b6b0ede0c39dc2df26883956b..9d6a8effeda2dfb7947102d0031830a11735b200 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/of.h>
 #include <linux/memblock.h>
 #include <linux/vmalloc.h>
+#include <linux/memory.h>
+
 #include <asm/firmware.h>
 #include <asm/machdep.h>
 #include <asm/pSeries_reconfig.h>
 static unsigned long get_memblock_size(void)
 {
        struct device_node *np;
-       unsigned int memblock_size = 0;
+       unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+       struct resource r;
 
        np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (np) {
-               const unsigned long *size;
+               const __be64 *size;
 
                size = of_get_property(np, "ibm,lmb-size", NULL);
-               memblock_size = size ? *size : 0;
-
+               if (size)
+                       memblock_size = be64_to_cpup(size);
                of_node_put(np);
-       } else {
+       } else if (machine_is(pseries)) {
+               /* This fallback really only applies to pseries */
                unsigned int memzero_size = 0;
-               const unsigned int *regs;
 
                np = of_find_node_by_path("/memory@0");
                if (np) {
-                       regs = of_get_property(np, "reg", NULL);
-                       memzero_size = regs ? regs[3] : 0;
+                       if (!of_address_to_resource(np, 0, &r))
+                               memzero_size = resource_size(&r);
                        of_node_put(np);
                }
 
@@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void)
                        sprintf(buf, "/memory@%x", memzero_size);
                        np = of_find_node_by_path(buf);
                        if (np) {
-                               regs = of_get_property(np, "reg", NULL);
-                               memblock_size = regs ? regs[3] : 0;
+                               if (!of_address_to_resource(np, 0, &r))
+                                       memblock_size = resource_size(&r);
                                of_node_put(np);
                        }
                }
        }
-
        return memblock_size;
 }
 
+/* WARNING: This is going to override the generic definition whenever
+ * pseries is built-in regardless of what platform is active at boot
+ * time. This is fine for now as this is the only "option" and it
+ * should work everywhere. If not, we'll have to turn this into a
+ * ppc_md. callback
+ */
 unsigned long memory_block_size_bytes(void)
 {
        return get_memblock_size();
index 5b206a2fe17c47653632e516051984746ad2d587..b3fd081d56f5fdfbb5ee650889a7a645b7598edb 100644 (file)
@@ -283,23 +283,24 @@ static void __iomem *rio_regs_win;
 #ifdef CONFIG_E500
 int fsl_rio_mcheck_exception(struct pt_regs *regs)
 {
-       const struct exception_table_entry *entry = NULL;
-       unsigned long reason = mfspr(SPRN_MCSR);
-
-       if (reason & MCSR_BUS_RBERR) {
-               reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
-               if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
-                       /* Check if we are prepared to handle this fault */
-                       entry = search_exception_tables(regs->nip);
-                       if (entry) {
-                               pr_debug("RIO: %s - MC Exception handled\n",
-                                        __func__);
-                               out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
-                                        0);
-                               regs->msr |= MSR_RI;
-                               regs->nip = entry->fixup;
-                               return 1;
-                       }
+       const struct exception_table_entry *entry;
+       unsigned long reason;
+
+       if (!rio_regs_win)
+               return 0;
+
+       reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
+       if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
+               /* Check if we are prepared to handle this fault */
+               entry = search_exception_tables(regs->nip);
+               if (entry) {
+                       pr_debug("RIO: %s - MC Exception handled\n",
+                                __func__);
+                       out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
+                                0);
+                       regs->msr |= MSR_RI;
+                       regs->nip = entry->fixup;
+                       return 1;
                }
        }
 
index 3a8de5bb628ae9483c0dec79ea5c22ae72b88bb0..58d7a534f877662c7d31078535778680e66c53d7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/syscore_ops.h>
+#include <linux/ratelimit.h>
 
 #include <asm/ptrace.h>
 #include <asm/signal.h>
@@ -1648,9 +1649,8 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
                return NO_IRQ;
        }
        if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
-               if (printk_ratelimit())
-                       printk(KERN_WARNING "%s: Got protected source %d !\n",
-                              mpic->name, (int)src);
+               printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
+                                  mpic->name, (int)src);
                mpic_eoi(mpic);
                return NO_IRQ;
        }
@@ -1688,9 +1688,8 @@ unsigned int mpic_get_coreint_irq(void)
                return NO_IRQ;
        }
        if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
-               if (printk_ratelimit())
-                       printk(KERN_WARNING "%s: Got protected source %d !\n",
-                              mpic->name, (int)src);
+               printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
+                                  mpic->name, (int)src);
                return NO_IRQ;
        }
 
index 90d77bd078f51acc243b21ee0c63432d01369ee9..c03fef7a9c2220c45ca1584ad8f65191bb5c9e76 100644 (file)
@@ -579,6 +579,7 @@ config S390_GUEST
        def_bool y
        prompt "s390 guest support for KVM (EXPERIMENTAL)"
        depends on 64BIT && EXPERIMENTAL
+       select VIRTUALIZATION
        select VIRTIO
        select VIRTIO_RING
        select VIRTIO_CONSOLE
index 52420d2785b3607e1c682568358ea8f31a398d17..1d55c95f617c76d3f86784d34750395fb7573391 100644 (file)
@@ -262,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit)
 
        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-       parms.orvals[cr] = 1 << bit;
+       parms.orvals[cr] = 1UL << bit;
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -276,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit)
 
        memset(&parms.orvals, 0, sizeof(parms.orvals));
        memset(&parms.andvals, 0xff, sizeof(parms.andvals));
-       parms.andvals[cr] = ~(1L << bit);
+       parms.andvals[cr] = ~(1UL << bit);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
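
The suffix matters because the control-register image is an array of unsigned long (64-bit on 64-bit s390) and bit can exceed 30: '1 << bit' is an int shift, which is undefined for bit >= 31 and can never reach the upper 32 bits. A minimal illustration:

        unsigned long orval, andval;
        int bit = 40;

        orval  = 1UL << bit;            /* correct: full-width shift */
        andval = ~(1UL << bit);
        /* orval = 1 << bit;               broken: 32-bit int shift */
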
index 5995e9bc72d9c291d57df38ab66c0a1929f43dd6..0e358c2cffeb5e6bf1955ba6ca2c3de7092e03b3 100644 (file)
@@ -25,7 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
 
 #include "hwsampler.h"
 
-#define DEFAULT_INTERVAL       4096
+#define DEFAULT_INTERVAL       4127518
 
 #define DEFAULT_SDBT_BLOCKS    1
 #define DEFAULT_SDB_BLOCKS     511
@@ -151,6 +151,12 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
        if (oprofile_max_interval == 0)
                return -ENODEV;
 
+       /* The initial value should be sane */
+       if (oprofile_hw_interval < oprofile_min_interval)
+               oprofile_hw_interval = oprofile_min_interval;
+       if (oprofile_hw_interval > oprofile_max_interval)
+               oprofile_hw_interval = oprofile_max_interval;
+
        if (oprofile_timer_init(ops))
                return -ENODEV;
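
The added checks clamp a tunable interval into the range the hardware reports. The same thing could be written with the kernel's clamp() helper from linux/kernel.h; a sketch, assuming all three variables share a type as clamp() requires:

        oprofile_hw_interval = clamp(oprofile_hw_interval,
                                     oprofile_min_interval,
                                     oprofile_max_interval);
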
 
index f03338c2f0886bbb830973f15e4a929d19981e6d..bbdeb48bbf8e40fb80cdf4cd3d4c0b3e132dc6b1 100644 (file)
@@ -348,6 +348,7 @@ config CPU_SUBTYPE_SH7720
        select SYS_SUPPORTS_CMT
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select USB_ARCH_HAS_OHCI
+       select USB_OHCI_SH if USB_OHCI_HCD
        help
          Select SH7720 if you have a SH3-DSP SH7720 CPU.
 
@@ -357,6 +358,7 @@ config CPU_SUBTYPE_SH7721
        select CPU_HAS_DSP
        select SYS_SUPPORTS_CMT
        select USB_ARCH_HAS_OHCI
+       select USB_OHCI_SH if USB_OHCI_HCD
        help
          Select SH7721 if you have a SH3-DSP SH7721 CPU.
 
@@ -440,6 +442,7 @@ config CPU_SUBTYPE_SH7763
        bool "Support SH7763 processor"
        select CPU_SH4A
        select USB_ARCH_HAS_OHCI
+       select USB_OHCI_SH if USB_OHCI_HCD
        help
          Select SH7763 if you have a SH4A SH7763(R5S77631) CPU.
 
@@ -467,7 +470,9 @@ config CPU_SUBTYPE_SH7786
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select USB_ARCH_HAS_OHCI
+       select USB_OHCI_SH if USB_OHCI_HCD
        select USB_ARCH_HAS_EHCI
+       select USB_EHCI_SH if USB_EHCI_HCD
 
 config CPU_SUBTYPE_SHX3
        bool "Support SH-X3 processor"
index 33ddb130a7c8c2f2e9b66ae4c8fab68352a4be6e..cfde98ddb29d3e437b1c49d07d1e659c09158c1c 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
@@ -39,8 +38,6 @@ CONFIG_IPV6=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_M25P80=y
@@ -56,18 +53,19 @@ CONFIG_SH_ETH=y
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_MOUSE_PS2 is not set
 # CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=3
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 CONFIG_SPI=y
 CONFIG_SPI_SH=y
 # CONFIG_HWMON is not set
-CONFIG_MFD_SH_MOBILE_SDHI=y
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_SH=y
 CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_SH=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHI=y
index 8887baff5effef0db2657c24f4a52eaf7cf89b0b..15a8496960e6a9987105939832b13c94f33aebaa 100644 (file)
@@ -9,10 +9,6 @@
 extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)         (node_data[nid])
 
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)      (NODE_DATA(nid)->node_start_pfn + \
-                                NODE_DATA(nid)->node_spanned_pages)
-
 static inline int pfn_to_nid(unsigned long pfn)
 {
        int nid;
index 423dabf542d35373ec905d3f23bb7b4fb6bfbc2e..e915deafac89ba8af2774272a3ec04545bae9c1a 100644 (file)
@@ -183,7 +183,7 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_SCIF2_RX,
                .addr           = 0x1f4b0014,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x22,
        },
@@ -197,7 +197,7 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_SCIF3_RX,
                .addr           = 0x1f4c0014,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x2a,
        },
@@ -211,7 +211,7 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_SCIF4_RX,
                .addr           = 0x1f4d0014,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x42,
        },
@@ -228,7 +228,7 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC0_RX,
                .addr           = 0x1e500013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x22,
        },
@@ -242,7 +242,7 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC1_RX,
                .addr           = 0x1e510013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x2a,
        },
@@ -256,7 +256,7 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC2_RX,
                .addr           = 0x1e520013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0xa2,
        },
@@ -265,12 +265,12 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
                .addr           = 0x1e530012,
                .chcr           = SM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
-               .mid_rid        = 0xab,
+               .mid_rid        = 0xa9,
        },
        {
                .slave_id       = SHDMA_SLAVE_RIIC3_RX,
                .addr           = 0x1e530013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0xaf,
        },
@@ -279,14 +279,14 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
                .addr           = 0x1e540012,
                .chcr           = SM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
-               .mid_rid        = 0xc1,
+               .mid_rid        = 0xc5,
        },
        {
                .slave_id       = SHDMA_SLAVE_RIIC4_RX,
                .addr           = 0x1e540013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
-               .mid_rid        = 0xc2,
+               .mid_rid        = 0xc6,
        },
 };
 
@@ -301,7 +301,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC5_RX,
                .addr           = 0x1e550013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x22,
        },
@@ -315,7 +315,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC6_RX,
                .addr           = 0x1e560013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x2a,
        },
@@ -329,7 +329,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC7_RX,
                .addr           = 0x1e570013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x42,
        },
@@ -343,7 +343,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC8_RX,
                .addr           = 0x1e580013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x46,
        },
@@ -357,7 +357,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
        {
                .slave_id       = SHDMA_SLAVE_RIIC9_RX,
                .addr           = 0x1e590013,
-               .chcr           = SM_INC | 0x800 | 0x40000000 |
+               .chcr           = DM_INC | 0x800 | 0x40000000 |
                                  TS_INDEX2VAL(XMIT_SZ_8BIT),
                .mid_rid        = 0x52,
        },
@@ -659,6 +659,54 @@ static struct platform_device spi0_device = {
        .resource       = spi0_resources,
 };
 
+static struct resource usb_ehci_resources[] = {
+       [0] = {
+               .start  = 0xfe4f1000,
+               .end    = 0xfe4f10ff,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = 57,
+               .end    = 57,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device usb_ehci_device = {
+       .name           = "sh_ehci",
+       .id             = -1,
+       .dev = {
+               .dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
+       },
+       .num_resources  = ARRAY_SIZE(usb_ehci_resources),
+       .resource       = usb_ehci_resources,
+};
+
+static struct resource usb_ohci_resources[] = {
+       [0] = {
+               .start  = 0xfe4f1800,
+               .end    = 0xfe4f18ff,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = 57,
+               .end    = 57,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device usb_ohci_device = {
+       .name           = "sh_ohci",
+       .id             = -1,
+       .dev = {
+               .dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
+       },
+       .num_resources  = ARRAY_SIZE(usb_ohci_resources),
+       .resource       = usb_ohci_resources,
+};
+
 static struct platform_device *sh7757_devices[] __initdata = {
        &scif2_device,
        &scif3_device,
@@ -670,6 +718,8 @@ static struct platform_device *sh7757_devices[] __initdata = {
        &dma2_device,
        &dma3_device,
        &spi0_device,
+       &usb_ehci_device,
+       &usb_ohci_device,
 };
 
 static int __init sh7757_devices_setup(void)
@@ -1039,13 +1089,13 @@ static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups,
 
 /* Support for external interrupt pins in IRQ mode */
 static struct intc_vect vectors_irq0123[] __initdata = {
-       INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
-       INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+       INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
+       INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
 };
 
 static struct intc_vect vectors_irq4567[] __initdata = {
-       INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
-       INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
+       INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340),
+       INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
 };
 
 static struct intc_sense_reg sense_registers[] __initdata = {
@@ -1079,14 +1129,14 @@ static struct intc_vect vectors_irl0123[] __initdata = {
 };
 
 static struct intc_vect vectors_irl4567[] __initdata = {
-       INTC_VECT(IRL4_LLLL, 0xb00), INTC_VECT(IRL4_LLLH, 0xb20),
-       INTC_VECT(IRL4_LLHL, 0xb40), INTC_VECT(IRL4_LLHH, 0xb60),
-       INTC_VECT(IRL4_LHLL, 0xb80), INTC_VECT(IRL4_LHLH, 0xba0),
-       INTC_VECT(IRL4_LHHL, 0xbc0), INTC_VECT(IRL4_LHHH, 0xbe0),
-       INTC_VECT(IRL4_HLLL, 0xc00), INTC_VECT(IRL4_HLLH, 0xc20),
-       INTC_VECT(IRL4_HLHL, 0xc40), INTC_VECT(IRL4_HLHH, 0xc60),
-       INTC_VECT(IRL4_HHLL, 0xc80), INTC_VECT(IRL4_HHLH, 0xca0),
-       INTC_VECT(IRL4_HHHL, 0xcc0),
+       INTC_VECT(IRL4_LLLL, 0x200), INTC_VECT(IRL4_LLLH, 0x220),
+       INTC_VECT(IRL4_LLHL, 0x240), INTC_VECT(IRL4_LLHH, 0x260),
+       INTC_VECT(IRL4_LHLL, 0x280), INTC_VECT(IRL4_LHLH, 0x2a0),
+       INTC_VECT(IRL4_LHHL, 0x2c0), INTC_VECT(IRL4_LHHH, 0x2e0),
+       INTC_VECT(IRL4_HLLL, 0x300), INTC_VECT(IRL4_HLLH, 0x320),
+       INTC_VECT(IRL4_HLHL, 0x340), INTC_VECT(IRL4_HLHH, 0x360),
+       INTC_VECT(IRL4_HHLL, 0x380), INTC_VECT(IRL4_HHLH, 0x3a0),
+       INTC_VECT(IRL4_HHHL, 0x3c0),
 };
 
 static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123,
index 91971103b62b56e7bd4352a441bf7282b8dac061..a3ee91971129099826493db6c23b463417284f56 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
 #include <linux/delay.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
@@ -268,9 +269,8 @@ void migrate_irqs(void)
                        unsigned int newcpu = cpumask_any_and(data->affinity,
                                                              cpu_online_mask);
                        if (newcpu >= nr_cpu_ids) {
-                               if (printk_ratelimit())
-                                       printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-                                              irq, cpu);
+                               pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+                                                   irq, cpu);
 
                                cpumask_setall(data->affinity);
                                newcpu = cpumask_any_and(data->affinity,
index b2595b8548ee353b4c5099be26ba6fa940565cb7..620fa7ff9eec88164bdbac180562974f31485912 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/uaccess.h>
+#include <linux/ratelimit.h>
 #include <asm/alignment.h>
 #include <asm/processor.h>
 
@@ -95,13 +96,13 @@ int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
 void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
                             struct pt_regs *regs)
 {
-       if (user_mode(regs) && (se_usermode & UM_WARN) && printk_ratelimit())
-               pr_notice("Fixing up unaligned userspace access "
+       if (user_mode(regs) && (se_usermode & UM_WARN))
+               pr_notice_ratelimited("Fixing up unaligned userspace access "
                          "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
                          tsk->comm, task_pid_nr(tsk),
                          (void *)instruction_pointer(regs), insn);
-       else if (se_kernmode_warn && printk_ratelimit())
-               pr_notice("Fixing up unaligned kernel access "
+       else if (se_kernmode_warn)
+               pr_notice_ratelimited("Fixing up unaligned kernel access "
                          "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
                          tsk->comm, task_pid_nr(tsk),
                          (void *)instruction_pointer(regs), insn);
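
Both conversions above replace an open-coded printk_ratelimit() guard with the self-contained pr_*_ratelimited() helpers. As a rough userspace sketch of the idea, assuming a simple one-message-per-interval policy (the kernel helpers keep equivalent per-callsite state internally; this is not their implementation):

#include <stdio.h>
#include <time.h>

/* Allow one message per `interval` seconds; drop the rest. */
static int ratelimited(time_t *last, time_t interval)
{
        time_t now = time(NULL);

        if (*last && now - *last < interval)
                return 0;
        *last = now;
        return 1;
}

int main(void)
{
        time_t last = 0;
        int i;

        for (i = 0; i < 5; i++)
                if (ratelimited(&last, 5))
                        printf("Fixing up unaligned access (demo %d)\n", i);
        return 0;       /* prints once; the other four are suppressed */
}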
index d4d0711de0f9f5031439927d02517d6a5e743509..14848909e0dec49c7b5a7cc56b482bc9a068d26f 100644 (file)
@@ -18,7 +18,7 @@ extern void arch_local_irq_restore(unsigned long);
 extern unsigned long arch_local_irq_save(void);
 extern void arch_local_irq_enable(void);
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
        unsigned long flags;
 
@@ -26,17 +26,17 @@ static inline unsigned long arch_local_save_flags(void)
        return flags;
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
        arch_local_irq_save();
 }
 
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
        return (flags & PSR_PIL) != 0;
 }
 
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
 {
        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
index aab969c82c2b654391089b180d77d45e62416058..23cd27f6beb47e689842fbfdafc3bbf5826c7977 100644 (file)
@@ -14,7 +14,7 @@
 
 #ifndef __ASSEMBLY__
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
        unsigned long flags;
 
@@ -26,7 +26,7 @@ static inline unsigned long arch_local_save_flags(void)
        return flags;
 }
 
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
        __asm__ __volatile__(
                "wrpr   %0, %%pil"
@@ -36,7 +36,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
        );
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
        __asm__ __volatile__(
                "wrpr   %0, %%pil"
@@ -46,7 +46,7 @@ static inline void arch_local_irq_disable(void)
        );
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
        __asm__ __volatile__(
                "wrpr   0, %%pil"
@@ -56,17 +56,17 @@ static inline void arch_local_irq_enable(void)
        );
 }
 
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
 {
        return (flags > 0);
 }
 
-static inline int arch_irqs_disabled(void)
+static inline notrace int arch_irqs_disabled(void)
 {
        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
        unsigned long flags, tmp;
 
index e8c648741ed4d12a52949f095257d3aec6176850..99d9b9f577bfb15826faa7e5b488f3e76d7f81d7 100644 (file)
@@ -8,8 +8,6 @@
 extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)         (node_data[nid])
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)      (NODE_DATA(nid)->node_end_pfn)
 
 extern int numa_cpu_lookup_table[];
 extern cpumask_t numa_cpumask_lookup_table[];
index 9fe08a1ea6c6ea226f9cb4a91bf1f183067d98da..f445e98463e6d332c0191f13092738dc8336677b 100644 (file)
@@ -293,7 +293,7 @@ maybe_smp4m_msg:
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
-       sll     %o3, 28, %o2            ! shift for simpler checks below
+       srl     %o3, 28, %o2            ! shift for simpler checks below
 maybe_smp4m_msg_check_single:
        andcc   %o2, 0x1, %g0
        beq,a   maybe_smp4m_msg_check_mask
index c0e01297e64eb84a03b8582e135c616b7c129ba0..e485a680499824319b5e9bd6fb750806d99a9429 100644 (file)
@@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs)
  * Leon2 and Leon3 differ in their way of telling cache information
  *
  */
-int leon_flush_needed(void)
+int __init leon_flush_needed(void)
 {
        int flush_needed = -1;
        unsigned int ssize, sets;
index c6344c4f32ac17ddf27dcbcd5563a302708052c2..9d3dbce8f953167aca40e12eb56dc376ee0acc73 100644 (file)
@@ -40,17 +40,6 @@ static inline int pfn_to_nid(unsigned long pfn)
        return highbits_to_node[__pfn_to_highbits(pfn)];
 }
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)                                              \
-({                                                                     \
-       pg_data_t *__pgdat = NODE_DATA(nid);                            \
-       __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
-})
-
 #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
 
 static inline int pfn_valid(int pfn)
diff --git a/arch/um/include/asm/percpu.h b/arch/um/include/asm/percpu.h
new file mode 100644 (file)
index 0000000..efe7508
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __UM_PERCPU_H
+#define __UM_PERCPU_H
+
+#include <asm-generic/percpu.h>
+
+#endif /* __UM_PERCPU_H */
index da349723d4115cef7d75aac4680ba2284deaf0d0..37357a599dcac02e4407467e844bbac0d9e8d224 100644 (file)
@@ -1170,7 +1170,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 config AMD_NUMA
        def_bool y
        prompt "Old style AMD Opteron NUMA detection"
-       depends on NUMA && PCI
+       depends on X86_64 && NUMA && PCI
        ---help---
          Enable AMD NUMA node topology detection.  You should say Y here if
          you have a multi processor AMD system. This uses an old method to
index 2fefa501d3ba64ee5db2e3541555a28ebe27598e..af60d8a2e28847069eb2014622cac76b89a5e9ff 100644 (file)
@@ -62,7 +62,7 @@ extern int sfi_mtimer_num;
 #else /* CONFIG_APB_TIMER */
 
 static inline unsigned long apbt_quick_calibrate(void) {return 0; }
-static inline void apbt_time_init(void) {return 0; }
+static inline void apbt_time_init(void) { }
 
 #endif
 #endif /* ASM_X86_APBT_H */
index 19ae14ba69780c38defd3fbcfe7fdcb05859f9c0..0cd3800f33b9dcf68e39a1288ebf7ccd442f3a97 100644 (file)
@@ -4,7 +4,6 @@
 #define ARCH_DISCARD_MEMBLOCK
 
 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
 
 #endif
index 5e83a416eca8941410c0b93a99beff853831f433..ffa037f28d39a7542c364bbef12689f23d1a22a2 100644 (file)
@@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 #endif
 }
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)                                              \
-({                                                                     \
-       pg_data_t *__pgdat = NODE_DATA(nid);                            \
-       __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
-})
-
 static inline int pfn_valid(int pfn)
 {
        int nid = pfn_to_nid(pfn);
@@ -68,6 +57,8 @@ static inline int pfn_valid(int pfn)
        return 0;
 }
 
+#define early_pfn_valid(pfn)   pfn_valid((pfn))
+
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
index b3f88d7867c79235372b5ad6f5cfc6edd254a083..129d9aa3ceb3646665370b37bdae1e7665b906a4 100644 (file)
@@ -13,8 +13,5 @@ extern struct pglist_data *node_data[];
 
 #define NODE_DATA(nid)         (node_data[nid])
 
-#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)       (NODE_DATA(nid)->node_start_pfn +      \
-                                NODE_DATA(nid)->node_spanned_pages)
 #endif
 #endif /* _ASM_X86_MMZONE_64_H */
index 31d84acc15125646018914b51a758821ec796590..a518c0a4504465e6ac46068ba6d34a68d66a6fd3 100644 (file)
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
        u64 product;
 #ifdef __i386__
        u32 tmp1, tmp2;
+#else
+       ulong tmp;
 #endif
 
        if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
                : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
        __asm__ (
-               "mul %%rdx ; shrd $32,%%rdx,%%rax"
-               : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+               "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+               : [lo]"=a"(product),
+                 [hi]"=d"(tmp)
+               : "0"(delta),
+                 [mul_frac]"rm"((u64)mul_frac));
 #else
 #error implement me!
 #endif
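
The rewritten x86-64 asm still computes (delta << shift) * mul_frac >> 32: mul leaves the 128-bit product in rdx:rax, and shrd $32 shifts the low half of rdx into rax; naming the operands lets the compiler pick a register or memory slot for mul_frac and records that rdx is clobbered. A runnable cross-check of the arithmetic, assuming the gcc/clang __int128 extension:

#include <stdio.h>
#include <stdint.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
        if (shift < 0)
                delta >>= -shift;
        else
                delta <<= shift;
        return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
        /* mul_frac = 2^31 encodes the ratio 0.5, so 1000 scales to 500. */
        printf("%llu\n", (unsigned long long)scale_delta(1000, 1u << 31, 0));
        return 0;
}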
index ead21b6631175df124210113025f49f994186a98..b4fd836e405377363754eb5ac412c2c6a6e57147 100644 (file)
@@ -28,6 +28,8 @@ pmode_cr3:    .long   0       /* Saved %cr3 */
 pmode_cr4:     .long   0       /* Saved %cr4 */
 pmode_efer:    .quad   0       /* Saved EFER */
 pmode_gdt:     .quad   0
+pmode_misc_en: .quad   0       /* Saved MISC_ENABLE MSR */
+pmode_behavior:        .long   0       /* Wakeup behavior flags */
 realmode_flags:        .long   0
 real_magic:    .long   0
 trampoline_segment:    .word 0
@@ -91,6 +93,18 @@ wakeup_code:
        /* Call the C code */
        calll   main
 
+       /* Restore MISC_ENABLE before entering protected mode, in case
+          the BIOS decided to clear XD_DISABLE during S3. */
+       movl    pmode_behavior, %eax
+       btl     $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
+       jnc     1f
+
+       movl    pmode_misc_en, %eax
+       movl    pmode_misc_en + 4, %edx
+       movl    $MSR_IA32_MISC_ENABLE, %ecx
+       wrmsr
+1:
+
        /* Do any other stuff... */
 
 #ifndef CONFIG_64BIT
index e1828c07e79cdb2f47a9ecfa3661621229ead8f9..97a29e1430e3c245a314661af02fbed5ca7565af 100644 (file)
@@ -21,6 +21,9 @@ struct wakeup_header {
        u32 pmode_efer_low;     /* Protected mode EFER */
        u32 pmode_efer_high;
        u64 pmode_gdt;
+       u32 pmode_misc_en_low;  /* Protected mode MISC_ENABLE */
+       u32 pmode_misc_en_high;
+       u32 pmode_behavior;     /* Wakeup routine behavior flags */
        u32 realmode_flags;
        u32 real_magic;
        u16 trampoline_segment; /* segment with trampoline code, 64-bit only */
@@ -39,4 +42,7 @@ extern struct wakeup_header wakeup_header;
 #define WAKEUP_HEADER_SIGNATURE 0x51ee1111
 #define WAKEUP_END_SIGNATURE   0x65a22c82
 
+/* Wakeup behavior bits */
+#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE     0
+
 #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
index 18a857ba7a25a920a1ad83f3acd477d587b28477..103b6ab368d39315bc752e02a7bdc5b83ede0acf 100644 (file)
@@ -77,6 +77,12 @@ int acpi_suspend_lowlevel(void)
 
        header->pmode_cr0 = read_cr0();
        header->pmode_cr4 = read_cr4_safe();
+       header->pmode_behavior = 0;
+       if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
+                       &header->pmode_misc_en_low,
+                       &header->pmode_misc_en_high))
+               header->pmode_behavior |=
+                       (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
        header->realmode_flags = acpi_realmode_flags;
        header->real_magic = 0x12345678;
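
Taken together, the three hunks form a small handshake: suspend latches MISC_ENABLE and sets a behavior bit only if the MSR read succeeded, and the wakeup stub (the btl above) writes the MSR back only when that bit is set. A userspace sketch of the flag protocol, with rdmsr_ok() as a hypothetical stand-in for rdmsr_safe():

#include <stdio.h>
#include <stdint.h>

#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE     0

/* Hypothetical stand-in for rdmsr_safe(); returns 0 on success. */
static int rdmsr_ok(uint64_t *val)
{
        *val = 0x850089;
        return 0;
}

int main(void)
{
        uint32_t behavior = 0;
        uint64_t misc_en = 0;

        /* suspend side: only advertise the value if the read worked */
        if (!rdmsr_ok(&misc_en))
                behavior |= 1u << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE;

        /* wakeup side: restore only what was successfully saved */
        if (behavior & (1u << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE))
                printf("restore MISC_ENABLE = %#llx\n",
                       (unsigned long long)misc_en);
        return 0;
}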
 
index 0c016f727695e9ab1a4544e845fa698a8c540737..14eed214b58468dab58d6c06ff52b486c7f07cf5 100644 (file)
@@ -294,6 +294,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
                },
        },
+       { /* Handle reboot issue on Acer Aspire one */
+               .callback = set_bios_reboot,
+               .ident = "Acer Aspire One A110",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+               },
+       },
        { }
 };
 
@@ -411,6 +419,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
                },
        },
+       {       /* Handle problems with rebooting on the Latitude E6320. */
+               .callback = set_pci_reboot,
+               .ident = "Dell Latitude E6320",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+               },
+       },
        { }
 };
 
index 6df88c7885c0e24bbbdfcad54682d52cbfb085fb..adc98675cda03505cff59be011df491cd271072a 100644 (file)
@@ -3372,7 +3372,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
        bool op_prefix = false;
        struct opcode opcode;
-       struct operand memop = { .type = OP_NONE };
+       struct operand memop = { .type = OP_NONE }, *memopp = NULL;
 
        c->eip = ctxt->eip;
        c->fetch.start = c->eip;
@@ -3547,9 +3547,6 @@ done_prefixes:
        if (memop.type == OP_MEM && c->ad_bytes != 8)
                memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
-       if (memop.type == OP_MEM && c->rip_relative)
-               memop.addr.mem.ea += c->eip;
-
        /*
         * Decode and fetch the source operand: register, memory
         * or immediate.
@@ -3571,6 +3568,7 @@ done_prefixes:
                                                           c->op_bytes;
        srcmem_common:
                c->src = memop;
+               memopp = &c->src;
                break;
        case SrcImmU16:
                rc = decode_imm(ctxt, &c->src, 2, false);
@@ -3667,6 +3665,7 @@ done_prefixes:
        case DstMem:
        case DstMem64:
                c->dst = memop;
+               memopp = &c->dst;
                if ((c->d & DstMask) == DstMem64)
                        c->dst.bytes = 8;
                else
@@ -3700,10 +3699,13 @@ done_prefixes:
                /* Special instructions do their own operand decoding. */
        default:
                c->dst.type = OP_NONE; /* Disable writeback. */
-               return 0;
+               break;
        }
 
 done:
+       if (memopp && memopp->type == OP_MEM && c->rip_relative)
+               memopp->addr.mem.ea += c->eip;
+
        return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 }
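
The move to done: matters because RIP-relative displacements are relative to the address of the next instruction; at the earlier spot, c->eip had not yet advanced past any trailing immediates, so instructions with an immediate after the memory operand got the wrong effective address. A standalone illustration of that off-by-immediate, using made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* movl $imm32, disp32(%rip): c7 05 <disp32> <imm32>, 10 bytes */
        uint64_t start = 0x1000;
        int32_t  disp  = 0x40;

        uint64_t mid_decode_eip = start + 6;    /* imm32 not fetched yet */
        uint64_t final_eip      = start + 10;   /* full instruction read */

        printf("early ea = %#llx (wrong)\n",
               (unsigned long long)(mid_decode_eip + disp));
        printf("final ea = %#llx (right)\n",
               (unsigned long long)(final_eip + disp));
        return 0;
}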
 
index bd14bb4c8594c4cb7b8375e6567e6a0aa56c61cc..aee38623b768edae62394fc09c64742f54b5b955 100644 (file)
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
-       return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
+       return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }
 
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
index 6c4dc010c4cbfdc2dd49f1e82696022373e817fd..9d03ad4dd5ec95366b1e5b22ccf76c8a737a7155 100644 (file)
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    gva_t addr, u32 access)
 {
        pt_element_t pte;
-       pt_element_t __user *ptep_user;
+       pt_element_t __user *uninitialized_var(ptep_user);
        gfn_t table_gfn;
        unsigned index, pt_access, uninitialized_var(pte_access);
        gpa_t pte_gpa;
index 4c3fa0f6746970cef9bb5d26e5808d8bbdabcb1f..d48ec60ea421a8211271195db0193bc42a9a7532 100644 (file)
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                                        unsigned long cr0,
                                        struct kvm_vcpu *vcpu)
 {
-       vmx_decache_cr3(vcpu);
+       if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+               vmx_decache_cr3(vcpu);
        if (!(cr0 & X86_CR0_PG)) {
                /* From paging/starting to nonpaging */
                vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
index d865c4aeec55fb6c57638006c0a05e8fec30aed1..bbaaa005bf0e865a9c3fc84a7ff1ed9854b80888 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/memory.h>
 #include <linux/memory_hotplug.h>
 #include <linux/nmi.h>
 #include <linux/gfp.h>
@@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_X86_UV
-#define MIN_MEMORY_BLOCK_SIZE   (1 << SECTION_SIZE_BITS)
-
 unsigned long memory_block_size_bytes(void)
 {
        if (is_uv_system()) {
index aa1169392b83eb44350ec5da13cd23bfc3477986..992da5ec5a64d69ddc3381d9e8508e4d6061d4ef 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/range.h>
 
 /* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
        struct memblock_region *r;
        u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
                if (addr >= ei_last)
                        continue;
                *sizep = ei_last - addr;
-               while (check_with_memblock_reserved_size(&addr, sizep, align))
+               while (memblock_x86_check_reserved_size(&addr, sizep, align))
                        ;
 
                if (*sizep)
index cf9750004a08fe673eb312414090c7b0c7759b09..68894fdc034bc3cb01cd79b66ec995dadc7b7c67 100644 (file)
@@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy)
 static int nmi_start(void)
 {
        get_online_cpus();
-       on_each_cpu(nmi_cpu_start, NULL, 1);
        ctr_running = 1;
+       /* make ctr_running visible to the nmi handler: */
+       smp_mb();
+       on_each_cpu(nmi_cpu_start, NULL, 1);
        put_online_cpus();
        return 0;
 }
@@ -504,15 +506,18 @@ static int nmi_setup(void)
 
        nmi_enabled = 0;
        ctr_running = 0;
-       barrier();
+       /* make variables visible to the nmi handler: */
+       smp_mb();
        err = register_die_notifier(&profile_exceptions_nb);
        if (err)
                goto fail;
 
        get_online_cpus();
        register_cpu_notifier(&oprofile_cpu_nb);
-       on_each_cpu(nmi_cpu_setup, NULL, 1);
        nmi_enabled = 1;
+       /* make nmi_enabled visible to the nmi handler: */
+       smp_mb();
+       on_each_cpu(nmi_cpu_setup, NULL, 1);
        put_online_cpus();
 
        return 0;
@@ -531,7 +536,8 @@ static void nmi_shutdown(void)
        nmi_enabled = 0;
        ctr_running = 0;
        put_online_cpus();
-       barrier();
+       /* make variables visible to the nmi handler: */
+       smp_mb();
        unregister_die_notifier(&profile_exceptions_nb);
        msrs = &get_cpu_var(cpu_msrs);
        model->shutdown(msrs);
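
Each of these hunks publishes a flag with a full barrier before kicking the other CPUs, so the NMI handler can never be invoked while the flag is still invisible to it. A userspace model of the ordering, using C11 atomics rather than the kernel's smp_mb():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ctr_running;

static void nmi_handler(void)
{
        if (atomic_load_explicit(&ctr_running, memory_order_acquire))
                puts("counting");
}

static void nmi_start(void)
{
        atomic_store_explicit(&ctr_running, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() analogue */
        nmi_handler();          /* stands in for on_each_cpu(nmi_cpu_start) */
}

int main(void)
{
        nmi_start();
        return 0;
}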
index 0972315c3860c40c719f1b03081b892c3e8f20bf..68c3c1395202eec5ad68bfc584d14ee9f4c1975e 100644 (file)
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point)
        return false;
 }
 
-static void coalesce_windows(struct pci_root_info *info, int type)
+static void coalesce_windows(struct pci_root_info *info, unsigned long type)
 {
        int i, j;
        struct resource *res1, *res2;
index 8214724ce54dafeef19290199e4a9657a6468766..f567965c06201cf0315605cde43c6eaa9bb228a5 100644 (file)
@@ -327,7 +327,7 @@ int __init pci_xen_hvm_init(void)
 }
 
 #ifdef CONFIG_XEN_DOM0
-static int xen_register_pirq(u32 gsi, int triggering)
+static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
 {
        int rc, pirq, irq = -1;
        struct physdev_map_pirq map_irq;
@@ -344,16 +344,18 @@ static int xen_register_pirq(u32 gsi, int triggering)
                shareable = 1;
                name = "ioapic-level";
        }
-
        pirq = xen_allocate_pirq_gsi(gsi);
        if (pirq < 0)
                goto out;
 
-       irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
+       if (gsi_override >= 0)
+               irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
+       else
+               irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
        if (irq < 0)
                goto out;
 
-       printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d\n", pirq, irq);
+       printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
 
        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_GSI;
@@ -370,7 +372,7 @@ out:
        return irq;
 }
 
-static int xen_register_gsi(u32 gsi, int triggering, int polarity)
+static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
 {
        int rc, irq;
        struct physdev_setup_gsi setup_gsi;
@@ -381,7 +383,7 @@ static int xen_register_gsi(u32 gsi, int triggering, int polarity)
        printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
                        gsi, triggering, polarity);
 
-       irq = xen_register_pirq(gsi, triggering);
+       irq = xen_register_pirq(gsi, gsi_override, triggering);
 
        setup_gsi.gsi = gsi;
        setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
@@ -403,6 +405,8 @@ static __init void xen_setup_acpi_sci(void)
        int rc;
        int trigger, polarity;
        int gsi = acpi_sci_override_gsi;
+       int irq = -1;
+       int gsi_override = -1;
 
        if (!gsi)
                return;
@@ -419,7 +423,25 @@ static __init void xen_setup_acpi_sci(void)
        printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
                        "polarity=%d\n", gsi, trigger, polarity);
 
-       gsi = xen_register_gsi(gsi, trigger, polarity);
+       /* Before we bind the GSI to a Linux IRQ, check whether
+        * we need to override it with the bus_irq (IRQ) value. Usually for
+        * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, like so:
+        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
+        * but there are oddballs where the IRQ != GSI:
+        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
+        * which ends up being: gsi_to_irq[9] == 20
+        * (which is what acpi_gsi_to_irq ends up calling when starting
+        * the ACPI interpreter and keels over since IRQ 9 has not been
+        * set up, as we had set up IRQ 20 for it).
+        */
+       /* Check whether the GSI != IRQ */
+       if (acpi_gsi_to_irq(gsi, &irq) == 0) {
+               if (irq >= 0 && irq != gsi)
+                       /* Bugger, we MUST have that IRQ. */
+                       gsi_override = irq;
+       }
+
+       gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
        printk(KERN_INFO "xen: acpi sci %d\n", gsi);
 
        return;
@@ -428,7 +450,7 @@ static __init void xen_setup_acpi_sci(void)
 static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
                                 int trigger, int polarity)
 {
-       return xen_register_gsi(gsi, trigger, polarity);
+       return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
 }
 
 static int __init pci_xen_initial_domain(void)
@@ -467,7 +489,7 @@ void __init xen_setup_pirqs(void)
                if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
                        continue;
 
-               xen_register_pirq(irq,
+               xen_register_pirq(irq, -1 /* no GSI override */,
                        trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
        }
 }
index 0d3a4fa34560018c55a2619251bcb0db6546c4d1..899e393d8e7326352f29d64cfc27c9b1820e0314 100644 (file)
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
 
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                efi_memory_desc_t *md = p;
-               unsigned long long start = md->phys_addr;
-               unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+               u64 start = md->phys_addr;
+               u64 size = md->num_pages << EFI_PAGE_SHIFT;
 
                if (md->type != EFI_BOOT_SERVICES_CODE &&
                    md->type != EFI_BOOT_SERVICES_DATA)
                        continue;
-
-               memblock_x86_reserve_range(start, start + size, "EFI Boot");
+               /* Only reserve where possible:
+                * - Not within any already allocated areas
+                * - Not over any memory area (really needed, if above?)
+                * - Not within any part of the kernel
+                * - Not the bios reserved area
+               */
+               if ((start+size >= virt_to_phys(_text)
+                               && start <= virt_to_phys(_end)) ||
+                       !e820_all_mapped(start, start+size, E820_RAM) ||
+                       memblock_x86_check_reserved_size(&start, &size,
+                                                       1<<EFI_PAGE_SHIFT)) {
+                       /* Could not reserve, skip it */
+                       md->num_pages = 0;
+                       memblock_dbg(PFX "Could not reserve boot range "
+                                       "[0x%010llx-0x%010llx]\n",
+                                               start, start+size-1);
+               } else
+                       memblock_x86_reserve_range(start, start+size,
+                                                       "EFI Boot");
        }
 }
 
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
                    md->type != EFI_BOOT_SERVICES_DATA)
                        continue;
 
+               /* Could not reserve boot area */
+               if (!size)
+                       continue;
+
                free_bootmem_late(start, size);
        }
 }
@@ -483,9 +504,6 @@ void __init efi_init(void)
        x86_platform.set_wallclock = efi_set_rtc_mmss;
 #endif
 
-       /* Setup for EFI runtime service */
-       reboot_type = BOOT_EFI;
-
 #if EFI_DEBUG
        print_efi_memmap();
 #endif
index dd7b88f2ec7a6f30bf4ec628fc7f59112cf4d65e..5525163a03985a517e7a043c3daa92fe04d4bb6e 100644 (file)
@@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
        xen_reboot(SHUTDOWN_poweroff);
 }
 
+static void xen_machine_power_off(void)
+{
+       if (pm_power_off)
+               pm_power_off();
+       xen_reboot(SHUTDOWN_poweroff);
+}
+
 static void xen_crash_shutdown(struct pt_regs *regs)
 {
        xen_reboot(SHUTDOWN_crash);
@@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
 static const struct machine_ops xen_machine_ops __initconst = {
        .restart = xen_restart,
        .halt = xen_machine_halt,
-       .power_off = xen_machine_halt,
+       .power_off = xen_machine_power_off,
        .shutdown = xen_machine_halt,
        .crash_shutdown = xen_crash_shutdown,
        .emergency_restart = xen_emergency_restart,
index dc708dcc62f1e5106fefebd390bc24bb1cf97cf1..0ccccb67a99300d3cd6277cb427382bdbe3ce3a5 100644 (file)
@@ -59,6 +59,7 @@
 #include <asm/page.h>
 #include <asm/init.h>
 #include <asm/pat.h>
+#include <asm/smp.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -1231,7 +1232,11 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
        struct {
                struct mmuext_op op;
+#ifdef CONFIG_SMP
+               DECLARE_BITMAP(mask, num_processors);
+#else
                DECLARE_BITMAP(mask, NR_CPUS);
+#endif
        } *args;
        struct multicall_space mcs;
 
@@ -1599,6 +1604,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
                for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
                        pte_t pte;
 
+#ifdef CONFIG_X86_32
+                       if (pfn > max_pfn_mapped)
+                               max_pfn_mapped = pfn;
+#endif
+
                        if (!pte_none(pte_page[pteidx]))
                                continue;
 
@@ -1766,7 +1776,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
        initial_kernel_pmd =
                extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-       max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+       max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+                                 xen_start_info->nr_pt_frames * PAGE_SIZE +
+                                 512*1024);
 
        kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
        memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
index be1a464f6d66eac7f6d24ae74894b321dc1cd4b5..60aeeb56948f753f87559f3e4ff8c1f8704d21a4 100644 (file)
@@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
 
        memcpy(map_raw, map, sizeof(map));
        e820.nr_map = 0;
-#ifdef CONFIG_X86_32
        xen_extra_mem_start = mem_end;
-#else
-       xen_extra_mem_start = max((1ULL << 32), mem_end);
-#endif
        for (i = 0; i < memmap.nr_entries; i++) {
                unsigned long long end;
 
@@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
                if (map[i].size > 0)
                        e820_add_region(map[i].addr, map[i].size, map[i].type);
        }
+       /* Align the balloon area so that max_low_pfn does not get set
+        * to be at the _end_ of the PCI gap at the far end (fee01000).
+        * Note that xen_extra_mem_start gets set in the loop above to be
+        * past the last E820 region. */
+       if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
+               xen_extra_mem_start = (1ULL<<32);
 
        /*
         * In domU, the ISA region is normal, usable memory, but we
index 41038c01de403e48c71052ab615dcf7f647f2653..b4533a86d7e410668e24033cae32d8382bf4111d 100644 (file)
@@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
        unsigned cpu;
+       unsigned int i;
 
        xen_init_lock_cpu(0);
 
        smp_store_cpu_info(0);
        cpu_data(0).x86_max_cores = 1;
+
+       for_each_possible_cpu(i) {
+               zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+       }
        set_cpu_sibling_map(0);
 
        if (xen_smp_intr_init(0))
index a62be8d0dc1b34cfdf832947856669a8ee449a84..3689f833afdc57c12916e2fc985ef8c18ccc2904 100644 (file)
@@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q)
 
        bio_list_init(&bio_list_on_stack);
 
-       throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u",
+       throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
                        total_nr_queued(td), td->nr_queued[READ],
                        td->nr_queued[WRITE]);
 
@@ -1204,7 +1204,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
        }
 
 queue_bio:
-       throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
+       throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
                        " iodisp=%u iops=%u queued=%d/%d",
                        rw == READ ? 'R' : 'W',
                        tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
index 3c7b537bf9081f9f43b90c7cdcd9a95073f1c02b..ae21919f15e1edf2ea6880efc6d5d43deae7fd9f 100644 (file)
@@ -988,9 +988,10 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
-       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
-                       " sect=%u", used_sl, cfqq->slice_dispatch, charge,
-                       iops_mode(cfqd), cfqq->nr_sectors);
+       cfq_log_cfqq(cfqq->cfqd, cfqq,
+                    "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
+                    used_sl, cfqq->slice_dispatch, charge,
+                    iops_mode(cfqd), cfqq->nr_sectors);
        cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
                                          unaccounted_sl);
        cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
@@ -2023,8 +2024,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         */
        if (sample_valid(cic->ttime_samples) &&
            (cfqq->slice_end - jiffies < cic->ttime_mean)) {
-               cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
-                               cic->ttime_mean);
+               cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
+                            cic->ttime_mean);
                return;
        }
 
@@ -2772,8 +2773,14 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
        smp_wmb();
        cic->key = cfqd_dead_key(cfqd);
 
-       if (ioc->ioc_data == cic)
+       rcu_read_lock();
+       if (rcu_dereference(ioc->ioc_data) == cic) {
+               rcu_read_unlock();
+               spin_lock(&ioc->lock);
                rcu_assign_pointer(ioc->ioc_data, NULL);
+               spin_unlock(&ioc->lock);
+       } else
+               rcu_read_unlock();
 
        if (cic->cfqq[BLK_RW_ASYNC]) {
                cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
@@ -3080,7 +3087,8 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 
        spin_lock_irqsave(&ioc->lock, flags);
 
-       BUG_ON(ioc->ioc_data == cic);
+       BUG_ON(rcu_dereference_check(ioc->ioc_data,
+               lockdep_is_held(&ioc->lock)) == cic);
 
        radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
        hlist_del_rcu(&cic->cic_list);
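
The __cfq_exit_single_io_context() change turns a bare pointer comparison into a proper read-then-retract sequence: the cached cic is sampled under rcu_read_lock(), and the clear happens under ioc->lock, matching the locking the BUG_ON in cfq_drop_dead_cic() now documents. A loose userspace model of that pattern, using C11 atomics plus a mutex in place of the RCU API:

#include <stdatomic.h>
#include <pthread.h>
#include <stddef.h>

struct io_context {
        pthread_mutex_t lock;
        _Atomic(void *) ioc_data;       /* kernel: RCU-managed pointer */
};

static void drop_cached_cic(struct io_context *ioc, void *cic)
{
        /* rcu_read_lock() + rcu_dereference() analogue */
        if (atomic_load_explicit(&ioc->ioc_data, memory_order_acquire) == cic) {
                pthread_mutex_lock(&ioc->lock);
                /* rcu_assign_pointer(ioc->ioc_data, NULL) analogue */
                atomic_store_explicit(&ioc->ioc_data, NULL,
                                      memory_order_release);
                pthread_mutex_unlock(&ioc->lock);
        }
}

int main(void)
{
        struct io_context ioc = { PTHREAD_MUTEX_INITIALIZER, NULL };
        int cic;

        atomic_store(&ioc.ioc_data, (void *)&cic);
        drop_cached_cic(&ioc, &cic);
        return atomic_load(&ioc.ioc_data) == NULL ? 0 : 1;
}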
index 95822ae25cfe3213863d4a1e4d8718eea290afe8..3608289c8ecdc0c8506af62421ccaabbae38438b 100644 (file)
@@ -1371,6 +1371,7 @@ struct disk_events {
        struct gendisk          *disk;          /* the associated disk */
        spinlock_t              lock;
 
+       struct mutex            block_mutex;    /* protects blocking */
        int                     block;          /* event blocking depth */
        unsigned int            pending;        /* events already sent out */
        unsigned int            clearing;       /* events being cleared */
@@ -1414,22 +1415,44 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
        return msecs_to_jiffies(intv_msecs);
 }
 
-static void __disk_block_events(struct gendisk *disk, bool sync)
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events().  Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
 {
        struct disk_events *ev = disk->ev;
        unsigned long flags;
        bool cancel;
 
+       if (!ev)
+               return;
+
+       /*
+        * Outer mutex ensures that the first blocker completes canceling
+        * the event work before further blockers are allowed to finish.
+        */
+       mutex_lock(&ev->block_mutex);
+
        spin_lock_irqsave(&ev->lock, flags);
        cancel = !ev->block++;
        spin_unlock_irqrestore(&ev->lock, flags);
 
-       if (cancel) {
-               if (sync)
-                       cancel_delayed_work_sync(&disk->ev->dwork);
-               else
-                       cancel_delayed_work(&disk->ev->dwork);
-       }
+       if (cancel)
+               cancel_delayed_work_sync(&disk->ev->dwork);
+
+       mutex_unlock(&ev->block_mutex);
 }
 
 static void __disk_unblock_events(struct gendisk *disk, bool check_now)
@@ -1460,27 +1483,6 @@ out_unlock:
        spin_unlock_irqrestore(&ev->lock, flags);
 }
 
-/**
- * disk_block_events - block and flush disk event checking
- * @disk: disk to block events for
- *
- * On return from this function, it is guaranteed that event checking
- * isn't in progress and won't happen until unblocked by
- * disk_unblock_events().  Events blocking is counted and the actual
- * unblocking happens after the matching number of unblocks are done.
- *
- * Note that this intentionally does not block event checking from
- * disk_clear_events().
- *
- * CONTEXT:
- * Might sleep.
- */
-void disk_block_events(struct gendisk *disk)
-{
-       if (disk->ev)
-               __disk_block_events(disk, true);
-}
-
 /**
  * disk_unblock_events - unblock disk event checking
  * @disk: disk to unblock events for
@@ -1508,10 +1510,18 @@ void disk_unblock_events(struct gendisk *disk)
  */
 void disk_check_events(struct gendisk *disk)
 {
-       if (disk->ev) {
-               __disk_block_events(disk, false);
-               __disk_unblock_events(disk, true);
+       struct disk_events *ev = disk->ev;
+       unsigned long flags;
+
+       if (!ev)
+               return;
+
+       spin_lock_irqsave(&ev->lock, flags);
+       if (!ev->block) {
+               cancel_delayed_work(&ev->dwork);
+               queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
        }
+       spin_unlock_irqrestore(&ev->lock, flags);
 }
 EXPORT_SYMBOL_GPL(disk_check_events);
 
@@ -1546,7 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
        spin_unlock_irq(&ev->lock);
 
        /* unconditionally schedule event check and wait for it to finish */
-       __disk_block_events(disk, true);
+       disk_block_events(disk);
        queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
        flush_delayed_work(&ev->dwork);
        __disk_unblock_events(disk, false);
@@ -1664,7 +1674,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
        if (intv < 0 && intv != -1)
                return -EINVAL;
 
-       __disk_block_events(disk, true);
+       disk_block_events(disk);
        disk->ev->poll_msecs = intv;
        __disk_unblock_events(disk, true);
 
@@ -1750,6 +1760,7 @@ static void disk_add_events(struct gendisk *disk)
        INIT_LIST_HEAD(&ev->node);
        ev->disk = disk;
        spin_lock_init(&ev->lock);
+       mutex_init(&ev->block_mutex);
        ev->block = 1;
        ev->poll_msecs = -1;
        INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
@@ -1770,7 +1781,7 @@ static void disk_del_events(struct gendisk *disk)
        if (!disk->ev)
                return;
 
-       __disk_block_events(disk, true);
+       disk_block_events(disk);
 
        mutex_lock(&disk_events_mutex);
        list_del_init(&disk->ev->node);
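
The rework above keeps the counted-blocking semantics but makes them explicit: only the 0->1 transition cancels and flushes the work, and block_mutex serializes blockers so the first caller finishes the flush before later ones return. A compact pthreads sketch of that scheme (not the kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t block_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;
static int block;

static void cancel_work_sync(void)
{
        puts("event work cancelled and flushed");
}

static void block_events(void)
{
        int cancel;

        pthread_mutex_lock(&block_mutex);
        pthread_mutex_lock(&ev_lock);
        cancel = !block++;              /* only the first blocker flushes */
        pthread_mutex_unlock(&ev_lock);
        if (cancel)
                cancel_work_sync();     /* may sleep; outside ev_lock */
        pthread_mutex_unlock(&block_mutex);
}

int main(void)
{
        block_events();                 /* flushes */
        block_events();                 /* nested: just bumps the count */
        return 0;
}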
index b5ccae29be7491571301b81beae5b135fa064628..b0165ecad0c5c4a407d76a24b2753bed2b77cf87 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/net.h>
-#include <linux/slab.h>
 
 #define DEFLATE_DEF_LEVEL              Z_DEFAULT_COMPRESSION
 #define DEFLATE_DEF_WINBITS            11
@@ -73,7 +72,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
        int ret = 0;
        struct z_stream_s *stream = &ctx->decomp_stream;
 
-       stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+       stream->workspace = vzalloc(zlib_inflate_workspacesize());
        if (!stream->workspace) {
                ret = -ENOMEM;
                goto out;
@@ -86,7 +85,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
 out:
        return ret;
 out_free:
-       kfree(stream->workspace);
+       vfree(stream->workspace);
        goto out;
 }
 
@@ -99,7 +98,7 @@ static void deflate_comp_exit(struct deflate_ctx *ctx)
 static void deflate_decomp_exit(struct deflate_ctx *ctx)
 {
        zlib_inflateEnd(&ctx->decomp_stream);
-       kfree(ctx->decomp_stream.workspace);
+       vfree(ctx->decomp_stream.workspace);
 }
 
 static int deflate_init(struct crypto_tfm *tfm)
index d11d761a5e418191c29a987f923ac3e2be0f20a6..06b62e5cdcc72a93281051a7c07b5090be9abeaa 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/net.h>
-#include <linux/slab.h>
 
 #include <crypto/internal/compress.h>
 
@@ -60,7 +59,7 @@ static void zlib_decomp_exit(struct zlib_ctx *ctx)
 
        if (stream->workspace) {
                zlib_inflateEnd(stream);
-               kfree(stream->workspace);
+               vfree(stream->workspace);
                stream->workspace = NULL;
        }
 }
@@ -228,13 +227,13 @@ static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params,
                                 ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS])
                                 : DEF_WBITS;
 
-       stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+       stream->workspace = vzalloc(zlib_inflate_workspacesize());
        if (!stream->workspace)
                return -ENOMEM;
 
        ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
        if (ret != Z_OK) {
-               kfree(stream->workspace);
+               vfree(stream->workspace);
                stream->workspace = NULL;
                return -EINVAL;
        }
index abda3786a5d70c4b22738b1245303dbabe2722d6..181bc2f7bb7411a4b300c23c2450ad79ae129a4b 100644 (file)
@@ -139,13 +139,23 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
 {
        struct platform_device *ghes_dev;
        struct ghes_arr *ghes_arr = data;
-       int rc;
+       int rc, i;
 
        if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
                return 0;
 
        if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
                return 0;
+       for (i = 0; i < ghes_arr->count; i++) {
+               struct acpi_hest_header *hdr;
+               ghes_dev = ghes_arr->ghes_devs[i];
+               hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data;
+               if (hdr->source_id == hest_hdr->source_id) {
+                       pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
+                                  hdr->source_id);
+                       return -EIO;
+               }
+       }
        ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
        if (!ghes_dev)
                return -ENOMEM;
index 52ca9649d76925abc1718e9c3bed4391cf189006..372f9b70f7f4dc98e2985532fb19707861c920a8 100644 (file)
@@ -1332,23 +1332,6 @@ int acpi_resources_are_enforced(void)
 }
 EXPORT_SYMBOL(acpi_resources_are_enforced);
 
-/*
- * Create and initialize a spinlock.
- */
-acpi_status
-acpi_os_create_lock(acpi_spinlock *out_handle)
-{
-       spinlock_t *lock;
-
-       lock = ACPI_ALLOCATE(sizeof(spinlock_t));
-       if (!lock)
-               return AE_NO_MEMORY;
-       spin_lock_init(lock);
-       *out_handle = lock;
-
-       return AE_OK;
-}
-
 /*
  * Deallocate the memory for a spinlock.
  */
index d38c40fe4ddbfeace9bb1460505aac4add9d23f1..41223c7f0206f9a5483ebb8b94bcb66a7c8eb24c 100644 (file)
@@ -452,7 +452,7 @@ void ahci_save_initial_config(struct device *dev,
        }
 
        if (mask_port_map) {
-               dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
+               dev_printk(KERN_WARNING, dev, "masking port_map 0x%x -> 0x%x\n",
                           port_map,
                           port_map & mask_port_map);
                port_map &= mask_port_map;
index 736bee5dafebcffc3e13bcf9f8a4b48d13680903..000d03ae6653d7ad294d7477ab84d254d7e2f012 100644 (file)
@@ -4143,9 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         * Devices which choke on SETXFER.  Applies only if both the
         * device and controller are SATA.
         */
-       { "PIONEER DVD-RW  DVRTD08",    "1.00", ATA_HORKAGE_NOSETXFER },
-       { "PIONEER DVD-RW  DVR-212D",   "1.28", ATA_HORKAGE_NOSETXFER },
-       { "PIONEER DVD-RW  DVR-216D",   "1.08", ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVRTD08",    NULL,   ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVR-212D",   NULL,   ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
        /* End Marker */
        { }
index d51f9795c064bbc20b1b4c2a48bc277f09980060..927f968e99d9f8b4dd896c8c8eb5f521fb1cff72 100644 (file)
@@ -3797,6 +3797,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
  */
 int ata_sas_port_start(struct ata_port *ap)
 {
+       /*
+        * the port is marked as frozen at allocation time, but if we don't
+        * have new eh, we won't thaw it
+        */
+       if (!ap->ops->error_handler)
+               ap->pflags &= ~ATA_PFLAG_FROZEN;
        return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_start);
index 75a6a0c0094fb1b8220a50ddd2cc94765003c18b..5d7f58a7e34dba624e5319cbdf15aab24a1f3ea6 100644 (file)
@@ -161,6 +161,9 @@ static const struct pci_device_id marvell_pci_tbl[] = {
        { PCI_DEVICE(0x11AB, 0x6121), },
        { PCI_DEVICE(0x11AB, 0x6123), },
        { PCI_DEVICE(0x11AB, 0x6145), },
+       { PCI_DEVICE(0x1B4B, 0x91A0), },
+       { PCI_DEVICE(0x1B4B, 0x91A4), },
+
        { }     /* terminate list */
 };
 
index 1c4b3aa4c7c40da65c8c6f86f936be6891839e6c..dc88a39e7db8b04c9a2ec3e85ed888ee90194e69 100644 (file)
@@ -389,7 +389,7 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf)
 /*
  * Function: get_burst_length_encode
  * arguments: datalength: length in bytes of data
- * returns value to be programmed in register corrresponding to data length
+ * returns value to be programmed in register corresponding to data length
  * This value is effectively the log(base 2) of the length
  */
 static  int get_burst_length_encode(int datalength)
index 9f9b2359f718f486ba5336807fcd003ab6bff1f5..45d7c8fc73bd46ca3c0ec68baad8a1be695b47c3 100644 (file)
@@ -30,7 +30,6 @@
 static DEFINE_MUTEX(mem_sysfs_mutex);
 
 #define MEMORY_CLASS_NAME      "memory"
-#define MIN_MEMORY_BLOCK_SIZE  (1 << SECTION_SIZE_BITS)
 
 static int sections_per_block;
 
index 1c291af637b3582e1419e665ee81e07ab6bdb019..6040717b62bb4bfc521f76756f3bdba7145d11da 100644 (file)
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
  *
  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  */
-struct platform_device *__init_or_module platform_device_register_resndata(
+struct platform_device *platform_device_register_resndata(
                struct device *parent,
                const char *name, int id,
                const struct resource *res, unsigned int num,
index eaa8a854af03f6d4adc902fece195e7aabb69fd6..ad367c4139b14ead6102ed2e31d03f52c63d1fcf 100644 (file)
@@ -387,7 +387,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
        clknb = container_of(nb, struct pm_clk_notifier_block, nb);
 
        switch (action) {
-       case BUS_NOTIFY_ADD_DEVICE:
+       case BUS_NOTIFY_BIND_DRIVER:
                if (clknb->con_ids[0]) {
                        for (con_id = clknb->con_ids; *con_id; con_id++)
                                enable_clock(dev, *con_id);
@@ -395,7 +395,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
                        enable_clock(dev, NULL);
                }
                break;
-       case BUS_NOTIFY_DEL_DEVICE:
+       case BUS_NOTIFY_UNBOUND_DRIVER:
                if (clknb->con_ids[0]) {
                        for (con_id = clknb->con_ids; *con_id; con_id++)
                                disable_clock(dev, *con_id);
index aa6320207745d636641178f3d9ca89c2d489fe6e..06f09bf89cb2bab3319d800ca2dd7939dfb59a3b 100644 (file)
@@ -57,7 +57,8 @@ static int async_error;
  */
 void device_pm_init(struct device *dev)
 {
-       dev->power.in_suspend = false;
+       dev->power.is_prepared = false;
+       dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
-       if (dev->parent && dev->parent->power.in_suspend)
+       if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
@@ -511,7 +512,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        dpm_wait(dev->parent, async);
        device_lock(dev);
 
-       dev->power.in_suspend = false;
+       /*
+        * This is a fib.  But we'll allow new children to be added below
+        * a resumed device, even if the device hasn't been completed yet.
+        */
+       dev->power.is_prepared = false;
+
+       if (!dev->power.is_suspended)
+               goto Unlock;
 
        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "power domain ");
@@ -548,6 +556,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        }
 
  End:
+       dev->power.is_suspended = false;
+
+ Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);
 
@@ -670,7 +681,7 @@ void dpm_complete(pm_message_t state)
                struct device *dev = to_device(dpm_prepared_list.prev);
 
                get_device(dev);
-               dev->power.in_suspend = false;
+               dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);
 
@@ -835,11 +846,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        device_lock(dev);
 
        if (async_error)
-               goto End;
+               goto Unlock;
 
        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
-               goto End;
+               goto Unlock;
        }
 
        if (dev->pwr_domain) {
@@ -877,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        }
 
  End:
+       dev->power.is_suspended = !error;
+
+ Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);
 
@@ -1042,7 +1056,7 @@ int dpm_prepare(pm_message_t state)
                        put_device(dev);
                        break;
                }
-               dev->power.in_suspend = true;
+               dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
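
Splitting in_suspend into is_prepared and is_suspended pairs each resume step with the suspend step that actually ran: a device whose suspend was cut short (async error, pending wakeup) no longer has resume callbacks invoked for work that never happened. A small state-machine sketch of the idea (simplified; not the driver-core code):

#include <stdbool.h>
#include <stdio.h>

struct dev_pm_info {
        bool is_prepared;
        bool is_suspended;
};

static void device_suspend(struct dev_pm_info *p, bool abort_early)
{
        p->is_prepared = true;
        if (abort_early)
                return;                 /* async_error or wakeup pending */
        p->is_suspended = true;         /* ->suspend() really ran */
}

static void device_resume(struct dev_pm_info *p)
{
        p->is_prepared = false;         /* children may be added again */
        if (!p->is_suspended)
                return;                 /* nothing to undo */
        puts("running resume callbacks");
        p->is_suspended = false;
}

int main(void)
{
        struct dev_pm_info pm = { false, false };

        device_suspend(&pm, true);      /* aborted before ->suspend() */
        device_resume(&pm);             /* correctly skips callbacks */
        return 0;
}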
index c126db3cb7d12c92be4b5d62abfd58d4a699a961..e8d11b6630eeb6ed7b815ffe2e21588965882587 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/interrupt.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -48,6 +49,13 @@ int syscore_suspend(void)
        struct syscore_ops *ops;
        int ret = 0;
 
+       pr_debug("Checking wakeup interrupts\n");
+
+       /* Return error code if there are any wakeup interrupts pending. */
+       ret = check_wakeup_irqs();
+       if (ret)
+               return ret;
+
        WARN_ONCE(!irqs_disabled(),
                "Interrupts enabled before system core suspend.\n");
 
index 09ef9a878ef06335393cbc4509f38425a5465713..cf0e63dd97da9bf09a88bf364cba87956cea4f42 100644 (file)
@@ -79,7 +79,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
        md_io.error = 0;
 
        if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
-               rw |= REQ_FUA;
+               rw |= REQ_FUA | REQ_FLUSH;
        rw |= REQ_SYNC;
 
        bio = bio_alloc(GFP_NOIO, 1);
index f440a02dfdb15519799c64e2ab7c219db9a95ab5..7b976296b564faeed6a676afb22119bb99500174 100644 (file)
@@ -112,9 +112,6 @@ struct drbd_bitmap {
        struct task_struct *bm_task;
 };
 
-static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
-                              unsigned long e, int val, const enum km_type km);
-
 #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
 static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
 {
@@ -994,6 +991,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
                bio_endio(bio, -EIO);
        } else {
                submit_bio(rw, bio);
+               /* this should not count as user activity and cause the
+                * resync to throttle -- see drbd_rs_should_slow_down(). */
+               atomic_add(len >> 9, &mdev->rs_sect_ev);
        }
 }
 
@@ -1256,7 +1256,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
  * expected to be called for only a few bits (e - s about BITS_PER_LONG).
  * Must hold bitmap lock already. */
 static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
-       unsigned long e, int val, const enum km_type km)
+       unsigned long e, int val)
 {
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr = NULL;
@@ -1274,14 +1274,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
                unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
                if (page_nr != last_page_nr) {
                        if (p_addr)
-                               __bm_unmap(p_addr, km);
+                               __bm_unmap(p_addr, KM_IRQ1);
                        if (c < 0)
                                bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
                        else if (c > 0)
                                bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
                        changed_total += c;
                        c = 0;
-                       p_addr = __bm_map_pidx(b, page_nr, km);
+                       p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
                        last_page_nr = page_nr;
                }
                if (val)
@@ -1290,7 +1290,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
                        c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
        }
        if (p_addr)
-               __bm_unmap(p_addr, km);
+               __bm_unmap(p_addr, KM_IRQ1);
        if (c < 0)
                bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
        else if (c > 0)
@@ -1318,7 +1318,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
        if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
                bm_print_lock_info(mdev);
 
-       c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
+       c = __bm_change_bits_to(mdev, s, e, val);
 
        spin_unlock_irqrestore(&b->bm_lock, flags);
        return c;
@@ -1343,16 +1343,17 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
 {
        int i;
        int bits;
-       unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
+       unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
        for (i = first_word; i < last_word; i++) {
                bits = hweight_long(paddr[i]);
                paddr[i] = ~0UL;
                b->bm_set += BITS_PER_LONG - bits;
        }
-       kunmap_atomic(paddr, KM_USER0);
+       kunmap_atomic(paddr, KM_IRQ1);
 }
 
-/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
+/* Same thing as drbd_bm_set_bits,
+ * but more efficient for a large bit range.
  * You must first drbd_bm_lock().
  * Can be called to set the whole bitmap in one go.
  * Sets bits from s to e _inclusive_. */
@@ -1366,6 +1367,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
         * Do not use memset, because we must account for changes,
         * so we need to loop over the words with hweight() anyways.
         */
+       struct drbd_bitmap *b = mdev->bitmap;
        unsigned long sl = ALIGN(s,BITS_PER_LONG);
        unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
        int first_page;
@@ -1376,15 +1378,19 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 
        if (e - s <= 3*BITS_PER_LONG) {
                /* don't bother; el and sl may even be wrong. */
-               __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
+               spin_lock_irq(&b->bm_lock);
+               __bm_change_bits_to(mdev, s, e, 1);
+               spin_unlock_irq(&b->bm_lock);
                return;
        }
 
        /* difference is large enough that we can trust sl and el */
 
+       spin_lock_irq(&b->bm_lock);
+
        /* bits filling the current long */
        if (sl)
-               __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
+               __bm_change_bits_to(mdev, s, sl-1, 1);
 
        first_page = sl >> (3 + PAGE_SHIFT);
        last_page = el >> (3 + PAGE_SHIFT);
@@ -1397,8 +1403,10 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
        /* first and full pages, unless first page == last page */
        for (page_nr = first_page; page_nr < last_page; page_nr++) {
                bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+               spin_unlock_irq(&b->bm_lock);
                cond_resched();
                first_word = 0;
+               spin_lock_irq(&b->bm_lock);
        }
 
        /* last page (respectively only page, for first page == last page) */
@@ -1411,7 +1419,8 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
         * it would trigger an assert in __bm_change_bits_to()
         */
        if (el <= e)
-               __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
+               __bm_change_bits_to(mdev, el, e, 1);
+       spin_unlock_irq(&b->bm_lock);
 }
 
 /* returns bit state
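With the km_type parameter gone, _drbd_bm_set_bits() now takes bm_lock itself, and the loop over full pages drops the spinlock around cond_resched() so that a long sweep over the bitmap cannot hog the CPU with the lock held. A minimal userspace sketch of the same drop-yield-retake shape, with a pthread mutex standing in for the spinlock and all names invented:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pages_done;

/* Analogue of the page loop in _drbd_bm_set_bits(): do one page's worth
 * of work under the lock, then briefly release it so others can run. */
static void process_pages(unsigned long npages)
{
	unsigned long i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < npages; i++) {
		pages_done++;		/* the real work, lock held */

		pthread_mutex_unlock(&lock);
		sched_yield();		/* stand-in for cond_resched() */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	process_pages(1024);
	printf("processed %lu pages\n", pages_done);
	return 0;
}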
index 25d32c5aa50ab58e0b2fdb4e7dfde2377ec96878..43beaca53179846f591fc940bb7108a6cc494e3c 100644 (file)
@@ -4602,6 +4602,11 @@ int drbd_asender(struct drbd_thread *thi)
                        dev_err(DEV, "meta connection shut down by peer.\n");
                        goto reconnect;
                } else if (rv == -EAGAIN) {
+                       /* If the data socket received something meanwhile,
+                        * that is good enough: peer is still alive. */
+                       if (time_after(mdev->last_received,
+                               jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+                               continue;
                        if (ping_timeout_active) {
                                dev_err(DEV, "PingAck did not arrive in time.\n");
                                goto reconnect;
@@ -4637,6 +4642,7 @@ int drbd_asender(struct drbd_thread *thi)
                                goto reconnect;
                }
                if (received == expect) {
+                       mdev->last_received = jiffies;
                        D_ASSERT(cmd != NULL);
                        if (!cmd->process(mdev, h))
                                goto reconnect;
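The liveness test above leans on the jiffies comparison helpers: time_after(a, b) from <linux/jiffies.h> is wraparound-safe because it compares via signed subtraction, and here the peer counts as alive if last_received falls inside the receive-timeout window ending now. A self-contained demonstration of the idiom, including the wrap case (the tick values are made up):

#include <limits.h>
#include <stdio.h>

/* The kernel's wraparound-safe comparison from <linux/jiffies.h>. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long timeout = 100;

	/* Ordinary case: received 5 ticks ago, well inside the window. */
	unsigned long now = 1000, last = 995;
	printf("alive: %d\n", time_after(last, now - timeout));	/* 1 */

	/* Wrap case: the counter wrapped between reception and now. */
	now = 50;			/* just after the wrap */
	last = ULONG_MAX - 10;		/* just before it, 61 ticks ago */
	printf("alive: %d\n", time_after(last, now - timeout));	/* 1 */

	/* Stale case: last reception 200 ticks ago. */
	now = 1000, last = 800;
	printf("alive: %d\n", time_after(last, now - timeout));	/* 0 */
	return 0;
}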
index 4d76b06b6b20966f176e9b3dc9a795345a23798b..4d3e6f6213ba0436cc2eceff16b7876e58d952b6 100644 (file)
@@ -536,12 +536,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
                return 1;
        }
 
-       /* starting with drbd 8.3.8, we can handle multi-bio EEs,
-        * if it should be necessary */
-       max_bio_size =
-               mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
-               mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
-
+       max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
        number = drbd_rs_number_requests(mdev);
        if (number == 0)
                goto requeue;
index fd6305bf953e4084c9b7557a26acb002deb101fe..8ecf4c6c28740243979920dd98ea7bdc638e5172 100644 (file)
@@ -64,6 +64,8 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
                return -EFAULT;
 
        ret = strict_strtol(buf, 10, &result);
+       if (ret)
+               return ret;
 
        priv->btmrvl_dev.hscfgcmd = result;
 
@@ -108,6 +110,8 @@ static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
                return -EFAULT;
 
        ret = strict_strtol(buf, 10, &result);
+       if (ret)
+               return ret;
 
        priv->btmrvl_dev.psmode = result;
 
@@ -147,6 +151,8 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
                return -EFAULT;
 
        ret = strict_strtol(buf, 10, &result);
+       if (ret)
+               return ret;
 
        priv->btmrvl_dev.pscmd = result;
 
@@ -191,6 +197,8 @@ static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
                return -EFAULT;
 
        ret = strict_strtol(buf, 16, &result);
+       if (ret)
+               return ret;
 
        priv->btmrvl_dev.gpio_gap = result;
 
@@ -230,6 +238,8 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
                return -EFAULT;
 
        ret = strict_strtol(buf, 10, &result);
+       if (ret)
+               return ret;
 
        priv->btmrvl_dev.hscmd = result;
        if (priv->btmrvl_dev.hscmd) {
@@ -272,6 +282,8 @@ static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
                return -EFAULT;
 
        ret = strict_strtol(buf, 10, &result);
+       if (ret)
+               return ret;
 
        priv->btmrvl_dev.hsmode = result;
 
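All six btmrvl debugfs writers shared the same bug: the return value of strict_strtol() was assigned to ret but never tested, so garbage input silently became whatever was left in result. A userspace equivalent with strtol(), showing the check the hunks add -- parse_long() is an invented helper that only roughly mirrors what strict_strtol() promised:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a long in the given base, rejecting trailing junk. */
static int parse_long(const char *buf, int base, long *result)
{
	char *end;

	errno = 0;
	*result = strtol(buf, &end, base);
	if (errno || end == buf || *end != '\0')
		return -EINVAL;
	return 0;
}

int main(void)
{
	long value;
	int ret;

	ret = parse_long("42", 10, &value);
	printf("ret=%d value=%ld\n", ret, value);	/* ret=0 value=42 */

	ret = parse_long("42abc", 10, &value);
	printf("ret=%d\n", ret);	/* ret=-22: caller must bail out */
	return 0;
}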
index 999803ce10dc5cae9c3571c155de3a5d1276a5c7..5da67f165afaf8df358d1c884083429948cf781e 100644 (file)
 #define G4x_GMCH_SIZE_MASK     (0xf << 8)
 #define G4x_GMCH_SIZE_1M       (0x1 << 8)
 #define G4x_GMCH_SIZE_2M       (0x3 << 8)
-#define G4x_GMCH_SIZE_VT_1M    (0x9 << 8)
-#define G4x_GMCH_SIZE_VT_1_5M  (0xa << 8)
-#define G4x_GMCH_SIZE_VT_2M    (0xc << 8)
+#define G4x_GMCH_SIZE_VT_EN    (0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M    (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M  ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M    (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
 #define GFX_FLSH_CNTL          0x2170 /* 915+ */
 
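This rewrite is not a pure refactor: expressing the VT variants as size field OR'd with a VT-enable bit keeps the 1M and 1.5M encodings identical but changes the 2M+VT value from 0xc00 to 0xb00, which appears to be the actual fix. The arithmetic checks out directly:

#include <stdio.h>

int main(void)
{
	/* Old encodings */
	printf("old: VT_1M=%#x VT_1_5M=%#x VT_2M=%#x\n",
	       0x9 << 8, 0xa << 8, 0xc << 8);	/* 0x900 0xa00 0xc00 */

	/* New encodings: size field | VT-enable bit (0x8 << 8) */
	printf("new: VT_1M=%#x VT_1_5M=%#x VT_2M=%#x\n",
	       (0x1 << 8) | (0x8 << 8),		/* 0x900: unchanged */
	       (0x2 << 8) | (0x8 << 8),		/* 0xa00: unchanged */
	       (0x3 << 8) | (0x8 << 8));	/* 0xb00: was 0xc00 */
	return 0;
}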
index 219d88a0eeae07c1e963c4997f6894fe79046d2a..dde6a0fad4087fa6225670b2a1393fb03cec0041 100644 (file)
@@ -139,6 +139,7 @@ static int cn_call_callback(struct sk_buff *skb)
        spin_unlock_bh(&dev->cbdev->queue_lock);
 
        if (cbq != NULL) {
+               err = 0;
                cbq->callback(msg, nsp);
                kfree_skb(skb);
                cn_queue_release_callback(cbq);
index 4e04e1274388744efa6689da9fb7922974182999..596d5dd32f4153826e7a05d73dc818bd522b691b 100644 (file)
@@ -759,7 +759,7 @@ static void __exit acpi_cpufreq_exit(void)
 
        cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
-       free_percpu(acpi_perf_data);
+       free_acpi_perf_data();
 }
 
 module_param(acpi_pstate_strict, uint, 0644);
index 853f92d23ddb5ed80c8b2836e878422121ae3f5f..faf7c521784874c0dbbce7a65b9b05d3ea379f2f 100644 (file)
@@ -298,11 +298,13 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
        old_index = stat->last_index;
        new_index = freq_table_get_index(stat, freq->new);
 
-       cpufreq_stats_update(freq->cpu);
-       if (old_index == new_index)
+       /* We can't do stat->time_in_state[-1] = ... */
+       if (old_index == -1 || new_index == -1)
                return 0;
 
-       if (old_index == -1 || new_index == -1)
+       cpufreq_stats_update(freq->cpu);
+
+       if (old_index == new_index)
                return 0;
 
        spin_lock(&cpufreq_stats_lock);
index 83479b6fb9a14fc9ef9826c18274560951c45f19..bce576d7478ed41f9b69ac727cc5d143d850bb83 100644 (file)
@@ -1079,6 +1079,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
        }
 
        res = transition_fid_vid(data, fid, vid);
+       if (res)
+               return res;
+
        freqs.new = find_khz_freq_from_fid(data->currfid);
 
        for_each_cpu(i, data->available_cores) {
@@ -1101,7 +1104,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
        /* get MSR index for hardware pstate transition */
        pstate = index & HW_PSTATE_MASK;
        if (pstate > data->max_hw_pstate)
-               return 0;
+               return -EINVAL;
+
        freqs.old = find_khz_freq_from_pstate(data->powernow_table,
                        data->currpstate);
        freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
index d0e65d6ddc77caf5e860bae1d5fb92f4a25fb00e..676d957c22b0bc8e14bff79e1020845bdb6bbb1a 100644 (file)
@@ -238,9 +238,9 @@ static int build_sh_desc_ipsec(struct caam_ctx *ctx)
 
        /* build shared descriptor for this session */
        sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
-                         keys_fit_inline ?
-                         ctx->split_key_pad_len + ctx->enckeylen :
-                         CAAM_PTR_SZ * 2, GFP_DMA | GFP_KERNEL);
+                         (keys_fit_inline ?
+                          ctx->split_key_pad_len + ctx->enckeylen :
+                          CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
        if (!sh_desc) {
                dev_err(jrdev, "could not allocate shared descriptor\n");
                return -ENOMEM;
index 438e6c83117087d8d10c13418ff3a68edf9314ff..ebb897329c1e947d3392f091f9ef627bc1f5fa82 100644 (file)
@@ -264,6 +264,7 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
 #define PCI_DEVICE_ID_AGERE_FW643      0x5901
 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW        0x2380
 #define PCI_DEVICE_ID_TI_TSB12LV22     0x8009
+#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
 
 #define QUIRK_CYCLE_TIMER              1
 #define QUIRK_RESET_PACKET             2
@@ -3190,6 +3191,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
        int i, err;
        size_t size;
 
+       if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
+               dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
+               return -ENOSYS;
+       }
+
        ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
        if (ohci == NULL) {
                err = -ENOMEM;
index 87096b6ca5c96473b46b97a907ad15863e37268d..2f21b0bfe6534c6478b6ee6ec8fbd8c86351329e 100644 (file)
@@ -13,6 +13,7 @@ menu "Google Firmware Drivers"
 config GOOGLE_SMI
        tristate "SMI interface for Google platforms"
        depends on ACPI && DMI
+       select EFI
        select EFI_VARS
        help
          Say Y here if you want to enable SMI callbacks for Google
index 01f74a8459d99e6b12a7d5bebe5820f17769a62d..35bebde23e835c7674aaaa5e6bbf0e998ab0fd32 100644 (file)
@@ -469,8 +469,9 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
                                        + OMAP24XX_GPIO_CLEARWKUENA);
                }
        }
-       /* This part needs to be executed always for OMAP34xx */
-       if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
+       /* This part must always be executed for OMAP{34xx, 44xx} */
+       if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
+                       (bank->non_wakeup_gpios & gpio_bit)) {
                /*
                 * Log the edge gpio and manually trigger the IRQ
                 * after resume if the input level changes
index bd6571e0097a8be17b8fe3c27db173f3c8d9438b..644ba1255d3c8f0777e7d519efb8c7030c46f39d 100644 (file)
@@ -223,7 +223,7 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
                gedr = gpio_reg(&lnw->chip, base, GEDR);
                pending = readl(gedr);
                while (pending) {
-                       gpio = __ffs(pending) - 1;
+                       gpio = __ffs(pending);
                        mask = BIT(gpio);
                        pending &= ~mask;
                        /* Clear before handling so we can't lose an edge */
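The langwell fix is an off-by-one between the two find-first-set conventions: the kernel's ffs() is 1-based while __ffs() already returns the 0-based bit index, so subtracting 1 from __ffs() pointed one GPIO too low (and underflowed for bit 0). In userspace, ffs() from <strings.h> and GCC's __builtin_ctz() exhibit the same two conventions:

#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int pending = 0x28;	/* bits 3 and 5 set */

	while (pending) {
		int one_based = ffs(pending);		  /* like kernel ffs()   */
		int zero_based = __builtin_ctz(pending); /* like kernel __ffs() */

		printf("ffs=%d __ffs=%d -> gpio %d\n",
		       one_based, zero_based, zero_based);
		pending &= ~(1u << zero_based);	/* clear the handled bit */
	}
	return 0;
}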
index 8d1ddfdd63eb2ab6b89eb749ee831e37ede6b96d..15097ca616d69606e150ab96c62bfa89a38159cf 100644 (file)
@@ -81,8 +81,10 @@ void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
        switch(tps65910_chip_id(tps65910)) {
        case TPS65910:
                tps65910->gpio.ngpio    = 6;
+               break;
        case TPS65911:
                tps65910->gpio.ngpio    = 9;
+               break;
        default:
                return;
        }
index 309644cf4d9b178cd8d91a2d57e348497f7c76c0..2bcfb0be09ff38e1db182c77a78c506fba8f6149 100644 (file)
@@ -180,6 +180,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
                        break;
                case WM831X_GPIO_PULL_UP:
                        pull = "pullup";
+                       break;
                default:
                        pull = "INVALID PULL";
                        break;
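Both GPIO hunks plug the same hole: a case without break falls through into the next one, so a TPS65910 silently got the TPS65911's nine GPIOs and a WM831X pull-up was reported as an invalid pull. A minimal reproduction of the fallthrough, with the fix toggled by a flag:

#include <stdio.h>

enum chip { TPS65910, TPS65911 };

static int ngpio(enum chip id, int fixed)
{
	int n = 0;

	switch (id) {
	case TPS65910:
		n = 6;
		if (fixed)
			break;	/* without this, execution falls through */
	case TPS65911:
		n = 9;
		break;
	}
	return n;
}

int main(void)
{
	printf("buggy: TPS65910 has %d GPIOs\n", ngpio(TPS65910, 0)); /* 9 */
	printf("fixed: TPS65910 has %d GPIOs\n", ngpio(TPS65910, 1)); /* 6 */
	return 0;
}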
index 21058e6ad2b80ccbc10be90795402701270d1585..82db1850666253dc021f8a43e635fe91ebce94df 100644 (file)
@@ -886,9 +886,6 @@ int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
        total_objects += dev->mode_config.num_connector;
        total_objects += dev->mode_config.num_encoder;
 
-       if (total_objects == 0)
-               return -EINVAL;
-
        group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
        if (!group->id_list)
                return -ENOMEM;
index 74e4ff578017b90d3e0649219d71a87163fb5b19..4012fe4234607c1b586c76c1a88e50f99d93267b 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
 #include "drmP.h"
 
 /** @file drm_gem.c
index 4d46441cbe2d830de73ffacdb5821dbdd11f6340..0a893f7400fa1fa58724ce1c621684c643cf3f9f 100644 (file)
@@ -1207,13 +1207,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
        if (ret)
                return ret;
 
-       seq_printf(m, "power context ");
-       describe_obj(m, dev_priv->pwrctx);
-       seq_printf(m, "\n");
+       if (dev_priv->pwrctx) {
+               seq_printf(m, "power context ");
+               describe_obj(m, dev_priv->pwrctx);
+               seq_printf(m, "\n");
+       }
 
-       seq_printf(m, "render context ");
-       describe_obj(m, dev_priv->renderctx);
-       seq_printf(m, "\n");
+       if (dev_priv->renderctx) {
+               seq_printf(m, "render context ");
+               describe_obj(m, dev_priv->renderctx);
+               seq_printf(m, "\n");
+       }
 
        mutex_unlock(&dev->mode_config.mutex);
 
index 0239e9974bf29cf8053c54fd42cb0239b21e65d6..296fbd66f0e168076a195543d9c5ae8c4baa2cd7 100644 (file)
@@ -1266,30 +1266,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_modeset_gem_init(dev);
 
-       if (IS_IVYBRIDGE(dev)) {
-               /* Share pre & uninstall handlers with ILK/SNB */
-               dev->driver->irq_handler = ivybridge_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
-               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ivybridge_enable_vblank;
-               dev->driver->disable_vblank = ivybridge_disable_vblank;
-       } else if (HAS_PCH_SPLIT(dev)) {
-               dev->driver->irq_handler = ironlake_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
-               dev->driver->irq_postinstall = ironlake_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ironlake_enable_vblank;
-               dev->driver->disable_vblank = ironlake_disable_vblank;
-       } else {
-               dev->driver->irq_preinstall = i915_driver_irq_preinstall;
-               dev->driver->irq_postinstall = i915_driver_irq_postinstall;
-               dev->driver->irq_uninstall = i915_driver_irq_uninstall;
-               dev->driver->irq_handler = i915_driver_irq_handler;
-               dev->driver->enable_vblank = i915_enable_vblank;
-               dev->driver->disable_vblank = i915_disable_vblank;
-       }
-
        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem;
@@ -1967,7 +1943,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (!dev_priv->mm.gtt) {
                DRM_ERROR("Failed to initialize GTT\n");
                ret = -ENODEV;
-               goto out_iomapfree;
+               goto out_rmmap;
        }
 
        agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
@@ -2011,18 +1987,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
                ret = -ENOMEM;
-               goto out_iomapfree;
+               goto out_mtrrfree;
        }
 
        /* enable GEM by default */
        dev_priv->has_gem = 1;
 
-       dev->driver->get_vblank_counter = i915_get_vblank_counter;
-       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-       if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
-               dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
-               dev->driver->get_vblank_counter = gm45_get_vblank_counter;
-       }
+       intel_irq_init(dev);
 
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
@@ -2103,13 +2074,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        return 0;
 
 out_gem_unload:
+       if (dev_priv->mm.inactive_shrinker.shrink)
+               unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);
 
        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
        destroy_workqueue(dev_priv->wq);
-out_iomapfree:
+out_mtrrfree:
+       if (dev_priv->mm.gtt_mtrr >= 0) {
+               mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
+                        dev->agp->agp_info.aper_size * 1024 * 1024);
+               dev_priv->mm.gtt_mtrr = -1;
+       }
        io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
        pci_iounmap(dev->pdev, dev_priv->regs);
@@ -2182,9 +2161,8 @@ int i915_driver_unload(struct drm_device *dev)
                /* Flush any outstanding unpin_work. */
                flush_workqueue(dev_priv->wq);
 
-               i915_gem_free_all_phys_object(dev);
-
                mutex_lock(&dev->struct_mutex);
+               i915_gem_free_all_phys_object(dev);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
                if (I915_HAS_FBC(dev) && i915_powersave)
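Several of the i915_driver_load() changes are about keeping the error-path labels in strict reverse order of acquisition: the new out_mtrrfree label undoes the MTRR and the GTT mapping before falling through to out_rmmap, and the shrinker is now unregistered on the out_gem_unload path. A small self-contained sketch of the same goto-unwind shape, with malloc() standing in for the real resources:

#include <stdio.h>
#include <stdlib.h>

/* Allocate in order a, b, c; unwind in exactly the reverse order on
 * error, one label per resource -- the shape i915_driver_load() uses. */
static int driver_load(int fail_at)
{
	char *a, *b, *c;

	a = fail_at == 1 ? NULL : malloc(16);	/* e.g. register map */
	if (!a)
		goto err;
	b = fail_at == 2 ? NULL : malloc(16);	/* e.g. GTT map/MTRR */
	if (!b)
		goto err_free_a;
	c = fail_at == 3 ? NULL : malloc(16);	/* e.g. workqueue */
	if (!c)
		goto err_free_b;

	printf("loaded\n");
	free(c);
	free(b);
	free(a);
	return 0;

err_free_b:	/* labels release in reverse order of acquisition */
	free(b);
err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	printf("fail_at=3 -> %d\n", driver_load(3));	/* b and a freed */
	printf("fail_at=0 -> %d\n", driver_load(0));	/* full load */
	return 0;
}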
index 0defd42705943e1776b3e9447a770048142d1a73..eb91e2dd791495ed5f40575a4f2190a0b8ad276c 100644 (file)
@@ -52,7 +52,7 @@ module_param_named(powersave, i915_powersave, int, 0600);
 unsigned int i915_semaphores = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 
-unsigned int i915_enable_rc6 = 1;
+unsigned int i915_enable_rc6 = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 
 unsigned int i915_enable_fbc = 0;
@@ -577,8 +577,12 @@ int i915_reset(struct drm_device *dev, u8 flags)
        if (get_seconds() - dev_priv->last_gpu_reset < 5) {
                DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
        } else switch (INTEL_INFO(dev)->gen) {
+       case 7:
        case 6:
                ret = gen6_do_reset(dev, flags);
+               /* If reset with a user forcewake, try to restore */
+               if (atomic_read(&dev_priv->forcewake_count))
+                       __gen6_gt_force_wake_get(dev_priv);
                break;
        case 5:
                ret = ironlake_do_reset(dev, flags);
@@ -762,14 +766,6 @@ static struct drm_driver driver = {
        .resume = i915_resume,
 
        .device_is_agp = i915_driver_device_is_agp,
-       .enable_vblank = i915_enable_vblank,
-       .disable_vblank = i915_disable_vblank,
-       .get_vblank_timestamp = i915_get_vblank_timestamp,
-       .get_scanout_position = i915_get_crtc_scanoutpos,
-       .irq_preinstall = i915_driver_irq_preinstall,
-       .irq_postinstall = i915_driver_irq_postinstall,
-       .irq_uninstall = i915_driver_irq_uninstall,
-       .irq_handler = i915_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
        .master_create = i915_master_create,
        .master_destroy = i915_master_destroy,
index f63ee162f1245dcffc5844c6c401df7ce9b885c8..f245c588ae954fe8fe07d4a235dd46b219038a16 100644 (file)
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
        void (*fdi_link_train)(struct drm_crtc *crtc);
        void (*init_clock_gating)(struct drm_device *dev);
        void (*init_pch_clock_gating)(struct drm_device *dev);
+       int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+                         struct drm_framebuffer *fb,
+                         struct drm_i915_gem_object *obj);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@ -994,8 +997,6 @@ extern unsigned int i915_enable_fbc;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
-extern void i915_save_display(struct drm_device *dev);
-extern void i915_restore_display(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
@@ -1030,33 +1031,12 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern int i915_driver_irq_postinstall(struct drm_device *dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
-
-extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
-extern void ironlake_irq_preinstall(struct drm_device *dev);
-extern int ironlake_irq_postinstall(struct drm_device *dev);
-extern void ironlake_irq_uninstall(struct drm_device *dev);
-
-extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
-extern void ivybridge_irq_preinstall(struct drm_device *dev);
-extern int ivybridge_irq_postinstall(struct drm_device *dev);
-extern void ivybridge_irq_uninstall(struct drm_device *dev);
+extern void intel_irq_init(struct drm_device *dev);
 
 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
-extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
-extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
-extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 
@@ -1067,13 +1047,6 @@ void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void intel_enable_asle (struct drm_device *dev);
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
-                             int *max_error,
-                             struct timeval *vblank_time,
-                             unsigned flags);
-
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-                            int *vpos, int *hpos);
 
 #ifdef CONFIG_DEBUG_FS
 extern void i915_destroy_error_state(struct drm_device *dev);
index 94c84d7441007f029ebad7b7d87eb158de5f971a..5c0d1247f4535e1e9ee83076151b95552eb10060 100644 (file)
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;
 
-               page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
@@ -1219,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                ret = i915_gem_object_bind_to_gtt(obj, 0, true);
                if (ret)
                        goto unlock;
-       }
 
-       ret = i915_gem_object_set_to_gtt_domain(obj, write);
-       if (ret)
-               goto unlock;
+               ret = i915_gem_object_set_to_gtt_domain(obj, write);
+               if (ret)
+                       goto unlock;
+       }
 
        if (obj->tiling_mode == I915_TILING_NONE)
                ret = i915_gem_object_put_fence(obj);
@@ -1558,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 
        inode = obj->base.filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
+       gfpmask |= mapping_gfp_mask(mapping);
+
        for (i = 0; i < page_count; i++) {
-               page = read_cache_page_gfp(mapping, i,
-                                          GFP_HIGHUSER |
-                                          __GFP_COLD |
-                                          __GFP_RECLAIMABLE |
-                                          gfpmask);
+               page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(page))
                        goto err_pages;
 
@@ -1701,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
        /* Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
-        * backing pages, *now*. Here we mirror the actions taken
-        * when by shmem_delete_inode() to release the backing store.
+        * backing pages, *now*.
         */
        inode = obj->base.filp->f_path.dentry->d_inode;
-       truncate_inode_pages(inode->i_mapping, 0);
-       if (inode->i_op->truncate_range)
-               inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+       shmem_truncate_range(inode, 0, (loff_t)-1);
 
        obj->madv = __I915_MADV_PURGED;
 }
@@ -2080,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
                if (!ier) {
                        DRM_ERROR("something (likely vbetool) disabled "
                                  "interrupts, re-enabling\n");
-                       i915_driver_irq_preinstall(ring->dev);
-                       i915_driver_irq_postinstall(ring->dev);
+                       ring->dev->driver->irq_preinstall(ring->dev);
+                       ring->dev->driver->irq_postinstall(ring->dev);
                }
 
                trace_i915_gem_request_wait_begin(ring, seqno);
@@ -2926,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
         */
        wmb();
 
-       i915_gem_release_mmap(obj);
-
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
@@ -3567,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
+       struct address_space *mapping;
 
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
@@ -3577,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                return NULL;
        }
 
+       mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+       mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
        i915_gem_info_add_obj(dev_priv, size);
 
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3952,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 
        page_count = obj->base.size / PAGE_SIZE;
        for (i = 0; i < page_count; i++) {
-               struct page *page = read_cache_page_gfp(mapping, i,
-                                                       GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               struct page *page = shmem_read_mapping_page(mapping, i);
                if (!IS_ERR(page)) {
                        char *dst = kmap_atomic(page);
                        memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4014,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                struct page *page;
                char *dst, *src;
 
-               page = read_cache_page_gfp(mapping, i,
-                                          GFP_HIGHUSER | __GFP_RECLAIMABLE);
+               page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
index 20a4cc5b818f51632ad50a94bdfa4092c3f1a454..4934cf84c320336aa84320da544cc1e0d091da1c 100644 (file)
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);
 
-       /* blow away mappings if mapped through GTT */
-       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
-               i915_gem_release_mmap(obj);
-
        if (obj->base.pending_write_domain)
                cd->flips |= atomic_read(&obj->pending_flip);
 
index b9fafe3b045bda5817047f2a5cde8a6fc1b58265..3b03f85ea6276fc1f3d6d5beb67e7c004172a472 100644 (file)
@@ -152,7 +152,7 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
@@ -184,7 +184,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
        return (high1 << 8) | low;
 }
 
-u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);
@@ -198,7 +198,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
        return I915_READ(reg);
 }
 
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -264,7 +264,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
        return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
@@ -462,7 +462,7 @@ static void pch_irq_handler(struct drm_device *dev)
                DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
 }
 
-irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -550,7 +550,7 @@ done:
        return ret;
 }
 
-irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1209,7 +1209,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
        }
 }
 
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1454,7 +1454,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-int i915_enable_vblank(struct drm_device *dev, int pipe)
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1478,7 +1478,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1494,7 +1494,7 @@ int ironlake_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1513,7 +1513,7 @@ int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1529,7 +1529,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1540,7 +1540,7 @@ void ironlake_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -1728,7 +1728,7 @@ repeat:
 
 /* drm_dma.h hooks
 */
-void ironlake_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -1740,6 +1740,17 @@ void ironlake_irq_preinstall(struct drm_device *dev)
                INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
 
        I915_WRITE(HWSTAM, 0xeffe);
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               /* Workaround stalls observed on Sandy Bridge GPUs by
+                * making the blitter command streamer generate a
+                * write to the Hardware Status Page for
+                * MI_USER_INTERRUPT.  This appears to serialize the
+                * previous seqno write out before the interrupt
+                * happens.
+                */
+               I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+               I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
+       }
 
        /* XXX hotplug from PCH */
 
@@ -1758,7 +1769,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
        POSTING_READ(SDEIER);
 }
 
-int ironlake_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
@@ -1830,7 +1841,7 @@ int ironlake_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ivybridge_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
@@ -1880,7 +1891,7 @@ int ivybridge_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-void i915_driver_irq_preinstall(struct drm_device * dev)
+static void i915_driver_irq_preinstall(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
@@ -1907,7 +1918,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
  * Must be called after intel_modeset_init or hotplug interrupts won't be
  * enabled correctly.
  */
-int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i915_driver_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
@@ -1983,7 +1994,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-void ironlake_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -2003,7 +2014,7 @@ void ironlake_irq_uninstall(struct drm_device *dev)
        I915_WRITE(GTIIR, I915_READ(GTIIR));
 }
 
-void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i915_driver_irq_uninstall(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
@@ -2029,3 +2040,41 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
 }
+
+void intel_irq_init(struct drm_device *dev)
+{
+       dev->driver->get_vblank_counter = i915_get_vblank_counter;
+       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+       if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+               dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+               dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+       }
+
+       dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+       dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+       if (IS_IVYBRIDGE(dev)) {
+               /* Share pre & uninstall handlers with ILK/SNB */
+               dev->driver->irq_handler = ivybridge_irq_handler;
+               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+               dev->driver->irq_uninstall = ironlake_irq_uninstall;
+               dev->driver->enable_vblank = ivybridge_enable_vblank;
+               dev->driver->disable_vblank = ivybridge_disable_vblank;
+       } else if (HAS_PCH_SPLIT(dev)) {
+               dev->driver->irq_handler = ironlake_irq_handler;
+               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_postinstall = ironlake_irq_postinstall;
+               dev->driver->irq_uninstall = ironlake_irq_uninstall;
+               dev->driver->enable_vblank = ironlake_enable_vblank;
+               dev->driver->disable_vblank = ironlake_disable_vblank;
+       } else {
+               dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+               dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+               dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+               dev->driver->irq_handler = i915_driver_irq_handler;
+               dev->driver->enable_vblank = i915_enable_vblank;
+               dev->driver->disable_vblank = i915_disable_vblank;
+       }
+}
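intel_irq_init() collects what used to be scattered across i915_dma.c and the static driver struct: all IRQ and vblank entry points are now chosen once, per generation, by filling a table of function pointers. The same pattern in miniature, with invented ops and two fake generations:

#include <stdio.h>

struct drv_ops {
	void (*irq_handler)(void);
	int (*enable_vblank)(int pipe);
};

static void ironlake_irq(void)	{ puts("ironlake irq"); }
static void i915_irq(void)	{ puts("i915 irq"); }
static int ironlake_vblank(int pipe) { printf("ilk vblank %d\n", pipe); return 0; }
static int i915_vblank(int pipe)     { printf("i915 vblank %d\n", pipe); return 0; }

/* Analogue of intel_irq_init(): pick the implementations once, up
 * front, instead of branching on the hardware type at every call. */
static void irq_init(struct drv_ops *ops, int has_pch_split)
{
	if (has_pch_split) {
		ops->irq_handler = ironlake_irq;
		ops->enable_vblank = ironlake_vblank;
	} else {
		ops->irq_handler = i915_irq;
		ops->enable_vblank = i915_vblank;
	}
}

int main(void)
{
	struct drv_ops ops;

	irq_init(&ops, 1);
	ops.irq_handler();		/* dispatches without branching */
	ops.enable_vblank(0);
	return 0;
}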
index 2f967af8e62edced1deff2dc906c8ca28203e20a..5d5def756c9e5beee9ae10def1cbe39d5f7cf02f 100644 (file)
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE           0
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR                   (1 << 3)
 
+#define GEN6_BSD_HWSTAM                        0x12098
 #define GEN6_BSD_IMR                   0x120a8
 #define   GEN6_BSD_USER_INTERRUPT      (1 << 12)
 
index 60a94d2b526482532667dc7b4fba4bf8f5cfbe5c..5257cfc34c3570641929cbb7825d469093170a36 100644 (file)
@@ -597,7 +597,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        return;
 }
 
-void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -689,7 +689,7 @@ void i915_save_display(struct drm_device *dev)
        i915_save_vga(dev);
 }
 
-void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -780,6 +780,7 @@ void i915_restore_display(struct drm_device *dev)
                I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
        else
                I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+
        I915_WRITE(VGA0, dev_priv->saveVGA0);
        I915_WRITE(VGA1, dev_priv->saveVGA1);
        I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -796,6 +797,8 @@ int i915_save_state(struct drm_device *dev)
 
        pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
+       mutex_lock(&dev->struct_mutex);
+
        /* Hardware status page */
        dev_priv->saveHWS = I915_READ(HWS_PGA);
 
@@ -835,6 +838,8 @@ int i915_save_state(struct drm_device *dev)
        for (i = 0; i < 3; i++)
                dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -845,6 +850,8 @@ int i915_restore_state(struct drm_device *dev)
 
        pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
+       mutex_lock(&dev->struct_mutex);
+
        /* Hardware status page */
        I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 
@@ -862,6 +869,7 @@ int i915_restore_state(struct drm_device *dev)
                I915_WRITE(IER, dev_priv->saveIER);
                I915_WRITE(IMR, dev_priv->saveIMR);
        }
+       mutex_unlock(&dev->struct_mutex);
 
        intel_init_clock_gating(dev);
 
@@ -873,6 +881,8 @@ int i915_restore_state(struct drm_device *dev)
        if (IS_GEN6(dev))
                gen6_enable_rps(dev_priv);
 
+       mutex_lock(&dev->struct_mutex);
+
        /* Cache mode state */
        I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
 
@@ -886,6 +896,8 @@ int i915_restore_state(struct drm_device *dev)
        for (i = 0; i < 3; i++)
                I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
 
+       mutex_unlock(&dev->struct_mutex);
+
        intel_i2c_reset(dev);
 
        return 0;
index 81a9059b6a94ea0278bd691342294f4c0893cf21..21b6f93fe9196d277b2b2c7b926a8faf729706b8 100644 (file)
@@ -4687,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 
        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
+       intel_enable_plane(dev_priv, plane, pipe);
 
        ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
@@ -5217,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
-       if (!HAS_PCH_SPLIT(dev))
-               intel_enable_plane(dev_priv, plane, pipe);
 
        ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
@@ -6262,6 +6261,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+static int intel_gen2_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       unsigned long offset;
+       u32 flip_mask;
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+       if (ret)
+               goto out;
+
+       /* Offset into the new buffer for cases of shared fbs between CRTCs */
+       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+       ret = BEGIN_LP_RING(6);
+       if (ret)
+               goto out;
+
+       /* Can't queue multiple flips, so wait for the previous
+        * one to finish before executing the next.
+        */
+       if (intel_crtc->plane)
+               flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+       else
+               flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+       OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+       OUT_RING(MI_NOOP);
+       OUT_RING(MI_DISPLAY_FLIP |
+                MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+       OUT_RING(fb->pitch);
+       OUT_RING(obj->gtt_offset + offset);
+       OUT_RING(MI_NOOP);
+       ADVANCE_LP_RING();
+out:
+       return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       unsigned long offset;
+       u32 flip_mask;
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+       if (ret)
+               goto out;
+
+       /* Offset into the new buffer for cases of shared fbs between CRTCs */
+       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+       ret = BEGIN_LP_RING(6);
+       if (ret)
+               goto out;
+
+       if (intel_crtc->plane)
+               flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+       else
+               flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+       OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+       OUT_RING(MI_NOOP);
+       OUT_RING(MI_DISPLAY_FLIP_I915 |
+                MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+       OUT_RING(fb->pitch);
+       OUT_RING(obj->gtt_offset + offset);
+       OUT_RING(MI_NOOP);
+
+       ADVANCE_LP_RING();
+out:
+       return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       uint32_t pf, pipesrc;
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+       if (ret)
+               goto out;
+
+       ret = BEGIN_LP_RING(4);
+       if (ret)
+               goto out;
+
+       /* i965+ uses the linear or tiled offsets from the
+        * Display Registers (which do not change across a page-flip)
+        * so we need only reprogram the base address.
+        */
+       OUT_RING(MI_DISPLAY_FLIP |
+                MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+       OUT_RING(fb->pitch);
+       OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+       /* XXX Enabling the panel-fitter across page-flip is so far
+        * untested on non-native modes, so ignore it for now.
+        * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+        */
+       pf = 0;
+       pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+       OUT_RING(pf | pipesrc);
+       ADVANCE_LP_RING();
+out:
+       return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       uint32_t pf, pipesrc;
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+       if (ret)
+               goto out;
+
+       ret = BEGIN_LP_RING(4);
+       if (ret)
+               goto out;
+
+       OUT_RING(MI_DISPLAY_FLIP |
+                MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+       OUT_RING(fb->pitch | obj->tiling_mode);
+       OUT_RING(obj->gtt_offset);
+
+       pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+       pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+       OUT_RING(pf | pipesrc);
+       ADVANCE_LP_RING();
+out:
+       return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued.  Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+                                struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+       int ret;
+
+       ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+       if (ret)
+               goto out;
+
+       ret = intel_ring_begin(ring, 4);
+       if (ret)
+               goto out;
+
+       intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+       intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+       intel_ring_emit(ring, (obj->gtt_offset));
+       intel_ring_emit(ring, (MI_NOOP));
+       intel_ring_advance(ring);
+out:
+       return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+                                   struct drm_crtc *crtc,
+                                   struct drm_framebuffer *fb,
+                                   struct drm_i915_gem_object *obj)
+{
+       return -ENODEV;
+}
+
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event)
@@ -6272,9 +6462,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct drm_i915_gem_object *obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
-       unsigned long flags, offset;
-       int pipe = intel_crtc->pipe;
-       u32 pf, pipesrc;
+       unsigned long flags;
        int ret;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6303,9 +6491,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        obj = intel_fb->obj;
 
        mutex_lock(&dev->struct_mutex);
-       ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
-       if (ret)
-               goto cleanup_work;
 
        /* Reference the objects for the scheduled work. */
        drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6317,91 +6502,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        if (ret)
                goto cleanup_objs;
 
-       if (IS_GEN3(dev) || IS_GEN2(dev)) {
-               u32 flip_mask;
-
-               /* Can't queue multiple flips, so wait for the previous
-                * one to finish before executing the next.
-                */
-               ret = BEGIN_LP_RING(2);
-               if (ret)
-                       goto cleanup_objs;
-
-               if (intel_crtc->plane)
-                       flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-               else
-                       flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-               OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-               OUT_RING(MI_NOOP);
-               ADVANCE_LP_RING();
-       }
-
        work->pending_flip_obj = obj;
 
        work->enable_stall_check = true;
 
-       /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
-
-       ret = BEGIN_LP_RING(4);
-       if (ret)
-               goto cleanup_objs;
-
        /* Block clients from rendering to the new back buffer until
         * the flip occurs and the object is no longer visible.
         */
        atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 
-       switch (INTEL_INFO(dev)->gen) {
-       case 2:
-               OUT_RING(MI_DISPLAY_FLIP |
-                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-               OUT_RING(fb->pitch);
-               OUT_RING(obj->gtt_offset + offset);
-               OUT_RING(MI_NOOP);
-               break;
-
-       case 3:
-               OUT_RING(MI_DISPLAY_FLIP_I915 |
-                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-               OUT_RING(fb->pitch);
-               OUT_RING(obj->gtt_offset + offset);
-               OUT_RING(MI_NOOP);
-               break;
-
-       case 4:
-       case 5:
-               /* i965+ uses the linear or tiled offsets from the
-                * Display Registers (which do not change across a page-flip)
-                * so we need only reprogram the base address.
-                */
-               OUT_RING(MI_DISPLAY_FLIP |
-                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-               OUT_RING(fb->pitch);
-               OUT_RING(obj->gtt_offset | obj->tiling_mode);
-
-               /* XXX Enabling the panel-fitter across page-flip is so far
-                * untested on non-native modes, so ignore it for now.
-                * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
-                */
-               pf = 0;
-               pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
-               OUT_RING(pf | pipesrc);
-               break;
-
-       case 6:
-       case 7:
-               OUT_RING(MI_DISPLAY_FLIP |
-                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-               OUT_RING(fb->pitch | obj->tiling_mode);
-               OUT_RING(obj->gtt_offset);
-
-               pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
-               pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
-               OUT_RING(pf | pipesrc);
-               break;
-       }
-       ADVANCE_LP_RING();
+       ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+       if (ret)
+               goto cleanup_pending;
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -6409,10 +6521,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        return 0;
 
+cleanup_pending:
+       atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 cleanup_objs:
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
-cleanup_work:
        mutex_unlock(&dev->struct_mutex);
 
        spin_lock_irqsave(&dev->event_lock, flags);
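A note on the unwind above: the new cleanup_pending label undoes the
atomic_add() before falling through to the existing cleanup, keeping the
error labels in reverse order of the setup steps they undo. A self-contained
sketch of the idiom, with placeholder step names that are not part of the
driver:

    #include <errno.h>

    static int step_a(void) { return 0; }         /* e.g. take references */
    static int step_b(void) { return 0; }         /* e.g. atomic_add(...) */
    static int step_c(void) { return -ENODEV; }   /* e.g. queue_flip(...) */
    static void undo_b(void) { }                  /* e.g. atomic_sub(...) */
    static void undo_a(void) { }                  /* drop references      */

    static int do_flip(void)
    {
            int ret;

            ret = step_a();
            if (ret)
                    return ret;
            ret = step_b();
            if (ret)
                    goto cleanup_a;
            ret = step_c();
            if (ret)
                    goto cleanup_b;
            return 0;

    cleanup_b:
            undo_b();       /* labels run in reverse order of setup */
    cleanup_a:
            undo_a();
            return ret;
    }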
@@ -7657,6 +7770,31 @@ static void intel_init_display(struct drm_device *dev)
                else
                        dev_priv->display.get_fifo_size = i830_get_fifo_size;
        }
+
+       /* Default just returns -ENODEV to indicate unsupported */
+       dev_priv->display.queue_flip = intel_default_queue_flip;
+
+       switch (INTEL_INFO(dev)->gen) {
+       case 2:
+               dev_priv->display.queue_flip = intel_gen2_queue_flip;
+               break;
+
+       case 3:
+               dev_priv->display.queue_flip = intel_gen3_queue_flip;
+               break;
+
+       case 4:
+       case 5:
+               dev_priv->display.queue_flip = intel_gen4_queue_flip;
+               break;
+
+       case 6:
+               dev_priv->display.queue_flip = intel_gen6_queue_flip;
+               break;
+       case 7:
+               dev_priv->display.queue_flip = intel_gen7_queue_flip;
+               break;
+       }
 }
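The hunk above replaces the old per-generation switch in the flip path with a
function pointer chosen once at init time, so intel_crtc_page_flip() stays
generation-agnostic. A minimal sketch of that dispatch pattern; the struct
and names here are illustrative, not the driver's real definitions:

    struct display_ops {
            int (*queue_flip)(int plane);   /* illustrative signature */
    };

    static int unsupported_queue_flip(int plane)
    {
            return -ENODEV;                 /* same default as above */
    }

    static int gen6_flip(int plane)
    {
            return 0;                       /* stand-in for the real emit */
    }

    static void init_display_ops(struct display_ops *ops, int gen)
    {
            ops->queue_flip = unsupported_queue_flip;   /* safe default */
            switch (gen) {
            case 6:
                    ops->queue_flip = gen6_flip;
                    break;
            /* one case per supported hardware generation */
            }
    }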
 
 /*
index 391b55f1cc7496e2e313d77332fafb35ecd4aa15..e2aced6eec4c78c9ee63a8802ac782ce05daa2a0 100644 (file)
@@ -50,7 +50,6 @@ struct intel_dp {
        bool has_audio;
        int force_audio;
        uint32_t color_range;
-       int dpms_mode;
        uint8_t link_bw;
        uint8_t lane_count;
        uint8_t dpcd[4];
@@ -138,8 +137,8 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
        int max_lane_count = 4;
 
-       if (intel_dp->dpcd[0] >= 0x11) {
-               max_lane_count = intel_dp->dpcd[2] & 0x1f;
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+               max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
                switch (max_lane_count) {
                case 1: case 2: case 4:
                        break;
@@ -153,7 +152,7 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
 static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
-       int max_link_bw = intel_dp->dpcd[1];
+       int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
 
        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
@@ -774,7 +773,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        /*
         * Check for DPCD version > 1.1 and enhanced framing support
         */
-       if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+           (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
                intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
                intel_dp->DP |= DP_ENHANCED_FRAMING;
        }
@@ -942,11 +942,44 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
        udelay(200);
 }
 
+/* If the sink supports it, try to set the power state appropriately */
+static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+       int ret, i;
+
+       /* Should have a valid DPCD by this point */
+       if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+               return;
+
+       if (mode != DRM_MODE_DPMS_ON) {
+               ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+                                                 DP_SET_POWER_D3);
+               if (ret != 1)
+                       DRM_DEBUG_DRIVER("failed to write sink power state\n");
+       } else {
+               /*
+                * When turning on, retry a few times, sleeping 1 ms between
+                * attempts, to give the sink time to wake up.
+                */
+               for (i = 0; i < 3; i++) {
+                       ret = intel_dp_aux_native_write_1(intel_dp,
+                                                         DP_SET_POWER,
+                                                         DP_SET_POWER_D0);
+                       if (ret == 1)
+                               break;
+                       msleep(1);
+               }
+       }
+}
+
 static void intel_dp_prepare(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_device *dev = encoder->dev;
 
+       /* Wake up the sink first */
+       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
        if (is_edp(intel_dp)) {
                ironlake_edp_backlight_off(dev);
                ironlake_edp_panel_off(dev);
@@ -990,6 +1023,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        if (mode != DRM_MODE_DPMS_ON) {
                if (is_edp(intel_dp))
                        ironlake_edp_backlight_off(dev);
+               intel_dp_sink_dpms(intel_dp, mode);
                intel_dp_link_down(intel_dp);
                if (is_edp(intel_dp))
                        ironlake_edp_panel_off(dev);
@@ -998,6 +1032,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        } else {
                if (is_edp(intel_dp))
                        ironlake_edp_panel_vdd_on(intel_dp);
+               intel_dp_sink_dpms(intel_dp, mode);
                if (!(dp_reg & DP_PORT_EN)) {
                        intel_dp_start_link_train(intel_dp);
                        if (is_edp(intel_dp)) {
@@ -1009,7 +1044,31 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
                if (is_edp(intel_dp))
                        ironlake_edp_backlight_on(dev);
        }
-       intel_dp->dpms_mode = mode;
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+                              uint8_t *recv, int recv_bytes)
+{
+       int ret, i;
+
+       /*
+        * Sinks are *supposed* to come up within 1ms from an off state,
+        * but we're also supposed to retry 3 times per the spec.
+        */
+       for (i = 0; i < 3; i++) {
+               ret = intel_dp_aux_native_read(intel_dp, address, recv,
+                                              recv_bytes);
+               if (ret == recv_bytes)
+                       return true;
+               msleep(1);
+       }
+
+       return false;
 }
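The helper above generalizes to any AUX access that can race with a sink
waking from D3. A standalone sketch of the same bounded-retry shape,
assuming kernel context for msleep(); the callback type is an illustration,
not a driver API:

    #include <linux/delay.h>
    #include <linux/types.h>

    typedef bool (*aux_op_t)(void *ctx);

    static bool retry_aux_op(aux_op_t op, void *ctx)
    {
            int i;

            /* sinks should wake within 1 ms; the spec says retry 3 times */
            for (i = 0; i < 3; i++) {
                    if (op(ctx))
                            return true;
                    msleep(1);
            }
            return false;
    }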
 
 /*
@@ -1019,14 +1078,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 static bool
 intel_dp_get_link_status(struct intel_dp *intel_dp)
 {
-       int ret;
-
-       ret = intel_dp_aux_native_read(intel_dp,
-                                      DP_LANE0_1_STATUS,
-                                      intel_dp->link_status, DP_LINK_STATUS_SIZE);
-       if (ret != DP_LINK_STATUS_SIZE)
-               return false;
-       return true;
+       return intel_dp_aux_native_read_retry(intel_dp,
+                                             DP_LANE0_1_STATUS,
+                                             intel_dp->link_status,
+                                             DP_LINK_STATUS_SIZE);
 }
 
 static uint8_t
@@ -1515,6 +1570,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 static void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
+       int ret;
+
        if (!intel_dp->base.base.crtc)
                return;
 
@@ -1523,6 +1580,15 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                return;
        }
 
+       /* Try to read receiver status if the link appears to be up */
+       ret = intel_dp_aux_native_read(intel_dp,
+                                      0x000, intel_dp->dpcd,
+                                      sizeof (intel_dp->dpcd));
+       if (ret != sizeof(intel_dp->dpcd)) {
+               intel_dp_link_down(intel_dp);
+               return;
+       }
+
        if (!intel_channel_eq_ok(intel_dp)) {
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
@@ -1533,6 +1599,7 @@ static enum drm_connector_status
 ironlake_dp_detect(struct intel_dp *intel_dp)
 {
        enum drm_connector_status status;
+       bool ret;
 
        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp)) {
@@ -1543,13 +1610,11 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
        }
 
        status = connector_status_disconnected;
-       if (intel_dp_aux_native_read(intel_dp,
-                                    0x000, intel_dp->dpcd,
-                                    sizeof (intel_dp->dpcd))
-           == sizeof(intel_dp->dpcd)) {
-               if (intel_dp->dpcd[0] != 0)
-                       status = connector_status_connected;
-       }
+       ret = intel_dp_aux_native_read_retry(intel_dp,
+                                            0x000, intel_dp->dpcd,
+                                            sizeof (intel_dp->dpcd));
+       if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
+               status = connector_status_connected;
        DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
                      intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
        return status;
@@ -1586,7 +1651,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
        if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
                                     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
        {
-               if (intel_dp->dpcd[0] != 0)
+               if (intel_dp->dpcd[DP_DPCD_REV] != 0)
                        status = connector_status_connected;
        }
 
@@ -1790,8 +1855,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
        struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
 
-       if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
-               intel_dp_check_link_status(intel_dp);
+       intel_dp_check_link_status(intel_dp);
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -1859,7 +1923,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                return;
 
        intel_dp->output_reg = output_reg;
-       intel_dp->dpms_mode = -1;
 
        intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
        if (!intel_connector) {
@@ -1954,8 +2017,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                                               sizeof(intel_dp->dpcd));
                ironlake_edp_panel_vdd_off(intel_dp);
                if (ret == sizeof(intel_dp->dpcd)) {
-                       if (intel_dp->dpcd[0] >= 0x11)
-                               dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+                       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+                               dev_priv->no_aux_handshake =
+                                       intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                        DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
                } else {
                        /* if this fails, presume the device is a ghost */
index a670c006982e5056e418f81aaec1d439a589a7ae..9e2959bc91cddf97e9998fe9f822a49501d90c0e 100644 (file)
@@ -1409,6 +1409,11 @@ void intel_setup_overlay(struct drm_device *dev)
        overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
        if (!overlay)
                return;
+
+       mutex_lock(&dev->struct_mutex);
+       if (WARN_ON(dev_priv->overlay))
+               goto out_free;
+
        overlay->dev = dev;
 
        reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
@@ -1448,7 +1453,7 @@ void intel_setup_overlay(struct drm_device *dev)
 
        regs = intel_overlay_map_regs(overlay);
        if (!regs)
-               goto out_free_bo;
+               goto out_unpin_bo;
 
        memset(regs, 0, sizeof(struct overlay_registers));
        update_polyphase_filter(regs);
@@ -1457,14 +1462,17 @@ void intel_setup_overlay(struct drm_device *dev)
        intel_overlay_unmap_regs(overlay, regs);
 
        dev_priv->overlay = overlay;
+       mutex_unlock(&dev->struct_mutex);
        DRM_INFO("initialized overlay support\n");
        return;
 
 out_unpin_bo:
-       i915_gem_object_unpin(reg_bo);
+       if (!OVERLAY_NEEDS_PHYSICAL(dev))
+               i915_gem_object_unpin(reg_bo);
 out_free_bo:
        drm_gem_object_unreference(&reg_bo->base);
 out_free:
+       mutex_unlock(&dev->struct_mutex);
        kfree(overlay);
        return;
 }
index c0e0ee63fbf4fb012b06beac97238d68f0fa3f4d..39ac2b634ae58dd9aeb28fc1a24426c02f362f7d 100644 (file)
@@ -165,7 +165,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
 static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
 {
-       return intel_wait_ring_buffer(ring, ring->space - 8);
+       return intel_wait_ring_buffer(ring, ring->size - 8);
 }
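Context for the one-line fix above: the ring bookkeeping keeps 8 bytes
permanently in reserve so a full ring is distinguishable from an empty one,
which means a fully drained ring reports space == size - 8. Waiting for
ring->space - 8 therefore succeeded almost immediately; waiting for
ring->size - 8 blocks until the GPU has consumed everything. A simplified
sketch of that accounting:

    static int ring_space(unsigned int head, unsigned int tail,
                          unsigned int size)
    {
            int space = head - (tail + 8);  /* 8 bytes held in reserve */

            if (space < 0)
                    space += size;
            return space;                   /* == size - 8 when drained */
    }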
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
index f0d459bb46e4152a2f40c0fa2e0b54311c79a328..525744d593c1e981cc1039c79391f6aa38813b1d 100644 (file)
@@ -262,7 +262,6 @@ static bool nouveau_dsm_detect(void)
                vga_count++;
 
                retval = nouveau_dsm_pci_probe(pdev);
-               printk("ret val is %d\n", retval);
                if (retval & NOUVEAU_DSM_HAS_MUX)
                        has_dsm |= 1;
                if (retval & NOUVEAU_DSM_HAS_OPT)
index 4b9f4493c9f9d3008d46b64597d7c27d51a21197..7347075ca5b873a2192f57ad0647ee8fb15d8908 100644 (file)
@@ -339,11 +339,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
        int ret;
 
        if (dev_priv->chipset < 0x84) {
-               ret = RING_SPACE(chan, 3);
+               ret = RING_SPACE(chan, 4);
                if (ret)
                        return ret;
 
-               BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
+               BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
+               OUT_RING  (chan, NvSema);
                OUT_RING  (chan, sema->mem->start);
                OUT_RING  (chan, 1);
        } else
@@ -351,10 +352,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
                struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
                u64 offset = vma->offset + sema->mem->start;
 
-               ret = RING_SPACE(chan, 5);
+               ret = RING_SPACE(chan, 7);
                if (ret)
                        return ret;
 
+               BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+               OUT_RING  (chan, chan->vram_handle);
                BEGIN_RING(chan, NvSubSw, 0x0010, 4);
                OUT_RING  (chan, upper_32_bits(offset));
                OUT_RING  (chan, lower_32_bits(offset));
@@ -394,11 +397,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
        int ret;
 
        if (dev_priv->chipset < 0x84) {
-               ret = RING_SPACE(chan, 4);
+               ret = RING_SPACE(chan, 5);
                if (ret)
                        return ret;
 
-               BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+               BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
+               OUT_RING  (chan, NvSema);
                OUT_RING  (chan, sema->mem->start);
                BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
                OUT_RING  (chan, 1);
@@ -407,10 +411,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
                struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
                u64 offset = vma->offset + sema->mem->start;
 
-               ret = RING_SPACE(chan, 5);
+               ret = RING_SPACE(chan, 7);
                if (ret)
                        return ret;
 
+               BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+               OUT_RING  (chan, chan->vram_handle);
                BEGIN_RING(chan, NvSubSw, 0x0010, 4);
                OUT_RING  (chan, upper_32_bits(offset));
                OUT_RING  (chan, lower_32_bits(offset));
@@ -504,22 +510,22 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
        struct nouveau_gpuobj *obj = NULL;
        int ret;
 
-       if (dev_priv->card_type >= NV_C0)
-               goto out_initialised;
+       if (dev_priv->card_type < NV_C0) {
+               /* Create an NV_SW object for various sync purposes */
+               ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
+               if (ret)
+                       return ret;
 
-       /* Create an NV_SW object for various sync purposes */
-       ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
-       if (ret)
-               return ret;
+               ret = RING_SPACE(chan, 2);
+               if (ret)
+                       return ret;
 
-       /* we leave subchannel empty for nvc0 */
-       ret = RING_SPACE(chan, 2);
-       if (ret)
-               return ret;
-       BEGIN_RING(chan, NvSubSw, 0, 1);
-       OUT_RING(chan, NvSw);
+               BEGIN_RING(chan, NvSubSw, 0, 1);
+               OUT_RING  (chan, NvSw);
+               FIRE_RING (chan);
+       }
 
-       /* Create a DMA object for the shared cross-channel sync area. */
+       /* Set up the memory area shared by all channels for cross-channel sync */
        if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
                struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
 
@@ -534,23 +540,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
                nouveau_gpuobj_ref(NULL, &obj);
                if (ret)
                        return ret;
-
-               ret = RING_SPACE(chan, 2);
-               if (ret)
-                       return ret;
-               BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
-               OUT_RING(chan, NvSema);
-       } else {
-               ret = RING_SPACE(chan, 2);
-               if (ret)
-                       return ret;
-               BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
-               OUT_RING  (chan, chan->vram_handle); /* whole VM */
        }
 
-       FIRE_RING(chan);
-
-out_initialised:
        INIT_LIST_HEAD(&chan->fence.pending);
        spin_lock_init(&chan->fence.lock);
        atomic_set(&chan->fence.last_sequence_irq, 0);
index 922fb6b664edda718e2e15f929500a2fe3967d21..ef9dec0e6f8b343c293eb7002560686838731180 100644 (file)
@@ -182,6 +182,11 @@ nouveau_perf_init(struct drm_device *dev)
                entries   = perf[2];
        }
 
+       if (entries > NOUVEAU_PM_MAX_LEVEL) {
+               NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
+               entries = NOUVEAU_PM_MAX_LEVEL;
+       }
+
        entry = perf + headerlen;
        for (i = 0; i < entries; i++) {
                struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
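The clamp added above guards a fixed-size array against a firmware-supplied
count; without it, a vbios reporting too many entries would walk the loop
past pm->perflvl[]. The general shape, with illustrative names:

    #include <linux/kernel.h>   /* ARRAY_SIZE */

    static int clamp_table_entries(int entries, int max)
    {
            if (entries > max) {
                    /* buggy vbios? report and carry on with a safe count */
                    entries = max;
            }
            return entries;
    }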
index 80218887e0a004fc19337f180a93fe4ecb4196d2..731acea865b514e1a918e648de24ed3fa5957314 100644 (file)
@@ -371,7 +371,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->vram.flags_valid        = nv50_vram_flags_valid;
                break;
        case 0xC0:
-       case 0xD0:
                engine->instmem.init            = nvc0_instmem_init;
                engine->instmem.takedown        = nvc0_instmem_takedown;
                engine->instmem.suspend         = nvc0_instmem_suspend;
@@ -881,8 +880,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 
 #ifdef __BIG_ENDIAN
        /* Put the card in BE mode if it's not */
-       if (nv_rd32(dev, NV03_PMC_BOOT_1))
-               nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
+       if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
+               nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
 
        DRM_MEMORYBARRIER();
 #endif
@@ -923,7 +922,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
                dev_priv->card_type = NV_50;
                break;
        case 0xc0:
-       case 0xd0:
                dev_priv->card_type = NV_C0;
                break;
        default:
index 74a3f687270124cdfb32ddd55ce78f9ae70a6157..08da478ba544e312ab72b4b23b1db23607a433e5 100644 (file)
@@ -409,7 +409,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        struct nouveau_channel *evo = dispc->sync;
        int ret;
 
-       ret = RING_SPACE(evo, 24);
+       ret = RING_SPACE(evo, chan ? 25 : 27);
        if (unlikely(ret))
                return ret;
 
@@ -458,8 +458,19 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        /* queue the flip on the crtc's "display sync" channel */
        BEGIN_RING(evo, 0, 0x0100, 1);
        OUT_RING  (evo, 0xfffe0000);
-       BEGIN_RING(evo, 0, 0x0084, 5);
-       OUT_RING  (evo, chan ? 0x00000100 : 0x00000010);
+       if (chan) {
+               BEGIN_RING(evo, 0, 0x0084, 1);
+               OUT_RING  (evo, 0x00000100);
+       } else {
+               BEGIN_RING(evo, 0, 0x0084, 1);
+               OUT_RING  (evo, 0x00000010);
+               /* this somehow enables gamma; PDISP will complain if
+                * you don't wait for vblank before changing it.
+                */
+               BEGIN_RING(evo, 0, 0x00e0, 1);
+               OUT_RING  (evo, 0x40000000);
+       }
+       BEGIN_RING(evo, 0, 0x0088, 4);
        OUT_RING  (evo, dispc->sem.offset);
        OUT_RING  (evo, 0xf00d0000 | dispc->sem.value);
        OUT_RING  (evo, 0x74b1e000);
index 7e3d96e7ac042814527db77a3bd305d5a3214de4..15bd0477a3e8714da218abdc07bec96038f5244b 100644 (file)
@@ -140,11 +140,17 @@ void evergreen_pm_misc(struct radeon_device *rdev)
        struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
        if (voltage->type == VOLTAGE_SW) {
+               /* 0xff01 is a flag rather than an actual voltage */
+               if (voltage->voltage == 0xff01)
+                       return;
                if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
                        radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
                        rdev->pm.current_vddc = voltage->voltage;
                        DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
                }
+               /* 0xff01 is a flag rather than an actual voltage */
+               if (voltage->vddci == 0xff01)
+                       return;
                if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
                        radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
                        rdev->pm.current_vddci = voltage->vddci;
@@ -979,17 +985,19 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
 {
        save->vga_control[0] = RREG32(D1VGA_CONTROL);
        save->vga_control[1] = RREG32(D2VGA_CONTROL);
-       save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
-       save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
-       save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
-       save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
        save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
        save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
        save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
        save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
+               save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+               save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
                save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
                save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+       }
+       if (rdev->num_crtc >= 6) {
+               save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+               save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
                save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
                save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
        }
@@ -998,35 +1006,45 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
        WREG32(VGA_RENDER_CONTROL, 0);
        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
        }
        WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
                WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
                WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
 
        WREG32(D1VGA_CONTROL, 0);
        WREG32(D2VGA_CONTROL, 0);
-       WREG32(EVERGREEN_D3VGA_CONTROL, 0);
-       WREG32(EVERGREEN_D4VGA_CONTROL, 0);
-       WREG32(EVERGREEN_D5VGA_CONTROL, 0);
-       WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+       if (rdev->num_crtc >= 4) {
+               WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+               WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+       }
+       if (rdev->num_crtc >= 6) {
+               WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+               WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+       }
 }
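The same num_crtc guard now repeats for every register bank in these
save/restore paths, since the parts expose 2, 4 or 6 CRTCs and the IGP flag
alone no longer captures that. A table-driven way to express the repetition
once, sketched against the driver's own macros (treat it as an illustration,
not the patch's code):

    static const u32 crtc_offsets[6] = {
            EVERGREEN_CRTC0_REGISTER_OFFSET, EVERGREEN_CRTC1_REGISTER_OFFSET,
            EVERGREEN_CRTC2_REGISTER_OFFSET, EVERGREEN_CRTC3_REGISTER_OFFSET,
            EVERGREEN_CRTC4_REGISTER_OFFSET, EVERGREEN_CRTC5_REGISTER_OFFSET,
    };

    static void evergreen_crtc_lock_all(struct radeon_device *rdev, u32 v)
    {
            int i;

            /* rdev->num_crtc is 2, 4 or 6 depending on the ASIC */
            for (i = 0; i < rdev->num_crtc; i++)
                    WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], v);
    }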
 
 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1049,7 +1067,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
        WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
               (u32)rdev->mc.vram_start);
 
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
                       upper_32_bits(rdev->mc.vram_start));
                WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
@@ -1067,7 +1085,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
                       (u32)rdev->mc.vram_start);
                WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
                       (u32)rdev->mc.vram_start);
-
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
                       upper_32_bits(rdev->mc.vram_start));
                WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
@@ -1095,31 +1114,41 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
        /* Restore video state */
        WREG32(D1VGA_CONTROL, save->vga_control[0]);
        WREG32(D2VGA_CONTROL, save->vga_control[1]);
-       WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
-       WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
-       WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
-       WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+       if (rdev->num_crtc >= 4) {
+               WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
+               WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
+       }
+       if (rdev->num_crtc >= 6) {
+               WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
+               WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+       }
        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
        }
        WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
        WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
                WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
                WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
        }
        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
@@ -1971,7 +2000,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                        gb_backend_map = 0x66442200;
                        break;
                case CHIP_JUNIPER:
-                       gb_backend_map = 0x00006420;
+                       gb_backend_map = 0x00002200;
                        break;
                default:
                        gb_backend_map =
@@ -2007,9 +2036,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.tile_config |= (3 << 0);
                break;
        }
-       /* num banks is 8 on all fusion asics */
+       /* num banks is 8 on all fusion asics (field encoding: 0 = 4, 1 = 8, 2 = 16) */
        if (rdev->flags & RADEON_IS_IGP)
-               rdev->config.evergreen.tile_config |= 8 << 4;
+               rdev->config.evergreen.tile_config |= 1 << 4;
        else
                rdev->config.evergreen.tile_config |=
                        ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
@@ -2242,7 +2271,10 @@ int evergreen_mc_init(struct radeon_device *rdev)
 
        /* Get VRAM information */
        rdev->mc.vram_is_ddr = true;
-       tmp = RREG32(MC_ARB_RAMCFG);
+       if (rdev->flags & RADEON_IS_IGP)
+               tmp = RREG32(FUS_MC_ARB_RAMCFG);
+       else
+               tmp = RREG32(MC_ARB_RAMCFG);
        if (tmp & CHANSIZE_OVERRIDE) {
                chansize = 16;
        } else if (tmp & CHANSIZE_MASK) {
@@ -2408,18 +2440,22 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
                WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
                WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
 
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
@@ -2538,19 +2574,25 @@ int evergreen_irq_set(struct radeon_device *rdev)
 
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
-       if (!(rdev->flags & RADEON_IS_IGP)) {
+       if (rdev->num_crtc >= 4) {
                WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
                WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+       }
+       if (rdev->num_crtc >= 6) {
                WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
                WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
        }
 
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+       if (rdev->num_crtc >= 4) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+       }
+       if (rdev->num_crtc >= 6) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+       }
 
        WREG32(DC_HPD1_INT_CONTROL, hpd1);
        WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2574,53 +2616,57 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev)
        rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
        rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
        rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
-       rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
-       rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
-       rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
-       rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+       if (rdev->num_crtc >= 4) {
+               rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+               rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+       }
+       if (rdev->num_crtc >= 6) {
+               rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+               rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+       }
 
        if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
                WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
        if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
                WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
-       if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
-               WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
-       if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
-               WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
-       if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
-               WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
-       if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
-               WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
-
        if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
                WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
        if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
                WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
-
        if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
                WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
        if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
                WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
 
-       if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
-               WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
-       if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
-               WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
-
-       if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
-               WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
-       if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
-               WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
-
-       if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
-               WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
-       if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
-               WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
-
-       if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
-               WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
-       if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
-               WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+       if (rdev->num_crtc >= 4) {
+               if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+               if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+                       WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+                       WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+                       WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+                       WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+       }
+
+       if (rdev->num_crtc >= 6) {
+               if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+               if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+                       WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+                       WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+                       WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+                       WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+                       WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+       }
 
        if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
                tmp = RREG32(DC_HPD1_INT_CONTROL);
@@ -2695,28 +2741,25 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
 
 int evergreen_irq_process(struct radeon_device *rdev)
 {
-       u32 wptr = evergreen_get_ih_wptr(rdev);
-       u32 rptr = rdev->ih.rptr;
+       u32 wptr;
+       u32 rptr;
        u32 src_id, src_data;
        u32 ring_index;
        unsigned long flags;
        bool queue_hotplug = false;
 
-       DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
-       if (!rdev->ih.enabled)
+       if (!rdev->ih.enabled || rdev->shutdown)
                return IRQ_NONE;
 
-       spin_lock_irqsave(&rdev->ih.lock, flags);
+       wptr = evergreen_get_ih_wptr(rdev);
+       rptr = rdev->ih.rptr;
+       DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
+       spin_lock_irqsave(&rdev->ih.lock, flags);
        if (rptr == wptr) {
                spin_unlock_irqrestore(&rdev->ih.lock, flags);
                return IRQ_NONE;
        }
-       if (rdev->shutdown) {
-               spin_unlock_irqrestore(&rdev->ih.lock, flags);
-               return IRQ_NONE;
-       }
-
 restart_ih:
        /* display interrupts */
        evergreen_irq_ack(rdev);
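The reordering above lets the handler bail on cheap software checks (IH
disabled, shutdown in progress) before it ever reads the hardware write
pointer, and folds the old separate shutdown test into that early exit. The
resulting shape, sketched with illustrative types and a hypothetical
accessor:

    #include <linux/interrupt.h>
    #include <linux/types.h>

    struct ih_state { bool enabled, shutdown; u32 rptr; };

    static u32 read_hw_wptr(struct ih_state *ih)
    {
            return ih->rptr;        /* stand-in for the register read */
    }

    static int irq_process(struct ih_state *ih)
    {
            u32 wptr, rptr;

            /* cheap software checks first: never touch the hardware
             * ring when the IH is disabled or being torn down */
            if (!ih->enabled || ih->shutdown)
                    return IRQ_NONE;

            wptr = read_hw_wptr(ih);
            rptr = ih->rptr;
            if (rptr == wptr)
                    return IRQ_NONE;

            /* ... drain entries from rptr to wptr under the IH lock ... */
            return IRQ_HANDLED;
    }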
@@ -3231,6 +3274,7 @@ void evergreen_fini(struct radeon_device *rdev)
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
+       radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
        radeon_gem_fini(rdev);
index 57f3bc17b87e09a9dd0d1fd99fe66800e43ee9fb..2eb251858e7283d37b5311637815abae8dd50e0e 100644 (file)
@@ -252,7 +252,7 @@ draw_auto(struct radeon_device *rdev)
 
 }
 
-/* emits 36 */
+/* emits 39 */
 static void
 set_default_state(struct radeon_device *rdev)
 {
@@ -531,6 +531,11 @@ set_default_state(struct radeon_device *rdev)
                radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
                radeon_ring_write(rdev, 0);
 
+               /* setup LDS */
+               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+               radeon_ring_write(rdev, 0x10001000);
+
                /* SQ config */
                radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
                radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
@@ -773,7 +778,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
        /* calculate number of loops correctly */
        ring_size = num_loops * dwords_per_loop;
        /* set default  + shaders */
-       ring_size += 52; /* shaders + def state */
+       ring_size += 55; /* shaders + def state */
        ring_size += 10; /* fence emit for VB IB */
        ring_size += 5; /* done copy */
        ring_size += 10; /* fence emit for done copy */
index 1636e34498252c3550e17a6515aa6097974bbf7f..b7b2714f0b327d379aa21d2a478b463e067f1bce 100644 (file)
 #define IH_RB_WPTR_ADDR_LO                                0x3e14
 #define IH_CNTL                                           0x3e18
 #       define ENABLE_INTR                                (1 << 0)
-#       define IH_MC_SWAP(x)                              ((x) << 2)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
 #       define IH_MC_SWAP_NONE                            0
 #       define IH_MC_SWAP_16BIT                           1
 #       define IH_MC_SWAP_32BIT                           2
 #       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
 #       define DC_HPD5_INTERRUPT                        (1 << 17)
 #       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6050
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6150
 #       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
 #       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
 #       define DC_HPD6_INTERRUPT                        (1 << 17)
index 16caafeadf5e5603208a3055b2344355106d5a05..559dbd412906096f762f3d41cacecdf4e0810794 100644 (file)
@@ -1581,6 +1581,7 @@ void cayman_fini(struct radeon_device *rdev)
        cayman_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
+       radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        cayman_pcie_gart_fini(rdev);
        radeon_gem_fini(rdev);
index 9736746da2d6d79b3a9b4c3e6e2e286fc524c901..4672869cdb265f0d4f0bd5b58ea99ee5afa761ec 100644 (file)
 #define        CGTS_USER_TCC_DISABLE                           0x914C
 #define                TCC_DISABLE_MASK                                0xFFFF0000
 #define                TCC_DISABLE_SHIFT                               16
-#define        CGTS_SM_CTRL_REG                                0x915C
+#define        CGTS_SM_CTRL_REG                                0x9150
 #define                OVERRIDE                                (1 << 21)
 
 #define        TA_CNTL_AUX                                     0x9508
index 7dd45ca64e29464b717a837f04a1be049e5a9292..bc54b26cb32f75be52d31daff0e4f4effe40784c 100644 (file)
@@ -590,6 +590,9 @@ void r600_pm_misc(struct radeon_device *rdev)
        struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
        if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+               /* 0xff01 is a flag rather than an actual voltage */
+               if (voltage->voltage == 0xff01)
+                       return;
                if (voltage->voltage != rdev->pm.current_vddc) {
                        radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
                        rdev->pm.current_vddc = voltage->voltage;
@@ -2625,6 +2628,7 @@ void r600_fini(struct radeon_device *rdev)
        r600_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
+       radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        r600_pcie_gart_fini(rdev);
        radeon_agp_fini(rdev);
@@ -3294,27 +3298,26 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
 
 int r600_irq_process(struct radeon_device *rdev)
 {
-       u32 wptr = r600_get_ih_wptr(rdev);
-       u32 rptr = rdev->ih.rptr;
+       u32 wptr;
+       u32 rptr;
        u32 src_id, src_data;
        u32 ring_index;
        unsigned long flags;
        bool queue_hotplug = false;
 
-       DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
-       if (!rdev->ih.enabled)
+       if (!rdev->ih.enabled || rdev->shutdown)
                return IRQ_NONE;
 
+       wptr = r600_get_ih_wptr(rdev);
+       rptr = rdev->ih.rptr;
+       DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
        spin_lock_irqsave(&rdev->ih.lock, flags);
 
        if (rptr == wptr) {
                spin_unlock_irqrestore(&rdev->ih.lock, flags);
                return IRQ_NONE;
        }
-       if (rdev->shutdown) {
-               spin_unlock_irqrestore(&rdev->ih.lock, flags);
-               return IRQ_NONE;
-       }
 
 restart_ih:
        /* display interrupts */
index f140a0d5cb543c0281c8e8a85f62df873fa51809..0245ae6c204ec60a739dd2200eff1d858e9a38da 100644 (file)
 #define IH_RB_WPTR_ADDR_LO                                0x3e14
 #define IH_CNTL                                           0x3e18
 #       define ENABLE_INTR                                (1 << 0)
-#       define IH_MC_SWAP(x)                              ((x) << 2)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
 #       define IH_MC_SWAP_NONE                            0
 #       define IH_MC_SWAP_16BIT                           1
 #       define IH_MC_SWAP_32BIT                           2
index 27f45579e64ba4d895037db76241814bd24f5d14..ef0e0e016914d53aadcddbd897913d07da495bd4 100644 (file)
@@ -179,6 +179,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
 void rs690_pm_info(struct radeon_device *rdev);
 extern int rv6xx_get_temp(struct radeon_device *rdev);
 extern int rv770_get_temp(struct radeon_device *rdev);
index fa62a503ae70e9daf5b371aa12b2ff1dbdc0a508..bf2b61584cdb5a70fc06fbc41dd2ca51733532fc 100644 (file)
@@ -2320,6 +2320,14 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
                        le16_to_cpu(clock_info->r600.usVDDC);
        }
 
+       /* patch up vddc if necessary */
+       if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
+               u16 vddc;
+
+               if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
+                       rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+       }
+
        if (rdev->flags & RADEON_IS_IGP) {
                /* skip invalid modes */
                if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
@@ -2607,6 +2615,10 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
        if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
                return;
 
+       /* 0xff01 is a flag rather than an actual voltage */
+       if (voltage_level == 0xff01)
+               return;
+
        switch (crev) {
        case 1:
                args.v1.ucVoltageType = voltage_type;
@@ -2626,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+int radeon_atom_get_max_vddc(struct radeon_device *rdev,
+                            u16 *voltage)
+{
+       union set_voltage args;
+       int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+       u8 frev, crev;
+
+       if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+               return -EINVAL;
+
+       switch (crev) {
+       case 1:
+               return -EINVAL;
+       case 2:
+               args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+               args.v2.ucVoltageMode = 0;
+               args.v2.usVoltageLevel = 0;
+
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
+               *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+               break;
+       default:
+               DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+               return -EINVAL;
+       }
+
+       return 0;
+}
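Taken with the 0xff01 checks added elsewhere in this series, the flow is: a
power-state entry whose voltage reads 0xff01 asks for the board's maximum
VDDC rather than a literal level, so it is resolved once at parse time via
this helper and skipped by the voltage setters if resolution fails. A
fragment mirroring the pplib patch-up hunk earlier in this file:

    u16 vddc;

    /* 0xff01 is a flag rather than an actual voltage */
    if (clock_info->voltage.voltage == 0xff01 &&
        radeon_atom_get_max_vddc(rdev, &vddc) == 0)
            clock_info->voltage.voltage = vddc;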
 
 void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
 {
index 1aba85cad1a812956f3caee266e75a11894310c8..229a20f10e2b0c548b02cd5df527b9b72d6fe31a 100644 (file)
@@ -104,7 +104,7 @@ static bool radeon_read_bios(struct radeon_device *rdev)
 static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 {
        int ret;
-       int size = 64 * 1024;
+       int size = 256 * 1024;
        int i;
 
        if (!radeon_atrm_supported(rdev->pdev))
@@ -331,7 +331,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
 
        seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
        viph_control = RREG32(RADEON_VIPH_CONTROL);
-       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       bus_cntl = RREG32(RV370_BUS_CNTL);
        d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
        d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
        vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -350,7 +350,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
        WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
        /* enable the rom */
-       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+       WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
 
        /* Disable VGA mode */
        WREG32(AVIVO_D1VGA_CONTROL,
@@ -367,7 +367,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
        /* restore regs */
        WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
        WREG32(RADEON_VIPH_CONTROL, viph_control);
-       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       WREG32(RV370_BUS_CNTL, bus_cntl);
        WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
        WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
        WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
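
On PCIE-era parts the BIOS-ROM disable bit lives in RV370_BUS_CNTL (0x004c) rather than the legacy RADEON_BUS_CNTL, so every save/modify/restore of that bit has to pick the register by bus type; the legacy_read_disabled_bios() hunks below apply the same selection. A minimal sketch of the idea, under an assumed helper name:

```c
/* Sketch (assumed helper name): make the ROM readable by clearing the
 * disable bit in whichever register holds it on this generation. */
static void radeon_enable_rom(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_PCIE)
		WREG32(RV370_BUS_CNTL,
		       RREG32(RV370_BUS_CNTL) & ~RV370_BUS_BIOS_DIS_ROM);
	else
		WREG32(RADEON_BUS_CNTL,
		       RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_BIOS_DIS_ROM);
}
```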
@@ -390,7 +390,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 
        seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
        viph_control = RREG32(RADEON_VIPH_CONTROL);
-       bus_cntl = RREG32(RADEON_BUS_CNTL);
+       if (rdev->flags & RADEON_IS_PCIE)
+               bus_cntl = RREG32(RV370_BUS_CNTL);
+       else
+               bus_cntl = RREG32(RADEON_BUS_CNTL);
        crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
        crtc2_gen_cntl = 0;
        crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
@@ -412,7 +415,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
        WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 
        /* enable the rom */
-       WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+       if (rdev->flags & RADEON_IS_PCIE)
+               WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+       else
+               WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
 
        /* Turn off mem requests and CRTC for both controllers */
        WREG32(RADEON_CRTC_GEN_CNTL,
@@ -439,7 +445,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
        /* restore regs */
        WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
        WREG32(RADEON_VIPH_CONTROL, viph_control);
-       WREG32(RADEON_BUS_CNTL, bus_cntl);
+       if (rdev->flags & RADEON_IS_PCIE)
+               WREG32(RV370_BUS_CNTL, bus_cntl);
+       else
+               WREG32(RADEON_BUS_CNTL, bus_cntl);
        WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
        if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
                WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
index cbfca3a24fdf9caac828d4cca8ce8445963af4c4..9792d4ffdc86250e102457c5bd36765098dde7e3 100644 (file)
@@ -52,6 +52,12 @@ void radeon_connector_hotplug(struct drm_connector *connector)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
+       /* bail if the connector does not have an hpd pin, e.g.,
+        * VGA, TV, etc.
+        */
+       if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+               return;
+
        radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
        /* powering up/down the eDP panel generates hpd events which
index f55b64cb59d1d0170b25e35605354bca56bfd3ab..b293487e5aa3dc2193fee36eb0f9c00fe7080a0c 100644 (file)
@@ -1090,9 +1090,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                        break;
                }
 
-               if (is_dp)
+               if (is_dp) {
                        args.v2.acConfig.fCoherentMode = 1;
-               else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+                       args.v2.acConfig.fDPConnector = 1;
+               } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                        if (dig->coherent_mode)
                                args.v2.acConfig.fCoherentMode = 1;
                        if (radeon_encoder->pixel_clock > 165000)
@@ -1431,7 +1432,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
        if (is_dig) {
                switch (mode) {
                case DRM_MODE_DPMS_ON:
-                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+                       /* some early dce3.2 boards have a bug in their transmitter control table */
+                       if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+                               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+                       else
+                               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
                        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
                                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
index ec93a75369e671c9b9d0b98407ad5c5e901a4eb2..bc44a3d35ec6f49de04a9570e8fa7b51f600523c 100644 (file)
 #       define RADEON_BUS_READ_BURST         (1 << 30)
 #define RADEON_BUS_CNTL1                    0x0034
 #       define RADEON_BUS_WAIT_ON_LOCK_EN    (1 << 4)
+#define RV370_BUS_CNTL                      0x004c
+#       define RV370_BUS_BIOS_DIS_ROM        (1 << 2)
 /* rv370/rv380, rv410, r423/r430/r480, r5xx */
 #define RADEON_MSI_REARM_EN                0x0160
 #      define RV370_MSI_REARM_EN            (1 << 0)
index 6e3b11e5abbe5a83842e4e9b1b49d6531e460d92..1f5850e473cc35716f5c70d9a4640209caea2e41 100644 (file)
@@ -426,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev)
        return radeon_gart_table_vram_alloc(rdev);
 }
 
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
 {
        u32 tmp;
        int r, i;
@@ -440,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
                return r;
        radeon_gart_restore(rdev);
        /* Enable bus master */
-       tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
-       WREG32(R_00004C_BUS_CNTL, tmp);
+       tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+       WREG32(RADEON_BUS_CNTL, tmp);
        /* FIXME: setup default page */
        WREG32_MC(R_000100_MC_PT0_CNTL,
                  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
index ef8a5babe9f7679fce662775faa98afb4f1002a2..4de51891aa6d911abd16502b1b7bb7aabc7cbe17 100644 (file)
@@ -105,6 +105,9 @@ void rv770_pm_misc(struct radeon_device *rdev)
        struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
        if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+               /* 0xff01 is a flag rather than an actual voltage */
+               if (voltage->voltage == 0xff01)
+                       return;
                if (voltage->voltage != rdev->pm.current_vddc) {
                        radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
                        rdev->pm.current_vddc = voltage->voltage;
@@ -572,6 +575,12 @@ static void rv770_program_channel_remap(struct radeon_device *rdev)
        else
                tcp_chan_steer = 0x00fac688;
 
+       /* RV770 CE has special chremap setup */
+       if (rdev->pdev->device == 0x944e) {
+               tcp_chan_steer = 0x00b08b08;
+               mc_shared_chremap = 0x00b08b08;
+       }
+
        WREG32(TCP_CHAN_STEER, tcp_chan_steer);
        WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
 }
@@ -1359,6 +1368,7 @@ void rv770_fini(struct radeon_device *rdev)
        r700_cp_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
+       radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
        rv770_vram_scratch_fini(rdev);
index 90e23e0bfadb054ffbcf8ee65c6cbac98881790a..58c271ebc0f73f6829dae7cffa9d680480ad0dd1 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
 #include <linux/file.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
@@ -484,7 +485,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
 
        for (i = 0; i < ttm->num_pages; ++i) {
-               from_page = read_mapping_page(swap_space, i, NULL);
+               from_page = shmem_read_mapping_page(swap_space, i);
                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
@@ -557,7 +558,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
-               to_page = read_mapping_page(swap_space, i, NULL);
+               to_page = shmem_read_mapping_page(swap_space, i);
                if (unlikely(IS_ERR(to_page))) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
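
Both TTM hunks make the same substitution: the swap storage here is a shmem file, and its pages must be fetched with the shmem-aware helper rather than the generic read_mapping_page(), which assumes a readpage-capable mapping. A minimal usage sketch:

```c
#include <linux/shmem_fs.h>

/* Sketch: fetch page i of a shmem-backed mapping; errors come back as
 * ERR_PTR values, exactly as the hunks above handle them. */
static int fetch_swap_page(struct address_space *swap_space, pgoff_t i,
			   struct page **out)
{
	struct page *page = shmem_read_mapping_page(swap_space, i);

	if (IS_ERR(page))
		return PTR_ERR(page);
	*out = page;
	return 0;
}
```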
index 67d2a7585934c31b3607710c675e531acb21e6a6..36ca465c00cefac036a22fad61fc70b2568a1ccc 100644 (file)
@@ -305,6 +305,7 @@ config HID_MULTITOUCH
          - 3M PCT touch screens
          - ActionStar dual touch panels
          - Cando dual touch panels
+         - Chunghwa panels
          - CVTouch panels
          - Cypress TrueTouch panels
          - Elo TouchSystems IntelliTouch Plus panels
index c957c4b4fe703368a4d19e739277b5c9bd41383b..6f3289a5788812ef573561482f581d04ca6524cc 100644 (file)
@@ -1359,6 +1359,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1422,6 +1423,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
index 0b374a6d6db007853f6762ef489e9191a5e1258e..a756ee6c7df59389c3f79b8a7f06a2343a28aed8 100644 (file)
 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH      0xb19d
 #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
 
+#define USB_VENDOR_ID_CHUNGHWAT                0x2247
+#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH     0x0001
+
 #define USB_VENDOR_ID_CIDC             0x1677
 
 #define USB_VENDOR_ID_CMEDIA           0x0d8c
 
 #define USB_VENDOR_ID_LUMIO            0x202e
 #define USB_DEVICE_ID_CRYSTALTOUCH     0x0006
+#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL        0x0007
 
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_VENDOR_ID_UCLOGIC          0x5543
 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209    0x0042
 #define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5      0x6001
+#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60     0x0064
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U   0x0003
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U   0x0004
 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U   0x0005
index a5eda4c8127a274ee20343d26a82383135c3048b..0ec91c18a4216a52a4a3c7d85292a22f61a6e828 100644 (file)
@@ -501,17 +501,9 @@ static int magicmouse_probe(struct hid_device *hdev,
        }
        report->size = 6;
 
-       /*
-        * The device reponds with 'invalid report id' when feature
-        * report switching it into multitouch mode is sent to it.
-        *
-        * This results in -EIO from the _raw low-level transport callback,
-        * but there seems to be no other way of switching the mode.
-        * Thus the super-ugly hacky success check below.
-        */
        ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
                        HID_FEATURE_REPORT);
-       if (ret != -EIO) {
+       if (ret != sizeof(feature)) {
                hid_err(hdev, "unable to request touch data (%d)\n", ret);
                goto err_stop_hw;
        }
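
With the old workaround gone, the hunk above restores the usual raw-report contract: hid_output_raw_report() returns the number of bytes written on success, so only a full-length write counts. A sketch of the idiom; the -EIO mapping for short writes is this sketch's choice, not the driver's:

```c
/* Sketch: send a feature report and treat anything short of a
 * full-length write as failure. */
static int send_feature(struct hid_device *hdev, u8 *buf, size_t len)
{
	int ret = hdev->hid_output_raw_report(hdev, buf, len,
					      HID_FEATURE_REPORT);

	return ret == (int)len ? 0 : (ret < 0 ? ret : -EIO);
}
```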
index ecd4d2db9e800ca2397c3ad18945d30b2080a028..62cac4dc3b627c6bb598a77ce0c1cb78eee62be5 100644 (file)
@@ -64,6 +64,7 @@ struct mt_device {
        struct mt_class *mtclass;       /* our mt device class */
        unsigned last_field_index;      /* last field index of the report */
        unsigned last_slot_field;       /* the last field of a slot */
+       int last_mt_collection; /* last known mt-related collection */
        __s8 inputmode;         /* InputMode HID feature, -1 if non-existent */
        __u8 num_received;      /* how many contacts we received */
        __u8 num_expected;      /* expected last contact index */
@@ -225,8 +226,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_move);
                        /* touchscreen emulation */
                        set_abs(hi->input, ABS_X, field, cls->sn_move);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_GD_Y:
                        if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -237,8 +240,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_move);
                        /* touchscreen emulation */
                        set_abs(hi->input, ABS_Y, field, cls->sn_move);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                }
                return 0;
@@ -246,31 +251,42 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        case HID_UP_DIGITIZER:
                switch (usage->hid) {
                case HID_DG_INRANGE:
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_CONFIDENCE:
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_TIPSWITCH:
                        hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
                        input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_CONTACTID:
+                       if (!td->maxcontacts)
+                               td->maxcontacts = MT_DEFAULT_MAXCONTACT;
                        input_mt_init_slots(hi->input, td->maxcontacts);
                        td->last_slot_field = usage->hid;
                        td->last_field_index = field->index;
+                       td->last_mt_collection = usage->collection_index;
                        return 1;
                case HID_DG_WIDTH:
                        hid_map_usage(hi, usage, bit, max,
                                        EV_ABS, ABS_MT_TOUCH_MAJOR);
                        set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field,
                                cls->sn_width);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_HEIGHT:
                        hid_map_usage(hi, usage, bit, max,
@@ -279,8 +295,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                                cls->sn_height);
                        input_set_abs_params(hi->input,
                                        ABS_MT_ORIENTATION, 0, 1, 0, 0);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_TIPPRESSURE:
                        if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -292,16 +310,20 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        /* touchscreen emulation */
                        set_abs(hi->input, ABS_PRESSURE, field,
                                cls->sn_pressure);
-                       td->last_slot_field = usage->hid;
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index) {
+                               td->last_slot_field = usage->hid;
+                               td->last_field_index = field->index;
+                       }
                        return 1;
                case HID_DG_CONTACTCOUNT:
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index)
+                               td->last_field_index = field->index;
                        return 1;
                case HID_DG_CONTACTMAX:
                        /* we don't set td->last_slot_field as contactcount and
                         * contact max are global to the report */
-                       td->last_field_index = field->index;
+                       if (td->last_mt_collection == usage->collection_index)
+                               td->last_field_index = field->index;
                        return -1;
                }
                /* let hid-input decide for the others */
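
The repeated guard in this hunk serves one idea: some devices expose several collections whose usages look like multitouch fields, so the driver records which collection actually delivered HID_DG_CONTACTID and only lets usages from that collection update the slot bookkeeping (the CONTACTID case now also supplies the max-contact default, since it runs before the old mt_probe() fallback did). Distilled, with record_slot_field() as a hypothetical helper:

```c
/* Hypothetical distillation of the guard added above. */
static void record_slot_field(struct mt_device *td,
			      struct hid_usage *usage,
			      struct hid_field *field)
{
	/* only the collection that produced HID_DG_CONTACTID may
	 * update the per-slot bookkeeping */
	if (td->last_mt_collection != usage->collection_index)
		return;
	td->last_slot_field = usage->hid;
	td->last_field_index = field->index;
}
```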
@@ -516,6 +538,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        }
        td->mtclass = mtclass;
        td->inputmode = -1;
+       td->last_mt_collection = -1;
        hid_set_drvdata(hdev, td);
 
        ret = hid_parse(hdev);
@@ -526,9 +549,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (ret)
                goto fail;
 
-       if (!td->maxcontacts)
-               td->maxcontacts = MT_DEFAULT_MAXCONTACT;
-
        td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
                                GFP_KERNEL);
        if (!td->slots) {
@@ -593,6 +613,11 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
                        USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
 
+       /* Chunghwa Telecom touch panels */
+       {  .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
+                       USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+
        /* CVTouch panels */
        { .driver_data = MT_CLS_DEFAULT,
                HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
@@ -651,6 +676,9 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
                HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
                        USB_DEVICE_ID_CRYSTALTOUCH) },
+       { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+               HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
+                       USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
 
        /* MosArt panels */
        { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -681,10 +709,10 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
                        USB_DEVICE_ID_MTP)},
        { .driver_data = MT_CLS_CONFIDENCE,
-               HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+               HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
                        USB_DEVICE_ID_MTP_STM)},
        { .driver_data = MT_CLS_CONFIDENCE,
-               HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+               HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
                        USB_DEVICE_ID_MTP_SITRONIX)},
 
        /* Touch International panels */
index 0e30b140edca173d3fb47b4d2b756ad7c53f9f43..621959d5cc42c6b6798328fe32ce32072fa8669a 100644 (file)
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
index ff3c644888b1ba8b0bf14e2426313b66db3688c0..7c1188b53c3ec0ea4aca4364239d9e3596cb7cfa 100644 (file)
@@ -248,12 +248,15 @@ static int hiddev_release(struct inode * inode, struct file * file)
                        usbhid_close(list->hiddev->hid);
                        usbhid_put_power(list->hiddev->hid);
                } else {
+                       mutex_unlock(&list->hiddev->existancelock);
                        kfree(list->hiddev);
+                       kfree(list);
+                       return 0;
                }
        }
 
-       kfree(list);
        mutex_unlock(&list->hiddev->existancelock);
+       kfree(list);
 
        return 0;
 }
@@ -923,10 +926,11 @@ void hiddev_disconnect(struct hid_device *hid)
        usb_deregister_dev(usbhid->intf, &hiddev_class);
 
        if (hiddev->open) {
+               mutex_unlock(&hiddev->existancelock);
                usbhid_close(hiddev->hid);
                wake_up_interruptible(&hiddev->wait);
        } else {
+               mutex_unlock(&hiddev->existancelock);
                kfree(hiddev);
        }
-       mutex_unlock(&hiddev->existancelock);
 }
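
Both hiddev hunks fix the same lock-lifetime bug: existancelock is embedded in the object being freed, so every path must drop the mutex before kfree(), never after. The safe ordering, reduced to a sketch:

```c
/* Sketch of the ordering both hunks enforce: the mutex lives inside
 * *hiddev, so unlock on every path before the object can be freed. */
static void teardown(struct hiddev *hiddev)
{
	mutex_lock(&hiddev->existancelock);
	if (hiddev->open) {
		mutex_unlock(&hiddev->existancelock);
		/* still in use: the object stays alive for now */
	} else {
		mutex_unlock(&hiddev->existancelock);
		kfree(hiddev);	/* the lock must not be taken after this */
	}
}
```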
index 16db83c83c8b9ecea02a19f1e0b0e80971053b1c..5f888f7e7dcb827586c47a2fb2bfdbb3936c13f2 100644 (file)
@@ -333,7 +333,7 @@ config SENSORS_F71882FG
            F71858FG
            F71862FG
            F71863FG
-           F71869F/E
+           F71869F/E/A
            F71882FG
            F71883FG
            F71889FG/ED/A
index c2ee2048ab9128d777ba7e41414b282809537eac..8bc1bd663721fb3b95cf792dce4779cf18550fa7 100644 (file)
@@ -32,6 +32,7 @@ static int adm1275_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        int config;
+       int ret;
        struct pmbus_driver_info *info;
 
        if (!i2c_check_functionality(client->adapter,
@@ -43,30 +44,32 @@ static int adm1275_probe(struct i2c_client *client,
                return -ENOMEM;
 
        config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
-       if (config < 0)
-               return config;
+       if (config < 0) {
+               ret = config;
+               goto err_mem;
+       }
 
        info->pages = 1;
        info->direct[PSC_VOLTAGE_IN] = true;
        info->direct[PSC_VOLTAGE_OUT] = true;
        info->direct[PSC_CURRENT_OUT] = true;
-       info->m[PSC_CURRENT_OUT] = 800;
+       info->m[PSC_CURRENT_OUT] = 807;
        info->b[PSC_CURRENT_OUT] = 20475;
        info->R[PSC_CURRENT_OUT] = -1;
        info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
 
        if (config & ADM1275_VRANGE) {
-               info->m[PSC_VOLTAGE_IN] = 19045;
+               info->m[PSC_VOLTAGE_IN] = 19199;
                info->b[PSC_VOLTAGE_IN] = 0;
                info->R[PSC_VOLTAGE_IN] = -2;
-               info->m[PSC_VOLTAGE_OUT] = 19045;
+               info->m[PSC_VOLTAGE_OUT] = 19199;
                info->b[PSC_VOLTAGE_OUT] = 0;
                info->R[PSC_VOLTAGE_OUT] = -2;
        } else {
-               info->m[PSC_VOLTAGE_IN] = 6666;
+               info->m[PSC_VOLTAGE_IN] = 6720;
                info->b[PSC_VOLTAGE_IN] = 0;
                info->R[PSC_VOLTAGE_IN] = -1;
-               info->m[PSC_VOLTAGE_OUT] = 6666;
+               info->m[PSC_VOLTAGE_OUT] = 6720;
                info->b[PSC_VOLTAGE_OUT] = 0;
                info->R[PSC_VOLTAGE_OUT] = -1;
        }
@@ -76,7 +79,14 @@ static int adm1275_probe(struct i2c_client *client,
        else
                info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
 
-       return pmbus_do_probe(client, id, info);
+       ret = pmbus_do_probe(client, id, info);
+       if (ret)
+               goto err_mem;
+       return 0;
+
+err_mem:
+       kfree(info);
+       return ret;
 }
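
This probe rework is the classic goto-unwind idiom: once info is allocated, every failure, whether the config read or pmbus_do_probe() itself, funnels through one label that frees it, so no early return can leak the allocation. The skeleton, as a sketch:

```c
/* Skeleton of the unwind pattern used above. */
static int example_probe(struct i2c_client *client)
{
	struct pmbus_driver_info *info;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	ret = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
	if (ret < 0)
		goto err_mem;	/* every failure path frees info */

	/* ... fill in info, then hand off to the pmbus core ... */

	return 0;

err_mem:
	kfree(info);
	return ret;
}
```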
 
 static int adm1275_remove(struct i2c_client *client)
index b5e892017e0c57497f7726d59812a56144f35ea8..00e98517f94c6d5bbb26e384cb08bb687b00cc16 100644 (file)
@@ -268,6 +268,7 @@ static struct device_attribute atk_name_attr =
 static void atk_init_attribute(struct device_attribute *attr, char *name,
                sysfs_show_func show)
 {
+       sysfs_attr_init(&attr->attr);
        attr->attr.name = name;
        attr->attr.mode = 0444;
        attr->show = show;
@@ -673,6 +674,7 @@ static int atk_debugfs_gitm_get(void *p, u64 *val)
        else
                err = -EIO;
 
+       ACPI_FREE(ret);
        return err;
 }
 
@@ -1188,19 +1190,15 @@ static int atk_create_files(struct atk_data *data)
        int err;
 
        list_for_each_entry(s, &data->sensor_list, list) {
-               sysfs_attr_init(&s->input_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->input_attr);
                if (err)
                        return err;
-               sysfs_attr_init(&s->label_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->label_attr);
                if (err)
                        return err;
-               sysfs_attr_init(&s->limit1_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->limit1_attr);
                if (err)
                        return err;
-               sysfs_attr_init(&s->limit2_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->limit2_attr);
                if (err)
                        return err;
index 85e937984ff7594a8a610db84c08871898c67f15..0070d5476dd0b5ee96bba29259c821d5419bf117 100644 (file)
@@ -97,9 +97,7 @@ struct platform_data {
 struct pdev_entry {
        struct list_head list;
        struct platform_device *pdev;
-       unsigned int cpu;
        u16 phys_proc_id;
-       u16 cpu_core_id;
 };
 
 static LIST_HEAD(pdev_list);
@@ -653,9 +651,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
        }
 
        pdev_entry->pdev = pdev;
-       pdev_entry->cpu = cpu;
        pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
-       pdev_entry->cpu_core_id = TO_CORE_ID(cpu);
 
        list_add_tail(&pdev_entry->list, &pdev_list);
        mutex_unlock(&pdev_list_mutex);
index e0ef32378ac6a63663e9cd84e1b7fbb9e13e7061..0064432f361f159041a37ec8ca2cedec9da8274c 100644 (file)
@@ -78,8 +78,9 @@ static u16 emc6w201_read16(struct i2c_client *client, u8 reg)
 
        lsb = i2c_smbus_read_byte_data(client, reg);
        msb = i2c_smbus_read_byte_data(client, reg + 1);
-       if (lsb < 0 || msb < 0) {
-               dev_err(&client->dev, "16-bit read failed at 0x%02x\n", reg);
+       if (unlikely(lsb < 0 || msb < 0)) {
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       16, "read", reg);
                return 0xFFFF;  /* Arbitrary value */
        }
 
@@ -95,10 +96,39 @@ static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val)
        int err;
 
        err = i2c_smbus_write_byte_data(client, reg, val & 0xff);
-       if (!err)
+       if (likely(!err))
                err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
-       if (err < 0)
-               dev_err(&client->dev, "16-bit write failed at 0x%02x\n", reg);
+       if (unlikely(err < 0))
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       16, "write", reg);
+
+       return err;
+}
+
+/* Read 8-bit value from register */
+static u8 emc6w201_read8(struct i2c_client *client, u8 reg)
+{
+       int val;
+
+       val = i2c_smbus_read_byte_data(client, reg);
+       if (unlikely(val < 0)) {
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       8, "read", reg);
+               return 0x00;    /* Arbitrary value */
+       }
+
+       return val;
+}
+
+/* Write 8-bit value to register */
+static int emc6w201_write8(struct i2c_client *client, u8 reg, u8 val)
+{
+       int err;
+
+       err = i2c_smbus_write_byte_data(client, reg, val);
+       if (unlikely(err < 0))
+               dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+                       8, "write", reg);
 
        return err;
 }
@@ -114,25 +144,25 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
        if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
                for (nr = 0; nr < 6; nr++) {
                        data->in[input][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_IN(nr));
                        data->in[min][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_IN_LOW(nr));
                        data->in[max][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_IN_HIGH(nr));
                }
 
                for (nr = 0; nr < 6; nr++) {
                        data->temp[input][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_TEMP(nr));
                        data->temp[min][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_TEMP_LOW(nr));
                        data->temp[max][nr] =
-                               i2c_smbus_read_byte_data(client,
+                               emc6w201_read8(client,
                                                EMC6W201_REG_TEMP_HIGH(nr));
                }
 
@@ -192,7 +222,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
 
        mutex_lock(&data->update_lock);
        data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
-       err = i2c_smbus_write_byte_data(client, reg, data->in[sf][nr]);
+       err = emc6w201_write8(client, reg, data->in[sf][nr]);
        mutex_unlock(&data->update_lock);
 
        return err < 0 ? err : count;
@@ -229,7 +259,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
 
        mutex_lock(&data->update_lock);
        data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
-       err = i2c_smbus_write_byte_data(client, reg, data->temp[sf][nr]);
+       err = emc6w201_write8(client, reg, data->temp[sf][nr]);
        mutex_unlock(&data->update_lock);
 
        return err < 0 ? err : count;
@@ -444,7 +474,7 @@ static int emc6w201_detect(struct i2c_client *client,
 
        /* Check configuration */
        config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG);
-       if ((config & 0xF4) != 0x04)
+       if (config < 0 || (config & 0xF4) != 0x04)
                return -ENODEV;
        if (!(config & 0x01)) {
                dev_err(&client->dev, "Monitoring not enabled\n");
index a4a94a096c90e993a3019e75de2c285939748ab9..2d96ed2bf8edadb8647afe035ba2a531c53edace 100644 (file)
@@ -52,6 +52,7 @@
 #define SIO_F71858_ID          0x0507  /* Chipset ID */
 #define SIO_F71862_ID          0x0601  /* Chipset ID */
 #define SIO_F71869_ID          0x0814  /* Chipset ID */
+#define SIO_F71869A_ID         0x1007  /* Chipset ID */
 #define SIO_F71882_ID          0x0541  /* Chipset ID */
 #define SIO_F71889_ID          0x0723  /* Chipset ID */
 #define SIO_F71889E_ID         0x0909  /* Chipset ID */
@@ -108,8 +109,8 @@ static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
-            f71889ed, f71889a, f8000, f81865f };
+enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a, f71882fg,
+            f71889fg, f71889ed, f71889a, f8000, f81865f };
 
 static const char *f71882fg_names[] = {
        "f71808e",
@@ -117,6 +118,7 @@ static const char *f71882fg_names[] = {
        "f71858fg",
        "f71862fg",
        "f71869", /* Both f71869f and f71869e, reg. compatible and same id */
+       "f71869a",
        "f71882fg",
        "f71889fg", /* f81801u too, same id */
        "f71889ed",
@@ -131,6 +133,7 @@ static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
        [f71858fg]      = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
        [f71862fg]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71869]        = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+       [f71869a]       = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71882fg]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71889fg]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        [f71889ed]      = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
@@ -145,6 +148,7 @@ static const char f71882fg_has_in1_alarm[] = {
        [f71858fg]      = 0,
        [f71862fg]      = 0,
        [f71869]        = 0,
+       [f71869a]       = 0,
        [f71882fg]      = 1,
        [f71889fg]      = 1,
        [f71889ed]      = 1,
@@ -159,6 +163,7 @@ static const char f71882fg_fan_has_beep[] = {
        [f71858fg]      = 0,
        [f71862fg]      = 1,
        [f71869]        = 1,
+       [f71869a]       = 1,
        [f71882fg]      = 1,
        [f71889fg]      = 1,
        [f71889ed]      = 1,
@@ -173,6 +178,7 @@ static const char f71882fg_nr_fans[] = {
        [f71858fg]      = 3,
        [f71862fg]      = 3,
        [f71869]        = 3,
+       [f71869a]       = 3,
        [f71882fg]      = 4,
        [f71889fg]      = 3,
        [f71889ed]      = 3,
@@ -187,6 +193,7 @@ static const char f71882fg_temp_has_beep[] = {
        [f71858fg]      = 0,
        [f71862fg]      = 1,
        [f71869]        = 1,
+       [f71869a]       = 1,
        [f71882fg]      = 1,
        [f71889fg]      = 1,
        [f71889ed]      = 1,
@@ -201,6 +208,7 @@ static const char f71882fg_nr_temps[] = {
        [f71858fg]      = 3,
        [f71862fg]      = 3,
        [f71869]        = 3,
+       [f71869a]       = 3,
        [f71882fg]      = 3,
        [f71889fg]      = 3,
        [f71889ed]      = 3,
@@ -2243,6 +2251,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
                case f71808e:
                case f71808a:
                case f71869:
+               case f71869a:
                        /* These always have signed auto point temps */
                        data->auto_point_temp_signed = 1;
                        /* Fall through to select correct fan/pwm reg bank! */
@@ -2305,6 +2314,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
                case f71808e:
                case f71808a:
                case f71869:
+               case f71869a:
                case f71889fg:
                case f71889ed:
                case f71889a:
@@ -2528,6 +2538,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
        case SIO_F71869_ID:
                sio_data->type = f71869;
                break;
+       case SIO_F71869A_ID:
+               sio_data->type = f71869a;
+               break;
        case SIO_F71882_ID:
                sio_data->type = f71882fg;
                break;
@@ -2662,7 +2675,7 @@ static void __exit f71882fg_exit(void)
 }
 
 MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans Edgington, Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans Edgington, Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
 
 module_init(f71882fg_init);
index 2582bfef6ccb8a1accc4bad5a660dd621eba7fdb..c8195a077da364a14e0146bf24a4691bbac78bb5 100644 (file)
@@ -202,7 +202,7 @@ static struct vrm_model vrm_models[] = {
 
        {X86_VENDOR_CENTAUR, 0x6, 0x7, ANY, 85},        /* Eden ESP/Ezra */
        {X86_VENDOR_CENTAUR, 0x6, 0x8, 0x7, 85},        /* Ezra T */
-       {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85},        /* Nemiah */
+       {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85},        /* Nehemiah */
        {X86_VENDOR_CENTAUR, 0x6, 0x9, ANY, 17},        /* C3-M, Eden-N */
        {X86_VENDOR_CENTAUR, 0x6, 0xA, 0x7, 0},         /* No information */
        {X86_VENDOR_CENTAUR, 0x6, 0xA, ANY, 13},        /* C7, Esther */
index 537409d07ee730b55e358fcb1e9e6d515d27e1e8..1a409c5bc9bce687922ce5389150599c6e928f39 100644 (file)
@@ -947,6 +947,7 @@ static int aem_register_sensors(struct aem_data *data,
 
        /* Set up read-only sensors */
        while (ro->label) {
+               sysfs_attr_init(&sensors->dev_attr.attr);
                sensors->dev_attr.attr.name = ro->label;
                sensors->dev_attr.attr.mode = S_IRUGO;
                sensors->dev_attr.show = ro->show;
@@ -963,6 +964,7 @@ static int aem_register_sensors(struct aem_data *data,
 
        /* Set up read-write sensors */
        while (rw->label) {
+               sysfs_attr_init(&sensors->dev_attr.attr);
                sensors->dev_attr.attr.name = rw->label;
                sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
                sensors->dev_attr.show = rw->show;
index 06d4eafcf76b231fdc74e0418341e496387b78d9..41dbf8161ed7b4fb1bc29811c61f006152c9a41c 100644 (file)
@@ -358,6 +358,7 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type,
        else if (type == POWER_SENSOR)
                sprintf(n, power_sensor_name_templates[func], "power", counter);
 
+       sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr);
        data->sensors[sensor].attr[func].dev_attr.attr.name = n;
        data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO;
        data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor;
index bb6405b92007b7b9a9ba2c95038e49f01211c7a0..5f52477504305e9f679e8d74e6cd4a402808c821 100644 (file)
@@ -1538,7 +1538,7 @@ static struct attribute *it87_attributes_label[] = {
 };
 
 static const struct attribute_group it87_group_label = {
-       .attrs = it87_attributes_vid,
+       .attrs = it87_attributes_label,
 };
 
 /* SuperIO detection - will change isa_address if a chip is found */
index 1a6dfb6df1e7ff121ce4903ab80f1b2e57ba2ef1..d3b464b74ced3b743969e2f0377523b0883243f2 100644 (file)
@@ -98,11 +98,16 @@ struct lm95241_data {
 };
 
 /* Conversions */
-static int TempFromReg(u8 val_h, u8 val_l)
+static int temp_from_reg_signed(u8 val_h, u8 val_l)
 {
-       if (val_h & 0x80)
-               return val_h - 0x100;
-       return val_h * 1000 + val_l * 1000 / 256;
+       s16 val_hl = (val_h << 8) | val_l;
+       return val_hl * 1000 / 256;
+}
+
+static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
+{
+       u16 val_hl = (val_h << 8) | val_l;
+       return val_hl * 1000 / 256;
 }
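
The old TempFromReg() threw away the fractional byte for negative values; the replacements get signedness right by assembling both bytes first and letting the s16 (or u16) type do the work. A standalone worked example of the signed path, using ordinary C types:

```c
#include <stdint.h>
#include <stdio.h>

/* Worked example: 0xFF, 0x80 assembles to 0xFF80, which as a signed
 * 16-bit value is -128; scaled by 1000/256 that is -500 millidegrees,
 * matching temp_from_reg_signed() above. */
int main(void)
{
	uint8_t val_h = 0xFF, val_l = 0x80;
	int16_t val_hl = (int16_t)((val_h << 8) | val_l);

	printf("%d\n", val_hl * 1000 / 256);	/* prints -500 */
	return 0;
}
```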
 
 static struct lm95241_data *lm95241_update_device(struct device *dev)
@@ -135,10 +140,13 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
                          char *buf)
 {
        struct lm95241_data *data = lm95241_update_device(dev);
+       int index = to_sensor_dev_attr(attr)->index;
 
        return snprintf(buf, PAGE_SIZE - 1, "%d\n",
-               TempFromReg(data->temp[to_sensor_dev_attr(attr)->index],
-                           data->temp[to_sensor_dev_attr(attr)->index + 1]));
+                       index == 0 || (data->config & (1 << (index / 2))) ?
+               temp_from_reg_signed(data->temp[index], data->temp[index + 1]) :
+               temp_from_reg_unsigned(data->temp[index],
+                                      data->temp[index + 1]));
 }
 
 static ssize_t show_type(struct device *dev, struct device_attribute *attr,
@@ -339,7 +347,7 @@ static int lm95241_detect(struct i2c_client *new_client,
        if ((i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID)
             == MANUFACTURER_ID)
            && (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID)
-               >= DEFAULT_REVISION)) {
+               == DEFAULT_REVISION)) {
                name = DEVNAME;
        } else {
                dev_dbg(&adapter->dev, "LM95241 detection failed at 0x%02x\n",
index 12a54aa297760b1c6913a3fe772ab3dc7cb0c3ee..14335bbc9bdce30512422770f8772bb86827bf5e 100644 (file)
@@ -40,6 +40,8 @@ struct max1111_data {
        struct spi_transfer     xfer[2];
        uint8_t *tx_buf;
        uint8_t *rx_buf;
+       struct mutex            drvdata_lock;
+       /* protect msg, xfer and buffers from multiple access */
 };
 
 static int max1111_read(struct device *dev, int channel)
@@ -48,6 +50,9 @@ static int max1111_read(struct device *dev, int channel)
        uint8_t v1, v2;
        int err;
 
+       /* writing to the drvdata struct is not thread safe; serialize on the mutex */
+       mutex_lock(&data->drvdata_lock);
+
        data->tx_buf[0] = (channel << MAX1111_CTRL_SEL_SH) |
                MAX1111_CTRL_PD0 | MAX1111_CTRL_PD1 |
                MAX1111_CTRL_SGL | MAX1111_CTRL_UNI | MAX1111_CTRL_STR;
@@ -55,12 +60,15 @@ static int max1111_read(struct device *dev, int channel)
        err = spi_sync(data->spi, &data->msg);
        if (err < 0) {
                dev_err(dev, "spi_sync failed with %d\n", err);
+               mutex_unlock(&data->drvdata_lock);
                return err;
        }
 
        v1 = data->rx_buf[0];
        v2 = data->rx_buf[1];
 
+       mutex_unlock(&data->drvdata_lock);
+
        if ((v1 & 0xc0) || (v2 & 0x3f))
                return -EINVAL;
 
@@ -176,6 +184,8 @@ static int __devinit max1111_probe(struct spi_device *spi)
        if (err)
                goto err_free_data;
 
+       mutex_init(&data->drvdata_lock);
+
        data->spi = spi;
        spi_set_drvdata(spi, data);
 
@@ -213,6 +223,7 @@ static int __devexit max1111_remove(struct spi_device *spi)
 
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
+       mutex_destroy(&data->drvdata_lock);
        kfree(data->rx_buf);
        kfree(data->tx_buf);
        kfree(data);
index 98e2e28899e2cd24f07d2eac0095479745d23a1d..9b1f0c37ef77bb22d0c14bcf43e891b7e7a6eb4f 100644 (file)
@@ -47,22 +47,29 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
        if (info->func[0]
            && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
                info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
-       if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+       if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_12) &&
+           pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
                info->func[0] |= PMBUS_HAVE_FAN12;
                if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
                        info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
        }
-       if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+       if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_34) &&
+           pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
                info->func[0] |= PMBUS_HAVE_FAN34;
                if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
                        info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
        }
-       if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) {
+       if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1))
                info->func[0] |= PMBUS_HAVE_TEMP;
-               if (pmbus_check_byte_register(client, 0,
-                                             PMBUS_STATUS_TEMPERATURE))
+       if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_2))
+               info->func[0] |= PMBUS_HAVE_TEMP2;
+       if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_3))
+               info->func[0] |= PMBUS_HAVE_TEMP3;
+       if (info->func[0] & (PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2
+                            | PMBUS_HAVE_TEMP3)
+           && pmbus_check_byte_register(client, 0,
+                                        PMBUS_STATUS_TEMPERATURE))
                        info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
-       }
 
        /* Sensors detected on all pages */
        for (page = 0; page < info->pages; page++) {
index 354770ed3186cdfa30ae07fcd829f5b46906f7e4..8e31a8e2c746e8848c7268ab5c8091b966b648b4 100644 (file)
@@ -362,8 +362,8 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
  * Convert linear sensor values to milli- or micro-units
  * depending on sensor type.
  */
-static int pmbus_reg2data_linear(struct pmbus_data *data,
-                                struct pmbus_sensor *sensor)
+static long pmbus_reg2data_linear(struct pmbus_data *data,
+                                 struct pmbus_sensor *sensor)
 {
        s16 exponent;
        s32 mantissa;
@@ -397,15 +397,15 @@ static int pmbus_reg2data_linear(struct pmbus_data *data,
        else
                val >>= -exponent;
 
-       return (int)val;
+       return val;
 }
 
 /*
  * Convert direct sensor values to milli- or micro-units
  * depending on sensor type.
  */
-static int pmbus_reg2data_direct(struct pmbus_data *data,
-                                struct pmbus_sensor *sensor)
+static long pmbus_reg2data_direct(struct pmbus_data *data,
+                                 struct pmbus_sensor *sensor)
 {
        long val = (s16) sensor->data;
        long m, b, R;
@@ -440,12 +440,12 @@ static int pmbus_reg2data_direct(struct pmbus_data *data,
                R++;
        }
 
-       return (int)((val - b) / m);
+       return (val - b) / m;
 }
 
-static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
 {
-       int val;
+       long val;
 
        if (data->info->direct[sensor->class])
                val = pmbus_reg2data_direct(data, sensor);
@@ -619,7 +619,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
        if (!s1 && !s2)
                *val = !!regval;
        else {
-               int v1, v2;
+               long v1, v2;
                struct pmbus_sensor *sensor1, *sensor2;
 
                sensor1 = &data->sensors[s1];
@@ -661,7 +661,7 @@ static ssize_t pmbus_show_sensor(struct device *dev,
        if (sensor->data < 0)
                return sensor->data;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+       return snprintf(buf, PAGE_SIZE, "%ld\n", pmbus_reg2data(data, sensor));
 }
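
The int-to-long widening through reg2data matters because sensors report in milli- or micro-units: a LINEAR11 value with a large positive exponent, once scaled, can exceed what a 32-bit int holds, and the `%ld` in the show routine follows suit. A sketch of the LINEAR11 decode carried in long, mirroring the exponent/mantissa handling above:

```c
/* Sketch: LINEAR11 decode (5-bit signed exponent in bits 15:11,
 * 11-bit signed mantissa in bits 10:0), kept in long so large
 * scaled results survive on 32-bit builds. */
static long linear11_to_long(u16 reg)
{
	s16 exponent = (s16)reg >> 11;			/* arithmetic shift */
	s32 mantissa = ((s16)((reg & 0x7ff) << 5)) >> 5;	/* sign-extend */
	long val = mantissa;

	if (exponent >= 0)
		val <<= exponent;
	else
		val >>= -exponent;
	return val;
}
```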
 
 static ssize_t pmbus_set_sensor(struct device *dev,
@@ -1430,14 +1430,9 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
 
-       /*
-        * Bail out if status register or PMBus revision register
-        * does not exist.
-        */
-       if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
-           || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
-               dev_err(&client->dev,
-                       "Status or revision register not found\n");
+       /* Bail out if PMBus status register does not exist. */
+       if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
+               dev_err(&client->dev, "PMBus status register not found\n");
                ret = -ENODEV;
                goto out_data;
        }
index 92b42db43bcfd9e5706c41bfb9049a26fd967b93..b39f52e2752a7bca54a1bb25c7083382b01aa52e 100644 (file)
@@ -232,6 +232,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
 
        attr = &attrs->in;
        attr->index = channel;
+       sysfs_attr_init(&attr->dev_attr.attr);
        attr->dev_attr.attr.name  = attrs->in_name;
        attr->dev_attr.attr.mode  = S_IRUGO;
        attr->dev_attr.show = s3c_hwmon_ch_show;
@@ -249,6 +250,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
 
                attr = &attrs->label;
                attr->index = channel;
+               sysfs_attr_init(&attr->dev_attr.attr);
                attr->dev_attr.attr.name  = attrs->label_name;
                attr->dev_attr.attr.mode  = S_IRUGO;
                attr->dev_attr.show = s3c_hwmon_label_show;
index 020c87273ea11d6c60383889a8a9cccae7dfdf02..3494a4cce414304784fb95dadec1379c010286c4 100644 (file)
@@ -887,7 +887,7 @@ static void __exit sch5627_exit(void)
 }
 
 MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_LICENSE("GPL");
 
 module_init(sch5627_init);
index 52b545a795f2ae6e533bf0920385623e8e6e1b04..cbc98aea5b098872cee1c181f24c25c676f62f2e 100644 (file)
@@ -193,7 +193,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
                return;
        }
        if (twi_int_status & MCOMP) {
-               if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
+               if ((read_MASTER_CTL(iface) & MEN) == 0 &&
+                       (iface->cur_mode == TWI_I2C_MODE_REPEAT ||
+                       iface->cur_mode == TWI_I2C_MODE_COMBINED)) {
+                       iface->result = -1;
+                       write_INT_MASK(iface, 0);
+                       write_MASTER_CTL(iface, 0);
+               } else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
                        if (iface->readNum == 0) {
                                /* set the read number to 1 and ask for manual
                                 * stop in block combine mode
index 6c00c107ebf30af4360bd3fef78fac03b62a4683..f84a63c6dd9708e329d1571feefde07d56f0de97 100644 (file)
@@ -248,12 +248,12 @@ static inline int is_msgend(struct s3c24xx_i2c *i2c)
        return i2c->msg_ptr >= i2c->msg->len;
 }
 
-/* i2s_s3c_irq_nextbyte
+/* i2c_s3c_irq_nextbyte
  *
  * process an interrupt and work out what to do
  */
 
-static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
+static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
 {
        unsigned long tmp;
        unsigned char byte;
@@ -264,7 +264,6 @@ static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
        case STATE_IDLE:
                dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
                goto out;
-               break;
 
        case STATE_STOP:
                dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
@@ -444,7 +443,7 @@ static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
        /* pretty much this leaves us with the fact that we've
         * transmitted or received whatever byte we last sent */
 
-       i2s_s3c_irq_nextbyte(i2c, status);
+       i2c_s3c_irq_nextbyte(i2c, status);
 
  out:
        return IRQ_HANDLED;
index dd39c1eb03ed2556537ca38315f9e127dd3cf5bb..26c352a09298b929207b7fed35492a52321033c3 100644 (file)
@@ -234,7 +234,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
 
        if (taos->state != TAOS_STATE_IDLE) {
                err = -ENODEV;
-               dev_dbg(&serio->dev, "TAOS EVM reset failed (state=%d, "
+               dev_err(&serio->dev, "TAOS EVM reset failed (state=%d, "
                        "pos=%d)\n", taos->state, taos->pos);
                goto exit_close;
        }
@@ -255,7 +255,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
                                         msecs_to_jiffies(250));
        if (taos->state != TAOS_STATE_IDLE) {
                err = -ENODEV;
-               dev_err(&adapter->dev, "Echo off failed "
+               dev_err(&serio->dev, "TAOS EVM echo off failed "
                        "(state=%d)\n", taos->state);
                goto exit_close;
        }
@@ -263,7 +263,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
        err = i2c_add_adapter(adapter);
        if (err)
                goto exit_close;
-       dev_dbg(&serio->dev, "Connected to TAOS EVM\n");
+       dev_info(&serio->dev, "Connected to TAOS EVM\n");
 
        taos->client = taos_instantiate_device(adapter);
        return 0;
@@ -288,7 +288,7 @@ static void taos_disconnect(struct serio *serio)
        serio_set_drvdata(serio, NULL);
        kfree(taos);
 
-       dev_dbg(&serio->dev, "Disconnected from TAOS EVM\n");
+       dev_info(&serio->dev, "Disconnected from TAOS EVM\n");
 }
 
 static struct serio_device_id taos_serio_ids[] = {
index 4d9319665e328176993886494516e208181c38aa..fb3b4f8f8152f41879ab736ee2c758332f1facb8 100644 (file)
 #define I2C_CNFG_NEW_MASTER_FSM                        (1<<11)
 #define I2C_STATUS                             0x01C
 #define I2C_SL_CNFG                            0x020
+#define I2C_SL_CNFG_NACK                       (1<<1)
 #define I2C_SL_CNFG_NEWSL                      (1<<2)
 #define I2C_SL_ADDR1                           0x02c
+#define I2C_SL_ADDR2                           0x030
 #define I2C_TX_FIFO                            0x050
 #define I2C_RX_FIFO                            0x054
 #define I2C_PACKET_TRANSFER_STATUS             0x058
@@ -337,7 +339,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
 
        if (!i2c_dev->is_dvc) {
                u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
-               i2c_writel(i2c_dev, sl_cfg | I2C_SL_CNFG_NEWSL, I2C_SL_CNFG);
+               sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL;
+               i2c_writel(i2c_dev, sl_cfg, I2C_SL_CNFG);
+               i2c_writel(i2c_dev, 0xfc, I2C_SL_ADDR1);
+               i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2);
+
        }
 
        val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
index 54e1ce73534b8dfb6aa962fc1d06c9df2be530c1..6f89536646365db2cf316d166c2df23959d2988e 100644 (file)
@@ -201,10 +201,11 @@ static int pca954x_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, data);
 
-       /* Read the mux register at addr to verify
-        * that the mux is in fact present.
+       /* Write the mux register at addr to verify
+        * that the mux is in fact present. This also
+        * initializes the mux to disconnected state.
         */
-       if (i2c_smbus_read_byte(client) < 0) {
+       if (i2c_smbus_write_byte(client, 0) < 0) {
                dev_warn(&client->dev, "probe failed\n");
                goto exit_free;
        }
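
Probing with a write instead of a read has a second benefit, as the new comment notes: writing 0 to the mux control register not only verifies that something ACKs at the address but also parks the mux with every downstream channel disconnected, a known-safe state. The idiom as a sketch:

```c
/* Sketch: probe-by-write for an I2C mux -- confirms presence and
 * leaves all channels deselected in a single transaction. */
static int mux_probe_and_park(struct i2c_client *client)
{
	if (i2c_smbus_write_byte(client, 0) < 0)
		return -ENODEV;		/* no ACK: nothing there */
	return 0;
}
```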
index f62f52fb9ece776000fb98dc830d52bf8bfb4f57..fc0f2bd9ca82518b50653e794d68ac35237d40e7 100644 (file)
@@ -3641,7 +3641,8 @@ static struct kobj_type cm_port_obj_type = {
 
 static char *cm_devnode(struct device *dev, mode_t *mode)
 {
-       *mode = 0666;
+       if (mode)
+               *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
 }
 
index e49a85f8a44debe499e0bcd2c4d489174b5aab3d..56898b6578a49e4e4b3d006763b6f6a861a78d93 100644 (file)
@@ -826,7 +826,8 @@ static void ib_uverbs_remove_one(struct ib_device *device)
 
 static char *uverbs_devnode(struct device *dev, mode_t *mode)
 {
-       *mode = 0666;
+       if (mode)
+               *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
 }
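This is the same guard as the cm.c hunk above: the driver core can invoke a devnode callback with a NULL mode pointer when it only needs the node name, so the callback must not store through the pointer unconditionally. The guarded shape as a stand-alone sketch (the "example" names are illustrative):

        static char *example_devnode(struct device *dev, mode_t *mode)
        {
                if (mode)
                        *mode = 0666;   /* world-accessible device node */
                return kasprintf(GFP_KERNEL, "example/%s", dev_name(dev));
        }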
 
index f660cd04ec2f31774ed678b16dbb85d3a8ed6b39..31fb44085c9b4dd7e3cdd4f97f59727f4a82d107 100644 (file)
@@ -1463,9 +1463,9 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
        struct c4iw_qp_attributes attrs;
        int disconnect = 1;
        int release = 0;
-       int abort = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(hdr);
+       int ret;
 
        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1501,10 +1501,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
                start_ep_timer(ep);
                __state_set(&ep->com, CLOSING);
                attrs.next_state = C4IW_QP_STATE_CLOSING;
-               abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+               ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-               peer_close_upcall(ep);
-               disconnect = 1;
+               if (ret != -ECONNRESET) {
+                       peer_close_upcall(ep);
+                       disconnect = 1;
+               }
                break;
        case ABORTING:
                disconnect = 0;
@@ -2109,15 +2111,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
                break;
        }
 
-       mutex_unlock(&ep->com.mutex);
        if (close) {
-               if (abrupt)
-                       ret = abort_connection(ep, NULL, gfp);
-               else
+               if (abrupt) {
+                       close_complete_upcall(ep);
+                       ret = send_abort(ep, NULL, gfp);
+               } else
                        ret = send_halfclose(ep, gfp);
                if (ret)
                        fatal = 1;
        }
+       mutex_unlock(&ep->com.mutex);
        if (fatal)
                release_ep_resources(ep);
        return ret;
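Moving the unlock below the close handling means the state decision and the resulting send_abort()/send_halfclose() happen atomically with respect to anyone else taking ep->com.mutex; only the final teardown runs unlocked. Reduced to a sketch:

        mutex_lock(&ep->com.mutex);
        /* ... inspect ep->com.state, decide close and abrupt ... */
        if (close) {
                /* issue the abort or half-close while still locked */
        }
        mutex_unlock(&ep->com.mutex);
        if (fatal)
                release_ep_resources(ep);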
@@ -2301,6 +2304,31 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
        return 0;
 }
 
+static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+       struct cpl_abort_req_rss *req = cplhdr(skb);
+       struct c4iw_ep *ep;
+       struct tid_info *t = dev->rdev.lldi.tids;
+       unsigned int tid = GET_TID(req);
+
+       ep = lookup_tid(t, tid);
+       if (is_neg_adv_abort(req->status)) {
+               PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+                    ep->hwtid);
+               kfree_skb(skb);
+               return 0;
+       }
+       PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+            ep->com.state);
+
+       /*
+        * Wake up any threads in rdma_init() or rdma_fini().
+        */
+       c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+       sched(dev, skb);
+       return 0;
+}
+
 /*
  * Most upcalls from the T4 Core go to sched() to
  * schedule the processing on a work queue.
@@ -2317,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
        [CPL_PASS_ESTABLISH] = sched,
        [CPL_PEER_CLOSE] = sched,
        [CPL_CLOSE_CON_RPL] = sched,
-       [CPL_ABORT_REQ_RSS] = sched,
+       [CPL_ABORT_REQ_RSS] = peer_abort_intr,
        [CPL_RDMA_TERMINATE] = sched,
        [CPL_FW4_ACK] = sched,
        [CPL_SET_TCB_RPL] = set_tcb_rpl,
index 8d8f8add6fcd93e71fd5b8931616554099bd7b00..1720dc790d13d1367594478a95ad1c9148bb91b1 100644 (file)
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
        if (ucontext) {
                memsize = roundup(memsize, PAGE_SIZE);
                hwentries = memsize / sizeof *chp->cq.queue;
+               while (hwentries > T4_MAX_IQ_SIZE) {
+                       memsize -= PAGE_SIZE;
+                       hwentries = memsize / sizeof *chp->cq.queue;
+               }
        }
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
index 273ffe49525a5d3d1642a292730af15a388ad9da..0347eed4a16778f173415f47c49c45d9f6d99acc 100644 (file)
@@ -625,7 +625,7 @@ pbl_done:
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;
-       mhp->attr.len = (u32) length;
+       mhp->attr.len = length;
 
        err = register_mem(rhp, php, mhp, shift);
        if (err)
index 3b773b05a8989a0a7b81bda16f64c0f747577344..a41578e48c7b0bb366795bfd3da1dd961f3d42d1 100644 (file)
@@ -1207,11 +1207,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                                c4iw_get_ep(&qhp->ep->com);
                        }
                        ret = rdma_fini(rhp, qhp, ep);
-                       if (ret) {
-                               if (internal)
-                                       c4iw_get_ep(&qhp->ep->com);
+                       if (ret)
                                goto err;
-                       }
                        break;
                case C4IW_QP_STATE_TERMINATE:
                        set_state(qhp, C4IW_QP_STATE_TERMINATE);
index 9f53e68a096a329f499cbb1c9d63b6800b5add55..8ec5237031a08f3aa493ccf84715a2d5652f0e39 100644 (file)
@@ -469,6 +469,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
 #define IB_7322_LT_STATE_CFGENH          0x10
 #define IB_7322_LT_STATE_CFGTEST         0x11
+#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
+#define IB_7322_LT_STATE_CFGWAITENH      0x13
 
 /* link state machine states from IBC */
 #define IB_7322_L_STATE_DOWN             0x0
@@ -498,8 +500,10 @@ static const u8 qib_7322_physportstate[0x20] = {
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
        [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+       [IB_7322_LT_STATE_CFGWAITRMTTEST] =
+               IB_PHYSPORTSTATE_CFG_TRAIN,
+       [IB_7322_LT_STATE_CFGWAITENH] =
+               IB_PHYSPORTSTATE_CFG_WAIT_ENH,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -1692,7 +1696,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
                break;
        }
 
-       if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+       if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
+             ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
+            ibclt == IB_7322_LT_STATE_LINKUP) &&
            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
                force_h1(ppd);
                ppd->cpspec->qdr_reforce = 1;
@@ -7301,12 +7307,17 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
 {
        u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
-       printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
-               ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
-       if (enable)
+       u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
+
+       if (enable && !state) {
+               printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+                       ppd->dd->unit, ppd->port);
                data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
-       else
+       } else if (!enable && state) {
+               printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+                       ppd->dd->unit, ppd->port);
                data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+       }
        qib_write_kreg_port(ppd, krp_serdesctrl, data);
 }
 
index a693c56ec8a6bf97df295c23868e74272c21ef40..6ae57d23004a11310f6f1e77556a961f50584c7c 100644 (file)
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
         * states, or if it transitions from any of the up (INIT or better)
         * states into any of the down states (except link recovery), then
         * call the chip-specific code to take appropriate actions.
+        *
+        * ppd->lflags could be 0 if this is the first time the interrupt
+        * handler has been called but the link is already up.
         */
-       if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+       if (lstate >= IB_PORT_INIT &&
+           (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
            ltstate == IB_PHYSPORTSTATE_LINKUP) {
                /* transitioned to UP */
                if (dd->f_ib_updown(ppd, 1, ibcs))
index be0921ef6b52ace2a2e15a5d1437c6134799dbf0..4cf25347b01546b1578e0b9987d6717c38db0366 100644 (file)
@@ -111,7 +111,8 @@ static void evdev_event(struct input_handle *handle,
 
        rcu_read_unlock();
 
-       wake_up_interruptible(&evdev->wait);
+       if (type == EV_SYN && code == SYN_REPORT)
+               wake_up_interruptible(&evdev->wait);
 }
 
 static int evdev_fasync(int fd, struct file *file, int on)
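With this change a client blocked in read() or poll() is woken once per completed event packet rather than once per raw event. A user-space sketch that consumes packets on the same boundary (the device path is an example):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <linux/input.h>

        int main(void)
        {
                struct input_event ev;
                int fd = open("/dev/input/event0", O_RDONLY);

                if (fd < 0)
                        return 1;
                while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
                        /* SYN_REPORT closes a packet -- the boundary
                         * evdev now wakes readers on */
                        if (ev.type == EV_SYN && ev.code == SYN_REPORT)
                                printf("packet complete\n");
                }
                close(fd);
                return 0;
        }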
index 75e11c7b70fda5fb8d9ecc66a33d3ed460649a01..da38d97a51b1c67782f3265a19bac3cdb5fd2686 100644 (file)
@@ -1756,7 +1756,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
        } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
                mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
                           dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
-               clamp(mt_slots, 2, 32);
+               mt_slots = clamp(mt_slots, 2, 32);
        } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
                mt_slots = 2;
        } else {
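This hunk and the two mousedev hunks further down fix the same misuse: clamp() is an expression macro that returns the bounded value and never writes through its argument, so calling it without assigning the result is a no-op. A simplified illustration (the kernel's real macro adds strict type checking, omitted here):

        #define clamp(val, lo, hi) \
                ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

        int mt_slots = 100;
        clamp(mt_slots, 2, 32);                 /* result discarded: still 100 */
        mt_slots = clamp(mt_slots, 2, 32);      /* correct: now 32 */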
index f23a743817dbb1932445a4704046dd7e4ac58d37..33d0bdc837c099a486e2bffc95dbe0fb6a8ee7c4 100644 (file)
@@ -209,6 +209,7 @@ static void omap_kp_tasklet(unsigned long data)
 #endif
                }
        }
+       input_sync(omap_kp_data->input);
        memcpy(keypad_state, new_state, sizeof(keypad_state));
 
        if (key_down) {
index 40b02ae96f864b0ff2014300cf8b3b2d94afe1d0..6229c3e8e78b807c90ab1a8f549e8b250601679e 100644 (file)
@@ -520,7 +520,8 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
  */
 static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
 {
-       const struct pm8xxx_keypad_platform_data *pdata = mfd_get_data(pdev);
+       const struct pm8xxx_keypad_platform_data *pdata =
+                                       dev_get_platdata(&pdev->dev);
        const struct matrix_keymap_data *keymap_data;
        struct pmic8xxx_kp *kp;
        int rc;
index 834cf98e7efb060fdcc163aa223312d8806d001a..6876700a4469d191446a20a9c81dc2606279b9d2 100644 (file)
@@ -32,7 +32,7 @@ static const struct {
        [SH_KEYSC_MODE_3] = { 2, 4, 7 },
        [SH_KEYSC_MODE_4] = { 3, 6, 6 },
        [SH_KEYSC_MODE_5] = { 4, 6, 7 },
-       [SH_KEYSC_MODE_6] = { 5, 7, 7 },
+       [SH_KEYSC_MODE_6] = { 5, 8, 8 },
 };
 
 struct sh_keysc_priv {
index 97e07e786e41023da526b3ec7a25e617379759c2..b3cfb9c71e664244f85bb77427e13ca01a932d20 100644 (file)
@@ -90,7 +90,8 @@ static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
        unsigned int delay;
        u8 pon_cntl;
        struct pmic8xxx_pwrkey *pwrkey;
-       const struct pm8xxx_pwrkey_platform_data *pdata = mfd_get_data(pdev);
+       const struct pm8xxx_pwrkey_platform_data *pdata =
+                                       dev_get_platdata(&pdev->dev);
 
        if (!pdata) {
                dev_err(&pdev->dev, "power key platform data not supplied\n");
index 257e033986e40f61ad9a6bcb957439bd5d3f744f..0110b5a3a1678a7a0843672b8505e6f42c664fc1 100644 (file)
@@ -187,7 +187,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
                if (size == 0)
                        size = xres ? : 1;
 
-               clamp(value, min, max);
+               value = clamp(value, min, max);
 
                mousedev->packet.x = ((value - min) * xres) / size;
                mousedev->packet.abs_event = 1;
@@ -201,7 +201,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
                if (size == 0)
                        size = yres ? : 1;
 
-               clamp(value, min, max);
+               value = clamp(value, min, max);
 
                mousedev->packet.y = yres - ((value - min) * yres) / size;
                mousedev->packet.abs_event = 1;
index 59de638225fe4b30b71bcddc1cdf4ff8b50a4fa5..e35058bcd7b98e0f1b690f2e359cec96df05140e 100644 (file)
@@ -156,8 +156,10 @@ static int if_open(struct tty_struct *tty, struct file *filp)
        if (!cs || !try_module_get(cs->driver->owner))
                return -ENODEV;
 
-       if (mutex_lock_interruptible(&cs->mutex))
+       if (mutex_lock_interruptible(&cs->mutex)) {
+               module_put(cs->driver->owner);
                return -ERESTARTSYS;
+       }
        tty->driver_data = cs;
 
        ++cs->open_count;
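try_module_get() has already succeeded by the time the lock attempt can fail, so the interrupted path was leaking a module reference and pinning the driver in memory. The balanced pattern, sketched with generic names:

        if (!try_module_get(owner))
                return -ENODEV;
        if (mutex_lock_interruptible(&lock)) {
                module_put(owner);      /* undo the ref on every error
                                         * path taken after the get */
                return -ERESTARTSYS;
        }
        /* success: the reference is kept and dropped at close time */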
index c0cff64a1ae64240e76c06f2cba931a908435d8a..cc1dc4817facfcc52e08d0d9f8169dec64271aee 100644 (file)
@@ -593,7 +593,7 @@ static void lp5521_unregister_sysfs(struct i2c_client *client)
                                &lp5521_led_attribute_group);
 }
 
-static int __init lp5521_init_led(struct lp5521_led *led,
+static int __devinit lp5521_init_led(struct lp5521_led *led,
                                struct i2c_client *client,
                                int chan, struct lp5521_platform_data *pdata)
 {
@@ -637,7 +637,7 @@ static int __init lp5521_init_led(struct lp5521_led *led,
        return 0;
 }
 
-static int lp5521_probe(struct i2c_client *client,
+static int __devinit lp5521_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct lp5521_chip              *chip;
index e19fed25f1376e2632995d71661df0e9d49dc091..5971e309b2342390a1988cd7f58d6e49d5a24301 100644 (file)
@@ -826,7 +826,7 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
        return 0;
 }
 
-static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
                           int chan, struct lp5523_platform_data *pdata)
 {
        char name[32];
@@ -872,7 +872,7 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
 
 static struct i2c_driver lp5523_driver;
 
-static int lp5523_probe(struct i2c_client *client,
+static int __devinit lp5523_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct lp5523_chip              *chip;
index d8d3a1e910a1bbb7f81e9c1d7c4cb58452415691..a2c874623e3521550ab6917be4b56eb3e42b4aaa 100644 (file)
@@ -88,7 +88,7 @@ static const struct pca9532_chip_info pca9532_chip_info_tbl[] = {
 
 static struct i2c_driver pca9532_driver = {
        .driver = {
-               .name = "pca953x",
+               .name = "leds-pca953x",
        },
        .probe = pca9532_probe,
        .remove = pca9532_remove,
index 4332fc2f25d4a2260b575f66229514bad10a2fa4..91e31e260b4afff9a51589351e08ec9c6f63da95 100644 (file)
@@ -7088,6 +7088,7 @@ static int remove_and_add_spares(mddev_t *mddev)
                list_for_each_entry(rdev, &mddev->disks, same_set) {
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(In_sync, &rdev->flags) &&
+                           !test_bit(Faulty, &rdev->flags) &&
                            !test_bit(Blocked, &rdev->flags))
                                spares++;
                        if (rdev->raid_disk < 0
index 98278041d75f5b790affdc7a024180ca91c0854a..5b6b451d46940db41b8d2b3fd4473e7a1429383a 100644 (file)
@@ -1988,6 +1988,14 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
        if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) {
                if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0)
                        goto err0;
+
+               /* If we took control of the bus, we need to force
+                  reinitialization.  This is because many ts_bus_ctrl()
+                  functions strobe the RESET pin on the demod, and if the
+                  frontend thread already exists then the dvb_init() routine
+                  won't get called (which is what usually performs the initial
+                  register configuration). */
+               fepriv->reinitialise = 1;
        }
 
        if ((ret = dvb_generic_open (inode, file)) < 0)
index e4c97fd6f05a329db01408effae324c4f2ef3d42..52798a111e16cb5a55df761aa228e60ec3f9343f 100644 (file)
@@ -168,7 +168,7 @@ config RADIO_MAXIRADIO
 
 config RADIO_MIROPCM20
        tristate "miroSOUND PCM20 radio"
-       depends on ISA && VIDEO_V4L2 && SND
+       depends on ISA && ISA_DMA_API && VIDEO_V4L2 && SND
        select SND_ISA
        select SND_MIRO
        ---help---
@@ -201,7 +201,7 @@ config RADIO_SF16FMI
 
 config RADIO_SF16FMR2
        tristate "SF16FMR2 Radio"
-       depends on ISA && VIDEO_V4L2
+       depends on ISA && VIDEO_V4L2 && SND
        ---help---
          Choose Y here if you have one of these FM radio cards.
 
index deca2e06ff2203bba0caae2e5737499fd0f9cdc2..c9f4a8e65dc45daccc97c70bdaa99c8510fddae1 100644 (file)
@@ -1033,7 +1033,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
                char ps_name[MAX_RDS_PS_NAME + 1];
 
                len = control->size - 1;
-               if (len > MAX_RDS_PS_NAME) {
+               if (len < 0 || len > MAX_RDS_PS_NAME) {
                        rval = -ERANGE;
                        goto exit;
                }
@@ -1057,7 +1057,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
                char radio_text[MAX_RDS_RADIO_TEXT + 1];
 
                len = control->size - 1;
-               if (len > MAX_RDS_RADIO_TEXT) {
+               if (len < 0 || len > MAX_RDS_RADIO_TEXT) {
                        rval = -ERANGE;
                        goto exit;
                }
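len is a signed int computed from a user-controlled size, so a zero-length control wraps to -1, which the old len > MAX test could not catch. Sketch of the failure mode (assuming the usual two's-complement conversion):

        u32 size = 0;           /* from the user-space control */
        int len = size - 1;     /* wraps: len == -1 */
        if (len < 0 || len > MAX_RDS_PS_NAME)   /* new check rejects it */
                return -ERANGE;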
index 8fa539dde1b4be8547ecee3247add51bb2957a19..7f7079b12f2321ab2174a764dca2959e0c0a6952 100644 (file)
@@ -597,12 +597,17 @@ static void __devexit fintek_remove(struct pnp_dev *pdev)
 static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
 {
        struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+       unsigned long flags;
 
        fit_dbg("%s called", __func__);
 
+       spin_lock_irqsave(&fintek->fintek_lock, flags);
+
        /* disable all CIR interrupts */
        fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
 
+       spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
        fintek_config_mode_enable(fintek);
 
        /* disable cir logical dev */
index 3f3c70716268d2e40faf343a0d1320791a265924..6bc35eeb653bf0cbc3ff8a82766481e862d9b198 100644 (file)
@@ -307,6 +307,14 @@ static const struct {
        /* 0xffdc iMON MCE VFD */
        { 0x00010000ffffffeell, KEY_VOLUMEUP },
        { 0x01000000ffffffeell, KEY_VOLUMEDOWN },
+       { 0x00000001ffffffeell, KEY_MUTE },
+       { 0x0000000fffffffeell, KEY_MEDIA },
+       { 0x00000012ffffffeell, KEY_UP },
+       { 0x00000013ffffffeell, KEY_DOWN },
+       { 0x00000014ffffffeell, KEY_LEFT },
+       { 0x00000015ffffffeell, KEY_RIGHT },
+       { 0x00000016ffffffeell, KEY_ENTER },
+       { 0x00000017ffffffeell, KEY_ESC },
        /* iMON Knob values */
        { 0x000100ffffffffeell, KEY_VOLUMEUP },
        { 0x010000ffffffffeell, KEY_VOLUMEDOWN },
@@ -1582,16 +1590,16 @@ static void imon_incoming_packet(struct imon_context *ictx,
        /* Only panel type events left to process now */
        spin_lock_irqsave(&ictx->kc_lock, flags);
 
+       do_gettimeofday(&t);
        /* KEY_MUTE repeats from knob need to be suppressed */
        if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
-               do_gettimeofday(&t);
                msec = tv2int(&t, &prev_time);
-               prev_time = t;
                if (msec < ictx->idev->rep[REP_DELAY]) {
                        spin_unlock_irqrestore(&ictx->kc_lock, flags);
                        return;
                }
        }
+       prev_time = t;
        kc = ictx->kc;
 
        spin_unlock_irqrestore(&ictx->kc_lock, flags);
@@ -1603,7 +1611,9 @@ static void imon_incoming_packet(struct imon_context *ictx,
        input_report_key(ictx->idev, kc, 0);
        input_sync(ictx->idev);
 
+       spin_lock_irqsave(&ictx->kc_lock, flags);
        ictx->last_keycode = kc;
+       spin_unlock_irqrestore(&ictx->kc_lock, flags);
 
        return;
 
@@ -1740,6 +1750,8 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
                detected_display_type = IMON_DISPLAY_TYPE_VFD;
                break;
        /* iMON VFD, MCE IR */
+       case 0x46:
+       case 0x7e:
        case 0x9e:
                dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
                detected_display_type = IMON_DISPLAY_TYPE_VFD;
@@ -1755,6 +1767,9 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
                dev_info(ictx->dev, "Unknown 0xffdc device, "
                         "defaulting to VFD and iMON IR");
                detected_display_type = IMON_DISPLAY_TYPE_VFD;
+               /* We don't know which one it is, so allow the user to
+                * set the RC6 one from userspace if OTHER wasn't correct. */
+               allowed_protos |= RC_TYPE_RC6;
                break;
        }
 
index 11c19d8d0ee0a7edad7aeb3ea241130585a6d381..423ed45d6c55a4e3ccb43f3ae1284bcf55d952da 100644 (file)
@@ -114,18 +114,20 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
        s64                     delta; /* ns */
        DEFINE_IR_RAW_EVENT(ev);
        int                     rc = 0;
+       int                     delay;
 
        if (!dev->raw)
                return -EINVAL;
 
        now = ktime_get();
        delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
+       delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);
 
        /* Check for a long duration since last event or if we're
         * being called for the first time; note that delta can't
         * possibly be negative.
         */
-       if (delta > IR_MAX_DURATION || !dev->raw->last_type)
+       if (delta > delay || !dev->raw->last_type)
                type |= IR_START_EVENT;
        else
                ev.duration = delta;
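The start-of-transmission gap is now tied to the configured key-repeat delay instead of the fixed IR_MAX_DURATION. Assuming the input core's customary default of rep[REP_DELAY] == 250 ms (boards and userspace may override it), the threshold works out to:

        delay = MS_TO_NS(250);  /* 250 * 1000 * 1000 = 250,000,000 ns */
        /* a gap longer than ~250 ms now sets IR_START_EVENT, aligning
         * IR repeat detection with the input layer's own delay */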
index e716b931cf7e69d52228dc258ae644bde14f09a3..ecd3d028076852b3ee8da36794bbac1c58a59157 100644 (file)
@@ -1347,6 +1347,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
        {       /* 0: ITE8704 */
               .model = "ITE8704 CIR transceiver",
               .io_region_size = IT87_IOREG_LENGTH,
+              .io_rsrc_no = 0,
               .hw_tx_capable = true,
               .sample_period = (u32) (1000000000ULL / 115200),
               .tx_carrier_freq = 38000,
@@ -1371,6 +1372,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
        {       /* 1: ITE8713 */
               .model = "ITE8713 CIR transceiver",
               .io_region_size = IT87_IOREG_LENGTH,
+              .io_rsrc_no = 0,
               .hw_tx_capable = true,
               .sample_period = (u32) (1000000000ULL / 115200),
               .tx_carrier_freq = 38000,
@@ -1395,6 +1397,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
        {       /* 2: ITE8708 */
               .model = "ITE8708 CIR transceiver",
               .io_region_size = IT8708_IOREG_LENGTH,
+              .io_rsrc_no = 0,
               .hw_tx_capable = true,
               .sample_period = (u32) (1000000000ULL / 115200),
               .tx_carrier_freq = 38000,
@@ -1420,6 +1423,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
        {       /* 3: ITE8709 */
               .model = "ITE8709 CIR transceiver",
               .io_region_size = IT8709_IOREG_LENGTH,
+              .io_rsrc_no = 2,
               .hw_tx_capable = true,
               .sample_period = (u32) (1000000000ULL / 115200),
               .tx_carrier_freq = 38000,
@@ -1461,6 +1465,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
        struct rc_dev *rdev = NULL;
        int ret = -ENOMEM;
        int model_no;
+       int io_rsrc_no;
 
        ite_dbg("%s called", __func__);
 
@@ -1490,10 +1495,11 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
 
        /* get the description for the device */
        dev_desc = &ite_dev_descs[model_no];
+       io_rsrc_no = dev_desc->io_rsrc_no;
 
        /* validate pnp resources */
-       if (!pnp_port_valid(pdev, 0) ||
-           pnp_port_len(pdev, 0) != dev_desc->io_region_size) {
+       if (!pnp_port_valid(pdev, io_rsrc_no) ||
+           pnp_port_len(pdev, io_rsrc_no) != dev_desc->io_region_size) {
                dev_err(&pdev->dev, "IR PNP Port not valid!\n");
                goto failure;
        }
@@ -1504,7 +1510,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
        }
 
        /* store resource values */
-       itdev->cir_addr = pnp_port_start(pdev, 0);
+       itdev->cir_addr = pnp_port_start(pdev, io_rsrc_no);
        itdev->cir_irq = pnp_irq(pdev, 0);
 
        /* initialize spinlocks */
index 16a19f5fd71890e95a9cbf230d0e18947101284a..aa899a0b9750770da5ee1a4b8a079d8f6d21bc4a 100644 (file)
@@ -57,6 +57,9 @@ struct ite_dev_params {
        /* size of the I/O region */
        int io_region_size;
 
+       /* IR pnp I/O resource number */
+       int io_rsrc_no;
+
        /* true if the hardware supports transmission */
        bool hw_tx_capable;
 
index bb10ffe086b415ad252948d4d4c761ce236adaba..8d558ae63456488da72ba9982c7aae6739a3dd48 100644 (file)
 /* Pinnacle PCTV HD 800i mini remote */
 
 static struct rc_map_table pinnacle_pctv_hd[] = {
-
-       { 0x0f, KEY_1 },
-       { 0x15, KEY_2 },
-       { 0x10, KEY_3 },
-       { 0x18, KEY_4 },
-       { 0x1b, KEY_5 },
-       { 0x1e, KEY_6 },
-       { 0x11, KEY_7 },
-       { 0x21, KEY_8 },
-       { 0x12, KEY_9 },
-       { 0x27, KEY_0 },
-
-       { 0x24, KEY_ZOOM },
-       { 0x2a, KEY_SUBTITLE },
-
-       { 0x00, KEY_MUTE },
-       { 0x01, KEY_ENTER },    /* Pinnacle Logo */
-       { 0x39, KEY_POWER },
-
-       { 0x03, KEY_VOLUMEUP },
-       { 0x09, KEY_VOLUMEDOWN },
-       { 0x06, KEY_CHANNELUP },
-       { 0x0c, KEY_CHANNELDOWN },
-
-       { 0x2d, KEY_REWIND },
-       { 0x30, KEY_PLAYPAUSE },
-       { 0x33, KEY_FASTFORWARD },
-       { 0x3c, KEY_STOP },
-       { 0x36, KEY_RECORD },
-       { 0x3f, KEY_EPG },      /* Labeled "?" */
+       /* Key codes for the tiny Pinnacle remote */
+       { 0x0700, KEY_MUTE },
+       { 0x0701, KEY_MENU }, /* Pinnacle logo */
+       { 0x0739, KEY_POWER },
+       { 0x0703, KEY_VOLUMEUP },
+       { 0x0709, KEY_VOLUMEDOWN },
+       { 0x0706, KEY_CHANNELUP },
+       { 0x070c, KEY_CHANNELDOWN },
+       { 0x070f, KEY_1 },
+       { 0x0715, KEY_2 },
+       { 0x0710, KEY_3 },
+       { 0x0718, KEY_4 },
+       { 0x071b, KEY_5 },
+       { 0x071e, KEY_6 },
+       { 0x0711, KEY_7 },
+       { 0x0721, KEY_8 },
+       { 0x0712, KEY_9 },
+       { 0x0727, KEY_0 },
+       { 0x0724, KEY_ZOOM }, /* 'Square' key */
+       { 0x072a, KEY_SUBTITLE },   /* 'T' key */
+       { 0x072d, KEY_REWIND },
+       { 0x0730, KEY_PLAYPAUSE },
+       { 0x0733, KEY_FASTFORWARD },
+       { 0x0736, KEY_RECORD },
+       { 0x073c, KEY_STOP },
+       { 0x073f, KEY_HELP }, /* '?' key */
 };
 
 static struct rc_map_list pinnacle_pctv_hd_map = {
        .map = {
                .scan    = pinnacle_pctv_hd,
                .size    = ARRAY_SIZE(pinnacle_pctv_hd),
-               .rc_type = RC_TYPE_UNKNOWN,     /* Legacy IR type */
+               .rc_type = RC_TYPE_RC5,
                .name    = RC_MAP_PINNACLE_PCTV_HD,
        }
 };
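The rewritten table stores full RC5 scancodes, (address << 8) | command, instead of bare command bytes, which is why every entry gained the 0x07 system-address byte and rc_type moved from the legacy UNKNOWN to RC_TYPE_RC5. Worked example for the '?' key:

        /* 0x073f == (0x07 << 8) | 0x3f
         *    RC5 system address 7, command 0x3f -> KEY_HELP */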
index fd237ab120bbbd8a3a753b379f3edddb41079f61..27997a9ceb0d4552944bbbb6605bfc711ad9d9ea 100644 (file)
@@ -55,6 +55,8 @@ struct irctl {
        struct lirc_buffer *buf;
        unsigned int chunk_size;
 
+       struct cdev *cdev;
+
        struct task_struct *task;
        long jiffies_to_wait;
 };
@@ -62,7 +64,6 @@ struct irctl {
 static DEFINE_MUTEX(lirc_dev_lock);
 
 static struct irctl *irctls[MAX_IRCTL_DEVICES];
-static struct cdev cdevs[MAX_IRCTL_DEVICES];
 
 /* Only used for sysfs but defined to void otherwise */
 static struct class *lirc_class;
@@ -167,9 +168,13 @@ static struct file_operations lirc_dev_fops = {
 
 static int lirc_cdev_add(struct irctl *ir)
 {
-       int retval;
+       int retval = -ENOMEM;
        struct lirc_driver *d = &ir->d;
-       struct cdev *cdev = &cdevs[d->minor];
+       struct cdev *cdev;
+
+       cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+       if (!cdev)
+               goto err_out;
 
        if (d->fops) {
                cdev_init(cdev, d->fops);
@@ -180,12 +185,20 @@ static int lirc_cdev_add(struct irctl *ir)
        }
        retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
        if (retval)
-               return retval;
+               goto err_out;
 
        retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
-       if (retval)
+       if (retval) {
                kobject_put(&cdev->kobj);
+               goto err_out;
+       }
+
+       ir->cdev = cdev;
+
+       return 0;
 
+err_out:
+       kfree(cdev);
        return retval;
 }
 
@@ -214,7 +227,7 @@ int lirc_register_driver(struct lirc_driver *d)
        if (MAX_IRCTL_DEVICES <= d->minor) {
                dev_err(d->dev, "lirc_dev: lirc_register_driver: "
                        "\"minor\" must be between 0 and %d (%d)!\n",
-                       MAX_IRCTL_DEVICES-1, d->minor);
+                       MAX_IRCTL_DEVICES - 1, d->minor);
                err = -EBADRQC;
                goto out;
        }
@@ -369,7 +382,7 @@ int lirc_unregister_driver(int minor)
 
        if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
                printk(KERN_ERR "lirc_dev: %s: minor (%d) must be between "
-                      "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES-1);
+                      "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES - 1);
                return -EBADRQC;
        }
 
@@ -380,7 +393,7 @@ int lirc_unregister_driver(int minor)
                return -ENOENT;
        }
 
-       cdev = &cdevs[minor];
+       cdev = ir->cdev;
 
        mutex_lock(&lirc_dev_lock);
 
@@ -410,6 +423,7 @@ int lirc_unregister_driver(int minor)
        } else {
                lirc_irctl_cleanup(ir);
                cdev_del(cdev);
+               kfree(cdev);
                kfree(ir);
                irctls[minor] = NULL;
        }
@@ -453,7 +467,7 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
                goto error;
        }
 
-       cdev = &cdevs[iminor(inode)];
+       cdev = ir->cdev;
        if (try_module_get(cdev->owner)) {
                ir->open++;
                retval = ir->d.set_use_inc(ir->d.data);
@@ -484,13 +498,15 @@ EXPORT_SYMBOL(lirc_dev_fop_open);
 int lirc_dev_fop_close(struct inode *inode, struct file *file)
 {
        struct irctl *ir = irctls[iminor(inode)];
-       struct cdev *cdev = &cdevs[iminor(inode)];
+       struct cdev *cdev;
 
        if (!ir) {
                printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
                return -EINVAL;
        }
 
+       cdev = ir->cdev;
+
        dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);
 
        WARN_ON(mutex_lock_killable(&lirc_dev_lock));
@@ -503,6 +519,7 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
                lirc_irctl_cleanup(ir);
                cdev_del(cdev);
                irctls[ir->d.minor] = NULL;
+               kfree(cdev);
                kfree(ir);
        }
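Replacing the static cdevs[] array with one kzalloc()'d cdev per device lets the character device outlive lirc_unregister_driver() while a file descriptor is still open: whichever of unregister and final close runs last performs cdev_del() plus kfree(). The allocation side, reduced to a sketch:

        struct cdev *cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);

        if (!cdev)
                return -ENOMEM;
        cdev_init(cdev, fops);
        cdev->owner = owner;
        if (cdev_add(cdev, devno, 1)) {
                kobject_put(&cdev->kobj);
                kfree(cdev);
                return -ENODEV;
        }
        ir->cdev = cdev;        /* freed by unregister or the last close */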
 
index ad927fcaa0203e3128346b8911fc962c5008c420..ec972dc25790ea8b18e2641a0e045e1f5858bc3f 100644 (file)
@@ -108,6 +108,12 @@ static int debug = 1;
 static int debug;
 #endif
 
+#define mce_dbg(dev, fmt, ...)                                 \
+       do {                                                    \
+               if (debug)                                      \
+                       dev_info(dev, fmt, ## __VA_ARGS__);     \
+       } while (0)
+
 /* general constants */
 #define SEND_FLAG_IN_PROGRESS  1
 #define SEND_FLAG_COMPLETE     2
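The new mce_dbg() keeps dev_dbg()'s call-site syntax but routes through dev_info() gated on the module's debug parameter, so it works without dynamic-debug support. Usage is unchanged from the dev_dbg() calls it replaces below, e.g.:

        mce_dbg(ir->dev, "callback called (status=%d len=%d)\n",
                urb->status, len);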
@@ -246,6 +252,9 @@ static struct usb_device_id mceusb_dev_table[] = {
          .driver_info = MCE_GEN2_TX_INV },
        /* SMK eHome Infrared Transceiver */
        { USB_DEVICE(VENDOR_SMK, 0x0338) },
+       /* SMK/I-O Data GV-MC7/RCKIT Receiver */
+       { USB_DEVICE(VENDOR_SMK, 0x0353),
+         .driver_info = MCE_GEN2_NO_TX },
        /* Tatung eHome Infrared Transceiver */
        { USB_DEVICE(VENDOR_TATUNG, 0x9150) },
        /* Shuttle eHome Infrared Transceiver */
@@ -549,9 +558,10 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
                                 inout, data1);
                        break;
                case MCE_CMD_S_TIMEOUT:
-                       /* value is in units of 50us, so x*50/100 or x/2 ms */
+                       /* value is in units of 50us, so x*50/1000 ms */
                        dev_info(dev, "%s receive timeout of %d ms\n",
-                                inout, ((data1 << 8) | data2) / 2);
+                                inout,
+                                ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000);
                        break;
                case MCE_CMD_G_TIMEOUT:
                        dev_info(dev, "Get receive timeout\n");
@@ -606,12 +616,15 @@ static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
        if (ir) {
                len = urb->actual_length;
 
-               dev_dbg(ir->dev, "callback called (status=%d len=%d)\n",
+               mce_dbg(ir->dev, "callback called (status=%d len=%d)\n",
                        urb->status, len);
 
                mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
        }
 
+       /* the transfer buffer and urb were allocated in mce_request_packet */
+       kfree(urb->transfer_buffer);
+       usb_free_urb(urb);
 }
 
 /* request incoming or send outgoing usb packet - used to initialize remote */
@@ -655,17 +668,17 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
                return;
        }
 
-       dev_dbg(dev, "receive request called (size=%#x)\n", size);
+       mce_dbg(dev, "receive request called (size=%#x)\n", size);
 
        async_urb->transfer_buffer_length = size;
        async_urb->dev = ir->usbdev;
 
        res = usb_submit_urb(async_urb, GFP_ATOMIC);
        if (res) {
-               dev_dbg(dev, "receive request FAILED! (res=%d)\n", res);
+               mce_dbg(dev, "receive request FAILED! (res=%d)\n", res);
                return;
        }
-       dev_dbg(dev, "receive request complete (res=%d)\n", res);
+       mce_dbg(dev, "receive request complete (res=%d)\n", res);
 }
 
 static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
@@ -673,9 +686,9 @@ static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
        mce_request_packet(ir, data, size, MCEUSB_TX);
 }
 
-static void mce_sync_in(struct mceusb_dev *ir, unsigned char *data, int size)
+static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
 {
-       mce_request_packet(ir, data, size, MCEUSB_RX);
+       mce_request_packet(ir, NULL, size, MCEUSB_RX);
 }
 
 /* Send data out the IR blaster port(s) */
@@ -794,7 +807,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
                        ir->carrier = carrier;
                        cmdbuf[2] = MCE_CMD_SIG_END;
                        cmdbuf[3] = MCE_IRDATA_TRAILER;
-                       dev_dbg(ir->dev, "%s: disabling carrier "
+                       mce_dbg(ir->dev, "%s: disabling carrier "
                                "modulation\n", __func__);
                        mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
                        return carrier;
@@ -806,7 +819,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
                                ir->carrier = carrier;
                                cmdbuf[2] = prescaler;
                                cmdbuf[3] = divisor;
-                               dev_dbg(ir->dev, "%s: requesting %u HZ "
+                               mce_dbg(ir->dev, "%s: requesting %u HZ "
                                        "carrier\n", __func__, carrier);
 
                                /* Transmit new carrier to mce device */
@@ -835,7 +848,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
        switch (ir->buf_in[index]) {
        /* 2-byte return value commands */
        case MCE_CMD_S_TIMEOUT:
-               ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
+               ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
                break;
 
        /* 1-byte return value commands */
@@ -879,7 +892,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
                        rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
                                         * US_TO_NS(MCE_TIME_UNIT);
 
-                       dev_dbg(ir->dev, "Storing %s with duration %d\n",
+                       mce_dbg(ir->dev, "Storing %s with duration %d\n",
                                rawir.pulse ? "pulse" : "space",
                                rawir.duration);
 
@@ -911,7 +924,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
                if (ir->parser_state != CMD_HEADER && !ir->rem)
                        ir->parser_state = CMD_HEADER;
        }
-       dev_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
+       mce_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
        ir_raw_event_handle(ir->rc);
 }
 
@@ -933,7 +946,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
 
        if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
                ir->send_flags = SEND_FLAG_COMPLETE;
-               dev_dbg(ir->dev, "setup answer received %d bytes\n",
+               mce_dbg(ir->dev, "setup answer received %d bytes\n",
                        buf_len);
        }
 
@@ -951,7 +964,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
 
        case -EPIPE:
        default:
-               dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
+               mce_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
                break;
        }
 
@@ -961,7 +974,6 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
 static void mceusb_gen1_init(struct mceusb_dev *ir)
 {
        int ret;
-       int maxp = ir->len_in;
        struct device *dev = ir->dev;
        char *data;
 
@@ -978,8 +990,8 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
        ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
                              USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
                              data, USB_CTRL_MSG_SZ, HZ * 3);
-       dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
-       dev_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
+       mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
+       mce_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
                __func__, data[0], data[1]);
 
        /* set feature: bit rate 38400 bps */
@@ -987,71 +999,56 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
                              USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
                              0xc04e, 0x0000, NULL, 0, HZ * 3);
 
-       dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
+       mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
 
        /* bRequest 4: set char length to 8 bits */
        ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
                              4, USB_TYPE_VENDOR,
                              0x0808, 0x0000, NULL, 0, HZ * 3);
-       dev_dbg(dev, "%s - retB = %d\n", __func__, ret);
+       mce_dbg(dev, "%s - retB = %d\n", __func__, ret);
 
        /* bRequest 2: set handshaking to use DTR/DSR */
        ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
                              2, USB_TYPE_VENDOR,
                              0x0000, 0x0100, NULL, 0, HZ * 3);
-       dev_dbg(dev, "%s - retC = %d\n", __func__, ret);
+       mce_dbg(dev, "%s - retC = %d\n", __func__, ret);
 
        /* device reset */
        mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
-       mce_sync_in(ir, NULL, maxp);
 
        /* get hw/sw revision? */
        mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
-       mce_sync_in(ir, NULL, maxp);
 
        kfree(data);
 };
 
 static void mceusb_gen2_init(struct mceusb_dev *ir)
 {
-       int maxp = ir->len_in;
-
        /* device reset */
        mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
-       mce_sync_in(ir, NULL, maxp);
 
        /* get hw/sw revision? */
        mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
-       mce_sync_in(ir, NULL, maxp);
 
        /* unknown what the next two actually return... */
        mce_async_out(ir, GET_UNKNOWN, sizeof(GET_UNKNOWN));
-       mce_sync_in(ir, NULL, maxp);
        mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
-       mce_sync_in(ir, NULL, maxp);
 }
 
 static void mceusb_get_parameters(struct mceusb_dev *ir)
 {
-       int maxp = ir->len_in;
-
        /* get the carrier and frequency */
        mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
-       mce_sync_in(ir, NULL, maxp);
 
-       if (!ir->flags.no_tx) {
+       if (!ir->flags.no_tx)
                /* get the transmitter bitmask */
                mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
-               mce_sync_in(ir, NULL, maxp);
-       }
 
        /* get receiver timeout value */
        mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
-       mce_sync_in(ir, NULL, maxp);
 
        /* get receiver sensor setting */
        mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
-       mce_sync_in(ir, NULL, maxp);
 }
 
 static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
@@ -1082,7 +1079,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
        rc->priv = ir;
        rc->driver_type = RC_DRIVER_IR_RAW;
        rc->allowed_protos = RC_TYPE_ALL;
-       rc->timeout = US_TO_NS(1000);
+       rc->timeout = MS_TO_NS(100);
        if (!ir->flags.no_tx) {
                rc->s_tx_mask = mceusb_set_tx_mask;
                rc->s_tx_carrier = mceusb_set_tx_carrier;
@@ -1122,7 +1119,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
        bool tx_mask_normal;
        int ir_intfnum;
 
-       dev_dbg(&intf->dev, "%s called\n", __func__);
+       mce_dbg(&intf->dev, "%s called\n", __func__);
 
        idesc  = intf->cur_altsetting;
 
@@ -1150,7 +1147,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
                        ep_in = ep;
                        ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
                        ep_in->bInterval = 1;
-                       dev_dbg(&intf->dev, "acceptable inbound endpoint "
+                       mce_dbg(&intf->dev, "acceptable inbound endpoint "
                                "found\n");
                }
 
@@ -1165,12 +1162,12 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
                        ep_out = ep;
                        ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
                        ep_out->bInterval = 1;
-                       dev_dbg(&intf->dev, "acceptable outbound endpoint "
+                       mce_dbg(&intf->dev, "acceptable outbound endpoint "
                                "found\n");
                }
        }
        if (ep_in == NULL) {
-               dev_dbg(&intf->dev, "inbound and/or endpoint not found\n");
+               mce_dbg(&intf->dev, "inbound and/or endpoint not found\n");
                return -ENODEV;
        }
 
@@ -1215,16 +1212,16 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
        if (!ir->rc)
                goto rc_dev_fail;
 
-       /* flush buffers on the device */
-       mce_sync_in(ir, NULL, maxp);
-       mce_sync_in(ir, NULL, maxp);
-
        /* wire up inbound data handler */
        usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in,
                maxp, (usb_complete_t) mceusb_dev_recv, ir, ep_in->bInterval);
        ir->urb_in->transfer_dma = ir->dma_in;
        ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
+       /* flush buffers on the device */
+       mce_dbg(&intf->dev, "Flushing receive buffers\n");
+       mce_flush_rx_buffer(ir, maxp);
+
        /* initialize device */
        if (ir->flags.microsoft_gen1)
                mceusb_gen1_init(ir);
index bf3060ea610782a42d596b3848629b905100a8df..ce595f9ab4c7a41c16ff0d69ecc33faf9fc3c4f4 100644 (file)
@@ -991,7 +991,6 @@ static int nvt_open(struct rc_dev *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&nvt->nvt_lock, flags);
-       nvt->in_use = true;
        nvt_enable_cir(nvt);
        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 
@@ -1004,7 +1003,6 @@ static void nvt_close(struct rc_dev *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&nvt->nvt_lock, flags);
-       nvt->in_use = false;
        nvt_disable_cir(nvt);
        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 }
@@ -1112,7 +1110,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        rdev->dev.parent = &pdev->dev;
        rdev->driver_name = NVT_DRIVER_NAME;
        rdev->map_name = RC_MAP_RC6_MCE;
-       rdev->timeout = US_TO_NS(1000);
+       rdev->timeout = MS_TO_NS(100);
        /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
        rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
 #if 0
index 379795d61ea7684f4eac4dc08ddb23b7ab4d9459..1241fc89a36c10c02dfa56ed5e351c89f28ac4e7 100644 (file)
@@ -70,7 +70,6 @@ struct nvt_dev {
        struct ir_raw_event rawir;
 
        spinlock_t nvt_lock;
-       bool in_use;
 
        /* for rx */
        u8 buf[RX_BUF_LEN];
index f57cd5677ac27c4bee3ebbbbfda25997585d6f1b..3186ac7c2c108c00d4346076d4bc20fc0d765df2 100644 (file)
@@ -522,18 +522,20 @@ EXPORT_SYMBOL_GPL(rc_g_keycode_from_table);
 /**
  * ir_do_keyup() - internal function to signal the release of a keypress
  * @dev:       the struct rc_dev descriptor of the device
+ * @sync:      whether or not to call input_sync
  *
  * This function is used internally to release a keypress, it must be
  * called with keylock held.
  */
-static void ir_do_keyup(struct rc_dev *dev)
+static void ir_do_keyup(struct rc_dev *dev, bool sync)
 {
        if (!dev->keypressed)
                return;
 
        IR_dprintk(1, "keyup key 0x%04x\n", dev->last_keycode);
        input_report_key(dev->input_dev, dev->last_keycode, 0);
-       input_sync(dev->input_dev);
+       if (sync)
+               input_sync(dev->input_dev);
        dev->keypressed = false;
 }
 
@@ -549,7 +551,7 @@ void rc_keyup(struct rc_dev *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&dev->keylock, flags);
-       ir_do_keyup(dev);
+       ir_do_keyup(dev, true);
        spin_unlock_irqrestore(&dev->keylock, flags);
 }
 EXPORT_SYMBOL_GPL(rc_keyup);
@@ -578,7 +580,7 @@ static void ir_timer_keyup(unsigned long cookie)
         */
        spin_lock_irqsave(&dev->keylock, flags);
        if (time_is_before_eq_jiffies(dev->keyup_jiffies))
-               ir_do_keyup(dev);
+               ir_do_keyup(dev, true);
        spin_unlock_irqrestore(&dev->keylock, flags);
 }
 
@@ -597,6 +599,7 @@ void rc_repeat(struct rc_dev *dev)
        spin_lock_irqsave(&dev->keylock, flags);
 
        input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode);
+       input_sync(dev->input_dev);
 
        if (!dev->keypressed)
                goto out;
@@ -622,29 +625,28 @@ EXPORT_SYMBOL_GPL(rc_repeat);
 static void ir_do_keydown(struct rc_dev *dev, int scancode,
                          u32 keycode, u8 toggle)
 {
-       input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
-
-       /* Repeat event? */
-       if (dev->keypressed &&
-           dev->last_scancode == scancode &&
-           dev->last_toggle == toggle)
-               return;
+       bool new_event = !dev->keypressed ||
+                        dev->last_scancode != scancode ||
+                        dev->last_toggle != toggle;
 
-       /* Release old keypress */
-       ir_do_keyup(dev);
+       if (new_event && dev->keypressed)
+               ir_do_keyup(dev, false);
 
-       dev->last_scancode = scancode;
-       dev->last_toggle = toggle;
-       dev->last_keycode = keycode;
+       input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
 
-       if (keycode == KEY_RESERVED)
-               return;
+       if (new_event && keycode != KEY_RESERVED) {
+               /* Register a keypress */
+               dev->keypressed = true;
+               dev->last_scancode = scancode;
+               dev->last_toggle = toggle;
+               dev->last_keycode = keycode;
+
+               IR_dprintk(1, "%s: key down event, "
+                          "key 0x%04x, scancode 0x%04x\n",
+                          dev->input_name, keycode, scancode);
+               input_report_key(dev->input_dev, keycode, 1);
+       }
 
-       /* Register a keypress */
-       dev->keypressed = true;
-       IR_dprintk(1, "%s: key down event, key 0x%04x, scancode 0x%04x\n",
-                  dev->input_name, keycode, scancode);
-       input_report_key(dev->input_dev, dev->last_keycode, 1);
        input_sync(dev->input_dev);
 }
 
index a97cf2750bd99a3c41d071707dd16a3c285b1183..834a48394bce1bc4546f2754c24553a54119d862 100644 (file)
@@ -3474,7 +3474,7 @@ static int radio_s_tuner(struct file *file, void *priv,
        if (0 != t->index)
                return -EINVAL;
 
-       bttv_call_all(btv, tuner, g_tuner, t);
+       bttv_call_all(btv, tuner, s_tuner, t);
        return 0;
 }
 
index 1933d4d11bf20ac526adcfc8a1714b28560bcad6..e80134f52ef5c3ba41b00a4d3751d4ba95db3365 100644 (file)
@@ -695,14 +695,10 @@ static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
 
        cx18_call_all(cx, tuner, g_tuner, vt);
 
-       if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
+       if (vt->type == V4L2_TUNER_RADIO)
                strlcpy(vt->name, "cx18 Radio Tuner", sizeof(vt->name));
-               vt->type = V4L2_TUNER_RADIO;
-       } else {
+       else
                strlcpy(vt->name, "cx18 TV Tuner", sizeof(vt->name));
-               vt->type = V4L2_TUNER_ANALOG_TV;
-       }
-
        return 0;
 }
 
index 64d9b2136ff6b536fa1c22e11eb1c9bc80a9185f..419777a832ee2fd929bf0e087add45ad7d09e973 100644 (file)
@@ -2060,12 +2060,8 @@ static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
                goto fail_irq;
        }
 
-       if (!pci_enable_msi(pci_dev))
-               err = request_irq(pci_dev->irq, cx23885_irq,
-                                 IRQF_DISABLED, dev->name, dev);
-       else
-               err = request_irq(pci_dev->irq, cx23885_irq,
-                                 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+       err = request_irq(pci_dev->irq, cx23885_irq,
+                         IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
        if (err < 0) {
                printk(KERN_ERR "%s: can't get IRQ %d\n",
                       dev->name, pci_dev->irq);
@@ -2114,7 +2110,6 @@ static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
 
        /* unregister stuff */
        free_irq(pci_dev->irq, dev);
-       pci_disable_msi(pci_dev);
 
        cx23885_dev_unregister(dev);
        v4l2_device_unregister(v4l2_dev);
index f9e347dae7391a4487e15ac17e9e16578f59d16e..120c7d8e0895ea36619e97759b34b0184edd2bf6 100644 (file)
@@ -1184,14 +1184,10 @@ static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
 
        ivtv_call_all(itv, tuner, g_tuner, vt);
 
-       if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
+       if (vt->type == V4L2_TUNER_RADIO)
                strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name));
-               vt->type = V4L2_TUNER_RADIO;
-       } else {
+       else
                strlcpy(vt->name, "ivtv TV Tuner", sizeof(vt->name));
-               vt->type = V4L2_TUNER_ANALOG_TV;
-       }
-
        return 0;
 }
 
index 10b55c854487e5b9ff3628056eb6032fe30cb1c3..89d09a8914f8ea5099d6031d59b5f9117c214b78 100644 (file)
@@ -2,10 +2,10 @@
  * Header for M-5MOLS 8M Pixel camera sensor with ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -106,23 +106,23 @@ struct m5mols_capture {
  * The values for each scene mode follow the recommendations in the documents.
  */
 struct m5mols_scenemode {
-       u32 metering;
-       u32 ev_bias;
-       u32 wb_mode;
-       u32 wb_preset;
-       u32 chroma_en;
-       u32 chroma_lvl;
-       u32 edge_en;
-       u32 edge_lvl;
-       u32 af_range;
-       u32 fd_mode;
-       u32 mcc;
-       u32 light;
-       u32 flash;
-       u32 tone;
-       u32 iso;
-       u32 capt_mode;
-       u32 wdr;
+       u8 metering;
+       u8 ev_bias;
+       u8 wb_mode;
+       u8 wb_preset;
+       u8 chroma_en;
+       u8 chroma_lvl;
+       u8 edge_en;
+       u8 edge_lvl;
+       u8 af_range;
+       u8 fd_mode;
+       u8 mcc;
+       u8 light;
+       u8 flash;
+       u8 tone;
+       u8 iso;
+       u8 capt_mode;
+       u8 wdr;
 };
 
 /**
@@ -154,7 +154,6 @@ struct m5mols_version {
        u8      str[VERSION_STRING_SIZE];
        u8      af;
 };
-#define VERSION_SIZE sizeof(struct m5mols_version)
 
 /**
  * struct m5mols_info - M-5MOLS driver data structure
@@ -216,9 +215,9 @@ struct m5mols_info {
        bool lock_ae;
        bool lock_awb;
        u8 resolution;
-       u32 interrupt;
-       u32 mode;
-       u32 mode_save;
+       u8 interrupt;
+       u8 mode;
+       u8 mode_save;
        int (*set_power)(struct device *dev, int on);
 };
 
@@ -256,9 +255,11 @@ struct m5mols_info {
  *   +-------+---+----------+-----+------+------+------+------+
  *   - d[0..3]: according to size1
  */
-int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg_comb, u8 *val);
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg_comb, u16 *val);
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
 int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 value);
 
 /*
  * Mode operation of the M-5MOLS
@@ -280,12 +281,12 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
  * The available executing order between each modes are as follows:
  *   PARAMETER <---> MONITOR <---> CAPTURE
  */
-int m5mols_mode(struct m5mols_info *info, u32 mode);
+int m5mols_mode(struct m5mols_info *info, u8 mode);
 
-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg);
 int m5mols_sync_controls(struct m5mols_info *info);
 int m5mols_start_capture(struct m5mols_info *info);
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode);
 int m5mols_lock_3a(struct m5mols_info *info, bool lock);
 int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
 
index d71a3903b60fbd1ec579c5e5a9aa375472a0010e..d9471928369df378adf9c14f5134f0be53563f40 100644 (file)
@@ -2,10 +2,10 @@
  * The Capture code for Fujitsu M-5MOLS ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -58,9 +58,9 @@ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
 {
        u32 num, den;
 
-       int ret = m5mols_read(sd, addr_num, &num);
+       int ret = m5mols_read_u32(sd, addr_num, &num);
        if (!ret)
-               ret = m5mols_read(sd, addr_den, &den);
+               ret = m5mols_read_u32(sd, addr_den, &den);
        if (ret)
                return ret;
        *val = den == 0 ? 0 : num / den;
@@ -99,20 +99,20 @@ static int m5mols_capture_info(struct m5mols_info *info)
        if (ret)
                return ret;
 
-       ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
+       ret = m5mols_read_u16(sd, EXIF_INFO_ISO, &exif->iso_speed);
        if (!ret)
-               ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
+               ret = m5mols_read_u16(sd, EXIF_INFO_FLASH, &exif->flash);
        if (!ret)
-               ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
+               ret = m5mols_read_u16(sd, EXIF_INFO_SDR, &exif->sdr);
        if (!ret)
-               ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
+               ret = m5mols_read_u16(sd, EXIF_INFO_QVAL, &exif->qval);
        if (ret)
                return ret;
 
        if (!ret)
-               ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
+               ret = m5mols_read_u32(sd, CAPC_IMAGE_SIZE, &info->cap.main);
        if (!ret)
-               ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
+               ret = m5mols_read_u32(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
        if (!ret)
                info->cap.total = info->cap.main + info->cap.thumb;
 
@@ -122,7 +122,7 @@ static int m5mols_capture_info(struct m5mols_info *info)
 int m5mols_start_capture(struct m5mols_info *info)
 {
        struct v4l2_subdev *sd = &info->sd;
-       u32 resolution = info->resolution;
+       u8 resolution = info->resolution;
        int timeout;
        int ret;
 
index 817c16fec368c409cd09d8ea8e4f28608b6cc00e..d135d20d09cfb79348cf98c041137d6b90bd0039 100644 (file)
@@ -2,10 +2,10 @@
  * Controls for M-5MOLS 8M Pixel camera sensor with ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -130,7 +130,7 @@ static struct m5mols_scenemode m5mols_default_scenemode[] = {
  *
  * WARNING: The execution order is important. Do not change the order.
  */
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
 {
        struct v4l2_subdev *sd = &info->sd;
        struct m5mols_scenemode scenemode = m5mols_default_scenemode[mode];
index 76eac26e84ae6877ee3f8408f28ca79ad35f48dd..43c68f51c5ce07822b58b44de9efe7749e1c3cf9 100644 (file)
@@ -2,10 +2,10 @@
  * Driver for M-5MOLS 8M Pixel camera sensor with ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -133,13 +133,13 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
 /**
  * m5mols_read -  I2C read function
  * @reg: combination of size, category and command for the I2C packet
+ * @size: desired size of I2C packet
  * @val: read value
  */
-int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
+static int m5mols_read(struct v4l2_subdev *sd, u32 size, u32 reg, u32 *val)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
-       u8 size = I2C_SIZE(reg);
        u8 category = I2C_CATEGORY(reg);
        u8 cmd = I2C_COMMAND(reg);
        struct i2c_msg msg[2];
@@ -149,11 +149,6 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
        if (!client->adapter)
                return -ENODEV;
 
-       if (size != 1 && size != 2 && size != 4) {
-               v4l2_err(sd, "Wrong data size\n");
-               return -EINVAL;
-       }
-
        msg[0].addr = client->addr;
        msg[0].flags = 0;
        msg[0].len = 5;
@@ -184,6 +179,52 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
        return 0;
 }
 
+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg, u8 *val)
+{
+       u32 val_32;
+       int ret;
+
+       if (I2C_SIZE(reg) != 1) {
+               v4l2_err(sd, "Wrong data size\n");
+               return -EINVAL;
+       }
+
+       ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+       if (ret)
+               return ret;
+
+       *val = (u8)val_32;
+       return ret;
+}
+
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg, u16 *val)
+{
+       u32 val_32;
+       int ret;
+
+       if (I2C_SIZE(reg) != 2) {
+               v4l2_err(sd, "Wrong data size\n");
+               return -EINVAL;
+       }
+
+       ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+       if (ret)
+               return ret;
+
+       *val = (u16)val_32;
+       return ret;
+}
+
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
+{
+       if (I2C_SIZE(reg) != 4) {
+               v4l2_err(sd, "Wrong data size\n");
+               return -EINVAL;
+       }
+
+       return m5mols_read(sd, I2C_SIZE(reg), reg, val);
+}
+
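
The size encoded into each register word is what the new typed accessors
check. The packing macros themselves are not part of this hunk; a sketch
consistent with how I2C_SIZE(), I2C_CATEGORY() and I2C_COMMAND() are used
above (the authoritative definitions live in the driver's header) would be:

	#define I2C_REG(cat, cmd, size)	(((u32)(size) << 16) | ((cat) << 8) | (cmd))
	#define I2C_SIZE(reg)		(((reg) >> 16) & 0xff)
	#define I2C_CATEGORY(reg)	(((reg) >> 8) & 0xff)
	#define I2C_COMMAND(reg)	((reg) & 0xff)

Callers pick the accessor matching the register's declared width, e.g.
m5mols_read_u16(sd, SYSTEM_VER_FIRMWARE, &ver->fw); a mismatch now fails
fast with -EINVAL instead of silently truncating or overrunning.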
 /**
  * m5mols_write - I2C command write function
  * @reg: combination of size, category and command for the I2C packet
@@ -231,13 +272,14 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
        return 0;
 }
 
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 mask)
 {
-       u32 busy, i;
+       u8 busy;
+       int i;
        int ret;
 
        for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
-               ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
+               ret = m5mols_read_u8(sd, I2C_REG(category, cmd, 1), &busy);
                if (ret < 0)
                        return ret;
                if ((busy & mask) == mask)
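
A usage sketch for this poll helper (the pairing of registers and values is
illustrative, though it mirrors how m5mols_reg_mode() below uses it): after
requesting a mode switch, wait until the status register reports it:

	ret = m5mols_write(sd, SYSTEM_SYSMODE, REG_PARAMETER);
	if (!ret)
		ret = m5mols_busy(sd, CAT_SYSTEM, CAT0_STATUS, REG_PARAMETER);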
@@ -252,14 +294,14 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
  * Before writing desired interrupt value the INT_FACTOR register should
  * be read to clear pending interrupts.
  */
-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg)
 {
        struct m5mols_info *info = to_m5mols(sd);
-       u32 mask = is_available_af(info) ? REG_INT_AF : 0;
-       u32 dummy;
+       u8 mask = is_available_af(info) ? REG_INT_AF : 0;
+       u8 dummy;
        int ret;
 
-       ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
+       ret = m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &dummy);
        if (!ret)
                ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
        return ret;
@@ -271,7 +313,7 @@ int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
  * Changing the M-5MOLS mode always incurs a small delay, so the current
  * busy status must be checked to guarantee that the right mode is reached.
  */
-static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
+static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
 {
        int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
 
@@ -286,16 +328,16 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
  * can be guaranteed only when the sensor is operating in the mode to which
  * a command belongs.
  */
-int m5mols_mode(struct m5mols_info *info, u32 mode)
+int m5mols_mode(struct m5mols_info *info, u8 mode)
 {
        struct v4l2_subdev *sd = &info->sd;
        int ret = -EINVAL;
-       u32 reg;
+       u8 reg;
 
        if (mode < REG_PARAMETER && mode > REG_CAPTURE)
                return ret;
 
-       ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
+       ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
        if ((!ret && reg == mode) || ret)
                return ret;
 
@@ -344,41 +386,37 @@ int m5mols_mode(struct m5mols_info *info, u32 mode)
 static int m5mols_get_version(struct v4l2_subdev *sd)
 {
        struct m5mols_info *info = to_m5mols(sd);
-       union {
-               struct m5mols_version ver;
-               u8 bytes[VERSION_SIZE];
-       } version;
-       u32 *value;
-       u8 cmd = CAT0_VER_CUSTOMER;
+       struct m5mols_version *ver = &info->ver;
+       u8 *str = ver->str;
+       int i;
        int ret;
 
-       do {
-               value = (u32 *)&version.bytes[cmd];
-               ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
-               if (ret)
-                       return ret;
-       } while (cmd++ != CAT0_VER_AWB);
+       ret = m5mols_read_u8(sd, SYSTEM_VER_CUSTOMER, &ver->customer);
+       if (!ret)
+               ret = m5mols_read_u8(sd, SYSTEM_VER_PROJECT, &ver->project);
+       if (!ret)
+               ret = m5mols_read_u16(sd, SYSTEM_VER_FIRMWARE, &ver->fw);
+       if (!ret)
+               ret = m5mols_read_u16(sd, SYSTEM_VER_HARDWARE, &ver->hw);
+       if (!ret)
+               ret = m5mols_read_u16(sd, SYSTEM_VER_PARAMETER, &ver->param);
+       if (!ret)
+               ret = m5mols_read_u16(sd, SYSTEM_VER_AWB, &ver->awb);
+       if (!ret)
+               ret = m5mols_read_u8(sd, AF_VERSION, &ver->af);
+       if (ret)
+               return ret;
 
-       do {
-               value = (u32 *)&version.bytes[cmd];
-               ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
+       for (i = 0; i < VERSION_STRING_SIZE; i++) {
+               ret = m5mols_read_u8(sd, SYSTEM_VER_STRING, &str[i]);
                if (ret)
                        return ret;
-               if (cmd >= VERSION_SIZE - 1)
-                       return -EINVAL;
-       } while (version.bytes[cmd++]);
-
-       value = (u32 *)&version.bytes[cmd];
-       ret = m5mols_read(sd, AF_VERSION, value);
-       if (ret)
-               return ret;
+       }
 
-       /* store version information swapped for being readable */
-       info->ver       = version.ver;
-       info->ver.fw    = be16_to_cpu(info->ver.fw);
-       info->ver.hw    = be16_to_cpu(info->ver.hw);
-       info->ver.param = be16_to_cpu(info->ver.param);
-       info->ver.awb   = be16_to_cpu(info->ver.awb);
+       ver->fw = be16_to_cpu(ver->fw);
+       ver->hw = be16_to_cpu(ver->hw);
+       ver->param = be16_to_cpu(ver->param);
+       ver->awb = be16_to_cpu(ver->awb);
 
        v4l2_info(sd, "Manufacturer\t[%s]\n",
                        is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
@@ -722,7 +760,7 @@ static int m5mols_init_controls(struct m5mols_info *info)
        int ret;
 
        /* Determine the value range & step of controls for various FW versions */
-       ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
+       ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &max_exposure);
        if (!ret)
                step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
        if (ret)
@@ -842,18 +880,18 @@ static void m5mols_irq_work(struct work_struct *work)
        struct m5mols_info *info =
                container_of(work, struct m5mols_info, work_irq);
        struct v4l2_subdev *sd = &info->sd;
-       u32 reg;
+       u8 reg;
        int ret;
 
        if (!is_powered(info) ||
-                       m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
+                       m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &info->interrupt))
                return;
 
        switch (info->interrupt & REG_INT_MASK) {
        case REG_INT_AF:
                if (!is_available_af(info))
                        break;
-               ret = m5mols_read(sd, AF_STATUS, &reg);
+               ret = m5mols_read_u8(sd, AF_STATUS, &reg);
                v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
                         reg == REG_AF_FAIL ? "Failed" :
                         reg == REG_AF_SUCCESS ? "Success" :
index b83e36fc6ac658240ea9699fafaa56ee76fa8ca9..c755bd6edfe9b0a485731e641766d39396356ee0 100644 (file)
@@ -2,10 +2,10 @@
  * Register map for M-5MOLS 8M Pixel camera sensor with ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * more specific contents, see the definitions in file m5mols.h.
  */
 #define CAT0_VER_CUSTOMER      0x00    /* customer version */
-#define CAT0_VER_AWB           0x09    /* Auto WB version */
+#define CAT0_VER_PROJECT       0x01    /* project version */
+#define CAT0_VER_FIRMWARE      0x02    /* Firmware version */
+#define CAT0_VER_HARDWARE      0x04    /* Hardware version */
+#define CAT0_VER_PARAMETER     0x06    /* Parameter version */
+#define CAT0_VER_AWB           0x08    /* Auto WB version */
 #define CAT0_VER_STRING                0x0a    /* string including M-5MOLS */
 #define CAT0_SYSMODE           0x0b    /* SYSTEM mode register */
 #define CAT0_STATUS            0x0c    /* SYSTEM mode status register */
 #define CAT0_INT_FACTOR                0x10    /* interrupt pending register */
 #define CAT0_INT_ENABLE                0x11    /* interrupt enable register */
 
+#define SYSTEM_VER_CUSTOMER    I2C_REG(CAT_SYSTEM, CAT0_VER_CUSTOMER, 1)
+#define SYSTEM_VER_PROJECT     I2C_REG(CAT_SYSTEM, CAT0_VER_PROJECT, 1)
+#define SYSTEM_VER_FIRMWARE    I2C_REG(CAT_SYSTEM, CAT0_VER_FIRMWARE, 2)
+#define SYSTEM_VER_HARDWARE    I2C_REG(CAT_SYSTEM, CAT0_VER_HARDWARE, 2)
+#define SYSTEM_VER_PARAMETER   I2C_REG(CAT_SYSTEM, CAT0_VER_PARAMETER, 2)
+#define SYSTEM_VER_AWB         I2C_REG(CAT_SYSTEM, CAT0_VER_AWB, 2)
+
 #define SYSTEM_SYSMODE         I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
 #define REG_SYSINIT            0x00    /* SYSTEM mode */
 #define REG_PARAMETER          0x01    /* PARAMETER mode */
 #define REG_CAP_START_MAIN     0x01
 #define REG_CAP_START_THUMB    0x03
 
-#define CAPC_IMAGE_SIZE                I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
-#define CAPC_THUMB_SIZE                I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
+#define CAPC_IMAGE_SIZE                I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 4)
+#define CAPC_THUMB_SIZE                I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 4)
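
With the size field corrected from 1 to 4, these two registers now pass the
I2C_SIZE(reg) != 4 guard in m5mols_read_u32() and are read as full 32-bit
byte counts, as the capture path does:

	ret = m5mols_read_u32(sd, CAPC_IMAGE_SIZE, &info->cap.main);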
 
 /*
  * Category F - Flash
index de5d481b03287dfe826b202fb62114e91c9fa9a5..c43c81f5f9784ea6b9ac0691753aa7b05fdd39a7 100644 (file)
@@ -480,12 +480,14 @@ static int msp_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
        struct msp_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (state->radio)
+       if (vt->type != V4L2_TUNER_ANALOG_TV)
                return 0;
-       if (state->opmode == OPMODE_AUTOSELECT)
-               msp_detect_stereo(client);
-       vt->audmode    = state->audmode;
-       vt->rxsubchans = state->rxsubchans;
+       if (!state->radio) {
+               if (state->opmode == OPMODE_AUTOSELECT)
+                       msp_detect_stereo(client);
+               vt->rxsubchans = state->rxsubchans;
+       }
+       vt->audmode = state->audmode;
        vt->capability |= V4L2_TUNER_CAP_STEREO |
                V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
        return 0;
index bc0c23a1009c2a89b5b40841d2de802c17a62037..63f8a0cc33d89ec167701c1bee1a0614eebed89f 100644 (file)
@@ -444,12 +444,9 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)
 {
        struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
        struct mx1_camera_dev *pcdev = ici->priv;
-       int ret;
 
-       if (pcdev->icd) {
-               ret = -EBUSY;
-               goto ebusy;
-       }
+       if (pcdev->icd)
+               return -EBUSY;
 
        dev_info(icd->dev.parent, "MX1 Camera driver attached to camera %d\n",
                 icd->devnum);
@@ -458,8 +455,7 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)
 
        pcdev->icd = icd;
 
-ebusy:
-       return ret;
+       return 0;
 }
 
 static void mx1_camera_remove_device(struct soc_camera_device *icd)
index 4ada9be1d430d0f50f9f63e866b5ca82e835226a..4d07c58444024c48dd2bb7859d7149da324e6d71 100644 (file)
@@ -982,6 +982,14 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
        startindex = (vout->vid == OMAP_VIDEO1) ?
                video1_numbuffers : video2_numbuffers;
 
+       /* Check the size of the buffer */
+       if (*size > vout->buffer_size) {
+               v4l2_err(&vout->vid_dev->v4l2_dev,
+                               "buffer allocation mismatch [%u] [%u]\n",
+                               *size, vout->buffer_size);
+               return -ENOMEM;
+       }
+
        for (i = startindex; i < *count; i++) {
                vout->buffer_size = *size;
 
@@ -1228,6 +1236,14 @@ static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
                                (vma->vm_pgoff << PAGE_SHIFT));
                return -EINVAL;
        }
+       /* Check the size of the buffer */
+       if (size > vout->buffer_size) {
+               v4l2_err(&vout->vid_dev->v4l2_dev,
+                               "insufficient memory [%lu] [%u]\n",
+                               size, vout->buffer_size);
+               return -ENOMEM;
+       }
+
        q->bufs[i]->baddr = vma->vm_start;
 
        vma->vm_flags |= VM_RESERVED;
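
Both paths now enforce the same bound before any mapping is set up. The
check, distilled into a helper (the helper name is ours; the struct type is
assumed from the driver's usage):

	static inline int omap_vout_size_ok(struct omap_vout_device *vout,
					    unsigned long size)
	{
		/* reject requests larger than the preallocated buffer */
		return size > vout->buffer_size ? -ENOMEM : 0;
	}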
@@ -2391,7 +2407,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
                /* Register the Video device with V4L2
                 */
                vfd = vout->vfd;
-               if (video_register_device(vfd, VFL_TYPE_GRABBER, k + 1) < 0) {
+               if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
                        dev_err(&pdev->dev, ": Could not register "
                                        "Video for Linux device\n");
                        vfd->minor = -1;
index 2aa6a76c5e593742090843242dfdb578a38d2df2..8ae74817a1105251d07fff196d63c26b8c736de5 100644 (file)
@@ -193,7 +193,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
                return -EINVAL;
 
        if (cpu_is_omap24xx()) {
-               if (crop->height != win->w.height) {
+               if (try_crop.height != win->w.height) {
                        /* If we're resizing vertically, we can't support a
                         * crop width wider than 768 pixels.
                         */
@@ -202,7 +202,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
                }
        }
        /* vertical resizing */
-       vresize = (1024 * crop->height) / win->w.height;
+       vresize = (1024 * try_crop.height) / win->w.height;
        if (cpu_is_omap24xx() && (vresize > 2048))
                vresize = 2048;
        else if (cpu_is_omap34xx() && (vresize > 4096))
@@ -221,7 +221,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
                        try_crop.height = 2;
        }
        /* horizontal resizing */
-       hresize = (1024 * crop->width) / win->w.width;
+       hresize = (1024 * try_crop.width) / win->w.width;
        if (cpu_is_omap24xx() && (hresize > 2048))
                hresize = 2048;
        else if (cpu_is_omap34xx() && (hresize > 4096))
index c9fd04ee70a8ae766811059c6942657334ac7621..94b6ed89e195866e8eb1af83950faea772c7d0c6 100644 (file)
@@ -1748,7 +1748,7 @@ static int isp_register_entities(struct isp_device *isp)
                goto done;
 
        /* Register external entities */
-       for (subdevs = pdata->subdevs; subdevs->subdevs; ++subdevs) {
+       for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
                struct v4l2_subdev *sensor;
                struct media_entity *input;
                unsigned int flags;
index 9d0dd08f57f8ccc4b360424e4a53ad45883c07a1..e98d38212791034cdac61d11a24dec00ca636297 100644 (file)
@@ -3046,6 +3046,8 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw)
        if (hdw->input_dirty || hdw->audiomode_dirty || hdw->force_dirty) {
                struct v4l2_tuner vt;
                memset(&vt, 0, sizeof(vt));
+               vt.type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
+                       V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
                vt.audmode = hdw->audiomode_val;
                v4l2_device_call_all(&hdw->v4l2_dev, 0, tuner, s_tuner, &vt);
        }
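
The msp3400, pvrusb2 and tuner-core hunks in this series converge on one
contract: the bridge driver decides the tuner type before every s_tuner or
g_tuner call, and each subdev fills in fields only when the type matches its
current mode. The bridge side, distilled ('is_radio' is illustrative):

	struct v4l2_tuner vt;

	memset(&vt, 0, sizeof(vt));
	vt.type = is_radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
	v4l2_device_call_all(v4l2_dev, 0, tuner, s_tuner, &vt);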
@@ -5171,6 +5173,8 @@ void pvr2_hdw_status_poll(struct pvr2_hdw *hdw)
 {
        struct v4l2_tuner *vtp = &hdw->tuner_signal_info;
        memset(vtp, 0, sizeof(*vtp));
+       vtp->type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
+               V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
        hdw->tuner_signal_stale = 0;
        /* Note: There apparently is no replacement for VIDIOC_CROPCAP
           using v4l2-subdev - therefore we can't support that AT ALL right
index 1593f8deb8105e18bc9fd1d5b09e5c8b8e9f68ea..760b4de13adf6fa852b358ae88a9751f7cd1b597 100644 (file)
@@ -1414,7 +1414,7 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
        {
                ARG_DEF(struct pwc_probe, probe)
 
-               strcpy(ARGR(probe).name, pdev->vdev->name);
+               strcpy(ARGR(probe).name, pdev->vdev.name);
                ARGR(probe).type = pdev->type;
                ARG_OUT(probe)
                break;
index 356cd42b593b3f36b72569d24217768c220e6c70..b0bde5a87c8a8b08575973f3158dde40ed546cc2 100644 (file)
@@ -40,7 +40,7 @@
   Oh yes, convention: to distinguish between all the various pointers to
    device-structures, I use these names for the pointer variables:
    udev: struct usb_device *
-   vdev: struct video_device *
+   vdev: struct video_device (member of pwc_dev)
   pdev: struct pwc_device *
 */
 
@@ -152,6 +152,7 @@ static ssize_t pwc_video_read(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos);
 static unsigned int pwc_video_poll(struct file *file, poll_table *wait);
 static int  pwc_video_mmap(struct file *file, struct vm_area_struct *vma);
+static void pwc_video_release(struct video_device *vfd);
 
 static const struct v4l2_file_operations pwc_fops = {
        .owner =        THIS_MODULE,
@@ -164,41 +165,11 @@ static const struct v4l2_file_operations pwc_fops = {
 };
 static struct video_device pwc_template = {
        .name =         "Philips Webcam",       /* Filled in later */
-       .release =      video_device_release,
+       .release =      pwc_video_release,
        .fops =         &pwc_fops,
+       .ioctl_ops =    &pwc_ioctl_ops,
 };
 
-/***************************************************************************/
-
-/* Okay, this is some magic that I worked out and the reasoning behind it...
-
-   The biggest problem with any USB device is of course: "what to do
-   when the user unplugs the device while it is in use by an application?"
-   We have several options:
-   1) Curse them with the 7 plagues when they do (requires divine intervention)
-   2) Tell them not to (won't work: they'll do it anyway)
-   3) Oops the kernel (this will have a negative effect on a user's uptime)
-   4) Do something sensible.
-
-   Of course, we go for option 4.
-
-   It happens that this device will be linked to two times, once from
-   usb_device and once from the video_device in their respective 'private'
-   pointers. This is done when the device is probed() and all initialization
-   succeeded. The pwc_device struct links back to both structures.
-
-   When a device is unplugged while in use it will be removed from the
-   list of known USB devices; I also de-register it as a V4L device, but
-   unfortunately I can't free the memory since the struct is still in use
-   by the file descriptor. This free-ing is then deferend until the first
-   opportunity. Crude, but it works.
-
-   A small 'advantage' is that if a user unplugs the cam and plugs it back
-   in, it should get assigned the same video device minor, but unfortunately
-   it's non-trivial to re-link the cam back to the video device... (that
-   would surely be magic! :))
-*/
-
 /***************************************************************************/
 /* Private functions */
 
@@ -1016,16 +987,15 @@ static ssize_t show_snapshot_button_status(struct device *class_dev,
 static DEVICE_ATTR(button, S_IRUGO | S_IWUSR, show_snapshot_button_status,
                   NULL);
 
-static int pwc_create_sysfs_files(struct video_device *vdev)
+static int pwc_create_sysfs_files(struct pwc_device *pdev)
 {
-       struct pwc_device *pdev = video_get_drvdata(vdev);
        int rc;
 
-       rc = device_create_file(&vdev->dev, &dev_attr_button);
+       rc = device_create_file(&pdev->vdev.dev, &dev_attr_button);
        if (rc)
                goto err;
        if (pdev->features & FEATURE_MOTOR_PANTILT) {
-               rc = device_create_file(&vdev->dev, &dev_attr_pan_tilt);
+               rc = device_create_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
                if (rc)
                        goto err_button;
        }
@@ -1033,19 +1003,17 @@ static int pwc_create_sysfs_files(struct video_device *vdev)
        return 0;
 
 err_button:
-       device_remove_file(&vdev->dev, &dev_attr_button);
+       device_remove_file(&pdev->vdev.dev, &dev_attr_button);
 err:
        PWC_ERROR("Could not create sysfs files.\n");
        return rc;
 }
 
-static void pwc_remove_sysfs_files(struct video_device *vdev)
+static void pwc_remove_sysfs_files(struct pwc_device *pdev)
 {
-       struct pwc_device *pdev = video_get_drvdata(vdev);
-
        if (pdev->features & FEATURE_MOTOR_PANTILT)
-               device_remove_file(&vdev->dev, &dev_attr_pan_tilt);
-       device_remove_file(&vdev->dev, &dev_attr_button);
+               device_remove_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
+       device_remove_file(&pdev->vdev.dev, &dev_attr_button);
 }
 
 #ifdef CONFIG_USB_PWC_DEBUG
@@ -1106,7 +1074,7 @@ static int pwc_video_open(struct file *file)
                if (ret >= 0)
                {
                        PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
-                                       pdev->vdev->name,
+                                       pdev->vdev.name,
                                        pwc_sensor_type_to_string(i), i);
                }
        }
@@ -1180,16 +1148,15 @@ static int pwc_video_open(struct file *file)
        return 0;
 }
 
-
-static void pwc_cleanup(struct pwc_device *pdev)
+static void pwc_video_release(struct video_device *vfd)
 {
-       pwc_remove_sysfs_files(pdev->vdev);
-       video_unregister_device(pdev->vdev);
+       struct pwc_device *pdev = container_of(vfd, struct pwc_device, vdev);
+       int hint;
 
-#ifdef CONFIG_USB_PWC_INPUT_EVDEV
-       if (pdev->button_dev)
-               input_unregister_device(pdev->button_dev);
-#endif
+       /* search device_hint[] table if we occupy a slot, by any chance */
+       for (hint = 0; hint < MAX_DEV_HINTS; hint++)
+               if (device_hint[hint].pdev == pdev)
+                       device_hint[hint].pdev = NULL;
 
        kfree(pdev);
 }
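
This is the standard lifetime pattern for an embedded video_device: the
enclosing structure is freed only from the .release() callback, which the
V4L2 core invokes once the last file handle is gone. In miniature (the
driver names are illustrative):

	struct my_cam {
		struct video_device vdev;	/* embedded, not a pointer */
	};

	static void my_cam_release(struct video_device *vfd)
	{
		struct my_cam *cam = container_of(vfd, struct my_cam, vdev);

		kfree(cam);	/* safe: no user can reach the device now */
	}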
@@ -1199,7 +1166,7 @@ static int pwc_video_close(struct file *file)
 {
        struct video_device *vdev = file->private_data;
        struct pwc_device *pdev;
-       int i, hint;
+       int i;
 
        PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
 
@@ -1234,12 +1201,6 @@ static int pwc_video_close(struct file *file)
                }
                pdev->vopen--;
                PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
-       } else {
-               pwc_cleanup(pdev);
-               /* search device_hint[] table if we occupy a slot, by any chance */
-               for (hint = 0; hint < MAX_DEV_HINTS; hint++)
-                       if (device_hint[hint].pdev == pdev)
-                               device_hint[hint].pdev = NULL;
        }
 
        return 0;
@@ -1715,19 +1676,12 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        init_waitqueue_head(&pdev->frameq);
        pdev->vcompression = pwc_preferred_compression;
 
-       /* Allocate video_device structure */
-       pdev->vdev = video_device_alloc();
-       if (!pdev->vdev) {
-               PWC_ERROR("Err, cannot allocate video_device struture. Failing probe.");
-               rc = -ENOMEM;
-               goto err_free_mem;
-       }
-       memcpy(pdev->vdev, &pwc_template, sizeof(pwc_template));
-       pdev->vdev->parent = &intf->dev;
-       pdev->vdev->lock = &pdev->modlock;
-       pdev->vdev->ioctl_ops = &pwc_ioctl_ops;
-       strcpy(pdev->vdev->name, name);
-       video_set_drvdata(pdev->vdev, pdev);
+       /* Init video_device structure */
+       memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
+       pdev->vdev.parent = &intf->dev;
+       pdev->vdev.lock = &pdev->modlock;
+       strcpy(pdev->vdev.name, name);
+       video_set_drvdata(&pdev->vdev, pdev);
 
        pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
        PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
@@ -1746,8 +1700,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
                }
        }
 
-       pdev->vdev->release = video_device_release;
-
        /* occupy slot */
        if (hint < MAX_DEV_HINTS)
                device_hint[hint].pdev = pdev;
@@ -1759,16 +1711,16 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        pwc_set_leds(pdev, 0, 0);
        pwc_camera_power(pdev, 0);
 
-       rc = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+       rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
        if (rc < 0) {
                PWC_ERROR("Failed to register as video device (%d).\n", rc);
-               goto err_video_release;
+               goto err_free_mem;
        }
-       rc = pwc_create_sysfs_files(pdev->vdev);
+       rc = pwc_create_sysfs_files(pdev);
        if (rc)
                goto err_video_unreg;
 
-       PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
+       PWC_INFO("Registered as %s.\n", video_device_node_name(&pdev->vdev));
 
 #ifdef CONFIG_USB_PWC_INPUT_EVDEV
        /* register webcam snapshot button input device */
@@ -1776,7 +1728,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        if (!pdev->button_dev) {
                PWC_ERROR("Err, insufficient memory for webcam snapshot button device.");
                rc = -ENOMEM;
-               pwc_remove_sysfs_files(pdev->vdev);
+               pwc_remove_sysfs_files(pdev);
                goto err_video_unreg;
        }
 
@@ -1794,7 +1746,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        if (rc) {
                input_free_device(pdev->button_dev);
                pdev->button_dev = NULL;
-               pwc_remove_sysfs_files(pdev->vdev);
+               pwc_remove_sysfs_files(pdev);
                goto err_video_unreg;
        }
 #endif
@@ -1804,10 +1756,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
 err_video_unreg:
        if (hint < MAX_DEV_HINTS)
                device_hint[hint].pdev = NULL;
-       video_unregister_device(pdev->vdev);
-       pdev->vdev = NULL;      /* So we don't try to release it below */
-err_video_release:
-       video_device_release(pdev->vdev);
+       video_unregister_device(&pdev->vdev);
 err_free_mem:
        kfree(pdev);
        return rc;
@@ -1816,10 +1765,8 @@ err_free_mem:
 /* The user yanked out the cable... */
 static void usb_pwc_disconnect(struct usb_interface *intf)
 {
-       struct pwc_device *pdev;
-       int hint;
+       struct pwc_device *pdev  = usb_get_intfdata(intf);
 
-       pdev = usb_get_intfdata (intf);
        mutex_lock(&pdev->modlock);
        usb_set_intfdata (intf, NULL);
        if (pdev == NULL) {
@@ -1836,30 +1783,25 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
        }
 
        /* We got unplugged; this is signalled by an EPIPE error code */
-       if (pdev->vopen) {
-               PWC_INFO("Disconnected while webcam is in use!\n");
-               pdev->error_status = EPIPE;
-       }
+       pdev->error_status = EPIPE;
+       pdev->unplugged = 1;
 
        /* Alert waiting processes */
        wake_up_interruptible(&pdev->frameq);
-       /* Wait until device is closed */
-       if (pdev->vopen) {
-               pdev->unplugged = 1;
-               pwc_iso_stop(pdev);
-       } else {
-               /* Device is closed, so we can safely unregister it */
-               PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n");
 
-disconnect_out:
-               /* search device_hint[] table if we occupy a slot, by any chance */
-               for (hint = 0; hint < MAX_DEV_HINTS; hint++)
-                       if (device_hint[hint].pdev == pdev)
-                               device_hint[hint].pdev = NULL;
-       }
+       /* No need to keep the urbs around after disconnection */
+       pwc_isoc_cleanup(pdev);
 
+disconnect_out:
        mutex_unlock(&pdev->modlock);
-       pwc_cleanup(pdev);
+
+       pwc_remove_sysfs_files(pdev);
+       video_unregister_device(&pdev->vdev);
+
+#ifdef CONFIG_USB_PWC_INPUT_EVDEV
+       if (pdev->button_dev)
+               input_unregister_device(pdev->button_dev);
+#endif
 }
 
 
index e947766337d63d9233ed333539705e677f81a742..083f8b15df7337323666f01af18dc9ab5aa183bc 100644 (file)
@@ -162,9 +162,9 @@ struct pwc_imgbuf
 
 struct pwc_device
 {
-   struct video_device *vdev;
+       struct video_device vdev;
 
-   /* Pointer to our usb_device */
+   /* Pointer to our usb_device, may be NULL after unplug */
    struct usb_device *udev;
 
    int type;                    /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
index d142b40ea64efd1135dbfc242b5fd7d4c7092697..81b4a826ee5e3338d25910d52e498c0cd0779a1f 100644 (file)
@@ -1,7 +1,7 @@
 /*
- * Samsung S5P SoC series camera interface (camera capture) driver
+ * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd
+ * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
  * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -262,12 +262,7 @@ static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
 {
        if (!fr || plane >= fr->fmt->memplanes)
                return 0;
-
-       dbg("%s: w: %d. h: %d. depth[%d]: %d",
-           __func__, fr->width, fr->height, plane, fr->fmt->depth[plane]);
-
        return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
-
 }
 
 static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
@@ -283,24 +278,14 @@ static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
 
        *num_planes = fmt->memplanes;
 
-       dbg("%s, buffer count=%d, plane count=%d",
-           __func__, *num_buffers, *num_planes);
-
        for (i = 0; i < fmt->memplanes; i++) {
                sizes[i] = get_plane_size(&ctx->d_frame, i);
-               dbg("plane: %u, plane_size: %lu", i, sizes[i]);
                allocators[i] = ctx->fimc_dev->alloc_ctx;
        }
 
        return 0;
 }
 
-static int buffer_init(struct vb2_buffer *vb)
-{
-       /* TODO: */
-       return 0;
-}
-
 static int buffer_prepare(struct vb2_buffer *vb)
 {
        struct vb2_queue *vq = vb->vb2_queue;
@@ -380,7 +365,6 @@ static struct vb2_ops fimc_capture_qops = {
        .queue_setup            = queue_setup,
        .buf_prepare            = buffer_prepare,
        .buf_queue              = buffer_queue,
-       .buf_init               = buffer_init,
        .wait_prepare           = fimc_unlock,
        .wait_finish            = fimc_lock,
        .start_streaming        = start_streaming,
@@ -903,6 +887,7 @@ err_vd_reg:
 err_v4l2_reg:
        v4l2_device_unregister(v4l2_dev);
 err_info:
+       kfree(ctx);
        dev_err(&fimc->pdev->dev, "failed to install\n");
        return ret;
 }
index dc91a8511af66ca1bb0f898a68dba0ac4c873257..bdf19ada91725eb45a03d6b710691dd9778f6a97 100644 (file)
@@ -1,9 +1,8 @@
 /*
- * S5P camera interface (video postprocessor) driver
+ * Samsung S5P/EXYNOS4 SoC series camera interface (video postprocessor) driver
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd
- *
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
+ * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published
@@ -42,7 +41,6 @@ static struct fimc_fmt fimc_formats[] = {
                .color          = S5P_FIMC_RGB565,
                .memplanes      = 1,
                .colplanes      = 1,
-               .mbus_code      = V4L2_MBUS_FMT_RGB565_2X8_BE,
                .flags          = FMT_FLAGS_M2M,
        }, {
                .name           = "BGR666",
@@ -232,11 +230,7 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
                        return 0;
                }
        }
-
        *shift = 0, *ratio = 1;
-
-       dbg("s: %d, t: %d, shift: %d, ratio: %d",
-           src, tar, *shift, *ratio);
        return 0;
 }
 
@@ -268,10 +262,8 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
                err("invalid source size: %d x %d", sx, sy);
                return -EINVAL;
        }
-
        sc->real_width = sx;
        sc->real_height = sy;
-       dbg("sx= %d, sy= %d, tx= %d, ty= %d", sx, sy, tx, ty);
 
        ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
        if (ret)
@@ -711,22 +703,18 @@ static int fimc_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
        f = ctx_get_frame(ctx, vq->type);
        if (IS_ERR(f))
                return PTR_ERR(f);
-
        /*
         * Return the number of non-contiguous planes (plane buffers)
         * depending on the configured color format.
         */
-       if (f->fmt)
-               *num_planes = f->fmt->memplanes;
+       if (!f->fmt)
+               return -EINVAL;
 
+       *num_planes = f->fmt->memplanes;
        for (i = 0; i < f->fmt->memplanes; i++) {
-               sizes[i] = (f->width * f->height * f->fmt->depth[i]) >> 3;
+               sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
                allocators[i] = ctx->fimc_dev->alloc_ctx;
        }
-
-       if (*num_buffers == 0)
-               *num_buffers = 1;
-
        return 0;
 }
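
The corrected sizing uses the full frame dimensions and the per-plane depth
in bits. Worked example (values assumed): a 1280x720 plane at 12 bpp needs
1280 * 720 * 12 / 8 = 1382400 bytes.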
 
@@ -852,7 +840,7 @@ struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask)
 
        for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
                fmt = &fimc_formats[i];
-               if (fmt->fourcc == f->fmt.pix.pixelformat &&
+               if (fmt->fourcc == f->fmt.pix_mp.pixelformat &&
                   (fmt->flags & mask))
                        break;
        }
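
struct v4l2_format carries a union, and for the multi-planar buffer types
only the pix_mp view is valid, hence the fix above. Sketch:

	struct v4l2_format f;

	f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;	/* a two-plane fourcc */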
index 3beb1e5320ce71ef674b5f810d8014ec454c8d68..1f70772daaf0056fad709c7f26845595191f6ab0 100644 (file)
@@ -1,7 +1,5 @@
 /*
- * Copyright (c) 2010 Samsung Electronics
- *
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -135,9 +133,10 @@ enum fimc_color_fmt {
  * @name: format description
  * @fourcc: the fourcc code for this format, 0 if not applicable
  * @color: the corresponding fimc_color_fmt
- * @depth: per plane driver's private 'number of bits per pixel'
  * @memplanes: number of physically non-contiguous data planes
  * @colplanes: number of physically contiguous data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @flags: flags indicating which operation mode the format applies to
  */
 struct fimc_fmt {
        enum v4l2_mbus_pixelcode mbus_code;
@@ -171,7 +170,7 @@ struct fimc_dma_offset {
 };
 
 /**
- * struct fimc_effect - the configuration data for the "Arbitrary" image effect
+ * struct fimc_effect - color effect information
  * @type:      effect type
  * @pat_cb:    cr value when type is "arbitrary"
  * @pat_cr:    cr value when type is "arbitrary"
@@ -184,7 +183,6 @@ struct fimc_effect {
 
 /**
  * struct fimc_scaler - the configuration data for FIMC internal scaler
- *
  * @scaleup_h:         flag indicating scaling up horizontally
  * @scaleup_v:         flag indicating scaling up vertically
  * @copy_mode:         flag indicating transparent DMA transfer (no scaling
@@ -220,7 +218,6 @@ struct fimc_scaler {
 
 /**
  * struct fimc_addr - the FIMC physical address set for DMA
- *
  * @y:  luminance plane physical address
  * @cb:         Cb plane physical address
  * @cr:         Cr plane physical address
@@ -234,6 +231,7 @@ struct fimc_addr {
 /**
  * struct fimc_vid_buffer - the driver's video buffer
  * @vb:    v4l videobuf buffer
+ * @list:  linked list structure for buffer queue
  * @paddr: precalculated physical address set
  * @index: buffer index for the output DMA engine
  */
@@ -254,11 +252,10 @@ struct fimc_vid_buffer {
  * @offs_v:    image vertical pixel offset
  * @width:     image pixel width
  * @height:    image pixel height
- * @paddr:     image frame buffer physical addresses
- * @buf_cnt:   number of buffers depending on a color format
  * @payload:   image size in bytes (w x h x bpp)
- * @color:     color format
+ * @paddr:     image frame buffer physical addresses
  * @dma_offset:        DMA offset in bytes
+ * @fmt:       fimc color format pointer
  */
 struct fimc_frame {
        u32     f_width;
@@ -390,21 +387,22 @@ struct fimc_ctx;
 
 /**
  * struct fimc_dev - abstraction for FIMC entity
- *
  * @slock:     the spinlock protecting this data structure
  * @lock:      the mutex protecting this data structure
  * @pdev:      pointer to the FIMC platform device
  * @pdata:     pointer to the device platform data
+ * @variant:   the IP variant information
  * @id:                FIMC device index (0..FIMC_MAX_DEVS)
  * @num_clocks: the number of clocks managed by this device instance
- * @clock[]:   the clocks required for FIMC operation
+ * @clock:     clocks required for FIMC operation
  * @regs:      the mapped hardware registers
  * @regs_res:  the resource claimed for IO registers
- * @irq:       interrupt number of the FIMC subdevice
- * @irq_queue:
+ * @irq:       FIMC interrupt number
+ * @irq_queue: interrupt handler waitqueue
  * @m2m:       memory-to-memory V4L2 device information
  * @vid_cap:   camera capture device information
  * @state:     flags used to synchronize m2m and capture mode operation
+ * @alloc_ctx: videobuf2 memory allocator context
  */
 struct fimc_dev {
        spinlock_t                      slock;
@@ -427,8 +425,7 @@ struct fimc_dev {
 
 /**
  * fimc_ctx - the device context data
- *
- * @lock:              mutex protecting this data structure
+ * @slock:             spinlock protecting this data structure
  * @s_frame:           source frame properties
  * @d_frame:           destination frame properties
  * @out_order_1p:      output 1-plane YCBCR order
index ff6c0e97563e90339458e1eb1c499ceed6009475..d4ee24bf692820dd0f58004d480d010d1be18c29 100644 (file)
@@ -963,7 +963,7 @@ static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
         * to work with other protocols.
         */
        if (!ir->active) {
-               timeout = jiffies + jiffies_to_msecs(15);
+               timeout = jiffies + msecs_to_jiffies(15);
                mod_timer(&ir->timer, timeout);
                ir->active = true;
        }
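
The two helpers convert in opposite directions, so the old code added a
millisecond count to a jiffies timestamp. For "15 ms from now" only the
fixed form is correct:

	timeout = jiffies + msecs_to_jiffies(15);	/* 15 ms from now */
	/* jiffies_to_msecs(15) is the millisecond value of 15 jiffies
	 * (60 at HZ=250) and is not a valid jiffies offset. */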
index 9363ed91a4cbda40a91ce2e1bf3958e9da097283..a03945ab9f08f14218501d28a5eee4ba4cd629f8 100644 (file)
@@ -714,29 +714,34 @@ static int tuner_remove(struct i2c_client *client)
  * returns 0.
  * This function is needed for boards that have a separate tuner for
  * radio (like devices with tea5767).
+ * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
+ *       select a TV frequency. So, t_mode = T_ANALOG_TV could actually
+ *       be used to represent a digital TV too.
  */
 static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
 {
-       if ((1 << mode & t->mode_mask) == 0)
+       int t_mode;
+       if (mode == V4L2_TUNER_RADIO)
+               t_mode = T_RADIO;
+       else
+               t_mode = T_ANALOG_TV;
+
+       if ((t_mode & t->mode_mask) == 0)
                return -EINVAL;
 
        return 0;
 }
 
 /**
- * set_mode_freq - Switch tuner to other mode.
- * @client:    struct i2c_client pointer
+ * set_mode - Switch the tuner to the requested mode.
  * @t:         a pointer to the module's internal struct_tuner
  * @mode:      enum v4l2_type (radio or TV)
- * @freq:      frequency to set (0 means to use the previous one)
  *
  * If the tuner doesn't support the needed mode (radio or TV), prints a
  * debug message and returns -EINVAL, changing its state to standby.
- * Otherwise, changes the state and sets frequency to the last value, if
- * the tuner can sleep or if it supports both Radio and TV.
+ * Otherwise, changes the mode and returns 0.
  */
-static int set_mode_freq(struct i2c_client *client, struct tuner *t,
-                        enum v4l2_tuner_type mode, unsigned int freq)
+static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
 {
        struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
 
@@ -752,17 +757,27 @@ static int set_mode_freq(struct i2c_client *client, struct tuner *t,
                t->mode = mode;
                tuner_dbg("Changing to mode %d\n", mode);
        }
+       return 0;
+}
+
+/**
+ * set_freq - Set the tuner to the desired frequency.
+ * @t:         a pointer to the module's internal struct_tuner
+ * @freq:      frequency to set (0 means to use the current frequency)
+ */
+static void set_freq(struct tuner *t, unsigned int freq)
+{
+       struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
+
        if (t->mode == V4L2_TUNER_RADIO) {
-               if (freq)
-                       t->radio_freq = freq;
-               set_radio_freq(client, t->radio_freq);
+               if (!freq)
+                       freq = t->radio_freq;
+               set_radio_freq(client, freq);
        } else {
-               if (freq)
-                       t->tv_freq = freq;
-               set_tv_freq(client, t->tv_freq);
+               if (!freq)
+                       freq = t->tv_freq;
+               set_tv_freq(client, freq);
        }
-
-       return 0;
 }
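
Together the two helpers replace the old set_mode_freq(); call sites later
in this patch simply compose them:

	if (set_mode(t, f->type) == 0)
		set_freq(t, f->frequency);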
 
 /*
@@ -817,7 +832,8 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
 /**
  * tuner_fixup_std - force a given video standard variant
  *
- * @t: tuner internal struct
+ * @t: tuner internal struct
+ * @std:       TV standard
  *
  * A few devices or drivers have problems detecting some standard variations.
  * On other operating systems, the drivers generally have a per-country
@@ -827,57 +843,39 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
  * to distinguish all video standard variations, a modprobe parameter can
  * be used to force a video standard match.
  */
-static int tuner_fixup_std(struct tuner *t)
+static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
 {
-       if ((t->std & V4L2_STD_PAL) == V4L2_STD_PAL) {
+       if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) {
                switch (pal[0]) {
                case '6':
-                       tuner_dbg("insmod fixup: PAL => PAL-60\n");
-                       t->std = V4L2_STD_PAL_60;
-                       break;
+                       return V4L2_STD_PAL_60;
                case 'b':
                case 'B':
                case 'g':
                case 'G':
-                       tuner_dbg("insmod fixup: PAL => PAL-BG\n");
-                       t->std = V4L2_STD_PAL_BG;
-                       break;
+                       return V4L2_STD_PAL_BG;
                case 'i':
                case 'I':
-                       tuner_dbg("insmod fixup: PAL => PAL-I\n");
-                       t->std = V4L2_STD_PAL_I;
-                       break;
+                       return V4L2_STD_PAL_I;
                case 'd':
                case 'D':
                case 'k':
                case 'K':
-                       tuner_dbg("insmod fixup: PAL => PAL-DK\n");
-                       t->std = V4L2_STD_PAL_DK;
-                       break;
+                       return V4L2_STD_PAL_DK;
                case 'M':
                case 'm':
-                       tuner_dbg("insmod fixup: PAL => PAL-M\n");
-                       t->std = V4L2_STD_PAL_M;
-                       break;
+                       return V4L2_STD_PAL_M;
                case 'N':
                case 'n':
-                       if (pal[1] == 'c' || pal[1] == 'C') {
-                               tuner_dbg("insmod fixup: PAL => PAL-Nc\n");
-                               t->std = V4L2_STD_PAL_Nc;
-                       } else {
-                               tuner_dbg("insmod fixup: PAL => PAL-N\n");
-                               t->std = V4L2_STD_PAL_N;
-                       }
-                       break;
-               case '-':
-                       /* default parameter, do nothing */
-                       break;
+                       if (pal[1] == 'c' || pal[1] == 'C')
+                               return V4L2_STD_PAL_Nc;
+                       return V4L2_STD_PAL_N;
                default:
                        tuner_warn("pal= argument not recognised\n");
                        break;
                }
        }
-       if ((t->std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
+       if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
                switch (secam[0]) {
                case 'b':
                case 'B':
@@ -885,63 +883,42 @@ static int tuner_fixup_std(struct tuner *t)
                case 'G':
                case 'h':
                case 'H':
-                       tuner_dbg("insmod fixup: SECAM => SECAM-BGH\n");
-                       t->std = V4L2_STD_SECAM_B |
-                                V4L2_STD_SECAM_G |
-                                V4L2_STD_SECAM_H;
-                       break;
+                       return V4L2_STD_SECAM_B |
+                              V4L2_STD_SECAM_G |
+                              V4L2_STD_SECAM_H;
                case 'd':
                case 'D':
                case 'k':
                case 'K':
-                       tuner_dbg("insmod fixup: SECAM => SECAM-DK\n");
-                       t->std = V4L2_STD_SECAM_DK;
-                       break;
+                       return V4L2_STD_SECAM_DK;
                case 'l':
                case 'L':
-                       if ((secam[1] == 'C') || (secam[1] == 'c')) {
-                               tuner_dbg("insmod fixup: SECAM => SECAM-L'\n");
-                               t->std = V4L2_STD_SECAM_LC;
-                       } else {
-                               tuner_dbg("insmod fixup: SECAM => SECAM-L\n");
-                               t->std = V4L2_STD_SECAM_L;
-                       }
-                       break;
-               case '-':
-                       /* default parameter, do nothing */
-                       break;
+                       if ((secam[1] == 'C') || (secam[1] == 'c'))
+                               return V4L2_STD_SECAM_LC;
+                       return V4L2_STD_SECAM_L;
                default:
                        tuner_warn("secam= argument not recognised\n");
                        break;
                }
        }
 
-       if ((t->std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
+       if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
                switch (ntsc[0]) {
                case 'm':
                case 'M':
-                       tuner_dbg("insmod fixup: NTSC => NTSC-M\n");
-                       t->std = V4L2_STD_NTSC_M;
-                       break;
+                       return V4L2_STD_NTSC_M;
                case 'j':
                case 'J':
-                       tuner_dbg("insmod fixup: NTSC => NTSC_M_JP\n");
-                       t->std = V4L2_STD_NTSC_M_JP;
-                       break;
+                       return V4L2_STD_NTSC_M_JP;
                case 'k':
                case 'K':
-                       tuner_dbg("insmod fixup: NTSC => NTSC_M_KR\n");
-                       t->std = V4L2_STD_NTSC_M_KR;
-                       break;
-               case '-':
-                       /* default parameter, do nothing */
-                       break;
+                       return V4L2_STD_NTSC_M_KR;
                default:
                        tuner_info("ntsc= argument not recognised\n");
                        break;
                }
        }
-       return 0;
+       return std;
 }
 
 /*
@@ -1016,7 +993,7 @@ static void tuner_status(struct dvb_frontend *fe)
        case V4L2_TUNER_RADIO:
                p = "radio";
                break;
-       case V4L2_TUNER_DIGITAL_TV:
+       case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
                p = "digital TV";
                break;
        case V4L2_TUNER_ANALOG_TV:
@@ -1058,10 +1035,9 @@ static void tuner_status(struct dvb_frontend *fe)
 static int tuner_s_radio(struct v4l2_subdev *sd)
 {
        struct tuner *t = to_tuner(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (set_mode_freq(client, t, V4L2_TUNER_RADIO, 0) == -EINVAL)
-               return 0;
+       if (set_mode(t, V4L2_TUNER_RADIO) == 0)
+               set_freq(t, 0);
        return 0;
 }
 
@@ -1072,16 +1048,20 @@ static int tuner_s_radio(struct v4l2_subdev *sd)
 /**
  * tuner_s_power - controls the power state of the tuner
  * @sd: pointer to struct v4l2_subdev
- * @on: a zero value puts the tuner to sleep
+ * @on: a zero value puts the tuner to sleep, non-zero wakes it up
  */
 static int tuner_s_power(struct v4l2_subdev *sd, int on)
 {
        struct tuner *t = to_tuner(sd);
        struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
 
-       /* FIXME: Why this function don't wake the tuner if on != 0 ? */
-       if (on)
+       if (on) {
+               if (t->standby && set_mode(t, t->mode) == 0) {
+                       tuner_dbg("Waking up tuner\n");
+                       set_freq(t, 0);
+               }
                return 0;
+       }
 
        tuner_dbg("Putting tuner to sleep\n");
        t->standby = true;
@@ -1093,28 +1073,36 @@ static int tuner_s_power(struct v4l2_subdev *sd, int on)
 static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
 {
        struct tuner *t = to_tuner(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (set_mode_freq(client, t, V4L2_TUNER_ANALOG_TV, 0) == -EINVAL)
+       if (set_mode(t, V4L2_TUNER_ANALOG_TV))
                return 0;
 
-       t->std = std;
-       tuner_fixup_std(t);
-
+       t->std = tuner_fixup_std(t, std);
+       if (t->std != std)
+               tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
+       set_freq(t, 0);
        return 0;
 }
 
 static int tuner_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
 {
        struct tuner *t = to_tuner(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-
-       if (set_mode_freq(client, t, f->type, f->frequency) == -EINVAL)
-               return 0;
 
+       if (set_mode(t, f->type) == 0)
+               set_freq(t, f->frequency);
        return 0;
 }
 
+/**
+ * tuner_g_frequency - Get the tuned frequency for the tuner
+ * @sd: pointer to struct v4l2_subdev
+ * @f: pointer to struct v4l2_frequency
+ *
+ * On return, the structure f will be filled with the tuner frequency
+ * if the tuner matches f->type.
+ * Note: f->type should be initialized before calling it.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
 static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
 {
        struct tuner *t = to_tuner(sd);
@@ -1122,8 +1110,7 @@ static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
 
        if (check_mode(t, f->type) == -EINVAL)
                return 0;
-       f->type = t->mode;
-       if (fe_tuner_ops->get_frequency && !t->standby) {
+       if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
                u32 abs_freq;
 
                fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
@@ -1131,12 +1118,22 @@ static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
                        DIV_ROUND_CLOSEST(abs_freq * 2, 125) :
                        DIV_ROUND_CLOSEST(abs_freq, 62500);
        } else {
-               f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
+               f->frequency = (V4L2_TUNER_RADIO == f->type) ?
                        t->radio_freq : t->tv_freq;
        }
        return 0;
 }
 
+/**
+ * tuner_g_tuner - Fill in tuner information
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * At return, the structure vt will be filled with tuner information
+ * if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling it.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
 static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
 {
        struct tuner *t = to_tuner(sd);
@@ -1145,48 +1142,57 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
 
        if (check_mode(t, vt->type) == -EINVAL)
                return 0;
-       vt->type = t->mode;
-       if (analog_ops->get_afc)
+       if (vt->type == t->mode && analog_ops->get_afc)
                vt->afc = analog_ops->get_afc(&t->fe);
-       if (t->mode == V4L2_TUNER_ANALOG_TV)
-               vt->capability |= V4L2_TUNER_CAP_NORM;
        if (t->mode != V4L2_TUNER_RADIO) {
+               vt->capability |= V4L2_TUNER_CAP_NORM;
                vt->rangelow = tv_range[0] * 16;
                vt->rangehigh = tv_range[1] * 16;
                return 0;
        }
 
        /* radio mode */
-       vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
-       if (fe_tuner_ops->get_status) {
-               u32 tuner_status;
-
-               fe_tuner_ops->get_status(&t->fe, &tuner_status);
-               vt->rxsubchans =
-                       (tuner_status & TUNER_STATUS_STEREO) ?
-                       V4L2_TUNER_SUB_STEREO :
-                       V4L2_TUNER_SUB_MONO;
+       if (vt->type == t->mode) {
+               vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+               if (fe_tuner_ops->get_status) {
+                       u32 tuner_status;
+
+                       fe_tuner_ops->get_status(&t->fe, &tuner_status);
+                       vt->rxsubchans =
+                               (tuner_status & TUNER_STATUS_STEREO) ?
+                               V4L2_TUNER_SUB_STEREO :
+                               V4L2_TUNER_SUB_MONO;
+               }
+               if (analog_ops->has_signal)
+                       vt->signal = analog_ops->has_signal(&t->fe);
+               vt->audmode = t->audmode;
        }
-       if (analog_ops->has_signal)
-               vt->signal = analog_ops->has_signal(&t->fe);
        vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
-       vt->audmode = t->audmode;
        vt->rangelow = radio_range[0] * 16000;
        vt->rangehigh = radio_range[1] * 16000;
 
        return 0;
 }
 
+/**
+ * tuner_s_tuner - Set the tuner's audio mode
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * Sets the audio mode if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling it.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
 static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
 {
        struct tuner *t = to_tuner(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-       if (set_mode_freq(client, t, vt->type, 0) == -EINVAL)
+       if (set_mode(t, vt->type))
                return 0;
 
        if (t->mode == V4L2_TUNER_RADIO)
                t->audmode = vt->audmode;
+       set_freq(t, 0);
 
        return 0;
 }
@@ -1221,7 +1227,8 @@ static int tuner_resume(struct i2c_client *c)
        tuner_dbg("resume\n");
 
        if (!t->standby)
-               set_mode_freq(c, t, t->type, 0);
+               if (set_mode(t, t->mode) == 0)
+                       set_freq(t, 0);
 
        return 0;
 }
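
The tuner-core hunks above split the old set_mode_freq() helper into set_mode()
plus set_freq() and make the g_frequency/g_tuner handlers fill in data only when
the caller's type field matches the tuner's current mode. A minimal sketch of
how a bridge driver might exercise these ops through the subdev API;
bridge_tune_radio() is hypothetical and not part of the patch:

/* Sketch only: switch the tuner subdevs to radio and read back the
 * frequency the hardware settled on.  Initializing f.type before the
 * call is the bridge driver's job, per the kerneldoc added above. */
static u32 bridge_tune_radio(struct v4l2_device *v4l2_dev, u32 freq)
{
        struct v4l2_frequency f = {
                .tuner = 0,
                .type = V4L2_TUNER_RADIO,
                .frequency = freq,
        };

        v4l2_device_call_all(v4l2_dev, 0, tuner, s_frequency, &f);
        v4l2_device_call_all(v4l2_dev, 0, tuner, g_frequency, &f);
        return f.frequency;
}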
index c3ab0c813be249d4957fa93e83cfac0df567b703..48fea373c25ae99df89224d945065c25a302311f 100644 (file)
@@ -27,14 +27,20 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
        struct uvc_entity *entity)
 {
        const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
-       struct uvc_entity *remote;
+       struct media_entity *sink;
        unsigned int i;
-       u8 remote_pad;
-       int ret = 0;
+       int ret;
+
+       sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+            ? (entity->vdev ? &entity->vdev->entity : NULL)
+            : &entity->subdev.entity;
+       if (sink == NULL)
+               return 0;
 
        for (i = 0; i < entity->num_pads; ++i) {
                struct media_entity *source;
-               struct media_entity *sink;
+               struct uvc_entity *remote;
+               u8 remote_pad;
 
                if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
                        continue;
@@ -43,10 +49,11 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
                if (remote == NULL)
                        return -EINVAL;
 
-               source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
-                      ? &remote->vdev->entity : &remote->subdev.entity;
-               sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
-                    ? &entity->vdev->entity : &entity->subdev.entity;
+               source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
+                      ? (remote->vdev ? &remote->vdev->entity : NULL)
+                      : &remote->subdev.entity;
+               if (source == NULL)
+                       continue;
 
                remote_pad = remote->num_pads - 1;
                ret = media_entity_create_link(source, remote_pad,
@@ -55,11 +62,10 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
                        return ret;
        }
 
-       if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
-               ret = v4l2_device_register_subdev(&chain->dev->vdev,
-                                                 &entity->subdev);
+       if (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+               return 0;
 
-       return ret;
+       return v4l2_device_register_subdev(&chain->dev->vdev, &entity->subdev);
 }
 
 static struct v4l2_subdev_ops uvc_subdev_ops = {
@@ -84,9 +90,11 @@ static int uvc_mc_init_entity(struct uvc_entity *entity)
 
                ret = media_entity_init(&entity->subdev.entity,
                                        entity->num_pads, entity->pads, 0);
-       } else
+       } else if (entity->vdev != NULL) {
                ret = media_entity_init(&entity->vdev->entity,
                                        entity->num_pads, entity->pads, 0);
+       } else
+               ret = 0;
 
        return ret;
 }
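
The uvc_mc_register_entity() rework above resolves each link's source and sink
up front so that streaming entities without a video_device are skipped instead
of dereferenced. The media-controller primitive it builds on is
media_entity_create_link(); a hedged sketch, with link_one_pad() as a
hypothetical helper:

/* Sketch of the pattern used in the loop above: connect one source pad
 * to one sink pad with an enabled, immutable link. */
static int link_one_pad(struct media_entity *source, u16 source_pad,
                        struct media_entity *sink, u16 sink_pad)
{
        return media_entity_create_link(source, source_pad,
                        sink, sink_pad,
                        MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
}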
index 109a06384a8f73d945c9f5d5014b88b10442c8c2..f90ce9fce53927566b5569a9beeb387d034cb946 100644 (file)
@@ -104,6 +104,8 @@ static int __uvc_free_buffers(struct uvc_video_queue *queue)
        }
 
        if (queue->count) {
+               uvc_queue_cancel(queue, 0);
+               INIT_LIST_HEAD(&queue->mainqueue);
                vfree(queue->mem);
                queue->count = 0;
        }
index fc766b9f24c529e3b1083dbe0b7e37f365550669..49994793cc777ab36c38a8dc43685b5886aaaa4d 100644 (file)
@@ -1255,8 +1255,10 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
 
        /* Commit the streaming parameters. */
        ret = uvc_commit_video(stream, &stream->ctrl);
-       if (ret < 0)
+       if (ret < 0) {
+               uvc_queue_enable(&stream->queue, 0);
                return ret;
+       }
 
        return uvc_init_video(stream, GFP_KERNEL);
 }
index 19d5ae2937802a41e8784bf691400162c57c9152..06f14008b346443a50a55cc58c2a2009e1664ecd 100644 (file)
@@ -167,6 +167,12 @@ static void v4l2_device_release(struct device *cd)
 
        mutex_unlock(&videodev_lock);
 
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
+           vdev->vfl_type != VFL_TYPE_SUBDEV)
+               media_device_unregister_entity(&vdev->entity);
+#endif
+
        /* Release video_device and perform other
           cleanups as needed. */
        vdev->release(vdev);
@@ -389,9 +395,6 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
 static int v4l2_open(struct inode *inode, struct file *filp)
 {
        struct video_device *vdev;
-#if defined(CONFIG_MEDIA_CONTROLLER)
-       struct media_entity *entity = NULL;
-#endif
        int ret = 0;
 
        /* Check if the video device is available */
@@ -405,17 +408,6 @@ static int v4l2_open(struct inode *inode, struct file *filp)
        /* and increase the device refcount */
        video_get(vdev);
        mutex_unlock(&videodev_lock);
-#if defined(CONFIG_MEDIA_CONTROLLER)
-       if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
-           vdev->vfl_type != VFL_TYPE_SUBDEV) {
-               entity = media_entity_get(&vdev->entity);
-               if (!entity) {
-                       ret = -EBUSY;
-                       video_put(vdev);
-                       return ret;
-               }
-       }
-#endif
        if (vdev->fops->open) {
                if (vdev->lock && mutex_lock_interruptible(vdev->lock)) {
                        ret = -ERESTARTSYS;
@@ -431,14 +423,8 @@ static int v4l2_open(struct inode *inode, struct file *filp)
 
 err:
        /* decrease the refcount in case of an error */
-       if (ret) {
-#if defined(CONFIG_MEDIA_CONTROLLER)
-               if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
-                   vdev->vfl_type != VFL_TYPE_SUBDEV)
-                       media_entity_put(entity);
-#endif
+       if (ret)
                video_put(vdev);
-       }
        return ret;
 }
 
@@ -455,11 +441,6 @@ static int v4l2_release(struct inode *inode, struct file *filp)
                if (vdev->lock)
                        mutex_unlock(vdev->lock);
        }
-#if defined(CONFIG_MEDIA_CONTROLLER)
-       if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
-           vdev->vfl_type != VFL_TYPE_SUBDEV)
-               media_entity_put(&vdev->entity);
-#endif
        /* decrease the refcount unconditionally since the release()
           return value is ignored. */
        video_put(vdev);
@@ -754,12 +735,6 @@ void video_unregister_device(struct video_device *vdev)
        if (!vdev || !video_is_registered(vdev))
                return;
 
-#if defined(CONFIG_MEDIA_CONTROLLER)
-       if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
-           vdev->vfl_type != VFL_TYPE_SUBDEV)
-               media_device_unregister_entity(&vdev->entity);
-#endif
-
        mutex_lock(&videodev_lock);
        /* This must be in a critical section to prevent a race with v4l2_open.
         * Once this bit has been cleared video_get may never be called again.
index 506edcc2ddeb722eedae63934ab4be34b65371f3..69e8c6ffcc49b8b64700c8049acaf0b172d25350 100644 (file)
@@ -1822,6 +1822,8 @@ static long __video_do_ioctl(struct file *file,
                if (!ops->vidioc_g_tuner)
                        break;
 
+               p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+                       V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
                ret = ops->vidioc_g_tuner(file, fh, p);
                if (!ret)
                        dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1840,6 +1842,8 @@ static long __video_do_ioctl(struct file *file,
 
                if (!ops->vidioc_s_tuner)
                        break;
+               p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+                       V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
                dbgarg(cmd, "index=%d, name=%s, type=%d, "
                                "capability=0x%x, rangelow=%d, "
                                "rangehigh=%d, signal=%d, afc=%d, "
@@ -1858,6 +1862,8 @@ static long __video_do_ioctl(struct file *file,
                if (!ops->vidioc_g_frequency)
                        break;
 
+               p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+                       V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
                ret = ops->vidioc_g_frequency(file, fh, p);
                if (!ret)
                        dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
@@ -1940,13 +1946,19 @@ static long __video_do_ioctl(struct file *file,
        case VIDIOC_S_HW_FREQ_SEEK:
        {
                struct v4l2_hw_freq_seek *p = arg;
+               enum v4l2_tuner_type type;
 
                if (!ops->vidioc_s_hw_freq_seek)
                        break;
+               type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+                       V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
                dbgarg(cmd,
-                       "tuner=%d, type=%d, seek_upward=%d, wrap_around=%d\n",
-                       p->tuner, p->type, p->seek_upward, p->wrap_around);
-               ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
+                       "tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n",
+                       p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing);
+               if (p->type != type)
+                       ret = -EINVAL;
+               else
+                       ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
                break;
        }
        case VIDIOC_ENUM_FRAMESIZES:
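
With the hunks above, __video_do_ioctl() presets the tuner/frequency type from
the device node (radio vs. analog TV) before the driver callback runs, and
rejects hardware frequency seeks whose type does not match. Under that contract
a handler validates the type instead of filling it in; a sketch with a
hypothetical mydrv_g_tuner():

/* Sketch only: vt->type arrives preset by the v4l2 core, so the driver
 * must not overwrite it.  Range values are in 62.5 Hz units because
 * V4L2_TUNER_CAP_LOW is advertised. */
static int mydrv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
        if (vt->index != 0)
                return -EINVAL;
        strlcpy(vt->name, "mydrv FM tuner", sizeof(vt->name));
        vt->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
        vt->rangelow  = 87500 * 16;     /*  87.5 MHz */
        vt->rangehigh = 108000 * 16;    /* 108.0 MHz */
        return 0;
}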
index 6ba1461d51ef9456be18be6421837c0c279064b8..3015e6000946b735a89d9b0a5e90f70540ab10c2 100644 (file)
@@ -492,13 +492,6 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
                return -EINVAL;
        }
 
-       /*
-        * If the same number of buffers and memory access method is requested
-        * then return immediately.
-        */
-       if (q->memory == req->memory && req->count == q->num_buffers)
-               return 0;
-
        if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
                /*
                 * We already have buffers allocated, so first check if they
@@ -539,9 +532,9 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
        /* Finally, allocate buffers and video memory */
        ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes,
                                plane_sizes);
-       if (ret < 0) {
-               dprintk(1, "Memory allocation failed with error: %d\n", ret);
-               return ret;
+       if (ret == 0) {
+               dprintk(1, "Memory allocation failed\n");
+               return -ENOMEM;
        }
 
        /*
@@ -1196,6 +1189,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
         * has not already dequeued before initiating cancel.
         */
        INIT_LIST_HEAD(&q->done_list);
+       atomic_set(&q->queued_count, 0);
        wake_up_all(&q->done_wq);
 
        /*
index b2d9485aac75e7ade363e79a6994af5e003c4534..10a20d9509d982434f456ee300b771c6389f872e 100644 (file)
@@ -62,7 +62,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
                goto fail_pages_array_alloc;
 
        for (i = 0; i < buf->sg_desc.num_pages; ++i) {
-               buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
                if (NULL == buf->pages[i])
                        goto fail_pages_alloc;
                sg_set_page(&buf->sg_desc.sglist[i],
index 0f09c057e7960ca203886b61be108dfb9d3003d3..6ca938a6bf94d336ddbfd830c02bc4739436ffc9 100644 (file)
@@ -728,6 +728,9 @@ config MFD_TPS65910
          if you say yes here you get support for the TPS65910 series of
          Power Management chips.
 
+config TPS65911_COMPARATOR
+       tristate
+
 endif # MFD_SUPPORT
 
 menu "Multimedia Capabilities Port drivers"
index efe3cc33ed92ed83a7a8ece78f32686c87bb6170..d7d47d2a4c7615c17d4aec23080692f2b603dda7 100644 (file)
@@ -94,3 +94,4 @@ obj-$(CONFIG_MFD_OMAP_USB_HOST)       += omap-usb-host.o
 obj-$(CONFIG_MFD_PM8921_CORE)  += pm8921-core.o
 obj-$(CONFIG_MFD_PM8XXX_IRQ)   += pm8xxx-irq.o
 obj-$(CONFIG_MFD_TPS65910)     += tps65910.o tps65910-irq.o
+obj-$(CONFIG_TPS65911_COMPARATOR)      += tps65911-comparator.o
index c27fd1fc3b86789a73cf725ce57a5863e5175d6f..c71ae09430c5c580e7f0cc5bbd0d3610fd8b5048 100644 (file)
@@ -619,6 +619,7 @@ static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
 /* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
 static struct ds1wm_driver_data ds1wm_pdata = {
        .active_high = 1,
+       .reset_recover_delay = 1,
 };
 
 static struct resource ds1wm_resources[] = {
index 2808bd125d1352303ee49ed968aca8913dc2d6c8..04c7093d6499cb88f330b2f768713e49e58c9ffc 100644 (file)
@@ -99,6 +99,7 @@ static int ds1wm_disable(struct platform_device *pdev)
 
 static struct ds1wm_driver_data ds1wm_pdata = {
        .active_high = 0,
+       .reset_recover_delay = 1,
 };
 
 static struct resource ds1wm_resources[] __initdata = {
index 855219526ccb9ec390afe9e2a74b138b1b415f31..1717144fe7f4f220f6cd79ccea856f39f87ecac3 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/spinlock.h>
 #include <linux/gpio.h>
 #include <plat/usb.h>
-#include <linux/pm_runtime.h>
 
 #define USBHS_DRIVER_NAME      "usbhs-omap"
 #define OMAP_EHCI_DEVICE       "ehci-omap"
 
 
 struct usbhs_hcd_omap {
+       struct clk                      *usbhost_ick;
+       struct clk                      *usbhost_hs_fck;
+       struct clk                      *usbhost_fs_fck;
        struct clk                      *xclk60mhsp1_ck;
        struct clk                      *xclk60mhsp2_ck;
        struct clk                      *utmi_p1_fck;
@@ -156,6 +158,8 @@ struct usbhs_hcd_omap {
        struct clk                      *usbhost_p2_fck;
        struct clk                      *usbtll_p2_fck;
        struct clk                      *init_60m_fclk;
+       struct clk                      *usbtll_fck;
+       struct clk                      *usbtll_ick;
 
        void __iomem                    *uhh_base;
        void __iomem                    *tll_base;
@@ -349,13 +353,46 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
        omap->platdata.ehci_data = pdata->ehci_data;
        omap->platdata.ohci_data = pdata->ohci_data;
 
-       pm_runtime_enable(&pdev->dev);
+       omap->usbhost_ick = clk_get(dev, "usbhost_ick");
+       if (IS_ERR(omap->usbhost_ick)) {
+               ret =  PTR_ERR(omap->usbhost_ick);
+               dev_err(dev, "usbhost_ick failed error:%d\n", ret);
+               goto err_end;
+       }
+
+       omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
+       if (IS_ERR(omap->usbhost_hs_fck)) {
+               ret = PTR_ERR(omap->usbhost_hs_fck);
+               dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
+               goto err_usbhost_ick;
+       }
+
+       omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
+       if (IS_ERR(omap->usbhost_fs_fck)) {
+               ret = PTR_ERR(omap->usbhost_fs_fck);
+               dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
+               goto err_usbhost_hs_fck;
+       }
+
+       omap->usbtll_fck = clk_get(dev, "usbtll_fck");
+       if (IS_ERR(omap->usbtll_fck)) {
+               ret = PTR_ERR(omap->usbtll_fck);
+               dev_err(dev, "usbtll_fck failed error:%d\n", ret);
+               goto err_usbhost_fs_fck;
+       }
+
+       omap->usbtll_ick = clk_get(dev, "usbtll_ick");
+       if (IS_ERR(omap->usbtll_ick)) {
+               ret = PTR_ERR(omap->usbtll_ick);
+               dev_err(dev, "usbtll_ick failed error:%d\n", ret);
+               goto err_usbtll_fck;
+       }
 
        omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
        if (IS_ERR(omap->utmi_p1_fck)) {
                ret = PTR_ERR(omap->utmi_p1_fck);
                dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
-               goto err_end;
+               goto err_usbtll_ick;
        }
 
        omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
@@ -485,8 +522,22 @@ err_xclk60mhsp1_ck:
 err_utmi_p1_fck:
        clk_put(omap->utmi_p1_fck);
 
+err_usbtll_ick:
+       clk_put(omap->usbtll_ick);
+
+err_usbtll_fck:
+       clk_put(omap->usbtll_fck);
+
+err_usbhost_fs_fck:
+       clk_put(omap->usbhost_fs_fck);
+
+err_usbhost_hs_fck:
+       clk_put(omap->usbhost_hs_fck);
+
+err_usbhost_ick:
+       clk_put(omap->usbhost_ick);
+
 err_end:
-       pm_runtime_disable(&pdev->dev);
        kfree(omap);
 
 end_probe:
@@ -520,7 +571,11 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
        clk_put(omap->utmi_p2_fck);
        clk_put(omap->xclk60mhsp1_ck);
        clk_put(omap->utmi_p1_fck);
-       pm_runtime_disable(&pdev->dev);
+       clk_put(omap->usbtll_ick);
+       clk_put(omap->usbtll_fck);
+       clk_put(omap->usbhost_fs_fck);
+       clk_put(omap->usbhost_hs_fck);
+       clk_put(omap->usbhost_ick);
        kfree(omap);
 
        return 0;
@@ -640,6 +695,7 @@ static int usbhs_enable(struct device *dev)
        struct usbhs_omap_platform_data *pdata = &omap->platdata;
        unsigned long                   flags = 0;
        int                             ret = 0;
+       unsigned long                   timeout;
        unsigned                        reg;
 
        dev_dbg(dev, "starting TI HSUSB Controller\n");
@@ -652,7 +708,11 @@ static int usbhs_enable(struct device *dev)
        if (omap->count > 0)
                goto end_count;
 
-       pm_runtime_get_sync(dev);
+       clk_enable(omap->usbhost_ick);
+       clk_enable(omap->usbhost_hs_fck);
+       clk_enable(omap->usbhost_fs_fck);
+       clk_enable(omap->usbtll_fck);
+       clk_enable(omap->usbtll_ick);
 
        if (pdata->ehci_data->phy_reset) {
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -676,6 +736,50 @@ static int usbhs_enable(struct device *dev)
        omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
        dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
 
+       /* perform TLL soft reset, and wait until reset is complete */
+       usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+                       OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+       /* Wait for TLL reset to complete */
+       timeout = jiffies + msecs_to_jiffies(1000);
+       while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+                       & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+               cpu_relax();
+
+               if (time_after(jiffies, timeout)) {
+                       dev_dbg(dev, "operation timed out\n");
+                       ret = -EINVAL;
+                       goto err_tll;
+               }
+       }
+
+       dev_dbg(dev, "TLL RESET DONE\n");
+
+       /* (1<<3) = no idle mode only for initial debugging */
+       usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+                       OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+                       OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+                       OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
+
+       /* Put UHH in NoIdle/NoStandby mode */
+       reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+       if (is_omap_usbhs_rev1(omap)) {
+               reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+                               | OMAP_UHH_SYSCONFIG_SIDLEMODE
+                               | OMAP_UHH_SYSCONFIG_CACTIVITY
+                               | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+               reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+
+       } else if (is_omap_usbhs_rev2(omap)) {
+               reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
+               reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
+               reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
+               reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+       }
+
+       usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
        reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
        /* setup ULPI bypass and burst configurations */
        reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
@@ -815,8 +919,6 @@ end_count:
        return 0;
 
 err_tll:
-       pm_runtime_put_sync(dev);
-       spin_unlock_irqrestore(&omap->lock, flags);
        if (pdata->ehci_data->phy_reset) {
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
                        gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -824,6 +926,13 @@ err_tll:
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
                        gpio_free(pdata->ehci_data->reset_gpio_port[1]);
        }
+
+       clk_disable(omap->usbtll_ick);
+       clk_disable(omap->usbtll_fck);
+       clk_disable(omap->usbhost_fs_fck);
+       clk_disable(omap->usbhost_hs_fck);
+       clk_disable(omap->usbhost_ick);
+       spin_unlock_irqrestore(&omap->lock, flags);
        return ret;
 }
 
@@ -896,7 +1005,11 @@ static void usbhs_disable(struct device *dev)
                clk_disable(omap->utmi_p1_fck);
        }
 
-       pm_runtime_put_sync(dev);
+       clk_disable(omap->usbtll_ick);
+       clk_disable(omap->usbtll_fck);
+       clk_disable(omap->usbhost_fs_fck);
+       clk_disable(omap->usbhost_hs_fck);
+       clk_disable(omap->usbhost_ick);
 
        /* gpio_free() might sleep, so unlock the spinlock */
        spin_unlock_irqrestore(&omap->lock, flags);
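
usbhs_enable() above waits for the TLL soft reset with a bounded busy-wait on a
status bit. The idiom in isolation, as a hypothetical wait_for_bit() helper:

/* Sketch of the reset-poll idiom: spin with cpu_relax() until the bit
 * sets, bounded by a jiffies deadline so a dead block cannot hang us. */
static int wait_for_bit(void __iomem *reg, u32 mask, unsigned int ms)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(ms);

        while (!(readl(reg) & mask)) {
                cpu_relax();
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
        }
        return 0;
}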
index 3d2dc56a3d40c927277c81a980ad0c04783743c7..283ac67597575831b778ad9653420b6750aab763 100644 (file)
@@ -125,7 +125,7 @@ static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
 static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
 {
        struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
-       struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
+       struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
        int ret;
 
        ret = comp_threshold_set(tps65910, COMP1,  pdata->vmbch_threshold);
index d019746551f3481f00c06e209b30a833586e92fa..2a40d0efdff5d33115ac61a0fc97287638ae18ca 100644 (file)
@@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
 
 static inline bool needs_unaligned_copy(const void *ptr)
 {
-#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        return false;
 #else
        return ((ptr - NULL) & 3) != 0;
index 668d41e594a9bed240ac4863364f0b26e9f4bb9b..df03dd3bd0e2b331db3459f3a632ffd2f85f6155 100644 (file)
@@ -270,7 +270,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
        return IOC4_VARIANT_PCI_RT;
 }
 
-static void __devinit
+static void
 ioc4_load_modules(struct work_struct *work)
 {
        request_module("sgiioc4");
index 81d7fa4ec0db0a284891a31ab280720a19bd32d9..150cd7061b808019d4a775ab29fc287558d7508e 100644 (file)
@@ -120,6 +120,7 @@ static int recur_count = REC_NUM_DEFAULT;
 static enum cname cpoint = CN_INVALID;
 static enum ctype cptype = CT_NONE;
 static int count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(count_lock);
 
 module_param(recur_count, int, 0644);
 MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -230,11 +231,14 @@ static const char *cp_name_to_str(enum cname name)
 static int lkdtm_parse_commandline(void)
 {
        int i;
+       unsigned long flags;
 
        if (cpoint_count < 1 || recur_count < 1)
                return -EINVAL;
 
+       spin_lock_irqsave(&count_lock, flags);
        count = cpoint_count;
+       spin_unlock_irqrestore(&count_lock, flags);
 
        /* No special parameters */
        if (!cpoint_type && !cpoint_name)
@@ -349,6 +353,9 @@ static void lkdtm_do_action(enum ctype which)
 
 static void lkdtm_handler(void)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&count_lock, flags);
        count--;
        printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
                        cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
@@ -357,6 +364,7 @@ static void lkdtm_handler(void)
                lkdtm_do_action(cptype);
                count = cpoint_count;
        }
+       spin_unlock_irqrestore(&count_lock, flags);
 }
 
 static int lkdtm_register_cpoint(enum cname which)
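
The lkdtm hunks serialize the count variable between lkdtm_parse_commandline()
and lkdtm_handler(), which runs from crash-point hooks that may fire in
interrupt context; hence spin_lock_irqsave() rather than plain spin_lock(). The
pattern in isolation (the demo_* names are hypothetical):

/* Sketch: a counter shared with interrupt context.  Masking local
 * interrupts while holding the lock prevents a self-deadlock when the
 * handler fires on the CPU that already holds it. */
static DEFINE_SPINLOCK(demo_lock);
static int demo_count = 10;

static void demo_hit(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        if (--demo_count == 0)
                demo_count = 10;        /* re-arm, as lkdtm_handler() does */
        spin_unlock_irqrestore(&demo_lock, flags);
}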
index bb6f9255c17c3a8869b31c5bdb4a4a862b1cfa72..374dfcfccd07af35de8f5c45e8b0d6ac106a6bef 100644 (file)
@@ -317,7 +317,8 @@ EXPORT_SYMBOL_GPL(pti_request_masterchannel);
  *                             a master, channel ID address
  *                             used to write to PTI HW.
  *
- * @mc: master, channel apeture ID address to be released.
+ * @mc: master, channel aperture ID address to be released.  This
+ *      will de-allocate the structure via kfree().
  */
 void pti_release_masterchannel(struct pti_masterchannel *mc)
 {
@@ -475,8 +476,10 @@ static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
                else
                        pti_tty_data->mc = pti_request_masterchannel(2);
 
-               if (pti_tty_data->mc == NULL)
+               if (pti_tty_data->mc == NULL) {
+                       kfree(pti_tty_data);
                        return -ENXIO;
+               }
                tty->driver_data = pti_tty_data;
        }
 
@@ -495,7 +498,7 @@ static void pti_tty_cleanup(struct tty_struct *tty)
        if (pti_tty_data == NULL)
                return;
        pti_release_masterchannel(pti_tty_data->mc);
-       kfree(tty->driver_data);
+       kfree(pti_tty_data);
        tty->driver_data = NULL;
 }
 
@@ -581,7 +584,7 @@ static int pti_char_open(struct inode *inode, struct file *filp)
 static int pti_char_release(struct inode *inode, struct file *filp)
 {
        pti_release_masterchannel(filp->private_data);
-       kfree(filp->private_data);
+       filp->private_data = NULL;
        return 0;
 }
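
The pti fixes above settle the masterchannel's ownership:
pti_release_masterchannel() frees the structure itself, so callers clear their
pointer instead of calling kfree() a second time. Restated as a sketch
(demo_char_release() is hypothetical):

/* Sketch of the ownership rule: release frees, the caller only forgets. */
static int demo_char_release(struct inode *inode, struct file *filp)
{
        pti_release_masterchannel(filp->private_data);
        filp->private_data = NULL;      /* no kfree() here */
        return 0;
}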
 
index ee5109a3cd984f287c824fd71d3221ead361bccd..42f067347bc70fd8f24466ed4384408191812611 100644 (file)
@@ -495,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+
        if (atomic_dec_return(&queued_msg->use_count) == 0) {
                dev_kfree_skb(skb);
                kfree(queued_msg);
        }
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        return NETDEV_TX_OK;
 }
 
index f91f82eabda72a311602a8979ab835e0d938ffd6..54c91ffe4a9154fb7142f5f61a654658e8ab1fd0 100644 (file)
@@ -605,7 +605,7 @@ long st_unregister(struct st_proto_s *proto)
        pr_debug("%s: %d ", __func__, proto->chnl_id);
 
        st_kim_ref(&st_gdata, 0);
-       if (proto->chnl_id >= ST_MAX_CHANNELS) {
+       if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) {
                pr_err(" chnl_id %d not supported", proto->chnl_id);
                return -EPROTONOSUPPORT;
        }
index 5da93ee6f6bea20b54cab0f95b83c1201dbf7248..38fd2f04c07eed8df424dd7ef6f04ba25d7f4054 100644 (file)
@@ -245,9 +245,9 @@ void skip_change_remote_baud(unsigned char **ptr, long *len)
                pr_err("invalid action after change remote baud command");
        } else {
                *ptr = *ptr + sizeof(struct bts_action) +
-                       ((struct bts_action *)nxt_action)->size;
+                       ((struct bts_action *)cur_action)->size;
                *len = *len - (sizeof(struct bts_action) +
-                               ((struct bts_action *)nxt_action)->size);
+                               ((struct bts_action *)cur_action)->size);
                /* warn user on not commenting these in firmware */
                pr_warn("skipping the wait event of change remote baud");
        }
@@ -604,6 +604,10 @@ void st_kim_ref(struct st_data_s **core_data, int id)
        struct kim_data_s       *kim_gdata;
        /* get kim_gdata reference from platform device */
        pdev = st_get_plat_device(id);
+       if (!pdev) {
+               *core_data = NULL;
+               return;
+       }
        kim_gdata = dev_get_drvdata(&pdev->dev);
        *core_data = kim_gdata->core_data;
 }
index 71da5641e258e041cb9b72439d3d18eac9d9baa9..f85e42224559679c43b9840c24d17397ad5c95a7 100644 (file)
@@ -1024,7 +1024,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        INIT_LIST_HEAD(&md->part);
        md->usage = 1;
 
-       ret = mmc_init_queue(&md->queue, card, &md->lock);
+       ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
        if (ret)
                goto err_putdisk;
 
@@ -1297,6 +1297,9 @@ static void mmc_blk_remove(struct mmc_card *card)
        struct mmc_blk_data *md = mmc_get_drvdata(card);
 
        mmc_blk_remove_parts(card, md);
+       mmc_claim_host(card->host);
+       mmc_blk_part_switch(card, md);
+       mmc_release_host(card->host);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
 }
index c07322c2658cd171049bf59d4325f2e04ae2a90c..6413afa318d2c65532bd11dc6f36a539c6291107 100644 (file)
@@ -106,10 +106,12 @@ static void mmc_request(struct request_queue *q)
  * @mq: mmc queue
  * @card: mmc card to attach this queue
  * @lock: queue lock
+ * @subname: partition subname
  *
  * Initialise a MMC card request queue.
  */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+                  spinlock_t *lock, const char *subname)
 {
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
@@ -133,12 +135,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                mq->queue->limits.max_discard_sectors = UINT_MAX;
                if (card->erased_byte == 0)
                        mq->queue->limits.discard_zeroes_data = 1;
-               if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
-                       mq->queue->limits.discard_granularity =
-                                                       card->erase_size << 9;
-                       mq->queue->limits.discard_alignment =
-                                                       card->erase_size << 9;
-               }
+               mq->queue->limits.discard_granularity = card->pref_erase << 9;
                if (mmc_can_secure_erase_trim(card))
                        queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
                                                mq->queue);
@@ -209,8 +206,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
        sema_init(&mq->thread_sem, 1);
 
-       mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
-               host->index);
+       mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+               host->index, subname ? subname : "");
 
        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
index 64e66e0d4994a9176662354cd123fb0cee2315a0..6223ef8dc9cd08e6b7468a286b3e43fcb96ac26f 100644 (file)
@@ -19,7 +19,8 @@ struct mmc_queue {
        unsigned int            bounce_sg_len;
 };
 
-extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
+                         const char *);
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
index 68091dda3f31f49ab9d7130cbad178e93d5ca1c8..7843efe22359916e82bb9781d74826ad457829f6 100644 (file)
@@ -1245,7 +1245,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
                 */
                timeout_clks <<= 1;
                timeout_us += (timeout_clks * 1000) /
-                             (card->host->ios.clock / 1000);
+                             (mmc_host_clk_rate(card->host) / 1000);
 
                erase_timeout = timeout_us / 1000;
 
index 2a7e43bc796dfd1e798ad60a1ae6d0e7b98278a2..aa7d1d79b8c554c143c004db5e3ef9565ca653dd 100644 (file)
@@ -247,12 +247,12 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                return 0;
 
        /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+       card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
        if (card->csd.structure == 3) {
-               int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
-               if (ext_csd_struct > 2) {
+               if (card->ext_csd.raw_ext_csd_structure > 2) {
                        printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
                                "version %d\n", mmc_hostname(card->host),
-                                       ext_csd_struct);
+                                       card->ext_csd.raw_ext_csd_structure);
                        err = -EINVAL;
                        goto out;
                }
@@ -266,6 +266,10 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                goto out;
        }
 
+       card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+       card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+       card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+       card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
        if (card->ext_csd.rev >= 2) {
                card->ext_csd.sectors =
                        ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
@@ -277,7 +281,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
                        mmc_card_set_blockaddr(card);
        }
-
+       card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
        switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
        case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
             EXT_CSD_CARD_TYPE_26:
@@ -307,6 +311,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                        mmc_hostname(card->host));
        }
 
+       card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+       card->ext_csd.raw_erase_timeout_mult =
+               ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+       card->ext_csd.raw_hc_erase_grp_size =
+               ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
        if (card->ext_csd.rev >= 3) {
                u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
                card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
@@ -334,6 +343,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
        }
 
+       card->ext_csd.raw_hc_erase_gap_size =
+               ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
+       card->ext_csd.raw_sec_trim_mult =
+               ext_csd[EXT_CSD_SEC_TRIM_MULT];
+       card->ext_csd.raw_sec_erase_mult =
+               ext_csd[EXT_CSD_SEC_ERASE_MULT];
+       card->ext_csd.raw_sec_feature_support =
+               ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+       card->ext_csd.raw_trim_mult =
+               ext_csd[EXT_CSD_TRIM_MULT];
        if (card->ext_csd.rev >= 4) {
                /*
                 * Enhanced area feature support -- check whether the eMMC
@@ -341,7 +360,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                 * area offset and size to user by adding sysfs interface.
                 */
                if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
-                               (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+                   (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
                        u8 hc_erase_grp_sz =
                                ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
                        u8 hc_wp_grp_sz =
@@ -401,17 +420,17 @@ static inline void mmc_free_ext_csd(u8 *ext_csd)
 }
 
 
-static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
-                       unsigned bus_width)
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
 {
        u8 *bw_ext_csd;
        int err;
 
+       if (bus_width == MMC_BUS_WIDTH_1)
+               return 0;
+
        err = mmc_get_ext_csd(card, &bw_ext_csd);
-       if (err)
-               return err;
 
-       if ((ext_csd == NULL || bw_ext_csd == NULL)) {
+       if (err || bw_ext_csd == NULL) {
                if (bus_width != MMC_BUS_WIDTH_1)
                        err = -EINVAL;
                goto out;
@@ -421,35 +440,40 @@ static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
                goto out;
 
        /* only compare read only fields */
-       err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
+       err = (!(card->ext_csd.raw_partition_support ==
                        bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
-               (ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
+               (card->ext_csd.raw_erased_mem_count ==
                        bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
-               (ext_csd[EXT_CSD_REV] ==
+               (card->ext_csd.rev ==
                        bw_ext_csd[EXT_CSD_REV]) &&
-               (ext_csd[EXT_CSD_STRUCTURE] ==
+               (card->ext_csd.raw_ext_csd_structure ==
                        bw_ext_csd[EXT_CSD_STRUCTURE]) &&
-               (ext_csd[EXT_CSD_CARD_TYPE] ==
+               (card->ext_csd.raw_card_type ==
                        bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
-               (ext_csd[EXT_CSD_S_A_TIMEOUT] ==
+               (card->ext_csd.raw_s_a_timeout ==
                        bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
-               (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
+               (card->ext_csd.raw_hc_erase_gap_size ==
                        bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
-               (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
+               (card->ext_csd.raw_erase_timeout_mult ==
                        bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
-               (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
+               (card->ext_csd.raw_hc_erase_grp_size ==
                        bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
-               (ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
+               (card->ext_csd.raw_sec_trim_mult ==
                        bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
-               (ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
+               (card->ext_csd.raw_sec_erase_mult ==
                        bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
-               (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
+               (card->ext_csd.raw_sec_feature_support ==
                        bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
-               (ext_csd[EXT_CSD_TRIM_MULT] ==
+               (card->ext_csd.raw_trim_mult ==
                        bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
-               memcmp(&ext_csd[EXT_CSD_SEC_CNT],
-                      &bw_ext_csd[EXT_CSD_SEC_CNT],
-                      4) != 0);
+               (card->ext_csd.raw_sectors[0] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+               (card->ext_csd.raw_sectors[1] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+               (card->ext_csd.raw_sectors[2] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+               (card->ext_csd.raw_sectors[3] ==
+                       bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
        if (err)
                err = -EINVAL;
 
@@ -770,7 +794,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                                 */
                                if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
                                        err = mmc_compare_ext_csds(card,
-                                               ext_csd,
                                                bus_width);
                                else
                                        err = mmc_bus_test(card, bus_width);
index 4d0c15bfa51465c91026972417c17652129e09d8..262fff0191779301232fefba02bc8c916e026b33 100644 (file)
@@ -691,15 +691,54 @@ static int mmc_sdio_resume(struct mmc_host *host)
 static int mmc_sdio_power_restore(struct mmc_host *host)
 {
        int ret;
+       u32 ocr;
 
        BUG_ON(!host);
        BUG_ON(!host->card);
 
        mmc_claim_host(host);
+
+       /*
+        * Reset the card by performing the same steps that are taken by
+        * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
+        *
+        * sdio_reset() is technically not needed. Having just powered up the
+        * hardware, it should already be in reset state. However, some
+        * platforms (such as SD8686 on OLPC) do not instantly cut power,
+        * meaning that a reset is required when restoring power soon after
+        * powering off. It is harmless in other cases.
+        *
+        * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
+        * is not necessary for non-removable cards. However, it is required
+        * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
+        * harmless in other situations.
+        *
+        * With these steps taken, mmc_select_voltage() is also required to
+        * restore the correct voltage setting of the card.
+        */
+       sdio_reset(host);
+       mmc_go_idle(host);
+       mmc_send_if_cond(host, host->ocr_avail);
+
+       ret = mmc_send_io_op_cond(host, 0, &ocr);
+       if (ret)
+               goto out;
+
+       if (host->ocr_avail_sdio)
+               host->ocr_avail = host->ocr_avail_sdio;
+
+       host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
+       if (!host->ocr) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        ret = mmc_sdio_init_card(host, host->ocr, host->card,
                                mmc_card_keep_power(host));
        if (!ret && host->sdio_irqs)
                mmc_signal_sdio_irq(host);
+
+out:
        mmc_release_host(host);
 
        return ret;
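
The comment block added to mmc_sdio_power_restore() spells out the full re-init
sequence (CMD52 I/O reset, CMD0, CMD8, CMD5) needed when the platform never
truly cut power. The function is normally reached through the core's
power-restore path; a hedged sketch of a host driver triggering it, assuming
the mmc_power_restore_host() API of this era and a hypothetical myhost driver:

/* Sketch only: restoring card power after the host's power domain was
 * gated ends up in the bus ops' .power_restore, i.e. the SDIO routine
 * patched above for SDIO cards. */
static int myhost_power_resume(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);

        return mmc_power_restore_host(mmc);
}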
index d29b9c36919a1d05fb8948d2f6231e366cb92c34..d2565df8a7fb54ce534fb16d57791b351f89d00e 100644 (file)
@@ -189,7 +189,7 @@ static int sdio_bus_remove(struct device *dev)
 
        /* Then undo the runtime PM settings in sdio_bus_probe() */
        if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
-               pm_runtime_put_noidle(dev);
+               pm_runtime_put_sync(dev);
 
 out:
        return ret;
index c471cdb1a0ba999dabbd0e3632b5036bf3f28613..50f4f77ed20250466511d8d12b94165ad64329ba 100644 (file)
@@ -582,6 +582,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
                        data->error = -EILSEQ;
                } else if (status & MCI_DATATIMEOUT) {
                        data->error = -ETIMEDOUT;
+               } else if (status & MCI_STARTBITERR) {
+                       data->error = -ECOMM;
                } else if (status & MCI_TXUNDERRUN) {
                        data->error = -EIO;
                } else if (status & MCI_RXOVERRUN) {
index bb32e21c09dbefcc1a0599b7f278d230f5d35689..2164e8c6476c1bca8e085ff7c04871a38c33fad9 100644 (file)
@@ -86,6 +86,7 @@
 #define MCI_CMDRESPEND         (1 << 6)
 #define MCI_CMDSENT            (1 << 7)
 #define MCI_DATAEND            (1 << 8)
+#define MCI_STARTBITERR                (1 << 9)
 #define MCI_DATABLOCKEND       (1 << 10)
 #define MCI_CMDACTIVE          (1 << 11)
 #define MCI_TXACTIVE           (1 << 12)
 #define MCI_CMDRESPENDCLR      (1 << 6)
 #define MCI_CMDSENTCLR         (1 << 7)
 #define MCI_DATAENDCLR         (1 << 8)
+#define MCI_STARTBITERRCLR     (1 << 9)
 #define MCI_DATABLOCKENDCLR    (1 << 10)
 /* Extended status bits for the ST Micro variants */
 #define MCI_ST_SDIOITC         (1 << 22)
 #define MCI_CMDRESPENDMASK     (1 << 6)
 #define MCI_CMDSENTMASK                (1 << 7)
 #define MCI_DATAENDMASK                (1 << 8)
+#define MCI_STARTBITERRMASK    (1 << 9)
 #define MCI_DATABLOCKENDMASK   (1 << 10)
 #define MCI_CMDACTIVEMASK      (1 << 11)
 #define MCI_TXACTIVEMASK       (1 << 12)
 #define MCI_IRQENABLE  \
        (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|     \
        MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|       \
-       MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
+       MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK)
 
 /* These interrupts are directed to IRQ1 when two IRQ lines are available */
 #define MCI_IRQ1MASK \
index e2aecb7f1d5cfe32aab9ffe35aabfbe0ba9a643c..ab66f2454dc48fbc5fe2d68fe51b1d2611de1f7c 100644 (file)
 #include <linux/mmc/core.h>
 #include <linux/mmc/host.h>
 
+/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
+#ifndef NO_IRQ
+#define NO_IRQ 0
+#endif
+
 MODULE_LICENSE("GPL");
 
 enum {
index 5b2e2155b413bc988cabbd5125fe5c4c64f27d5b..dedf3dab8a3ba8216bef897e9bac0b612699b127 100644 (file)
@@ -429,7 +429,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
                                return -EINVAL;
                        }
                }
-               mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg);
 
                /* Allow an aux regulator */
                reg = regulator_get(host->dev, "vmmc_aux");
@@ -962,7 +961,8 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
        spin_unlock(&host->irq_lock);
 
        if (host->use_dma && dma_ch != -1) {
-               dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
+               dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
+                       host->data->sg_len,
                        omap_hsmmc_get_dma_dir(host, host->data));
                omap_free_dma(dma_ch);
        }
@@ -1346,7 +1346,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
                return;
        }
 
-       dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
+       dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                omap_hsmmc_get_dma_dir(host, data));
 
        req_in_progress = host->req_in_progress;
index b3654293017bca33d4de5b77d0f654e07fb92031..ce500f03df859a6f9af275461d7ef29515aedb4e 100644 (file)
@@ -92,7 +92,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
                mmc_data->ocr_mask = p->tmio_ocr_mask;
                mmc_data->capabilities |= p->tmio_caps;
 
-               if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
+               if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
                        priv->param_tx.slave_id = p->dma_slave_tx;
                        priv->param_rx.slave_id = p->dma_slave_rx;
                        priv->dma_priv.chan_priv_tx = &priv->param_tx;
@@ -165,13 +165,14 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
 
        p->pdata = NULL;
 
+       tmio_mmc_host_remove(host);
+
        for (i = 0; i < 3; i++) {
                irq = platform_get_irq(pdev, i);
                if (irq >= 0)
                        free_irq(irq, host);
        }
 
-       tmio_mmc_host_remove(host);
        clk_disable(priv->clk);
        clk_put(priv->clk);
        kfree(priv);
index ad6347bb02ddd9ee20a6085185fb6ca03924d666..0b09e8239aa05d09abf3b6b376d838b2ba8d178b 100644 (file)
@@ -824,8 +824,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct tmio_mmc_data *pdata = host->pdata;
 
-       return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
-               !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
+       return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+                (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
 }
 
 static int tmio_mmc_get_cd(struct mmc_host *mmc)
index cbb03305b77bb6507c5f18a3a6e9a9b23305205d..d4455ffbefd8a931317beaad43eefb563b10c62b 100644 (file)
@@ -2096,7 +2096,7 @@ static struct mmc_host_ops vub300_mmc_ops = {
 static int vub300_probe(struct usb_interface *interface,
                        const struct usb_device_id *id)
 {                              /* NOT irq */
-       struct vub300_mmc_host *vub300 = NULL;
+       struct vub300_mmc_host *vub300;
        struct usb_host_interface *iface_desc;
        struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
        int i;
@@ -2118,23 +2118,20 @@ static int vub300_probe(struct usb_interface *interface,
        command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!command_out_urb) {
                retval = -ENOMEM;
-               dev_err(&vub300->udev->dev,
-                       "not enough memory for the command_out_urb\n");
+               dev_err(&udev->dev, "not enough memory for command_out_urb\n");
                goto error0;
        }
        command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!command_res_urb) {
                retval = -ENOMEM;
-               dev_err(&vub300->udev->dev,
-                       "not enough memory for the command_res_urb\n");
+               dev_err(&udev->dev, "not enough memory for command_res_urb\n");
                goto error1;
        }
        /* this also allocates memory for our VUB300 mmc host device */
        mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
        if (!mmc) {
                retval = -ENOMEM;
-               dev_err(&vub300->udev->dev,
-                       "not enough memory for the mmc_host\n");
+               dev_err(&udev->dev, "not enough memory for the mmc_host\n");
                goto error4;
        }
        /* MMC core transfer sizes tunable parameters */
index 0bb254c7d2b1288b5716ca54b578438453c2ef80..33d8aad8bba5a47b2c0135acf4132454bab12eea 100644 (file)
@@ -339,9 +339,9 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                                    (FIR_OP_UA  << FIR_OP1_SHIFT) |
                                    (FIR_OP_RBW << FIR_OP2_SHIFT));
                out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
-               /* 5 bytes for manuf, device and exts */
-               out_be32(&lbc->fbcr, 5);
-               elbc_fcm_ctrl->read_bytes = 5;
+               /* nand_get_flash_type() reads the entire 8-byte ID string */
+               out_be32(&lbc->fbcr, 8);
+               elbc_fcm_ctrl->read_bytes = 8;
                elbc_fcm_ctrl->use_mdr = 1;
                elbc_fcm_ctrl->mdr = 0;
 
index d84f6e8903a58f6334acd9e67d1811823c73ee86..5b732988d49389c9811ae2d5c19c87ebdda92807 100644 (file)
@@ -412,7 +412,7 @@ el2_open(struct net_device *dev)
                outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
                outb_p(0x00, E33G_IDCFR);
                msleep(1);
-               free_irq(*irqp, el2_probe_interrupt);
+               free_irq(*irqp, &seen);
                if (!seen)
                        continue;
 
@@ -422,6 +422,7 @@ el2_open(struct net_device *dev)
                        continue;
                if (retval < 0)
                        goto err_disable;
+               break;
        } while (*++irqp);
 
        if (*irqp == 0) {
index 98517a373473e18b1a9f6982407831580079263a..e3bad8247fd1b237c93bb210b02480d11a1f9e94 100644 (file)
@@ -992,6 +992,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
         * features
         */
        dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+       dev->vlan_features = dev->features;
 
        dev->irq = pdev->irq;
 
index 19f04a34783a2356fd8ad19ada6be3ca3fa46b04..93359fab240e13f95e61b8a94f8045b61bf13f01 100644 (file)
@@ -3416,7 +3416,8 @@ config NETCONSOLE
 
 config NETCONSOLE_DYNAMIC
        bool "Dynamic reconfiguration of logging targets"
-       depends on NETCONSOLE && SYSFS && CONFIGFS_FS
+       depends on NETCONSOLE && SYSFS && CONFIGFS_FS && \
+                       !(NETCONSOLE=y && CONFIGFS_FS=m)
        help
          This option enables the ability to dynamically reconfigure target
          parameters (interface, IP addresses, port numbers, MAC addresses)
index 68d45ba2d9b9a0844c0c1691a31e07bf7374168b..6c019e14854691ab70ebece126dcf0e8cf0d4195 100644 (file)
@@ -52,13 +52,13 @@ MODULE_DESCRIPTION(DRV_DESC);
 MODULE_ALIAS("platform:bfin_mac");
 
 #if defined(CONFIG_BFIN_MAC_USE_L1)
-# define bfin_mac_alloc(dma_handle, size)  l1_data_sram_zalloc(size)
-# define bfin_mac_free(dma_handle, ptr)    l1_data_sram_free(ptr)
+# define bfin_mac_alloc(dma_handle, size, num)  l1_data_sram_zalloc(size*num)
+# define bfin_mac_free(dma_handle, ptr, num)    l1_data_sram_free(ptr)
 #else
-# define bfin_mac_alloc(dma_handle, size) \
-       dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
-# define bfin_mac_free(dma_handle, ptr) \
-       dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
+# define bfin_mac_alloc(dma_handle, size, num) \
+       dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
+# define bfin_mac_free(dma_handle, ptr, num) \
+       dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
 #endif
 
 #define PKT_BUF_SZ 1580
@@ -95,7 +95,7 @@ static void desc_list_free(void)
                                t = t->next;
                        }
                }
-               bfin_mac_free(dma_handle, tx_desc);
+               bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
        }
 
        if (rx_desc) {
@@ -109,7 +109,7 @@ static void desc_list_free(void)
                                r = r->next;
                        }
                }
-               bfin_mac_free(dma_handle, rx_desc);
+               bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
        }
 }
 
@@ -126,13 +126,13 @@ static int desc_list_init(void)
 #endif
 
        tx_desc = bfin_mac_alloc(&dma_handle,
-                               sizeof(struct net_dma_desc_tx) *
+                               sizeof(struct net_dma_desc_tx),
                                CONFIG_BFIN_TX_DESC_NUM);
        if (tx_desc == NULL)
                goto init_error;
 
        rx_desc = bfin_mac_alloc(&dma_handle,
-                               sizeof(struct net_dma_desc_rx) *
+                               sizeof(struct net_dma_desc_rx),
                                CONFIG_BFIN_RX_DESC_NUM);
        if (rx_desc == NULL)
                goto init_error;
index 7d25a97d33f6fd2daffd467e9be3984dd8153bda..44e219c910daf4d33eec02d92724baa23395c229 100644 (file)
@@ -1111,7 +1111,7 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
                    struct bna_intr_info *intr_info)
 {
        int             err = 0;
-       unsigned long   flags;
+       unsigned long   irq_flags = 0, flags;
        u32     irq;
        irq_handler_t   irq_handler;
 
@@ -1125,18 +1125,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
        if (bnad->cfg_flags & BNAD_CF_MSIX) {
                irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
                irq = bnad->msix_table[bnad->msix_num - 1].vector;
-               flags = 0;
                intr_info->intr_type = BNA_INTR_T_MSIX;
                intr_info->idl[0].vector = bnad->msix_num - 1;
        } else {
                irq_handler = (irq_handler_t)bnad_isr;
                irq = bnad->pcidev->irq;
-               flags = IRQF_SHARED;
+               irq_flags = IRQF_SHARED;
                intr_info->intr_type = BNA_INTR_T_INTX;
                /* intr_info->idl.vector = 0 ? */
        }
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
+       flags = irq_flags;
        sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
 
        /*
index 4b70311a11ef9f4a3ff5b631585f0d8bded19984..74be989f51c5fff1157e8eeba5438eb13003b6f1 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/zlib.h>
 #include <linux/io.h>
 #include <linux/stringify.h>
+#include <linux/vmalloc.h>
 
 #define BNX2X_MAIN
 #include "bnx2x.h"
@@ -4537,8 +4538,7 @@ static int bnx2x_gunzip_init(struct bnx2x *bp)
        if (bp->strm  == NULL)
                goto gunzip_nomem2;
 
-       bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
-                                     GFP_KERNEL);
+       bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
        if (bp->strm->workspace == NULL)
                goto gunzip_nomem3;
 
@@ -4562,7 +4562,7 @@ gunzip_nomem1:
 static void bnx2x_gunzip_end(struct bnx2x *bp)
 {
        if (bp->strm) {
-               kfree(bp->strm->workspace);
+               vfree(bp->strm->workspace);
                kfree(bp->strm);
                bp->strm = NULL;
        }
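
The zlib inflate workspace can run to tens of kilobytes, more than a high-order kmalloc() is guaranteed to deliver under memory pressure, and it only needs to be virtually contiguous, hence the switch to vmalloc() above. A minimal sketch of the paired pattern, with hypothetical function names (the same conversion appears again in the z_decomp_alloc() hunk further down):

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>

/* Sketch: the workspace needs no physical contiguity, so vmalloc()
 * sidesteps high-order allocation failures. Memory from vmalloc()
 * must be released with vfree(), never kfree(). */
static int example_inflate_init(z_stream *strm)
{
	strm->workspace = vmalloc(zlib_inflate_workspacesize());
	return strm->workspace ? 0 : -ENOMEM;
}

static void example_inflate_end(z_stream *strm)
{
	vfree(strm->workspace);		/* kfree() here would be a bug */
	strm->workspace = NULL;
}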
index 652b30e525d01d8c4b547465bb66f943929fd4ea..63c22b0bb5ad0fa1d192967371fd6071f824ce2a 100644 (file)
@@ -1297,6 +1297,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
                goto out;
 
        np->dev = slave->dev;
+       strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
        err = __netpoll_setup(np);
        if (err) {
                kfree(np);
@@ -1427,9 +1428,9 @@ out:
        return features;
 }
 
-#define BOND_VLAN_FEATURES     (NETIF_F_ALL_TX_OFFLOADS | \
-                                NETIF_F_SOFT_FEATURES | \
-                                NETIF_F_LRO)
+#define BOND_VLAN_FEATURES     (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+                                NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+                                NETIF_F_HIGHDMA | NETIF_F_LRO)
 
 static void bond_compute_features(struct bonding *bond)
 {
index 1d699e3df54704f7fc30a0c8facc757d6dbc9387..754df5ef1729505f32a95a50c0a672812a1e6bae 100644 (file)
@@ -36,7 +36,7 @@ config CAN_SLCAN
 config CAN_DEV
        tristate "Platform CAN drivers with Netlink support"
        depends on CAN
-       default Y
+       default y
        ---help---
          Enables the common framework for platform CAN drivers with Netlink
          support. This is the standard library for CAN drivers.
@@ -45,7 +45,7 @@ config CAN_DEV
 config CAN_CALC_BITTIMING
        bool "CAN bit-timing calculation"
        depends on CAN_DEV
-       default Y
+       default y
        ---help---
          If enabled, CAN bit-timing parameters will be calculated for the
          bit-rate specified via Netlink argument "bitrate" when the device
index 3f562ba2f0c9c70a81a94f52711929d26b6a46e5..76bf5892b962bbddb6ce19a64d672f42752f7a67 100644 (file)
@@ -2026,7 +2026,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else
                skb_checksum_none_assert(skb);
-       skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
+       skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
 
        if (unlikely(p->vlan_valid)) {
                struct vlan_group *grp = pi->vlan_grp;
@@ -2145,7 +2145,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
        if (!complete)
                return;
 
-       skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
+       skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
 
        if (unlikely(cpl->vlan_valid)) {
                struct vlan_group *grp = pi->vlan_grp;
index 7a84e45487e83ba184dcccf4def37529c4607ba9..7583a9572bcc5a55c1d4fc87378f0d1475bccc45 100644 (file)
@@ -105,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
                goto out_ep;
 
        fep->fcc.mem = (void __iomem *)cpm2_immr;
-       fpi->dpram_offset = cpm_dpalloc(128, 8);
+       fpi->dpram_offset = cpm_dpalloc(128, 32);
        if (IS_ERR_VALUE(fpi->dpram_offset)) {
                ret = fpi->dpram_offset;
                goto out_fcccp;
index 2dfcc8047847b12ade17da7c324a717133398680..dfa55f94ba7fcbe612acfb460bff7125b834f96b 100644 (file)
@@ -2289,6 +2289,23 @@ static int gfar_set_mac_address(struct net_device *dev)
        return 0;
 }
 
+/* Check if rx parser should be activated */
+void gfar_check_rx_parser_mode(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs;
+       u32 tempval;
+
+       regs = priv->gfargrp[0].regs;
+
+       tempval = gfar_read(&regs->rctrl);
+       /* If parse is no longer required, then disable parser */
+       if (tempval & RCTRL_REQ_PARSER)
+               tempval |= RCTRL_PRSDEP_INIT;
+       else
+               tempval &= ~RCTRL_PRSDEP_INIT;
+       gfar_write(&regs->rctrl, tempval);
+}
+
 
 /* Enables and disables VLAN insertion/extraction */
 static void gfar_vlan_rx_register(struct net_device *dev,
@@ -2325,12 +2342,9 @@ static void gfar_vlan_rx_register(struct net_device *dev,
                /* Disable VLAN tag extraction */
                tempval = gfar_read(&regs->rctrl);
                tempval &= ~RCTRL_VLEX;
-               /* If parse is no longer required, then disable parser */
-               if (tempval & RCTRL_REQ_PARSER)
-                       tempval |= RCTRL_PRSDEP_INIT;
-               else
-                       tempval &= ~RCTRL_PRSDEP_INIT;
                gfar_write(&regs->rctrl, tempval);
+
+               gfar_check_rx_parser_mode(priv);
        }
 
        gfar_change_mtu(dev, dev->mtu);
index ba36dc7a34356c0fac622cbd2de682201049d5eb..440e69d8beff6a121a53723f07439c24bb043d81 100644 (file)
@@ -274,7 +274,7 @@ extern const char gfar_driver_version[];
 #define RCTRL_PROM             0x00000008
 #define RCTRL_EMEN             0x00000002
 #define RCTRL_REQ_PARSER       (RCTRL_VLEX | RCTRL_IPCSEN | \
-                                RCTRL_TUCSEN)
+                                RCTRL_TUCSEN | RCTRL_FILREN)
 #define RCTRL_CHECKSUMMING     (RCTRL_IPCSEN | RCTRL_TUCSEN | \
                                RCTRL_PRSDEP_INIT)
 #define RCTRL_EXTHASH          (RCTRL_GHTX)
@@ -1156,6 +1156,7 @@ extern void gfar_configure_coalescing(struct gfar_private *priv,
                unsigned long tx_mask, unsigned long rx_mask);
 void gfar_init_sysfs(struct net_device *dev);
 int gfar_set_features(struct net_device *dev, u32 features);
+extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
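
gfar_check_rx_parser_mode() centralizes a rule the VLAN path used to open-code: keep the RX parser enabled while any consumer of parse results is active, and RCTRL_REQ_PARSER now counts the filer (RCTRL_FILREN) among those consumers. A standalone sketch of the decision; the bit values are copied from gianfar.h for illustration and should be treated as assumptions:

#include <stdint.h>
#include <stdio.h>

#define RCTRL_VLEX        0x00002000
#define RCTRL_FILREN      0x00001000
#define RCTRL_IPCSEN      0x00000200
#define RCTRL_TUCSEN      0x00000100
#define RCTRL_PRSDEP_INIT 0x000000c0
#define RCTRL_REQ_PARSER  (RCTRL_VLEX | RCTRL_IPCSEN | \
                           RCTRL_TUCSEN | RCTRL_FILREN)

/* Mirror of gfar_check_rx_parser_mode(): keep the parser on while any
 * consumer of parse results is still enabled. */
static uint32_t check_rx_parser_mode(uint32_t rctrl)
{
	if (rctrl & RCTRL_REQ_PARSER)
		rctrl |= RCTRL_PRSDEP_INIT;
	else
		rctrl &= ~RCTRL_PRSDEP_INIT;
	return rctrl;
}

int main(void)
{
	uint32_t rctrl = RCTRL_FILREN | RCTRL_PRSDEP_INIT;

	rctrl &= ~RCTRL_VLEX;	/* VLAN extraction switched off */
	rctrl = check_rx_parser_mode(rctrl);
	/* Before RCTRL_FILREN joined RCTRL_REQ_PARSER, this printed "off"
	 * and the filer silently lost its parse results. */
	printf("parser %s\n", rctrl & RCTRL_PRSDEP_INIT ? "on" : "off");
	return 0;
}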
 
index f181304a7ab6464b809b0bf7fc7a436a3721f006..672f096fe0905d8a59bf6efb495922e51b031c48 100644 (file)
@@ -1015,11 +1015,10 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
                return -EINVAL;
 
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+       GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+                     dev->dev_addr[4] << 8 | dev->dev_addr[5]);
 
-       GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
-       GRETH_REGSAVE(regs->esa_lsb,
-                     addr->sa_data[2] << 24 | addr->
-                     sa_data[3] << 16 | addr->sa_data[4] << 8 | addr->sa_data[5]);
        return 0;
 }
 
index 3e5d0b6b6516133039192fa5dfd1fdf88660d81b..0d283781bc5e54a72d72a608b667f0e067b6d680 100644 (file)
@@ -692,10 +692,10 @@ static void sixpack_close(struct tty_struct *tty)
 {
        struct sixpack *sp;
 
-       write_lock(&disc_data_lock);
+       write_lock_bh(&disc_data_lock);
        sp = tty->disc_data;
        tty->disc_data = NULL;
-       write_unlock(&disc_data_lock);
+       write_unlock_bh(&disc_data_lock);
        if (!sp)
                return;
 
index 4c628393c8b157cbc09de52d902b5fa8c3d370a3..bc02968cee161f017dc3749cc6d57d8928b059d0 100644 (file)
@@ -813,10 +813,10 @@ static void mkiss_close(struct tty_struct *tty)
 {
        struct mkiss *ax;
 
-       write_lock(&disc_data_lock);
+       write_lock_bh(&disc_data_lock);
        ax = tty->disc_data;
        tty->disc_data = NULL;
-       write_unlock(&disc_data_lock);
+       write_unlock_bh(&disc_data_lock);
 
        if (!ax)
                return;
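
Both the 6pack and mkiss hunks fix the same deadlock: disc_data_lock is taken for reading from softirq context on the receive path, so a process-context writer must disable bottom halves, or a softirq arriving on the same CPU while the write lock is held spins forever. The locking pattern, sketched with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);
static void *example_data;

/* Process-context writer: _bh keeps softirqs off this CPU while the
 * write lock is held, since the reader below runs in softirq context. */
static void example_detach(void)
{
	write_lock_bh(&example_lock);
	example_data = NULL;
	write_unlock_bh(&example_lock);
}

/* Softirq-context reader: a plain read_lock() is sufficient here. */
static void *example_lookup(void)
{
	void *p;

	read_lock(&example_lock);
	p = example_data;
	read_unlock(&example_lock);
	return p;
}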
index 8e10d2f6a5adc8340d239c729e835fb0122b3acd..c3ecb118c1df39f925e40f023f6e2f8a63ec763f 100644 (file)
@@ -1580,12 +1580,12 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
        hp100_outl(ringptr->pdl_paddr, TX_PDA_L);       /* Low Prio. Queue */
 
        lp->txrcommit++;
-       spin_unlock_irqrestore(&lp->lock, flags);
 
-       /* Update statistics */
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
 
+       spin_unlock_irqrestore(&lp->lock, flags);
+
        return NETDEV_TX_OK;
 
 drop:
index b6060f7538dfc42d250ef9bbd59bb93221fd6fd7..a900d5bf294889578df802c280ca4388bce57631 100644 (file)
@@ -135,7 +135,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
 }
 
 /* Initialise a single lance board at the given DIO device */
-static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
+static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
 {
         unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
         struct hplance_private *lp;
index b78be088c4ad53b54be4c61b37cb0cb7d93c9373..60f46bc2bf64076bca146c272b9dcf6059dab609 100644 (file)
@@ -140,7 +140,7 @@ MODULE_LICENSE("GPL");
 module_param(mtu, int, 0);
 module_param(debug, int, 0);
 module_param(rx_copybreak, int, 0);
-module_param(dspcfg_workaround, int, 1);
+module_param(dspcfg_workaround, int, 0);
 module_param_array(options, int, NULL, 0);
 module_param_array(full_duplex, int, NULL, 0);
 MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
@@ -2028,8 +2028,8 @@ static void drain_rx(struct net_device *dev)
                np->rx_ring[i].cmd_status = 0;
                np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (np->rx_skbuff[i]) {
-                       pci_unmap_single(np->pci_dev,
-                               np->rx_dma[i], buflen,
+                       pci_unmap_single(np->pci_dev, np->rx_dma[i],
+                               buflen + NATSEMI_PADDING,
                                PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(np->rx_skbuff[i]);
                }
@@ -2360,7 +2360,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
                                        PCI_DMA_FROMDEVICE);
                        } else {
                                pci_unmap_single(np->pci_dev, np->rx_dma[entry],
-                                       buflen, PCI_DMA_FROMDEVICE);
+                                                buflen + NATSEMI_PADDING,
+                                                PCI_DMA_FROMDEVICE);
                                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                                np->rx_skbuff[entry] = NULL;
                        }
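
The rule behind both natsemi hunks: pci_unmap_single() must be passed exactly the size given to the matching pci_map_single(), and natsemi maps its RX buffers with NATSEMI_PADDING included. A kernel-style sketch of the paired calls; the helpers and the padding value are illustrative, not taken from the driver:

#include <linux/pci.h>

#define EXAMPLE_PADDING 16	/* stand-in for NATSEMI_PADDING */

static dma_addr_t example_map(struct pci_dev *pdev, void *buf, size_t buflen)
{
	return pci_map_single(pdev, buf, buflen + EXAMPLE_PADDING,
			      PCI_DMA_FROMDEVICE);
}

static void example_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t buflen)
{
	/* Passing a bare buflen here is exactly the bug fixed above. */
	pci_unmap_single(pdev, addr, buflen + EXAMPLE_PADDING,
			 PCI_DMA_FROMDEVICE);
}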
index b644383017f91eba49e845048dde1e51d3241796..c0788a31ff0f4a8c0ee8b637ba335e69b7cd3056 100644 (file)
@@ -1965,11 +1965,11 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        netxen_tso_check(netdev, tx_ring, first_desc, skb);
 
-       netxen_nic_update_cmd_producer(adapter, tx_ring);
-
        adapter->stats.txbytes += skb->len;
        adapter->stats.xmitcalled++;
 
+       netxen_nic_update_cmd_producer(adapter, tx_ring);
+
        return NETDEV_TX_OK;
 
 drop_packet:
index 392a6c4b72e5e4decda29324c4b82708311ec7dd..a70244306c9462830c4ebde0b87334ef7667fa0e 100644 (file)
@@ -58,6 +58,7 @@ config BROADCOM_PHY
 
 config BCM63XX_PHY
        tristate "Drivers for Broadcom 63xx SOCs internal PHY"
+       depends on BCM63XX
        ---help---
          Currently supports the 6348 and 6358 PHYs.
 
index b0c9522bb535293ff72f304ddf2c77fe1a7524e6..2cd8dc5847b4432f5d0567e0cb7bc083e7a63fa3 100644 (file)
@@ -543,11 +543,20 @@ static void recalibrate(struct dp83640_clock *clock)
 
 /* time stamping methods */
 
-static void decode_evnt(struct dp83640_private *dp83640,
-                       struct phy_txts *phy_txts, u16 ests)
+static int decode_evnt(struct dp83640_private *dp83640,
+                      void *data, u16 ests)
 {
+       struct phy_txts *phy_txts;
        struct ptp_clock_event event;
        int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
+       u16 ext_status = 0;
+
+       if (ests & MULT_EVNT) {
+               ext_status = *(u16 *) data;
+               data += sizeof(ext_status);
+       }
+
+       phy_txts = data;
 
        switch (words) { /* fall through in every case */
        case 3:
@@ -565,6 +574,9 @@ static void decode_evnt(struct dp83640_private *dp83640,
        event.timestamp = phy2txts(&dp83640->edata);
 
        ptp_clock_event(dp83640->clock->ptp_clock, &event);
+
+       words = ext_status ? words + 2 : words + 1;
+       return words * sizeof(u16);
 }
 
 static void decode_rxts(struct dp83640_private *dp83640,
@@ -643,9 +655,7 @@ static void decode_status_frame(struct dp83640_private *dp83640,
 
                } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
 
-                       phy_txts = (struct phy_txts *) ptr;
-                       decode_evnt(dp83640, phy_txts, ests);
-                       size = sizeof(*phy_txts);
+                       size = decode_evnt(dp83640, ptr, ests);
 
                } else {
                        size = 0;
@@ -1034,8 +1044,8 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 
        if (is_status_frame(skb, type)) {
                decode_status_frame(dp83640, skb);
-               /* Let the stack drop this frame. */
-               return false;
+               kfree_skb(skb);
+               return true;
        }
 
        SKB_PTP_TYPE(skb) = type;
index a1b82c9c67d246c64f41ee1ec51f83b04fe0a6fa..c554a397e558c2b0fbd5ac0ff2d94523612f8a42 100644 (file)
@@ -523,7 +523,7 @@ static void ppp_async_process(unsigned long arg)
 #define PUT_BYTE(ap, buf, c, islcp)    do {            \
        if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
                *buf++ = PPP_ESCAPE;                    \
-               *buf++ = c ^ 0x20;                      \
+               *buf++ = c ^ PPP_TRANS;                 \
        } else                                          \
                *buf++ = c;                             \
 } while (0)
@@ -896,7 +896,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
                                sp = skb_put(skb, n);
                                memcpy(sp, buf, n);
                                if (ap->state & SC_ESCAPE) {
-                                       sp[0] ^= 0x20;
+                                       sp[0] ^= PPP_TRANS;
                                        ap->state &= ~SC_ESCAPE;
                                }
                        }
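
PPP over async lines escapes control characters by emitting PPP_ESCAPE (0x7d) followed by the byte XORed with PPP_TRANS (0x20); the hunks above simply replace the bare 0x20 with the named constant on both the transmit and receive sides. A self-contained round trip, with the two constants inlined as defined in <linux/ppp_defs.h>:

#include <stdio.h>

#define PPP_ESCAPE 0x7d	/* escape marker, as in ppp_defs.h */
#define PPP_TRANS  0x20	/* XORed with the byte following an escape */

/* Emit one byte, escaping it when the async control map requires. */
static unsigned int put_byte(unsigned char *out, unsigned char c,
			     int must_escape)
{
	if (must_escape) {
		out[0] = PPP_ESCAPE;
		out[1] = c ^ PPP_TRANS;
		return 2;
	}
	out[0] = c;
	return 1;
}

int main(void)
{
	unsigned char buf[2];
	unsigned char decoded;
	unsigned int n;

	n = put_byte(buf, 0x03, 1);	/* 0x03 < 0x20, so it is escaped */
	decoded = buf[1] ^ PPP_TRANS;	/* the receiver applies the same XOR */
	printf("emitted %u bytes, decoded 0x%02x\n", n, decoded);
	return 0;
}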
index 31e9407a07394e6bdeaf976afe59b012160e2c9b..1dbdf82a6dfd81427321d9cba84df47aacff8999 100644 (file)
@@ -305,7 +305,7 @@ static void z_decomp_free(void *arg)
 
        if (state) {
                zlib_inflateEnd(&state->strm);
-               kfree(state->strm.workspace);
+               vfree(state->strm.workspace);
                kfree(state);
        }
 }
@@ -345,8 +345,7 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
 
        state->w_size         = w_size;
        state->strm.next_out  = NULL;
-       state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
-                                       GFP_KERNEL|__GFP_REPEAT);
+       state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
        if (state->strm.workspace == NULL)
                goto out_free;
 
index 718879b35b7d2ac2ea5415dcd70b63e64da94255..bc9a4bb31980f1758a38e38daebdc5a7ab8f2a12 100644 (file)
@@ -348,8 +348,9 @@ static int pppoe_device_event(struct notifier_block *this,
 
        /* Only look at sockets that are using this specific device. */
        switch (event) {
+       case NETDEV_CHANGEADDR:
        case NETDEV_CHANGEMTU:
-               /* A change in mtu is a bad thing, requiring
+               /* A change in mtu or address is a bad thing, requiring
                 * LCP re-negotiation.
                 */
 
index 89f7540d90f909708fa318dbe9ecf6458252a922..5f597ca592bb8bf94a80861de3d315bc1b3c2026 100644 (file)
@@ -1273,7 +1273,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
        wmb();
        wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
 
-       stats->tx_bytes += skb->len;
+       stats->tx_bytes += length;
        stats->tx_packets++;
        dev->trans_start = jiffies;
        if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
index d32850715f5c309350ca8b29d81267c6c33e3735..ca306fd5f588727cf235b692e8147d4a916ef164 100644 (file)
@@ -16,7 +16,7 @@
  */
 #define DRV_NAME       "qlge"
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION    "v1.00.00.27.00.00-01"
+#define DRV_VERSION    "v1.00.00.29.00.00-01"
 
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
@@ -1996,6 +1996,7 @@ enum {
        QL_LB_LINK_UP = 10,
        QL_FRC_COREDUMP = 11,
        QL_EEH_FATAL = 12,
+       QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */
 };
 
 /* link_status bit definitions */
index 930ae45457bbc11df0316f07ab803a347fc5b75d..6b4ff970972b493bd0c326bdef2dd0f52b133b32 100644 (file)
@@ -2152,6 +2152,10 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
         * thread
         */
        clear_bit(QL_ADAPTER_UP, &qdev->flags);
+       /* Set the asic recovery bit so the reset path knows we are in
+        * fatal error recovery rather than a normal close
+        */
+       set_bit(QL_ASIC_RECOVERY, &qdev->flags);
        queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
 }
 
@@ -2166,23 +2170,20 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
                return;
 
        case CAM_LOOKUP_ERR_EVENT:
-               netif_err(qdev, link, qdev->ndev,
-                         "Multiple CAM hits lookup occurred.\n");
-               netif_err(qdev, drv, qdev->ndev,
-                         "This event shouldn't occur.\n");
+               netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
+               netdev_err(qdev->ndev, "This event shouldn't occur.\n");
                ql_queue_asic_error(qdev);
                return;
 
        case SOFT_ECC_ERROR_EVENT:
-               netif_err(qdev, rx_err, qdev->ndev,
-                         "Soft ECC error detected.\n");
+               netdev_err(qdev->ndev, "Soft ECC error detected.\n");
                ql_queue_asic_error(qdev);
                break;
 
        case PCI_ERR_ANON_BUF_RD:
-               netif_err(qdev, rx_err, qdev->ndev,
-                         "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
-                         ib_ae_rsp->q_id);
+               netdev_err(qdev->ndev, "PCI error occurred when reading "
+                                       "anonymous buffers from rx_ring %d.\n",
+                                       ib_ae_rsp->q_id);
                ql_queue_asic_error(qdev);
                break;
 
@@ -2437,11 +2438,10 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
         */
        if (var & STS_FE) {
                ql_queue_asic_error(qdev);
-               netif_err(qdev, intr, qdev->ndev,
-                         "Got fatal error, STS = %x.\n", var);
+               netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
                var = ql_read32(qdev, ERR_STS);
-               netif_err(qdev, intr, qdev->ndev,
-                         "Resetting chip. Error Status Register = 0x%x\n", var);
+               netdev_err(qdev->ndev, "Resetting chip. "
+                                       "Error Status Register = 0x%x\n", var);
                return IRQ_HANDLED;
        }
 
@@ -3818,11 +3818,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
        end_jiffies = jiffies +
                max((unsigned long)1, usecs_to_jiffies(30));
 
-       /* Stop management traffic. */
-       ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+       /* If the recovery bit is set, skip the mailbox command and
+        * clear the bit; otherwise this is a normal reset.
+        */
+       if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+               /* Stop management traffic. */
+               ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
 
-       /* Wait for the NIC and MGMNT FIFOs to empty. */
-       ql_wait_fifo_empty(qdev);
+               /* Wait for the NIC and MGMNT FIFOs to empty. */
+               ql_wait_fifo_empty(qdev);
+       } else
+               clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
 
        ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
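
The recovery handshake rides on an atomic flag bit: the error path sets QL_ASIC_RECOVERY before queueing the reset work, and ql_adapter_reset() uses it to decide whether the firmware is healthy enough to be asked to quiesce first. The handshake in miniature, with hypothetical names:

#include <linux/bitops.h>

#define EXAMPLE_RECOVERY_BIT 14		/* mirrors QL_ASIC_RECOVERY */

/* Fatal error path: flag the upcoming reset as error recovery. */
static void example_queue_asic_error(unsigned long *flags)
{
	set_bit(EXAMPLE_RECOVERY_BIT, flags);
	/* ...queue the delayed reset work here... */
}

/* Reset path: skip the steps that require responsive firmware. */
static void example_adapter_reset(unsigned long *flags)
{
	if (!test_bit(EXAMPLE_RECOVERY_BIT, flags)) {
		/* normal close: stop management traffic, drain FIFOs */
	} else {
		clear_bit(EXAMPLE_RECOVERY_BIT, flags);
	}
	/* ...issue the function reset either way... */
}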
 
index 200a363c3bf59ac47d5da56e762486af6572b252..0ffec46084416958db7d7b9b756c859aa416e7d0 100644 (file)
@@ -677,9 +677,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
                if (status & RX_FIFO_FULL)
                        dev->stats.rx_fifo_errors++;
 
-               /* Mask off RX interrupt */
-               misr &= ~RX_INTS;
-               napi_schedule(&lp->napi);
+               if (likely(napi_schedule_prep(&lp->napi))) {
+                       /* Mask off RX interrupt */
+                       misr &= ~RX_INTS;
+                       __napi_schedule(&lp->napi);
+               }
        }
 
        /* TX interrupt request */
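
The r6040 fix adopts the idiomatic two-step NAPI kick: napi_schedule_prep() atomically claims the poll, and only the winner masks the device's RX interrupt source before calling __napi_schedule(); previously the interrupt could be masked even when NAPI was already scheduled. Roughly, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Sketch of the idiomatic NAPI kick from a hard interrupt handler. */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct napi_struct *napi = dev_id;	/* illustrative dev_id */

	if (likely(napi_schedule_prep(napi))) {
		/* ...mask the device RX interrupt here... */
		__napi_schedule(napi);
	}
	return IRQ_HANDLED;
}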
index ef1ce2ebeb4a6784ecca5d3c10f7ca084308ab9a..5990621fb5cd3f401a0db5d30c8507b2b866cc9e 100644 (file)
@@ -742,7 +742,7 @@ static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
        msleep(2);
        for (i = 0; i < 5; i++) {
                udelay(100);
-               if (!(RTL_R32(ERIDR) & ERIAR_FLAG))
+               if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
                        break;
        }
 
@@ -1621,7 +1621,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
         *
         * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
         */
-       static const struct {
+       static const struct rtl_mac_info {
                u32 mask;
                u32 val;
                int mac_version;
@@ -1689,7 +1689,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 
                /* Catch-all */
                { 0x00000000, 0x00000000,       RTL_GIGA_MAC_NONE   }
-       }, *p = mac_info;
+       };
+       const struct rtl_mac_info *p = mac_info;
        u32 reg;
 
        reg = RTL_R32(TxConfig);
@@ -3681,7 +3682,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
 
 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
 {
-       static const struct {
+       static const struct rtl_cfg2_info {
                u32 mac_version;
                u32 clk;
                u32 val;
@@ -3690,7 +3691,8 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
                { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
                { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
                { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
-       }, *p = cfg2_info;
+       };
+       const struct rtl_cfg2_info *p = cfg2_info;
        unsigned int i;
        u32 clk;
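
Both rtl8169 hunks fix the same C pitfall: in a declaration like "static const struct { ... } tbl[] = { ... }, *p = tbl;" every declarator shares the storage-class specifier, so p is silently static and keeps its advanced value across calls. Naming the struct type lets the cursor be declared separately with automatic storage, which is what the patch does. A standalone illustration:

#include <stdio.h>

struct mac_info {	/* naming the type permits a separate declarator */
	unsigned int mask, val;
	int version;
};

static const struct mac_info table[] = {
	{ 0x7c800000, 0x28000000, 26 },
	{ 0x00000000, 0x00000000, -1 },	/* catch-all */
};

int main(void)
{
	const struct mac_info *p;	/* automatic storage, reset per call */
	unsigned int reg = 0x12345678;

	for (p = table; (reg & p->mask) != p->val; p++)
		;
	printf("matched version %d\n", p->version);
	return 0;
}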
 
index 77c5092a6a408089bbfd650747f981a17af8ac50..5d3436d47edd45d52a45334e2a40a3ee86957429 100644 (file)
@@ -378,7 +378,7 @@ static int rionet_close(struct net_device *ndev)
 
 static void rionet_remove(struct rio_dev *rdev)
 {
-       struct net_device *ndev = NULL;
+       struct net_device *ndev = rio_get_drvdata(rdev);
        struct rionet_peer *peer, *tmp;
 
        free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
@@ -433,22 +433,12 @@ static const struct net_device_ops rionet_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
 };
 
-static int rionet_setup_netdev(struct rio_mport *mport)
+static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
 {
        int rc = 0;
-       struct net_device *ndev = NULL;
        struct rionet_private *rnet;
        u16 device_id;
 
-       /* Allocate our net_device structure */
-       ndev = alloc_etherdev(sizeof(struct rionet_private));
-       if (ndev == NULL) {
-               printk(KERN_INFO "%s: could not allocate ethernet device.\n",
-                      DRV_NAME);
-               rc = -ENOMEM;
-               goto out;
-       }
-
        rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
                        mport->sys_size ? __fls(sizeof(void *)) + 4 : 0);
        if (!rionet_active) {
@@ -504,11 +494,21 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
        int rc = -ENODEV;
        u32 lpef, lsrc_ops, ldst_ops;
        struct rionet_peer *peer;
+       struct net_device *ndev = NULL;
 
        /* If local device is not rionet capable, give up quickly */
        if (!rionet_capable)
                goto out;
 
+       /* Allocate our net_device structure */
+       ndev = alloc_etherdev(sizeof(struct rionet_private));
+       if (ndev == NULL) {
+               printk(KERN_INFO "%s: could not allocate ethernet device.\n",
+                      DRV_NAME);
+               rc = -ENOMEM;
+               goto out;
+       }
+
        /*
         * First time through, make sure local device is rionet
         * capable, setup netdev,  and set flags so this is skipped
@@ -529,7 +529,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
                        goto out;
                }
 
-               rc = rionet_setup_netdev(rdev->net->hport);
+               rc = rionet_setup_netdev(rdev->net->hport, ndev);
                rionet_check = 1;
        }
 
@@ -546,6 +546,8 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
                list_add_tail(&peer->node, &rionet_peers);
        }
 
+       rio_set_drvdata(rdev, ndev);
+
       out:
        return rc;
 }
index 8a72a979ee711262a613ddbe6e0fd3276b6f77d4..1f3f7b4dd6389d6bcdf4ea8c76baf66d92896f63 100644 (file)
@@ -140,6 +140,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .tpauser        = 1,
        .hw_swap        = 1,
        .no_ade         = 1,
+       .rpadir         = 1,
+       .rpadir_value   = 2 << 16,
 };
 
 #define SH_GIGA_ETH_BASE       0xfee00000
@@ -1184,8 +1186,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                                mdp->cd->set_rate(ndev);
                }
                if (mdp->link == PHY_DOWN) {
-                       sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
-                                       | ECMR_DM, ECMR);
+                       sh_eth_write(ndev,
+                               (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
                }
index 8ec1a9a0bb9ae007c69865b2599f07d3f23c99c8..2f110fb30daa9e9417c99b1e8f925e5e606bec96 100644 (file)
@@ -182,10 +182,10 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
 #ifdef SL_INCLUDE_CSLIP
        cbuff = xchg(&sl->cbuff, cbuff);
        slcomp = xchg(&sl->slcomp, slcomp);
+#endif
 #ifdef CONFIG_SLIP_MODE_SLIP6
        sl->xdata    = 0;
        sl->xbits    = 0;
-#endif
 #endif
        spin_unlock_bh(&sl->lock);
        err = 0;
index 4685127319669eadb65aa596ffed7ce8caa3b506..9a21ca3873fce6d0dbc455b6cda5025db9be6d59 100644 (file)
@@ -879,7 +879,6 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
        txptr = db->tx_remove_ptr;
        while(db->tx_packet_cnt) {
                tdes0 = le32_to_cpu(txptr->tdes0);
-               pr_debug("tdes0=%x\n", tdes0);
                if (tdes0 & 0x80000000)
                        break;
 
@@ -889,7 +888,6 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
 
                /* Transmit statistic counter */
                if ( tdes0 != 0x7fffffff ) {
-                       pr_debug("tdes0=%x\n", tdes0);
                        dev->stats.collisions += (tdes0 >> 3) & 0xf;
                        dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
                        if (tdes0 & TDES0_ERR_MASK) {
@@ -986,7 +984,6 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
                        /* error summary bit check */
                        if (rdes0 & 0x8000) {
                                /* This is a error packet */
-                               pr_debug("rdes0: %x\n", rdes0);
                                dev->stats.rx_errors++;
                                if (rdes0 & 1)
                                        dev->stats.rx_fifo_errors++;
@@ -1638,7 +1635,6 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
                else                            /* DM9102/DM9102A */
                        phy_mode = phy_read(db->ioaddr,
                                    db->phy_addr, 17, db->chip_id) & 0xf000;
-               pr_debug("Phy_mode %x\n", phy_mode);
                switch (phy_mode) {
                case 0x1000: db->op_mode = DMFE_10MHF; break;
                case 0x2000: db->op_mode = DMFE_10MFD; break;
index 74e94054ab1a29d41828c7b9358518733eba0754..5235f48be1be677b7ca57fd275c21392fb846762 100644 (file)
@@ -460,7 +460,23 @@ static u32 tun_net_fix_features(struct net_device *dev, u32 features)
 
        return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 }
-
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tun_poll_controller(struct net_device *dev)
+{
+       /*
+        * Tun only receives frames when:
+        * 1) the char device endpoint gets data from user space
+        * 2) the tun socket gets a sendmsg call from user space
+        * Since both of those are synchronous operations, we are guaranteed
+        * never to have pending data when we poll for it, so there is
+        * nothing to do here but return.
+        * We need this though so netpoll recognizes us as an interface that
+        * supports polling, which enables bridge devices in virt setups to
+        * still use netconsole
+        */
+       return;
+}
+#endif
 static const struct net_device_ops tun_netdev_ops = {
        .ndo_uninit             = tun_net_uninit,
        .ndo_open               = tun_net_open,
@@ -468,6 +484,9 @@ static const struct net_device_ops tun_netdev_ops = {
        .ndo_start_xmit         = tun_net_xmit,
        .ndo_change_mtu         = tun_net_change_mtu,
        .ndo_fix_features       = tun_net_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = tun_poll_controller,
+#endif
 };
 
 static const struct net_device_ops tap_netdev_ops = {
@@ -480,6 +499,9 @@ static const struct net_device_ops tap_netdev_ops = {
        .ndo_set_multicast_list = tun_net_mclist,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = tun_poll_controller,
+#endif
 };
 
 /* Initialize net device. */
index 9d4f9117260f0a613c7d72e86867f724f107f5ae..84d4608153c992556a2d14a4a6bab37160abbf1b 100644 (file)
@@ -385,6 +385,16 @@ config USB_NET_CX82310_ETH
          router with USB ethernet port. This driver is for routers only,
          it will not work with ADSL modems (use cxacru driver instead).
 
+config USB_NET_KALMIA
+       tristate "Samsung Kalmia based LTE USB modem"
+       depends on USB_USBNET
+       help
+         Choose this option if you have a Samsung Kalmia based USB modem
+         as Samsung GT-B3730.
+
+         To compile this driver as a module, choose M here: the
+         module will be called kalmia.
+
 config USB_HSO
        tristate "Option USB High Speed Mobile Devices"
        depends on USB && RFKILL
index c7ec8a5f0a90d46fffcaa5a34e72468584086dd0..c203fa21f6b12473faa7efe3f6e851ab6af94954 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
 obj-$(CONFIG_USB_USBNET)       += usbnet.o
 obj-$(CONFIG_USB_NET_INT51X1)  += int51x1.o
 obj-$(CONFIG_USB_CDC_PHONET)   += cdc-phonet.o
+obj-$(CONFIG_USB_NET_KALMIA)   += kalmia.o
 obj-$(CONFIG_USB_IPHETH)       += ipheth.o
 obj-$(CONFIG_USB_SIERRA_NET)   += sierra_net.o
 obj-$(CONFIG_USB_NET_CX82310_ETH)      += cx82310_eth.o
index 387ca43f26f4c3942e098702b55d39dbca325a90..304fe78ff60e3b287b608fb4b19bafacb915e315 100644 (file)
@@ -2421,10 +2421,8 @@ static void hso_free_net_device(struct hso_device *hso_dev)
 
        remove_net_device(hso_net->parent);
 
-       if (hso_net->net) {
+       if (hso_net->net)
                unregister_netdev(hso_net->net);
-               free_netdev(hso_net->net);
-       }
 
        /* start freeing */
        for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
@@ -2436,6 +2434,9 @@ static void hso_free_net_device(struct hso_device *hso_dev)
        kfree(hso_net->mux_bulk_tx_buf);
        hso_net->mux_bulk_tx_buf = NULL;
 
+       if (hso_net->net)
+               free_netdev(hso_net->net);
+
        kfree(hso_dev);
 }
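
The hso fix is purely about teardown order: the driver's private state is embedded in the net_device allocation, so free_netdev() must be the very last access; calling it before freeing the mux buffers, as the old code did, left those kfree() calls reading freed memory. The general shape, with hypothetical names:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct example_priv {
	void *mux_buf;		/* embedded in the net_device allocation */
};

static void example_free_net_device(struct net_device *net)
{
	struct example_priv *priv = netdev_priv(net);

	unregister_netdev(net);
	kfree(priv->mux_buf);	/* priv is still valid here */
	free_netdev(net);	/* frees priv too; touch nothing after this */
}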
 
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
new file mode 100644 (file)
index 0000000..a9b6c63
--- /dev/null
@@ -0,0 +1,392 @@
+/*
+ * USB network interface driver for Samsung Kalmia based LTE USB modems like
+ * the Samsung GT-B3730 and GT-B3710.
+ *
+ * Copyright (C) 2011 Marius Bjoernstad Kotsbak <marius@kotsbak.com>
+ *
+ * Sponsored by Quicklink Video Distribution Services Ltd.
+ *
+ * Based on the cdc_eem module.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ctype.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/gfp.h>
+
+/*
+ * The Samsung Kalmia based LTE USB modems have a CDC ACM port for modem control
+ * handled by the "option" module and an ethernet data port handled by this
+ * module.
+ *
+ * The stick must first be switched into modem mode by usb_modeswitch
+ * or a similar tool. This module then sends the modem two initialization
+ * packets, the reply to which carries the MAC address of the device. User
+ * space can then connect the modem with AT commands through the ACM port
+ * and use DHCP on the network interface exposed by this module. Network
+ * packets are sent to and from the modem in a proprietary format discovered
+ * by watching the behavior of the Windows driver for the modem.
+ *
+ * More information about the use of the modem is available in the
+ * usb_modeswitch forum and on the project page:
+ *
+ * http://www.draisberghof.de/usb_modeswitch/bb/viewtopic.php?t=465
+ * https://github.com/mkotsbak/Samsung-GT-B3730-linux-driver
+ */
+
+/* #define     DEBUG */
+/* #define     VERBOSE */
+
+#define KALMIA_HEADER_LENGTH 6
+#define KALMIA_ALIGN_SIZE 4
+#define KALMIA_USB_TIMEOUT 10000
+
+/*-------------------------------------------------------------------------*/
+
+static int
+kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
+       u8 *buffer, u8 expected_len)
+{
+       int act_len;
+       int status;
+
+       netdev_dbg(dev->net, "Sending init packet");
+
+       status = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 0x02),
+               init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
+       if (status != 0) {
+               netdev_err(dev->net,
+                       "Error sending init packet. Status %i, length %i\n",
+                       status, act_len);
+               return status;
+       }
+       else if (act_len != init_msg_len) {
+               netdev_err(dev->net,
+                       "Did not send all of init packet. Bytes sent: %i",
+                       act_len);
+       }
+       else {
+               netdev_dbg(dev->net, "Successfully sent init packet.");
+       }
+
+       status = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 0x81),
+               buffer, expected_len, &act_len, KALMIA_USB_TIMEOUT);
+
+       if (status != 0)
+               netdev_err(dev->net,
+                       "Error receiving init result. Status %i, length %i\n",
+                       status, act_len);
+       else if (act_len != expected_len)
+               netdev_err(dev->net, "Unexpected init result length: %i\n",
+                       act_len);
+
+       return status;
+}
+
+static int
+kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
+{
+       static const char init_msg_1[] =
+               { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+               0x00, 0x00 };
+       static const char init_msg_2[] =
+               { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
+               0x00, 0x00 };
+       static const int buflen = 28;
+       char *usb_buf;
+       int status;
+
+       usb_buf = kmalloc(buflen, GFP_DMA | GFP_KERNEL);
+       if (!usb_buf)
+               return -ENOMEM;
+
+       memcpy(usb_buf, init_msg_1, 12);
+       status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1)
+               / sizeof(init_msg_1[0]), usb_buf, 24);
+       if (status != 0)
+               goto out;
+
+       memcpy(usb_buf, init_msg_2, 12);
+       status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2)
+               / sizeof(init_msg_2[0]), usb_buf, 28);
+       if (status != 0)
+               goto out;
+
+       memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
+out:
+       kfree(usb_buf);
+       return status;
+}
+
+static int
+kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+       int status;
+       u8 ethernet_addr[ETH_ALEN];
+
+       /* Don't bind to AT command interface */
+       if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+               return -EINVAL;
+
+       dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK);
+       dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
+       dev->status = NULL;
+
+       dev->net->hard_header_len += KALMIA_HEADER_LENGTH;
+       dev->hard_mtu = 1400;
+       dev->rx_urb_size = dev->hard_mtu * 10; /* found optimal by testing */
+
+       status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
+
+       if (status < 0) {
+               usb_set_intfdata(intf, NULL);
+               usb_driver_release_interface(driver_of(intf), intf);
+               return status;
+       }
+
+       memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
+       memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
+
+       return status;
+}
+
+static struct sk_buff *
+kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+       struct sk_buff *skb2 = NULL;
+       u16 content_len;
+       unsigned char *header_start;
+       unsigned char ether_type_1, ether_type_2;
+       u8 remainder, padlen = 0;
+
+       if (!skb_cloned(skb)) {
+               int headroom = skb_headroom(skb);
+               int tailroom = skb_tailroom(skb);
+
+               if ((tailroom >= KALMIA_ALIGN_SIZE) && (headroom
+                       >= KALMIA_HEADER_LENGTH))
+                       goto done;
+
+               if ((headroom + tailroom) > (KALMIA_HEADER_LENGTH
+                       + KALMIA_ALIGN_SIZE)) {
+                       skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
+                               skb->data, skb->len);
+                       skb_set_tail_pointer(skb, skb->len);
+                       goto done;
+               }
+       }
+
+       skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
+               KALMIA_ALIGN_SIZE, flags);
+       if (!skb2)
+               return NULL;
+
+       dev_kfree_skb_any(skb);
+       skb = skb2;
+
+done:
+       header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
+       ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
+       ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
+
+       netdev_dbg(dev->net, "Sending etherType: %02x%02x", ether_type_1,
+               ether_type_2);
+
+       /* Header layout determined empirically from data packets */
+       header_start[0] = 0x57;
+       header_start[1] = 0x44;
+       content_len = skb->len - KALMIA_HEADER_LENGTH;
+
+       put_unaligned_le16(content_len, &header_start[2]);
+       header_start[4] = ether_type_1;
+       header_start[5] = ether_type_2;
+
+       /* Align to 4 bytes by padding with zeros */
+       remainder = skb->len % KALMIA_ALIGN_SIZE;
+       if (remainder > 0) {
+               padlen = KALMIA_ALIGN_SIZE - remainder;
+               memset(skb_put(skb, padlen), 0, padlen);
+       }
+
+       netdev_dbg(
+               dev->net,
+               "Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
+               content_len, padlen, header_start[0], header_start[1],
+               header_start[2], header_start[3], header_start[4],
+               header_start[5]);
+
+       return skb;
+}
+
+static int
+kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+       /*
+        * Our task here is to strip off framing, leaving skb with one
+        * data frame for the usbnet framework code to process.
+        */
+       static const u8 HEADER_END_OF_USB_PACKET[] =
+               { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
+       static const u8 EXPECTED_UNKNOWN_HEADER_1[] =
+               { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
+       static const u8 EXPECTED_UNKNOWN_HEADER_2[] =
+               { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
+       int i = 0;
+
+       /* incomplete header? */
+       if (skb->len < KALMIA_HEADER_LENGTH)
+               return 0;
+
+       do {
+               struct sk_buff *skb2 = NULL;
+               u8 *header_start;
+               u16 usb_packet_length, ether_packet_length;
+               int is_last;
+
+               header_start = skb->data;
+
+               if (unlikely(header_start[0] != 0x57 || header_start[1] != 0x44)) {
+                       if (!memcmp(header_start, EXPECTED_UNKNOWN_HEADER_1,
+                               sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
+                               header_start, EXPECTED_UNKNOWN_HEADER_2,
+                               sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
+                               netdev_dbg(
+                                       dev->net,
+                                       "Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Packet length: %i\n",
+                                       header_start[0], header_start[1],
+                                       header_start[2], header_start[3],
+                                       header_start[4], header_start[5],
+                                       skb->len - KALMIA_HEADER_LENGTH);
+                       }
+                       else {
+                               netdev_err(
+                                       dev->net,
+                                       "Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Packet length: %i\n",
+                                       header_start[0], header_start[1],
+                                       header_start[2], header_start[3],
+                                       header_start[4], header_start[5],
+                                       skb->len - KALMIA_HEADER_LENGTH);
+                               return 0;
+                       }
+               }
+               else
+                       netdev_dbg(
+                               dev->net,
+                               "Received header: %02x:%02x:%02x:%02x:%02x:%02x. Packet length: %i\n",
+                               header_start[0], header_start[1], header_start[2],
+                               header_start[3], header_start[4], header_start[5],
+                               skb->len - KALMIA_HEADER_LENGTH);
+
+               /* subtract start header and end header */
+               usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
+               ether_packet_length = get_unaligned_le16(&header_start[2]);
+               skb_pull(skb, KALMIA_HEADER_LENGTH);
+
+               /* Some small packets miss the end marker */
+               if (usb_packet_length < ether_packet_length) {
+                       ether_packet_length = usb_packet_length
+                               + KALMIA_HEADER_LENGTH;
+                       is_last = true;
+               }
+               else {
+                       netdev_dbg(dev->net, "Correct packet length #%i", i
+                               + 1);
+
+                       is_last = (memcmp(skb->data + ether_packet_length,
+                               HEADER_END_OF_USB_PACKET,
+                               sizeof(HEADER_END_OF_USB_PACKET)) == 0);
+                       if (!is_last) {
+                               header_start = skb->data + ether_packet_length;
+                               netdev_dbg(
+                                       dev->net,
+                                       "End header: %02x:%02x:%02x:%02x:%02x:%02x. Packet length: %i\n",
+                                       header_start[0], header_start[1],
+                                       header_start[2], header_start[3],
+                                       header_start[4], header_start[5],
+                                       skb->len - KALMIA_HEADER_LENGTH);
+                       }
+               }
+
+               if (is_last) {
+                       skb2 = skb;
+               }
+               else {
+                       skb2 = skb_clone(skb, GFP_ATOMIC);
+                       if (unlikely(!skb2))
+                               return 0;
+               }
+
+               skb_trim(skb2, ether_packet_length);
+
+               if (is_last) {
+                       return 1;
+               }
+               else {
+                       usbnet_skb_return(dev, skb2);
+                       skb_pull(skb, ether_packet_length);
+               }
+
+               i++;
+       }
+       while (skb->len);
+
+       return 1;
+}
+
+static const struct driver_info kalmia_info = {
+       .description = "Samsung Kalmia LTE USB dongle",
+       .flags = FLAG_WWAN,
+       .bind = kalmia_bind,
+       .rx_fixup = kalmia_rx_fixup,
+       .tx_fixup = kalmia_tx_fixup
+};
+
+/*-------------------------------------------------------------------------*/
+
+static const struct usb_device_id products[] = {
+       /* The unswitched USB ID, to get the module auto loaded: */
+       { USB_DEVICE(0x04e8, 0x689a) },
+       /* The stick switched into modem mode (by e.g. usb_modeswitch): */
+       { USB_DEVICE(0x04e8, 0x6889),
+               .driver_info = (unsigned long) &kalmia_info, },
+       { /* EMPTY == end of list */} };
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver kalmia_driver = {
+       .name = "kalmia",
+       .id_table = products,
+       .probe = usbnet_probe,
+       .disconnect = usbnet_disconnect,
+       .suspend = usbnet_suspend,
+       .resume = usbnet_resume
+};
+
+static int __init kalmia_init(void)
+{
+       return usb_register(&kalmia_driver);
+}
+module_init(kalmia_init);
+
+static void __exit kalmia_exit(void)
+{
+       usb_deregister(&kalmia_driver);
+}
+module_exit(kalmia_exit);
+
+MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
+MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
+MODULE_LICENSE("GPL");
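
The TX framing implemented by kalmia_tx_fixup() above is compact enough to show standalone: a 6-byte header of 0x57 0x44, the payload length in little-endian order, and the frame's two EtherType bytes, followed by the Ethernet frame zero-padded to a 4-byte boundary. A userspace sketch mirroring the driver's constants; it is illustrative only and not part of the patch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KALMIA_HEADER_LENGTH 6
#define KALMIA_ALIGN_SIZE 4

/* Wrap one Ethernet frame in a Kalmia TX header; returns total length. */
static size_t kalmia_frame(uint8_t *out, const uint8_t *eth, size_t len)
{
	size_t total = KALMIA_HEADER_LENGTH + len;

	out[0] = 0x57;			/* magic, per the driver */
	out[1] = 0x44;
	out[2] = len & 0xff;		/* content length, little endian */
	out[3] = (len >> 8) & 0xff;
	out[4] = eth[12];		/* EtherType copied into the header */
	out[5] = eth[13];
	memcpy(out + KALMIA_HEADER_LENGTH, eth, len);

	while (total % KALMIA_ALIGN_SIZE)
		out[total++] = 0;	/* pad with zeros to 4-byte alignment */
	return total;
}

int main(void)
{
	uint8_t eth[15] = { [12] = 0x08, [13] = 0x00 };	/* IPv4 EtherType */
	uint8_t frame[64];
	size_t n = kalmia_frame(frame, eth, sizeof(eth));

	printf("framed %zu bytes, header %02x:%02x\n", n, frame[0], frame[1]);
	return 0;
}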
index 241756e0e86f03e86a1b368408c761389967d8f8..1a2234c20514f88e22bd07dab26e4b25d4cc26ea 100644 (file)
@@ -331,17 +331,7 @@ static const struct usb_device_id  products [] = {
        ZAURUS_MASTER_INTERFACE,
        .driver_info = ZAURUS_PXA_INFO,
 },
-
-
-/* At least some of the newest PXA units have very different lies about
- * their standards support:  they claim to be cell phones offering
- * direct access to their radios!  (No, they don't conform to CDC MDLM.)
- */
 {
-       USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
-                       USB_CDC_PROTO_NONE),
-       .driver_info = (unsigned long) &bogus_mdlm_info,
-}, {
        /* Motorola MOTOMAGX phones */
        USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
index fa6e2ac7475a337664117cc494930877cf5dc10e..67402350d0dffaae181015f816cf9c3d67313847 100644 (file)
@@ -575,7 +575,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
        struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
        u32 val;
 
-       while (num_allocated < num_to_alloc) {
+       while (num_allocated <= num_to_alloc) {
                struct vmxnet3_rx_buf_info *rbi;
                union Vmxnet3_GenericDesc *gd;
 
@@ -621,9 +621,15 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 
                BUG_ON(rbi->dma_addr == 0);
                gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
-               gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+               gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
                                           | val | rbi->len);
 
+               /* Fill the last buffer but don't mark it ready, or else the
+                * device will think that the queue is full */
+               if (num_allocated == num_to_alloc)
+                       break;
+
+               gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
                num_allocated++;
                vmxnet3_cmd_ring_adv_next2fill(ring);
        }
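
vmxnet3 conveys descriptor ownership through a generation bit: a descriptor belongs to the device once its gen bit matches the ring's current generation. The hunk above therefore writes the final refilled descriptor with the inverted gen and breaks out before setting the live value, so the device never observes a completely full ring. The two-step write, sketched generically (bit position and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define GEN_SHIFT 31	/* illustrative position of the gen bit */

/* Write a descriptor with the wrong generation first; flip to the
 * live generation only when actually releasing it to the device. */
static void fill_desc(uint32_t *dword2, unsigned int ring_gen,
		      int hand_over, uint32_t len_and_type)
{
	*dword2 = ((uint32_t)!ring_gen << GEN_SHIFT) | len_and_type;
	if (hand_over)
		*dword2 |= (uint32_t)ring_gen << GEN_SHIFT;
}

int main(void)
{
	uint32_t d;

	fill_desc(&d, 1, 0, 1514);	/* last slot: filled, kept back */
	printf("gen=%u\n", d >> GEN_SHIFT);
	fill_desc(&d, 1, 1, 1514);	/* normal slot: handed to device */
	printf("gen=%u\n", d >> GEN_SHIFT);
	return 0;
}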
@@ -1140,6 +1146,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
        };
        u32 num_rxd = 0;
+       bool skip_page_frags = false;
        struct Vmxnet3_RxCompDesc *rcd;
        struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -1150,11 +1157,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                          &rxComp);
        while (rcd->gen == rq->comp_ring.gen) {
                struct vmxnet3_rx_buf_info *rbi;
-               struct sk_buff *skb;
+               struct sk_buff *skb, *new_skb = NULL;
+               struct page *new_page = NULL;
                int num_to_alloc;
                struct Vmxnet3_RxDesc *rxd;
                u32 idx, ring_idx;
-
+               struct vmxnet3_cmd_ring *ring = NULL;
                if (num_rxd >= quota) {
                        /* we may stop even before we see the EOP desc of
                         * the current pkt
@@ -1165,6 +1173,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
                idx = rcd->rxdIdx;
                ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+               ring = rq->rx_ring + ring_idx;
                vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
                                  &rxCmdDesc);
                rbi = rq->buf_info[ring_idx] + idx;
@@ -1193,37 +1202,80 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                goto rcd_done;
                        }
 
+                       skip_page_frags = false;
                        ctx->skb = rbi->skb;
-                       rbi->skb = NULL;
+                       new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
+                       if (new_skb == NULL) {
+                               /* Skb allocation failed; do not hand this
+                                * skb to the stack. Reuse it, drop the pkt.
+                                */
+                               rq->stats.rx_buf_alloc_failure++;
+                               ctx->skb = NULL;
+                               rq->stats.drop_total++;
+                               skip_page_frags = true;
+                               goto rcd_done;
+                       }
 
                        pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
                                         PCI_DMA_FROMDEVICE);
 
                        skb_put(ctx->skb, rcd->len);
+
+                       /* Immediate refill */
+                       new_skb->dev = adapter->netdev;
+                       skb_reserve(new_skb, NET_IP_ALIGN);
+                       rbi->skb = new_skb;
+                       rbi->dma_addr = pci_map_single(adapter->pdev,
+                                       rbi->skb->data, rbi->len,
+                                       PCI_DMA_FROMDEVICE);
+                       rxd->addr = cpu_to_le64(rbi->dma_addr);
+                       rxd->len = rbi->len;
+
                } else {
-                       BUG_ON(ctx->skb == NULL);
+                       BUG_ON(ctx->skb == NULL && !skip_page_frags);
+
                        /* non SOP buffer must be type 1 in most cases */
-                       if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
-                               BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
+                       BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
+                       BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
 
-                               if (rcd->len) {
-                                       pci_unmap_page(adapter->pdev,
-                                                      rbi->dma_addr, rbi->len,
-                                                      PCI_DMA_FROMDEVICE);
+                       /* If an SOP buffer was dropped, skip all
+                        * following non-SOP fragments. They will be reused.
+                        */
+                       if (skip_page_frags)
+                               goto rcd_done;
 
-                                       vmxnet3_append_frag(ctx->skb, rcd, rbi);
-                                       rbi->page = NULL;
-                               }
-                       } else {
-                               /*
-                                * The only time a non-SOP buffer is type 0 is
-                                * when it's EOP and error flag is raised, which
-                                * has already been handled.
+                       new_page = alloc_page(GFP_ATOMIC);
+                       if (unlikely(new_page == NULL)) {
+                               /* Replacement page frag could not be allocated.
+                                * Reuse this page. Drop the pkt and free the
+                                * skb which contained this page as a frag. Skip
+                                * processing all the following non-sop frags.
                                 */
-                               BUG_ON(true);
+                               rq->stats.rx_buf_alloc_failure++;
+                               dev_kfree_skb(ctx->skb);
+                               ctx->skb = NULL;
+                               skip_page_frags = true;
+                               goto rcd_done;
+                       }
+
+                       if (rcd->len) {
+                               pci_unmap_page(adapter->pdev,
+                                              rbi->dma_addr, rbi->len,
+                                              PCI_DMA_FROMDEVICE);
+
+                               vmxnet3_append_frag(ctx->skb, rcd, rbi);
                        }
+
+                       /* Immediate refill */
+                       rbi->page = new_page;
+                       rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+                                                    0, PAGE_SIZE,
+                                                    PCI_DMA_FROMDEVICE);
+                       rxd->addr = cpu_to_le64(rbi->dma_addr);
+                       rxd->len = rbi->len;
                }
 
+
                skb = ctx->skb;
                if (rcd->eop) {
                        skb->len += skb->data_len;
@@ -1244,26 +1296,27 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                }
 
 rcd_done:
-               /* device may skip some rx descs */
-               rq->rx_ring[ring_idx].next2comp = idx;
-               VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
-                                         rq->rx_ring[ring_idx].size);
-
-               /* refill rx buffers frequently to avoid starving the h/w */
-               num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
-                                                          ring_idx);
-               if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
-                                                       ring_idx, adapter))) {
-                       vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
-                                               adapter);
-
-                       /* if needed, update the register */
-                       if (unlikely(rq->shared->updateRxProd)) {
-                               VMXNET3_WRITE_BAR0_REG(adapter,
-                                       rxprod_reg[ring_idx] + rq->qid * 8,
-                                       rq->rx_ring[ring_idx].next2fill);
-                               rq->uncommitted[ring_idx] = 0;
-                       }
+               /* device may have skipped some rx descs */
+               ring->next2comp = idx;
+               num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
+               ring = rq->rx_ring + ring_idx;
+               while (num_to_alloc) {
+                       vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+                                         &rxCmdDesc);
+                       BUG_ON(!rxd->addr);
+
+                       /* Recv desc is ready to be used by the device */
+                       rxd->gen = ring->gen;
+                       vmxnet3_cmd_ring_adv_next2fill(ring);
+                       num_to_alloc--;
+               }
+
+               /* if needed, update the register */
+               if (unlikely(rq->shared->updateRxProd)) {
+                       VMXNET3_WRITE_BAR0_REG(adapter,
+                               rxprod_reg[ring_idx] + rq->qid * 8,
+                               ring->next2fill);
+                       rq->uncommitted[ring_idx] = 0;
                }
 
                vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
@@ -2894,6 +2947,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        else
 #endif
                num_rx_queues = 1;
+       num_rx_queues = rounddown_pow_of_two(num_rx_queues);
 
        if (enable_mq)
                num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
@@ -2901,6 +2955,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        else
                num_tx_queues = 1;
 
+       num_tx_queues = rounddown_pow_of_two(num_tx_queues);
        netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
                                   max(num_tx_queues, num_rx_queues));
        printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
@@ -3085,6 +3140,7 @@ vmxnet3_remove_device(struct pci_dev *pdev)
        else
 #endif
                num_rx_queues = 1;
+       num_rx_queues = rounddown_pow_of_two(num_rx_queues);
 
        cancel_work_sync(&adapter->work);
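
The rewritten receive path above allocates the replacement skb (or page) before the completed buffer is handed to the stack; when the allocation fails, the old buffer is left in place for reuse and the packet is dropped, so the rx ring can never run dry. A minimal userspace sketch of that discipline (illustrative names, not the driver's API):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	void *data;
	size_t len;
};

static unsigned long drops;

static void deliver(void *data, size_t len)	/* hand off to the stack */
{
	(void)data; (void)len;
}

static void rx_complete(struct buf *slot)
{
	void *replacement = malloc(slot->len);

	if (!replacement) {
		drops++;	/* keep slot->data in the ring, drop the pkt */
		return;
	}
	deliver(slot->data, slot->len);	/* consumer now owns the old buffer */
	free(slot->data);		/* (in-kernel, the stack frees it) */
	slot->data = replacement;	/* immediate refill */
}

int main(void)
{
	struct buf slot = { malloc(2048), 2048 };

	rx_complete(&slot);
	printf("drops=%lu\n", drops);
	free(slot.data);
	return 0;
}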
 
index f50d36fdf4056a9774434a9fb51eebd937b53117..e08d75e3f170a3ae523957fde0ae80b1e60e3a20 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/if_vlan.h>
 #include <linux/if_arp.h>
 #include <linux/inetdevice.h>
+#include <linux/log2.h>
 
 #include "vmxnet3_defs.h"
 
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.1.9.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.1.18.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01010900
+#define VMXNET3_DRIVER_VERSION_NUM      0x01011200
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
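
<linux/log2.h> is pulled in above for rounddown_pow_of_two(), which the probe/remove hunks use to clamp the queue counts to a power of two. A userspace approximation of what that helper computes:

#include <stdio.h>

/* Approximation of rounddown_pow_of_two() from <linux/log2.h>;
 * as there, the result for n == 0 is left undefined, so require n >= 1.
 */
static unsigned long rounddown_pow2(unsigned long n)
{
	unsigned long p = 1;

	while (p <= n / 2)
		p <<= 1;
	return p;
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       rounddown_pow2(1), rounddown_pow2(6), rounddown_pow2(8));
	/* prints: 1 4 8 -- e.g. a 6-CPU box gets 4 rx queues */
	return 0;
}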
index e050bd65e0378cbdd6643d893c01a4462a2dea2f..777d1a4e81b2cb0c43599e146e16d774e47abe66 100644 (file)
@@ -2203,8 +2203,10 @@ fst_open(struct net_device *dev)
 
        if (port->mode != FST_RAW) {
                err = hdlc_open(dev);
-               if (err)
+               if (err) {
+                       module_put(THIS_MODULE);
                        return err;
+               }
        }
 
        fst_openport(port);
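
The added module_put() suggests fst_open() takes a module reference earlier in the open path; every failure exit after that point must drop it again, or the module's use count stays elevated and it can never be unloaded. A userspace sketch of the invariant (names are illustrative stand-ins):

#include <stdio.h>

static int refcnt;	/* stands in for the module use count */

static int get_ref(void)  { refcnt++; return 1; }
static void put_ref(void) { refcnt--; }

static int hdlc_open_stub(void) { return -1; }	/* pretend hdlc_open() fails */

static int fst_open_sketch(void)
{
	int err;

	if (!get_ref())
		return -1;

	err = hdlc_open_stub();
	if (err) {
		put_ref();	/* the module_put() the patch adds */
		return err;
	}
	return 0;
}

int main(void)
{
	fst_open_sketch();
	printf("refcnt after failed open: %d\n", refcnt);	/* 0: balanced */
	return 0;
}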
index 62172d5857239549ba6fa233df721096773fd707..f82383b3ed30c11ae4bceaae6d8e0c5028ba5f01 100644 (file)
@@ -107,10 +107,13 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
                case AR5K_PKT_TYPE_BEACON:
                case AR5K_PKT_TYPE_PROBE_RESP:
                        frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
+                       break;
                case AR5K_PKT_TYPE_PIFS:
                        frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
+                       break;
                default:
                        frame_type = type;
+                       break;
                }
 
                tx_ctl->tx_control_0 |=
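
The ath5k hunk adds break statements the switch was missing: without them, the BEACON/PROBE_RESP cases fell through to PIFS and then to default, so frame_type was always overwritten by the last assignment. A self-contained sketch of the corrected control flow (enum values are illustrative):

#include <stdio.h>

enum pkt { BEACON, PROBE_RESP, PIFS, DATA };

static int frame_type(enum pkt type)
{
	int ft;

	switch (type) {
	case BEACON:
	case PROBE_RESP:
		ft = 1;		/* NO_DELAY */
		break;		/* without this, falls through to PIFS */
	case PIFS:
		ft = 2;		/* PIFS */
		break;		/* without this, falls through to default */
	default:
		ft = 0;		/* raw type */
		break;
	}
	return ft;
}

int main(void)
{
	printf("%d\n", frame_type(BEACON));	/* 1, not 0 */
	return 0;
}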
index 1fef84f87c78f5196e57ff5c9a3ad78ca7fed50a..392771f937599d7a9731589d018d505a66c4255c 100644 (file)
@@ -691,14 +691,12 @@ ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
                if (!chinfo[pier].pd_curves)
                        continue;
 
-               for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+               for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
                        struct ath5k_pdgain_info *pd =
                                        &chinfo[pier].pd_curves[pdg];
 
-                       if (pd != NULL) {
-                               kfree(pd->pd_step);
-                               kfree(pd->pd_pwr);
-                       }
+                       kfree(pd->pd_step);
+                       kfree(pd->pd_pwr);
                }
 
                kfree(chinfo[pier].pd_curves);
index 296c316a83412eaa64bd4c6bd47b264dc4910822..f2c0c236392f2663f970de02ecd8f0a7bd34f9d9 100644 (file)
@@ -297,7 +297,9 @@ ath5k_pci_remove(struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int ath5k_pci_suspend(struct device *dev)
 {
-       struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+       struct ath5k_softc *sc = hw->priv;
 
        ath5k_led_off(sc);
        return 0;
@@ -306,7 +308,8 @@ static int ath5k_pci_suspend(struct device *dev)
 static int ath5k_pci_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct ath5k_softc *sc = pci_get_drvdata(pdev);
+       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+       struct ath5k_softc *sc = hw->priv;
 
        /*
         * Suspend/Resume resets the PCI configuration space, so we have to
index 929c68cdf8ab498dc1f35e7cb5b20b08be861e47..a073cdce1f156fd3a1cfc1acc73992b38e87a257 100644 (file)
@@ -10,7 +10,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev,             \
                        struct device_attribute *attr,                  \
                        char *buf)                                      \
 {                                                                      \
-       struct ath5k_softc *sc = dev_get_drvdata(dev);                  \
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);                 \
+       struct ath5k_softc *sc = hw->priv;                              \
        return snprintf(buf, PAGE_SIZE, "%d\n", get);                   \
 }                                                                      \
                                                                        \
@@ -18,7 +19,8 @@ static ssize_t ath5k_attr_store_##name(struct device *dev,            \
                        struct device_attribute *attr,                  \
                        const char *buf, size_t count)                  \
 {                                                                      \
-       struct ath5k_softc *sc = dev_get_drvdata(dev);                  \
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);                 \
+       struct ath5k_softc *sc = hw->priv;                              \
        int val;                                                        \
                                                                        \
        val = (int)simple_strtoul(buf, NULL, 10);                       \
@@ -33,7 +35,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev,             \
                        struct device_attribute *attr,                  \
                        char *buf)                                      \
 {                                                                      \
-       struct ath5k_softc *sc = dev_get_drvdata(dev);                  \
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);                 \
+       struct ath5k_softc *sc = hw->priv;                              \
        return snprintf(buf, PAGE_SIZE, "%d\n", get);                   \
 }                                                                      \
 static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
index b8cbfc7072137064f443ad0f137b9ccba99c85f4..3bad0b2cf9a3f6517525950a3fe55411d8d79fb0 100644 (file)
@@ -278,6 +278,12 @@ static int ath_pci_suspend(struct device *device)
 
        ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
 
+       /* The device has to be moved to FULLSLEEP forcibly.
+        * Otherwise the chip never moves to full sleep
+        * when no interface is up.
+        */
+       ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+
        return 0;
 }
 
index 3779b8977d4709a9ce68d85a8b2458d472deaea4..33443bcaa8d9b27c6b3a7d70be54a4e470c38efd 100644 (file)
@@ -671,7 +671,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
         * TODO - this could be improved to be dependent on the rate.
         *      The hardware can keep up at lower rates, but not higher rates
         */
-       if (fi->keyix != ATH9K_TXKEYIX_INVALID)
+       if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+           !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
                ndelim += ATH_AGGR_ENCRYPTDELIM;
 
        /*
index 2fb53d0675124d90b0c7ba82c9183af2437b9a9a..333b69ef2ae23b792de0dc8f85b1050821e92e3c 100644 (file)
@@ -112,6 +112,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
        { USB_DEVICE(0x04bb, 0x093f) },
        /* NEC WL300NU-G */
        { USB_DEVICE(0x0409, 0x0249) },
+       /* NEC WL300NU-AG */
+       { USB_DEVICE(0x0409, 0x02b4) },
        /* AVM FRITZ!WLAN USB Stick N */
        { USB_DEVICE(0x057c, 0x8401) },
        /* AVM FRITZ!WLAN USB Stick N 2.4 */
index 61d4a11f566b4f9e9864f1109369eade4634c50a..2a88e73bb39cf51c3cea461160d2d0266fcda829 100644 (file)
@@ -36,6 +36,7 @@
 #include <net/mac80211.h>
 #include <linux/etherdevice.h>
 #include <asm/unaligned.h>
+#include <linux/stringify.h>
 
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
 #define IWL100_UCODE_API_MIN 5
 
 #define IWL1000_FW_PRE "iwlwifi-1000-"
-#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode"
+#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL100_FW_PRE "iwlwifi-100-"
-#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE #api ".ucode"
+#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
 
 
 /*
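
The switch to __stringify() across these iwlwifi files is needed because the preprocessor's # operator does not expand its argument: with api itself a macro, FW_PRE #api ".ucode" yields the literal macro name instead of its value. Two-level stringification expands first. A compilable illustration (macro names here are only modeled on the patch):

#include <stdio.h>

/* Mirrors __stringify() from <linux/stringify.h>: the inner macro
 * applies #, the outer one forces argument expansion first.
 */
#define STR_1(x)	#x
#define STR(x)		STR_1(x)

#define UCODE_API_MAX	5
#define FW_PRE		"iwlwifi-1000-"

int main(void)
{
	puts(FW_PRE STR_1(UCODE_API_MAX) ".ucode"); /* iwlwifi-1000-UCODE_API_MAX.ucode */
	puts(FW_PRE STR(UCODE_API_MAX) ".ucode");   /* iwlwifi-1000-5.ucode */
	return 0;
}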
index 2282279cffc454c5089277a6cb1f279a60ad2dab..3df76f53a41b7be0e75b880b9f94f9d9fdcca508 100644 (file)
@@ -36,6 +36,7 @@
 #include <net/mac80211.h>
 #include <linux/etherdevice.h>
 #include <asm/unaligned.h>
+#include <linux/stringify.h>
 
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
 #define IWL105_UCODE_API_MIN 5
 
 #define IWL2030_FW_PRE "iwlwifi-2030-"
-#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
+#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
 
 #define IWL2000_FW_PRE "iwlwifi-2000-"
-#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
+#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL105_FW_PRE "iwlwifi-105-"
-#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE #api ".ucode"
+#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
 
 static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
 {
index f99f9c1933524e6d06af64f191341fd67057dbce..e816c27db79455214dce66a833ef5fe30cfdcf0b 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/mac80211.h>
 #include <linux/etherdevice.h>
 #include <asm/unaligned.h>
+#include <linux/stringify.h>
 
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
 #define IWL5150_UCODE_API_MIN 1
 
 #define IWL5000_FW_PRE "iwlwifi-5000-"
-#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode"
+#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL5150_FW_PRE "iwlwifi-5150-"
-#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
+#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
 
 /* NIC configuration for 5000 series */
 static void iwl5000_nic_config(struct iwl_priv *priv)
index fbe565c816e32ce367c09dad8ef5762e4c819d86..5b150bc70b0654a1b27641516062aa004235ad47 100644 (file)
@@ -36,6 +36,7 @@
 #include <net/mac80211.h>
 #include <linux/etherdevice.h>
 #include <asm/unaligned.h>
+#include <linux/stringify.h>
 
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
 #define IWL6000G2_UCODE_API_MIN 4
 
 #define IWL6000_FW_PRE "iwlwifi-6000-"
-#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
+#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL6050_FW_PRE "iwlwifi-6050-"
-#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
+#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
 
 #define IWL6005_FW_PRE "iwlwifi-6000g2a-"
-#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
+#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
 
 #define IWL6030_FW_PRE "iwlwifi-6000g2b-"
-#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
+#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
 
 static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
 {
index 213c80c6a6682d000efff4b0d356cb5bafa691d4..45cc51c9c93ec4caf312fcc631b35cbf13b7e001 100644 (file)
@@ -1763,6 +1763,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
        struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
        struct iwl_rxon_context *tmp;
+       enum nl80211_iftype newviftype = newtype;
        u32 interface_modes;
        int err;
 
@@ -1818,7 +1819,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
        /* success */
        iwl_teardown_interface(priv, vif, true);
-       vif->type = newtype;
+       vif->type = newviftype;
        vif->p2p = newp2p;
        err = iwl_setup_interface(priv, ctx);
        WARN_ON(err);
index 686e176b5ebdeb80c992860b49e2e64150892ca0..137dba95b1ad765753425ec3afafb69949d2d16e 100644 (file)
@@ -126,7 +126,7 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
 }
 
 static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
-                            struct iwl_tfd *tfd)
+                            struct iwl_tfd *tfd, int dma_dir)
 {
        struct pci_dev *dev = priv->pci_dev;
        int i;
@@ -151,7 +151,7 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
        /* Unmap chunks, if any. */
        for (i = 1; i < num_tbs; i++)
                pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
-                               iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+                               iwl_tfd_tb_get_len(tfd, i), dma_dir);
 }
 
 /**
@@ -167,7 +167,8 @@ void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
        struct iwl_tfd *tfd_tmp = txq->tfds;
        int index = txq->q.read_ptr;
 
-       iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);
+       iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
+                        PCI_DMA_TODEVICE);
 
        /* free SKB */
        if (txq->txb) {
@@ -310,9 +311,7 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
                i = get_cmd_index(q, q->read_ptr);
 
                if (txq->meta[i].flags & CMD_MAPPED) {
-                       pci_unmap_single(priv->pci_dev,
-                                        dma_unmap_addr(&txq->meta[i], mapping),
-                                        dma_unmap_len(&txq->meta[i], len),
+                       iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
                                         PCI_DMA_BIDIRECTIONAL);
                        txq->meta[i].flags = 0;
                }
@@ -535,12 +534,7 @@ out_free_arrays:
 void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                        int slots_num, u32 txq_id)
 {
-       int actual_slots = slots_num;
-
-       if (txq_id == priv->cmd_queue)
-               actual_slots++;
-
-       memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+       memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num);
 
        txq->need_update = 0;
 
@@ -700,10 +694,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
                        continue;
                phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
-                                          cmd->len[i], PCI_DMA_TODEVICE);
+                                          cmd->len[i], PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
                        iwlagn_unmap_tfd(priv, out_meta,
-                                        &txq->tfds[q->write_ptr]);
+                                        &txq->tfds[q->write_ptr],
+                                        PCI_DMA_BIDIRECTIONAL);
                        idx = -ENOMEM;
                        goto out;
                }
@@ -807,7 +802,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];
 
-       iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);
+       iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL);
 
        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
index 660831ce293cf771dcd268bc5c9e8614d5b07923..687c1f223497e440939607e38303ca33e6fc9bcc 100644 (file)
@@ -1288,6 +1288,8 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
 
        *(unsigned long *) wdev_priv = (unsigned long) priv;
 
+       set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
+
        ret = wiphy_register(wdev->wiphy);
        if (ret < 0) {
                dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
index 32261189bcef9065eac5cf14b961497a79a58f93..aeac3cc4dbe45d6df570a636ffafbfcc3ce104cf 100644 (file)
@@ -2474,6 +2474,7 @@ struct mwl8k_cmd_set_hw_spec {
  * faster client.
  */
 #define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400
+#define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR       0x00000200
 #define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT          0x00000080
 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP      0x00000020
 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON         0x00000010
@@ -2510,7 +2511,8 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
        cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
                                 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
                                 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
-                                MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY);
+                                MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY |
+                                MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR);
        cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
        cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
 
index 9f8ccae93317d92cf353cb595a189df0aa1960e9..254b64ba4bf6cddf3408b792c2987fdced845e38 100644 (file)
@@ -1624,6 +1624,16 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
        pci_read_config_byte(pdev, 0x8, &revisionid);
        pci_read_config_word(pdev, 0x3C, &irqline);
 
+       /* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
+        * r8192e_pci, and RTL8192SE, which uses this driver. If the
+        * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
+        * the correct driver is r8192e_pci, thus this routine should
+        * return false.
+        */
+       if (deviceid == RTL_PCI_8192SE_DID &&
+           revisionid == RTL_PCI_REVISION_ID_8192PCIE)
+               return false;
+
        if (deviceid == RTL_PCI_8192_DID ||
            deviceid == RTL_PCI_0044_DID ||
            deviceid == RTL_PCI_0047_DID ||
@@ -1856,7 +1866,8 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
        pci_write_config_byte(pdev, 0x04, 0x07);
 
        /* find adapter */
-       _rtl_pci_find_adapter(pdev, hw);
+       if (!_rtl_pci_find_adapter(pdev, hw))
+               goto fail3;
 
        /* Init IO handler */
        _rtl_pci_io_handler_init(&pdev->dev, hw);
index bee7c1480f63c6c43e4fa87efb82d77eb63503c1..942f7a3969a79ceb857904053a689b2421f90fe0 100644 (file)
@@ -53,6 +53,8 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
 static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
+       const struct firmware *firmware;
+       int err;
 
        rtlpriv->dm.dm_initialgain_enable = 1;
        rtlpriv->dm.dm_flag = 0;
@@ -64,6 +66,24 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
                         ("Can't alloc buffer for fw.\n"));
                return 1;
        }
+       /* request fw */
+       err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
+                       rtlpriv->io.dev);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Failed to request firmware!\n"));
+               return 1;
+       }
+       if (firmware->size > 0x4000) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        ("Firmware is too big!\n"));
+               release_firmware(firmware);
+               return 1;
+       }
+       memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+       rtlpriv->rtlhal.fwsize = firmware->size;
+       release_firmware(firmware);
+
        return 0;
 }
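
The block added to rtl92cu_init_sw_vars() requests the firmware blob, rejects anything larger than the 0x4000-byte buffer, copies it, and releases it. A userspace sketch of the same validate-then-copy pattern (request_firmware() itself is a kernel API; these names are illustrative):

#include <stdio.h>
#include <string.h>

#define FW_BUF_SIZE 0x4000	/* matches the 0x4000 check in the patch */

/* Reject a blob that would overflow the preallocated buffer
 * before touching it.
 */
static int load_fw(unsigned char *dst, size_t *dst_len,
		   const unsigned char *blob, size_t blob_len)
{
	if (blob_len > FW_BUF_SIZE)
		return -1;	/* "Firmware is too big!" */
	memcpy(dst, blob, blob_len);
	*dst_len = blob_len;
	return 0;
}

int main(void)
{
	static unsigned char fw_buf[FW_BUF_SIZE];
	unsigned char blob[128] = { 0 };
	size_t n = 0;
	int rc = load_fw(fw_buf, &n, blob, sizeof(blob));

	printf("rc=%d size=%zu\n", rc, n);
	return 0;
}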
 
@@ -278,6 +298,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+       {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
        {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
        /* HP - Lite-On ,8188CUS Slim Combo */
index 135df164a4c1e897e4d36a7d5a49336518669480..46767c53917a5e28ee8fcec10d92de6c39609eba 100644 (file)
@@ -624,7 +624,7 @@ static int pci_pm_prepare(struct device *dev)
         * system from the sleep state, we'll have to prevent it from signaling
         * wake-up.
         */
-       pm_runtime_resume(dev);
+       pm_runtime_get_sync(dev);
 
        if (drv && drv->pm && drv->pm->prepare)
                error = drv->pm->prepare(dev);
@@ -638,6 +638,8 @@ static void pci_pm_complete(struct device *dev)
 
        if (drv && drv->pm && drv->pm->complete)
                drv->pm->complete(dev);
+
+       pm_runtime_put_sync(dev);
 }
 
 #else /* !CONFIG_PM_SLEEP */
index 5f10c23dff943a515698f361278cd931d6bd5909..692671b11667381516b9d95101e26415c10e35fc 100644 (file)
@@ -3284,7 +3284,7 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
  * @dev: the PCI device
  * @decode: true = enable decoding, false = disable decoding
  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
- * @change_bridge_flags: traverse ancestors and change bridges
+ * @flags: traverse ancestors and change bridges
  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
  */
 int pci_set_vga_state(struct pci_dev *dev, bool decode,
@@ -3483,6 +3483,8 @@ static int __init pci_setup(char *str)
                                pci_no_msi();
                        } else if (!strcmp(str, "noaer")) {
                                pci_no_aer();
+                       } else if (!strncmp(str, "realloc", 7)) {
+                               pci_realloc();
                        } else if (!strcmp(str, "nodomains")) {
                                pci_no_domains();
                        } else if (!strncmp(str, "cbiosize=", 9)) {
index 731e20265ace428114b4ba217470fb5868908ad1..3a39bf1f1e2c11a8ee6cd094a8f43f931a256e6b 100644 (file)
@@ -146,6 +146,8 @@ static inline void pci_no_msi(void) { }
 static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
 #endif
 
+extern void pci_realloc(void);
+
 static inline int pci_no_d1d2(struct pci_dev *dev)
 {
        unsigned int parent_dstates = 0;
index 48849ffdd67214265693c92076970e00ab329d2a..bafb3c3d4a8963e6a635d94a414e290e4998af40 100644 (file)
@@ -168,7 +168,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
                if (type == pci_bar_io) {
                        l &= PCI_BASE_ADDRESS_IO_MASK;
-                       mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
+                       mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
                } else {
                        l &= PCI_BASE_ADDRESS_MEM_MASK;
                        mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
index e8a140669f90107aba6b2780de4b90494591aa34..02145e9697a9709fbcd615fb9597033f57e491dd 100644 (file)
@@ -2761,6 +2761,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
 #endif /*CONFIG_MMC_RICOH_MMC*/
 
 #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
index 1e9e5a5b8c81909b951ac4b4a058b203e956f3e4..9995842e45b5cb75fcc296bb668df3638ded8088 100644 (file)
@@ -47,6 +47,13 @@ struct resource_list_x {
        (head)->next = NULL;                            \
 } while (0)
 
+int pci_realloc_enable = 0;
+#define pci_realloc_enabled() pci_realloc_enable
+void pci_realloc(void)
+{
+       pci_realloc_enable = 1;
+}
+
 /**
  * add_to_list() - add a new resource tracker to the list
  * @head:      Head of the list
@@ -1025,6 +1032,7 @@ static int __init pci_get_max_depth(void)
        return depth;
 }
 
+
 /*
  * first try will not touch pci bridge res
  * second  and later try will clear small leaf bridge res
@@ -1068,6 +1076,13 @@ again:
        /* any device complain? */
        if (!head.next)
                goto enable_and_dump;
+
+       /* don't realloc unless asked to do so */
+       if (!pci_realloc_enabled()) {
+               free_list(resource_list_x, &head);
+               goto enable_and_dump;
+       }
+
        failed_type = 0;
        for (list = head.next; list;) {
                failed_type |= list->flags;
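
With the gate above, the aggressive bridge-resource reallocation only runs when pci_realloc() was triggered by the "pci=realloc" parameter; otherwise the retry list is freed and the old behaviour is kept. A userspace sketch of the opt-in flag wiring:

#include <stdio.h>
#include <string.h>

static int pci_realloc_enable;	/* mirrors the flag added in setup-bus.c */

static void pci_setup_sketch(const char *str)
{
	if (!strncmp(str, "realloc", 7))
		pci_realloc_enable = 1;
}

int main(void)
{
	pci_setup_sketch("realloc");
	printf("reallocation %s\n",
	       pci_realloc_enable ? "enabled" : "skipped");
	return 0;
}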
index 435002dfc3caef7a418944e9e967af8a27c6b822..e956f659089a2c1b91716a7bccdf68f425837131 100644 (file)
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/gpio.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
@@ -75,10 +76,10 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
 {
        if (skt->nr == 0)
-               gpio_request_array(vpac270_pcmcia_gpios,
+               gpio_free_array(vpac270_pcmcia_gpios,
                                        ARRAY_SIZE(vpac270_pcmcia_gpios));
        else
-               gpio_request_array(vpac270_cf_gpios,
+               gpio_free_array(vpac270_cf_gpios,
                                        ARRAY_SIZE(vpac270_cf_gpios));
 }
 
index 005417bd429e815f24cce88c23b4f53ef46afc74..e1c4938b301bbcdacbcc1303e9b8cde5bf288585 100644 (file)
@@ -1156,9 +1156,9 @@ static acpi_status wmid3_set_device_status(u32 value, u16 device)
        struct wmid3_gds_input_param params = {
                .function_num = 0x1,
                .hotkey_number = 0x01,
-               .devices = ACER_WMID3_GDS_WIRELESS &
-                               ACER_WMID3_GDS_THREEG &
-                               ACER_WMID3_GDS_WIMAX &
+               .devices = ACER_WMID3_GDS_WIRELESS |
+                               ACER_WMID3_GDS_THREEG |
+                               ACER_WMID3_GDS_WIMAX |
                                ACER_WMID3_GDS_BLUETOOTH,
        };
        struct acpi_buffer input = {
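
The acer-wmi hunk replaces & with | when building the .devices mask: ANDing disjoint single-bit flags always yields 0, so the query covered no devices at all. The difference, with illustrative bit values standing in for the real ACER_WMID3_GDS_* constants:

#include <stdio.h>

#define GDS_WIRELESS	(1u << 0)
#define GDS_THREEG	(1u << 6)
#define GDS_WIMAX	(1u << 7)
#define GDS_BLUETOOTH	(1u << 11)

int main(void)
{
	printf("AND: 0x%x\n",
	       GDS_WIRELESS & GDS_THREEG & GDS_WIMAX & GDS_BLUETOOTH);
	printf("OR:  0x%x\n",
	       GDS_WIRELESS | GDS_THREEG | GDS_WIMAX | GDS_BLUETOOTH);
	/* AND: 0x0 (no devices), OR: 0x8c1 (the intended union) */
	return 0;
}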
@@ -1445,6 +1445,8 @@ static void acer_wmi_notify(u32 value, void *context)
        union acpi_object *obj;
        struct event_return_value return_value;
        acpi_status status;
+       u16 device_state;
+       const struct key_entry *key;
 
        status = wmi_get_event_data(value, &response);
        if (status != AE_OK) {
@@ -1472,23 +1474,32 @@ static void acer_wmi_notify(u32 value, void *context)
 
        switch (return_value.function) {
        case WMID_HOTKEY_EVENT:
-               if (return_value.device_state) {
-                       u16 device_state = return_value.device_state;
-                       pr_debug("device state: 0x%x\n", device_state);
-                       if (has_cap(ACER_CAP_WIRELESS))
-                               rfkill_set_sw_state(wireless_rfkill,
-                               !(device_state & ACER_WMID3_GDS_WIRELESS));
-                       if (has_cap(ACER_CAP_BLUETOOTH))
-                               rfkill_set_sw_state(bluetooth_rfkill,
-                               !(device_state & ACER_WMID3_GDS_BLUETOOTH));
-                       if (has_cap(ACER_CAP_THREEG))
-                               rfkill_set_sw_state(threeg_rfkill,
-                               !(device_state & ACER_WMID3_GDS_THREEG));
-               }
-               if (!sparse_keymap_report_event(acer_wmi_input_dev,
-                               return_value.key_num, 1, true))
+               device_state = return_value.device_state;
+               pr_debug("device state: 0x%x\n", device_state);
+
+               key = sparse_keymap_entry_from_scancode(acer_wmi_input_dev,
+                                                       return_value.key_num);
+               if (!key) {
                        pr_warn("Unknown key number - 0x%x\n",
                                return_value.key_num);
+               } else {
+                       switch (key->keycode) {
+                       case KEY_WLAN:
+                       case KEY_BLUETOOTH:
+                               if (has_cap(ACER_CAP_WIRELESS))
+                                       rfkill_set_sw_state(wireless_rfkill,
+                                               !(device_state & ACER_WMID3_GDS_WIRELESS));
+                               if (has_cap(ACER_CAP_THREEG))
+                                       rfkill_set_sw_state(threeg_rfkill,
+                                               !(device_state & ACER_WMID3_GDS_THREEG));
+                               if (has_cap(ACER_CAP_BLUETOOTH))
+                                       rfkill_set_sw_state(bluetooth_rfkill,
+                                               !(device_state & ACER_WMID3_GDS_BLUETOOTH));
+                               break;
+                       }
+                       sparse_keymap_report_entry(acer_wmi_input_dev, key,
+                                                  1, true);
+               }
                break;
        default:
                pr_warn("Unknown function number - %d - %d\n",
index 00460cb9587b753119696647844759200a1bc8c8..3c7857c71a230e12b2fc1bac6446c08af0e0c893 100644 (file)
@@ -1025,6 +1025,7 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus)
                return power;
 
        memset(&props, 0, sizeof(struct backlight_properties));
+       props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = max;
        bd = backlight_device_register(asus->driver->name,
                                       &asus->platform_device->dev, asus,
index 3f204fde1b029e6433df6ad7e4fc0e6d26ab363a..8877b836d27cf4bde02014d9a37763a3672bb4a0 100644 (file)
@@ -1030,8 +1030,10 @@ static int __devinit compal_probe(struct platform_device *pdev)
        initialize_fan_control_data(data);
 
        err = sysfs_create_group(&pdev->dev.kobj, &compal_attribute_group);
-       if (err)
+       if (err) {
+               kfree(data);
                return err;
+       }
 
        data->hwmon_dev = hwmon_device_register(&pdev->dev);
        if (IS_ERR(data->hwmon_dev)) {
index d3841de6a8cf199ec08b24bd85f50a0af0490d37..e39ab1d3ed878b6044a391db4be16b58224da0d2 100644 (file)
@@ -292,12 +292,9 @@ static int dell_rfkill_set(void *data, bool blocked)
        dell_send_request(buffer, 17, 11);
 
        /* If the hardware switch controls this radio, and the hardware
-          switch is disabled, don't allow changing the software state.
-          If the hardware switch is reported as not supported, always
-          fire the SMI to toggle the killswitch. */
+          switch is disabled, don't allow changing the software state */
        if ((hwswitch_state & BIT(hwswitch_bit)) &&
-           !(buffer->output[1] & BIT(16)) &&
-           (buffer->output[1] & BIT(0))) {
+           !(buffer->output[1] & BIT(16))) {
                ret = -EINVAL;
                goto out;
        }
@@ -403,23 +400,6 @@ static const struct file_operations dell_debugfs_fops = {
 
 static void dell_update_rfkill(struct work_struct *ignored)
 {
-       int status;
-
-       get_buffer();
-       dell_send_request(buffer, 17, 11);
-       status = buffer->output[1];
-       release_buffer();
-
-       /* if hardware rfkill is not supported, set it explicitly */
-       if (!(status & BIT(0))) {
-               if (wifi_rfkill)
-                       dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
-               if (bluetooth_rfkill)
-                       dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
-               if (wwan_rfkill)
-                       dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
-       }
-
        if (wifi_rfkill)
                dell_rfkill_query(wifi_rfkill, (void *)1);
        if (bluetooth_rfkill)
@@ -560,11 +540,11 @@ static int dell_get_intensity(struct backlight_device *bd)
        else
                dell_send_request(buffer, 0, 1);
 
+       ret = buffer->output[1];
+
 out:
        release_buffer();
-       if (ret)
-               return ret;
-       return buffer->output[1];
+       return ret;
 }
 
 static const struct backlight_ops dell_ops = {
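
dell_get_intensity() previously read buffer->output[1] after release_buffer(), i.e. after the shared buffer could already be reused; the fix copies the value into ret while the buffer is still held. A sketch of that copy-before-release pattern (illustrative types):

#include <stdio.h>
#include <stdlib.h>

struct smi_buffer { int output[2]; };

static struct smi_buffer *get_buffer(void)
{
	return calloc(1, sizeof(struct smi_buffer));
}

static void release_buffer(struct smi_buffer *b)
{
	free(b);	/* after this, b must not be dereferenced */
}

static int get_intensity(void)
{
	struct smi_buffer *buffer = get_buffer();
	int ret;

	if (!buffer)
		return -1;
	buffer->output[1] = 42;		/* pretend the SMI filled this in */
	ret = buffer->output[1];	/* copy out while still valid */
	release_buffer(buffer);
	return ret;
}

int main(void)
{
	printf("intensity: %d\n", get_intensity());
	return 0;
}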
index f94017bcdd6e9c8b10d80c70f7fd56ed27788d83..e2faa3cbb792e3f154d83db97ca0f9f45ed44402 100644 (file)
@@ -207,6 +207,7 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
        };
        struct acpi_buffer input = { sizeof(struct bios_args), &args };
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       u32 rc;
 
        if (WARN_ON(insize > sizeof(args.data)))
                return -EINVAL;
@@ -224,13 +225,13 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
        }
 
        bios_return = (struct bios_return *)obj->buffer.pointer;
+       rc = bios_return->return_code;
 
-       if (bios_return->return_code) {
-               if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE)
-                       pr_warn("query 0x%x returned error 0x%x\n",
-                               query, bios_return->return_code);
+       if (rc) {
+               if (rc != HPWMI_RET_UNKNOWN_CMDTYPE)
+                       pr_warn("query 0x%x returned error 0x%x\n", query, rc);
                kfree(obj);
-               return bios_return->return_code;
+               return rc;
        }
 
        if (!outsize) {
index e936364a609d452f18534bf02f1e57647ffde97c..7f88c7923fc6ab18ad8bc674da4ee96411bbf2fc 100644 (file)
@@ -250,6 +250,7 @@ static int oaktrail_backlight_init(void)
        struct backlight_properties props;
 
        memset(&props, 0, sizeof(struct backlight_properties));
+       props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
        bd = backlight_device_register(DRIVER_NAME,
                                       &oaktrail_device->dev, NULL,
index 77f6e707a2a9a43d4c08777f1a97e42dd49a11a0..26c5b117df22d5011c46c64f2f588f83dd7ed58c 100644 (file)
@@ -184,6 +184,10 @@ enum tpacpi_hkey_event_t {
 
        /* Misc bay events */
        TP_HKEY_EV_OPTDRV_EJ            = 0x3006, /* opt. drive tray ejected */
+       TP_HKEY_EV_HOTPLUG_DOCK         = 0x4010, /* docked into hotplug dock
+                                                    or port replicator */
+       TP_HKEY_EV_HOTPLUG_UNDOCK       = 0x4011, /* undocked from hotplug
+                                                    dock or port replicator */
 
        /* User-interface events */
        TP_HKEY_EV_LID_CLOSE            = 0x5001, /* laptop lid closed */
@@ -194,6 +198,10 @@ enum tpacpi_hkey_event_t {
        TP_HKEY_EV_PEN_REMOVED          = 0x500c, /* tablet pen removed */
        TP_HKEY_EV_BRGHT_CHANGED        = 0x5010, /* backlight control event */
 
+       /* Key-related user-interface events */
+       TP_HKEY_EV_KEY_NUMLOCK          = 0x6000, /* NumLock key pressed */
+       TP_HKEY_EV_KEY_FN               = 0x6005, /* Fn key pressed? E420 */
+
        /* Thermal events */
        TP_HKEY_EV_ALARM_BAT_HOT        = 0x6011, /* battery too hot */
        TP_HKEY_EV_ALARM_BAT_XHOT       = 0x6012, /* battery critically hot */
@@ -201,6 +209,10 @@ enum tpacpi_hkey_event_t {
        TP_HKEY_EV_ALARM_SENSOR_XHOT    = 0x6022, /* sensor critically hot */
        TP_HKEY_EV_THM_TABLE_CHANGED    = 0x6030, /* thermal table changed */
 
+       TP_HKEY_EV_UNK_6040             = 0x6040, /* Related to AC change?
+                                                    some sort of APM hint,
+                                                    W520 */
+
        /* Misc */
        TP_HKEY_EV_RFKILL_CHANGED       = 0x7000, /* rfkill switch changed */
 };
@@ -3513,6 +3525,34 @@ static bool hotkey_notify_wakeup(const u32 hkey,
        return true;
 }
 
+static bool hotkey_notify_dockevent(const u32 hkey,
+                                bool *send_acpi_ev,
+                                bool *ignore_acpi_ev)
+{
+       /* 0x4000-0x4FFF: dock-related events */
+       *send_acpi_ev = true;
+       *ignore_acpi_ev = false;
+
+       switch (hkey) {
+       case TP_HKEY_EV_UNDOCK_ACK:
+               /* ACPI undock operation completed after wakeup */
+               hotkey_autosleep_ack = 1;
+               pr_info("undocked\n");
+               hotkey_wakeup_hotunplug_complete_notify_change();
+               return true;
+
+       case TP_HKEY_EV_HOTPLUG_DOCK: /* docked to port replicator */
+               pr_info("docked into hotplug port replicator\n");
+               return true;
+       case TP_HKEY_EV_HOTPLUG_UNDOCK: /* undocked from port replicator */
+               pr_info("undocked from hotplug port replicator\n");
+               return true;
+
+       default:
+               return false;
+       }
+}
+
 static bool hotkey_notify_usrevent(const u32 hkey,
                                 bool *send_acpi_ev,
                                 bool *ignore_acpi_ev)
@@ -3547,13 +3587,13 @@ static bool hotkey_notify_usrevent(const u32 hkey,
 
 static void thermal_dump_all_sensors(void);
 
-static bool hotkey_notify_thermal(const u32 hkey,
+static bool hotkey_notify_6xxx(const u32 hkey,
                                 bool *send_acpi_ev,
                                 bool *ignore_acpi_ev)
 {
        bool known = true;
 
-       /* 0x6000-0x6FFF: thermal alarms */
+       /* 0x6000-0x6FFF: thermal alarms/notices and keyboard events */
        *send_acpi_ev = true;
        *ignore_acpi_ev = false;
 
@@ -3582,8 +3622,17 @@ static bool hotkey_notify_thermal(const u32 hkey,
                         "a sensor reports something is extremely hot!\n");
                /* recommended action: immediate sleep/hibernate */
                break;
+
+       case TP_HKEY_EV_KEY_NUMLOCK:
+       case TP_HKEY_EV_KEY_FN:
+               /* key press events, we just ignore them as long as the EC
+                * is still reporting them in the normal keyboard stream */
+               *send_acpi_ev = false;
+               *ignore_acpi_ev = true;
+               return true;
+
        default:
-               pr_alert("THERMAL ALERT: unknown thermal alarm received\n");
+               pr_warn("unknown possible thermal alarm or keyboard event received\n");
                known = false;
        }
 
@@ -3652,15 +3701,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
                        }
                        break;
                case 4:
-                       /* 0x4000-0x4FFF: dock-related wakeups */
-                       if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
-                               hotkey_autosleep_ack = 1;
-                               pr_info("undocked\n");
-                               hotkey_wakeup_hotunplug_complete_notify_change();
-                               known_ev = true;
-                       } else {
-                               known_ev = false;
-                       }
+                       /* 0x4000-0x4FFF: dock-related events */
+                       known_ev = hotkey_notify_dockevent(hkey, &send_acpi_ev,
+                                               &ignore_acpi_ev);
                        break;
                case 5:
                        /* 0x5000-0x5FFF: human interface helpers */
@@ -3668,8 +3711,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
                                                 &ignore_acpi_ev);
                        break;
                case 6:
-                       /* 0x6000-0x6FFF: thermal alarms */
-                       known_ev = hotkey_notify_thermal(hkey, &send_acpi_ev,
+                       /* 0x6000-0x6FFF: thermal alarms/notices and
+                        *                keyboard events */
+                       known_ev = hotkey_notify_6xxx(hkey, &send_acpi_ev,
                                                 &ignore_acpi_ev);
                        break;
                case 7:
index e5f7b8fe51f4b74fe4a71599043e4b396823ee19..2bb8f451cc067302dcd4145c9de157aaf10cbd6d 100644 (file)
@@ -266,7 +266,7 @@ static struct regulator_ops db8500_regulator_switch_ops = {
  * Regulator information
  */
 static struct db8500_regulator_info
-               db8500_regulator_info[DB8500_NUM_REGULATORS] = {
+db8500_regulator_info[DB8500_NUM_REGULATORS] = {
        [DB8500_REGULATOR_VAPE] = {
                .desc = {
                        .name   = "db8500-vape",
@@ -492,11 +492,9 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
                                info->desc.name, err);
 
                        /* if failing, unregister all earlier regulators */
-                       i--;
-                       while (i >= 0) {
+                       while (--i >= 0) {
                                info = &db8500_regulator_info[i];
                                regulator_unregister(info->rdev);
-                               i--;
                        }
                        return err;
                }
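
The unwind loop above collapses the manual i--/while/i-- sequence into the conventional `while (--i >= 0)` idiom, which visits exactly the entries registered before the failure. In isolation:

#include <stdio.h>

static void unregister(int i)
{
	printf("unregister %d\n", i);
}

int main(void)
{
	int i, fail_at = 3;

	for (i = 0; i < 5; i++) {
		if (i == fail_at) {		/* registration i failed */
			while (--i >= 0)	/* unwinds 2, 1, 0 only */
				unregister(i);
			return 1;
		}
		/* register entry i ... */
	}
	return 0;
}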
@@ -536,13 +534,7 @@ static struct platform_driver db8500_regulator_driver = {
 
 static int __init db8500_regulator_init(void)
 {
-       int ret;
-
-       ret = platform_driver_register(&db8500_regulator_driver);
-       if (ret < 0)
-               return -ENODEV;
-
-       return 0;
+       return platform_driver_register(&db8500_regulator_driver);
 }
 
 static void __exit db8500_regulator_exit(void)
index daff7fd0e95c14d7ace35894e7ad8351334f4107..486ed8141fcddc93dc13b7c5f91a58e8fd8baf10 100644 (file)
@@ -139,7 +139,7 @@ static int max8952_set_voltage(struct regulator_dev *rdev,
        s8 vid = -1, i;
 
        if (!gpio_is_valid(max8952->pdata->gpio_vid0) ||
-                       !gpio_is_valid(max8952->pdata->gpio_vid0)) {
+                       !gpio_is_valid(max8952->pdata->gpio_vid1)) {
                /* DVS not supported */
                return -EPERM;
        }
index 10d5a1d9768e3fd973ea4f8ea6ab388d9a24eaaa..ad6628ca94f41378bdddbe11f1286d54a64f4fd0 100644 (file)
@@ -39,25 +39,28 @@ struct max8997_data {
        struct regulator_dev **rdev;
        int ramp_delay; /* in mV/us */
 
+       bool buck1_gpiodvs;
+       bool buck2_gpiodvs;
+       bool buck5_gpiodvs;
        u8 buck1_vol[8];
        u8 buck2_vol[8];
        u8 buck5_vol[8];
+       int buck125_gpios[3];
        int buck125_gpioindex;
+       bool ignore_gpiodvs_side_effect;
 
        u8 saved_states[MAX8997_REG_MAX];
 };
 
 static inline void max8997_set_gpio(struct max8997_data *max8997)
 {
-       struct max8997_platform_data *pdata =
-               dev_get_platdata(max8997->iodev->dev);
        int set3 = (max8997->buck125_gpioindex) & 0x1;
        int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
        int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
 
-       gpio_set_value(pdata->buck125_gpios[0], set1);
-       gpio_set_value(pdata->buck125_gpios[1], set2);
-       gpio_set_value(pdata->buck125_gpios[2], set3);
+       gpio_set_value(max8997->buck125_gpios[0], set1);
+       gpio_set_value(max8997->buck125_gpios[1], set2);
+       gpio_set_value(max8997->buck125_gpios[2], set3);
 }
 
 struct voltage_map_desc {
@@ -380,8 +383,6 @@ static int max8997_get_voltage_register(struct regulator_dev *rdev,
 static int max8997_get_voltage(struct regulator_dev *rdev)
 {
        struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-       struct max8997_platform_data *pdata =
-               dev_get_platdata(max8997->iodev->dev);
        struct i2c_client *i2c = max8997->iodev->i2c;
        int reg, shift, mask, ret;
        int rid = max8997_get_rid(rdev);
@@ -391,9 +392,9 @@ static int max8997_get_voltage(struct regulator_dev *rdev)
        if (ret)
                return ret;
 
-       if ((rid == MAX8997_BUCK1 && pdata->buck1_gpiodvs) ||
-                       (rid == MAX8997_BUCK2 && pdata->buck2_gpiodvs) ||
-                       (rid == MAX8997_BUCK5 && pdata->buck5_gpiodvs))
+       if ((rid == MAX8997_BUCK1 && max8997->buck1_gpiodvs) ||
+                       (rid == MAX8997_BUCK2 && max8997->buck2_gpiodvs) ||
+                       (rid == MAX8997_BUCK5 && max8997->buck5_gpiodvs))
                reg += max8997->buck125_gpioindex;
 
        ret = max8997_read_reg(i2c, reg, &val);
@@ -543,7 +544,8 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
                        rid == MAX8997_BUCK4 || rid == MAX8997_BUCK5) {
                /* If the voltage is increasing */
                if (org < i)
-                       udelay(desc->step * (i - org) / max8997->ramp_delay);
+                       udelay(DIV_ROUND_UP(desc->step * (i - org),
+                                               max8997->ramp_delay));
        }
 
        return ret;
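
DIV_ROUND_UP() matters here because integer division truncates: a voltage step needing a fractional number of microseconds at the given ramp rate would otherwise be cut short, and the regulator could be used before it settled. For example (values illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int delta_mv = 25, ramp_mv_per_us = 10;

	printf("truncated:  %d us\n", delta_mv / ramp_mv_per_us);	/* 2 */
	printf("rounded up: %d us\n",
	       DIV_ROUND_UP(delta_mv, ramp_mv_per_us));			/* 3 */
	return 0;
}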
@@ -561,8 +563,6 @@ static int max8997_assess_side_effect(struct regulator_dev *rdev,
                u8 new_val, int *best)
 {
        struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-       struct max8997_platform_data *pdata =
-               dev_get_platdata(max8997->iodev->dev);
        int rid = max8997_get_rid(rdev);
        u8 *buckx_val[3];
        bool buckx_gpiodvs[3];
@@ -589,9 +589,9 @@ static int max8997_assess_side_effect(struct regulator_dev *rdev,
        buckx_val[0] = max8997->buck1_vol;
        buckx_val[1] = max8997->buck2_vol;
        buckx_val[2] = max8997->buck5_vol;
-       buckx_gpiodvs[0] = pdata->buck1_gpiodvs;
-       buckx_gpiodvs[1] = pdata->buck2_gpiodvs;
-       buckx_gpiodvs[2] = pdata->buck5_gpiodvs;
+       buckx_gpiodvs[0] = max8997->buck1_gpiodvs;
+       buckx_gpiodvs[1] = max8997->buck2_gpiodvs;
+       buckx_gpiodvs[2] = max8997->buck5_gpiodvs;
 
        for (i = 0; i < 8; i++) {
                int others;
@@ -640,8 +640,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
                int min_uV, int max_uV, unsigned *selector)
 {
        struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-       struct max8997_platform_data *pdata =
-               dev_get_platdata(max8997->iodev->dev);
        int rid = max8997_get_rid(rdev);
        const struct voltage_map_desc *desc;
        int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
@@ -653,15 +651,15 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
 
        switch (rid) {
        case MAX8997_BUCK1:
-               if (pdata->buck1_gpiodvs)
+               if (max8997->buck1_gpiodvs)
                        gpio_dvs_mode = true;
                break;
        case MAX8997_BUCK2:
-               if (pdata->buck2_gpiodvs)
+               if (max8997->buck2_gpiodvs)
                        gpio_dvs_mode = true;
                break;
        case MAX8997_BUCK5:
-               if (pdata->buck5_gpiodvs)
+               if (max8997->buck5_gpiodvs)
                        gpio_dvs_mode = true;
                break;
        }
@@ -695,7 +693,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
        new_idx = tmp_idx;
        new_val = tmp_val;
 
-       if (pdata->ignore_gpiodvs_side_effect == false)
+       if (max8997->ignore_gpiodvs_side_effect == false)
                return -EINVAL;
 
        dev_warn(&rdev->dev, "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:"
@@ -993,6 +991,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
        i2c = max8997->iodev->i2c;
 
        max8997->buck125_gpioindex = pdata->buck125_default_idx;
+       max8997->buck1_gpiodvs = pdata->buck1_gpiodvs;
+       max8997->buck2_gpiodvs = pdata->buck2_gpiodvs;
+       max8997->buck5_gpiodvs = pdata->buck5_gpiodvs;
+       memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
+       max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
 
        for (i = 0; i < 8; i++) {
                max8997->buck1_vol[i] = ret =
@@ -1124,6 +1127,10 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
                                0x3f);
        }
 
+       /* Misc Settings */
+       max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
+       max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
+
        for (i = 0; i < pdata->num_regulators; i++) {
                const struct voltage_map_desc *desc;
                int id = pdata->regulators[i].id;
@@ -1148,10 +1155,6 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
                }
        }
 
-       /* Misc Settings */
-       max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
-       max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
-
        return 0;
 err:
        for (i = 0; i < max8997->num_regulators; i++)
index 4724ba3acf1a51c7c3e42e6aa49ea4899436911e..b2005b44e4f7cc0ee053ab9ee37896e7bce66a2f 100644 (file)
@@ -149,6 +149,7 @@ static const struct i2c_device_id ds1307_id[] = {
        { "ds1340", ds_1340 },
        { "ds3231", ds_3231 },
        { "m41t00", m41t00 },
+       { "pt7c4338", ds_1307 },
        { "rx8025", rx_8025 },
        { }
 };
index b8bc862903ae587a332bd1547fcebe2ef758b303..efd6066b5cd25d3911bd626919ba9f683cf1b3e8 100644 (file)
@@ -78,7 +78,6 @@ struct vt8500_rtc {
        void __iomem            *regbase;
        struct resource         *res;
        int                     irq_alarm;
-       int                     irq_hz;
        struct rtc_device       *rtc;
        spinlock_t              lock;           /* Protects this structure */
 };
@@ -100,10 +99,6 @@ static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
        if (isr & 1)
                events |= RTC_AF | RTC_IRQF;
 
-       /* Only second/minute interrupts are supported */
-       if (isr & 2)
-               events |= RTC_UF | RTC_IRQF;
-
        rtc_update_irq(vt8500_rtc->rtc, 1, events);
 
        return IRQ_HANDLED;
@@ -199,27 +194,12 @@ static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
        return 0;
 }
 
-static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
-{
-       struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
-       unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
-
-       if (enabled)
-               tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
-       else
-               tmp &= ~VT8500_RTC_CR_SM_ENABLE;
-
-       writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
-       return 0;
-}
-
 static const struct rtc_class_ops vt8500_rtc_ops = {
        .read_time = vt8500_rtc_read_time,
        .set_time = vt8500_rtc_set_time,
        .read_alarm = vt8500_rtc_read_alarm,
        .set_alarm = vt8500_rtc_set_alarm,
        .alarm_irq_enable = vt8500_alarm_irq_enable,
-       .update_irq_enable = vt8500_update_irq_enable,
 };
 
 static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
@@ -248,13 +228,6 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
                goto err_free;
        }
 
-       vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
-       if (vt8500_rtc->irq_hz < 0) {
-               dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
-               ret = -ENXIO;
-               goto err_free;
-       }
-
        vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
                                             resource_size(vt8500_rtc->res),
                                             "vt8500-rtc");
@@ -272,9 +245,8 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
                goto err_release;
        }
 
-       /* Enable the second/minute interrupt generation and enable RTC */
-       writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H
-               | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
+       /* Enable RTC and set it to 24-hour mode */
+       writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
               vt8500_rtc->regbase + VT8500_RTC_CR);
 
        vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
@@ -286,26 +258,16 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
                goto err_unmap;
        }
 
-       ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
-                         "rtc 1Hz", vt8500_rtc);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "can't get irq %i, err %d\n",
-                       vt8500_rtc->irq_hz, ret);
-               goto err_unreg;
-       }
-
        ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
                          "rtc alarm", vt8500_rtc);
        if (ret < 0) {
                dev_err(&pdev->dev, "can't get irq %i, err %d\n",
                        vt8500_rtc->irq_alarm, ret);
-               goto err_free_hz;
+               goto err_unreg;
        }
 
        return 0;
 
-err_free_hz:
-       free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
 err_unreg:
        rtc_device_unregister(vt8500_rtc->rtc);
 err_unmap:
@@ -323,7 +285,6 @@ static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
        struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
 
        free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
-       free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
 
        rtc_device_unregister(vt8500_rtc->rtc);
 
index 4a1f029c4fe90a392301a0ac1d1b1728a22296cb..8d9dae89f065cfc650fdb9c8a3873425a904d741 100644 (file)
@@ -830,6 +830,19 @@ config SCSI_GDTH
          To compile this driver as a module, choose M here: the
          module will be called gdth.
 
+config SCSI_ISCI
+       tristate "Intel(R) C600 Series Chipset SAS Controller"
+       depends on PCI && SCSI
+       depends on X86
+       # (temporary): known alpha quality driver
+       depends on EXPERIMENTAL
+       select SCSI_SAS_LIBSAS
+       ---help---
+         This driver supports the 6Gb/s SAS capabilities of the storage
+         control unit found in the Intel(R) C600 series chipset.
+
+         The experimental tag will be removed after the driver exits alpha.
+
 config SCSI_GENERIC_NCR5380
        tristate "Generic NCR5380/53c400 SCSI PIO support"
        depends on ISA && SCSI
index 7ad0b8a79ae8dac6245d6b555ad7ebd2ebd39173..3c08f5352b2d46819f190239da59a336e85a704f 100644 (file)
@@ -73,6 +73,7 @@ obj-$(CONFIG_SCSI_AACRAID)    += aacraid/
 obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
 obj-$(CONFIG_SCSI_AIC94XX)     += aic94xx/
 obj-$(CONFIG_SCSI_PM8001)      += pm8001/
+obj-$(CONFIG_SCSI_ISCI)                += isci/
 obj-$(CONFIG_SCSI_IPS)         += ips.o
 obj-$(CONFIG_SCSI_FD_MCS)      += fd_mcs.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
index c6c0434d80345a122b5e5729bb423615c12e0657..6bba23a2630339fde5e29976c220d771dff0ba54 100644 (file)
@@ -1037,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp)
        unsigned char sense_key;
        unsigned char asc;      /* additional sense code */
        unsigned char ascq;     /* additional sense code qualifier */
+       unsigned long sense_data_size;
 
        ei = cp->err_info;
        cmd = (struct scsi_cmnd *) cp->scsi_cmd;
@@ -1051,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp)
        cmd->result |= ei->ScsiStatus;
 
        /* copy the sense data whether we need to or not. */
-       memcpy(cmd->sense_buffer, ei->SenseInfo,
-               ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
-                       SCSI_SENSE_BUFFERSIZE :
-                       ei->SenseLen);
+       if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+               sense_data_size = SCSI_SENSE_BUFFERSIZE;
+       else
+               sense_data_size = sizeof(ei->SenseInfo);
+       if (ei->SenseLen < sense_data_size)
+               sense_data_size = ei->SenseLen;
+
+       memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
        scsi_set_resid(cmd, ei->ResidualCnt);
 
        if (ei->CommandStatus == 0) {
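
The replacement above clamps the copy to the smallest of the destination buffer, the source field, and the reported sense length, so a bogus SenseLen can no longer overrun cmd->sense_buffer. The same bound could be written more compactly with min_t(); a sketch of the equivalent logic, not the form the patch uses:

	sense_data_size = min_t(unsigned long, SCSI_SENSE_BUFFERSIZE,
				sizeof(ei->SenseInfo));
	sense_data_size = min_t(unsigned long, sense_data_size, ei->SenseLen);
	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);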
@@ -2580,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                c->SG[0].Ext = 0; /* we are not chaining*/
        }
        hpsa_scsi_do_simple_cmd_core(h, c);
-       hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+       if (iocommand.buf_size > 0)
+               hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
        check_ioctl_unit_attention(h, c);
 
        /* Copy the error information out */
index b7650613b8c2606caef87d83a6b0e2a74cc7ca09..bdfa223a7dbb9e047c278b6414c5135e49a47001 100644 (file)
@@ -4306,8 +4306,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
                spin_lock_irqsave(vhost->host->host_lock, flags);
                if (rc == H_CLOSED)
                        vio_enable_interrupts(to_vio_dev(vhost->dev));
-               else if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
-                        (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+               if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+                   (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
                        dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
                }
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644 (file)
index 0000000..3359e10
--- /dev/null
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SCSI_ISCI) += isci.o
+isci-objs := init.o phy.o request.o \
+            remote_device.o port.o \
+            host.o task.o probe_roms.o \
+            remote_node_context.o \
+            remote_node_table.o \
+            unsolicited_frame_control.o \
+            port_config.o
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
new file mode 100644 (file)
index 0000000..5f54461
--- /dev/null
@@ -0,0 +1,19 @@
+# Makefile for create_fw
+#
+CC=gcc
+CFLAGS=-c -Wall -O2 -g
+LDFLAGS=
+SOURCES=create_fw.c
+OBJECTS=$(SOURCES:.c=.o)
+EXECUTABLE=create_fw
+
+all: $(SOURCES) $(EXECUTABLE)
+
+$(EXECUTABLE): $(OBJECTS)
+       $(CC) $(LDFLAGS) $(OBJECTS) -o $@
+
+.c.o:
+       $(CC) $(CFLAGS) $< -o $@
+
+clean:
+       rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
new file mode 100644 (file)
index 0000000..8056d2b
--- /dev/null
@@ -0,0 +1,36 @@
+This defines the temporary binary blob we pass to the SCU
+driver to emulate the binary firmware that we will eventually be
+able to access via NVRAM on the SCU controller.
+
+The current size of the binary blob is expected to be 149 bytes or larger.
+
+Header Types:
+0x1: Phy Masks
+0x2: Phy Gens
+0x3: SAS Addrs
+0xff: End of Data
+
+ID string - u8[12]: "#SCU MAGIC#\0"
+Version - u8: 1
+SubVersion - u8: 0
+
+Header Type - u8: 0x1
+Size - u8: 8
+Phy Mask - u32[8]
+
+Header Type - u8: 0x2
+Size - u8: 8
+Phy Gen - u32[8]
+
+Header Type - u8: 0x3
+Size - u8: 8
+Sas Addr - u64[8]
+
+Header Type - u8: 0xff
+
+
+==============================================================================
+
+Place isci_firmware.bin in /lib/firmware
+Be sure to recreate the initramfs image to include the firmware.
+
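
The blob is a fixed preamble (ID string, Version, SubVersion) followed by type/size headers until the End of Data marker. A small user-space sketch of walking that stream, based only on the layout above; the helper itself is hypothetical and is not shipped with the driver:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int walk_fw_headers(const uint8_t *buf, size_t len)
{
	size_t off = 12 + 1 + 1;	/* ID string + Version + SubVersion */

	while (off < len) {
		uint8_t type = buf[off];
		uint8_t size;

		if (type == 0xff)	/* End of Data */
			return 0;
		if (off + 2 > len)
			break;		/* truncated header */
		size = buf[off + 1];
		printf("header type 0x%x, %u entries\n", type, size);
		/* Phy Masks and Phy Gens are u32 entries, SAS Addrs are u64 */
		off += 2 + (size_t)size * (type == 0x3 ? 8 : 4);
	}
	return -1;			/* no End of Data marker found */
}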
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
new file mode 100644 (file)
index 0000000..c7a2887
--- /dev/null
@@ -0,0 +1,99 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <asm/types.h>
+#include <strings.h>
+#include <stdint.h>
+
+#include "create_fw.h"
+#include "../probe_roms.h"
+
+int write_blob(struct isci_orom *isci_orom)
+{
+       FILE *fd;
+       size_t count;
+
+       fd = fopen(blob_name, "w+");
+       if (!fd) {
+               perror("Open file for write failed");
+               return -EIO;
+       }
+
+       count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
+       if (count != 1) {
+               perror("Write data failed");
+               fclose(fd);
+               return -EIO;
+       }
+
+       fclose(fd);
+
+       return 0;
+}
+
+void set_binary_values(struct isci_orom *isci_orom)
+{
+       int ctrl_idx, phy_idx, port_idx;
+
+       /* setting OROM signature */
+       strncpy(isci_orom->hdr.signature, sig, strlen(sig));
+       isci_orom->hdr.version = version;
+       isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
+       isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
+       isci_orom->hdr.num_elements = num_elements;
+
+       for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
+               isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
+               isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
+                       max_num_concurrent_dev_spin_up;
+               isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
+                       enable_ssc;
+
+               for (port_idx = 0; port_idx < 4; port_idx++)
+                       isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
+                               phy_mask[ctrl_idx][port_idx];
+
+               for (phy_idx = 0; phy_idx < 4; phy_idx++) {
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
+                               (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
+                               (__u32)(sas_addr[ctrl_idx][phy_idx]);
+
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
+                               afe_tx_amp_control0;
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
+                               afe_tx_amp_control1;
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
+                               afe_tx_amp_control2;
+                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
+                               afe_tx_amp_control3;
+               }
+       }
+}
+
+int main(void)
+{
+       int err;
+       struct isci_orom *isci_orom;
+
+       isci_orom = malloc(sizeof(struct isci_orom));
+       if (!isci_orom)
+               return -ENOMEM;
+       memset(isci_orom, 0, sizeof(struct isci_orom));
+
+       set_binary_values(isci_orom);
+
+       err = write_blob(isci_orom);
+       if (err < 0) {
+               free(isci_orom);
+               return err;
+       }
+
+       free(isci_orom);
+       return 0;
+}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
new file mode 100644 (file)
index 0000000..5f29882
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef _CREATE_FW_H_
+#define _CREATE_FW_H_
+#include "../probe_roms.h"
+
+
+/* we are configuring for 2 SCUs */
+static const int num_elements = 2;
+
+/*
+ * For all defined arrays:
+ * elements 0-3 are for SCU0, ports 0-3
+ * elements 4-7 are for SCU1, ports 0-3
+ *
+ * valid configurations for one SCU are:
+ *  P0  P1  P2  P3
+ * ----------------
+ * 0xF,0x0,0x0,0x0 # 1 x4 port
+ * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
+ *                 # ports
+ * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
+ *                 # port
+ * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
+ * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
+ *
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value assigned to UNINIT_PARAM (255).
+ */
+
+/* discovery mode type (port auto config mode by default) */
+
+/*
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value "0000000000000000". A SAS address of all zeros is
+ * considered invalid and will not be used.
+ */
+#ifdef MPC
+static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
+                                    {1, 2, 4, 8} };
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
+                                                    0x5FCFFFFFF0000002ULL,
+                                                    0x5FCFFFFFF0000003ULL,
+                                                    0x5FCFFFFFF0000004ULL },
+                                                  { 0x5FCFFFFFF0000005ULL,
+                                                    0x5FCFFFFFF0000006ULL,
+                                                    0x5FCFFFFFF0000007ULL,
+                                                    0x5FCFFFFFF0000008ULL } };
+#else  /* APC (default) */
+static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4];
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
+                                                    0x5FCFFFFF00000001ULL,
+                                                    0x5FCFFFFF00000001ULL,
+                                                    0x5FCFFFFF00000001ULL },
+                                                  { 0x5FCFFFFF00000002ULL,
+                                                    0x5FCFFFFF00000002ULL,
+                                                    0x5FCFFFFF00000002ULL,
+                                                    0x5FCFFFFF00000002ULL } };
+#endif
+
+/* Maximum number of concurrent device spin up */
+static const int max_num_concurrent_dev_spin_up = 1;
+
+/* enable SSC operation */
+static const int enable_ssc;
+
+/* AFE_TX_AMP_CONTROL */
+static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
+static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
+static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
+static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
+
+static const char blob_name[] = "isci_firmware.bin";
+static const char sig[] = "ISCUOEMB";
+static const unsigned char version = 0x10;
+
+#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644 (file)
index 0000000..26072f1
--- /dev/null
@@ -0,0 +1,2751 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <scsi/sas.h>
+#include "host.h"
+#include "isci.h"
+#include "port.h"
+#include "host.h"
+#include "probe_roms.h"
+#include "remote_device.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "registers.h"
+#include "scu_remote_node_context.h"
+#include "scu_task_context.h"
+
+#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200
+
+#define smu_max_ports(dcc_value) \
+       (\
+               (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+                >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
+       )
+
+#define smu_max_task_contexts(dcc_value)       \
+       (\
+               (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+                >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
+       )
+
+#define smu_max_rncs(dcc_value) \
+       (\
+               (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+                >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
+       )
+
+#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100
+
+/*
+ * The number of milliseconds to wait while a given phy is consuming power
+ * before allowing another set of phys to consume power. Ultimately, this will
+ * be specified by an OEM parameter.
+ */
+#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
+
+/**
+ * NORMALIZE_PUT_POINTER() -
+ *
+ * This macro will normalize the completion queue put pointer so its value can
+ * be used as an array index.
+ */
+#define NORMALIZE_PUT_POINTER(x) \
+       ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
+
+
+/**
+ * NORMALIZE_EVENT_POINTER() -
+ *
+ * This macro will normalize the completion queue event entry so its value can
+ * be used as an index.
+ */
+#define NORMALIZE_EVENT_POINTER(x) \
+       (\
+               ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
+               >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
+       )
+
+/**
+ * NORMALIZE_GET_POINTER() -
+ *
+ * This macro will normalize the completion queue get pointer so its value can
+ * be used as an index into an array.
+ */
+#define NORMALIZE_GET_POINTER(x) \
+       ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
+
+/**
+ * NORMALIZE_GET_POINTER_CYCLE_BIT() -
+ *
+ * This macro will normalize the completion queue cycle pointer so it matches
+ * the completion queue cycle bit
+ */
+#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
+       ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
+
+/**
+ * COMPLETION_QUEUE_CYCLE_BIT() -
+ *
+ * This macro will return the cycle bit of the completion queue entry
+ */
+#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
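
Taken together, these macros implement a cycle-bit ring: the hardware stamps every completion entry with the current cycle bit and flips the bit each time the queue wraps, so the consumer can distinguish a fresh entry from a stale one without any shared counter. A stripped-down sketch of the validity check; the names here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>

#define ENTRY_CYCLE_BIT 0x80000000u	/* bit 31 of each queue entry */

/* An entry is consumable only while its stored cycle bit matches the
 * consumer's current cycle; once the consumer wraps, it toggles its
 * cycle so entries left over from the previous lap no longer match. */
static bool queue_entry_valid(uint32_t entry, uint32_t consumer_cycle)
{
	return (entry & ENTRY_CYCLE_BIT) == (consumer_cycle & ENTRY_CYCLE_BIT);
}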
+
+/* Init the state machine and call the state entry function (if any) */
+void sci_init_sm(struct sci_base_state_machine *sm,
+                const struct sci_base_state *state_table, u32 initial_state)
+{
+       sci_state_transition_t handler;
+
+       sm->initial_state_id    = initial_state;
+       sm->previous_state_id   = initial_state;
+       sm->current_state_id    = initial_state;
+       sm->state_table         = state_table;
+
+       handler = sm->state_table[initial_state].enter_state;
+       if (handler)
+               handler(sm);
+}
+
+/* Call the state exit fn, update the current state, call the state entry fn */
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
+{
+       sci_state_transition_t handler;
+
+       handler = sm->state_table[sm->current_state_id].exit_state;
+       if (handler)
+               handler(sm);
+
+       sm->previous_state_id = sm->current_state_id;
+       sm->current_state_id = next_state;
+
+       handler = sm->state_table[sm->current_state_id].enter_state;
+       if (handler)
+               handler(sm);
+}
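
sci_init_sm() and sci_change_state() form a conventional table-driven state machine: each state optionally supplies enter/exit handlers, and a transition runs exit-of-old, switch, enter-of-new. A minimal generic sketch of the same pattern, with illustrative types in place of the driver's:

typedef void (*state_fn)(void *ctx);

struct state_desc {
	state_fn enter_state;	/* may be NULL */
	state_fn exit_state;	/* may be NULL */
};

struct machine {
	const struct state_desc *table;
	unsigned int current;
	void *ctx;
};

/* Exit the old state, switch, enter the new state -- the same
 * sequencing sci_change_state() performs above. */
static void machine_change_state(struct machine *m, unsigned int next)
{
	if (m->table[m->current].exit_state)
		m->table[m->current].exit_state(m->ctx);
	m->current = next;
	if (m->table[m->current].enter_state)
		m->table[m->current].enter_state(m->ctx);
}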
+
+static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
+{
+       u32 get_value = ihost->completion_queue_get;
+       u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
+
+       if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
+           COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
+               return true;
+
+       return false;
+}
+
+static bool sci_controller_isr(struct isci_host *ihost)
+{
+       if (sci_controller_completion_queue_has_entries(ihost)) {
+               return true;
+       } else {
+               /*
+                * We have a spurious interrupt; it could be that we have already
+                * emptied the completion queue from a previous interrupt. */
+               writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+               /*
+                * There is a race in the hardware that could cause us not to be notified
+                * of an interrupt completion if we do not take this step.  We will mask
+                * then unmask the interrupts so that if another interrupt is pending
+                * after the clearing of the interrupt source, we get the next interrupt message. */
+               writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+               writel(0, &ihost->smu_registers->interrupt_mask);
+       }
+
+       return false;
+}
+
+irqreturn_t isci_msix_isr(int vec, void *data)
+{
+       struct isci_host *ihost = data;
+
+       if (sci_controller_isr(ihost))
+               tasklet_schedule(&ihost->completion_tasklet);
+
+       return IRQ_HANDLED;
+}
+
+static bool sci_controller_error_isr(struct isci_host *ihost)
+{
+       u32 interrupt_status;
+
+       interrupt_status =
+               readl(&ihost->smu_registers->interrupt_status);
+       interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
+
+       if (interrupt_status != 0) {
+               /*
+                * There is an error interrupt pending, so let it through and handle
+                * it in the callback. */
+               return true;
+       }
+
+       /*
+        * There is a race in the hardware that could cause us not to be notified
+        * of an interrupt completion if we do not take this step.  We will mask
+        * then unmask the error interrupts so if there was another interrupt
+        * pending we will be notified.
+        * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
+       writel(0xff, &ihost->smu_registers->interrupt_mask);
+       writel(0, &ihost->smu_registers->interrupt_mask);
+
+       return false;
+}
+
+static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
+{
+       u32 index = SCU_GET_COMPLETION_INDEX(ent);
+       struct isci_request *ireq = ihost->reqs[index];
+
+       /* Make sure that we really want to process this IO request */
+       if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
+           ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
+           ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
+               /* Yep this is a valid io request pass it along to the
+                * io request handler
+                */
+               sci_io_request_tc_completion(ireq, ent);
+}
+
+static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
+{
+       u32 index;
+       struct isci_request *ireq;
+       struct isci_remote_device *idev;
+
+       index = SCU_GET_COMPLETION_INDEX(ent);
+
+       switch (scu_get_command_request_type(ent)) {
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
+               ireq = ihost->reqs[index];
+               dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
+                        __func__, ent, ireq);
+               /* @todo For a post TC operation we need to fail the IO
+                * request
+                */
+               break;
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
+       case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
+               idev = ihost->device_table[index];
+               dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
+                        __func__, ent, idev);
+               /* @todo For a port RNC operation we need to fail the
+                * device
+                */
+               break;
+       default:
+               dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
+                        __func__, ent);
+               break;
+       }
+}
+
+static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
+{
+       u32 index;
+       u32 frame_index;
+
+       struct scu_unsolicited_frame_header *frame_header;
+       struct isci_phy *iphy;
+       struct isci_remote_device *idev;
+
+       enum sci_status result = SCI_FAILURE;
+
+       frame_index = SCU_GET_FRAME_INDEX(ent);
+
+       frame_header = ihost->uf_control.buffers.array[frame_index].header;
+       ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
+
+       if (SCU_GET_FRAME_ERROR(ent)) {
+               /*
+                * @todo If the IAF frame or SIGNATURE FIS frame has an error, will
+                *       this cause a problem? We expect the phy initialization will
+                *       fail if there is an error in the frame. */
+               sci_controller_release_frame(ihost, frame_index);
+               return;
+       }
+
+       if (frame_header->is_address_frame) {
+               index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+               iphy = &ihost->phys[index];
+               result = sci_phy_frame_handler(iphy, frame_index);
+       } else {
+
+               index = SCU_GET_COMPLETION_INDEX(ent);
+
+               if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+                       /*
+                        * This is a signature fis or a frame from a direct attached SATA
+                        * device that has not yet been created.  In either case forward
+                        * the frame to the PE and let it take care of the frame data. */
+                       index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+                       iphy = &ihost->phys[index];
+                       result = sci_phy_frame_handler(iphy, frame_index);
+               } else {
+                       if (index < ihost->remote_node_entries)
+                               idev = ihost->device_table[index];
+                       else
+                               idev = NULL;
+
+                       if (idev != NULL)
+                               result = sci_remote_device_frame_handler(idev, frame_index);
+                       else
+                               sci_controller_release_frame(ihost, frame_index);
+               }
+       }
+
+       if (result != SCI_SUCCESS) {
+               /*
+                * @todo Is there any reason to report some additional error message
+                *       when we get this failure notification? */
+       }
+}
+
+static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
+{
+       struct isci_remote_device *idev;
+       struct isci_request *ireq;
+       struct isci_phy *iphy;
+       u32 index;
+
+       index = SCU_GET_COMPLETION_INDEX(ent);
+
+       switch (scu_get_event_type(ent)) {
+       case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
+               /* @todo The driver did something wrong and we need to fix the condition. */
+               dev_err(&ihost->pdev->dev,
+                       "%s: SCIC Controller 0x%p received SMU command error "
+                       "0x%x\n",
+                       __func__,
+                       ihost,
+                       ent);
+               break;
+
+       case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
+       case SCU_EVENT_TYPE_SMU_ERROR:
+       case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
+               /*
+                * @todo This is a hardware failure and it's likely that we want to
+                *       reset the controller. */
+               dev_err(&ihost->pdev->dev,
+                       "%s: SCIC Controller 0x%p received fatal controller "
+                       "event  0x%x\n",
+                       __func__,
+                       ihost,
+                       ent);
+               break;
+
+       case SCU_EVENT_TYPE_TRANSPORT_ERROR:
+               ireq = ihost->reqs[index];
+               sci_io_request_event_handler(ireq, ent);
+               break;
+
+       case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+               switch (scu_get_event_specifier(ent)) {
+               case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
+               case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
+                       ireq = ihost->reqs[index];
+                       if (ireq != NULL)
+                               sci_io_request_event_handler(ireq, ent);
+                       else
+                               dev_warn(&ihost->pdev->dev,
+                                        "%s: SCIC Controller 0x%p received "
+                                        "event 0x%x for io request object "
+                                        "that doesn't exist.\n",
+                                        __func__,
+                                        ihost,
+                                        ent);
+
+                       break;
+
+               case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
+                       idev = ihost->device_table[index];
+                       if (idev != NULL)
+                               sci_remote_device_event_handler(idev, ent);
+                       else
+                               dev_warn(&ihost->pdev->dev,
+                                        "%s: SCIC Controller 0x%p received "
+                                        "event 0x%x for remote device object "
+                                        "that doesn't exist.\n",
+                                        __func__,
+                                        ihost,
+                                        ent);
+
+                       break;
+               }
+               break;
+
+       case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+       /*
+        * direct the broadcast change event to the phy first and then let
+        * the phy redirect the broadcast change to the port object */
+       case SCU_EVENT_TYPE_ERR_CNT_EVENT:
+       /*
+        * direct error counter event to the phy object since that is where
+        * we get the event notification.  This is a type 4 event. */
+       case SCU_EVENT_TYPE_OSSP_EVENT:
+               index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+               iphy = &ihost->phys[index];
+               sci_phy_event_handler(iphy, ent);
+               break;
+
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+       case SCU_EVENT_TYPE_RNC_OPS_MISC:
+               if (index < ihost->remote_node_entries) {
+                       idev = ihost->device_table[index];
+
+                       if (idev != NULL)
+                               sci_remote_device_event_handler(idev, ent);
+               } else
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC Controller 0x%p received event 0x%x "
+                               "for remote device object 0x%0x that doesn't "
+                               "exist.\n",
+                               __func__,
+                               ihost,
+                               ent,
+                               index);
+
+               break;
+
+       default:
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC Controller received unknown event code %x\n",
+                        __func__,
+                        ent);
+               break;
+       }
+}
+
+static void sci_controller_process_completions(struct isci_host *ihost)
+{
+       u32 completion_count = 0;
+       u32 ent;
+       u32 get_index;
+       u32 get_cycle;
+       u32 event_get;
+       u32 event_cycle;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: completion queue beginning get:0x%08x\n",
+               __func__,
+               ihost->completion_queue_get);
+
+       /* Get the component parts of the completion queue */
+       get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
+       get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
+
+       event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
+       event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
+
+       while (
+               NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
+               == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
+               ) {
+               completion_count++;
+
+               ent = ihost->completion_queue[get_index];
+
+               /* increment the get pointer and check for rollover to toggle the cycle bit */
+               get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
+                            (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
+               get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
+
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: completion queue entry:0x%08x\n",
+                       __func__,
+                       ent);
+
+               switch (SCU_GET_COMPLETION_TYPE(ent)) {
+               case SCU_COMPLETION_TYPE_TASK:
+                       sci_controller_task_completion(ihost, ent);
+                       break;
+
+               case SCU_COMPLETION_TYPE_SDMA:
+                       sci_controller_sdma_completion(ihost, ent);
+                       break;
+
+               case SCU_COMPLETION_TYPE_UFI:
+                       sci_controller_unsolicited_frame(ihost, ent);
+                       break;
+
+               case SCU_COMPLETION_TYPE_EVENT:
+               case SCU_COMPLETION_TYPE_NOTIFY: {
+                       event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
+                                      (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
+                       event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
+
+                       sci_controller_event_completion(ihost, ent);
+                       break;
+               }
+               default:
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: SCIC Controller received unknown "
+                                "completion type %x\n",
+                                __func__,
+                                ent);
+                       break;
+               }
+       }
+
+       /* Update the get register if we completed one or more entries */
+       if (completion_count > 0) {
+               ihost->completion_queue_get =
+                       SMU_CQGR_GEN_BIT(ENABLE) |
+                       SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
+                       event_cycle |
+                       SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
+                       get_cycle |
+                       SMU_CQGR_GEN_VAL(POINTER, get_index);
+
+               writel(ihost->completion_queue_get,
+                      &ihost->smu_registers->completion_queue_get);
+
+       }
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: completion queue ending get:0x%08x\n",
+               __func__,
+               ihost->completion_queue_get);
+
+}
+
+static void sci_controller_error_handler(struct isci_host *ihost)
+{
+       u32 interrupt_status;
+
+       interrupt_status =
+               readl(&ihost->smu_registers->interrupt_status);
+
+       if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
+           sci_controller_completion_queue_has_entries(ihost)) {
+
+               sci_controller_process_completions(ihost);
+               writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
+       } else {
+               dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
+                       interrupt_status);
+
+               sci_change_state(&ihost->sm, SCIC_FAILED);
+
+               return;
+       }
+
+       /* If we don't process any completions, I am not sure that we want to do this.
+        * We are in the middle of a hardware fault and should probably be reset.
+        */
+       writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+irqreturn_t isci_intx_isr(int vec, void *data)
+{
+       irqreturn_t ret = IRQ_NONE;
+       struct isci_host *ihost = data;
+
+       if (sci_controller_isr(ihost)) {
+               writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+               tasklet_schedule(&ihost->completion_tasklet);
+               ret = IRQ_HANDLED;
+       } else if (sci_controller_error_isr(ihost)) {
+               spin_lock(&ihost->scic_lock);
+               sci_controller_error_handler(ihost);
+               spin_unlock(&ihost->scic_lock);
+               ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
+
+irqreturn_t isci_error_isr(int vec, void *data)
+{
+       struct isci_host *ihost = data;
+
+       if (sci_controller_error_isr(ihost))
+               sci_controller_error_handler(ihost);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * isci_host_start_complete() - This function is called by the core library,
+ *    through the ISCI Module, to indicate controller start status.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @completion_status: This parameter specifies the completion status from the
+ *    core library.
+ *
+ */
+static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+       if (completion_status != SCI_SUCCESS)
+               dev_info(&ihost->pdev->dev,
+                       "controller start timed out, continuing...\n");
+       isci_host_change_state(ihost, isci_ready);
+       clear_bit(IHOST_START_PENDING, &ihost->flags);
+       wake_up(&ihost->eventq);
+}
+
+int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+       struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
+       if (test_bit(IHOST_START_PENDING, &ihost->flags))
+               return 0;
+
+       /* todo: use sas_flush_discovery once it is upstream */
+       scsi_flush_work(shost);
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: ihost->status = %d, time = %ld\n",
+                __func__, isci_host_get_state(ihost), time);
+
+       return 1;
+
+}
+
+/**
+ * sci_controller_get_suggested_start_timeout() - This method returns the
+ *    suggested sci_controller_start() timeout amount.  The user is free to
+ *    use any timeout value, but this method provides the suggested minimum
+ *    start timeout value.  The returned value is based upon empirical
+ *    information determined as a result of interoperability testing.
+ * @controller: the handle to the controller object for which to return the
+ *    suggested start timeout.
+ *
+ * This method returns the number of milliseconds for the suggested start
+ * operation timeout.
+ */
+static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
+{
+       /* Validate the user supplied parameters. */
+       if (!ihost)
+               return 0;
+
+       /*
+        * The suggested minimum timeout value for a controller start operation:
+        *
+        *     Signature FIS Timeout
+        *   + Phy Start Timeout
+        *   + Number of Phy Spin Up Intervals
+        *   ---------------------------------
+        *   Number of milliseconds for the controller start operation.
+        *
+        * NOTE: The number of phy spin up intervals will be equivalent
+        *       to the number of phys divided by the number of phys allowed
+        *       per interval - 1 (once OEM parameters are supported).
+        *       Currently we assume only 1 phy per interval. */
+
+       return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+               + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
+               + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+}
+
+static void sci_controller_enable_interrupts(struct isci_host *ihost)
+{
+       BUG_ON(ihost->smu_registers == NULL);
+       writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+void sci_controller_disable_interrupts(struct isci_host *ihost)
+{
+       BUG_ON(ihost->smu_registers == NULL);
+       writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+}
+
+static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
+{
+       u32 port_task_scheduler_value;
+
+       port_task_scheduler_value =
+               readl(&ihost->scu_registers->peg0.ptsg.control);
+       port_task_scheduler_value |=
+               (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
+                SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
+       writel(port_task_scheduler_value,
+              &ihost->scu_registers->peg0.ptsg.control);
+}
+
+static void sci_controller_assign_task_entries(struct isci_host *ihost)
+{
+       u32 task_assignment;
+
+       /*
+        * Assign all the TCs to function 0
+        * TODO: Do we actually need to read this register to write it back?
+        */
+
+       task_assignment =
+               readl(&ihost->smu_registers->task_context_assignment[0]);
+
+       task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
+               (SMU_TCA_GEN_VAL(ENDING,  ihost->task_context_entries - 1)) |
+               (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
+
+       writel(task_assignment,
+               &ihost->smu_registers->task_context_assignment[0]);
+
+}
+
+static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
+{
+       u32 index;
+       u32 completion_queue_control_value;
+       u32 completion_queue_get_value;
+       u32 completion_queue_put_value;
+
+       ihost->completion_queue_get = 0;
+
+       completion_queue_control_value =
+               (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
+                SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
+
+       writel(completion_queue_control_value,
+              &ihost->smu_registers->completion_queue_control);
+
+
+       /* Set the completion queue get pointer and enable the queue */
+       completion_queue_get_value = (
+               (SMU_CQGR_GEN_VAL(POINTER, 0))
+               | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
+               | (SMU_CQGR_GEN_BIT(ENABLE))
+               | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
+               );
+
+       writel(completion_queue_get_value,
+              &ihost->smu_registers->completion_queue_get);
+
+       /* Set the completion queue put pointer */
+       completion_queue_put_value = (
+               (SMU_CQPR_GEN_VAL(POINTER, 0))
+               | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
+               );
+
+       writel(completion_queue_put_value,
+              &ihost->smu_registers->completion_queue_put);
+
+       /* Initialize the cycle bit of the completion queue entries */
+       for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
+               /*
+                * If get.cycle_bit != completion_queue.cycle_bit
+                * it's not a valid completion queue entry
+                * so at system start all entries are invalid */
+               ihost->completion_queue[index] = 0x80000000;
+       }
+}
+
+static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
+{
+       u32 frame_queue_control_value;
+       u32 frame_queue_get_value;
+       u32 frame_queue_put_value;
+
+       /* Write the queue size */
+       frame_queue_control_value =
+               SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
+
+       writel(frame_queue_control_value,
+              &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
+
+       /* Setup the get pointer for the unsolicited frame queue */
+       frame_queue_get_value = (
+               SCU_UFQGP_GEN_VAL(POINTER, 0)
+               |  SCU_UFQGP_GEN_BIT(ENABLE_BIT)
+               );
+
+       writel(frame_queue_get_value,
+              &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+       /* Setup the put pointer for the unsolicited frame queue */
+       frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
+       writel(frame_queue_put_value,
+              &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
+}
+
+static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+{
+       if (ihost->sm.current_state_id == SCIC_STARTING) {
+               /*
+                * We move into the ready state, because some of the phys/ports
+                * may be up and operational.
+                */
+               sci_change_state(&ihost->sm, SCIC_READY);
+
+               isci_host_start_complete(ihost, status);
+       }
+}
+
+static bool is_phy_starting(struct isci_phy *iphy)
+{
+       enum sci_phy_states state;
+
+       state = iphy->sm.current_state_id;
+       switch (state) {
+       case SCI_PHY_STARTING:
+       case SCI_PHY_SUB_INITIAL:
+       case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_IAF_UF:
+       case SCI_PHY_SUB_AWAIT_SAS_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+       case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+       case SCI_PHY_SUB_FINAL:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/**
+ * sci_controller_start_next_phy - start phy
+ * @scic: controller
+ *
+ * If all the phys have been started, then attempt to transition the
+ * controller to the READY state and inform the user
+ * (sci_cb_controller_start_complete()).
+ */
+static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
+{
+       struct sci_oem_params *oem = &ihost->oem_parameters;
+       struct isci_phy *iphy;
+       enum sci_status status;
+
+       status = SCI_SUCCESS;
+
+       if (ihost->phy_startup_timer_pending)
+               return status;
+
+       if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
+               bool is_controller_start_complete = true;
+               u32 state;
+               u8 index;
+
+               for (index = 0; index < SCI_MAX_PHYS; index++) {
+                       iphy = &ihost->phys[index];
+                       state = iphy->sm.current_state_id;
+
+                       if (!phy_get_non_dummy_port(iphy))
+                               continue;
+
+                       /* The controller start operation is complete iff:
+                        * - all links have been given an opportunity to start
+                        * - and each link either has no indication of a
+                        *   connected device, or the connected device has
+                        *   finished the link training process.
+                        */
+                       if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+                           (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+                           (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+                               is_controller_start_complete = false;
+                               break;
+                       }
+               }
+
+               /*
+                * The controller has successfully finished the start process.
+                * Inform the SCI Core user and transition to the READY state. */
+               if (is_controller_start_complete == true) {
+                       sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+                       sci_del_timer(&ihost->phy_timer);
+                       ihost->phy_startup_timer_pending = false;
+               }
+       } else {
+               iphy = &ihost->phys[ihost->next_phy_to_start];
+
+               if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+                       if (phy_get_non_dummy_port(iphy) == NULL) {
+                               ihost->next_phy_to_start++;
+
+                               /* Caution: recursion ahead, be forewarned.
+                                *
+                                * The PHY was never added to a PORT in MPC
+                                * mode, so start the next phy in sequence.
+                                * This phy will never go link up and will not
+                                * draw power; the OEM parameters either
+                                * configured the phy incorrectly for the PORT
+                                * or it was never assigned to a PORT.
+                                */
+                               return sci_controller_start_next_phy(ihost);
+                       }
+               }
+
+               status = sci_phy_start(iphy);
+
+               if (status == SCI_SUCCESS) {
+                       sci_mod_timer(&ihost->phy_timer,
+                                     SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
+                       ihost->phy_startup_timer_pending = true;
+               } else {
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: Controller start operation failed "
+                                "to start phy %d because of status "
+                                "%d.\n",
+                                __func__,
+                                ihost->phys[ihost->next_phy_to_start].phy_index,
+                                status);
+               }
+
+               ihost->next_phy_to_start++;
+       }
+
+       return status;
+}
+
+static void phy_startup_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
+       unsigned long flags;
+       enum sci_status status;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       ihost->phy_startup_timer_pending = false;
+
+       do {
+               status = sci_controller_start_next_phy(ihost);
+       } while (status != SCI_SUCCESS);
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static u16 isci_tci_active(struct isci_host *ihost)
+{
+       return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
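
isci_tci_active() relies on the <linux/circ_buf.h> helpers, which assume a power-of-two ring size: CIRC_CNT(head, tail, size) is just (head - tail) & (size - 1), and CIRC_SPACE() is its complement. A sketch of head/tail accounting under that assumption; the pool type and size here are illustrative:

#include <linux/circ_buf.h>
#include <linux/types.h>

#define POOL_SIZE 256	/* assumed power of two */

struct ring_ctr {
	u16 head;	/* producer index */
	u16 tail;	/* consumer index */
};

/* Entries currently in flight between producer and consumer */
static u16 entries_in_flight(const struct ring_ctr *r)
{
	return CIRC_CNT(r->head, r->tail, POOL_SIZE);
}

/* Entries still free for the producer to claim */
static u16 entries_free(const struct ring_ctr *r)
{
	return CIRC_SPACE(r->head, r->tail, POOL_SIZE);
}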
+
+static enum sci_status sci_controller_start(struct isci_host *ihost,
+                                            u32 timeout)
+{
+       enum sci_status result;
+       u16 index;
+
+       if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller start operation requested in "
+                        "invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       /* Build the TCi free pool */
+       BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
+       ihost->tci_head = 0;
+       ihost->tci_tail = 0;
+       for (index = 0; index < ihost->task_context_entries; index++)
+               isci_tci_free(ihost, index);
+
+       /* Build the RNi free pool */
+       sci_remote_node_table_initialize(&ihost->available_remote_nodes,
+                                        ihost->remote_node_entries);
+
+       /*
+        * Before anything else let's make sure we will not be
+        * interrupted by the hardware.
+        */
+       sci_controller_disable_interrupts(ihost);
+
+       /* Enable the port task scheduler */
+       sci_controller_enable_port_task_scheduler(ihost);
+
+       /* Assign all the task entries to ihost physical function */
+       sci_controller_assign_task_entries(ihost);
+
+       /* Now initialize the completion queue */
+       sci_controller_initialize_completion_queue(ihost);
+
+       /* Initialize the unsolicited frame queue for use */
+       sci_controller_initialize_unsolicited_frame_queue(ihost);
+
+       /* Start all of the ports on this controller */
+       for (index = 0; index < ihost->logical_port_entries; index++) {
+               struct isci_port *iport = &ihost->ports[index];
+
+               result = sci_port_start(iport);
+               if (result)
+                       return result;
+       }
+
+       sci_controller_start_next_phy(ihost);
+
+       sci_mod_timer(&ihost->timer, timeout);
+
+       sci_change_state(&ihost->sm, SCIC_STARTING);
+
+       return SCI_SUCCESS;
+}
+
+void isci_host_scan_start(struct Scsi_Host *shost)
+{
+       struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+       unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
+
+       set_bit(IHOST_START_PENDING, &ihost->flags);
+
+       spin_lock_irq(&ihost->scic_lock);
+       sci_controller_start(ihost, tmo);
+       sci_controller_enable_interrupts(ihost);
+       spin_unlock_irq(&ihost->scic_lock);
+}
+
+static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+       isci_host_change_state(ihost, isci_stopped);
+       sci_controller_disable_interrupts(ihost);
+       clear_bit(IHOST_STOP_PENDING, &ihost->flags);
+       wake_up(&ihost->eventq);
+}
+
+static void sci_controller_completion_handler(struct isci_host *ihost)
+{
+       /* Empty out the completion queue */
+       if (sci_controller_completion_queue_has_entries(ihost))
+               sci_controller_process_completions(ihost);
+
+       /* Clear the interrupt and enable all interrupts again */
+       writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+       /* Could we write the value of SMU_ISR_COMPLETION? */
+       writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+       writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+/**
+ * isci_host_completion_routine() - This function is the delayed service
+ *    routine that calls the sci core library's completion handler. It's
+ *    scheduled as a tasklet from the interrupt service routine when interrupts
+ *    are in use, or set as the timeout function in polled mode.
+ * @data: This parameter specifies the ISCI host object
+ *
+ */
+static void isci_host_completion_routine(unsigned long data)
+{
+       struct isci_host *ihost = (struct isci_host *)data;
+       struct list_head    completed_request_list;
+       struct list_head    errored_request_list;
+       struct list_head    *current_position;
+       struct list_head    *next_position;
+       struct isci_request *request;
+       struct isci_request *next_request;
+       struct sas_task     *task;
+
+       INIT_LIST_HEAD(&completed_request_list);
+       INIT_LIST_HEAD(&errored_request_list);
+
+       spin_lock_irq(&ihost->scic_lock);
+
+       sci_controller_completion_handler(ihost);
+
+       /* Take the lists of completed I/Os from the host. */
+
+       list_splice_init(&ihost->requests_to_complete,
+                        &completed_request_list);
+
+       /* Take the list of errored I/Os from the host. */
+       list_splice_init(&ihost->requests_to_errorback,
+                        &errored_request_list);
+
+       spin_unlock_irq(&ihost->scic_lock);
+
+       /* Process any completions in the lists. */
+       list_for_each_safe(current_position, next_position,
+                          &completed_request_list) {
+
+               request = list_entry(current_position, struct isci_request,
+                                    completed_node);
+               task = isci_request_access_task(request);
+
+               /* Normal notification (task_done) */
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: Normal - request/task = %p/%p\n",
+                       __func__,
+                       request,
+                       task);
+
+               /* Return the task to libsas */
+               if (task != NULL) {
+
+                       task->lldd_task = NULL;
+                       if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+                               /* If the task is already in the abort path,
+                                * the task_done callback cannot be called.
+                                */
+                               task->task_done(task);
+                       }
+               }
+
+               spin_lock_irq(&ihost->scic_lock);
+               isci_free_tag(ihost, request->io_tag);
+               spin_unlock_irq(&ihost->scic_lock);
+       }
+       list_for_each_entry_safe(request, next_request, &errored_request_list,
+                                completed_node) {
+
+               task = isci_request_access_task(request);
+
+               /* Use sas_task_abort */
+               dev_warn(&ihost->pdev->dev,
+                        "%s: Error - request/task = %p/%p\n",
+                        __func__,
+                        request,
+                        task);
+
+               if (task != NULL) {
+
+                       /* Put the task into the abort path if it's not there
+                        * already.
+                        */
+                       if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
+                               sas_task_abort(task);
+
+               } else {
+                       /* This is a case where the request has completed with a
+                        * status such that it needed further target servicing,
+                        * but the sas_task reference has already been removed
+                        * from the request.  Since it was errored, it was not
+                        * being aborted, so there is nothing to do except free
+                        * it.
+                        */
+
+                       spin_lock_irq(&ihost->scic_lock);
+                       /* Remove the request from the remote device's list
+                        * of pending requests.
+                        */
+                       list_del_init(&request->dev_node);
+                       isci_free_tag(ihost, request->io_tag);
+                       spin_unlock_irq(&ihost->scic_lock);
+               }
+       }
+}
+
+/**
+ * sci_controller_stop() - This method will stop an individual controller
+ *    object.  This method will invoke the associated user callback upon
+ *    completion.  The completion callback is called once the method has
+ *    returned SCI_SUCCESS and the controller has been quiesced.  This method
+ *    will ensure that all IO requests are quiesced, phys are stopped, and
+ *    all additional operation by the hardware is halted.
+ * @ihost: the handle to the controller object to stop.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    stop operation should complete.
+ *
+ * The controller must be in the READY state.  Indicate if the controller
+ * stop method succeeded or failed in some way.  SCI_SUCCESS if the stop
+ * operation successfully began.  SCI_FAILURE_INVALID_STATE if the controller
+ * is not in the READY state.
+ */
+static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
+{
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller stop operation requested in "
+                        "invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_mod_timer(&ihost->timer, timeout);
+       sci_change_state(&ihost->sm, SCIC_STOPPING);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_reset() - This method will reset the supplied core
+ *    controller.  This operation is considered destructive.  In other words,
+ *    all current operations are wiped out.  No IO completions for
+ *    outstanding devices occur.  Outstanding IO requests are not aborted or
+ *    completed at the actual remote device.
+ * @ihost: the handle to the controller object to reset.
+ *
+ * Indicate if the controller reset method succeeded or failed in some way.
+ * SCI_SUCCESS if the reset operation successfully started.
+ * SCI_FAILURE_INVALID_STATE if the controller is not in a resettable state.
+ */
+static enum sci_status sci_controller_reset(struct isci_host *ihost)
+{
+       switch (ihost->sm.current_state_id) {
+       case SCIC_RESET:
+       case SCIC_READY:
+       case SCIC_STOPPED:
+       case SCIC_FAILED:
+               /*
+                * The reset operation is not a graceful cleanup, just
+                * perform the state transition.
+                */
+               sci_change_state(&ihost->sm, SCIC_RESETTING);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller reset operation requested in "
+                        "invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+void isci_host_deinit(struct isci_host *ihost)
+{
+       int i;
+
+       isci_host_change_state(ihost, isci_stopping);
+       for (i = 0; i < SCI_MAX_PORTS; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+               struct isci_remote_device *idev, *d;
+
+               list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
+                       if (test_bit(IDEV_ALLOCATED, &idev->flags))
+                               isci_remote_device_stop(ihost, idev);
+               }
+       }
+
+       set_bit(IHOST_STOP_PENDING, &ihost->flags);
+
+       spin_lock_irq(&ihost->scic_lock);
+       sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
+       spin_unlock_irq(&ihost->scic_lock);
+
+       wait_for_stop(ihost);
+       sci_controller_reset(ihost);
+
+       /* Cancel any/all outstanding port timers */
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+               del_timer_sync(&iport->timer.timer);
+       }
+
+       /* Cancel any/all outstanding phy timers */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               struct isci_phy *iphy = &ihost->phys[i];
+               del_timer_sync(&iphy->sata_timer.timer);
+       }
+
+       del_timer_sync(&ihost->port_agent.timer.timer);
+
+       del_timer_sync(&ihost->power_control.timer.timer);
+
+       del_timer_sync(&ihost->timer.timer);
+
+       del_timer_sync(&ihost->phy_timer.timer);
+}
+
+static void __iomem *scu_base(struct isci_host *isci_host)
+{
+       struct pci_dev *pdev = isci_host->pdev;
+       int id = isci_host->id;
+
+       return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
+}
+
+static void __iomem *smu_base(struct isci_host *isci_host)
+{
+       struct pci_dev *pdev = isci_host->pdev;
+       int id = isci_host->id;
+
+       return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
+}
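+
+/* Multiple SCU controllers sit behind one PCI function; both helpers above
+ * offset into the shared BAR by controller id.  The "* 2" likely converts
+ * the logical BAR index into the PCI BAR number, since 64-bit BARs occupy
+ * two BAR slots in the pcim_iomap_table() array.
+ */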
+
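+/* Gather the driver-wide tunables (phy_gen, the *_to timeouts,
+ * max_concurr_spinup - all defined elsewhere in the driver) into a
+ * sci_user_parameters block for validation below.
+ */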
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+       int i;
+
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               struct sci_phy_user_params *u_phy = &u->phys[i];
+
+               u_phy->max_speed_generation = phy_gen;
+
+               /* we are not exporting these for now */
+               u_phy->align_insertion_frequency = 0x7f;
+               u_phy->in_connection_align_insertion_frequency = 0xff;
+               u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+       }
+
+       u->stp_inactivity_timeout = stp_inactive_to;
+       u->ssp_inactivity_timeout = ssp_inactive_to;
+       u->stp_max_occupancy_timeout = stp_max_occ_to;
+       u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+       u->no_outbound_task_timeout = no_outbound_task_to;
+       u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+}
+
+static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       sci_del_timer(&ihost->timer);
+}
+
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
+#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
+#define INTERRUPT_COALESCE_NUMBER_MAX                        256
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
+
+/**
+ * sci_controller_set_interrupt_coalescence() - This method allows the user to
+ *    configure the interrupt coalescence.
+ * @ihost: This parameter represents the handle to the controller object
+ *    whose interrupt coalesce register is overridden.
+ * @coalesce_number: Used to control the number of entries in the Completion
+ *    Queue before an interrupt is generated.  If the number of entries
+ *    exceeds this number, an interrupt will be generated.  The valid range
+ *    of the input is [0, 256].  A setting of 0 results in coalescing being
+ *    disabled.
+ * @coalesce_timeout: Timeout value in microseconds.  The valid range of the
+ *    input is [0, 2700000].  A setting of 0 is allowed and results in no
+ *    interrupt coalescing timeout.
+ *
+ * Indicate if the user successfully set the interrupt coalesce parameters.
+ * SCI_SUCCESS The user successfully updated the interrupt coalescence.
+ * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
+ */
+static enum sci_status
+sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
+                                        u32 coalesce_number,
+                                        u32 coalesce_timeout)
+{
+       u8 timeout_encode = 0;
+       u32 min = 0;
+       u32 max = 0;
+
+       /* Check if the input parameters fall in the range. */
+       if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
+               return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+       /*
+        *  Defined encoding for interrupt coalescing timeout:
+        *              Value   Min      Max     Units
+        *              -----   ---      ---     -----
+        *              0       -        -       Disabled
+        *              1       13.3     20.0    ns
+        *              2       26.7     40.0
+        *              3       53.3     80.0
+        *              4       106.7    160.0
+        *              5       213.3    320.0
+        *              6       426.7    640.0
+        *              7       853.3    1280.0
+        *              8       1.7      2.6     us
+        *              9       3.4      5.1
+        *              10      6.8      10.2
+        *              11      13.7     20.5
+        *              12      27.3     41.0
+        *              13      54.6     81.9
+        *              14      109.2    163.8
+        *              15      218.5    327.7
+        *              16      436.9    655.4
+        *              17      873.8    1310.7
+        *              18      1.7      2.6     ms
+        *              19      3.5      5.2
+        *              20      7.0      10.5
+        *              21      14.0     21.0
+        *              22      28.0     41.9
+        *              23      55.9     83.9
+        *              24      111.8    167.8
+        *              25      223.7    335.5
+        *              26      447.4    671.1
+        *              27      894.8    1342.2
+        *              28      1.8      2.7     s
+        *              Others Undefined */
+
+       /*
+        * Use the table above to decide the encode of interrupt coalescing timeout
+        * value for register writing. */
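+       /*
+        * Worked example (derived from the table above): a requested timeout
+        * of 250 us becomes 25000 in 10 ns units; doubling the encode-7 base
+        * range [85, 128) eight times gives [21760, 32768), so the loop
+        * settles on timeout_encode = 15, the 218.5 - 327.7 us bucket.
+        */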
+       if (coalesce_timeout == 0)
+               timeout_encode = 0;
+       else {
+               /* make the timeout value in units of (10 ns). */
+               coalesce_timeout = coalesce_timeout * 100;
+               min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
+               max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
+
+               /* get the encode of timeout for register writing. */
+               for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
+                     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
+                     timeout_encode++) {
+                       if (min <= coalesce_timeout && max > coalesce_timeout)
+                               break;
+                       else if (coalesce_timeout >= max && coalesce_timeout < min * 2
+                                && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
+                               if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
+                                       break;
+                               else {
+                                       timeout_encode++;
+                                       break;
+                               }
+                       } else {
+                               max = max * 2;
+                               min = min * 2;
+                       }
+               }
+
+               if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
+                       /* the value is out of range. */
+                       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+       }
+
+       writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
+              SMU_ICC_GEN_VAL(TIMER, timeout_encode),
+              &ihost->smu_registers->interrupt_coalesce_control);
+
+       ihost->interrupt_coalesce_number = (u16)coalesce_number;
+       ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
+
+       return SCI_SUCCESS;
+}
+
+static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       /* set the default interrupt coalescence: 0x10 (16) entries or 250 us. */
+       sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+}
+
+static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       /* disable interrupt coalescence. */
+       sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+       u32 index;
+       enum sci_status status;
+       enum sci_status phy_status;
+
+       status = SCI_SUCCESS;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               phy_status = sci_phy_stop(&ihost->phys[index]);
+
+               if (phy_status != SCI_SUCCESS &&
+                   phy_status != SCI_FAILURE_INVALID_STATE) {
+                       status = SCI_FAILURE;
+
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: Controller stop operation failed to stop "
+                                "phy %d because of status %d.\n",
+                                __func__,
+                                ihost->phys[index].phy_index, phy_status);
+               }
+       }
+
+       return status;
+}
+
+static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
+{
+       u32 index;
+       enum sci_status port_status;
+       enum sci_status status = SCI_SUCCESS;
+
+       for (index = 0; index < ihost->logical_port_entries; index++) {
+               struct isci_port *iport = &ihost->ports[index];
+
+               port_status = sci_port_stop(iport);
+
+               if ((port_status != SCI_SUCCESS) &&
+                   (port_status != SCI_FAILURE_INVALID_STATE)) {
+                       status = SCI_FAILURE;
+
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: Controller stop operation failed to "
+                                "stop port %d because of status %d.\n",
+                                __func__,
+                                iport->logical_port_index,
+                                port_status);
+               }
+       }
+
+       return status;
+}
+
+static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
+{
+       u32 index;
+       enum sci_status status;
+       enum sci_status device_status;
+
+       status = SCI_SUCCESS;
+
+       for (index = 0; index < ihost->remote_node_entries; index++) {
+               if (ihost->device_table[index] != NULL) {
+                       /* TODO: What timeout value do we want to provide to this request? */
+                       device_status = sci_remote_device_stop(ihost->device_table[index], 0);
+
+                       if ((device_status != SCI_SUCCESS) &&
+                           (device_status != SCI_FAILURE_INVALID_STATE)) {
+                               dev_warn(&ihost->pdev->dev,
+                                        "%s: Controller stop operation failed "
+                                        "to stop device 0x%p because of "
+                                        "status %d.\n",
+                                        __func__,
+                                        ihost->device_table[index], device_status);
+                       }
+               }
+       }
+
+       return status;
+}
+
+static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
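+       /* Stopping completes asynchronously: the SCIC_STOPPED transition
+        * happens in sci_controller_remote_device_stopped() once the last
+        * stopping device winds down.
+        */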
+       /* Stop all of the components for this controller */
+       sci_controller_stop_phys(ihost);
+       sci_controller_stop_ports(ihost);
+       sci_controller_stop_devices(ihost);
+}
+
+static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       sci_del_timer(&ihost->timer);
+}
+
+static void sci_controller_reset_hardware(struct isci_host *ihost)
+{
+       /* Disable interrupts so we don't take any spurious interrupts */
+       sci_controller_disable_interrupts(ihost);
+
+       /* Reset the SCU */
+       writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
+
+       /* Delay for 1ms before clearing the CQP and UFQPR. */
+       udelay(1000);
+
+       /* The write to the CQGR clears the CQP */
+       writel(0x00000000, &ihost->smu_registers->completion_queue_get);
+
+       /* The write to the UFQGP clears the UFQPR */
+       writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+       sci_controller_reset_hardware(ihost);
+       sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static const struct sci_base_state sci_controller_state_table[] = {
+       [SCIC_INITIAL] = {
+               .enter_state = sci_controller_initial_state_enter,
+       },
+       [SCIC_RESET] = {},
+       [SCIC_INITIALIZING] = {},
+       [SCIC_INITIALIZED] = {},
+       [SCIC_STARTING] = {
+               .exit_state  = sci_controller_starting_state_exit,
+       },
+       [SCIC_READY] = {
+               .enter_state = sci_controller_ready_state_enter,
+               .exit_state  = sci_controller_ready_state_exit,
+       },
+       [SCIC_RESETTING] = {
+               .enter_state = sci_controller_resetting_state_enter,
+       },
+       [SCIC_STOPPING] = {
+               .enter_state = sci_controller_stopping_state_enter,
+               .exit_state = sci_controller_stopping_state_exit,
+       },
+       [SCIC_STOPPED] = {},
+       [SCIC_FAILED] = {}
+};
+
+static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
+{
+       /* these defaults are overridden by the platform / firmware */
+       u16 index;
+
+       /* Default to APC mode. */
+       ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+       /* Default to a single concurrent device spin up. */
+       ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+
+       /* Default to no SSC operation. */
+       ihost->oem_parameters.controller.do_enable_ssc = false;
+
+       /* Initialize all of the port parameter information to narrow ports. */
+       for (index = 0; index < SCI_MAX_PORTS; index++) {
+               ihost->oem_parameters.ports[index].phy_mask = 0;
+       }
+
+       /* Initialize all of the phy parameter information. */
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               /* Default to 6G (i.e. Gen 3) for now. */
+               ihost->user_parameters.phys[index].max_speed_generation = 3;
+
+               /* the frequencies cannot be 0 */
+               ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
+               ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
+               ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
+
+               /*
+                * Previous Vitesse-based expanders had an arbitration issue
+                * that is worked around by making the upper 32 bits of the
+                * SAS address greater than the Vitesse company identifier.
+                * Hence the use of 0x5FCFFFFF. */
+               ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
+               ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
+       }
+
+       ihost->user_parameters.stp_inactivity_timeout = 5;
+       ihost->user_parameters.ssp_inactivity_timeout = 5;
+       ihost->user_parameters.stp_max_occupancy_timeout = 5;
+       ihost->user_parameters.ssp_max_occupancy_timeout = 20;
+       ihost->user_parameters.no_outbound_task_timeout = 20;
+}
+
+static void controller_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
+       struct sci_base_state_machine *sm = &ihost->sm;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       if (sm->current_state_id == SCIC_STARTING)
+               sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
+       else if (sm->current_state_id == SCIC_STOPPING) {
+               sci_change_state(sm, SCIC_FAILED);
+               isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
+       } else  /* TODO: Now what do we want to do in this case? */
+               dev_err(&ihost->pdev->dev,
+                       "%s: Controller timer fired when controller was not "
+                       "in a state being timed.\n",
+                       __func__);
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static enum sci_status sci_controller_construct(struct isci_host *ihost,
+                                               void __iomem *scu_base,
+                                               void __iomem *smu_base)
+{
+       u8 i;
+
+       sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
+
+       ihost->scu_registers = scu_base;
+       ihost->smu_registers = smu_base;
+
+       sci_port_configuration_agent_construct(&ihost->port_agent);
+
+       /* Construct the ports for this controller */
+       for (i = 0; i < SCI_MAX_PORTS; i++)
+               sci_port_construct(&ihost->ports[i], i, ihost);
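+       /* The loop leaves i == SCI_MAX_PORTS, so the dummy port (to which
+        * all phys are initially parked below) occupies the extra array slot.
+        */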
+       sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
+
+       /* Construct the phys for this controller */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               /* Add all the PHYs to the dummy port */
+               sci_phy_construct(&ihost->phys[i],
+                                 &ihost->ports[SCI_MAX_PORTS], i);
+       }
+
+       ihost->invalid_phy_mask = 0;
+
+       sci_init_timer(&ihost->timer, controller_timeout);
+
+       /* Initialize the User and OEM parameters to default values. */
+       sci_controller_set_default_config_parameters(ihost);
+
+       return sci_controller_reset(ihost);
+}
+
+int sci_oem_parameters_validate(struct sci_oem_params *oem)
+{
+       int i;
+
+       for (i = 0; i < SCI_MAX_PORTS; i++)
+               if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
+                       return -EINVAL;
+
+       for (i = 0; i < SCI_MAX_PHYS; i++)
+               if (oem->phys[i].sas_address.high == 0 &&
+                   oem->phys[i].sas_address.low == 0)
+                       return -EINVAL;
+
+       if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
+               for (i = 0; i < SCI_MAX_PHYS; i++)
+                       if (oem->ports[i].phy_mask != 0)
+                               return -EINVAL;
+       } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+               u8 phy_mask = 0;
+
+               for (i = 0; i < SCI_MAX_PHYS; i++)
+                       phy_mask |= oem->ports[i].phy_mask;
+
+               if (phy_mask == 0)
+                       return -EINVAL;
+       } else
+               return -EINVAL;
+
+       if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+               return -EINVAL;
+
+       return 0;
+}
+
+static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
+{
+       u32 state = ihost->sm.current_state_id;
+
+       if (state == SCIC_RESET ||
+           state == SCIC_INITIALIZING ||
+           state == SCIC_INITIALIZED) {
+
+               if (sci_oem_parameters_validate(&ihost->oem_parameters))
+                       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+static void power_control_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
+       struct isci_phy *iphy;
+       unsigned long flags;
+       u8 i;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       ihost->power_control.phys_granted_power = 0;
+
+       if (ihost->power_control.phys_waiting == 0) {
+               ihost->power_control.timer_started = false;
+               goto done;
+       }
+
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+
+               if (ihost->power_control.phys_waiting == 0)
+                       break;
+
+               iphy = ihost->power_control.requesters[i];
+               if (iphy == NULL)
+                       continue;
+
+               if (ihost->power_control.phys_granted_power >=
+                   ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+                       break;
+
+               ihost->power_control.requesters[i] = NULL;
+               ihost->power_control.phys_waiting--;
+               ihost->power_control.phys_granted_power++;
+               sci_phy_consume_power_handler(iphy);
+       }
+
+       /*
+        * It doesn't matter if the power list is empty, we need to start the
+        * timer in case another phy becomes ready.
+        */
+       sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+       ihost->power_control.timer_started = true;
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
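+/* Staggered spin-up throttle: a phy is granted power immediately while the
+ * grant count is under the OEM max_concurrent_dev_spin_up limit; otherwise
+ * it is parked in the requesters[] list until power_control_timeout() hands
+ * out the next round of grants.
+ */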
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+                                              struct isci_phy *iphy)
+{
+       BUG_ON(iphy == NULL);
+
+       if (ihost->power_control.phys_granted_power <
+           ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+               ihost->power_control.phys_granted_power++;
+               sci_phy_consume_power_handler(iphy);
+
+               /*
+                * stop and restart the power_control timer.  When the timer
+                * fires, phys_granted_power is reset to 0.
+                */
+               if (ihost->power_control.timer_started)
+                       sci_del_timer(&ihost->power_control.timer);
+
+               sci_mod_timer(&ihost->power_control.timer,
+                                SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+               ihost->power_control.timer_started = true;
+
+       } else {
+               /* Add the phy in the waiting list */
+               ihost->power_control.requesters[iphy->phy_index] = iphy;
+               ihost->power_control.phys_waiting++;
+       }
+}
+
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+                                              struct isci_phy *iphy)
+{
+       BUG_ON(iphy == NULL);
+
+       if (ihost->power_control.requesters[iphy->phy_index])
+               ihost->power_control.phys_waiting--;
+
+       ihost->power_control.requesters[iphy->phy_index] = NULL;
+}
+
+#define AFE_REGISTER_WRITE_DELAY 10
+
+/* Initialize the AFE for every phy on this controller.  The per-phy TX
+ * amplitude setup is read from the OEM parameters.
+ */
+static void sci_controller_afe_initialization(struct isci_host *ihost)
+{
+       const struct sci_oem_params *oem = &ihost->oem_parameters;
+       struct pci_dev *pdev = ihost->pdev;
+       u32 afe_status;
+       u32 phy_id;
+
+       /* Clear DFX Status registers */
+       writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       udelay(AFE_REGISTER_WRITE_DELAY);
+
+       if (is_b0(pdev)) {
+               /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
+                * Timer, PM Stagger Timer */
+               writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       }
+
+       /* Configure bias currents to normal */
+       if (is_a2(pdev))
+               writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+       else if (is_b0(pdev) || is_c0(pdev))
+               writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+
+       udelay(AFE_REGISTER_WRITE_DELAY);
+
+       /* Enable PLL */
+       if (is_b0(pdev) || is_c0(pdev))
+               writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
+       else
+               writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+
+       udelay(AFE_REGISTER_WRITE_DELAY);
+
+       /* Wait for the PLL to lock */
+       do {
+               afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       } while ((afe_status & 0x00001000) == 0);
+
+       if (is_a2(pdev)) {
+               /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
+               writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       }
+
+       for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+               const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+
+               if (is_b0(pdev)) {
+                        /* Configure transmitter SSC parameters */
+                       writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               } else if (is_c0(pdev)) {
+                        /* Configure transmitter SSC parameters */
+                       writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /*
+                        * All defaults, except the Receive Word Alignment/Comma Detect
+                        * Enable....(0xe800) */
+                       writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               } else {
+                       /*
+                        * All defaults, except the Receive Word Alignment/Comma Detect
+                        * Enable....(0xe800) */
+                       writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               }
+
+               /*
+                * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+                * & increase TX int & ext bias 20%....(0xe85c) */
+               if (is_a2(pdev))
+                       writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+               else if (is_b0(pdev)) {
+                        /* Power down TX and RX (PWRDNTX and PWRDNRX) */
+                       writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /*
+                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+                        * & increase TX int & ext bias 20%....(0xe85c) */
+                       writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+               } else {
+                       writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /*
+                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+                        * & increase TX int & ext bias 20%....(0xe85c) */
+                       writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+               }
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               if (is_a2(pdev)) {
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               }
+
+               /*
+                * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
+                * RDD=0x0(RX Detect Enabled) ....(0xe800) */
+               writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               /* Leave DFE/FFE on */
+               if (is_a2(pdev))
+                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+               else if (is_b0(pdev)) {
+                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+               } else {
+                       writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+               }
+
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control0,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control1,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control2,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+
+               writel(oem_phy->afe_tx_amp_control3,
+                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+       }
+
+       /* Transfer control to the PEs */
+       writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       udelay(AFE_REGISTER_WRITE_DELAY);
+}
+
+static void sci_controller_initialize_power_control(struct isci_host *ihost)
+{
+       sci_init_timer(&ihost->power_control.timer, power_control_timeout);
+
+       memset(ihost->power_control.requesters, 0,
+              sizeof(ihost->power_control.requesters));
+
+       ihost->power_control.phys_waiting = 0;
+       ihost->power_control.phys_granted_power = 0;
+}
+
+static enum sci_status sci_controller_initialize(struct isci_host *ihost)
+{
+       struct sci_base_state_machine *sm = &ihost->sm;
+       enum sci_status result = SCI_FAILURE;
+       unsigned long i, state, val;
+
+       if (ihost->sm.current_state_id != SCIC_RESET) {
+               dev_warn(&ihost->pdev->dev,
+                        "SCIC Controller initialize operation requested "
+                        "in invalid state\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(sm, SCIC_INITIALIZING);
+
+       sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
+
+       ihost->next_phy_to_start = 0;
+       ihost->phy_startup_timer_pending = false;
+
+       sci_controller_initialize_power_control(ihost);
+
+       /*
+        * There is nothing to do here for B0 since we do not have to
+        * program the AFE registers.
+        * TODO: The AFE settings are supposed to be correct for B0, but
+        * presently they seem to be wrong. */
+       sci_controller_afe_initialization(ihost);
+
+       /* Take the hardware out of reset */
+       writel(0, &ihost->smu_registers->soft_reset_control);
+
+       /*
+        * TODO: Provide a meaningful error code for hardware failure, e.g.
+        * result = SCI_FAILURE_CONTROLLER_HARDWARE. */
+       for (i = 100; i >= 1; i--) {
+               u32 status;
+
+               /* Loop until the hardware reports success */
+               udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+               status = readl(&ihost->smu_registers->control_status);
+
+               if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+                       break;
+       }
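+       /* On timeout i reaches 0 while result is still SCI_FAILURE from its
+        * initializer, so the bail-out below lands the state machine in
+        * SCIC_FAILED.
+        */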
+       if (i == 0)
+               goto out;
+
+       /*
+        * Determine the actual device capacities that the hardware will
+        * support */
+       val = readl(&ihost->smu_registers->device_context_capacity);
+
+       /* Record the smaller of the two capacity values */
+       ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+       ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+       ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+       /*
+        * Make all PEs that are unassigned match up with the
+        * logical ports
+        */
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct scu_port_task_scheduler_group_registers __iomem
+                       *ptsg = &ihost->scu_registers->peg0.ptsg;
+
+               writel(i, &ptsg->protocol_engine[i]);
+       }
+
+       /* Initialize hardware PCI Relaxed ordering in DMA engines */
+       val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+       val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+       writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+       val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+       val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+       writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+       /*
+        * Initialize the PHYs before the PORTs because the PHY registers
+        * are accessed during the port initialization.
+        */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               result = sci_phy_initialize(&ihost->phys[i],
+                                           &ihost->scu_registers->peg0.pe[i].tl,
+                                           &ihost->scu_registers->peg0.pe[i].ll);
+               if (result != SCI_SUCCESS)
+                       goto out;
+       }
+
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+
+               iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
+               iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
+               iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
+       }
+
+       result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
+
+ out:
+       /* Advance the controller state machine */
+       if (result == SCI_SUCCESS)
+               state = SCIC_INITIALIZED;
+       else
+               state = SCIC_FAILED;
+       sci_change_state(sm, state);
+
+       return result;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+                                              struct sci_user_parameters *sci_parms)
+{
+       u32 state = ihost->sm.current_state_id;
+
+       if (state == SCIC_RESET ||
+           state == SCIC_INITIALIZING ||
+           state == SCIC_INITIALIZED) {
+               u16 index;
+
+               /*
+                * Validate the user parameters.  If they are not legal, then
+                * return a failure.
+                */
+               for (index = 0; index < SCI_MAX_PHYS; index++) {
+                       struct sci_phy_user_params *user_phy;
+
+                       user_phy = &sci_parms->phys[index];
+
+                       if (!((user_phy->max_speed_generation <=
+                                               SCIC_SDS_PARM_MAX_SPEED) &&
+                             (user_phy->max_speed_generation >
+                                               SCIC_SDS_PARM_NO_SPEED)))
+                               return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+                       if ((user_phy->in_connection_align_insertion_frequency < 3) ||
+                           (user_phy->align_insertion_frequency == 0) ||
+                           (user_phy->notify_enable_spin_up_insertion_frequency == 0))
+                               return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+               }
+
+               if ((sci_parms->stp_inactivity_timeout == 0) ||
+                   (sci_parms->ssp_inactivity_timeout == 0) ||
+                   (sci_parms->stp_max_occupancy_timeout == 0) ||
+                   (sci_parms->ssp_max_occupancy_timeout == 0) ||
+                   (sci_parms->no_outbound_task_timeout == 0))
+                       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+               memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+       struct device *dev = &ihost->pdev->dev;
+       dma_addr_t dma;
+       size_t size;
+       int err;
+
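+       /* All allocations below are device-managed (dmam_*) and are released
+        * automatically when the PCI device is unbound.
+        */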
+       size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
+       ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+       if (!ihost->completion_queue)
+               return -ENOMEM;
+
+       writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
+       writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
+
+       size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
+       ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
+                                                              GFP_KERNEL);
+       if (!ihost->remote_node_context_table)
+               return -ENOMEM;
+
+       writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
+       writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
+
+       size = ihost->task_context_entries * sizeof(struct scu_task_context);
+       ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+       if (!ihost->task_context_table)
+               return -ENOMEM;
+
+       ihost->task_context_dma = dma;
+       writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
+       writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
+
+       err = sci_unsolicited_frame_control_construct(ihost);
+       if (err)
+               return err;
+
+       /*
+        * Inform the silicon as to the location of the UF headers and
+        * address table.
+        */
+       writel(lower_32_bits(ihost->uf_control.headers.physical_address),
+               &ihost->scu_registers->sdma.uf_header_base_address_lower);
+       writel(upper_32_bits(ihost->uf_control.headers.physical_address),
+               &ihost->scu_registers->sdma.uf_header_base_address_upper);
+
+       writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
+               &ihost->scu_registers->sdma.uf_address_table_lower);
+       writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
+               &ihost->scu_registers->sdma.uf_address_table_upper);
+
+       return 0;
+}
+
+int isci_host_init(struct isci_host *ihost)
+{
+       int err = 0, i;
+       enum sci_status status;
+       struct sci_user_parameters sci_user_params;
+       struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+
+       spin_lock_init(&ihost->state_lock);
+       spin_lock_init(&ihost->scic_lock);
+       init_waitqueue_head(&ihost->eventq);
+
+       isci_host_change_state(ihost, isci_starting);
+
+       status = sci_controller_construct(ihost, scu_base(ihost),
+                                         smu_base(ihost));
+
+       if (status != SCI_SUCCESS) {
+               dev_err(&ihost->pdev->dev,
+                       "%s: sci_controller_construct failed - status = %x\n",
+                       __func__,
+                       status);
+               return -ENODEV;
+       }
+
+       ihost->sas_ha.dev = &ihost->pdev->dev;
+       ihost->sas_ha.lldd_ha = ihost;
+
+       /*
+        * gather the default USER parameters and store them in the
+        * controller object; OEM parameters are applied below
+        */
+       isci_user_parameters_get(&sci_user_params);
+       status = sci_user_parameters_set(ihost, &sci_user_params);
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: sci_user_parameters_set failed\n",
+                        __func__);
+               return -ENODEV;
+       }
+
+       /* grab any OEM parameters specified in orom */
+       if (pci_info->orom) {
+               status = isci_parse_oem_parameters(&ihost->oem_parameters,
+                                                  pci_info->orom,
+                                                  ihost->id);
+               if (status != SCI_SUCCESS) {
+                       dev_warn(&ihost->pdev->dev,
+                                "parsing firmware oem parameters failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       status = sci_oem_parameters_set(ihost);
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                               "%s: sci_oem_parameters_set failed\n",
+                               __func__);
+               return -ENODEV;
+       }
+
+       tasklet_init(&ihost->completion_tasklet,
+                    isci_host_completion_routine, (unsigned long)ihost);
+
+       INIT_LIST_HEAD(&ihost->requests_to_complete);
+       INIT_LIST_HEAD(&ihost->requests_to_errorback);
+
+       spin_lock_irq(&ihost->scic_lock);
+       status = sci_controller_initialize(ihost);
+       spin_unlock_irq(&ihost->scic_lock);
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: sci_controller_initialize failed -"
+                        " status = 0x%x\n",
+                        __func__, status);
+               return -ENODEV;
+       }
+
+       err = sci_controller_mem_init(ihost);
+       if (err)
+               return err;
+
+       for (i = 0; i < SCI_MAX_PORTS; i++)
+               isci_port_init(&ihost->ports[i], ihost, i);
+
+       for (i = 0; i < SCI_MAX_PHYS; i++)
+               isci_phy_init(&ihost->phys[i], ihost, i);
+
+       for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+               struct isci_remote_device *idev = &ihost->devices[i];
+
+               INIT_LIST_HEAD(&idev->reqs_in_process);
+               INIT_LIST_HEAD(&idev->node);
+       }
+
+       for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+               struct isci_request *ireq;
+               dma_addr_t dma;
+
+               ireq = dmam_alloc_coherent(&ihost->pdev->dev,
+                                          sizeof(struct isci_request), &dma,
+                                          GFP_KERNEL);
+               if (!ireq)
+                       return -ENOMEM;
+
+               ireq->tc = &ihost->task_context_table[i];
+               ireq->owning_controller = ihost;
+               spin_lock_init(&ireq->state_lock);
+               ireq->request_daddr = dma;
+               ireq->isci_host = ihost;
+               ihost->reqs[i] = ireq;
+       }
+
+       return 0;
+}
+
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+                           struct isci_phy *iphy)
+{
+       switch (ihost->sm.current_state_id) {
+       case SCIC_STARTING:
+               sci_del_timer(&ihost->phy_timer);
+               ihost->phy_startup_timer_pending = false;
+               ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+                                                 iport, iphy);
+               sci_controller_start_next_phy(ihost);
+               break;
+       case SCIC_READY:
+               ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+                                                 iport, iphy);
+               break;
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: SCIC Controller linkup event from phy %d in "
+                       "unexpected state %d\n", __func__, iphy->phy_index,
+                       ihost->sm.current_state_id);
+       }
+}
+
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+                             struct isci_phy *iphy)
+{
+       switch (ihost->sm.current_state_id) {
+       case SCIC_STARTING:
+       case SCIC_READY:
+               ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
+                                                  iport, iphy);
+               break;
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: SCIC Controller linkdown event from phy %d in "
+                       "unexpected state %d\n",
+                       __func__,
+                       iphy->phy_index,
+                       ihost->sm.current_state_id);
+       }
+}
+
+static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+{
+       u32 index;
+
+       for (index = 0; index < ihost->remote_node_entries; index++) {
+               if ((ihost->device_table[index] != NULL) &&
+                  (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
+                       return true;
+       }
+
+       return false;
+}
+
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+                                         struct isci_remote_device *idev)
+{
+       if (ihost->sm.current_state_id != SCIC_STOPPING) {
+               dev_dbg(&ihost->pdev->dev,
+                       "SCIC Controller 0x%p remote device stopped event "
+                       "from device 0x%p in unexpected state %d\n",
+                       ihost, idev,
+                       ihost->sm.current_state_id);
+               return;
+       }
+
+       if (!sci_controller_has_remote_devices_stopping(ihost))
+               sci_change_state(&ihost->sm, SCIC_STOPPED);
+}
+
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
+{
+       dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+               __func__, ihost->id, request);
+
+       writel(request, &ihost->smu_registers->post_context_port);
+}
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
+{
+       u16 task_index;
+       u16 task_sequence;
+
+       task_index = ISCI_TAG_TCI(io_tag);
+
+       if (task_index < ihost->task_context_entries) {
+               struct isci_request *ireq = ihost->reqs[task_index];
+
+               if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
+                       task_sequence = ISCI_TAG_SEQ(io_tag);
+
+                       if (task_sequence == ihost->io_request_sequence[task_index])
+                               return ireq;
+               }
+       }
+
+       return NULL;
+}
+
+/**
+ * This method allocates a remote node index and reserves the remote node
+ *    context space for use.  This method can fail if there are no more
+ *    remote node indexes available.
+ * @ihost: This is the controller object which contains the set of
+ *    free remote node ids
+ * @idev: This is the device object which is requesting a remote node
+ *    id
+ * @node_id: This is the remote node id that is assigned to the device if one
+ *    is available
+ *
+ * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there is no remote
+ * node index available.
+ */
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+                                                           struct isci_remote_device *idev,
+                                                           u16 *node_id)
+{
+       u16 node_index;
+       u32 remote_node_count = sci_remote_device_node_count(idev);
+
+       node_index = sci_remote_node_table_allocate_remote_node(
+               &ihost->available_remote_nodes, remote_node_count
+               );
+
+       if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+               ihost->device_table[node_index] = idev;
+
+               *node_id = node_index;
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+                                            struct isci_remote_device *idev,
+                                            u16 node_id)
+{
+       u32 remote_node_count = sci_remote_device_node_count(idev);
+
+       if (ihost->device_table[node_id] == idev) {
+               ihost->device_table[node_id] = NULL;
+
+               sci_remote_node_table_release_remote_node_index(
+                       &ihost->available_remote_nodes, remote_node_count, node_id
+                       );
+       }
+}
+
+void sci_controller_copy_sata_response(void *response_buffer,
+                                      void *frame_header,
+                                      void *frame_buffer)
+{
+       /* XXX type safety? */
+       memcpy(response_buffer, frame_header, sizeof(u32));
+
+       memcpy(response_buffer + sizeof(u32),
+              frame_buffer,
+              sizeof(struct dev_to_host_fis) - sizeof(u32));
+}
+
+void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
+{
+       if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
+               writel(ihost->uf_control.get,
+                       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+       u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
+
+       ihost->tci_pool[tail] = tci;
+       ihost->tci_tail = tail + 1;
+}
+
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+       u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
+       u16 tci = ihost->tci_pool[head];
+
+       ihost->tci_head = head + 1;
+       return tci;
+}
+
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+       return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
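+
+/*
+ * Illustrative note (not in the original source): tci_pool[] acts as a ring
+ * of free task context indices.  isci_tci_alloc() consumes an entry at
+ * tci_head, isci_tci_free() returns one at tci_tail, and both indices are
+ * masked with SCI_MAX_IO_REQUESTS-1 (a power of two) at use, so CIRC_SPACE()
+ * can gauge how many tags remain allocatable.
+ */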
+
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+       if (isci_tci_space(ihost)) {
+               u16 tci = isci_tci_alloc(ihost);
+               u8 seq = ihost->io_request_sequence[tci];
+
+               return ISCI_TAG(seq, tci);
+       }
+
+       return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+       u16 tci = ISCI_TAG_TCI(io_tag);
+       u16 seq = ISCI_TAG_SEQ(io_tag);
+
+       /* prevent tail from passing head */
+       if (isci_tci_active(ihost) == 0)
+               return SCI_FAILURE_INVALID_IO_TAG;
+
+       if (seq == ihost->io_request_sequence[tci]) {
+               ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+               isci_tci_free(ihost, tci);
+
+               return SCI_SUCCESS;
+       }
+       return SCI_FAILURE_INVALID_IO_TAG;
+}
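+
+/*
+ * Hedged usage sketch (illustration only, not part of the driver):
+ *
+ *     u16 tag = isci_alloc_tag(ihost);
+ *
+ *     if (tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+ *             ireq->io_tag = tag;
+ *             ...
+ *             isci_free_tag(ihost, ireq->io_tag);
+ *     }
+ *
+ * isci_free_tag() bumps io_request_sequence[tci] so that any stale copy of
+ * the old tag is rejected by sci_request_by_tag().
+ */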
+
+enum sci_status sci_controller_start_io(struct isci_host *ihost,
+                                       struct isci_remote_device *idev,
+                                       struct isci_request *ireq)
+{
+       enum sci_status status;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_remote_device_start_io(ihost, idev, ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       set_bit(IREQ_ACTIVE, &ireq->flags);
+       sci_controller_post_request(ihost, ireq->post_context);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
+                                                struct isci_remote_device *idev,
+                                                struct isci_request *ireq)
+{
+       /* terminate an ongoing (i.e. started) core IO request.  This does not
+        * abort the IO request at the target, but rather removes the IO
+        * request from the host controller.
+        */
+       enum sci_status status;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev,
+                        "invalid state to terminate request\n");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_io_request_terminate(ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       /*
+        * Utilize the original post context command and OR in the
+        * POST_TC_ABORT request sub-type.
+        */
+       sci_controller_post_request(ihost,
+                                   ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_complete_io() - This method will perform core specific
+ *    completion operations for an IO request.  After this method is invoked,
+ *    the user should consider the IO request as invalid until it is properly
+ *    reused (i.e. re-constructed).
+ * @ihost: The handle to the controller object for which to complete the
+ *    IO request.
+ * @idev: The handle to the remote device object for which to complete
+ *    the IO request.
+ * @ireq: the handle to the io request object to complete.
+ */
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+                                          struct isci_remote_device *idev,
+                                          struct isci_request *ireq)
+{
+       enum sci_status status;
+       u16 index;
+
+       switch (ihost->sm.current_state_id) {
+       case SCIC_STOPPING:
+               /* XXX: Implement this function */
+               return SCI_FAILURE;
+       case SCIC_READY:
+               status = sci_remote_device_complete_io(ihost, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               index = ISCI_TAG_TCI(ireq->io_tag);
+               clear_bit(IREQ_ACTIVE, &ireq->flags);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
+{
+       struct isci_host *ihost = ireq->owning_controller;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       set_bit(IREQ_ACTIVE, &ireq->flags);
+       sci_controller_post_request(ihost, ireq->post_context);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_start_task() - This method is called by the SCIC user to
+ *    send/start a framework task management request.
+ * @ihost: the handle to the controller object for which to start the task
+ *    management request.
+ * @idev: the handle to the remote device object for which to start
+ *    the task management request.
+ * @ireq: the handle to the task request object to start.
+ */
+enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+                                              struct isci_remote_device *idev,
+                                              struct isci_request *ireq)
+{
+       enum sci_status status;
+
+       if (ihost->sm.current_state_id != SCIC_READY) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC Controller starting task from invalid "
+                        "state\n",
+                        __func__);
+               return SCI_TASK_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_remote_device_start_task(ihost, idev, ireq);
+       switch (status) {
+       case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
+               set_bit(IREQ_ACTIVE, &ireq->flags);
+
+               /*
+                * We will let the framework know this task request started
+                * successfully, although the core is still working on starting
+                * the request (to post the tc when the RNC is resumed).
+                */
+               return SCI_SUCCESS;
+       case SCI_SUCCESS:
+               set_bit(IREQ_ACTIVE, &ireq->flags);
+               sci_controller_post_request(ihost, ireq->post_context);
+               break;
+       default:
+               break;
+       }
+
+       return status;
+}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644 (file)
index 0000000..062101a
--- /dev/null
@@ -0,0 +1,542 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _SCI_HOST_H_
+#define _SCI_HOST_H_
+
+#include "remote_device.h"
+#include "phy.h"
+#include "isci.h"
+#include "remote_node_table.h"
+#include "registers.h"
+#include "unsolicited_frame_control.h"
+#include "probe_roms.h"
+
+struct isci_request;
+struct scu_task_context;
+
+
+/**
+ * struct sci_power_control -
+ *
+ * This structure defines the fields for managing power control for direct
+ * attached disk devices.
+ */
+struct sci_power_control {
+       /**
+        * This field is set when the power control timer is running and cleared when
+        * it is not.
+        */
+       bool timer_started;
+
+       /**
+        * Timer to control when the directed attached disks can consume power.
+        */
+       struct sci_timer timer;
+
+       /**
+        * This field is used to keep track of how many phys are put into the
+        * requesters field.
+        */
+       u8 phys_waiting;
+
+       /**
+        * This field is used to keep track of how many phys have been granted
+        * permission to consume power.
+        */
+       u8 phys_granted_power;
+
+       /**
+        * This field is an array of phys that we are waiting on. The phys are
+        * directly mapped into requesters via struct sci_phy.phy_index.
+        */
+       struct isci_phy *requesters[SCI_MAX_PHYS];
+
+};
+
+struct sci_port_configuration_agent;
+typedef void (*port_config_fn)(struct isci_host *,
+                              struct sci_port_configuration_agent *,
+                              struct isci_port *, struct isci_phy *);
+
+struct sci_port_configuration_agent {
+       u16 phy_configured_mask;
+       u16 phy_ready_mask;
+       struct {
+               u8 min_index;
+               u8 max_index;
+       } phy_valid_port_range[SCI_MAX_PHYS];
+       bool timer_pending;
+       port_config_fn link_up_handler;
+       port_config_fn link_down_handler;
+       struct sci_timer        timer;
+};
+
+/**
+ * isci_host - primary host/controller object
+ * @timer: timeout start/stop operations
+ * @device_table: rni (hw remote node index) to remote device lookup table
+ * @available_remote_nodes: rni allocator
+ * @power_control: manage device spin up
+ * @io_request_sequence: generation number for tci's (task contexts)
+ * @task_context_table: hw task context table
+ * @remote_node_context_table: hw remote node context table
+ * @completion_queue: hw-producer driver-consumer communication ring
+ * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
+ * @logical_port_entries: min({driver|silicon}-supported-port-count)
+ * @remote_node_entries: min({driver|silicon}-supported-node-count)
+ * @task_context_entries: min({driver|silicon}-supported-task-count)
+ * @phy_timer: phy startup timer
+ * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
+ *                   the phy index is set so further notifications are not
+ *                   made.  Once the phy reports link up and is made part of a
+ *                   port then this bit is cleared.
+ */
+struct isci_host {
+       struct sci_base_state_machine sm;
+       /* XXX can we time this externally */
+       struct sci_timer timer;
+       /* XXX drop reference module params directly */
+       struct sci_user_parameters user_parameters;
+       /* XXX no need to be a union */
+       struct sci_oem_params oem_parameters;
+       struct sci_port_configuration_agent port_agent;
+       struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
+       struct sci_remote_node_table available_remote_nodes;
+       struct sci_power_control power_control;
+       u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
+       struct scu_task_context *task_context_table;
+       dma_addr_t task_context_dma;
+       union scu_remote_node_context *remote_node_context_table;
+       u32 *completion_queue;
+       u32 completion_queue_get;
+       u32 logical_port_entries;
+       u32 remote_node_entries;
+       u32 task_context_entries;
+       struct sci_unsolicited_frame_control uf_control;
+
+       /* phy startup */
+       struct sci_timer phy_timer;
+       /* XXX kill */
+       bool phy_startup_timer_pending;
+       u32 next_phy_to_start;
+       /* XXX convert to unsigned long and use bitops */
+       u8 invalid_phy_mask;
+
+       /* TODO attempt dynamic interrupt coalescing scheme */
+       u16 interrupt_coalesce_number;
+       u32 interrupt_coalesce_timeout;
+       struct smu_registers __iomem *smu_registers;
+       struct scu_registers __iomem *scu_registers;
+
+       u16 tci_head;
+       u16 tci_tail;
+       u16 tci_pool[SCI_MAX_IO_REQUESTS];
+
+       int id; /* unique within a given pci device */
+       struct isci_phy phys[SCI_MAX_PHYS];
+       struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+       struct sas_ha_struct sas_ha;
+
+       spinlock_t state_lock;
+       struct pci_dev *pdev;
+       enum isci_status status;
+       #define IHOST_START_PENDING 0
+       #define IHOST_STOP_PENDING 1
+       unsigned long flags;
+       wait_queue_head_t eventq;
+       struct Scsi_Host *shost;
+       struct tasklet_struct completion_tasklet;
+       struct list_head requests_to_complete;
+       struct list_head requests_to_errorback;
+       spinlock_t scic_lock;
+       struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
+       struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
+};
+
+/**
+ * enum sci_controller_states - This enumeration depicts all the states
+ *    for the common controller state machine.
+ */
+enum sci_controller_states {
+       /**
+        * Simply the initial state for the base controller state machine.
+        */
+       SCIC_INITIAL = 0,
+
+       /**
+        * This state indicates that the controller is reset.  The memory for
+        * the controller is in its initial state, but the controller requires
+        * initialization.
+        * This state is entered from the INITIAL state.
+        * This state is entered from the RESETTING state.
+        */
+       SCIC_RESET,
+
+       /**
+        * This state is typically an action state that indicates the controller
+        * is in the process of initialization.  In this state no new IO operations
+        * are permitted.
+        * This state is entered from the RESET state.
+        */
+       SCIC_INITIALIZING,
+
+       /**
+        * This state indicates that the controller has been successfully
+        * initialized.  In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZING state.
+        */
+       SCIC_INITIALIZED,
+
+       /**
+        * This state indicates that the controller is in the process of becoming
+        * ready (i.e. starting).  In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZED state.
+        */
+       SCIC_STARTING,
+
+       /**
+        * This state indicates the controller is now ready.  Thus, the user
+        * is able to perform IO operations on the controller.
+        * This state is entered from the STARTING state.
+        */
+       SCIC_READY,
+
+       /**
+        * This state is typically an action state that indicates the controller
+        * is in the process of resetting.  Thus, the user is unable to perform
+        * IO operations on the controller.  A reset is considered destructive in
+        * most cases.
+        * This state is entered from the READY state.
+        * This state is entered from the FAILED state.
+        * This state is entered from the STOPPED state.
+        */
+       SCIC_RESETTING,
+
+       /**
+        * This state indicates that the controller is in the process of stopping.
+        * In this state no new IO operations are permitted, but existing IO
+        * operations are allowed to complete.
+        * This state is entered from the READY state.
+        */
+       SCIC_STOPPING,
+
+       /**
+        * This state indicates that the controller has successfully been stopped.
+        * In this state no new IO operations are permitted.
+        * This state is entered from the STOPPING state.
+        */
+       SCIC_STOPPED,
+
+       /**
+        * This state indicates that the controller could not successfully be
+        * initialized.  In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZING state.
+        * This state is entered from the STARTING state.
+        * This state is entered from the STOPPING state.
+        * This state is entered from the RESETTING state.
+        */
+       SCIC_FAILED,
+};
+
+/**
+ * struct isci_pci_info - This structure represents the pci function
+ *    containing the controllers. Depending on the PCI SKU, there can be up
+ *    to 2 controllers in the PCI function.
+ */
+#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
+
+struct isci_pci_info {
+       struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
+       struct isci_host *hosts[SCI_MAX_CONTROLLERS];
+       struct isci_orom *orom;
+};
+
+static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
+{
+       return pci_get_drvdata(pdev);
+}
+
+#define for_each_isci_host(id, ihost, pdev) \
+       for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
+            id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
+            ihost = to_pci_info(pdev)->hosts[++id])
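+
+/*
+ * Illustrative usage (not in the original source):
+ *
+ *     int i;
+ *     struct isci_host *ihost;
+ *
+ *     for_each_isci_host(i, ihost, pdev)
+ *             scsi_scan_host(ihost->shost);
+ *
+ * The loop visits each populated hosts[] slot in order and stops at the
+ * first NULL entry or after SCI_MAX_CONTROLLERS iterations.
+ */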
+
+static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
+{
+       return isci_host->status;
+}
+
+static inline void isci_host_change_state(struct isci_host *isci_host,
+                                         enum isci_status status)
+{
+       unsigned long flags;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_host = %p, state = 0x%x",
+               __func__,
+               isci_host,
+               status);
+       spin_lock_irqsave(&isci_host->state_lock, flags);
+       isci_host->status = status;
+       spin_unlock_irqrestore(&isci_host->state_lock, flags);
+
+}
+
+static inline void wait_for_start(struct isci_host *ihost)
+{
+       wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_stop(struct isci_host *ihost)
+{
+       wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
+}
+
+static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
+}
+
+static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
+{
+       return dev->port->ha->lldd_ha;
+}
+
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
+
+/* see isci_alloc_tag|isci_free_tag for how seq and tci are built */
+#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
+
+/* these are returned by the hardware, so sanitize them */
+#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
+#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
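+
+/*
+ * Illustrative note (not in the original source): with SCI_MAX_SEQ = 16 and
+ * SCI_MAX_IO_REQUESTS = 256, a tag carries the 4-bit sequence number in bits
+ * 15:12 and the 8-bit task context index in bits 7:0.  For example,
+ * ISCI_TAG(3, 0x2a) == 0x302a, ISCI_TAG_SEQ(0x302a) == 3, and
+ * ISCI_TAG_TCI(0x302a) == 0x2a.
+ */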
+
+/* expander attached sata devices require 3 rnc slots */
+static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
+{
+       struct domain_device *dev = idev->domain_dev;
+
+       if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+           !idev->is_direct_attached)
+               return SCU_STP_REMOTE_NODE_COUNT;
+       return SCU_SSP_REMOTE_NODE_COUNT;
+}
+
+/**
+ * sci_controller_clear_invalid_phy() -
+ *
+ * This macro will clear the bit in the invalid phy mask for this controller
+ * object.  This is used to control messages reported for invalid link up
+ * notifications.
+ */
+#define sci_controller_clear_invalid_phy(controller, phy) \
+       ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
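+
+/*
+ * Illustrative usage (not in the original source):
+ * sci_controller_clear_invalid_phy(ihost, iphy) clears bit iphy->phy_index
+ * in ihost->invalid_phy_mask, re-arming the invalid link up notification
+ * for that phy (see the isci_host kernel-doc above).
+ */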
+
+static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+       if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
+               return NULL;
+
+       return &iphy->isci_port->isci_host->pdev->dev;
+}
+
+static inline struct device *sciport_to_dev(struct isci_port *iport)
+{
+       if (!iport || !iport->isci_host)
+               return NULL;
+
+       return &iport->isci_host->pdev->dev;
+}
+
+static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
+{
+       if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
+               return NULL;
+
+       return &idev->isci_port->isci_host->pdev->dev;
+}
+
+static inline bool is_a2(struct pci_dev *pdev)
+{
+       return pdev->revision < 4;
+}
+
+static inline bool is_b0(struct pci_dev *pdev)
+{
+       return pdev->revision == 4;
+}
+
+static inline bool is_c0(struct pci_dev *pdev)
+{
+       return pdev->revision >= 5;
+}
+
+void sci_controller_post_request(struct isci_host *ihost,
+                                     u32 request);
+void sci_controller_release_frame(struct isci_host *ihost,
+                                      u32 frame_index);
+void sci_controller_copy_sata_response(void *response_buffer,
+                                           void *frame_header,
+                                           void *frame_buffer);
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+                                                                struct isci_remote_device *idev,
+                                                                u16 *node_id);
+void sci_controller_free_remote_node_context(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       u16 node_id);
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost,
+                                            u16 io_tag);
+
+void sci_controller_power_control_queue_insert(
+       struct isci_host *ihost,
+       struct isci_phy *iphy);
+
+void sci_controller_power_control_queue_remove(
+       struct isci_host *ihost,
+       struct isci_phy *iphy);
+
+void sci_controller_link_up(
+       struct isci_host *ihost,
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+void sci_controller_link_down(
+       struct isci_host *ihost,
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+void sci_controller_remote_device_stopped(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev);
+
+void sci_controller_copy_task_context(
+       struct isci_host *ihost,
+       struct isci_request *ireq);
+
+void sci_controller_register_setup(struct isci_host *ihost);
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq);
+int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
+void isci_host_scan_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);
+
+int isci_host_init(struct isci_host *);
+
+void isci_host_init_controller_names(
+       struct isci_host *isci_host,
+       unsigned int controller_idx);
+
+void isci_host_deinit(
+       struct isci_host *);
+
+void isci_host_port_link_up(
+       struct isci_host *,
+       struct isci_port *,
+       struct isci_phy *);
+int isci_host_dev_found(struct domain_device *);
+
+void isci_host_remote_device_start_complete(
+       struct isci_host *,
+       struct isci_remote_device *,
+       enum sci_status);
+
+void sci_controller_disable_interrupts(
+       struct isci_host *ihost);
+
+enum sci_status sci_controller_start_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_task_status sci_controller_start_task(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_controller_terminate_request(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_controller_complete_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+void sci_port_configuration_agent_construct(
+       struct sci_port_configuration_agent *port_agent);
+
+enum sci_status sci_port_configuration_agent_initialize(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent);
+#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644 (file)
index 0000000..61e0d09
--- /dev/null
@@ -0,0 +1,565 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/efi.h>
+#include <asm/string.h>
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static struct scsi_transport_template *isci_transport_template;
+
+static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
+       { PCI_VDEVICE(INTEL, 0x1D61),},
+       { PCI_VDEVICE(INTEL, 0x1D63),},
+       { PCI_VDEVICE(INTEL, 0x1D65),},
+       { PCI_VDEVICE(INTEL, 0x1D67),},
+       { PCI_VDEVICE(INTEL, 0x1D69),},
+       { PCI_VDEVICE(INTEL, 0x1D6B),},
+       { PCI_VDEVICE(INTEL, 0x1D60),},
+       { PCI_VDEVICE(INTEL, 0x1D62),},
+       { PCI_VDEVICE(INTEL, 0x1D64),},
+       { PCI_VDEVICE(INTEL, 0x1D66),},
+       { PCI_VDEVICE(INTEL, 0x1D68),},
+       { PCI_VDEVICE(INTEL, 0x1D6A),},
+       {}
+};
+
+MODULE_DEVICE_TABLE(pci, isci_id_table);
+
+/* linux isci specific settings */
+
+unsigned char no_outbound_task_to = 20;
+module_param(no_outbound_task_to, byte, 0);
+MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
+
+u16 ssp_max_occ_to = 20;
+module_param(ssp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
+
+u16 stp_max_occ_to = 5;
+module_param(stp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
+
+u16 ssp_inactive_to = 5;
+module_param(ssp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
+
+u16 stp_inactive_to = 5;
+module_param(stp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
+
+unsigned char phy_gen = 3;
+module_param(phy_gen, byte, 0);
+MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
+
+unsigned char max_concurr_spinup = 1;
+module_param(max_concurr_spinup, byte, 0);
+MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+
+static struct scsi_host_template isci_sht = {
+
+       .module                         = THIS_MODULE,
+       .name                           = DRV_NAME,
+       .proc_name                      = DRV_NAME,
+       .queuecommand                   = sas_queuecommand,
+       .target_alloc                   = sas_target_alloc,
+       .slave_configure                = sas_slave_configure,
+       .slave_destroy                  = sas_slave_destroy,
+       .scan_finished                  = isci_host_scan_finished,
+       .scan_start                     = isci_host_scan_start,
+       .change_queue_depth             = sas_change_queue_depth,
+       .change_queue_type              = sas_change_queue_type,
+       .bios_param                     = sas_bios_param,
+       .can_queue                      = ISCI_CAN_QUEUE_VAL,
+       .cmd_per_lun                    = 1,
+       .this_id                        = -1,
+       .sg_tablesize                   = SG_ALL,
+       .max_sectors                    = SCSI_DEFAULT_MAX_SECTORS,
+       .use_clustering                 = ENABLE_CLUSTERING,
+       .eh_device_reset_handler        = sas_eh_device_reset_handler,
+       .eh_bus_reset_handler           = isci_bus_reset_handler,
+       .slave_alloc                    = sas_slave_alloc,
+       .target_destroy                 = sas_target_destroy,
+       .ioctl                          = sas_ioctl,
+};
+
+static struct sas_domain_function_template isci_transport_ops  = {
+
+       /* The class calls these to notify the LLDD of an event. */
+       .lldd_port_formed       = isci_port_formed,
+       .lldd_port_deformed     = isci_port_deformed,
+
+       /* The class calls these when a device is found or gone. */
+       .lldd_dev_found         = isci_remote_device_found,
+       .lldd_dev_gone          = isci_remote_device_gone,
+
+       .lldd_execute_task      = isci_task_execute_task,
+       /* Task Management Functions. Must be called from process context. */
+       .lldd_abort_task        = isci_task_abort_task,
+       .lldd_abort_task_set    = isci_task_abort_task_set,
+       .lldd_clear_aca         = isci_task_clear_aca,
+       .lldd_clear_task_set    = isci_task_clear_task_set,
+       .lldd_I_T_nexus_reset   = isci_task_I_T_nexus_reset,
+       .lldd_lu_reset          = isci_task_lu_reset,
+       .lldd_query_task        = isci_task_query_task,
+
+       /* Port and Adapter management */
+       .lldd_clear_nexus_port  = isci_task_clear_nexus_port,
+       .lldd_clear_nexus_ha    = isci_task_clear_nexus_ha,
+
+       /* Phy management */
+       .lldd_control_phy       = isci_phy_control,
+};
+
+
+/******************************************************************************
+* P R O T E C T E D  M E T H O D S
+******************************************************************************/
+
+
+
+/**
+ * isci_register_sas_ha() - This method initializes various lldd
+ *    specific members of the sas_ha struct and calls the libsas
+ *    sas_register_ha() function.
+ * @isci_host: This parameter specifies the lldd specific wrapper for the
+ *    libsas sas_ha struct.
+ *
+ * This method returns an error code indicating success or failure. The user
+ * should check for a possible memory allocation error return; otherwise, a
+ * zero indicates success.
+ */
+static int isci_register_sas_ha(struct isci_host *isci_host)
+{
+       int i;
+       struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
+       struct asd_sas_phy **sas_phys;
+       struct asd_sas_port **sas_ports;
+
+       sas_phys = devm_kzalloc(&isci_host->pdev->dev,
+                               SCI_MAX_PHYS * sizeof(void *),
+                               GFP_KERNEL);
+       if (!sas_phys)
+               return -ENOMEM;
+
+       sas_ports = devm_kzalloc(&isci_host->pdev->dev,
+                                SCI_MAX_PORTS * sizeof(void *),
+                                GFP_KERNEL);
+       if (!sas_ports)
+               return -ENOMEM;
+
+       /*----------------- Libsas Initialization Stuff----------------------
+        * Set various fields in the sas_ha struct:
+        */
+
+       sas_ha->sas_ha_name = DRV_NAME;
+       sas_ha->lldd_module = THIS_MODULE;
+       sas_ha->sas_addr    = &isci_host->phys[0].sas_addr[0];
+
+       /* set the array of phy and port structs.  */
+       for (i = 0; i < SCI_MAX_PHYS; i++) {
+               sas_phys[i] = &isci_host->phys[i].sas_phy;
+               sas_ports[i] = &isci_host->ports[i].sas_port;
+       }
+
+       sas_ha->sas_phy  = sas_phys;
+       sas_ha->sas_port = sas_ports;
+       sas_ha->num_phys = SCI_MAX_PHYS;
+
+       sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
+       sas_ha->lldd_max_execute_num = 1;
+       sas_ha->strict_wide_ports = 1;
+
+       sas_register_ha(sas_ha);
+
+       return 0;
+}
+
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+       struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+static void isci_unregister(struct isci_host *isci_host)
+{
+       struct Scsi_Host *shost;
+
+       if (!isci_host)
+               return;
+
+       shost = isci_host->shost;
+       device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
+
+       sas_unregister_ha(&isci_host->sas_ha);
+
+       sas_remove_host(isci_host->shost);
+       scsi_remove_host(isci_host->shost);
+       scsi_host_put(isci_host->shost);
+}
+
+static int __devinit isci_pci_init(struct pci_dev *pdev)
+{
+       int err, bar_num, bar_mask = 0;
+       void __iomem * const *iomap;
+
+       err = pcim_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "failed enable PCI device %s!\n",
+                       pci_name(pdev));
+               return err;
+       }
+
+       for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
+               bar_mask |= 1 << (bar_num * 2);
+
+       err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
+       if (err)
+               return err;
+
+       iomap = pcim_iomap_table(pdev);
+       if (!iomap)
+               return -ENOMEM;
+
+       pci_set_master(pdev);
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err) {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err)
+                       return err;
+       }
+
+       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err) {
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int num_controllers(struct pci_dev *pdev)
+{
+       /* bar size alone can tell us if we are running with a dual controller
+        * part; there is no need to trust revision ids that might be under
+        * broken firmware control
+        */
+       resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
+       resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
+
+       if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
+           smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
+               return SCI_MAX_CONTROLLERS;
+       else
+               return 1;
+}
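+
+/*
+ * Illustrative note (not in the original source): with the BAR sizes from
+ * isci.h (SCI_SCU_BAR_SIZE = 4 MiB, SCI_SMU_BAR_SIZE = 16 KiB), a dual
+ * controller part must expose at least an 8 MiB SCU BAR and a 32 KiB SMU
+ * BAR; anything smaller is treated as a single controller.
+ */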
+
+static int isci_setup_interrupts(struct pci_dev *pdev)
+{
+       int err, i, num_msix;
+       struct isci_host *ihost;
+       struct isci_pci_info *pci_info = to_pci_info(pdev);
+
+       /*
+        *  Determine the number of vectors associated with this
+        *  PCI function.
+        */
+       num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
+
+       for (i = 0; i < num_msix; i++)
+               pci_info->msix_entries[i].entry = i;
+
+       err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
+       if (err)
+               goto intx;
+
+       for (i = 0; i < num_msix; i++) {
+               int id = i / SCI_NUM_MSI_X_INT;
+               struct msix_entry *msix = &pci_info->msix_entries[i];
+               irq_handler_t isr;
+
+               ihost = pci_info->hosts[id];
+               /* odd numbered vectors are error interrupts */
+               if (i & 1)
+                       isr = isci_error_isr;
+               else
+                       isr = isci_msix_isr;
+
+               err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
+                                      DRV_NAME"-msix", ihost);
+               if (!err)
+                       continue;
+
+               dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
+               while (i--) {
+                       id = i / SCI_NUM_MSI_X_INT;
+                       ihost = pci_info->hosts[id];
+                       msix = &pci_info->msix_entries[i];
+                       devm_free_irq(&pdev->dev, msix->vector, ihost);
+               }
+               pci_disable_msix(pdev);
+               goto intx;
+       }
+       return 0;
+
+ intx:
+       for_each_isci_host(i, ihost, pdev) {
+               err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
+                                      IRQF_SHARED, DRV_NAME"-intx", ihost);
+               if (err)
+                       break;
+       }
+       return err;
+}
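+
+/*
+ * Illustrative note (not in the original source): with SCI_NUM_MSI_X_INT = 2,
+ * a dual controller device requests four vectors; vectors 0/1 serve
+ * controller 0 and vectors 2/3 serve controller 1, with each odd vector wired
+ * to isci_error_isr and each even vector to isci_msix_isr.
+ */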
+
+static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
+{
+       struct isci_host *isci_host;
+       struct Scsi_Host *shost;
+       int err;
+
+       isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
+       if (!isci_host)
+               return NULL;
+
+       isci_host->pdev = pdev;
+       isci_host->id = id;
+
+       shost = scsi_host_alloc(&isci_sht, sizeof(void *));
+       if (!shost)
+               return NULL;
+       isci_host->shost = shost;
+
+       err = isci_host_init(isci_host);
+       if (err)
+               goto err_shost;
+
+       SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
+       isci_host->sas_ha.core.shost = shost;
+       shost->transportt = isci_transport_template;
+
+       shost->max_id = ~0;
+       shost->max_lun = ~0;
+       shost->max_cmd_len = MAX_COMMAND_SIZE;
+
+       err = scsi_add_host(shost, &pdev->dev);
+       if (err)
+               goto err_shost;
+
+       err = isci_register_sas_ha(isci_host);
+       if (err)
+               goto err_shost_remove;
+
+       err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
+       if (err)
+               goto err_unregister_ha;
+
+       return isci_host;
+
+ err_unregister_ha:
+       sas_unregister_ha(&(isci_host->sas_ha));
+ err_shost_remove:
+       scsi_remove_host(shost);
+ err_shost:
+       scsi_host_put(shost);
+
+       return NULL;
+}
+
+static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct isci_pci_info *pci_info;
+       int err, i;
+       struct isci_host *isci_host;
+       const struct firmware *fw = NULL;
+       struct isci_orom *orom = NULL;
+       char *source = "(platform)";
+
+       dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
+                pdev->revision);
+
+       pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
+               return -ENOMEM;
+       pci_set_drvdata(pdev, pci_info);
+
+       if (efi_enabled)
+               orom = isci_get_efi_var(pdev);
+
+       if (!orom)
+               orom = isci_request_oprom(pdev);
+
+       for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+               if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+                       dev_warn(&pdev->dev,
+                                "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+                       devm_kfree(&pdev->dev, orom);
+                       orom = NULL;
+                       break;
+               }
+       }
+
+       if (!orom) {
+               source = "(firmware)";
+               orom = isci_request_firmware(pdev, fw);
+               if (!orom) {
+                       /* TODO convert this to WARN_TAINT_ONCE once the
+                        * orom/efi parameter support is widely available
+                        */
+                       dev_warn(&pdev->dev,
+                                "Loading user firmware failed, using default "
+                                "values\n");
+                       dev_warn(&pdev->dev,
+                                "Default OEM configuration being used: 4 "
+                                "narrow ports, and default SAS Addresses\n");
+               }
+       }
+
+       if (orom)
+               dev_info(&pdev->dev,
+                        "OEM SAS parameters (version: %u.%u) loaded %s\n",
+                        (orom->hdr.version & 0xf0) >> 4,
+                        (orom->hdr.version & 0xf), source);
+
+       pci_info->orom = orom;
+
+       err = isci_pci_init(pdev);
+       if (err)
+               return err;
+
+       for (i = 0; i < num_controllers(pdev); i++) {
+               struct isci_host *h = isci_host_alloc(pdev, i);
+
+               if (!h) {
+                       err = -ENOMEM;
+                       goto err_host_alloc;
+               }
+               pci_info->hosts[i] = h;
+       }
+
+       err = isci_setup_interrupts(pdev);
+       if (err)
+               goto err_host_alloc;
+
+       for_each_isci_host(i, isci_host, pdev)
+               scsi_scan_host(isci_host->shost);
+
+       return 0;
+
+ err_host_alloc:
+       for_each_isci_host(i, isci_host, pdev)
+               isci_unregister(isci_host);
+       return err;
+}
+
+static void __devexit isci_pci_remove(struct pci_dev *pdev)
+{
+       struct isci_host *ihost;
+       int i;
+
+       for_each_isci_host(i, ihost, pdev) {
+               isci_unregister(ihost);
+               isci_host_deinit(ihost);
+               sci_controller_disable_interrupts(ihost);
+       }
+}
+
+static struct pci_driver isci_pci_driver = {
+       .name           = DRV_NAME,
+       .id_table       = isci_id_table,
+       .probe          = isci_pci_probe,
+       .remove         = __devexit_p(isci_pci_remove),
+};
+
+static __init int isci_init(void)
+{
+       int err;
+
+       pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+
+       isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
+       if (!isci_transport_template)
+               return -ENOMEM;
+
+       err = pci_register_driver(&isci_pci_driver);
+       if (err)
+               sas_release_transport(isci_transport_template);
+
+       return err;
+}
+
+static __exit void isci_exit(void)
+{
+       pci_unregister_driver(&isci_pci_driver);
+       sas_release_transport(isci_transport_template);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(ISCI_FW_NAME);
+module_init(isci_init);
+module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644 (file)
index 0000000..d1de633
--- /dev/null
@@ -0,0 +1,538 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ISCI_H__
+#define __ISCI_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#define DRV_NAME "isci"
+#define SCI_PCI_BAR_COUNT 2
+#define SCI_NUM_MSI_X_INT 2
+#define SCI_SMU_BAR       0
+#define SCI_SMU_BAR_SIZE  (16*1024)
+#define SCI_SCU_BAR       1
+#define SCI_SCU_BAR_SIZE  (4*1024*1024)
+#define SCI_IO_SPACE_BAR0 2
+#define SCI_IO_SPACE_BAR1 3
+#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
+#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
+
+#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
+
+#define SCI_MAX_PHYS  (4UL)
+#define SCI_MAX_PORTS SCI_MAX_PHYS
+#define SCI_MAX_SMP_PHYS  (384) /* not silicon constrained */
+#define SCI_MAX_REMOTE_DEVICES (256UL)
+#define SCI_MAX_IO_REQUESTS (256UL)
+#define SCI_MAX_SEQ (16)
+#define SCI_MAX_MSIX_MESSAGES  (2)
+#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
+#define SCI_MAX_CONTROLLERS 2
+#define SCI_MAX_DOMAINS  SCI_MAX_PORTS
+
+#define SCU_MAX_CRITICAL_NOTIFICATIONS    (384)
+#define SCU_MAX_EVENTS_SHIFT             (7)
+#define SCU_MAX_EVENTS                    (1 << SCU_MAX_EVENTS_SHIFT)
+#define SCU_MAX_UNSOLICITED_FRAMES        (128)
+#define SCU_MAX_COMPLETION_QUEUE_SCRATCH  (128)
+#define SCU_MAX_COMPLETION_QUEUE_ENTRIES  (SCU_MAX_CRITICAL_NOTIFICATIONS \
+                                          + SCU_MAX_EVENTS \
+                                          + SCU_MAX_UNSOLICITED_FRAMES \
+                                          + SCI_MAX_IO_REQUESTS \
+                                          + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
+#define SCU_MAX_COMPLETION_QUEUE_SHIFT   (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
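+
+/*
+ * Illustrative arithmetic (not in the original source): with the defaults
+ * above the completion queue holds 384 + 128 + 128 + 256 + 128 = 1024
+ * entries, so SCU_MAX_COMPLETION_QUEUE_SHIFT evaluates to ilog2(1024) = 10.
+ */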
+
+#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE   (1024)
+#define SCU_INVALID_FRAME_INDEX             (0xFFFF)
+
+#define SCU_IO_REQUEST_MAX_SGE_SIZE         (0x00FFFFFF)
+#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH  (0x00FFFFFF)
+
+static inline void check_sizes(void)
+{
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
+       BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
+       BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
+}
+
+/**
+ * enum sci_status - This is the general return status enumeration for non-IO,
+ *    non-task management related SCI interface methods.
+ *
+ */
+enum sci_status {
+       /**
+        * This member indicates successful completion.
+        */
+       SCI_SUCCESS = 0,
+
+       /**
+        * This value indicates that the calling method completed successfully,
+        * but that the IO may have completed before having its start method
+        * invoked.  This occurs during SAT translation for requests that do
+        * not require an IO to the target or for any other requests that may
+        * be completed without having to submit IO.
+        */
+       SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+
+       /**
+        *  This value indicates that the SCU hardware returned an early response
+        *  because the io request specified more data than is returned by the
+        *  target device (mode pages, inquiry data, etc.). The completion routine
+        *  will handle this case to get the actual number of bytes transferred.
+        */
+       SCI_SUCCESS_IO_DONE_EARLY,
+
+       /**
+        * This member indicates that the object for which a state change is
+        * being requested is already in said state.
+        */
+       SCI_WARNING_ALREADY_IN_STATE,
+
+       /**
+        * This member indicates interrupt coalescence timer may cause SAS
+        * specification compliance issues (i.e. SMP target mode response
+        * frames must be returned within 1.9 milliseconds).
+        */
+       SCI_WARNING_TIMER_CONFLICT,
+
+       /**
+        * This field indicates that a sequence of actions is not yet complete.
+        * This status is mostly used when multiple ATA commands are needed in a
+        * SATI translation.
+        */
+       SCI_WARNING_SEQUENCE_INCOMPLETE,
+
+       /**
+        * This member indicates that there was a general failure.
+        */
+       SCI_FAILURE,
+
+       /**
+        * This member indicates that the SCI implementation is unable to complete
+        * an operation due to a critical flaw that prevents any further operation
+        * (i.e. an invalid pointer).
+        */
+       SCI_FATAL_ERROR,
+
+       /**
+        * This member indicates the calling function failed, because the state
+        * of the controller is in a state that prevents successful completion.
+        */
+       SCI_FAILURE_INVALID_STATE,
+
+       /**
+        * This member indicates the calling function failed, because there is
+        * insufficient resources/memory to complete the request.
+        */
+       SCI_FAILURE_INSUFFICIENT_RESOURCES,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * controller object required for the operation can't be located.
+        */
+       SCI_FAILURE_CONTROLLER_NOT_FOUND,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * discovered controller type is not supported by the library.
+        */
+       SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested initialization data version isn't supported.
+        */
+       SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested configuration of SAS Phys into SAS Ports is not supported.
+        */
+       SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested protocol is not supported by the remote device, port,
+        * or controller.
+        */
+       SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested information type is not supported by the SCI implementation.
+        */
+       SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * device already exists.
+        */
+       SCI_FAILURE_DEVICE_EXISTS,
+
+       /**
+        * This member indicates the calling function failed, because adding
+        * a phy to the object is not possible.
+        */
+       SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
+
+       /**
+        * This member indicates the calling function failed, because the
+        * requested information type is not supported by the SCI implementation.
+        */
+       SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
+
+       /**
+        * This member indicates the calling function failed, because the SCI
+        * implementation does not support the supplied time limit.
+        */
+       SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain the specified Phy.
+        */
+       SCI_FAILURE_INVALID_PHY,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain the specified Port.
+        */
+       SCI_FAILURE_INVALID_PORT,
+
+       /**
+        * This member indicates the calling method was partially successful:
+        * the port was reset, but not all phys in the port are operational.
+        */
+       SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
+
+       /**
+        * This member indicates that the calling method failed:
+        * the port reset did not complete because none of the phys are operational.
+        */
+       SCI_FAILURE_RESET_PORT_FAILURE,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain the specified remote device.
+        */
+       SCI_FAILURE_INVALID_REMOTE_DEVICE,
+
+       /**
+        * This member indicates the calling method failed, because the remote
+        * device is in a bad state and requires a reset.
+        */
+       SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+       /**
+        * This member indicates the calling method failed, because the SCI
+        * implementation does not contain or support the specified IO tag.
+        */
+       SCI_FAILURE_INVALID_IO_TAG,
+
+       /**
+        * This member indicates that the operation failed and the user should
+        * check the response data associated with the IO.
+        */
+       SCI_FAILURE_IO_RESPONSE_VALID,
+
+       /**
+        * This member indicates that the operation failed, the failure is
+        * controller implementation specific, and the response data associated
+        * with the request is not valid.  You can query for the controller-
+        * specific error information via sci_controller_get_request_status().
+        */
+       SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+
+       /**
+        * This member indicates that the operation failed because the
+        * user requested this IO to be terminated.
+        */
+       SCI_FAILURE_IO_TERMINATED,
+
+       /**
+        * This member indicates that the operation failed and the associated
+        * request requires a SCSI abort task to be sent to the target.
+        */
+       SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+
+       /**
+        * This member indicates that the operation failed because the supplied
+        * device could not be located.
+        */
+       SCI_FAILURE_DEVICE_NOT_FOUND,
+
+       /**
+        * This member indicates that the operation failed because the
+        * object's association is required and is not correctly set.
+        */
+       SCI_FAILURE_INVALID_ASSOCIATION,
+
+       /**
+        * This member indicates that the operation failed, because a timeout
+        * occurred.
+        */
+       SCI_FAILURE_TIMEOUT,
+
+       /**
+        * This member indicates that the operation failed, because the user
+        * specified a value that is either invalid or not supported.
+        */
+       SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+       /**
+        * This value indicates that the operation failed, because the
+        * requested number of messages (MSI-X) is not supported.
+        */
+       SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
+
+       /**
+        * This value indicates that the method failed due to a lack of
+        * available NCQ tags.
+        */
+       SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+
+       /**
+        * This value indicates that a protocol violation has occurred on the
+        * link.
+        */
+       SCI_FAILURE_PROTOCOL_VIOLATION,
+
+       /**
+        * This value indicates a failure condition that a retry may clear.
+        */
+       SCI_FAILURE_RETRY_REQUIRED,
+
+       /**
+        * This field indicates that the retry limit was reached when a retry
+        * was attempted.
+        */
+       SCI_FAILURE_RETRY_LIMIT_REACHED,
+
+       /**
+        * This member indicates the calling method was partially successful.
+        * Typically, this status is used when a LUN_RESET issued to an
+        * expander-attached STP device in the READY NCQ substate needs to have
+        * its RNC suspended/resumed before posting the TC.
+        */
+       SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
+
+       /**
+        * This field indicates an illegal phy connection based on the routing
+        * attributes of the two expander phys attached to each other.
+        */
+       SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
+
+       /**
+        * This field indicates that a CONFIG ROUTE INFO command received a
+        * response with the function result INDEX DOES NOT EXIST, which usually
+        * means the maximum route index was exceeded.
+        */
+       SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
+
+       /**
+        * This value indicates that an unsupported PCI device ID has been
+        * specified.  This indicates that attempts to invoke
+        * sci_library_allocate_controller() will fail.
+        */
+       SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
+
+};
+
+/**
+ * enum sci_io_status - This enumeration depicts all of the possible IO
+ *    completion status values.  Each value in this enumeration maps directly
+ *    to a value in the enum sci_status enumeration.  Please refer to that
+ *    enumeration for detailed comments concerning what the status represents.
+ *
+ * Add the API to retrieve the SCU status from the core. Check that the
+ * following statuses are properly handled:
+ * - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
+ * - SCI_IO_FAILURE_INVALID_IO_TAG
+ */
+enum sci_io_status {
+       SCI_IO_SUCCESS                         = SCI_SUCCESS,
+       SCI_IO_FAILURE                         = SCI_FAILURE,
+       SCI_IO_SUCCESS_COMPLETE_BEFORE_START   = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+       SCI_IO_SUCCESS_IO_DONE_EARLY           = SCI_SUCCESS_IO_DONE_EARLY,
+       SCI_IO_FAILURE_INVALID_STATE           = SCI_FAILURE_INVALID_STATE,
+       SCI_IO_FAILURE_INSUFFICIENT_RESOURCES  = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+       SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL    = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+       SCI_IO_FAILURE_RESPONSE_VALID          = SCI_FAILURE_IO_RESPONSE_VALID,
+       SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+       SCI_IO_FAILURE_TERMINATED              = SCI_FAILURE_IO_TERMINATED,
+       SCI_IO_FAILURE_REQUIRES_SCSI_ABORT     = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+       SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+       SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE    = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+       SCI_IO_FAILURE_PROTOCOL_VIOLATION      = SCI_FAILURE_PROTOCOL_VIOLATION,
+
+       SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+       SCI_IO_FAILURE_RETRY_REQUIRED      = SCI_FAILURE_RETRY_REQUIRED,
+       SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
+       SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
+};
+
+/**
+ * enum sci_task_status - This enumeration depicts all of the possible task
+ *    completion status values.  Each value in this enumeration maps directly
+ *    to a value in the enum sci_status enumeration.  Please refer to that
+ *    enumeration for detailed comments concerning what the status represents.
+ *
+ * Check that the following statuses are properly handled:
+ */
+enum sci_task_status {
+       SCI_TASK_SUCCESS                         = SCI_SUCCESS,
+       SCI_TASK_FAILURE                         = SCI_FAILURE,
+       SCI_TASK_FAILURE_INVALID_STATE           = SCI_FAILURE_INVALID_STATE,
+       SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES  = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+       SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL    = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+       SCI_TASK_FAILURE_INVALID_TAG             = SCI_FAILURE_INVALID_IO_TAG,
+       SCI_TASK_FAILURE_RESPONSE_VALID          = SCI_FAILURE_IO_RESPONSE_VALID,
+       SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+       SCI_TASK_FAILURE_TERMINATED              = SCI_FAILURE_IO_TERMINATED,
+       SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+       SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+       SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
+
+};
+
+/**
+ * sci_swab32_cpy - convert between scsi and scu-hardware byte format
+ * @dest: receives the 4-byte endian-swapped version of src
+ * @src: word-aligned source buffer
+ * @word_cnt: number of 32-bit words to swap and copy
+ *
+ * scu hardware handles SSP/SMP control, response, and unidentified
+ * frames in "big endian dword" order.  Regardless of host endianness this
+ * is always a swab32()-per-dword conversion of the standard definition,
+ * i.e. single-byte fields swapped and multi-byte fields in little-endian
+ * order.
+ */
+static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
+{
+       u32 *dest = _dest, *src = _src;
+
+       while (--word_cnt >= 0)
+               dest[word_cnt] = swab32(src[word_cnt]);
+}
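+
+/*
+ * Example (hw_frame is an illustrative name): unloading a 16-byte header
+ * that the hardware delivered in "big endian dword" order.  Each of the
+ * four dwords is byte-reversed, e.g. 0x11223344 becomes 0x44332211:
+ *
+ *	u32 header[4];
+ *
+ *	sci_swab32_cpy(header, hw_frame, 4);
+ */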
+
+extern unsigned char no_outbound_task_to;
+extern u16 ssp_max_occ_to;
+extern u16 stp_max_occ_to;
+extern u16 ssp_inactive_to;
+extern u16 stp_inactive_to;
+extern unsigned char phy_gen;
+extern unsigned char max_concurr_spinup;
+
+irqreturn_t isci_msix_isr(int vec, void *data);
+irqreturn_t isci_intx_isr(int vec, void *data);
+irqreturn_t isci_error_isr(int vec, void *data);
+
+/*
+ * Each timer is associated with a cancellation flag that is set when
+ * del_timer() is called and checked in the timer callback function. This
+ * is needed since del_timer_sync() cannot be called with sci_lock held.
+ * For deinit however, del_timer_sync() is used without holding the lock.
+ */
+struct sci_timer {
+       struct timer_list       timer;
+       bool                    cancel;
+};
+
+static inline
+void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
+{
+       tmr->timer.function = fn;
+       tmr->timer.data = (unsigned long) tmr;
+       tmr->cancel = false;
+       init_timer(&tmr->timer);
+}
+
+static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
+{
+       tmr->cancel = false;
+       mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
+}
+
+static inline void sci_del_timer(struct sci_timer *tmr)
+{
+       tmr->cancel = true;
+       del_timer(&tmr->timer);
+}
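+
+/*
+ * A timer callback installed via sci_init_timer() is expected to take the
+ * host lock and bail out when ->cancel has been set by sci_del_timer();
+ * see phy_sata_timeout() in phy.c for the canonical pattern.  Minimal
+ * sketch (example_timeout, ihost and handle_expiration are illustrative):
+ *
+ *	static void example_timeout(unsigned long data)
+ *	{
+ *		struct sci_timer *tmr = (struct sci_timer *)data;
+ *		unsigned long flags;
+ *
+ *		spin_lock_irqsave(&ihost->scic_lock, flags);
+ *		if (!tmr->cancel)
+ *			handle_expiration(tmr);
+ *		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ *	}
+ */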
+
+struct sci_base_state_machine {
+       const struct sci_base_state *state_table;
+       u32 initial_state_id;
+       u32 current_state_id;
+       u32 previous_state_id;
+};
+
+typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
+
+struct sci_base_state {
+       sci_state_transition_t enter_state;     /* Called on state entry */
+       sci_state_transition_t exit_state;      /* Called on state exit */
+};
+
+extern void sci_init_sm(struct sci_base_state_machine *sm,
+                       const struct sci_base_state *state_table,
+                       u32 initial_state);
+extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
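+
+/*
+ * Usage sketch (example_states, the EX_* ids and obj are illustrative):
+ * the state table is an array of optional entry/exit handlers indexed by
+ * state id, and sci_change_state() is expected to run the exit handler of
+ * the current state followed by the enter handler of the next:
+ *
+ *	static const struct sci_base_state example_states[] = {
+ *		[EX_A] = { .enter_state = ex_a_enter },
+ *		[EX_B] = { .enter_state = ex_b_enter,
+ *			   .exit_state  = ex_b_exit },
+ *	};
+ *
+ *	sci_init_sm(&obj->sm, example_states, EX_A);
+ *	sci_change_state(&obj->sm, EX_B);
+ */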
+#endif  /* __ISCI_H__ */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644 (file)
index 0000000..79313a7
--- /dev/null
@@ -0,0 +1,1312 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "host.h"
+#include "phy.h"
+#include "scu_event_codes.h"
+#include "probe_roms.h"
+
+/* Maximum arbitration wait time in microseconds */
+#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME  (700)
+
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
+{
+       return iphy->max_negotiated_speed;
+}
+
+static enum sci_status
+sci_phy_transport_layer_initialization(struct isci_phy *iphy,
+                                      struct scu_transport_layer_registers __iomem *reg)
+{
+       u32 tl_control;
+
+       iphy->transport_layer_registers = reg;
+
+       writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
+               &iphy->transport_layer_registers->stp_rni);
+
+       /*
+        * Hardware team recommends that we enable the STP prefetch for all
+        * transports
+        */
+       tl_control = readl(&iphy->transport_layer_registers->control);
+       tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
+       writel(tl_control, &iphy->transport_layer_registers->control);
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status
+sci_phy_link_layer_initialization(struct isci_phy *iphy,
+                                 struct scu_link_layer_registers __iomem *reg)
+{
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+       int phy_idx = iphy->phy_index;
+       struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
+       struct sci_phy_oem_params *phy_oem =
+               &ihost->oem_parameters.phys[phy_idx];
+       u32 phy_configuration;
+       struct sci_phy_cap phy_cap;
+       u32 parity_check = 0;
+       u32 parity_count = 0;
+       u32 llctl, link_rate;
+       u32 clksm_value = 0;
+
+       iphy->link_layer_registers = reg;
+
+       /* Set our IDENTIFY frame data */
+       #define SCI_END_DEVICE 0x01
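+       /* device type 0x1 in the SAS IDENTIFY address frame is an end device */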
+
+       writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
+              SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
+              SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
+              SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
+              SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
+              &iphy->link_layer_registers->transmit_identification);
+
+       /* Write the device SAS Address */
+       writel(0xFEDCBA98,
+              &iphy->link_layer_registers->sas_device_name_high);
+       writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+
+       /* Write the source SAS Address */
+       writel(phy_oem->sas_address.high,
+               &iphy->link_layer_registers->source_sas_address_high);
+       writel(phy_oem->sas_address.low,
+               &iphy->link_layer_registers->source_sas_address_low);
+
+       /* Clear and Set the PHY Identifier */
+       writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
+       writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
+               &iphy->link_layer_registers->identify_frame_phy_id);
+
+       /* Change the initial state of the phy configuration register */
+       phy_configuration =
+               readl(&iphy->link_layer_registers->phy_configuration);
+
+       /* Hold OOB state machine in reset */
+       phy_configuration |=  SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+       writel(phy_configuration,
+               &iphy->link_layer_registers->phy_configuration);
+
+       /* Configure the SNW capabilities */
+       phy_cap.all = 0;
+       phy_cap.start = 1;
+       phy_cap.gen3_no_ssc = 1;
+       phy_cap.gen2_no_ssc = 1;
+       phy_cap.gen1_no_ssc = 1;
+       if (ihost->oem_parameters.controller.do_enable_ssc) {
+               phy_cap.gen3_ssc = 1;
+               phy_cap.gen2_ssc = 1;
+               phy_cap.gen1_ssc = 1;
+       }
+
+       /*
+        * The SAS specification indicates that the phy_capabilities that
+        * are transmitted shall have an even parity.  Calculate the parity. */
+       parity_check = phy_cap.all;
+       while (parity_check != 0) {
+               if (parity_check & 0x1)
+                       parity_count++;
+               parity_check >>= 1;
+       }
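+       /* (parity_count is the population count of phy_cap.all; hweight32()
+        * computes the same quantity.)
+        */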
+
+       /*
+        * If the count indicates an odd number of bits set, then
+        * set the parity bit to 1 in the phy capabilities. */
+       if ((parity_count % 2) != 0)
+               phy_cap.parity = 1;
+
+       writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+
+       /* Set the enable spinup period but disable the ability to send
+        * notify enable spinup
+        */
+       writel(SCU_ENSPINUP_GEN_VAL(COUNT,
+                       phy_user->notify_enable_spin_up_insertion_frequency),
+               &iphy->link_layer_registers->notify_enable_spinup_control);
+
+       /* Write the ALIGN insertion frequency for the connected phy state
+        * and the frequency that is independent of the connected state
+        */
+       clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
+                       phy_user->in_connection_align_insertion_frequency);
+
+       clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
+                       phy_user->align_insertion_frequency);
+
+       writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+
+       /* @todo Provide a way to write this register correctly */
+       writel(0x02108421,
+               &iphy->link_layer_registers->afe_lookup_table_control);
+
+       llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
+               (u8)ihost->user_parameters.no_outbound_task_timeout);
+
+       switch (phy_user->max_speed_generation) {
+       case SCIC_SDS_PARM_GEN3_SPEED:
+               link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
+               break;
+       case SCIC_SDS_PARM_GEN2_SPEED:
+               link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
+               break;
+       default:
+               link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
+               break;
+       }
+       llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
+       writel(llctl, &iphy->link_layer_registers->link_layer_control);
+
+       if (is_a2(ihost->pdev)) {
+               /* Program the max ARB time for the PHY to 700us so we interoperate
+                * with the PMC expander, which shuts down PHYs if the expander PHY
+                * generates too many breaks.  This time value will guarantee that
+                * the initiator PHY will generate the break.
+                */
+               writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
+                       &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+       }
+
+       /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
+       writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+
+       /* We can exit the initial state to the stopped state */
+       sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+       return SCI_SUCCESS;
+}
+
+static void phy_sata_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       dev_dbg(sciphy_to_dev(iphy),
+                "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
+                "timeout.\n",
+                __func__,
+                iphy);
+
+       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * phy_get_non_dummy_port - return the port currently containing this phy
+ * @iphy: This parameter specifies the phy for which to retrieve the
+ *    containing port.
+ *
+ * If the phy is currently contained by the dummy port, then the phy is
+ * considered to not be part of a real port and NULL is returned.  Otherwise
+ * a pointer to the containing port is returned.
+ */
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
+{
+       struct isci_port *iport = iphy->owning_port;
+
+       if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
+               return NULL;
+
+       return iphy->owning_port;
+}
+
+/**
+ * sci_phy_set_port - assign a port to the phy object
+ * @iphy: This parameter specifies the phy for which to assign a port.
+ * @iport: This parameter specifies the port object to assign to the phy.
+ */
+void sci_phy_set_port(
+       struct isci_phy *iphy,
+       struct isci_port *iport)
+{
+       iphy->owning_port = iport;
+
+       if (iphy->bcn_received_while_port_unassigned) {
+               iphy->bcn_received_while_port_unassigned = false;
+               sci_port_broadcast_change_received(iphy->owning_port, iphy);
+       }
+}
+
+enum sci_status sci_phy_initialize(struct isci_phy *iphy,
+                                  struct scu_transport_layer_registers __iomem *tl,
+                                  struct scu_link_layer_registers __iomem *ll)
+{
+       /* Perform the initialization of the TL hardware */
+       sci_phy_transport_layer_initialization(iphy, tl);
+
+       /* Perform the initialization of the PE hardware */
+       sci_phy_link_layer_initialization(iphy, ll);
+
+       /* There is nothing that needs to be done in this state; just
+        * transition to the stopped state
+        */
+       sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_phy_setup_transport - assign the direct attached device ID for this phy
+ * @iphy: The phy for which the direct attached device id is to
+ *       be assigned.
+ * @device_id: The direct attached device ID to assign to the phy.
+ *       This will either be the RNi for the device or an invalid RNi if there
+ *       is no current device assigned to the phy.
+ */
+void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
+{
+       u32 tl_control;
+
+       writel(device_id, &iphy->transport_layer_registers->stp_rni);
+
+       /*
+        * The read should guarantee that the first write gets posted
+        * before the next write
+        */
+       tl_control = readl(&iphy->transport_layer_registers->control);
+       tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
+       writel(tl_control, &iphy->transport_layer_registers->control);
+}
+
+static void sci_phy_suspend(struct isci_phy *iphy)
+{
+       u32 scu_sas_pcfg_value;
+
+       scu_sas_pcfg_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+       writel(scu_sas_pcfg_value,
+               &iphy->link_layer_registers->phy_configuration);
+
+       sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+void sci_phy_resume(struct isci_phy *iphy)
+{
+       u32 scu_sas_pcfg_value;
+
+       scu_sas_pcfg_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+       writel(scu_sas_pcfg_value,
+               &iphy->link_layer_registers->phy_configuration);
+}
+
+void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+       sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
+       sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
+}
+
+void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+       struct sas_identify_frame *iaf;
+
+       iaf = &iphy->frame_rcvd.iaf;
+       memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
+}
+
+void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
+{
+       proto->all = readl(&iphy->link_layer_registers->transmit_identification);
+}
+
+enum sci_status sci_phy_start(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       if (state != SCI_PHY_STOPPED) {
+               dev_dbg(sciphy_to_dev(iphy),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_stop(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PHY_SUB_INITIAL:
+       case SCI_PHY_SUB_AWAIT_OSSP_EN:
+       case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_SAS_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_POWER:
+       case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+       case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+       case SCI_PHY_SUB_FINAL:
+       case SCI_PHY_READY:
+               break;
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_reset(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       if (state != SCI_PHY_READY) {
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PHY_SUB_AWAIT_SAS_POWER: {
+               u32 enable_spinup;
+
+               enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+               enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
+               writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
+
+               /* Change state to the final state; this substate machine has run to completion */
+               sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+               return SCI_SUCCESS;
+       }
+       case SCI_PHY_SUB_AWAIT_SATA_POWER: {
+               u32 scu_sas_pcfg_value;
+
+               /* Release the spinup hold state and reset the OOB state machine */
+               scu_sas_pcfg_value =
+                       readl(&iphy->link_layer_registers->phy_configuration);
+               scu_sas_pcfg_value &=
+                       ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+               scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+               writel(scu_sas_pcfg_value,
+                       &iphy->link_layer_registers->phy_configuration);
+
+               /* Now restart the OOB operation */
+               scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+               scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+               writel(scu_sas_pcfg_value,
+                       &iphy->link_layer_registers->phy_configuration);
+
+               /* Transition to the substate that awaits the SATA PHY notification */
+               sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
+
+               return SCI_SUCCESS;
+       }
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
+{
+       /* continue the link training for the phy as if it were a SAS PHY
+        * instead of a SATA PHY. This is done because the completion queue had a SAS
+        * PHY DETECTED event when the state machine was expecting a SATA PHY event.
+        */
+       u32 phy_control;
+
+       phy_control = readl(&iphy->link_layer_registers->phy_configuration);
+       phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
+       writel(phy_control,
+              &iphy->link_layer_registers->phy_configuration);
+
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
+
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+}
+
+static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
+{
+       /* This method continues the link training for the phy as if it were a SATA PHY
+        * instead of a SAS PHY.  This is done because the completion queue had a SATA
+        * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
+        */
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
+
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+}
+
+/**
+ * sci_phy_complete_link_training - perform processing common to
+ *    all protocols upon completion of link training.
+ * @iphy: This parameter specifies the phy object for which link training
+ *    has completed.
+ * @max_link_rate: This parameter specifies the maximum link rate to be
+ *    associated with this phy.
+ * @next_state: This parameter specifies the next state for the phy's starting
+ *    sub-state machine.
+ *
+ */
+static void sci_phy_complete_link_training(struct isci_phy *iphy,
+                                          enum sas_linkrate max_link_rate,
+                                          u32 next_state)
+{
+       iphy->max_negotiated_speed = max_link_rate;
+
+       sci_change_state(&iphy->sm, next_state);
+}
+
+enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PHY_SUB_AWAIT_OSSP_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       sci_phy_start_sas_link_training(iphy);
+                       iphy->is_in_link_training = true;
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       sci_phy_start_sata_link_training(iphy);
+                       iphy->is_in_link_training = true;
+                       break;
+               default:
+                       dev_dbg(sciphy_to_dev(iphy),
+                               "%s: PHY starting substate machine received "
+                               "unexpected event_code %x\n",
+                               __func__,
+                               event_code);
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /*
+                        * Why is this being reported again by the controller?
+                        * We would re-enter this state so just stay here */
+                       break;
+               case SCU_EVENT_SAS_15:
+               case SCU_EVENT_SAS_15_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_IAF_UF);
+                       break;
+               case SCU_EVENT_SAS_30:
+               case SCU_EVENT_SAS_30_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_IAF_UF);
+                       break;
+               case SCU_EVENT_SAS_60:
+               case SCU_EVENT_SAS_60_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_IAF_UF);
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       /*
+                        * We were doing SAS PHY link training and received a SATA PHY event
+                        * continue OOB/SN as if this were a SATA PHY */
+                       sci_phy_start_sata_link_training(iphy);
+                       break;
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__, event_code);
+
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_IAF_UF:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /* Back up the state machine */
+                       sci_phy_start_sas_link_training(iphy);
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       /* We were doing SAS PHY link training and received a
+                        * SATA PHY event continue OOB/SN as if this were a
+                        * SATA PHY
+                        */
+                       sci_phy_start_sata_link_training(iphy);
+                       break;
+               case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+               case SCU_EVENT_LINK_FAILURE:
+               case SCU_EVENT_HARD_RESET_RECEIVED:
+                       /* Start the oob/sn state machine over again */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__, event_code);
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SAS_POWER:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                               "%s: PHY starting substate machine received unexpected "
+                               "event_code %x\n",
+                               __func__,
+                               event_code);
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SATA_POWER:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       /* These events are received every 10ms and are
+                        * expected while in this state
+                        */
+                       break;
+
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /* There has been a change in the phy type before OOB/SN for the
+                        * SATA finished; start down the SAS link training path.
+                        */
+                       sci_phy_start_sas_link_training(iphy);
+                       break;
+
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__, event_code);
+
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               case SCU_EVENT_SATA_SPINUP_HOLD:
+                       /* These events might be received since we don't know how many may be in
+                        * the completion queue while waiting for power
+                        */
+                       break;
+               case SCU_EVENT_SATA_PHY_DETECTED:
+                       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+
+                       /* We have received the SATA PHY notification change state */
+                       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+                       break;
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /* There has been a change in the phy type before OOB/SN for the
+                        * SATA finished; start down the SAS link training path.
+                        */
+                       sci_phy_start_sas_link_training(iphy);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__,
+                                event_code);
+
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SATA_PHY_DETECTED:
+                       /*
+                        * The hardware reports multiple SATA PHY detected events;
+                        * ignore the extras */
+                       break;
+               case SCU_EVENT_SATA_15:
+               case SCU_EVENT_SATA_15_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+                       break;
+               case SCU_EVENT_SATA_30:
+               case SCU_EVENT_SATA_30_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+                       break;
+               case SCU_EVENT_SATA_60:
+               case SCU_EVENT_SATA_60_SSC:
+                       sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+                                                      SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+                       break;
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               case SCU_EVENT_SAS_PHY_DETECTED:
+                       /*
+                        * There has been a change in the phy type before OOB/SN for the
+                        * SATA finished; start down the SAS link training path. */
+                       sci_phy_start_sas_link_training(iphy);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__, event_code);
+
+                       return SCI_FAILURE;
+               }
+
+               return SCI_SUCCESS;
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_SATA_PHY_DETECTED:
+                       /* Back up the state machine */
+                       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+                       break;
+
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected event_code %x\n",
+                                __func__,
+                                event_code);
+
+                       return SCI_FAILURE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_READY:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_LINK_FAILURE:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               case SCU_EVENT_BROADCAST_CHANGE:
+                       /* Broadcast change received. Notify the port. */
+                       if (phy_get_non_dummy_port(iphy) != NULL)
+                               sci_port_broadcast_change_received(iphy->owning_port, iphy);
+                       else
+                               iphy->bcn_received_while_port_unassigned = true;
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: SCIC PHY 0x%p ready state machine received "
+                                "unexpected event_code %x\n",
+                                __func__, iphy, event_code);
+                       return SCI_FAILURE_INVALID_STATE;
+               }
+               return SCI_SUCCESS;
+       case SCI_PHY_RESETTING:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_HARD_RESET_TRANSMITTED:
+                       /* Link failure change state back to the starting state */
+                       sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+                       break;
+               default:
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: SCIC PHY 0x%p resetting state machine received "
+                                "unexpected event_code %x\n",
+                                __func__, iphy, event_code);
+
+                       return SCI_FAILURE_INVALID_STATE;
+               }
+               return SCI_SUCCESS;
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
+{
+       enum sci_phy_states state = iphy->sm.current_state_id;
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+       enum sci_status result;
+       unsigned long flags;
+
+       switch (state) {
+       case SCI_PHY_SUB_AWAIT_IAF_UF: {
+               u32 *frame_words;
+               struct sas_identify_frame iaf;
+
+               result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                 frame_index,
+                                                                 (void **)&frame_words);
+
+               if (result != SCI_SUCCESS)
+                       return result;
+
+               sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
+               if (iaf.frame_type == 0) {
+                       u32 state;
+
+                       spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+                       memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
+                       spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+                       if (iaf.smp_tport) {
+                               /* We got the IAF for an expander PHY; go to the final
+                                * state since there are no power requirements for
+                                * expander phys.
+                                */
+                               state = SCI_PHY_SUB_FINAL;
+                       } else {
+                               /* We got the IAF; we can now go to the await spinup
+                                * semaphore state
+                                */
+                               state = SCI_PHY_SUB_AWAIT_SAS_POWER;
+                       }
+                       sci_change_state(&iphy->sm, state);
+                       result = SCI_SUCCESS;
+               } else
+                       dev_warn(sciphy_to_dev(iphy),
+                               "%s: PHY starting substate machine received "
+                               "unexpected frame id %x\n",
+                               __func__, frame_index);
+
+               sci_controller_release_frame(ihost, frame_index);
+               return result;
+       }
+       case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
+               struct dev_to_host_fis *frame_header;
+               u32 *fis_frame_data;
+
+               result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                 frame_index,
+                                                                 (void **)&frame_header);
+
+               if (result != SCI_SUCCESS)
+                       return result;
+
+               if ((frame_header->fis_type == FIS_REGD2H) &&
+                   !(frame_header->status & ATA_BUSY)) {
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                frame_index,
+                                                                (void **)&fis_frame_data);
+
+                       spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+                       sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
+                                                         frame_header,
+                                                         fis_frame_data);
+                       spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+                       /* got the signature FIS; we can now go to the final substate */
+                       sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+                       result = SCI_SUCCESS;
+               } else
+                       dev_warn(sciphy_to_dev(iphy),
+                                "%s: PHY starting substate machine received "
+                                "unexpected frame id %x\n",
+                                __func__, frame_index);
+
+               /* Regardless of the result, we are done with this frame */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return result;
+       }
+       default:
+               dev_dbg(sciphy_to_dev(iphy),
+                       "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       /* This is just a temporary state; go off to the starting state */
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
+}
+
+static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+       sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+       sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+       sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+       sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       if (sci_port_link_detected(iphy->owning_port, iphy)) {
+
+               /*
+                * Clear the PE suspend condition so we can actually
+                * receive the SIG FIS.
+                * The hardware will not respond to the XRDY until the PE
+                * suspend condition is cleared.
+                */
+               sci_phy_resume(iphy);
+
+               sci_mod_timer(&iphy->sata_timer,
+                             SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
+       } else
+               iphy->is_in_link_training = false;
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       /* State machine has run to completion so exit out and change
+        * the base state machine to the ready state
+        */
+       sci_change_state(&iphy->sm, SCI_PHY_READY);
+}
+
+/**
+ * scu_link_layer_stop_protocol_engine - stop the phy's protocol engine
+ * @iphy: This is the struct isci_phy object to stop.
+ *
+ * This method does not reset the protocol engine; it just suspends it and
+ * places it in a state where it will not cause the end device to power up.
+ */
+static void scu_link_layer_stop_protocol_engine(
+       struct isci_phy *iphy)
+{
+       u32 scu_sas_pcfg_value;
+       u32 enable_spinup_value;
+
+       /* Suspend the protocol engine and place it in a sata spinup hold state */
+       scu_sas_pcfg_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       scu_sas_pcfg_value |=
+               (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+                SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
+                SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
+       writel(scu_sas_pcfg_value,
+              &iphy->link_layer_registers->phy_configuration);
+
+       /* Disable the notify enable spinup primitives */
+       enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+       enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
+       writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
+}
+
+/**
+ * scu_link_layer_start_oob - start the OOB/SN state machine for this
+ *    struct isci_phy object
+ * @iphy: the phy for which to start OOB/SN
+ */
+static void scu_link_layer_start_oob(
+       struct isci_phy *iphy)
+{
+       u32 scu_sas_pcfg_value;
+
+       scu_sas_pcfg_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+       scu_sas_pcfg_value &=
+               ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+               SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+       writel(scu_sas_pcfg_value,
+              &iphy->link_layer_registers->phy_configuration);
+}
+
+/**
+ * scu_link_layer_tx_hard_reset - transmit a hard reset request on the
+ *    specified phy
+ * @iphy: the phy on which to transmit the hard reset
+ *
+ * The SCU hardware requires that we reset the OOB state machine and set the
+ * hard reset bit in the phy configuration register.  We then must start OOB
+ * over with the hard reset bit set.
+ */
+static void scu_link_layer_tx_hard_reset(
+       struct isci_phy *iphy)
+{
+       u32 phy_configuration_value;
+
+       /*
+        * SAS Phys must wait for the HARD_RESET_TX event notification to transition
+        * to the starting state. */
+       phy_configuration_value =
+               readl(&iphy->link_layer_registers->phy_configuration);
+       phy_configuration_value |=
+               (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
+                SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
+       writel(phy_configuration_value,
+              &iphy->link_layer_registers->phy_configuration);
+
+       /* Now take the OOB state machine out of reset */
+       phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+       phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+       writel(phy_configuration_value,
+              &iphy->link_layer_registers->phy_configuration);
+}
+
+static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_port *iport = iphy->owning_port;
+       struct isci_host *ihost = iport->owning_controller;
+
+       /*
+        * @todo We need to get to the controller to place this PE in a
+        * reset state
+        */
+       sci_del_timer(&iphy->sata_timer);
+
+       scu_link_layer_stop_protocol_engine(iphy);
+
+       if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
+               sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_port *iport = iphy->owning_port;
+       struct isci_host *ihost = iport->owning_controller;
+
+       scu_link_layer_stop_protocol_engine(iphy);
+       scu_link_layer_start_oob(iphy);
+
+       /* We don't know what kind of phy we are going to be just yet */
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+       iphy->bcn_received_while_port_unassigned = false;
+
+       if (iphy->sm.previous_state_id == SCI_PHY_READY)
+               sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+
+       sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
+}
+
+static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+       struct isci_port *iport = iphy->owning_port;
+       struct isci_host *ihost = iport->owning_controller;
+
+       sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       sci_phy_suspend(iphy);
+}
+
+static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+       /* The phy is being reset, therefore deactivate it from the port.  In
+        * the resetting state we don't notify the user regarding link up and
+        * link down notifications
+        */
+       sci_port_deactivate_phy(iphy->owning_port, iphy, false);
+
+       if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+               scu_link_layer_tx_hard_reset(iphy);
+       } else {
+               /* The SCU does not need to have a discrete reset state so
+                * just go back to the starting state.
+                */
+               sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+       }
+}
+
+static const struct sci_base_state sci_phy_state_table[] = {
+       [SCI_PHY_INITIAL] = { },
+       [SCI_PHY_STOPPED] = {
+               .enter_state = sci_phy_stopped_state_enter,
+       },
+       [SCI_PHY_STARTING] = {
+               .enter_state = sci_phy_starting_state_enter,
+       },
+       [SCI_PHY_SUB_INITIAL] = {
+               .enter_state = sci_phy_starting_initial_substate_enter,
+       },
+       [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
+       [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
+       [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
+       [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
+               .enter_state = sci_phy_starting_await_sas_power_substate_enter,
+               .exit_state  = sci_phy_starting_await_sas_power_substate_exit,
+       },
+       [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
+               .enter_state = sci_phy_starting_await_sata_power_substate_enter,
+               .exit_state  = sci_phy_starting_await_sata_power_substate_exit
+       },
+       [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
+               .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
+               .exit_state  = sci_phy_starting_await_sata_phy_substate_exit
+       },
+       [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
+               .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
+               .exit_state  = sci_phy_starting_await_sata_speed_substate_exit
+       },
+       [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
+               .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
+               .exit_state  = sci_phy_starting_await_sig_fis_uf_substate_exit
+       },
+       [SCI_PHY_SUB_FINAL] = {
+               .enter_state = sci_phy_starting_final_substate_enter,
+       },
+       [SCI_PHY_READY] = {
+               .enter_state = sci_phy_ready_state_enter,
+               .exit_state = sci_phy_ready_state_exit,
+       },
+       [SCI_PHY_RESETTING] = {
+               .enter_state = sci_phy_resetting_state_enter,
+       },
+       [SCI_PHY_FINAL] = { },
+};
+
+void sci_phy_construct(struct isci_phy *iphy,
+                           struct isci_port *iport, u8 phy_index)
+{
+       sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
+
+       /* Copy the rest of the input data to our locals */
+       iphy->owning_port = iport;
+       iphy->phy_index = phy_index;
+       iphy->bcn_received_while_port_unassigned = false;
+       iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+       iphy->link_layer_registers = NULL;
+       iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+       /* Create the SIGNATURE FIS Timeout timer for this phy */
+       sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
+}
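+
+/* Note: the single sata_timer armed here is reused across the starting
+ * substates with either SCIC_SDS_SIGNATURE_FIS_TIMEOUT or
+ * SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT (see phy.h) and is torn down again
+ * in the substate exit handlers.
+ */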
+
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
+{
+       struct sci_oem_params *oem = &ihost->oem_parameters;
+       u64 sci_sas_addr;
+       __be64 sas_addr;
+
+       sci_sas_addr = oem->phys[index].sas_address.high;
+       sci_sas_addr <<= 32;
+       sci_sas_addr |= oem->phys[index].sas_address.low;
+       sas_addr = cpu_to_be64(sci_sas_addr);
+       memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
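+       /* For example (values hypothetical): oem sas_address.high 0x5000b000
+        * and .low 0xaaaa3344 yield sci_sas_addr 0x5000b000aaaa3344, stored
+        * in iphy->sas_addr as the bytes 50 00 b0 00 aa aa 33 44, i.e. the
+        * big-endian wire format that libsas expects.
+        */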
+
+       iphy->isci_port = NULL;
+       iphy->sas_phy.enabled = 0;
+       iphy->sas_phy.id = index;
+       iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
+       iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
+       iphy->sas_phy.ha = &ihost->sas_ha;
+       iphy->sas_phy.lldd_phy = iphy;
+       iphy->sas_phy.enabled = 1;
+       iphy->sas_phy.class = SAS;
+       iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
+       iphy->sas_phy.tproto = 0;
+       iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
+       iphy->sas_phy.role = PHY_ROLE_INITIATOR;
+       iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
+       iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
+       memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
+}
+
+
+/**
+ * isci_phy_control() - This function is one of the SAS Domain Template
+ *    functions. This is a phy management function.
+ * @sas_phy: This parameter specifies the phy being controlled.
+ * @func: This parameter specifies the phy control function being invoked.
+ * @buf: This parameter is specific to the phy function being invoked.
+ *
+ * Return: status; zero indicates success.
+ */
+int isci_phy_control(struct asd_sas_phy *sas_phy,
+                    enum phy_func func,
+                    void *buf)
+{
+       int ret = 0;
+       struct isci_phy *iphy = sas_phy->lldd_phy;
+       struct isci_port *iport = iphy->isci_port;
+       struct isci_host *ihost = sas_phy->ha->lldd_ha;
+       unsigned long flags;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
+               __func__, sas_phy, func, buf, iphy, iport);
+
+       switch (func) {
+       case PHY_FUNC_DISABLE:
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               sci_phy_stop(iphy);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               break;
+
+       case PHY_FUNC_LINK_RESET:
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               sci_phy_stop(iphy);
+               sci_phy_start(iphy);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               break;
+
+       case PHY_FUNC_HARD_RESET:
+               if (!iport)
+                       return -ENODEV;
+
+               /* Perform the port reset. */
+               ret = isci_port_perform_hard_reset(ihost, iport, iphy);
+
+               break;
+
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                          "%s: phy %p; func %d NOT IMPLEMENTED!\n",
+                          __func__, sas_phy, func);
+               ret = -ENOSYS;
+               break;
+       }
+       return ret;
+}
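+
+/* Note: in this driver isci_phy_control() is plugged into the libsas
+ * sas_domain_function_template as the .lldd_control_phy method, which is
+ * how the PHY_FUNC_* operations above are dispatched by libsas.
+ */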
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
new file mode 100644 (file)
index 0000000..67699c8
--- /dev/null
@@ -0,0 +1,504 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PHY_H_
+#define _ISCI_PHY_H_
+
+#include <scsi/sas.h>
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+
+/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
+ * before restarting the starting state machine.  Technically, the old parallel
+ * ATA specification required up to 30 seconds for a device to issue its
+ * signature FIS as a result of a soft reset.  Now we see that devices respond
+ * generally within 15 seconds, but we'll use 25 for now.
+ */
+#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT    25000
+
+/* This is the timeout for the SATA OOB/SN because the hardware does not
+ * recognize a hot plug after OOB signal but before the SN signals.  We need to
+ * make sure after a hotplug timeout if we have not received the speed event
+ * notification from the hardware that we restart the hardware OOB state
+ * machine.
+ */
+#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT  250
+
+enum sci_phy_protocol {
+       SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
+       SCIC_SDS_PHY_PROTOCOL_SAS,
+       SCIC_SDS_PHY_PROTOCOL_SATA,
+       SCIC_SDS_MAX_PHY_PROTOCOLS
+};
+
+/**
+ * isci_phy - hba local phy infrastructure
+ * @sm: phy state machine (base states plus the starting substates)
+ * @protocol: attached device protocol
+ * @phy_index: physical index relative to the controller (0-3)
+ * @bcn_received_while_port_unassigned: bcn to report after port association
+ * @sata_timer: timeout SATA signature FIS arrival
+ */
+struct isci_phy {
+       struct sci_base_state_machine sm;
+       struct isci_port *owning_port;
+       enum sas_linkrate max_negotiated_speed;
+       enum sci_phy_protocol protocol;
+       u8 phy_index;
+       bool bcn_received_while_port_unassigned;
+       bool is_in_link_training;
+       struct sci_timer sata_timer;
+       struct scu_transport_layer_registers __iomem *transport_layer_registers;
+       struct scu_link_layer_registers __iomem *link_layer_registers;
+       struct asd_sas_phy sas_phy;
+       struct isci_port *isci_port;
+       u8 sas_addr[SAS_ADDR_SIZE];
+       union {
+               struct sas_identify_frame iaf;
+               struct dev_to_host_fis fis;
+       } frame_rcvd;
+};
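+
+/* Note: frame_rcvd holds whichever address frame completed link training:
+ * the identify address frame for SAS partners, or the signature FIS for
+ * direct-attached SATA devices (see isci_port_link_up() in port.c).
+ */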
+
+static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
+{
+       struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
+
+       return iphy;
+}
+
+struct sci_phy_cap {
+       union {
+               struct {
+                       /*
+                        * The SAS specification indicates the start bit shall
+                        * always be set to 1.  This implementation will have
+                        * the start bit set to 0 if the PHY CAPABILITIES were
+                        * either not received or speed negotiation failed.
+                        */
+                       u8 start:1;
+                       u8 tx_ssc_type:1;
+                       u8 res1:2;
+                       u8 req_logical_linkrate:4;
+
+                       u32 gen1_no_ssc:1;
+                       u32 gen1_ssc:1;
+                       u32 gen2_no_ssc:1;
+                       u32 gen2_ssc:1;
+                       u32 gen3_no_ssc:1;
+                       u32 gen3_ssc:1;
+                       u32 res2:17;
+                       u32 parity:1;
+               };
+               u32 all;
+       };
+}  __packed;
+
+/* this data structure reflects the link layer transmit identification reg */
+struct sci_phy_proto {
+       union {
+               struct {
+                       u16 _r_a:1;
+                       u16 smp_iport:1;
+                       u16 stp_iport:1;
+                       u16 ssp_iport:1;
+                       u16 _r_b:4;
+                       u16 _r_c:1;
+                       u16 smp_tport:1;
+                       u16 stp_tport:1;
+                       u16 ssp_tport:1;
+                       u16 _r_d:4;
+               };
+               u16 all;
+       };
+} __packed;
+
+
+/**
+ * struct sci_phy_properties - This structure defines the properties common to
+ *    all phys that can be retrieved.
+ */
+struct sci_phy_properties {
+       /**
+        * This field specifies the port that currently contains the
+        * supplied phy.  This field may be set to NULL
+        * if the phy is not currently contained in a port.
+        */
+       struct isci_port *iport;
+
+       /**
+        * This field specifies the link rate at which the phy is
+        * currently operating.
+        */
+       enum sas_linkrate negotiated_link_rate;
+
+       /**
+        * This field specifies the index of the phy in relation to other
+        * phys within the controller.  This index is zero relative.
+        */
+       u8 index;
+};
+
+/**
+ * struct sci_sas_phy_properties - This structure defines the properties,
+ *    specific to a SAS phy, that can be retrieved.
+ */
+struct sci_sas_phy_properties {
+       /**
+        * This field delineates the Identify Address Frame received
+        * from the remote end point.
+        */
+       struct sas_identify_frame rcvd_iaf;
+
+       /**
+        * This field delineates the Phy capabilities structure received
+        * from the remote end point.
+        */
+       struct sci_phy_cap rcvd_cap;
+
+};
+
+/**
+ * struct sci_sata_phy_properties - This structure defines the properties,
+ *    specific to a SATA phy, that can be retrieved.
+ */
+struct sci_sata_phy_properties {
+       /**
+        * This field delineates the signature FIS received from the
+        * attached target.
+        */
+       struct dev_to_host_fis signature_fis;
+
+       /**
+        * This field specifies to the user if a port selector is connected
+        * on the specified phy.
+        */
+       bool is_port_selector_present;
+
+};
+
+/**
+ * enum sci_phy_counter_id - This enumeration depicts the various pieces of
+ *    optional information that can be retrieved for a specific phy.
+ */
+enum sci_phy_counter_id {
+       /**
+        * This PHY information field tracks the number of frames received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME,
+
+       /**
+        * This PHY information field tracks the number of frames transmitted.
+        */
+       SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
+
+       /**
+        * This PHY information field tracks the number of DWORDs received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
+
+       /**
+        * This PHY information field tracks the number of DWORDs transmitted.
+        */
+       SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
+
+       /**
+        * This PHY information field tracks the number of times DWORD
+        * synchronization was lost.
+        */
+       SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
+
+       /**
+        * This PHY information field tracks the number of received DWORDs with
+        * running disparity errors.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
+
+       /**
+        * This PHY information field tracks the number of received frames with a
+        * CRC error (not including short or truncated frames).
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
+
+       /**
+        * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+        * primitives received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
+
+       /**
+        * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+        * primitives transmitted.
+        */
+       SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
+
+       /**
+        * This PHY information field tracks the number of times the inactivity
+        * timer for connections on the phy has been utilized.
+        */
+       SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
+
+       /**
+        * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+        * primitives received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
+
+       /**
+        * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+        * primitives transmitted.
+        */
+       SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
+
+       /**
+        * This PHY information field tracks the number of CREDIT BLOCKED
+        * primitives received.
+        * @note Depending on remote device implementation, credit blocks
+        *       may occur regularly.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
+
+       /**
+        * This PHY information field contains the number of short frames
+        * received.  A short frame is simply a frame smaller then what is
+        * allowed by either the SAS or SATA specification.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
+
+       /**
+        * This PHY information field contains the number of frames received after
+        * credit has been exhausted.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
+
+       /**
+        * This PHY information field contains the number of frames received after
+        * a DONE has been received.
+        */
+       SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
+
+       /**
+        * This PHY information field contains the number of times the phy
+        * failed to achieve DWORD synchronization during speed negotiation.
+        */
+       SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
+};
+
+enum sci_phy_states {
+       /**
+        * Simply the initial state for the base domain state machine.
+        */
+       SCI_PHY_INITIAL,
+
+       /**
+        * This state indicates that the phy has successfully been stopped.
+        * In this state no new IO operations are permitted on this phy.
+        * This state is entered from the INITIAL state.
+        * This state is entered from the STARTING state.
+        * This state is entered from the READY state.
+        * This state is entered from the RESETTING state.
+        */
+       SCI_PHY_STOPPED,
+
+       /**
+        * This state indicates that the phy is in the process of becoming
+        * ready.  In this state no new IO operations are permitted on this phy.
+        * This state is entered from the STOPPED state.
+        * This state is entered from the READY state.
+        * This state is entered from the RESETTING state.
+        */
+       SCI_PHY_STARTING,
+
+       /**
+        * Initial state
+        */
+       SCI_PHY_SUB_INITIAL,
+
+       /**
+        * Wait state for the hardware OSSP event type notification
+        */
+       SCI_PHY_SUB_AWAIT_OSSP_EN,
+
+       /**
+        * Wait state for the PHY speed notification
+        */
+       SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
+
+       /**
+        * Wait state for the IAF Unsolicited frame notification
+        */
+       SCI_PHY_SUB_AWAIT_IAF_UF,
+
+       /**
+        * Wait state for the request to consume power
+        */
+       SCI_PHY_SUB_AWAIT_SAS_POWER,
+
+       /**
+        * Wait state for request to consume power
+        */
+       SCI_PHY_SUB_AWAIT_SATA_POWER,
+
+       /**
+        * Wait state for the SATA PHY notification
+        */
+       SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
+
+       /**
+        * Wait for the SATA PHY speed notification
+        */
+       SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
+
+       /**
+        * Wait state for the SIGNATURE FIS unsolicited frame notification
+        */
+       SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
+
+       /**
+        * Exit state for this state machine
+        */
+       SCI_PHY_SUB_FINAL,
+
+       /**
+        * This state indicates that the phy is now ready.  Thus, the user
+        * is able to perform IO operations utilizing this phy as long as it
+        * is currently part of a valid port.
+        * This state is entered from the STARTING state.
+        */
+       SCI_PHY_READY,
+
+       /**
+        * This state indicates that the phy is in the process of being reset.
+        * In this state no new IO operations are permitted on this phy.
+        * This state is entered from the READY state.
+        */
+       SCI_PHY_RESETTING,
+
+       /**
+        * Simply the final state for the base phy state machine.
+        */
+       SCI_PHY_FINAL,
+};
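+
+/* A typical SAS bring-up walks INITIAL -> STOPPED -> STARTING, then
+ * SUB_INITIAL -> SUB_AWAIT_OSSP_EN -> SUB_AWAIT_SAS_SPEED_EN ->
+ * SUB_AWAIT_IAF_UF -> SUB_AWAIT_SAS_POWER -> SUB_FINAL, and finally READY;
+ * the SATA path runs the SATA_* wait states (power, phy, speed, signature
+ * FIS) instead of the SAS ones.
+ */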
+
+void sci_phy_construct(
+       struct isci_phy *iphy,
+       struct isci_port *iport,
+       u8 phy_index);
+
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
+
+void sci_phy_set_port(
+       struct isci_phy *iphy,
+       struct isci_port *iport);
+
+enum sci_status sci_phy_initialize(
+       struct isci_phy *iphy,
+       struct scu_transport_layer_registers __iomem *transport_layer_registers,
+       struct scu_link_layer_registers __iomem *link_layer_registers);
+
+enum sci_status sci_phy_start(
+       struct isci_phy *iphy);
+
+enum sci_status sci_phy_stop(
+       struct isci_phy *iphy);
+
+enum sci_status sci_phy_reset(
+       struct isci_phy *iphy);
+
+void sci_phy_resume(
+       struct isci_phy *iphy);
+
+void sci_phy_setup_transport(
+       struct isci_phy *iphy,
+       u32 device_id);
+
+enum sci_status sci_phy_event_handler(
+       struct isci_phy *iphy,
+       u32 event_code);
+
+enum sci_status sci_phy_frame_handler(
+       struct isci_phy *iphy,
+       u32 frame_index);
+
+enum sci_status sci_phy_consume_power_handler(
+       struct isci_phy *iphy);
+
+void sci_phy_get_sas_address(
+       struct isci_phy *iphy,
+       struct sci_sas_address *sas_address);
+
+void sci_phy_get_attached_sas_address(
+       struct isci_phy *iphy,
+       struct sci_sas_address *sas_address);
+
+struct sci_phy_proto;
+void sci_phy_get_protocols(
+       struct isci_phy *iphy,
+       struct sci_phy_proto *protocols);
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
+
+struct isci_host;
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
+int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
+
+#endif /* !defined(_ISCI_PHY_H_) */
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
new file mode 100644 (file)
index 0000000..8f6f9b7
--- /dev/null
@@ -0,0 +1,1757 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "port.h"
+#include "request.h"
+
+#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT  (1000)
+#define SCU_DUMMY_INDEX    (0xFFFF)
+
+static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
+{
+       unsigned long flags;
+
+       dev_dbg(&iport->isci_host->pdev->dev,
+               "%s: iport = %p, state = 0x%x\n",
+               __func__, iport, status);
+
+       /* XXX pointless lock */
+       spin_lock_irqsave(&iport->state_lock, flags);
+       iport->status = status;
+       spin_unlock_irqrestore(&iport->state_lock, flags);
+}
+
+static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
+{
+       u8 index;
+
+       proto->all = 0;
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               struct isci_phy *iphy = iport->phy_table[index];
+
+               if (!iphy)
+                       continue;
+               sci_phy_get_protocols(iphy, proto);
+       }
+}
+
+static u32 sci_port_get_phys(struct isci_port *iport)
+{
+       u32 index;
+       u32 mask;
+
+       mask = 0;
+       for (index = 0; index < SCI_MAX_PHYS; index++)
+               if (iport->phy_table[index])
+                       mask |= (1 << index);
+
+       return mask;
+}
+
+/**
+ * sci_port_get_properties() - This method simply returns the properties
+ *    regarding the port, such as: physical index, protocols, sas address, etc.
+ * @iport: this parameter specifies the port for which to retrieve the
+ *    properties.
+ * @prop: This parameter specifies the properties structure into which to
+ *    copy the requested information.
+ *
+ * Return: SCI_SUCCESS if the specified port was valid;
+ * SCI_FAILURE_INVALID_PORT if the specified port is not valid, in which
+ * case no data is copied to the properties output parameter.
+ */
+static enum sci_status sci_port_get_properties(struct isci_port *iport,
+                                               struct sci_port_properties *prop)
+{
+       if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
+               return SCI_FAILURE_INVALID_PORT;
+
+       prop->index = iport->logical_port_index;
+       prop->phy_mask = sci_port_get_phys(iport);
+       sci_port_get_sas_address(iport, &prop->local.sas_address);
+       sci_port_get_protocols(iport, &prop->local.protocols);
+       sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
+
+       return SCI_SUCCESS;
+}
+
+static void sci_port_bcn_enable(struct isci_port *iport)
+{
+       struct isci_phy *iphy;
+       u32 val;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+               iphy = iport->phy_table[i];
+               if (!iphy)
+                       continue;
+               val = readl(&iphy->link_layer_registers->link_layer_control);
+               /* the bits are write-1-to-clear: writing back the value we
+                * just read clears any latched broadcast-change bits.
+                */
+               writel(val, &iphy->link_layer_registers->link_layer_control);
+       }
+}
+
+/* called under sci_lock to stabilize phy:port associations */
+void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
+{
+       int i;
+
+       clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
+       wake_up(&ihost->eventq);
+
+       if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+               struct isci_phy *iphy = iport->phy_table[i];
+
+               if (!iphy)
+                       continue;
+
+               ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+                                               PORTE_BROADCAST_RCVD);
+               break;
+       }
+}
+
+static void isci_port_bc_change_received(struct isci_host *ihost,
+                                        struct isci_port *iport,
+                                        struct isci_phy *iphy)
+{
+       if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
+                       __func__, iphy, &iphy->sas_phy);
+               set_bit(IPORT_BCN_PENDING, &iport->flags);
+               atomic_inc(&iport->event);
+               wake_up(&ihost->eventq);
+       } else {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: isci_phy = %p, sas_phy = %p\n",
+                       __func__, iphy, &iphy->sas_phy);
+
+               ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+                                               PORTE_BROADCAST_RCVD);
+       }
+       sci_port_bcn_enable(iport);
+}
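+
+/* Taken together, the two routines above defer broadcast change
+ * notifications (IPORT_BCN_PENDING) while IPORT_BCN_BLOCKED is set on the
+ * port, then replay a single PORTE_BROADCAST_RCVD through one phy once
+ * isci_port_bcn_enable() clears the blocked flag.
+ */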
+
+static void isci_port_link_up(struct isci_host *isci_host,
+                             struct isci_port *iport,
+                             struct isci_phy *iphy)
+{
+       unsigned long flags;
+       struct sci_port_properties properties;
+       unsigned long success = true;
+
+       BUG_ON(iphy->isci_port != NULL);
+
+       iphy->isci_port = iport;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p\n",
+               __func__, iport);
+
+       spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+       isci_port_change_state(iphy->isci_port, isci_starting);
+
+       sci_port_get_properties(iport, &properties);
+
+       if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
+               u64 attached_sas_address;
+
+               iphy->sas_phy.oob_mode = SATA_OOB_MODE;
+               iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
+
+               /*
+                * For direct-attached SATA devices, the SCI core will
+                * automagically assign a SAS address to the end device
+                * for the purpose of creating a port. This SAS address
+                * will not be the same as assigned to the PHY and needs
+                * to be obtained from struct sci_port_properties properties.
+                */
+               attached_sas_address = properties.remote.sas_address.high;
+               attached_sas_address <<= 32;
+               attached_sas_address |= properties.remote.sas_address.low;
+               swab64s(&attached_sas_address);
+
+               memcpy(&iphy->sas_phy.attached_sas_addr,
+                      &attached_sas_address, sizeof(attached_sas_address));
+       } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+               iphy->sas_phy.oob_mode = SAS_OOB_MODE;
+               iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
+
+               /* Copy the attached SAS address from the IAF */
+               memcpy(iphy->sas_phy.attached_sas_addr,
+                      iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
+       } else {
+               dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
+               success = false;
+       }
+
+       iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
+
+       spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+       /* Notify libsas that we have an address frame, if indeed
+        * we've found an SSP, SMP, or STP target */
+       if (success)
+               isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
+                                                   PORTE_BYTES_DMAED);
+}
+
+
+/**
+ * isci_port_link_down() - This function is called by the sci core when a link
+ *    becomes inactive.
+ * @isci_host: This parameter specifies the isci host object.
+ * @phy: This parameter specifies the isci phy with the active link.
+ * @port: This parameter specifies the isci port with the active link.
+ *
+ */
+static void isci_port_link_down(struct isci_host *isci_host,
+                               struct isci_phy *isci_phy,
+                               struct isci_port *isci_port)
+{
+       struct isci_remote_device *isci_device;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p\n", __func__, isci_port);
+
+       if (isci_port) {
+
+               /* check to see if this is the last phy on this port. */
+               if (isci_phy->sas_phy.port &&
+                   isci_phy->sas_phy.port->num_phys == 1) {
+                       atomic_inc(&isci_port->event);
+                       isci_port_bcn_enable(isci_host, isci_port);
+
+                       /* change the state for all devices on this port.  The
+                        * next task sent to this device will be returned as
+                        * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+                        * remove the target
+                        */
+                       list_for_each_entry(isci_device,
+                                           &isci_port->remote_dev_list,
+                                           node) {
+                               dev_dbg(&isci_host->pdev->dev,
+                                       "%s: isci_device = %p\n",
+                                       __func__, isci_device);
+                               set_bit(IDEV_GONE, &isci_device->flags);
+                       }
+               }
+               isci_port_change_state(isci_port, isci_stopping);
+       }
+
+       /* Notify libsas of the broken link; this will trigger calls to our
+        * isci_port_deformed and isci_dev_gone functions.
+        */
+       sas_phy_disconnected(&isci_phy->sas_phy);
+       isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
+                                          PHYE_LOSS_OF_SIGNAL);
+
+       isci_phy->isci_port = NULL;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p - Done\n", __func__, isci_port);
+}
+
+
+/**
+ * isci_port_ready() - This function is called by the sci core when a link
+ *    becomes ready.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p\n", __func__, isci_port);
+
+       complete_all(&isci_port->start_complete);
+       isci_port_change_state(isci_port, isci_ready);
+       return;
+}
+
+/**
+ * isci_port_not_ready() - This function is called by the sci core when a link
+ *    is not ready. All remote devices on this link will be removed if they are
+ *    in the stopping state.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_port = %p\n", __func__, isci_port);
+}
+
+static void isci_port_stop_complete(struct isci_host *ihost,
+                                   struct isci_port *iport,
+                                   enum sci_status completion_status)
+{
+       dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
+}
+
+/**
+ * isci_port_hard_reset_complete() - This function is called by the sci core
+ *    when the hard reset complete notification has been received.
+ * @port: This parameter specifies the sci port with the active link.
+ * @completion_status: This parameter specifies the core status for the reset
+ *    process.
+ *
+ */
+static void isci_port_hard_reset_complete(struct isci_port *isci_port,
+                                         enum sci_status completion_status)
+{
+       dev_dbg(&isci_port->isci_host->pdev->dev,
+               "%s: isci_port = %p, completion_status=%x\n",
+                    __func__, isci_port, completion_status);
+
+       /* Save the status of the hard reset from the port. */
+       isci_port->hard_reset_status = completion_status;
+
+       complete_all(&isci_port->hard_reset_complete);
+}
+
+/* This method returns true if the specified phy can be assigned to this
+ * port.  The phys allowed for each port are:
+ *
+ *   Port 0 - 3 2 1 0
+ *   Port 1 -     1
+ *   Port 2 - 3 2
+ *   Port 3 - 3
+ *
+ * This method doesn't preclude all configurations.  It merely ensures that
+ * a phy is part of the allowable set of phy identifiers for that port.  For
+ * example, one could assign phy 3 to port 0 and no other phys.  Please refer
+ * to sci_port_is_phy_mask_valid() for information regarding whether the
+ * phy_mask for a port can be supported.
+ *
+ * Returns true if this is a valid phy assignment for the port, false if not.
+ */
+bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       struct sci_user_parameters *user = &ihost->user_parameters;
+
+       /* Initialize to invalid value. */
+       u32 existing_phy_index = SCI_MAX_PHYS;
+       u32 index;
+
+       if ((iport->physical_port_index == 1) && (phy_index != 1))
+               return false;
+
+       if (iport->physical_port_index == 3 && phy_index != 3)
+               return false;
+
+       if (iport->physical_port_index == 2 &&
+           (phy_index == 0 || phy_index == 1))
+               return false;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++)
+               if (iport->phy_table[index] && index != phy_index)
+                       existing_phy_index = index;
+
+       /* Ensure that all of the phys in the port are capable of
+        * operating at the same maximum link rate.
+        */
+       if (existing_phy_index < SCI_MAX_PHYS &&
+           user->phys[phy_index].max_speed_generation !=
+           user->phys[existing_phy_index].max_speed_generation)
+               return false;
+
+       return true;
+}
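+
+/* Per the rules above: phy 0 can only be placed in port 0, phy 1 in ports
+ * 0-1, phy 2 in ports 0 and 2, and phy 3 in ports 0, 2, or 3.
+ */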
+
+/**
+ * sci_port_is_phy_mask_valid() - can this port support the given phy mask?
+ * @iport: This is the port object for which to determine if the phy mask
+ *    can be supported.
+ * @phy_mask: the candidate phy mask for the port.
+ *
+ * This method returns true if the port's phy mask can be supported by the
+ * SCU.  The valid PHY mask configurations for each port are:
+ *
+ *   Port 0 - [[3  2] 1] 0
+ *   Port 1 -        [1]
+ *   Port 2 - [[3] 2]
+ *   Port 3 -  [3]
+ *
+ * Returns true if this is a supportable phy mask for the port, false if not.
+ */
+static bool sci_port_is_phy_mask_valid(
+       struct isci_port *iport,
+       u32 phy_mask)
+{
+       if (iport->physical_port_index == 0) {
+               if (((phy_mask & 0x0F) == 0x0F)
+                   || ((phy_mask & 0x03) == 0x03)
+                   || ((phy_mask & 0x01) == 0x01)
+                   || (phy_mask == 0))
+                       return true;
+       } else if (iport->physical_port_index == 1) {
+               if (((phy_mask & 0x02) == 0x02)
+                   || (phy_mask == 0))
+                       return true;
+       } else if (iport->physical_port_index == 2) {
+               if (((phy_mask & 0x0C) == 0x0C)
+                   || ((phy_mask & 0x04) == 0x04)
+                   || (phy_mask == 0))
+                       return true;
+       } else if (iport->physical_port_index == 3) {
+               if (((phy_mask & 0x08) == 0x08)
+                   || (phy_mask == 0))
+                       return true;
+       }
+
+       return false;
+}
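+
+/* In other words, the supported shapes are an x4 port on port 0 (mask 0xf),
+ * x2 ports on ports 0 and 2 (masks 0x3 and 0xc), and x1 ports in each
+ * port's home phy slot.
+ */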
+
+/*
+ * This method retrieves a currently active (i.e. connected) phy contained in
+ * the port.  Currently, the lowest order phy that is connected is returned.
+ * It returns a pointer to a struct isci_phy that is active in the port, or
+ * NULL if no phy in the port is currently connected to a remote end point.
+ */
+static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
+{
+       u32 index;
+       struct isci_phy *iphy;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               /* Ensure that the phy is both part of the port and currently
+                * connected to the remote end-point.
+                */
+               iphy = iport->phy_table[index];
+               if (iphy && sci_port_active_phy(iport, iphy))
+                       return iphy;
+       }
+
+       return NULL;
+}
+
+static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+       /* Check to see if we can add this phy to the port.  That requires
+        * that the phy is not already part of a port and that the port does
+        * not already have a phy assigned to the phy index.
+        */
+       if (!iport->phy_table[iphy->phy_index] &&
+           !phy_get_non_dummy_port(iphy) &&
+           sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+               /* Phy is being added in the stopped state so we are in MPC mode
+                * make logical port index = physical port index
+                */
+               iport->logical_port_index = iport->physical_port_index;
+               iport->phy_table[iphy->phy_index] = iphy;
+               sci_phy_set_port(iphy, iport);
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE;
+}
+
+static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+       /* Make sure that this phy is part of this port */
+       if (iport->phy_table[iphy->phy_index] == iphy &&
+           phy_get_non_dummy_port(iphy) == iport) {
+               struct isci_host *ihost = iport->owning_controller;
+
+               /* Yep it is assigned to this port so remove it */
+               sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
+               iport->phy_table[iphy->phy_index] = NULL;
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE;
+}
+
+void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+       u32 index;
+
+       sas->high = 0;
+       sas->low  = 0;
+       for (index = 0; index < SCI_MAX_PHYS; index++)
+               if (iport->phy_table[index])
+                       sci_phy_get_sas_address(iport->phy_table[index], sas);
+}
+
+void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+       struct isci_phy *iphy;
+
+       /*
+        * Ensure that the phy is both part of the port and currently
+        * connected to the remote end-point.
+        */
+       iphy = sci_port_get_a_connected_phy(iport);
+       if (iphy) {
+               if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
+                       sci_phy_get_attached_sas_address(iphy, sas);
+               } else {
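+                       /* Direct-attached SATA: there is no IAF to report,
+                        * so fabricate a per-phy unique attached address
+                        * from the local address plus the phy index.
+                        */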
+                       sci_phy_get_sas_address(iphy, sas);
+                       sas->low += iphy->phy_index;
+               }
+       } else {
+               sas->high = 0;
+               sas->low  = 0;
+       }
+}
+
+/**
+ * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
+ *
+ * @iport: logical port on which we need to create the remote node context
+ * @rni: remote node index for this remote node context.
+ *
+ * This routine will construct a dummy remote node context data structure
+ * This structure will be posted to the hardware to work around a scheduler
+ * error in the hardware.
+ */
+static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
+{
+       union scu_remote_node_context *rnc;
+
+       rnc = &iport->owning_controller->remote_node_context_table[rni];
+
+       memset(rnc, 0, sizeof(union scu_remote_node_context));
+
+       rnc->ssp.remote_sas_address_hi = 0;
+       rnc->ssp.remote_sas_address_lo = 0;
+
+       rnc->ssp.remote_node_index = rni;
+       rnc->ssp.remote_node_port_width = 1;
+       rnc->ssp.logical_port_index = iport->physical_port_index;
+
+       rnc->ssp.nexus_loss_timer_enable = false;
+       rnc->ssp.check_bit = false;
+       rnc->ssp.is_valid = true;
+       rnc->ssp.is_remote_node_context = true;
+       rnc->ssp.function_number = 0;
+       rnc->ssp.arbitration_wait_time = 0;
+}
+
+/*
+ * Construct a dummy task context data structure.  This structure will be
+ * posted to the hardware to work around a scheduler error in the hardware.
+ */
+static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       struct scu_task_context *task_context;
+
+       task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+       memset(task_context, 0, sizeof(struct scu_task_context));
+
+       task_context->initiator_request = 1;
+       task_context->connection_rate = 1;
+       task_context->logical_port_index = iport->physical_port_index;
+       task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+       task_context->task_index = ISCI_TAG_TCI(tag);
+       task_context->valid = SCU_TASK_CONTEXT_VALID;
+       task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+       task_context->remote_node_index = iport->reserved_rni;
+       task_context->do_not_dma_ssp_good_response = 1;
+       task_context->task_phase = 0x01;
+}
+
+static void sci_port_destroy_dummy_resources(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
+               isci_free_tag(ihost, iport->reserved_tag);
+
+       if (iport->reserved_rni != SCU_DUMMY_INDEX)
+               sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
+                                                                    1, iport->reserved_rni);
+
+       iport->reserved_rni = SCU_DUMMY_INDEX;
+       iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+}
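+
+/* The dummy RNC and dummy task context constructed above exist only to
+ * feed the hardware scheduler workaround described at
+ * sci_port_post_dummy_request() below; this routine returns both the tag
+ * and the remote node index to their pools.
+ */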
+
+void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
+{
+       u8 index;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               if (iport->active_phy_mask & (1 << index))
+                       sci_phy_setup_transport(iport->phy_table[index], device_id);
+       }
+}
+
+static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
+                                 bool do_notify_user)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+               sci_phy_resume(iphy);
+
+       iport->active_phy_mask |= 1 << iphy->phy_index;
+
+       sci_controller_clear_invalid_phy(ihost, iphy);
+
+       if (do_notify_user == true)
+               isci_port_link_up(ihost, iport, iphy);
+}
+
+void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
+                            bool do_notify_user)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       iport->active_phy_mask &= ~(1 << iphy->phy_index);
+
+       iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+       /* Re-assign the phy back to the LP as if it were a narrow port */
+       writel(iphy->phy_index,
+               &iport->port_pe_configuration_register[iphy->phy_index]);
+
+       if (do_notify_user == true)
+               isci_port_link_down(ihost, iphy, iport);
+}
+
+static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       /*
+        * Check to see if we have already reported this link as bad and if
+        * not go ahead and tell the SCI_USER that we have discovered an
+        * invalid link.
+        */
+       if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
+               ihost->invalid_phy_mask |= 1 << iphy->phy_index;
+               dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
+       }
+}
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+       switch (state) {
+       case SCI_PORT_READY:
+       case SCI_PORT_SUB_WAITING:
+       case SCI_PORT_SUB_OPERATIONAL:
+       case SCI_PORT_SUB_CONFIGURING:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/* flag dummy rnc handling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+                                     enum sci_port_states state)
+{
+       struct sci_base_state_machine *sm = &iport->sm;
+       enum sci_port_states old_state = sm->current_state_id;
+
+       if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+               iport->ready_exit = true;
+
+       sci_change_state(sm, state);
+       iport->ready_exit = false;
+}
+
+/**
+ * sci_port_general_link_up_handler - phy can be assigned to port?
+ * @iport: the port that has a phy that has gone link up.
+ * @iphy: This is the struct isci_phy object that has gone link up.
+ * @do_notify_user: This parameter specifies whether to inform the user (via
+ *    isci_port_link_up()) that a new phy has become ready.
+ *
+ * Determine if this phy can be assigned to this port.  If the phy is not a
+ * valid PHY for this port then the function will notify the user.  A PHY can
+ * only be part of a port if its attached SAS ADDRESS is the same as that of
+ * all other PHYs in the same port.
+ */
+static void sci_port_general_link_up_handler(struct isci_port *iport,
+                                                 struct isci_phy *iphy,
+                                                 bool do_notify_user)
+{
+       struct sci_sas_address port_sas_address;
+       struct sci_sas_address phy_sas_address;
+
+       sci_port_get_attached_sas_address(iport, &port_sas_address);
+       sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
+
+       /* If the SAS address of the new phy matches the SAS address of
+        * other phys in the port OR this is the first phy in the port,
+        * then activate the phy and allow it to be used for operations
+        * in this port.
+        */
+       if ((phy_sas_address.high == port_sas_address.high &&
+            phy_sas_address.low  == port_sas_address.low) ||
+           iport->active_phy_mask == 0) {
+               struct sci_base_state_machine *sm = &iport->sm;
+
+               sci_port_activate_phy(iport, iphy, do_notify_user);
+               if (sm->current_state_id == SCI_PORT_RESETTING)
+                       port_state_machine_change(iport, SCI_PORT_READY);
+       } else
+               sci_port_invalid_link_up(iport, iphy);
+}
+
+
+
+/**
+ * sci_port_is_wide() - is this a wide port?
+ * @iport: The port for which the wide port condition is to be checked.
+ *
+ * This method returns false if the port has exactly one phy object assigned
+ * to it.  If there are no phys, or more than one phy, it returns true.
+ *
+ * Returns true if this is a wide ported port, false if it is a narrow port.
+ */
+static bool sci_port_is_wide(struct isci_port *iport)
+{
+       u32 index;
+       u32 phy_count = 0;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               if (iport->phy_table[index] != NULL) {
+                       phy_count++;
+               }
+       }
+
+       return phy_count != 1;
+}
+
+/**
+ * sci_port_link_detected() - a phy has detected a link
+ * @iport: The port associated with the phy object.
+ * @iphy: The phy object that is trying to go link up.
+ *
+ * This method is called by the PHY object when the link is detected.  If the
+ * port wants the PHY to continue on to the link up state then the port layer
+ * must return true.  If the port object returns false the phy object must
+ * halt its attempt to go link up.
+ *
+ * Returns true if this phy can continue on to the ready state, false if it
+ * can not.  This notification is in place for wide ports and direct attached
+ * phys.  Since there are no wide ported SATA devices this could become an
+ * invalid port configuration.
+ */
+bool sci_port_link_detected(
+       struct isci_port *iport,
+       struct isci_phy *iphy)
+{
+       if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
+           (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
+           sci_port_is_wide(iport)) {
+               sci_port_invalid_link_up(iport, iphy);
+
+               return false;
+       }
+
+       return true;
+}
+
+static void port_timeout(unsigned long data)
+{
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
+       struct isci_host *ihost = iport->owning_controller;
+       unsigned long flags;
+       u32 current_state;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       current_state = iport->sm.current_state_id;
+
+       if (current_state == SCI_PORT_RESETTING) {
+               /* if the port is still in the resetting state then the timeout
+                * fired before the reset completed.
+                */
+               port_state_machine_change(iport, SCI_PORT_FAILED);
+       } else if (current_state == SCI_PORT_STOPPED) {
+               /* if the port is stopped then the start request failed.  In
+                * this case, stay in the stopped state.
+                */
+               dev_err(sciport_to_dev(iport),
+                       "%s: SCIC Port 0x%p failed to stop before timeout.\n",
+                       __func__,
+                       iport);
+       } else if (current_state == SCI_PORT_STOPPING) {
+               /* if the port is still stopping then the stop has not completed */
+               isci_port_stop_complete(iport->owning_controller,
+                                       iport,
+                                       SCI_FAILURE_TIMEOUT);
+       } else {
+               /* The port is in the ready state and we have a timer
+                * reporting a timeout; this should not happen.
+                */
+               dev_err(sciport_to_dev(iport),
+                       "%s: SCIC Port 0x%p is processing a timeout operation "
+                       "in state %d.\n", __func__, iport, current_state);
+       }
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* --------------------------------------------------------------------------- */
+
+/**
+ * sci_port_update_viit_entry() - update the hardware's VIIT entry for this port
+ * @iport: The port whose VIIT entry is to be updated.
+ */
+static void sci_port_update_viit_entry(struct isci_port *iport)
+{
+       struct sci_sas_address sas_address;
+
+       sci_port_get_sas_address(iport, &sas_address);
+
+       writel(sas_address.high,
+               &iport->viit_registers->initiator_sas_address_hi);
+       writel(sas_address.low,
+               &iport->viit_registers->initiator_sas_address_lo);
+
+       /* This value gets cleared just in case it's not already cleared */
+       writel(0, &iport->viit_registers->reserved);
+
+       /* We are required to update the status register last */
+       writel(SCU_VIIT_ENTRY_ID_VIIT |
+              SCU_VIIT_IPPT_INITIATOR |
+              ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
+              SCU_VIIT_STATUS_ALL_VALID,
+              &iport->viit_registers->status);
+}
+
+enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
+{
+       u16 index;
+       struct isci_phy *iphy;
+       enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
+
+       /*
+        * Loop through all of the phys in this port and find the phy with the
+        * lowest maximum link rate.
+        */
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               iphy = iport->phy_table[index];
+               if (iphy && sci_port_active_phy(iport, iphy) &&
+                   iphy->max_negotiated_speed < max_allowed_speed)
+                       max_allowed_speed = iphy->max_negotiated_speed;
+       }
+
+       return max_allowed_speed;
+}
+
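+/*
+ * Usage sketch (illustrative; min_t() is the generic kernel helper, while
+ * the caller and its 'rate' variable are assumed): a consumer would clamp
+ * a device's rate to the slowest active phy in the port, e.g.
+ *
+ *	rate = min_t(enum sas_linkrate, rate,
+ *		     sci_port_get_max_allowed_speed(iport));
+ */
+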
+static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
+{
+       u32 pts_control_value;
+
+       pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+       pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
+       writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+/**
+ * sci_port_post_dummy_request() - post dummy/workaround request
+ * @iport: port on which to post the task
+ *
+ * Prevent the hardware scheduler from posting new requests to the front
+ * of the scheduler queue, which would starve currently ongoing requests.
+ */
+static void sci_port_post_dummy_request(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       u16 tag = iport->reserved_tag;
+       struct scu_task_context *tc;
+       u32 command;
+
+       tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+       tc->abort = 0;
+
+       command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+                 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+                 ISCI_TAG_TCI(tag);
+
+       sci_controller_post_request(ihost, command);
+}
+
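+/*
+ * For reference, the command word built above packs three fields (this
+ * breakdown is inferred from the shifts used here, not from the SCU
+ * documentation): the POST_TC request type, the logical port index at
+ * SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT, and ISCI_TAG_TCI(tag) in the
+ * low bits.  For example, port 2 with tci 0x1f would post:
+ *
+ *	SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
+ *		| (2 << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 0x1f
+ */
+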
+/**
+ * sci_port_abort_dummy_request() - abort the dummy/workaround request
+ * @iport: The port on which the task is to be aborted.
+ *
+ * Aborting the dummy request allows the hardware to power down parts of
+ * the silicon to save power.
+ */
+static void sci_port_abort_dummy_request(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       u16 tag = iport->reserved_tag;
+       struct scu_task_context *tc;
+       u32 command;
+
+       tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+       tc->abort = 1;
+
+       command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
+                 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+                 ISCI_TAG_TCI(tag);
+
+       sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_resume_port_task_scheduler() - resume the port task scheduler
+ * @iport: The port object to resume.
+ */
+static void
+sci_port_resume_port_task_scheduler(struct isci_port *iport)
+{
+       u32 pts_control_value;
+
+       pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+       pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
+       writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       sci_port_suspend_port_task_scheduler(iport);
+
+       iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
+
+       if (iport->active_phy_mask != 0) {
+               /* At least one of the phys on the port is ready */
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_OPERATIONAL);
+       }
+}
+
+static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
+{
+       u32 index;
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       struct isci_host *ihost = iport->owning_controller;
+
+       isci_port_ready(ihost, iport);
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               if (iport->phy_table[index]) {
+                       writel(iport->physical_port_index,
+                               &iport->port_pe_configuration_register[
+                                       iport->phy_table[index]->phy_index]);
+               }
+       }
+
+       sci_port_update_viit_entry(iport);
+
+       sci_port_resume_port_task_scheduler(iport);
+
+       /*
+        * Post the dummy task for the port so the hardware can schedule
+        * I/O correctly
+        */
+       sci_port_post_dummy_request(iport);
+}
+
+static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       u8 phys_index = iport->physical_port_index;
+       union scu_remote_node_context *rnc;
+       u16 rni = iport->reserved_rni;
+       u32 command;
+
+       rnc = &ihost->remote_node_context_table[rni];
+
+       rnc->ssp.is_valid = false;
+
+       /* ensure the preceding tc abort request has reached the
+        * controller and give it ample time to act before posting the rnc
+        * invalidate
+        */
+       readl(&ihost->smu_registers->interrupt_status); /* flush */
+       udelay(10);
+
+       command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
+                 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+       sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_ready_substate_operational_exit() - exit the operational substate
+ * @sm: The state machine embedded in a struct isci_port.
+ *
+ * This method performs the actions required by the struct isci_port on
+ * exiting SCI_PORT_SUB_OPERATIONAL: it aborts the dummy task and reports
+ * the port not ready.
+ */
+static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       struct isci_host *ihost = iport->owning_controller;
+
+       /*
+        * Kill the dummy task for this port.  If it has not yet posted,
+        * the hardware will treat the abort as a NOP and just return
+        * abort complete.
+        */
+       sci_port_abort_dummy_request(iport);
+
+       isci_port_not_ready(ihost, iport);
+
+       if (iport->ready_exit)
+               sci_port_invalidate_dummy_remote_node(iport);
+}
+
+static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       struct isci_host *ihost = iport->owning_controller;
+
+       if (iport->active_phy_mask == 0) {
+               isci_port_not_ready(ihost, iport);
+
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_WAITING);
+       } else if (iport->started_request_count == 0) {
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_OPERATIONAL);
+       }
+}
+
+static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       sci_port_suspend_port_task_scheduler(iport);
+       if (iport->ready_exit)
+               sci_port_invalidate_dummy_remote_node(iport);
+}
+
+enum sci_status sci_port_start(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       enum sci_status status = SCI_SUCCESS;
+       enum sci_port_states state;
+       u32 phy_mask;
+
+       state = iport->sm.current_state_id;
+       if (state != SCI_PORT_STOPPED) {
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       if (iport->assigned_device_count > 0) {
+               /* TODO This is a start failure operation because
+                * there are still devices assigned to this port.
+                * There must be no devices assigned to a port on a
+                * start operation.
+                */
+               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+       }
+
+       if (iport->reserved_rni == SCU_DUMMY_INDEX) {
+               u16 rni = sci_remote_node_table_allocate_remote_node(
+                               &ihost->available_remote_nodes, 1);
+
+               if (rni != SCU_DUMMY_INDEX)
+                       sci_port_construct_dummy_rnc(iport, rni);
+               else
+                       status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+               iport->reserved_rni = rni;
+       }
+
+       if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+               u16 tag;
+
+               tag = isci_alloc_tag(ihost);
+               if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+                       status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+               else
+                       sci_port_construct_dummy_task(iport, tag);
+               iport->reserved_tag = tag;
+       }
+
+       if (status == SCI_SUCCESS) {
+               phy_mask = sci_port_get_phys(iport);
+
+               /*
+                * There are one or more phys assigned to this port.  Make sure
+                * the port's phy mask is in fact legal and supported by the
+                * silicon.
+                */
+               if (sci_port_is_phy_mask_valid(iport, phy_mask)) {
+                       port_state_machine_change(iport,
+                                                 SCI_PORT_READY);
+
+                       return SCI_SUCCESS;
+               }
+               status = SCI_FAILURE;
+       }
+
+       if (status != SCI_SUCCESS)
+               sci_port_destroy_dummy_resources(iport);
+
+       return status;
+}
+
+enum sci_status sci_port_stop(struct isci_port *iport)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_STOPPED:
+               return SCI_SUCCESS;
+       case SCI_PORT_SUB_WAITING:
+       case SCI_PORT_SUB_OPERATIONAL:
+       case SCI_PORT_SUB_CONFIGURING:
+       case SCI_PORT_RESETTING:
+               port_state_machine_change(iport,
+                                         SCI_PORT_STOPPING);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
+{
+       enum sci_status status = SCI_FAILURE_INVALID_PHY;
+       struct isci_phy *iphy = NULL;
+       enum sci_port_states state;
+       u32 phy_index;
+
+       state = iport->sm.current_state_id;
+       if (state != SCI_PORT_SUB_OPERATIONAL) {
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       /* Select a phy on which we can send the hard reset request. */
+       for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
+               iphy = iport->phy_table[phy_index];
+               if (iphy && !sci_port_active_phy(iport, iphy)) {
+                       /*
+                        * We found a phy but it is not ready; select a
+                        * different phy.
+                        */
+                       iphy = NULL;
+               }
+       }
+
+       /* If we did not find an active phy, fail the reset request */
+       if (!iphy)
+               return status;
+       status = sci_phy_reset(iphy);
+
+       if (status != SCI_SUCCESS)
+               return status;
+
+       sci_mod_timer(&iport->timer, timeout);
+       iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
+
+       port_state_machine_change(iport, SCI_PORT_RESETTING);
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_port_add_phy() - add a PHY to the selected port
+ * @iport: This parameter specifies the port to which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
+ *
+ * Return: SCI_SUCCESS if the phy has been added to the port; any other
+ * status indicates a failure to add the phy to the port.
+ */
+enum sci_status sci_port_add_phy(struct isci_port *iport,
+                                     struct isci_phy *iphy)
+{
+       enum sci_status status;
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_STOPPED: {
+               struct sci_sas_address port_sas_address;
+
+               /* Read the port assigned SAS Address if there is one */
+               sci_port_get_sas_address(iport, &port_sas_address);
+
+               if (port_sas_address.high != 0 && port_sas_address.low != 0) {
+                       struct sci_sas_address phy_sas_address;
+
+                       /* Make sure that the PHY SAS Address matches the SAS Address
+                        * for this port
+                        */
+                       sci_phy_get_sas_address(iphy, &phy_sas_address);
+
+                       if (port_sas_address.high != phy_sas_address.high ||
+                           port_sas_address.low  != phy_sas_address.low)
+                               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+               }
+               return sci_port_set_phy(iport, iphy);
+       }
+       case SCI_PORT_SUB_WAITING:
+       case SCI_PORT_SUB_OPERATIONAL:
+               status = sci_port_set_phy(iport, iphy);
+
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               sci_port_general_link_up_handler(iport, iphy, true);
+               iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+               port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
+
+               return status;
+       case SCI_PORT_SUB_CONFIGURING:
+               status = sci_port_set_phy(iport, iphy);
+
+               if (status != SCI_SUCCESS)
+                       return status;
+               sci_port_general_link_up_handler(iport, iphy, true);
+
+               /* Re-enter the configuring state since this may be the last phy in
+                * the port.
+                */
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_CONFIGURING);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+/**
+ * sci_port_remove_phy() - remove a PHY from the selected port
+ * @iport: This parameter specifies the port from which the phy will be
+ *    removed.
+ * @iphy: This parameter is the phy which is to be removed from the port.
+ *
+ * Return: SCI_SUCCESS if the phy has been removed from the port; any other
+ * status indicates a failure to remove the phy from the port.
+ */
+enum sci_status sci_port_remove_phy(struct isci_port *iport,
+                                        struct isci_phy *iphy)
+{
+       enum sci_status status;
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+
+       switch (state) {
+       case SCI_PORT_STOPPED:
+               return sci_port_clear_phy(iport, iphy);
+       case SCI_PORT_SUB_OPERATIONAL:
+               status = sci_port_clear_phy(iport, iphy);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               sci_port_deactivate_phy(iport, iphy, true);
+               iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_CONFIGURING);
+               return SCI_SUCCESS;
+       case SCI_PORT_SUB_CONFIGURING:
+               status = sci_port_clear_phy(iport, iphy);
+
+               if (status != SCI_SUCCESS)
+                       return status;
+               sci_port_deactivate_phy(iport, iphy, true);
+
+               /* Re-enter the configuring state since this may be the last phy in
+                * the port
+                */
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_CONFIGURING);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+                                     struct isci_phy *iphy)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_SUB_WAITING:
+               /* Since this is the first phy going link up for the port we
+                * can just enable it and continue
+                */
+               sci_port_activate_phy(iport, iphy, true);
+
+               port_state_machine_change(iport,
+                                         SCI_PORT_SUB_OPERATIONAL);
+               return SCI_SUCCESS;
+       case SCI_PORT_SUB_OPERATIONAL:
+               sci_port_general_link_up_handler(iport, iphy, true);
+               return SCI_SUCCESS;
+       case SCI_PORT_RESETTING:
+               /* TODO We should make sure that the phy that has gone link
+                * up is the same one on which we sent the reset.  It is
+                * possible that the phy on which we sent the reset is not the
+                * one that has gone link up, and we want to make sure that
+                * the phy being reset comes back.  Consider the case where a
+                * reset is sent, but before the hardware processes the reset
+                * it gets a link up on the port because of a hot plug event.
+                * Because of the reset request this phy will go link down
+                * almost immediately.
+                */
+
+               /* In the resetting state we don't notify the user regarding
+                * link up and link down notifications.
+                */
+               sci_port_general_link_up_handler(iport, iphy, false);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_port_link_down(struct isci_port *iport,
+                                       struct isci_phy *iphy)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_SUB_OPERATIONAL:
+               sci_port_deactivate_phy(iport, iphy, true);
+
+               /* If there are no active phys left in the port, then
+                * transition the port to the WAITING state until such time
+                * as a phy goes link up
+                */
+               if (iport->active_phy_mask == 0)
+                       port_state_machine_change(iport,
+                                                 SCI_PORT_SUB_WAITING);
+               return SCI_SUCCESS;
+       case SCI_PORT_RESETTING:
+               /* In the resetting state we don't notify the user regarding
+                * link up and link down notifications. */
+               sci_port_deactivate_phy(iport, iphy, false);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_port_start_io(struct isci_port *iport,
+                                 struct isci_remote_device *idev,
+                                 struct isci_request *ireq)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_SUB_WAITING:
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_PORT_SUB_OPERATIONAL:
+               iport->started_request_count++;
+               return SCI_SUCCESS;
+       default:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_port_complete_io(struct isci_port *iport,
+                                    struct isci_remote_device *idev,
+                                    struct isci_request *ireq)
+{
+       enum sci_port_states state;
+
+       state = iport->sm.current_state_id;
+       switch (state) {
+       case SCI_PORT_STOPPED:
+               dev_warn(sciport_to_dev(iport),
+                        "%s: in wrong state: %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_PORT_STOPPING:
+               sci_port_decrement_request_count(iport);
+
+               if (iport->started_request_count == 0)
+                       port_state_machine_change(iport,
+                                                 SCI_PORT_STOPPED);
+               break;
+       case SCI_PORT_READY:
+       case SCI_PORT_RESETTING:
+       case SCI_PORT_FAILED:
+       case SCI_PORT_SUB_WAITING:
+       case SCI_PORT_SUB_OPERATIONAL:
+               sci_port_decrement_request_count(iport);
+               break;
+       case SCI_PORT_SUB_CONFIGURING:
+               sci_port_decrement_request_count(iport);
+               if (iport->started_request_count == 0) {
+                       port_state_machine_change(iport,
+                                                 SCI_PORT_SUB_OPERATIONAL);
+               }
+               break;
+       }
+       return SCI_SUCCESS;
+}
+
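+/*
+ * Pairing sketch (illustrative): every successful sci_port_start_io() must
+ * eventually be matched by sci_port_complete_io(), since
+ * started_request_count gates the STOPPING -> STOPPED and
+ * SUB_CONFIGURING -> SUB_OPERATIONAL transitions handled above:
+ *
+ *	if (sci_port_start_io(iport, idev, ireq) == SCI_SUCCESS) {
+ *		submit ireq to the hardware;
+ *		on completion: sci_port_complete_io(iport, idev, ireq);
+ *	}
+ */
+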
+static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
+{
+       u32 pts_control_value;
+
+       /* enable the port task scheduler in a suspended state */
+       pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+       pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
+       writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
+{
+       u32 pts_control_value;
+
+       pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+       pts_control_value &=
+               ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
+       writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_post_dummy_remote_node(struct isci_port *iport)
+{
+       struct isci_host *ihost = iport->owning_controller;
+       u8 phys_index = iport->physical_port_index;
+       union scu_remote_node_context *rnc;
+       u16 rni = iport->reserved_rni;
+       u32 command;
+
+       rnc = &ihost->remote_node_context_table[rni];
+       rnc->ssp.is_valid = true;
+
+       command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
+                 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+       sci_controller_post_request(ihost, command);
+
+       /* ensure hardware has seen the post rnc command and give it
+        * ample time to act before sending the suspend
+        */
+       readl(&ihost->smu_registers->interrupt_status); /* flush */
+       udelay(10);
+
+       command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
+                 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+       sci_controller_post_request(ihost, command);
+}
+
+static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
+               /*
+                * If we enter this state because of a request to stop
+                * the port then we want to disable the hardware's port
+                * task scheduler.
+                */
+               sci_port_disable_port_task_scheduler(iport);
+       }
+}
+
+static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       /* Enable and suspend the port task scheduler */
+       sci_port_enable_port_task_scheduler(iport);
+}
+
+static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       struct isci_host *ihost = iport->owning_controller;
+       u32 prev_state;
+
+       prev_state = iport->sm.previous_state_id;
+       if (prev_state == SCI_PORT_RESETTING)
+               isci_port_hard_reset_complete(iport, SCI_SUCCESS);
+       else
+               isci_port_not_ready(ihost, iport);
+
+       /* Post and suspend the dummy remote node context for this port. */
+       sci_port_post_dummy_remote_node(iport);
+
+       /* Start the ready substate machine */
+       port_state_machine_change(iport,
+                                 SCI_PORT_SUB_WAITING);
+}
+
+static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       sci_del_timer(&iport->timer);
+}
+
+static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       sci_del_timer(&iport->timer);
+
+       sci_port_destroy_dummy_resources(iport);
+}
+
+static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+       isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
+}
+
+/* --------------------------------------------------------------------------- */
+
+static const struct sci_base_state sci_port_state_table[] = {
+       [SCI_PORT_STOPPED] = {
+               .enter_state = sci_port_stopped_state_enter,
+               .exit_state  = sci_port_stopped_state_exit
+       },
+       [SCI_PORT_STOPPING] = {
+               .exit_state  = sci_port_stopping_state_exit
+       },
+       [SCI_PORT_READY] = {
+               .enter_state = sci_port_ready_state_enter,
+       },
+       [SCI_PORT_SUB_WAITING] = {
+               .enter_state = sci_port_ready_substate_waiting_enter,
+       },
+       [SCI_PORT_SUB_OPERATIONAL] = {
+               .enter_state = sci_port_ready_substate_operational_enter,
+               .exit_state  = sci_port_ready_substate_operational_exit
+       },
+       [SCI_PORT_SUB_CONFIGURING] = {
+               .enter_state = sci_port_ready_substate_configuring_enter,
+               .exit_state  = sci_port_ready_substate_configuring_exit
+       },
+       [SCI_PORT_RESETTING] = {
+               .exit_state  = sci_port_resetting_state_exit
+       },
+       [SCI_PORT_FAILED] = {
+               .enter_state = sci_port_failed_state_enter,
+       }
+};
+
+void sci_port_construct(struct isci_port *iport, u8 index,
+                            struct isci_host *ihost)
+{
+       sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
+
+       iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
+       iport->physical_port_index = index;
+       iport->active_phy_mask     = 0;
+       iport->ready_exit          = false;
+
+       iport->owning_controller = ihost;
+
+       iport->started_request_count = 0;
+       iport->assigned_device_count = 0;
+
+       iport->reserved_rni = SCU_DUMMY_INDEX;
+       iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+
+       sci_init_timer(&iport->timer, port_timeout);
+
+       iport->port_task_scheduler_registers = NULL;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++)
+               iport->phy_table[index] = NULL;
+}
+
+void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
+{
+       INIT_LIST_HEAD(&iport->remote_dev_list);
+       INIT_LIST_HEAD(&iport->domain_dev_list);
+       spin_lock_init(&iport->state_lock);
+       init_completion(&iport->start_complete);
+       iport->isci_host = ihost;
+       isci_port_change_state(iport, isci_freed);
+       atomic_set(&iport->event, 0);
+}
+
+/**
+ * isci_port_get_state() - This function gets the status of the port object.
+ * @isci_port: This parameter points to the isci_port object
+ *
+ * Return: the status of the object as an enum isci_status.
+ */
+enum isci_status isci_port_get_state(
+       struct isci_port *isci_port)
+{
+       return isci_port->status;
+}
+
+void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
+{
+       struct isci_host *ihost = iport->owning_controller;
+
+       /* notify the user. */
+       isci_port_bc_change_received(ihost, iport, iphy);
+}
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+                                struct isci_phy *iphy)
+{
+       unsigned long flags;
+       enum sci_status status;
+       int idx, ret = TMF_RESP_FUNC_COMPLETE;
+
+       dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
+               __func__, iport);
+
+       init_completion(&iport->hard_reset_complete);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+       status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
+
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (status == SCI_SUCCESS) {
+               wait_for_completion(&iport->hard_reset_complete);
+
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: iport = %p; hard reset completion\n",
+                       __func__, iport);
+
+               if (iport->hard_reset_status != SCI_SUCCESS)
+                       ret = TMF_RESP_FUNC_FAILED;
+       } else {
+               ret = TMF_RESP_FUNC_FAILED;
+
+               dev_err(&ihost->pdev->dev,
+                       "%s: iport = %p; sci_port_hard_reset call failed 0x%x\n",
+                       __func__, iport, status);
+       }
+
+       /* If the hard reset for the port has failed, consider this
+        * the same as link failures on all phys in the port.
+        */
+       if (ret != TMF_RESP_FUNC_COMPLETE) {
+               dev_err(&ihost->pdev->dev,
+                       "%s: iport = %p; hard reset failed "
+                       "(0x%x) - driving explicit link fail for all phys\n",
+                       __func__, iport, iport->hard_reset_status);
+
+               /* Down all phys in the port. */
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
+                       struct isci_phy *iphy = iport->phy_table[idx];
+
+                       if (!iphy)
+                               continue;
+                       sci_phy_stop(iphy);
+                       sci_phy_start(iphy);
+               }
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+       }
+       return ret;
+}
+
+/**
+ * isci_port_deformed() - This function is called by libsas when a port becomes
+ *    inactive.
+ * @phy: This parameter specifies the libsas phy with the inactive port.
+ */
+void isci_port_deformed(struct asd_sas_phy *phy)
+{
+       pr_debug("%s: sas_phy = %p\n", __func__, phy);
+}
+
+/**
+ * isci_port_formed() - This function is called by libsas when a port becomes
+ *    active.
+ * @phy: This parameter specifies the libsas phy with the active port.
+ */
+void isci_port_formed(struct asd_sas_phy *phy)
+{
+       pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
+}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
new file mode 100644 (file)
index 0000000..b50ecd4
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_PORT_H_
+#define _ISCI_PORT_H_
+
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+#include "phy.h"
+
+#define SCIC_SDS_DUMMY_PORT   0xFF
+
+struct isci_phy;
+struct isci_host;
+
+enum isci_status {
+       isci_freed        = 0x00,
+       isci_starting     = 0x01,
+       isci_ready        = 0x02,
+       isci_ready_for_io = 0x03,
+       isci_stopping     = 0x04,
+       isci_stopped      = 0x05,
+};
+
+/**
+ * struct isci_port - isci direct attached sas port object
+ * @event: counts bcns and port stop events (for bcn filtering)
+ * @ready_exit: several states constitute 'ready'. When exiting ready we
+ *              need to take extra port-teardown actions that are
+ *              skipped when exiting to another 'ready' state.
+ * @logical_port_index: software port index
+ * @physical_port_index: hardware port index
+ * @active_phy_mask: identifies phy members
+ * @reserved_tag: io tag reserved for the port task scheduler workaround
+ *                dummy task
+ * @reserved_rni: remote node index reserved for the port task scheduler
+ *                workaround
+ * @started_request_count: reference count for outstanding commands
+ * @not_ready_reason: set during state transitions and reported to the user
+ * @timer: timeout start/stop operations
+ */
+struct isci_port {
+       enum isci_status status;
+       #define IPORT_BCN_BLOCKED 0
+       #define IPORT_BCN_PENDING 1
+       unsigned long flags;
+       atomic_t event;
+       struct isci_host *isci_host;
+       struct asd_sas_port sas_port;
+       struct list_head remote_dev_list;
+       spinlock_t state_lock;
+       struct list_head domain_dev_list;
+       struct completion start_complete;
+       struct completion hard_reset_complete;
+       enum sci_status hard_reset_status;
+       struct sci_base_state_machine sm;
+       bool ready_exit;
+       u8 logical_port_index;
+       u8 physical_port_index;
+       u8 active_phy_mask;
+       u16 reserved_rni;
+       u16 reserved_tag;
+       u32 started_request_count;
+       u32 assigned_device_count;
+       u32 not_ready_reason;
+       struct isci_phy *phy_table[SCI_MAX_PHYS];
+       struct isci_host *owning_controller;
+       struct sci_timer timer;
+       struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
+       /* XXX rework: only one register, no need to replicate per-port */
+       u32 __iomem *port_pe_configuration_register;
+       struct scu_viit_entry __iomem *viit_registers;
+};
+
+enum sci_port_not_ready_reason_code {
+       SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
+       SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
+       SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
+       SCIC_PORT_NOT_READY_RECONFIGURING,
+
+       SCIC_PORT_NOT_READY_REASON_CODE_MAX
+};
+
+struct sci_port_end_point_properties {
+       struct sci_sas_address sas_address;
+       struct sci_phy_proto protocols;
+};
+
+struct sci_port_properties {
+       u32 index;
+       struct sci_port_end_point_properties local;
+       struct sci_port_end_point_properties remote;
+       u32 phy_mask;
+};
+
+/**
+ * enum sci_port_states - This enumeration depicts all the states for the
+ *    common port state machine.
+ */
+enum sci_port_states {
+       /**
+        * This state indicates that the port has successfully been stopped.
+        * In this state no new IO operations are permitted.
+        * This state is entered from the STOPPING state.
+        */
+       SCI_PORT_STOPPED,
+
+       /**
+        * This state indicates that the port is in the process of stopping.
+        * In this state no new IO operations are permitted, but existing IO
+        * operations are allowed to complete.
+        * This state is entered from the READY state.
+        */
+       SCI_PORT_STOPPING,
+
+       /**
+        * This state indicates the port is now ready.  Thus, the user is
+        * able to perform IO operations on this port.
+        * This state is entered from the STOPPED state.
+        */
+       SCI_PORT_READY,
+
+       /**
+        * The substate where the port is started and ready but has no
+        * active phys.
+        */
+       SCI_PORT_SUB_WAITING,
+
+       /**
+        * The substate where the port is started and ready and there is
+        * at least one phy operational.
+        */
+       SCI_PORT_SUB_OPERATIONAL,
+
+       /**
+        * The substate where the port is started and there was an
+        * add/remove phy event.  This state is only used in Automatic
+        * Port Configuration Mode (APC)
+        */
+       SCI_PORT_SUB_CONFIGURING,
+
+       /**
+        * This state indicates the port is in the process of performing a hard
+        * reset.  Thus, the user is unable to perform IO operations on this
+        * port.
+        * This state is entered from the READY state.
+        */
+       SCI_PORT_RESETTING,
+
+       /**
+        * This state indicates the port has failed a reset request.  This state
+        * is entered when a port reset request times out.
+        * This state is entered from the RESETTING state.
+        */
+       SCI_PORT_FAILED,
+};
+
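+/*
+ * Transition summary, derived from the handlers in port.c (informational
+ * only; the authoritative table is sci_port_state_table):
+ *
+ *	STOPPED --start--> READY --> SUB_WAITING --link up--> SUB_OPERATIONAL
+ *	SUB_OPERATIONAL --last link down--> SUB_WAITING
+ *	SUB_WAITING/SUB_OPERATIONAL --add or remove phy--> SUB_CONFIGURING
+ *	SUB_CONFIGURING --requests drained--> SUB_OPERATIONAL or SUB_WAITING
+ *	SUB_OPERATIONAL --hard reset--> RESETTING --> READY, or FAILED on timeout
+ *	started states --stop--> STOPPING --last request completes--> STOPPED
+ */
+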
+static inline void sci_port_decrement_request_count(struct isci_port *iport)
+{
+       if (!WARN_ONCE(iport->started_request_count == 0,
+                      "%s: tried to decrement started_request_count past 0!?",
+                      __func__))
+               iport->started_request_count--;
+}
+
+#define sci_port_active_phy(port, phy) \
+       (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
+
+void sci_port_construct(
+       struct isci_port *iport,
+       u8 port_index,
+       struct isci_host *ihost);
+
+enum sci_status sci_port_start(struct isci_port *iport);
+enum sci_status sci_port_stop(struct isci_port *iport);
+
+enum sci_status sci_port_add_phy(
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+enum sci_status sci_port_remove_phy(
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+void sci_port_setup_transports(
+       struct isci_port *iport,
+       u32 device_id);
+
+void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
+
+void sci_port_deactivate_phy(
+       struct isci_port *iport,
+       struct isci_phy *iphy,
+       bool do_notify_user);
+
+bool sci_port_link_detected(
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+                                     struct isci_phy *iphy);
+enum sci_status sci_port_link_down(struct isci_port *iport,
+                                       struct isci_phy *iphy);
+
+struct isci_request;
+struct isci_remote_device;
+enum sci_status sci_port_start_io(
+       struct isci_port *iport,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_port_complete_io(
+       struct isci_port *iport,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sas_linkrate sci_port_get_max_allowed_speed(
+       struct isci_port *iport);
+
+void sci_port_broadcast_change_received(
+       struct isci_port *iport,
+       struct isci_phy *iphy);
+
+bool sci_port_is_valid_phy_assignment(
+       struct isci_port *iport,
+       u32 phy_index);
+
+void sci_port_get_sas_address(
+       struct isci_port *iport,
+       struct sci_sas_address *sas_address);
+
+void sci_port_get_attached_sas_address(
+       struct isci_port *iport,
+       struct sci_sas_address *sas_address);
+
+enum isci_status isci_port_get_state(
+       struct isci_port *isci_port);
+
+void isci_port_formed(struct asd_sas_phy *);
+void isci_port_deformed(struct asd_sas_phy *);
+
+void isci_port_init(
+       struct isci_port *port,
+       struct isci_host *host,
+       int index);
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+                                struct isci_phy *iphy);
+#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
new file mode 100644 (file)
index 0000000..486b113
--- /dev/null
@@ -0,0 +1,754 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+
+#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT    (10)
+#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT    (10)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (100)
+
+enum SCIC_SDS_APC_ACTIVITY {
+       SCIC_SDS_APC_SKIP_PHY,
+       SCIC_SDS_APC_ADD_PHY,
+       SCIC_SDS_APC_START_TIMER,
+
+       SCIC_SDS_APC_ACTIVITY_MAX
+};
+
+/*
+ * ******************************************************************************
+ * General port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * sci_sas_address_compare() - compare two SAS addresses
+ * @address_one: A SAS Address to be compared.
+ * @address_two: A SAS Address to be compared.
+ *
+ * Return: a value > 0 if @address_one is greater than @address_two, a
+ * value < 0 if @address_one is less than @address_two, and 0 if the two
+ * addresses are the same.
+ */
+static s32 sci_sas_address_compare(
+       struct sci_sas_address address_one,
+       struct sci_sas_address address_two)
+{
+       if (address_one.high > address_two.high) {
+               return 1;
+       } else if (address_one.high < address_two.high) {
+               return -1;
+       } else if (address_one.low > address_two.low) {
+               return 1;
+       } else if (address_one.low < address_two.low) {
+               return -1;
+       }
+
+       /* The two SAS Address must be identical */
+       return 0;
+}
+
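+/*
+ * Example (illustrative; the local variables are assumed): the comparator
+ * gives a total order over SAS addresses, so equality of both the local
+ * and attached addresses is what places a phy in an existing port:
+ *
+ *	if (sci_sas_address_compare(port_sas, phy_sas) == 0 &&
+ *	    sci_sas_address_compare(port_att, phy_att) == 0)
+ *		the phy belongs in this port;
+ */
+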
+/**
+ * sci_port_configuration_agent_find_port() - find a matching port for a phy
+ * @ihost: The controller object used for the port search.
+ * @iphy: The phy object to match.
+ *
+ * This routine will find a matching port for the phy, i.e. a port whose
+ * broadcast (local) sas address and received (attached) sas address both
+ * match those of the phy.
+ *
+ * Return: the address of the matching port, or NULL if there is no
+ * matching port for the phy.
+ */
+static struct isci_port *sci_port_configuration_agent_find_port(
+       struct isci_host *ihost,
+       struct isci_phy *iphy)
+{
+       u8 i;
+       struct sci_sas_address port_sas_address;
+       struct sci_sas_address port_attached_device_address;
+       struct sci_sas_address phy_sas_address;
+       struct sci_sas_address phy_attached_device_address;
+
+       /*
+        * Since this phy can be a member of a wide port check to see if one or
+        * more phys match the sent and received SAS address as this phy in which
+        * case it should participate in the same port.
+        */
+       sci_phy_get_sas_address(iphy, &phy_sas_address);
+       sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
+
+       for (i = 0; i < ihost->logical_port_entries; i++) {
+               struct isci_port *iport = &ihost->ports[i];
+
+               sci_port_get_sas_address(iport, &port_sas_address);
+               sci_port_get_attached_sas_address(iport, &port_attached_device_address);
+
+               if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
+                   sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
+                       return iport;
+       }
+
+       return NULL;
+}
+
+/**
+ * sci_port_configuration_agent_validate_ports() - validate the port setup
+ * @ihost: This is the controller object that contains the port agent.
+ * @port_agent: This is the port configuration agent for the controller.
+ *
+ * This routine will validate that the port configuration is correct for
+ * the SCU hardware.  The SCU hardware allows the following port
+ * configurations:
+ *
+ *   LP0 -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3)
+ *   LP1 -> (PE1)
+ *   LP2 -> (PE2), (PE2, PE3)
+ *   LP3 -> (PE3)
+ *
+ * Return: SCI_SUCCESS if the port configuration is valid for this port
+ * configuration agent; SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if it
+ * is not.
+ */
+static enum sci_status sci_port_configuration_agent_validate_ports(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent)
+{
+       struct sci_sas_address first_address;
+       struct sci_sas_address second_address;
+
+       /*
+        * Sanity check the max ranges for all the phys: the max index
+        * must always equal the phy index.
+        */
+       if (port_agent->phy_valid_port_range[0].max_index != 0 ||
+           port_agent->phy_valid_port_range[1].max_index != 1 ||
+           port_agent->phy_valid_port_range[2].max_index != 2 ||
+           port_agent->phy_valid_port_range[3].max_index != 3)
+               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+
+       /*
+        * This is a request to configure a single x4 port or at least attempt
+        * to make all the phys into a single port.
+        */
+       if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+           port_agent->phy_valid_port_range[1].min_index == 0 &&
+           port_agent->phy_valid_port_range[2].min_index == 0 &&
+           port_agent->phy_valid_port_range[3].min_index == 0)
+               return SCI_SUCCESS;
+
+       /*
+        * This is a degenerate case where phy 1 and phy 2 are assigned
+        * to the same port; this is explicitly disallowed by the hardware
+        * unless they are part of the same x4 port, and this condition was
+        * already checked above.
+        */
+       if (port_agent->phy_valid_port_range[2].min_index == 1) {
+               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+       }
+
+       /*
+        * PE0 and PE3 can never have the same SAS Address unless they
+        * are part of the same x4 wide port and we have already checked
+        * for this condition.
+        */
+       sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+       sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+       if (sci_sas_address_compare(first_address, second_address) == 0) {
+               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+       }
+
+       /*
+        * If PE0 and PE1 are configured into two x1 ports, make sure that
+        * the SAS Addresses for PE0 and PE2 are different, since they can
+        * not be part of the same port.
+        */
+       if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+           port_agent->phy_valid_port_range[1].min_index == 1) {
+               sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+               sci_phy_get_sas_address(&ihost->phys[2], &second_address);
+
+               if (sci_sas_address_compare(first_address, second_address) == 0) {
+                       return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+               }
+       }
+
+       /*
+        * If PE2 and PE3 are configured into two x1 ports, make sure that
+        * the SAS Addresses for PE1 and PE3 are different, since they can
+        * not be part of the same port.
+        */
+       if (port_agent->phy_valid_port_range[2].min_index == 2 &&
+           port_agent->phy_valid_port_range[3].min_index == 3) {
+               sci_phy_get_sas_address(&ihost->phys[1], &first_address);
+               sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+               if (sci_sas_address_compare(first_address, second_address) == 0) {
+                       return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+               }
+       }
+
+       return SCI_SUCCESS;
+}
+
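+/*
+ * Example (illustrative, assuming phy_valid_port_range reflects the OEM
+ * phy masks): LP0 owning PE0/PE1 plus LP2 owning PE2/PE3 describes two x2
+ * ports and passes the checks above, while any range that would pair PE1
+ * with PE2 trips the degenerate-case check and is rejected.
+ */
+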
+/*
+ * ******************************************************************************
+ * Manual port configuration agent routines
+ * ****************************************************************************** */
+
+/* verify all of the phys in the same port are using the same SAS address */
+static enum sci_status
+sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+                                             struct sci_port_configuration_agent *port_agent)
+{
+       u32 phy_mask;
+       u32 assigned_phy_mask;
+       struct sci_sas_address sas_address;
+       struct sci_sas_address phy_assigned_address;
+       u8 port_index;
+       u8 phy_index;
+
+       assigned_phy_mask = 0;
+       sas_address.high = 0;
+       sas_address.low = 0;
+
+       for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
+               phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
+
+               if (!phy_mask)
+                       continue;
+               /*
+                * Make sure that one or more of the phys were not already
+                * assigned to a different port.
+                */
+               if ((phy_mask & ~assigned_phy_mask) == 0) {
+                       return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+               }
+
+               /* Find the starting phy index for this round through the loop */
+               for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+                       if ((phy_mask & (1 << phy_index)) == 0)
+                               continue;
+                       sci_phy_get_sas_address(&ihost->phys[phy_index],
+                                                    &sas_address);
+
+                       /*
+                        * The phy_index can be used as the starting point for the
+                        * port range since the hardware starts all logical ports
+                        * the same as the PE index. */
+                       port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+                       port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+                       if (phy_index != port_index) {
+                               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+                       }
+
+                       break;
+               }
+
+               /*
+                * See how many additional phys are being added to this logical port.
+                * Note: We have not moved the current phy_index so we will actually
+                *       compare the starting phy with itself.
+                *       This is expected and required to add the phy to the port. */
+               while (phy_index < SCI_MAX_PHYS) {
+                       if ((phy_mask & (1 << phy_index)) == 0) {
+                               phy_index++;
+                               continue;
+                       }
+                       sci_phy_get_sas_address(&ihost->phys[phy_index],
+                                                    &phy_assigned_address);
+
+                       if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
+                               /*
+                                * The phy mask specified that this phy is part of the same port
+                                * as the starting phy and it is not so fail this configuration */
+                               return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+                       }
+
+                       port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+                       port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+                       sci_port_add_phy(&ihost->ports[port_index],
+                                             &ihost->phys[phy_index]);
+
+                       assigned_phy_mask |= (1 << phy_index);
+                       phy_index++;
+               }
+       }
+
+       return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+static void mpc_agent_timeout(unsigned long data)
+{
+       u8 index;
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct sci_port_configuration_agent *port_agent;
+       struct isci_host *ihost;
+       unsigned long flags;
+       u16 configure_phy_mask;
+
+       port_agent = container_of(tmr, typeof(*port_agent), timer);
+       ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       port_agent->timer_pending = false;
+
+       /* Find the mask of phys that are reported ready but not yet configured into a port */
+       configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
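+       /*
+        * Illustrative example (hypothetical values, not driver state): with
+        * phy_ready_mask == 0xf and phy_configured_mask == 0x3,
+        * configure_phy_mask becomes 0xc, i.e. phys 2 and 3 are ready but
+        * still need to be placed into a port by the loop below.
+        */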
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               struct isci_phy *iphy = &ihost->phys[index];
+
+               if (configure_phy_mask & (1 << index)) {
+                       port_agent->link_up_handler(ihost, port_agent,
+                                                   phy_get_non_dummy_port(iphy),
+                                                   iphy);
+               }
+       }
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static void sci_mpc_agent_link_up(struct isci_host *ihost,
+                                      struct sci_port_configuration_agent *port_agent,
+                                      struct isci_port *iport,
+                                      struct isci_phy *iphy)
+{
+       /* If the port is NULL then the phy was not assigned to a port.
+        * This is because the phy was not given the same SAS Address as
+        * the other PHYs in the port.
+        */
+       if (!iport)
+               return;
+
+       port_agent->phy_ready_mask |= (1 << iphy->phy_index);
+       sci_port_link_up(iport, iphy);
+       if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+               port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+}
+
+/**
+ * sci_mpc_agent_link_down - handle mpc link down events
+ * @ihost: This is the controller object that receives the link down
+ *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy.  If there is no
+ *    associated port this is NULL.  The port is an invalid handle only if
+ *    the phy was never part of this port.  This happens when the phy is
+ *    not broadcasting the same SAS address as the other phys in the
+ *    assigned port.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This function handles the manual port configuration link down
+ * notifications.  Since all ports and phys are associated at initialization
+ * time, we just turn around and notify the port object of the link down
+ * event.  If this phy is not associated with a port there is no action
+ * taken.  Is it possible to get a link down notification from a phy that
+ * has no associated port?
+ */
+static void sci_mpc_agent_link_down(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent,
+       struct isci_port *iport,
+       struct isci_phy *iphy)
+{
+       if (iport != NULL) {
+               /*
+                * If we can form a new port from the remainder of the phys
+                * then we want to start the timer to allow the SCI User to
+                * cleanup old devices and rediscover the port before
+                * rebuilding the port with the phys that remain in the ready
+                * state.
+                */
+               port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+               port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+
+               /*
+                * Check to see if there are more phys waiting to be
+                * configured into a port. If there are allow the SCI User
+                * to tear down this port, if necessary, and then reconstruct
+                * the port after the timeout.
+                */
+               if ((port_agent->phy_configured_mask == 0x0000) &&
+                   (port_agent->phy_ready_mask != 0x0000) &&
+                   !port_agent->timer_pending) {
+                       port_agent->timer_pending = true;
+
+                       sci_mod_timer(&port_agent->timer,
+                                     SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
+               }
+
+               sci_port_link_down(iport, iphy);
+       }
+}
+
+/* verify phys are assigned a valid SAS address for automatic port
+ * configuration mode.
+ */
+static enum sci_status
+sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
+                                             struct sci_port_configuration_agent *port_agent)
+{
+       u8 phy_index;
+       u8 port_index;
+       struct sci_sas_address sas_address;
+       struct sci_sas_address phy_assigned_address;
+
+       phy_index = 0;
+
+       while (phy_index < SCI_MAX_PHYS) {
+               port_index = phy_index;
+
+               /* Get the assigned SAS Address for the starting PHY of this port range. */
+               sci_phy_get_sas_address(&ihost->phys[phy_index],
+                                           &sas_address);
+
+               while (++phy_index < SCI_MAX_PHYS) {
+                       sci_phy_get_sas_address(&ihost->phys[phy_index],
+                                                    &phy_assigned_address);
+
+                       /* Verify that the SAS addresses are the same for every PHY in the port */
+                       if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
+                               port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+                               port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+                       } else {
+                               port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
+                               port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+                               break;
+                       }
+               }
+       }
+
+       return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
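+
+/*
+ * Worked example (illustrative, assuming a hypothetical 2x2 cabling): if
+ * phys 0-1 report one SAS address and phys 2-3 another, the loop above
+ * leaves valid port ranges of {0,0}, {0,1}, {2,2} and {2,3} for phys 0
+ * through 3, i.e. phy 1 may later be placed in port 0 or 1 and phy 3 in
+ * port 2 or 3.
+ */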
+
+static void sci_apc_agent_configure_ports(struct isci_host *ihost,
+                                              struct sci_port_configuration_agent *port_agent,
+                                              struct isci_phy *iphy,
+                                              bool start_timer)
+{
+       u8 port_index;
+       enum sci_status status;
+       struct isci_port *iport;
+       enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
+
+       iport = sci_port_configuration_agent_find_port(ihost, iphy);
+
+       if (iport) {
+               if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
+                       apc_activity = SCIC_SDS_APC_ADD_PHY;
+               else
+                       apc_activity = SCIC_SDS_APC_SKIP_PHY;
+       } else {
+               /*
+                * There is no matching port for this PHY, so let's search through
+                * the ports and see if we can add the PHY to its own port or maybe
+                * start the timer and wait to see if a wider port can be made.
+                *
+                * Note the break when we reach the condition of port id == phy id. */
+               for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
+                    port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
+                    port_index++) {
+
+                       iport = &ihost->ports[port_index];
+
+                       /* First we must make sure that this PHY can be added to this Port. */
+                       if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+                               /*
+                                * The port contains a PHY with a greater PHY ID than the
+                                * current PHY that has gone link up.  This phy cannot be part
+                                * of any port so skip it and move on. */
+                               if (iport->active_phy_mask > (1 << iphy->phy_index)) {
+                                       apc_activity = SCIC_SDS_APC_SKIP_PHY;
+                                       break;
+                               }
+
+                               /*
+                                * We have reached the end of our Port list and have not found
+                                * any reason why we should not either add the PHY to the port
+                                * or wait for more phys to become active. */
+                               if (iport->physical_port_index == iphy->phy_index) {
+                                       /*
+                                        * The port either has no active PHYs or only active PHYs
+                                        * with a lower PHY id than this PHY; the greater-id case
+                                        * was already skipped above, so this PHY can be added. */
+                                       if (apc_activity != SCIC_SDS_APC_START_TIMER) {
+                                               apc_activity = SCIC_SDS_APC_ADD_PHY;
+                                       }
+
+                                       break;
+                               }
+
+                               /*
+                                * The current port has no active PHYs and this PHY could be
+                                * part of this port.  Since we don't know yet, set up to start
+                                * the timer and see if there is a better configuration. */
+                               if (iport->active_phy_mask == 0) {
+                                       apc_activity = SCIC_SDS_APC_START_TIMER;
+                               }
+                       } else if (iport->active_phy_mask != 0) {
+                               /*
+                                * The Port has an active phy and the current Phy can not
+                                * participate in this port so skip the PHY and see if
+                                * there is a better configuration. */
+                               apc_activity = SCIC_SDS_APC_SKIP_PHY;
+                       }
+               }
+       }
+
+       /*
+        * Check to see if the start timer operation should instead map to an
+        * add phy operation.  This can happen when we have been waiting to
+        * add a phy to a port but could not because the automatic port
+        * configuration engine had a choice of possible ports for the phy.
+        * Since we have gone through a timeout we are going to restrict the
+        * choice to the smallest possible port. */
+       if (!start_timer && apc_activity == SCIC_SDS_APC_START_TIMER)
+               apc_activity = SCIC_SDS_APC_ADD_PHY;
+
+       switch (apc_activity) {
+       case SCIC_SDS_APC_ADD_PHY:
+               status = sci_port_add_phy(iport, iphy);
+
+               if (status == SCI_SUCCESS) {
+                       port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+               }
+               break;
+
+       case SCIC_SDS_APC_START_TIMER:
+               /*
+                * This can occur for either a link down event, or a link
+                * up event where we cannot yet tell the port to which a
+                * phy belongs.
+                */
+               if (port_agent->timer_pending)
+                       sci_del_timer(&port_agent->timer);
+
+               port_agent->timer_pending = true;
+               sci_mod_timer(&port_agent->timer,
+                             SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+               break;
+
+       case SCIC_SDS_APC_SKIP_PHY:
+       default:
+               /* do nothing; the PHY cannot be made part of a port at this time */
+               break;
+       }
+}
+
+/**
+ * sci_apc_agent_link_up - handle apc link up events
+ * @ihost: This is the controller object that receives the link up
+ *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy.  If there is no
+ *    associated port this is NULL.
+ * @iphy: This is the phy object which has gone link up.
+ *
+ * This method handles the automatic port configuration for link up
+ * notifications.
+ */
+static void sci_apc_agent_link_up(struct isci_host *ihost,
+                                      struct sci_port_configuration_agent *port_agent,
+                                      struct isci_port *iport,
+                                      struct isci_phy *iphy)
+{
+       u8 phy_index  = iphy->phy_index;
+
+       if (!iport) {
+               /* the phy is not yet part of a port */
+               port_agent->phy_ready_mask |= 1 << phy_index;
+               sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+       } else {
+               /* the phy is already part of the port */
+               u32 port_state = iport->sm.current_state_id;
+
+               /* if the port's state is resetting then the link up came from
+                * a port hard reset; in this case we need to tell the port
+                * that link up was received
+                */
+               BUG_ON(port_state != SCI_PORT_RESETTING);
+               port_agent->phy_ready_mask |= 1 << phy_index;
+               sci_port_link_up(iport, iphy);
+       }
+}
+
+/**
+ * sci_apc_agent_link_down - handle apc link down events
+ * @ihost: This is the controller object that receives the link down
+ *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy.  If there is no
+ *    associated port this is NULL.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This method handles the automatic port configuration link down
+ * notifications.  If the phy is not associated with a port, no action is
+ * taken.  Is it possible to get a link down notification from a phy that
+ * has no associated port?
+ */
+static void sci_apc_agent_link_down(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent,
+       struct isci_port *iport,
+       struct isci_phy *iphy)
+{
+       port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+
+       if (!iport)
+               return;
+       if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
+               enum sci_status status;
+
+               status = sci_port_remove_phy(iport, iphy);
+
+               if (status == SCI_SUCCESS)
+                       port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+       }
+}
+
+/* configure the phys into ports when the timer fires */
+static void apc_agent_timeout(unsigned long data)
+{
+       u32 index;
+       struct sci_timer *tmr = (struct sci_timer *)data;
+       struct sci_port_configuration_agent *port_agent;
+       struct isci_host *ihost;
+       unsigned long flags;
+       u16 configure_phy_mask;
+
+       port_agent = container_of(tmr, typeof(*port_agent), timer);
+       ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (tmr->cancel)
+               goto done;
+
+       port_agent->timer_pending = false;
+
+       configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+       if (!configure_phy_mask)
+               goto done;
+
+       for (index = 0; index < SCI_MAX_PHYS; index++) {
+               if ((configure_phy_mask & (1 << index)) == 0)
+                       continue;
+
+               sci_apc_agent_configure_ports(ihost, port_agent,
+                                                  &ihost->phys[index], false);
+       }
+
+done:
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/*
+ * ******************************************************************************
+ * Public port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * sci_port_configuration_agent_construct - construct the port configuration
+ *    agent for operation
+ * @port_agent: This is the port configuration agent to be constructed.
+ *
+ * This call is universal for both manual port configuration and automatic
+ * port configuration modes.
+ */
+void sci_port_configuration_agent_construct(
+       struct sci_port_configuration_agent *port_agent)
+{
+       u32 index;
+
+       port_agent->phy_configured_mask = 0x00;
+       port_agent->phy_ready_mask = 0x00;
+
+       port_agent->link_up_handler = NULL;
+       port_agent->link_down_handler = NULL;
+
+       port_agent->timer_pending = false;
+
+       for (index = 0; index < SCI_MAX_PORTS; index++) {
+               port_agent->phy_valid_port_range[index].min_index = 0;
+               port_agent->phy_valid_port_range[index].max_index = 0;
+       }
+}
+
+enum sci_status sci_port_configuration_agent_initialize(
+       struct isci_host *ihost,
+       struct sci_port_configuration_agent *port_agent)
+{
+       enum sci_status status;
+       enum sci_port_configuration_mode mode;
+
+       mode = ihost->oem_parameters.controller.mode_type;
+
+       if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+               status = sci_mpc_agent_validate_phy_configuration(
+                               ihost, port_agent);
+
+               port_agent->link_up_handler = sci_mpc_agent_link_up;
+               port_agent->link_down_handler = sci_mpc_agent_link_down;
+
+               sci_init_timer(&port_agent->timer, mpc_agent_timeout);
+       } else {
+               status = sci_apc_agent_validate_phy_configuration(
+                               ihost, port_agent);
+
+               port_agent->link_up_handler = sci_apc_agent_link_up;
+               port_agent->link_down_handler = sci_apc_agent_link_down;
+
+               sci_init_timer(&port_agent->timer, apc_agent_timeout);
+       }
+
+       return status;
+}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
new file mode 100644 (file)
index 0000000..b5f4341
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ */
+
+/* probe_roms - scan for oem parameters */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
+#include <linux/efi.h>
+#include <asm/probe_roms.h>
+
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static efi_char16_t isci_efivar_name[] = {
+       'R', 's', 't', 'S', 'c', 'u', 'O'
+};
+
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+{
+       void __iomem *oprom = pci_map_biosrom(pdev);
+       struct isci_orom *rom = NULL;
+       size_t len, i;
+       int j;
+       char oem_sig[4];
+       struct isci_oem_hdr oem_hdr;
+       u8 *tmp, sum;
+
+       if (!oprom)
+               return NULL;
+
+       len = pci_biosrom_size(pdev);
+       rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
+       if (!rom) {
+               dev_warn(&pdev->dev,
+                        "Unable to allocate memory for orom\n");
+               return NULL;
+       }
+
+       for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
+               memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
+
+               /* we think we found the OEM table */
+               if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
+                       size_t copy_len;
+
+                       memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
+
+                       copy_len = min(oem_hdr.len - sizeof(oem_hdr),
+                                      sizeof(*rom));
+
+                       memcpy_fromio(rom,
+                                     oprom + i + sizeof(oem_hdr),
+                                     copy_len);
+
+                       /* calculate checksum */
+                       tmp = (u8 *)&oem_hdr;
+                       for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
+                               sum += *tmp;
+
+                       tmp = (u8 *)rom;
+                       for (j = 0; j < sizeof(*rom); j++, tmp++)
+                               sum += *tmp;
+
+                       if (sum != 0) {
+                               dev_warn(&pdev->dev,
+                                        "OEM table checksum failed\n");
+                               continue;
+                       }
+
+                       /* keep going if that's not the oem param table */
+                       if (memcmp(rom->hdr.signature,
+                                  ISCI_ROM_SIG,
+                                  ISCI_ROM_SIG_SIZE) != 0)
+                               continue;
+
+                       dev_info(&pdev->dev,
+                                "OEM parameter table found in OROM\n");
+                       break;
+               }
+       }
+
+       if (i >= len) {
+               dev_err(&pdev->dev, "oprom parse error\n");
+               devm_kfree(&pdev->dev, rom);
+               rom = NULL;
+       }
+       pci_unmap_biosrom(oprom);
+
+       return rom;
+}
+
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+                                         struct isci_orom *orom, int scu_index)
+{
+       /* check for valid inputs */
+       if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
+           scu_index > orom->hdr.num_elements || !oem)
+               return -EINVAL;
+
+       *oem = orom->ctrl[scu_index];
+       return 0;
+}
+
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
+{
+       struct isci_orom *orom = NULL, *data;
+       int i, j;
+
+       if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
+               return NULL;
+
+       if (fw->size < sizeof(*orom))
+               goto out;
+
+       data = (struct isci_orom *)fw->data;
+
+       if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
+                   strlen(ISCI_ROM_SIG)) != 0)
+               goto out;
+
+       orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
+       if (!orom)
+               goto out;
+
+       memcpy(orom, fw->data, fw->size);
+
+       if (is_c0(pdev))
+               goto out;
+
+       /*
+        * deprecated: override default amp_control for pre-production
+        * silicon revisions
+        */
+       for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
+               for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
+                       orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
+                       orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
+                       orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
+                       orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
+               }
+ out:
+       release_firmware(fw);
+
+       return orom;
+}
+
+static struct efi *get_efi(void)
+{
+#ifdef CONFIG_EFI
+       return &efi;
+#else
+       return NULL;
+#endif
+}
+
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
+{
+       efi_status_t status;
+       struct isci_orom *rom;
+       struct isci_oem_hdr *oem_hdr;
+       u8 *tmp, sum;
+       int j;
+       unsigned long data_len;
+       u8 *efi_data;
+       u32 efi_attrib = 0;
+
+       data_len = 1024;
+       efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
+       if (!efi_data) {
+               dev_warn(&pdev->dev,
+                        "Unable to allocate memory for EFI data\n");
+               return NULL;
+       }
+
+       rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
+
+       if (get_efi())
+               status = get_efi()->get_variable(isci_efivar_name,
+                                                &ISCI_EFI_VENDOR_GUID,
+                                                &efi_attrib,
+                                                &data_len,
+                                                efi_data);
+       else
+               status = EFI_NOT_FOUND;
+
+       if (status != EFI_SUCCESS) {
+               dev_warn(&pdev->dev,
+                        "Unable to obtain EFI var data for OEM parms\n");
+               return NULL;
+       }
+
+       oem_hdr = (struct isci_oem_hdr *)efi_data;
+
+       if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
+               dev_warn(&pdev->dev,
+                        "Invalid OEM header signature\n");
+               return NULL;
+       }
+
+       /* calculate checksum */
+       tmp = (u8 *)efi_data;
+       for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
+               sum += *tmp;
+
+       if (sum != 0) {
+               dev_warn(&pdev->dev,
+                        "OEM table checksum failed\n");
+               return NULL;
+       }
+
+       if (memcmp(rom->hdr.signature,
+                  ISCI_ROM_SIG,
+                  ISCI_ROM_SIG_SIZE) != 0) {
+               dev_warn(&pdev->dev,
+                        "Invalid OEM table signature\n");
+               return NULL;
+       }
+
+       return rom;
+}
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
new file mode 100644 (file)
index 0000000..dc007e6
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PROBE_ROMS_H_
+#define _ISCI_PROBE_ROMS_H_
+
+#ifdef __KERNEL__
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/efi.h>
+#include "isci.h"
+
+#define SCIC_SDS_PARM_NO_SPEED   0
+
+/* generation 1 (i.e. 1.5 Gb/s) */
+#define SCIC_SDS_PARM_GEN1_SPEED 1
+
+/* generation 2 (i.e. 3.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN2_SPEED 2
+
+/* generation 3 (i.e. 6.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN3_SPEED 3
+#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
+
+/* parameters that can be set by module parameters */
+struct sci_user_parameters {
+       struct sci_phy_user_params {
+               /**
+                * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
+                * insertion frequency for this phy index.
+                */
+               u32 notify_enable_spin_up_insertion_frequency;
+
+               /**
+                * This field specifies the number of transmitted DWORDs within which
+                * to transmit a single ALIGN primitive.  This value applies regardless
+                * of what type of device is attached or connection state.  A value of
+                * 0 indicates that no ALIGN primitives will be inserted.
+                */
+               u16 align_insertion_frequency;
+
+               /**
+                * This field specifies the number of transmitted DWORDs within which
+                * to transmit 2 ALIGN primitives.  This applies for SAS connections
+                * only.  A minimum value of 3 is required for this field.
+                */
+               u16 in_connection_align_insertion_frequency;
+
+               /**
+                * This field indicates the maximum speed generation to be utilized
+                * by phys in the supplied port.
+                * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
+                * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
+                * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
+                */
+               u8 max_speed_generation;
+
+       } phys[SCI_MAX_PHYS];
+
+       /**
+        * This field specifies the maximum number of direct attached devices
+        * that can have power supplied to them simultaneously.
+        */
+       u8 max_number_concurrent_device_spin_up;
+
+       /**
+        * This field specifies the number of seconds to allow a phy to consume
+        * power before yielding to another phy.
+        *
+        */
+       u8 phy_spin_up_delay_interval;
+
+       /**
+        * These timer values specify how long a link will remain open with no
+        * activity, in increments of a microsecond; they can be in increments
+        * of 100 microseconds if the uppermost bit is set.
+        *
+        */
+       u16 stp_inactivity_timeout;
+       u16 ssp_inactivity_timeout;
+
+       /**
+        * These timer values specify how long a link will remain open, in
+        * increments of 100 microseconds.
+        *
+        */
+       u16 stp_max_occupancy_timeout;
+       u16 ssp_max_occupancy_timeout;
+
+       /**
+        * This timer value specifies how long a link will remain open with no
+        * outbound traffic in increments of a microsecond.
+        *
+        */
+       u8 no_outbound_task_timeout;
+
+};
+
+#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
+#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
+#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
+
+struct sci_oem_params;
+int sci_oem_parameters_validate(struct sci_oem_params *oem);
+
+struct isci_orom;
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+                                         struct isci_orom *orom, int scu_index);
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
+
+struct isci_oem_hdr {
+       u8 sig[4];
+       u8 rev_major;
+       u8 rev_minor;
+       u16 len;
+       u8 checksum;
+       u8 reserved1;
+       u16 reserved2;
+} __attribute__ ((packed));
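+
+/*
+ * Note: the OEM block is validated by byte-summing this header together
+ * with the parameter payload and requiring the result to be zero (see
+ * isci_request_oprom() and isci_get_efi_var()); the checksum field is
+ * presumably chosen by the producing firmware so the sum comes out zero.
+ */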
+
+#else
+#define SCI_MAX_PORTS 4
+#define SCI_MAX_PHYS 4
+#define SCI_MAX_CONTROLLERS 2
+#endif
+
+#define ISCI_FW_NAME           "isci/isci_firmware.bin"
+
+#define ROMSIGNATURE           0xaa55
+
+#define ISCI_OEM_SIG           "$OEM"
+#define ISCI_OEM_SIG_SIZE      4
+#define ISCI_ROM_SIG           "ISCUOEMB"
+#define ISCI_ROM_SIG_SIZE      8
+
+#define ISCI_EFI_VENDOR_GUID   \
+       EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
+                       0x1a, 0x04, 0xc6)
+#define ISCI_EFI_VAR_NAME      "RstScuO"
+
+/* Allowed PORT configuration modes:
+ *
+ * APC - Automatic PORT configuration mode is defined by the OEM
+ * configuration parameters providing no PHY_MASK parameters for any PORT,
+ * i.e. there are no phys assigned to any of the ports at start.
+ *
+ * MPC - Manual PORT configuration mode is defined by the OEM configuration
+ * parameters providing a PHY_MASK value for any PORT.  It is assumed that
+ * any PORT with no PHY_MASK is an invalid port and not all PHYs must be
+ * assigned.  A PORT_PHY mask that assigns just a single PHY to a port,
+ * with no other PHYs being assigned, is sufficient to declare manual PORT
+ * configuration.
+ */
+enum sci_port_configuration_mode {
+       SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
+       SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
+};
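+
+/*
+ * Illustrative example (hypothetical values, not taken from any BIOS): an
+ * OEM parameter block selecting manual configuration with two 2-phy wide
+ * ports might carry
+ *
+ *	oem->controller.mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+ *	oem->ports[0].phy_mask = 0x3;	(phys 0 and 1)
+ *	oem->ports[1].phy_mask = 0xc;	(phys 2 and 3)
+ *
+ * while leaving every phy_mask zero selects automatic configuration.
+ */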
+
+struct sci_bios_oem_param_block_hdr {
+       uint8_t signature[ISCI_ROM_SIG_SIZE];
+       uint16_t total_block_length;
+       uint8_t hdr_length;
+       uint8_t version;
+       uint8_t preboot_source;
+       uint8_t num_elements;
+       uint16_t element_length;
+       uint8_t reserved[8];
+} __attribute__ ((packed));
+
+struct sci_oem_params {
+       struct {
+               uint8_t mode_type;
+               uint8_t max_concurrent_dev_spin_up;
+               uint8_t do_enable_ssc;
+               uint8_t reserved;
+       } controller;
+
+       struct {
+               uint8_t phy_mask;
+       } ports[SCI_MAX_PORTS];
+
+       struct sci_phy_oem_params {
+               struct {
+                       uint32_t high;
+                       uint32_t low;
+               } sas_address;
+
+               uint32_t afe_tx_amp_control0;
+               uint32_t afe_tx_amp_control1;
+               uint32_t afe_tx_amp_control2;
+               uint32_t afe_tx_amp_control3;
+       } phys[SCI_MAX_PHYS];
+} __attribute__ ((packed));
+
+struct isci_orom {
+       struct sci_bios_oem_param_block_hdr hdr;
+       struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
+} __attribute__ ((packed));
+
+#endif
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
new file mode 100644 (file)
index 0000000..9b266c7
--- /dev/null
@@ -0,0 +1,1934 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_REGISTERS_H_
+#define _SCU_REGISTERS_H_
+
+/**
+ * This file contains the constants and structures for the SCU memory mapped
+ * registers.
+ */
+
+#define SCU_VIIT_ENTRY_ID_MASK         (0xC0000000)
+#define SCU_VIIT_ENTRY_ID_SHIFT        (30)
+
+#define SCU_VIIT_ENTRY_FUNCTION_MASK   (0x0FF00000)
+#define SCU_VIIT_ENTRY_FUNCTION_SHIFT  (20)
+
+#define SCU_VIIT_ENTRY_IPPTMODE_MASK   (0x0001F800)
+#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT  (12)
+
+#define SCU_VIIT_ENTRY_LPVIE_MASK      (0x00000F00)
+#define SCU_VIIT_ENTRY_LPVIE_SHIFT     (8)
+
+#define SCU_VIIT_ENTRY_STATUS_MASK     (0x000000FF)
+#define SCU_VIIT_ENTRY_STATUS_SHIFT    (0)
+
+#define SCU_VIIT_ENTRY_ID_INVALID   (0 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIIT      (1 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_IIT       (2 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIRT_EXP  (3 << SCU_VIIT_ENTRY_ID_SHIFT)
+
+#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_INITIATOR            \
+       (\
+               SCU_VIIT_IPPT_SSP_INITIATOR  \
+               | SCU_VIIT_IPPT_SMP_INITIATOR  \
+               | SCU_VIIT_IPPT_STP_INITIATOR  \
+       )
+
+#define SCU_VIIT_STATUS_RNC_VALID      (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ADDRESS_VALID  (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_RNI_VALID      (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ALL_VALID      \
+       (\
+               SCU_VIIT_STATUS_RNC_VALID       \
+               | SCU_VIIT_STATUS_ADDRESS_VALID   \
+               | SCU_VIIT_STATUS_RNI_VALID       \
+       )
+
+#define SCU_VIIT_IPPT_SMP_TARGET    (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+
+/**
+ * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
+ *
+ *
+ */
+struct scu_viit_entry {
+       /**
+        * This must be encoded according to the type of initiator that is
+        * being constructed for this port.
+        */
+       u32 status;
+
+       /**
+        * Virtual initiator high SAS Address
+        */
+       u32 initiator_sas_address_hi;
+
+       /**
+        * Virtual initiator low SAS Address
+        */
+       u32 initiator_sas_address_lo;
+
+       /**
+        * This must be 0
+        */
+       u32 reserved;
+
+};
+
+
+/* IIT Status Defines */
+#define SCU_IIT_ENTRY_ID_MASK                (0xC0000000)
+#define SCU_IIT_ENTRY_ID_SHIFT               (30)
+
+#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK     (0x20000000)
+#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT    (29)
+
+#define SCU_IIT_ENTRY_LPI_MASK               (0x00000F00)
+#define SCU_IIT_ENTRY_LPI_SHIFT              (8)
+
+#define SCU_IIT_ENTRY_STATUS_MASK            (0x000000FF)
+#define SCU_IIT_ENTRY_STATUS_SHIFT           (0)
+
+/* IIT Remote Initiator Defines */
+#define SCU_IIT_ENTRY_REMOTE_TAG_MASK  (0x0000FFFF)
+#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
+
+#define SCU_IIT_ENTRY_REMOTE_RNC_MASK  (0x0FFF0000)
+#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
+
+#define SCU_IIT_ENTRY_ID_INVALID   (0 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIIT      (1 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_IIT       (2 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIRT_EXP  (3 << SCU_IIT_ENTRY_ID_SHIFT)
+
+/**
+ * struct scu_iit_entry - This will be implemented later when we support
+ *    virtual functions
+ *
+ *
+ */
+struct scu_iit_entry {
+       u32 status;
+       u32 remote_initiator_sas_address_hi;
+       u32 remote_initiator_sas_address_lo;
+       u32 remote_initiator;
+
+};
+
+/* Generate a value for an SCU register */
+#define SCU_GEN_VALUE(name, value) \
+       (((value) << name ## _SHIFT) & (name ## _MASK))
+
+/*
+ * Generate a bit value for an SCU register
+ * Make sure that the register MASK is just a single bit */
+#define SCU_GEN_BIT(name) \
+       SCU_GEN_VALUE(name, ((u32)1))
+
+#define SCU_SET_BIT(name, reg_value) \
+       ((reg_value) | SCU_GEN_BIT(name))
+
+#define SCU_CLEAR_BIT(name, reg_value) \
+       ((reg_value) & ~(SCU_GEN_BIT(name)))
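+
+/*
+ * Usage sketch (illustrative only): the generator macros paste the field
+ * name together with its _SHIFT/_MASK definitions, so for any field in
+ * this file, e.g.
+ *
+ *	SCU_GEN_VALUE(SCU_VIIT_ENTRY_FUNCTION, 2)
+ *
+ * expands to ((2 << 20) & 0x0FF00000) == 0x00200000, while
+ * SCU_SET_BIT()/SCU_CLEAR_BIT() set or clear single-bit fields in an
+ * existing register value.
+ */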
+
+/*
+ * *****************************************************************************
+ * Unions for bitfield definitions of SCU Registers
+ * SMU Post Context Port
+ * ***************************************************************************** */
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT         (0)
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK          (0x00000FFF)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT    (12)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK     (0x0000F000)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT       (16)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK        (0x00030000)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT       (18)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK        (0x00FC0000)
+#define SMU_POST_CONTEXT_PORT_RESERVED_MASK               (0xFF000000)
+
+#define SMU_PCP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
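+
+/*
+ * For example (illustrative values): combining a context index of 5 with
+ * logical port 1,
+ *
+ *	SMU_PCP_GEN_VAL(CONTEXT_INDEX, 5) |
+ *	SMU_PCP_GEN_VAL(LOGICAL_PORT_INDEX, 1)
+ *
+ * evaluates to 0x00001005.
+ */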
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT       (31)
+#define SMU_INTERRUPT_STATUS_COMPLETION_MASK        (0x80000000)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT    (1)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK     (0x00000002)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT      (0)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK       (0x00000001)
+#define SMU_INTERRUPT_STATUS_RESERVED_MASK          (0x7FFFFFFC)
+
+#define SMU_ISR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
+
+#define SMU_ISR_QUEUE_ERROR   SMU_ISR_GEN_BIT(QUEUE_ERROR)
+#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_ISR_COMPLETION    SMU_ISR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT         (31)
+#define SMU_INTERRUPT_MASK_COMPLETION_MASK          (0x80000000)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT      (1)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK       (0x00000002)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT        (0)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK         (0x00000001)
+#define SMU_INTERRUPT_MASK_RESERVED_MASK            (0x7FFFFFFC)
+
+#define SMU_IMR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
+
+#define SMU_IMR_QUEUE_ERROR   SMU_IMR_GEN_BIT(QUEUE_ERROR)
+#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_IMR_COMPLETION    SMU_IMR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT    (0)
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK     (0x0000001F)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT   (8)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK    (0x0000FF00)
+#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK  (0xFFFF00E0)
+
+#define SMU_ICC_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
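+
+/*
+ * For example (illustrative values):
+ *
+ *	SMU_ICC_GEN_VAL(NUMBER, 32) | SMU_ICC_GEN_VAL(TIMER, 5)
+ *
+ * evaluates to 0x00002005, coalescing up to 32 completions with a timer
+ * setting of 5.
+ */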
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_RANGE_START_SHIFT      (0)
+#define SMU_TASK_CONTEXT_RANGE_START_MASK       (0x00000FFF)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT     (16)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK      (0x0FFF0000)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT     (31)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK      (0x80000000)
+#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK    (0x7000F000)
+
+#define SMU_TCR_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
+
+#define SMU_TCR_GEN_BIT(name, value) \
+       SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT          (0)
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK           (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT        (15)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK         (0x00008000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT    (16)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK     (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT  (26)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK   (0x04000000)
+#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK          (0xF8004000)
+
+#define SMU_CQPR_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
+
+#define SMU_CQPR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT          (0)
+#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK           (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT        (15)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK         (0x00008000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT    (16)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK     (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT  (26)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK   (0x04000000)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT           (30)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK            (0x40000000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT     (31)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK      (0x80000000)
+#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK          (0x38004000)
+
+#define SMU_CQGR_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
+
+#define SMU_CQGR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
+
+#define SMU_CQGR_CYCLE_BIT \
+       SMU_CQGR_GEN_BIT(CYCLE_BIT)
+
+#define SMU_CQGR_EVENT_CYCLE_BIT \
+       SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
+
+#define SMU_CQGR_GET_POINTER_SET(value)        \
+       SMU_CQGR_GEN_VAL(POINTER, value)
+
+
+/* ***************************************************************************** */
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT  (0)
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK   (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT  (16)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK   (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK      (0xFC00C000)
+
+#define SMU_CQC_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
+
+#define SMU_CQC_QUEUE_LIMIT_SET(value) \
+       SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
+
+#define SMU_CQC_EVENT_LIMIT_SET(value) \
+       SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
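+
+/*
+ * For example (illustrative values):
+ *
+ *	SMU_CQC_QUEUE_LIMIT_SET(511) | SMU_CQC_EVENT_LIMIT_SET(15)
+ *
+ * evaluates to 0x000F01FF.
+ */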
+
+
+/* ***************************************************************************** */
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT    (0)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK     (0x00000FFF)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT    (12)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK     (0x00007000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT   (15)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK    (0x07FF8000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT   (27)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK    (0x08000000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK   (0xF0000000)
+
+#define SMU_DCC_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
+
+#define SMU_DCC_GET_MAX_PEG(value) \
+       (\
+               ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
+               >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT \
+       )
+
+#define SMU_DCC_GET_MAX_LP(value) \
+       (\
+               ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+               >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
+       )
+
+#define SMU_DCC_GET_MAX_TC(value) \
+       (\
+               ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+               >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
+       )
+
+#define SMU_DCC_GET_MAX_RNC(value) \
+       (\
+               ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+               >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
+       )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT      (0)
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK       (0x00000001)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT    (1)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK     (0x00000002)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT     (16)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK      (0x00010000)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT   (17)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK    (0x00020000)
+#define SMU_CONTROL_STATUS_RESERVED_MASK                        (0xFFFCFFFC)
+
+#define SMU_SMUCSR_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
+
+#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED        \
+       (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
+
+#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED  \
+       (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
+
+#define SCU_RAM_INIT_COMPLETED \
+       (\
+               SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+               | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+       )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT  (0)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK   (0x00000001)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT  (1)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK   (0x00000002)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT  (2)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK   (0x00000004)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT  (3)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK   (0x00000008)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT  (8)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK   (0x00000100)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT  (9)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK   (0x00000200)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT  (10)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK   (0x00000400)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT  (11)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK   (0x00000800)
+
+#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
+       ((1 << (pe)) << ((peg) * 8))
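+
+/*
+ * For example, SMU_RESET_PROTOCOL_ENGINE(1, 2) expands to
+ * (1 << 2) << (1 * 8) == 0x00000400, which matches
+ * SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK above.
+ */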
+
+#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+       (\
+               SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
+               | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
+               | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
+               | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
+       )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
+       (\
+               SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
+               | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
+       )
+
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT  (16)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK   (0x00010000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT  (17)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK   (0x00020000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT  (18)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK   (0x00040000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT  (19)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK   (0x00080000)
+
+#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
+       ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
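+
+/*
+ * For example, SMU_RESET_WIDE_PORT_QUEUE(0, 2) expands to
+ * (1 << 1) << 0 << 16 == 0x00020000, matching
+ * SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK below.
+ */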
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT      (20)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK       (0x00100000)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT      (21)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK       (0x00200000)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT       (22)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK        (0x00400000)
+
+/*
+ * If you are going to reset the protocol engine group, it makes sense to
+ * also reset all of the protocol engines within that group. */
+#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
+       (\
+               (1 << ((peg) + 20)) \
+               | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
+               | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \
+               | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+       )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
+       (\
+               SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
+               | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
+       )
+
+#define SMU_RESET_SCU()  (0xFFFFFFFF)
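+
+/*
+ * Illustrative sketch (not a definition from the hardware spec): to reset
+ * PEG0, its wide port queues and all four of its protocol engines in one
+ * shot, a driver could write
+ *
+ *	writel(SMU_RESET_PROTOCOL_ENGINE_GROUP(0), &smu->soft_reset_control);
+ *
+ * while SMU_RESET_SCU() writes all ones to reset the entire SCU.
+ */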
+
+
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT              (0)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK               (0x00000FFF)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT                (16)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK                 (0x0FFF0000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT    (31)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK     (0x80000000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK               (0x7000F000)
+
+#define SMU_TCA_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
+
+#define SMU_TCA_GEN_BIT(name) \
+       SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT   (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK    (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK      (0xFFFFF000)
+
+#define SCU_UFQC_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
+
+#define SCU_UFQC_QUEUE_SIZE_SET(value) \
+       SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT      (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK       (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT    (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK     (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK      (0xFFFFE000)
+
+#define SCU_UFQPP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
+
+#define SCU_UFQPP_GEN_BIT(name)        \
+       SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT      (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK       (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT    (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK     (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT   (31)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK    (0x80000000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK      (0x7FFFE000)
+
+#define SCU_UFQGP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
+
+#define SCU_UFQGP_GEN_BIT(name)        \
+       SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
+
+#define SCU_UFQGP_CYCLE_BIT(value) \
+       SCU_UFQGP_GEN_VAL(CYCLE_BIT, value)
+
+#define SCU_UFQGP_GET_POINTER(value) \
+       SCU_UFQGP_GEN_VAL(POINTER, value)
+
+#define SCU_UFQGP_ENABLE(value)        \
+       (SCU_UFQGP_GEN_BIT(ENABLE_BIT) | (value))
+
+#define SCU_UFQGP_DISABLE(value) \
+       (~SCU_UFQGP_GEN_BIT(ENABLE_BIT) & (value))
+
+#define SCU_UFQGP_VALUE(bit, value) \
+       (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
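+
+/*
+ * Illustrative sketch: the get pointer register combines the cycle bit and
+ * the queue index, so updating it while keeping the queue enabled could look
+ * like
+ *
+ *	u32 get = SCU_UFQGP_VALUE(cycle, index);
+ *	writel(SCU_UFQGP_ENABLE(get), &sdma->unsolicited_frame_get_pointer);
+ *
+ * with sdma pointing at struct scu_sdma_registers (defined below).
+ */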
+
+/* ***************************************************************************** */
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT                               (0)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK                                (0x0000FFFF)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT                    (16)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK                     (0x00010000)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT                            (17)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK                             (0x00020000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT                   (18)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK                    (0x00040000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT               (19)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK                (0x00080000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT     (20)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK      (0x00100000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT        (21)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK         (0x00200000)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT                        (22)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK                         (0x00400000)
+#define SCU_PDMA_CONFIGURATION_RESERVED_MASK                                        (0xFF800000)
+
+#define SCU_PDMACR_GEN_VALUE(name, value) \
+       SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
+
+#define SCU_PDMACR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
+
+#define SCU_PDMACR_BE_GEN_BIT(name) \
+       SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT                    (8)
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK                     (0x00000100)
+
+#define SCU_CDMACR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SCU Link Layer Registers
+ * ***************************************************************************** */
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT             (0)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK              (0x000000FF)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT           (8)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK            (0x0000FF00)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT   (16)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK    (0x00FF0000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT  (24)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK   (0xFF000000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_REQUIRED_MASK             (0x00000000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DEFAULT_MASK              (0x7D00676F)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RESERVED_MASK             (0x00FF0000)
+
+#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
+
+
+#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_SHIFT           (2)
+#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_MASK            (0x00000004)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT  (4)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK   (0x00000010)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT     (5)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK      (0x00000020)
+#define SCU_LINK_STATUS_RESERVED_MASK                       (0xFFFFFFCD)
+
+#define SCU_SAS_LLSTA_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
+
+
+/* TODO: Where is the SATA_PSELTOV register? */
+
+/*
+ * *****************************************************************************
+ * * SCU SAS Maximum Arbitration Wait Time Timeout Register
+ * ***************************************************************************** */
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT       (0)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK        (0x00007FFF)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT       (15)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK        (0x00008000)
+
+#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
+       SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
+
+#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
+
+
+/*
+ * TODO: Where is the SAS_LNKTOV register?
+ * TODO: Where is the SAS_PHYTOV register? */
+
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT            (1)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK             (0x00000002)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT            (2)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK             (0x00000004)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT            (3)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK             (0x00000008)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT          (8)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK           (0x00000100)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT         (9)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK          (0x00000200)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT         (10)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK          (0x00000400)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT         (11)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK          (0x00000800)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT           (16)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK            (0x000F0000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT    (24)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK     (0x0F000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT           (28)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK            (0x70000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK               (0x80F0F1F1)
+
+#define SCU_SAS_TIID_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
+
+#define SCU_SAS_TIID_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
+
+/* SAS Identify Frame PHY Identifier Register */
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT      (16)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK       (0x00010000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT   (17)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK    (0x00020000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT  (18)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK   (0x00040000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT                       (24)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK                        (0xFF000000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK                  (0x00F800FF)
+
+#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
+
+#define SCU_SAS_TIPID_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
+
+
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT                     (4)
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK                      (0x00000010)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT                          (6)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK                           (0x00000040)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT                   (7)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK                    (0x00000080)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT                 (8)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK                  (0x00000100)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT            (9)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK             (0x00000200)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT             (11)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK              (0x00000800)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT                    (12)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK                     (0x00001000)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT      (13)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK       (0x00002000)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT                          (14)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK                           (0x00004000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT                          (15)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK                           (0x00008000)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT        (23)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK         (0x00800000)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT              (27)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK               (0x08000000)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT    (28)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK     (0x10000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT                           (29)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK                            (0x20000000)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT                    (30)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK                     (0x40000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT                   (31)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK                    (0x80000000)
+#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK                             (0x0100000F)
+#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK                              (0x4180100F)
+#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK                             (0x00000000)
+
+#define SCU_SAS_PCFG_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
+
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT      (0)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK       (0x000007FF)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT    (16)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK     (0x00ff0000)
+
+#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
+
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT    (0)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK     (0x0003FFFF)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT   (31)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK    (0x80000000)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK  (0x7FFC0000)
+
+#define SCU_ENSPINUP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
+
+#define SCU_ENSPINUP_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT     (1)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK      (0x00000002)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT       (4)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK        (0x000000F0)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT     (8)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK      (0x00000100)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT      (9)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK       (0x00000201)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT     (10)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK      (0x00000401)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT      (11)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK       (0x00000801)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT     (12)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK      (0x00001001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT      (13)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK       (0x00002001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT   (31)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK    (0x80000000)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK        (0x00003F01)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK       (0x00000001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK       (0x7FFFC00D)
+
+#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
+
+#define SCU_SAS_PHYCAP_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT  (0)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK   (0x000000FF)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT         (31)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK          (0x80000000)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK                          (0x7FFFFF00)
+
+#define SCU_PSZGCR_GEN_VAL(name, value)        \
+       SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
+
+#define SCU_PSZGCR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
+
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT        (1)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK         (0x00000002)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT      (2)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK       (0x00000004)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT        (4)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK         (0x00000010)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT      (5)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK       (0x00000020)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK  (0x00030000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT      (19)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK       (0x00080000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK  (0x00300000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT      (23)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK       (0x00800000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK  (0x03000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT      (27)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK       (0x08000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK  (0x30000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT      (31)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK       (0x80000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK             (0x4444FFC9)
+
+#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
+       SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
+
+#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * Port Task Scheduler registers shift and mask values
+ * ***************************************************************************** */
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT     (0)
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK      (0x0000FFFF)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT         (16)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK          (0x00FF0000)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT          (24)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK           (0x01000000)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT           (25)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK            (0x02000000)
+#define SCU_PTSG_CONTROL_DEFAULT_MASK               (0x00020002)
+#define SCU_PTSG_CONTROL_REQUIRED_MASK              (0x00000000)
+#define SCU_PTSG_CONTROL_RESERVED_MASK              (0xFC000000)
+
+#define SCU_PTSGCR_GEN_VAL(name, val) \
+       SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
+
+#define SCU_PTSGCR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
+
+
+/* ***************************************************************************** */
+#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT          (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_MASK           (0x0000FFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK  (0xFFFF0000)
+
+#define SCU_RTCR_GEN_VAL(name, val) \
+       SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
+
+
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT  (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK   (0x00FFFFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK          (0xFF000000)
+
+#define SCU_RTCCR_GEN_VAL(name, val) \
+       SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT  (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK   (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT   (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK    (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK  (0xFFFFFFFC)
+
+#define SCU_PTSxCR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT             (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK              (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT    (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK     (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT             (2)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK              (0x00000004)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK                   (0xFFFFFFF8)
+
+#define SCU_PTSxSR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * SGPIO Register shift and mask values
+ * ***************************************************************************** */
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT                    (0)
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK                     (0x00000001)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT       (1)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK        (0x00000002)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK  (0x00000004)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT                  (15)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK                   (0x00008000)
+#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK                   (0xFFFF7FF8)
+
+#define SCU_SGICRx_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
+
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT      (0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK       (0x0000000F)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT      (4)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK       (0x000000F0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT      (8)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK       (0x00000F00)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT      (12)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK       (0x0000F000)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
+
+#define SCU_SGPBRx_GEN_VAL(name, value)        \
+       SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT        (0)
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK         (0x00000003)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT        (4)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK         (0x00000030)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT        (8)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK         (0x00000300)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT        (12)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK         (0x00003000)
+#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK   (0xFFFF8888)
+
+#define SCU_SGSDLRx_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT        (0)
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK         (0x00000003)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT        (4)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK         (0x00000030)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT        (8)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK         (0x00000300)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT        (12)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK         (0x00003000)
+#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK   (0xFFFF8888)
+
+#define SCU_SGSDURx_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_UPPER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT      (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK       (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT      (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK       (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT      (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK       (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT      (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK       (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDLRx_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT      (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK       (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT      (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK       (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT      (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK       (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT      (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK       (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDURx_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_ ## name, value)
+
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT            (0)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK             (0x0000000F)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK    (0xFFFFFFF0)
+
+#define SCU_SGVSCR_GEN_VAL(value) \
+       SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE, value)
+
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA0_SHIFT          (0)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA0_MASK           (0x00000003)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT   (2)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK    (0x00000004)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT     (3)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK      (0x00000008)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA1_SHIFT          (4)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA1_MASK           (0x00000030)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT   (6)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK    (0x00000040)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT     (7)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK      (0x00000080)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA2_SHIFT          (8)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA2_MASK           (0x00000300)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT   (10)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK    (0x00000400)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT     (11)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK      (0x00000800)
+#define SCU_SGPIO_OUTPUT_DATA_SELECT_RESERVED_MASK              (0xFFFFF000)
+
+#define SCU_SGODSR_GEN_VAL(name, value)        \
+       SCU_GEN_VALUE(SCU_SGPIO_OUTPUT_DATA_SELECT_ ## name, value)
+
+#define SCU_SGODSR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SGPIO_OUTPUT_DATA_SELECT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SMU Registers
+ * ***************************************************************************** */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SMU Registers
+ * These registers are based off of BAR0
+ *
+ * To calculate the offset for other functions use
+ *       BAR0 + FN# * SystemPageSize * 2
+ *
+ * The TCA is only accessible from FN#0 (Physical Function) and each
+ * is programmed by (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)) or
+ *    TCA0 for FN#0 is at BAR0 + 0x0400
+ *    TCA1 for FN#1 is at BAR0 + 0x0404
+ *    etc.
+ * ----------------------------------------------------------------------------
+ * Accessible to all FN#s */
+#define SCU_SMU_PCP_OFFSET          0x0000
+#define SCU_SMU_AMR_OFFSET          0x0004
+#define SCU_SMU_ISR_OFFSET          0x0010
+#define SCU_SMU_IMR_OFFSET          0x0014
+#define SCU_SMU_ICC_OFFSET          0x0018
+#define SCU_SMU_HTTLBAR_OFFSET      0x0020
+#define SCU_SMU_HTTUBAR_OFFSET      0x0024
+#define SCU_SMU_TCR_OFFSET          0x0028
+#define SCU_SMU_CQLBAR_OFFSET       0x0030
+#define SCU_SMU_CQUBAR_OFFSET       0x0034
+#define SCU_SMU_CQPR_OFFSET         0x0040
+#define SCU_SMU_CQGR_OFFSET         0x0044
+#define SCU_SMU_CQC_OFFSET          0x0048
+/* Accessible to FN#0 only */
+#define SCU_SMU_RNCLBAR_OFFSET      0x0080
+#define SCU_SMU_RNCUBAR_OFFSET      0x0084
+#define SCU_SMU_DCC_OFFSET          0x0090
+#define SCU_SMU_DFC_OFFSET          0x0094
+#define SCU_SMU_SMUCSR_OFFSET       0x0098
+#define SCU_SMU_SCUSRCR_OFFSET      0x009C
+#define SCU_SMU_SMAW_OFFSET         0x00A0
+#define SCU_SMU_SMDW_OFFSET         0x00A4
+/* Accessible to FN#0 only */
+#define SCU_SMU_TCA_OFFSET          0x0400
+/* Accessible to all FN#s */
+#define SCU_SMU_MT_MLAR0_OFFSET     0x2000
+#define SCU_SMU_MT_MUAR0_OFFSET     0x2004
+#define SCU_SMU_MT_MDR0_OFFSET      0x2008
+#define SCU_SMU_MT_VCR0_OFFSET      0x200C
+#define SCU_SMU_MT_MLAR1_OFFSET     0x2010
+#define SCU_SMU_MT_MUAR1_OFFSET     0x2014
+#define SCU_SMU_MT_MDR1_OFFSET      0x2018
+#define SCU_SMU_MT_VCR1_OFFSET      0x201C
+#define SCU_SMU_MPBA_OFFSET         0x3000
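+
+/*
+ * Illustrative sketch of the addressing rule described above, with PAGE_SIZE
+ * standing in for the SystemPageSize and bar0 for the ioremap()ed BAR0:
+ *
+ *	void __iomem *fn_regs = bar0 + fn * PAGE_SIZE * 2;
+ *	void __iomem *tca     = bar0 + SCU_SMU_TCA_OFFSET + fn * 0x04;
+ */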
+
+/**
+ * struct smu_registers - These are the SMU registers
+ *
+ *
+ */
+struct smu_registers {
+/* 0x0000 PCP */
+       u32 post_context_port;
+/* 0x0004 AMR */
+       u32 address_modifier;
+       u32 reserved_08;
+       u32 reserved_0C;
+/* 0x0010 ISR */
+       u32 interrupt_status;
+/* 0x0014 IMR */
+       u32 interrupt_mask;
+/* 0x0018 ICC */
+       u32 interrupt_coalesce_control;
+       u32 reserved_1C;
+/* 0x0020 HTTLBAR */
+       u32 host_task_table_lower;
+/* 0x0024 HTTUBAR */
+       u32 host_task_table_upper;
+/* 0x0028 TCR */
+       u32 task_context_range;
+       u32 reserved_2C;
+/* 0x0030 CQLBAR */
+       u32 completion_queue_lower;
+/* 0x0034 CQUBAR */
+       u32 completion_queue_upper;
+       u32 reserved_38;
+       u32 reserved_3C;
+/* 0x0040 CQPR */
+       u32 completion_queue_put;
+/* 0x0044 CQGR */
+       u32 completion_queue_get;
+/* 0x0048 CQC */
+       u32 completion_queue_control;
+       u32 reserved_4C;
+       u32 reserved_5x[4];
+       u32 reserved_6x[4];
+       u32 reserved_7x[4];
+/*
+ * Accessible to FN#0 only
+ * 0x0080 RNCLBAR */
+       u32 remote_node_context_lower;
+/* 0x0084 RNCUBAR */
+       u32 remote_node_context_upper;
+       u32 reserved_88;
+       u32 reserved_8C;
+/* 0x0090 DCC */
+       u32 device_context_capacity;
+/* 0x0094 DFC */
+       u32 device_function_capacity;
+/* 0x0098 SMUCSR */
+       u32 control_status;
+/* 0x009C SCUSRCR */
+       u32 soft_reset_control;
+/* 0x00A0 SMAW */
+       u32 mmr_address_window;
+/* 0x00A4 SMDW */
+       u32 mmr_data_window;
+       u32 reserved_A8;
+       u32 reserved_AC;
+/* A whole bunch of reserved space */
+       u32 reserved_Bx[4];
+       u32 reserved_Cx[4];
+       u32 reserved_Dx[4];
+       u32 reserved_Ex[4];
+       u32 reserved_Fx[4];
+       u32 reserved_1xx[64];
+       u32 reserved_2xx[64];
+       u32 reserved_3xx[64];
+/*
+ * Accessible to FN#0 only
+ * 0x0400 TCA */
+       u32 task_context_assignment[256];
+/* MSI-X registers not included */
+};
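+
+/*
+ * Illustrative sketch: the completion queue producer/consumer indices travel
+ * through the CQPR/CQGR pair, so a consumer acknowledges processed entries
+ * by writing back its updated get index:
+ *
+ *	u32 get = readl(&smu->completion_queue_get);
+ *	... process entries, advancing get ...
+ *	writel(get, &smu->completion_queue_get);
+ */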
+
+/*
+ * *****************************************************************************
+ * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_BASE               0x6000
+#define SCU_SDMA_PUFATLHAR_OFFSET   0x0000
+#define SCU_SDMA_PUFATUHAR_OFFSET   0x0004
+#define SCU_SDMA_UFLHBAR_OFFSET     0x0008
+#define SCU_SDMA_UFUHBAR_OFFSET     0x000C
+#define SCU_SDMA_UFQC_OFFSET        0x0010
+#define SCU_SDMA_UFQPP_OFFSET       0x0014
+#define SCU_SDMA_UFQGP_OFFSET       0x0018
+#define SCU_SDMA_PDMACR_OFFSET      0x001C
+#define SCU_SDMA_CDMACR_OFFSET      0x0080
+
+/**
+ * struct scu_sdma_registers - These are the SCU SDMA Registers
+ *
+ *
+ */
+struct scu_sdma_registers {
+/* 0x0000 PUFATLHAR */
+       u32 uf_address_table_lower;
+/* 0x0004 PUFATUHAR */
+       u32 uf_address_table_upper;
+/* 0x0008 UFLHBAR */
+       u32 uf_header_base_address_lower;
+/* 0x000C UFUHBAR */
+       u32 uf_header_base_address_upper;
+/* 0x0010 UFQC */
+       u32 unsolicited_frame_queue_control;
+/* 0x0014 UFQPP */
+       u32 unsolicited_frame_put_pointer;
+/* 0x0018 UFQGP */
+       u32 unsolicited_frame_get_pointer;
+/* 0x001C PDMACR */
+       u32 pdma_configuration;
+/* Reserved until offset 0x80 */
+       u32 reserved_0020_007C[0x18];
+/* 0x0080 CDMACR */
+       u32 cdma_configuration;
+/* Remainder of the SDMA register space */
+       u32 reserved_0084_0400[0xDF];
+
+};
+
+/*
+ * *****************************************************************************
+ * * SCU Link Registers
+ * ***************************************************************************** */
+#define SCU_PEG0_OFFSET    0x0000
+#define SCU_PEG1_OFFSET    0x8000
+
+#define SCU_TL0_OFFSET     0x0000
+#define SCU_TL1_OFFSET     0x0400
+#define SCU_TL2_OFFSET     0x0800
+#define SCU_TL3_OFFSET     0x0C00
+
+#define SCU_LL_OFFSET      0x0080
+#define SCU_LL0_OFFSET     (SCU_TL0_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL1_OFFSET     (SCU_TL1_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL2_OFFSET     (SCU_TL2_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL3_OFFSET     (SCU_TL3_OFFSET + SCU_LL_OFFSET)
+
+/* Transport Layer Offsets (PEG + TL) */
+#define SCU_TLCR_OFFSET         0x0000
+#define SCU_TLADTR_OFFSET       0x0004
+#define SCU_TLTTMR_OFFSET       0x0008
+#define SCU_TLEECR0_OFFSET      0x000C
+#define SCU_STPTLDARNI_OFFSET   0x0010
+
+
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT    (0)
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK     (0x00000001)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK  (0x00000002)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT     (3)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK      (0x00000008)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT         (4)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK          (0x00000010)
+#define SCU_TLCR_RESERVED_MASK                     (0xFFFFFFEB)
+
+#define SCU_TLCR_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_TLCR_ ## name)
+
+/**
+ * struct scu_transport_layer_registers - These are the SCU Transport Layer
+ *    registers
+ *
+ *
+ */
+struct scu_transport_layer_registers {
+       /* 0x0000 TLCR */
+       u32 control;
+       /* 0x0004 TLADTR */
+       u32 arbitration_delay_timer;
+       /* 0x0008 TLTTMR */
+       u32 timer_test_mode;
+       /* 0x000C reserved */
+       u32 reserved_0C;
+       /* 0x0010 STPTLDARNI */
+       u32 stp_rni;
+       /* 0x0014 TLFEWPORCTRL */
+       u32 tlfe_wpo_read_control;
+       /* 0x0018 TLFEWPORDATA */
+       u32 tlfe_wpo_read_data;
+       /* 0x001C RXTLSSCSR1 */
+       u32 rxtl_single_step_control_status_1;
+       /* 0x0020 RXTLSSCSR2 */
+       u32 rxtl_single_step_control_status_2;
+       /* 0x0024 AWTRDDCR */
+       u32 tlfe_awt_retry_delay_debug_control;
+       /* Remainder of TL memory space */
+       u32 reserved_0028_007F[0x16];
+
+};
+
+/* Protocol Engine Group Registers */
+#define SCU_SCUVZECRx_OFFSET        0x1080
+
+/* Link Layer Offsets (PEG + TL + LL) */
+#define SCU_SAS_SPDTOV_OFFSET       0x0000
+#define SCU_SAS_LLSTA_OFFSET        0x0004
+#define SCU_SATA_PSELTOV_OFFSET     0x0008
+#define SCU_SAS_TIMETOV_OFFSET      0x0010
+#define SCU_SAS_LOSTOT_OFFSET       0x0014
+#define SCU_SAS_LNKTOV_OFFSET       0x0018
+#define SCU_SAS_PHYTOV_OFFSET       0x001C
+#define SCU_SAS_AFERCNT_OFFSET      0x0020
+#define SCU_SAS_WERCNT_OFFSET       0x0024
+#define SCU_SAS_TIID_OFFSET         0x0028
+#define SCU_SAS_TIDNH_OFFSET        0x002C
+#define SCU_SAS_TIDNL_OFFSET        0x0030
+#define SCU_SAS_TISSAH_OFFSET       0x0034
+#define SCU_SAS_TISSAL_OFFSET       0x0038
+#define SCU_SAS_TIPID_OFFSET        0x003C
+#define SCU_SAS_TIRES2_OFFSET       0x0040
+#define SCU_SAS_ADRSTA_OFFSET       0x0044
+#define SCU_SAS_MAWTTOV_OFFSET      0x0048
+#define SCU_SAS_FRPLDFIL_OFFSET     0x0054
+#define SCU_SAS_RFCNT_OFFSET        0x0060
+#define SCU_SAS_TFCNT_OFFSET        0x0064
+#define SCU_SAS_RFDCNT_OFFSET       0x0068
+#define SCU_SAS_TFDCNT_OFFSET       0x006C
+#define SCU_SAS_LERCNT_OFFSET       0x0070
+#define SCU_SAS_RDISERRCNT_OFFSET   0x0074
+#define SCU_SAS_CRERCNT_OFFSET      0x0078
+#define SCU_STPCTL_OFFSET           0x007C
+#define SCU_SAS_PCFG_OFFSET         0x0080
+#define SCU_SAS_CLKSM_OFFSET        0x0084
+#define SCU_SAS_TXCOMWAKE_OFFSET    0x0088
+#define SCU_SAS_TXCOMINIT_OFFSET    0x008C
+#define SCU_SAS_TXCOMSAS_OFFSET     0x0090
+#define SCU_SAS_COMINIT_OFFSET      0x0094
+#define SCU_SAS_COMWAKE_OFFSET      0x0098
+#define SCU_SAS_COMSAS_OFFSET       0x009C
+#define SCU_SAS_SFERCNT_OFFSET      0x00A0
+#define SCU_SAS_CDFERCNT_OFFSET     0x00A4
+#define SCU_SAS_DNFERCNT_OFFSET     0x00A8
+#define SCU_SAS_PRSTERCNT_OFFSET    0x00AC
+#define SCU_SAS_CNTCTL_OFFSET       0x00B0
+#define SCU_SAS_SSPTOV_OFFSET       0x00B4
+#define SCU_FTCTL_OFFSET            0x00B8
+#define SCU_FRCTL_OFFSET            0x00BC
+#define SCU_FTWMRK_OFFSET           0x00C0
+#define SCU_ENSPINUP_OFFSET         0x00C4
+#define SCU_SAS_TRNTOV_OFFSET       0x00C8
+#define SCU_SAS_PHYCAP_OFFSET       0x00CC
+#define SCU_SAS_PHYCTL_OFFSET       0x00D0
+#define SCU_SAS_LLCTL_OFFSET        0x00D8
+#define SCU_AFE_XCVRCR_OFFSET       0x00DC
+#define SCU_AFE_LUTCR_OFFSET        0x00E0
+
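+/*
+ * Illustrative sketch: an absolute link layer register offset is the sum of
+ * its PEG, TL and LL components, e.g. the phy configuration register of
+ * PEG1 phy 2 sits at
+ *
+ *	SCU_PEG1_OFFSET + SCU_LL2_OFFSET + SCU_SAS_PCFG_OFFSET == 0x8900
+ */
+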
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT                  (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK                   (0x00000003)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1                   (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2                   (1)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3                   (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT            (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK             (0x000003FC)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT   (16)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK    (0x00010000)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK  (0x00020000)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT       (24)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK        (0xFF000000)
+#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED                             (0x00FCFC00)
+
+#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
+       SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
+
+#define SCU_SAS_LLCTL_GEN_BIT(name) \
+       SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
+
+
+/* #define SCU_FRXHECR_DCNT_OFFSET      0x00B0 */
+#define SCU_PSZGCR_OFFSET           0x00E4
+#define SCU_SAS_RECPHYCAP_OFFSET    0x00E8
+/* #define SCU_TX_LUTSEL_OFFSET         0x00B8 */
+
+#define SCU_SAS_PTxC_OFFSET         0x00D4 /* Same offset as SAS_TCTSTM */
+
+/**
+ * struct scu_link_layer_registers - SCU Link Layer Registers
+ *
+ *
+ */
+struct scu_link_layer_registers {
+/* 0x0000 SAS_SPDTOV */
+       u32 speed_negotiation_timers;
+/* 0x0004 SAS_LLSTA */
+       u32 link_layer_status;
+/* 0x0008 SATA_PSELTOV */
+       u32 port_selector_timeout;
+       u32 reserved0C;
+/* 0x0010 SAS_TIMETOV */
+       u32 timeout_unit_value;
+/* 0x0014 SAS_RCDTOV */
+       u32 rcd_timeout;
+/* 0x0018 SAS_LNKTOV */
+       u32 link_timer_timeouts;
+/* 0x001C SAS_PHYTOV */
+       u32 sas_phy_timeouts;
+/* 0x0020 SAS_AFERCNT */
+       u32 received_address_frame_error_counter;
+/* 0x0024 SAS_WERCNT */
+       u32 invalid_dword_counter;
+/* 0x0028 SAS_TIID */
+       u32 transmit_identification;
+/* 0x002C SAS_TIDNH */
+       u32 sas_device_name_high;
+/* 0x0030 SAS_TIDNL */
+       u32 sas_device_name_low;
+/* 0x0034 SAS_TISSAH */
+       u32 source_sas_address_high;
+/* 0x0038 SAS_TISSAL */
+       u32 source_sas_address_low;
+/* 0x003C SAS_TIPID */
+       u32 identify_frame_phy_id;
+/* 0x0040 SAS_TIRES2 */
+       u32 identify_frame_reserved;
+/* 0x0044 SAS_ADRSTA */
+       u32 received_address_frame;
+/* 0x0048 SAS_MAWTTOV */
+       u32 maximum_arbitration_wait_timer_timeout;
+/* 0x004C SAS_PTxC */
+       u32 transmit_primitive;
+/* 0x0050 SAS_RORES */
+       u32 error_counter_event_notification_control;
+/* 0x0054 SAS_FRPLDFIL */
+       u32 frxq_payload_fill_threshold;
+/* 0x0058 SAS_LLHANG_TOT */
+       u32 link_layer_hang_detection_timeout;
+       u32 reserved_5C;
+/* 0x0060 SAS_RFCNT */
+       u32 received_frame_count;
+/* 0x0064 SAS_TFCNT */
+       u32 transmit_frame_count;
+/* 0x0068 SAS_RFDCNT */
+       u32 received_dword_count;
+/* 0x006C SAS_TFDCNT */
+       u32 transmit_dword_count;
+/* 0x0070 SAS_LERCNT */
+       u32 loss_of_sync_error_count;
+/* 0x0074 SAS_RDISERRCNT */
+       u32 running_disparity_error_count;
+/* 0x0078 SAS_CRERCNT */
+       u32 received_frame_crc_error_count;
+/* 0x007C STPCTL */
+       u32 stp_control;
+/* 0x0080 SAS_PCFG */
+       u32 phy_configuration;
+/* 0x0084 SAS_CLKSM */
+       u32 clock_skew_management;
+/* 0x0088 SAS_TXCOMWAKE */
+       u32 transmit_comwake_signal;
+/* 0x008C SAS_TXCOMINIT */
+       u32 transmit_cominit_signal;
+/* 0x0090 SAS_TXCOMSAS */
+       u32 transmit_comsas_signal;
+/* 0x0094 SAS_COMINIT */
+       u32 cominit_control;
+/* 0x0098 SAS_COMWAKE */
+       u32 comwake_control;
+/* 0x009C SAS_COMSAS */
+       u32 comsas_control;
+/* 0x00A0 SAS_SFERCNT */
+       u32 received_short_frame_count;
+/* 0x00A4 SAS_CDFERCNT */
+       u32 received_frame_without_credit_count;
+/* 0x00A8 SAS_DNFERCNT */
+       u32 received_frame_after_done_count;
+/* 0x00AC SAS_PRSTERCNT */
+       u32 phy_reset_problem_count;
+/* 0x00B0 SAS_CNTCTL */
+       u32 counter_control;
+/* 0x00B4 SAS_SSPTOV */
+       u32 ssp_timer_timeout_values;
+/* 0x00B8 FTCTL */
+       u32 ftx_control;
+/* 0x00BC FRCTL */
+       u32 frx_control;
+/* 0x00C0 FTWMRK */
+       u32 ftx_watermark;
+/* 0x00C4 ENSPINUP */
+       u32 notify_enable_spinup_control;
+/* 0x00C8 SAS_TRNTOV */
+       u32 sas_training_sequence_timer_values;
+/* 0x00CC SAS_PHYCAP */
+       u32 phy_capabilities;
+/* 0x00D0 SAS_PHYCTL */
+       u32 phy_control;
+       u32 reserved_d4;
+/* 0x00D8 LLCTL */
+       u32 link_layer_control;
+/* 0x00DC AFE_XCVRCR */
+       u32 afe_xcvr_control;
+/* 0x00E0 AFE_LUTCR */
+       u32 afe_lookup_table_control;
+/* 0x00E4 PSZGCR */
+       u32 phy_source_zone_group_control;
+/* 0x00E8 SAS_RECPHYCAP */
+       u32 receive_phycap;
+       u32 reserved_ec;
+/* 0x00F0 SNAFERXRSTCTL */
+       u32 speed_negotiation_afe_rx_reset_control;
+/* 0x00F4 SAS_SSIPMCTL */
+       u32 power_management_control;
+/* 0x00F8 SAS_PSPREQ_PRIM */
+       u32 sas_pm_partial_request_primitive;
+/* 0x00FC SAS_PSSREQ_PRIM */
+       u32 sas_pm_slumber_request_primitive;
+/* 0x0100 SAS_PPSACK_PRIM */
+       u32 sas_pm_ack_primitive_register;
+/* 0x0104 SAS_PSNAK_PRIM */
+       u32 sas_pm_nak_primitive_register;
+/* 0x0108 SAS_SSIPMTOV */
+       u32 sas_primitive_timeout;
+       u32 reserved_10c;
+/* 0x0110 - 0x011C PLAPRDCTRLxREG */
+       u32 pla_product_control[4];
+/* 0x0120 PLAPRDSUMREG */
+       u32 pla_product_sum;
+/* 0x0124 PLACONTROLREG */
+       u32 pla_control;
+/* Remainder of the 896 byte (0x380) link layer register space */
+       u32 reserved_0128_037f[0x96];
+
+};
+
+/*
+ * 0x00D4 // Same offset as SAS_TCTSTM SAS_PTxC
+ *   u32   primitive_transmit_control; */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SGPIO
+ * ---------------------------------------------------------------------------- */
+#define SCU_SGPIO_OFFSET         0x1400
+
+/* #define SCU_SGPIO_OFFSET         0x6000   // later moves to 0x1400 see HSD 652625 */
+#define SCU_SGPIO_SGICR_OFFSET   0x0000
+#define SCU_SGPIO_SGPBR_OFFSET   0x0004
+#define SCU_SGPIO_SGSDLR_OFFSET  0x0008
+#define SCU_SGPIO_SGSDUR_OFFSET  0x000C
+#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
+#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
+#define SCU_SGPIO_SGVSCR_OFFSET  0x0018
+/* Addresses from 0x0020 to 0x003C */
+#define SCU_SGPIO_SGODSR_OFFSET  0x0020
+
+/**
+ * struct scu_sgpio_registers - SCU SGPIO Registers
+ *
+ *
+ */
+struct scu_sgpio_registers {
+/* 0x0000 SGPIO_SGICR */
+       u32 interface_control;
+/* 0x0004 SGPIO_SGPBR */
+       u32 blink_rate;
+/* 0x0008 SGPIO_SGSDLR */
+       u32 start_drive_lower;
+/* 0x000C SGPIO_SGSDUR */
+       u32 start_drive_upper;
+/* 0x0010 SGPIO_SGSIDLR */
+       u32 serial_input_lower;
+/* 0x0014 SGPIO_SGSIDUR */
+       u32 serial_input_upper;
+/* 0x0018 SGPIO_SGVSCR */
+       u32 vendor_specific_code;
+/* 0x001C reserved */
+       u32 reserved_1c;
+/* 0x0020 SGPIO_SGODSR */
+       u32 output_data_select[8];
+/* Remainder of the 256 byte memory space (0x1440-0x14FF) */
+       u32 reserved_1440_14ff[0x30];
+
+};
+
+/*
+ * *****************************************************************************
+ * * Defines for VIIT entry offsets
+ * * Access additional entries by SCU_VIIT_BASE + index * 0x10
+ * ***************************************************************************** */
+#define     SCU_VIIT_BASE     0x1c00
+
+struct scu_viit_registers {
+       u32 registers[256];
+};
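+
+/*
+ * Illustrative sketch: per the note above, each VIIT entry is 0x10 bytes
+ * (four u32s), so entry i begins at SCU_VIIT_BASE + i * 0x10, i.e.
+ * &viit->registers[i * 4] for a struct scu_viit_registers __iomem *viit.
+ */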
+
+/*
+ * *****************************************************************************
+ * * SCU PORT TASK SCHEDULER REGISTERS
+ * ***************************************************************************** */
+
+#define SCU_PTSG_BASE               0x1000
+
+#define SCU_PTSG_PTSGCR_OFFSET      0x0000
+#define SCU_PTSG_RTCR_OFFSET        0x0004
+#define SCU_PTSG_RTCCR_OFFSET       0x0008
+#define SCU_PTSG_PTS0CR_OFFSET      0x0010
+#define SCU_PTSG_PTS0SR_OFFSET      0x0014
+#define SCU_PTSG_PTS1CR_OFFSET      0x0018
+#define SCU_PTSG_PTS1SR_OFFSET      0x001C
+#define SCU_PTSG_PTS2CR_OFFSET      0x0020
+#define SCU_PTSG_PTS2SR_OFFSET      0x0024
+#define SCU_PTSG_PTS3CR_OFFSET      0x0028
+#define SCU_PTSG_PTS3SR_OFFSET      0x002C
+#define SCU_PTSG_PCSPE0CR_OFFSET    0x0030
+#define SCU_PTSG_PCSPE1CR_OFFSET    0x0034
+#define SCU_PTSG_PCSPE2CR_OFFSET    0x0038
+#define SCU_PTSG_PCSPE3CR_OFFSET    0x003C
+#define SCU_PTSG_ETMTSCCR_OFFSET    0x0040
+#define SCU_PTSG_ETMRNSCCR_OFFSET   0x0044
+
+/**
+ * struct scu_port_task_scheduler_registers - These are the control/status pairs
+ *    for each Port Task Scheduler.
+ *
+ *
+ */
+struct scu_port_task_scheduler_registers {
+       u32 control;
+       u32 status;
+};
+
+/**
+ * struct scu_port_task_scheduler_group_registers - These are the PORT Task
+ *    Scheduler registers
+ *
+ *
+ */
+struct scu_port_task_scheduler_group_registers {
+/* 0x0000 PTSGCR */
+       u32 control;
+/* 0x0004 RTCR */
+       u32 real_time_clock;
+/* 0x0008 RTCCR */
+       u32 real_time_clock_control;
+/* 0x000C */
+       u32 reserved_0C;
+/*
+ * 0x0010 PTS0CR
+ * 0x0014 PTS0SR
+ * 0x0018 PTS1CR
+ * 0x001C PTS1SR
+ * 0x0020 PTS2CR
+ * 0x0024 PTS2SR
+ * 0x0028 PTS3CR
+ * 0x002C PTS3SR */
+       struct scu_port_task_scheduler_registers port[4];
+/*
+ * 0x0030 PCSPE0CR
+ * 0x0034 PCSPE1CR
+ * 0x0038 PCSPE2CR
+ * 0x003C PCSPE3CR */
+       u32 protocol_engine[4];
+/* 0x0040 ETMTSCCR */
+       u32 tc_scanning_interval_control;
+/* 0x0044 ETMRNSCCR */
+       u32 rnc_scanning_interval_control;
+/* Remainder of memory space 128 bytes */
+       u32 reserved_1048_107f[0x0E];
+
+};
+
+#define SCU_PTSG_SCUVZECR_OFFSET        0x003C
+
+/*
+ * *****************************************************************************
+ * * AFE REGISTERS
+ * ***************************************************************************** */
+#define SCU_AFE_MMR_BASE                  0xE000
+
+/*
+ * AFE 0 is at offset 0x0800
+ * AFE 1 is at offset 0x0900
+ * AFE 2 is at offset 0x0a00
+ * AFE 3 is at offset 0x0b00 */
+struct scu_afe_transceiver {
+       /* 0x0000 AFE_XCVR_CTRL0 */
+       u32 afe_xcvr_control0;
+       /* 0x0004 AFE_XCVR_CTRL1 */
+       u32 afe_xcvr_control1;
+       /* 0x0008 */
+       u32 reserved_0008;
+       /* 0x000c afe_dfx_rx_control0 */
+       u32 afe_dfx_rx_control0;
+       /* 0x0010 AFE_DFX_RX_CTRL1 */
+       u32 afe_dfx_rx_control1;
+       /* 0x0014 */
+       u32 reserved_0014;
+       /* 0x0018 AFE_DFX_RX_STS0 */
+       u32 afe_dfx_rx_status0;
+       /* 0x001c AFE_DFX_RX_STS1 */
+       u32 afe_dfx_rx_status1;
+       /* 0x0020 */
+       u32 reserved_0020;
+       /* 0x0024 AFE_TX_CTRL */
+       u32 afe_tx_control;
+       /* 0x0028 AFE_TX_AMP_CTRL0 */
+       u32 afe_tx_amp_control0;
+       /* 0x002c AFE_TX_AMP_CTRL1 */
+       u32 afe_tx_amp_control1;
+       /* 0x0030 AFE_TX_AMP_CTRL2 */
+       u32 afe_tx_amp_control2;
+       /* 0x0034 AFE_TX_AMP_CTRL3 */
+       u32 afe_tx_amp_control3;
+       /* 0x0038 afe_tx_ssc_control */
+       u32 afe_tx_ssc_control;
+       /* 0x003c */
+       u32 reserved_003c;
+       /* 0x0040 AFE_RX_SSC_CTRL0 */
+       u32 afe_rx_ssc_control0;
+       /* 0x0044 AFE_RX_SSC_CTRL1 */
+       u32 afe_rx_ssc_control1;
+       /* 0x0048 AFE_RX_SSC_CTRL2 */
+       u32 afe_rx_ssc_control2;
+       /* 0x004c AFE_RX_EQ_STS0 */
+       u32 afe_rx_eq_status0;
+       /* 0x0050 AFE_RX_EQ_STS1 */
+       u32 afe_rx_eq_status1;
+       /* 0x0054 AFE_RX_CDR_STS */
+       u32 afe_rx_cdr_status;
+       /* 0x0058 */
+       u32 reserved_0058;
+       /* 0x005c AFE_CHAN_CTRL */
+       u32 afe_channel_control;
+       /* 0x0060-0x006c */
+       u32 reserved_0060_006c[0x04];
+       /* 0x0070 AFE_XCVR_EC_STS0 */
+       u32 afe_xcvr_error_capture_status0;
+       /* 0x0074 AFE_XCVR_EC_STS1 */
+       u32 afe_xcvr_error_capture_status1;
+       /* 0x0078 AFE_XCVR_EC_STS2 */
+       u32 afe_xcvr_error_capture_status2;
+       /* 0x007c afe_xcvr_ec_status3 */
+       u32 afe_xcvr_error_capture_status3;
+       /* 0x0080 AFE_XCVR_EC_STS4 */
+       u32 afe_xcvr_error_capture_status4;
+       /* 0x0084 AFE_XCVR_EC_STS5 */
+       u32 afe_xcvr_error_capture_status5;
+       /* 0x0088-0x00fc */
+       u32 reserved_0088_00fc[0x1e];
+};
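+
+/*
+ * Illustrative sketch: the four transceivers are packed at 0x100 byte
+ * intervals starting at 0x0800, which matches
+ * sizeof(struct scu_afe_transceiver) == 0x100, so transceiver n is simply
+ * &afe->scu_afe_xcvr[n] within struct scu_afe_registers below.
+ */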
+
+/**
+ * struct scu_afe_registers - AFE Registers
+ *
+ *
+ */
+struct scu_afe_registers {
+       /* 0x0000 AFE_BIAS_CTRL */
+       u32 afe_bias_control;
+       u32 reserved_0004;
+       /* 0x0008 AFE_PLL_CTRL0 */
+       u32 afe_pll_control0;
+       /* 0x000c AFE_PLL_CTRL1 */
+       u32 afe_pll_control1;
+       /* 0x0010 AFE_PLL_CTRL2 */
+       u32 afe_pll_control2;
+       /* 0x0014 AFE_CB_STS */
+       u32 afe_common_block_status;
+       /* 0x0018-0x007c */
+       u32 reserved_18_7c[0x1a];
+       /* 0x0080 AFE_PMSN_MCTRL0 */
+       u32 afe_pmsn_master_control0;
+       /* 0x0084 AFE_PMSN_MCTRL1 */
+       u32 afe_pmsn_master_control1;
+       /* 0x0088 AFE_PMSN_MCTRL2 */
+       u32 afe_pmsn_master_control2;
+       /* 0x008C-0x00fc */
+       u32 reserved_008c_00fc[0x1D];
+       /* 0x0100 AFE_DFX_MST_CTRL0 */
+       u32 afe_dfx_master_control0;
+       /* 0x0104 AFE_DFX_MST_CTRL1 */
+       u32 afe_dfx_master_control1;
+       /* 0x0108 AFE_DFX_DCL_CTRL */
+       u32 afe_dfx_dcl_control;
+       /* 0x010c AFE_DFX_DMON_CTRL */
+       u32 afe_dfx_digital_monitor_control;
+       /* 0x0110 AFE_DFX_AMONP_CTRL */
+       u32 afe_dfx_analog_p_monitor_control;
+       /* 0x0114 AFE_DFX_AMONN_CTRL */
+       u32 afe_dfx_analog_n_monitor_control;
+       /* 0x0118 AFE_DFX_NTL_STS */
+       u32 afe_dfx_ntl_status;
+       /* 0x011c AFE_DFX_FIFO_STS0 */
+       u32 afe_dfx_fifo_status0;
+       /* 0x0120 AFE_DFX_FIFO_STS1 */
+       u32 afe_dfx_fifo_status1;
+       /* 0x0124 AFE_DFX_MPAT_CTRL */
+       u32 afe_dfx_master_pattern_control;
+       /* 0x0128 AFE_DFX_P0_CTRL */
+       u32 afe_dfx_p0_control;
+       /* 0x012c-0x01a8 AFE_DFX_P0_DRx */
+       u32 afe_dfx_p0_data[32];
+       /* 0x01ac */
+       u32 reserved_01ac;
+       /* 0x01b0-0x020c AFE_DFX_P0_IRx */
+       u32 afe_dfx_p0_instruction[24];
+       /* 0x0210 */
+       u32 reserved_0210;
+       /* 0x0214 AFE_DFX_P1_CTRL */
+       u32 afe_dfx_p1_control;
+       /* 0x0218-0x0254 AFE_DFX_P1_DRx */
+       u32 afe_dfx_p1_data[16];
+       /* 0x0258-0x029c */
+       u32 reserved_0258_029c[0x12];
+       /* 0x02a0-0x02bc AFE_DFX_P1_IRx */
+       u32 afe_dfx_p1_instruction[8];
+       /* 0x02c0-0x2fc */
+       u32 reserved_02c0_02fc[0x10];
+       /* 0x0300 AFE_DFX_TX_PMSN_CTRL */
+       u32 afe_dfx_tx_pmsn_control;
+       /* 0x0304 AFE_DFX_RX_PMSN_CTRL */
+       u32 afe_dfx_rx_pmsn_control;
+       u32 reserved_0308;
+       /* 0x030c AFE_DFX_NOA_CTRL0 */
+       u32 afe_dfx_noa_control0;
+       /* 0x0310 AFE_DFX_NOA_CTRL1 */
+       u32 afe_dfx_noa_control1;
+       /* 0x0314 AFE_DFX_NOA_CTRL2 */
+       u32 afe_dfx_noa_control2;
+       /* 0x0318 AFE_DFX_NOA_CTRL3 */
+       u32 afe_dfx_noa_control3;
+       /* 0x031c AFE_DFX_NOA_CTRL4 */
+       u32 afe_dfx_noa_control4;
+       /* 0x0320 AFE_DFX_NOA_CTRL5 */
+       u32 afe_dfx_noa_control5;
+       /* 0x0324 AFE_DFX_NOA_CTRL6 */
+       u32 afe_dfx_noa_control6;
+       /* 0x0328 AFE_DFX_NOA_CTRL7 */
+       u32 afe_dfx_noa_control7;
+       /* 0x032c-0x07fc */
+       u32 reserved_032c_07fc[0x135];
+
+       /* 0x0800-0x0bfc */
+       struct scu_afe_transceiver scu_afe_xcvr[4];
+
+       /* 0x0c00-0x0ffc */
+       u32 reserved_0c00_0ffc[0x0100];
+};
+
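+/*
+ * Illustrative sketch (not part of this header): the offsets noted in the
+ * comments above can be cross-checked at compile time, e.g.:
+ *
+ *	BUILD_BUG_ON(sizeof(struct scu_afe_transceiver) != 0x100);
+ *	BUILD_BUG_ON(offsetof(struct scu_afe_registers, scu_afe_xcvr) != 0x800);
+ *
+ * assuming BUILD_BUG_ON() and offsetof() from the usual kernel headers.
+ */
+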
+struct scu_protocol_engine_group_registers {
+       u32 table[0xE0];
+};
+
+
+struct scu_viit_iit {
+       u32 table[256];
+};
+
+/**
+ * Placeholder for the ZONE partition table information; ZONING will not be
+ *    included in the 1.1 release.
+ */
+struct scu_zone_partition_table {
+       u32 table[2048];
+};
+
+/**
+ * Placeholder for the CRAM registers; it is not yet clear whether the driver
+ *    needs to read/write these registers.
+ */
+struct scu_completion_ram {
+       u32 ram[128];
+};
+
+/**
+ * Placeholder for the FBRAM registers; it is not yet clear whether the driver
+ *    needs to read/write these registers.
+ */
+struct scu_frame_buffer_ram {
+       u32 ram[128];
+};
+
+#define SCU_SCRATCH_RAM_SIZE_IN_DWORDS  256
+
+/**
+ * Placeholder for the scratch RAM registers.
+ */
+struct scu_scratch_ram {
+       u32 ram[SCU_SCRATCH_RAM_SIZE_IN_DWORDS];
+};
+
+/**
+ * Placeholder; the purpose of these registers is not yet known.
+ */
+struct noa_protocol_engine_partition {
+       u32 reserved[64];
+};
+
+/**
+ * Placeholder; the purpose of these registers is not yet known.
+ */
+struct noa_hub_partition {
+       u32 reserved[64];
+};
+
+/**
+ * Placeholder; the purpose of these registers is not yet known.
+ */
+struct noa_host_interface_partition {
+       u32 reserved[64];
+};
+
+/**
+ * struct transport_link_layer_pair - The SCU hardware pairs up the TL
+ *    registers with the LL registers, so they must be placed adjacent to
+ *    form the array of registers in the PEG.
+ */
+struct transport_link_layer_pair {
+       struct scu_transport_layer_registers tl;
+       struct scu_link_layer_registers ll;
+};
+
+/**
+ * struct scu_peg_registers - SCU Protocol Engine Group memory mapped register
+ *    space.  These registers are unique to each protocol engine group.  There
+ *    can be at most two PEGs for a single SCU part.
+ */
+struct scu_peg_registers {
+       struct transport_link_layer_pair pe[4];
+       struct scu_port_task_scheduler_group_registers ptsg;
+       struct scu_protocol_engine_group_registers peg;
+       struct scu_sgpio_registers sgpio;
+       u32 reserved_01500_1BFF[0x1C0];
+       struct scu_viit_entry viit[64];
+       struct scu_zone_partition_table zpt0;
+       struct scu_zone_partition_table zpt1;
+};
+
+/**
+ * struct scu_registers - SCU registers, including both PEG register sets when
+ *    that compile option is enabled.  All of these registers are in the
+ *    memory mapped space returned from BAR1.
+ */
+struct scu_registers {
+       /* 0x0000 - PEG 0 */
+       struct scu_peg_registers peg0;
+
+       /* 0x6000 - SDMA and Miscellaneous */
+       struct scu_sdma_registers sdma;
+       struct scu_completion_ram cram;
+       struct scu_frame_buffer_ram fbram;
+       u32 reserved_6800_69FF[0x80];
+       struct noa_protocol_engine_partition noa_pe;
+       struct noa_hub_partition noa_hub;
+       struct noa_host_interface_partition noa_if;
+       u32 reserved_6d00_7fff[0x4c0];
+
+       /* 0x8000 - PEG 1 */
+       struct scu_peg_registers peg1;
+
+       /* 0xE000 - AFE Registers */
+       struct scu_afe_registers afe;
+
+       /* 0xF000 - reserved */
+       u32 reserved_f000_211fff[0x80c00];
+
+       /* 0x212000 - scratch RAM */
+       struct scu_scratch_ram scratch_ram;
+};
+
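+/*
+ * Illustrative sketch (not part of this header): given the offsets noted in
+ * the comments above, the overall map can be sanity-checked at compile time:
+ *
+ *	BUILD_BUG_ON(offsetof(struct scu_registers, sdma) != 0x6000);
+ *	BUILD_BUG_ON(offsetof(struct scu_registers, peg1) != 0x8000);
+ *	BUILD_BUG_ON(offsetof(struct scu_registers, afe) != 0xe000);
+ *	BUILD_BUG_ON(offsetof(struct scu_registers, scratch_ram) != 0x212000);
+ */
+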
+#endif   /* _SCU_REGISTERS_HEADER_ */
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
new file mode 100644 (file)
index 0000000..b6e6368
--- /dev/null
@@ -0,0 +1,1501 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas.h>
+#include "isci.h"
+#include "port.h"
+#include "remote_device.h"
+#include "request.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "task.h"
+
+/**
+ * isci_remote_device_not_ready() - This function is called by the ihost when
+ *    the remote device is not ready. We mark the isci device as not ready
+ *    (clear "ready_for_io") and signal the waiting process.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
+ * @reason: This parameter specifies why the device is not ready.
+ *
+ * sci_lock is held on entrance to this function.
+ */
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+                                 struct isci_remote_device *idev, u32 reason)
+{
+       struct isci_request *ireq;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: isci_device = %p\n", __func__, idev);
+
+       switch (reason) {
+       case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
+               set_bit(IDEV_GONE, &idev->flags);
+               break;
+       case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+               set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+               /* Kill all outstanding requests for the device. */
+               list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
+
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: isci_device = %p request = %p\n",
+                               __func__, idev, ireq);
+
+                       sci_controller_terminate_request(ihost,
+                                                         idev,
+                                                         ireq);
+               }
+               /* Fall through into the default case... */
+       default:
+               clear_bit(IDEV_IO_READY, &idev->flags);
+               break;
+       }
+}
+
+/**
+ * isci_remote_device_ready() - This function is called by the ihost when the
+ *    remote device is ready. We mark the isci device as ready and signal the
+ *    waiting process.
+ * @ihost: our valid isci_host
+ * @idev: remote device
+ *
+ */
+static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       dev_dbg(&ihost->pdev->dev,
+               "%s: idev = %p\n", __func__, idev);
+
+       clear_bit(IDEV_IO_NCQERROR, &idev->flags);
+       set_bit(IDEV_IO_READY, &idev->flags);
+       if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
+               wake_up(&ihost->eventq);
+}
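+
+/*
+ * Illustrative note (assumption, not driver code): the wake_up() above pairs
+ * with a waiter elsewhere in the driver that sleeps on the same event queue,
+ * conceptually:
+ *
+ *	wait_event(ihost->eventq,
+ *		   !test_bit(IDEV_START_PENDING, &idev->flags));
+ */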
+
+/* Called once the remote node context is ready to be freed.
+ * The remote device can now report that its stop operation is complete.
+ */
+static void rnc_destruct_done(void *_dev)
+{
+       struct isci_remote_device *idev = _dev;
+
+       BUG_ON(idev->started_request_count != 0);
+       sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
+{
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+       enum sci_status status  = SCI_SUCCESS;
+       u32 i;
+
+       for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+               struct isci_request *ireq = ihost->reqs[i];
+               enum sci_status s;
+
+               if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+                   ireq->target_device != idev)
+                       continue;
+
+               s = sci_controller_terminate_request(ihost, idev, ireq);
+               if (s != SCI_SUCCESS)
+                       status = s;
+       }
+
+       return status;
+}
+
+enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
+                                       u32 timeout)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_STOPPED:
+               return SCI_SUCCESS;
+       case SCI_DEV_STARTING:
+               /* device not started so there had better be no requests */
+               BUG_ON(idev->started_request_count != 0);
+               sci_remote_node_context_destruct(&idev->rnc,
+                                                     rnc_destruct_done, idev);
+               /* Transition to the stopping state and wait for the
+                * remote node to complete being posted and invalidated.
+                */
+               sci_change_state(sm, SCI_DEV_STOPPING);
+               return SCI_SUCCESS;
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_IDLE:
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_SMP_DEV_CMD:
+               sci_change_state(sm, SCI_DEV_STOPPING);
+               if (idev->started_request_count == 0) {
+                       sci_remote_node_context_destruct(&idev->rnc,
+                                                             rnc_destruct_done, idev);
+                       return SCI_SUCCESS;
+               } else
+                       return sci_remote_device_terminate_requests(idev);
+               break;
+       case SCI_DEV_STOPPING:
+               /* All requests should have been terminated, but if there is an
+                * attempt to stop a device already in the stopping state, then
+                * try again to terminate.
+                */
+               return sci_remote_device_terminate_requests(idev);
+       case SCI_DEV_RESETTING:
+               sci_change_state(sm, SCI_DEV_STOPPING);
+               return SCI_SUCCESS;
+       }
+}
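+
+/*
+ * Illustrative usage (mirrors isci_remote_device_stop() later in this file):
+ * the call is made with the controller lock held, e.g.:
+ *
+ *	spin_lock_irqsave(&ihost->scic_lock, flags);
+ *	status = sci_remote_device_stop(idev, 50);
+ *	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ */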
+
+enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_SMP_DEV_CMD:
+       case SCI_DEV_STOPPING:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_RESETTING:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_IDLE:
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+               sci_change_state(sm, SCI_DEV_RESETTING);
+               return SCI_SUCCESS;
+       }
+}
+
+enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       if (state != SCI_DEV_RESETTING) {
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_change_state(sm, SCI_DEV_READY);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+                                              u32 suspend_type)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       if (state != SCI_STP_DEV_CMD) {
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       return sci_remote_node_context_suspend(&idev->rnc,
+                                                   suspend_type, NULL, NULL);
+}
+
+enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
+                                                    u32 frame_index)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+       enum sci_status status;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_STP_DEV_IDLE:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               /* Return the frame back to the controller */
+               sci_controller_release_frame(ihost, frame_index);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+       case SCI_DEV_STOPPING:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_RESETTING: {
+               struct isci_request *ireq;
+               struct ssp_frame_hdr hdr;
+               void *frame_header;
+               ssize_t word_cnt;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      &frame_header);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               word_cnt = sizeof(hdr) / sizeof(u32);
+               sci_swab32_cpy(&hdr, frame_header, word_cnt);
+
+               ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
+               if (ireq && ireq->target_device == idev) {
+                       /* The IO request is now in charge of releasing the frame */
+                       status = sci_io_request_frame_handler(ireq, frame_index);
+               } else {
+                       /* We could not map this tag to a valid IO
+                        * request; just toss the frame and continue.
+                        */
+                       sci_controller_release_frame(ihost, frame_index);
+               }
+               break;
+       }
+       case SCI_STP_DEV_NCQ: {
+               struct dev_to_host_fis *hdr;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&hdr);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               if (hdr->fis_type == FIS_SETDEVBITS &&
+                   (hdr->status & ATA_ERR)) {
+                       idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+
+                       /* TODO Check sactive and complete associated IO if any. */
+                       sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
+               } else if (hdr->fis_type == FIS_REGD2H &&
+                          (hdr->status & ATA_ERR)) {
+                       /*
+                        * Some devices return D2H FIS when an NCQ error is detected.
+                        * Treat this like an SDB error FIS ready reason.
+                        */
+                       idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+                       sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
+               } else
+                       status = SCI_FAILURE;
+
+               sci_controller_release_frame(ihost, frame_index);
+               break;
+       }
+       case SCI_STP_DEV_CMD:
+       case SCI_SMP_DEV_CMD:
+               /* The device does not process any UF received from the hardware while
+                * in this state.  All unsolicited frames are forwarded to the io request
+                * object.
+                */
+               status = sci_io_request_frame_handler(idev->working_request, frame_index);
+               break;
+       }
+
+       return status;
+}
+
+static bool is_remote_device_ready(struct isci_remote_device *idev)
+{
+
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+
+       switch (state) {
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_IDLE:
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_SMP_DEV_CMD:
+               return true;
+       default:
+               return false;
+       }
+}
+
+enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
+                                                    u32 event_code)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       enum sci_status status;
+
+       switch (scu_get_event_type(event_code)) {
+       case SCU_EVENT_TYPE_RNC_OPS_MISC:
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+       case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+               status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
+               break;
+       case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+               if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
+                       status = SCI_SUCCESS;
+
+                       /* Suspend the associated RNC */
+                       sci_remote_node_context_suspend(&idev->rnc,
+                                                             SCI_SOFTWARE_SUSPENSION,
+                                                             NULL, NULL);
+
+                       dev_dbg(scirdev_to_dev(idev),
+                               "%s: device: %p event code: %x: %s\n",
+                               __func__, idev, event_code,
+                               is_remote_device_ready(idev)
+                               ? "I_T_Nexus_Timeout event"
+                               : "I_T_Nexus_Timeout event in wrong state");
+
+                       break;
+               }
+       /* Else, fall through and treat as unhandled... */
+       default:
+               dev_dbg(scirdev_to_dev(idev),
+                       "%s: device: %p event code: %x: %s\n",
+                       __func__, idev, event_code,
+                       is_remote_device_ready(idev)
+                       ? "unexpected event"
+                       : "unexpected event in wrong state");
+               status = SCI_FAILURE_INVALID_STATE;
+               break;
+       }
+
+       if (status != SCI_SUCCESS)
+               return status;
+
+       if (state == SCI_STP_DEV_IDLE) {
+
+               /* We pick up suspension events handled specifically in this
+                * state and resume the RNC right away.
+                */
+               if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+                   scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
+                       status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+       }
+
+       return status;
+}
+
+static void sci_remote_device_start_request(struct isci_remote_device *idev,
+                                                struct isci_request *ireq,
+                                                enum sci_status status)
+{
+       struct isci_port *iport = idev->owning_port;
+
+       /* cleanup requests that failed after starting on the port */
+       if (status != SCI_SUCCESS)
+               sci_port_complete_io(iport, idev, ireq);
+       else {
+               kref_get(&idev->kref);
+               idev->started_request_count++;
+       }
+}
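+
+/*
+ * Illustrative note: the kref_get() above is balanced by the
+ * isci_put_device() performed when the request completes in
+ * sci_remote_device_complete_io(), so the device cannot be released while it
+ * still owns started requests.
+ */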
+
+enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
+                                               struct isci_remote_device *idev,
+                                               struct isci_request *ireq)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_port *iport = idev->owning_port;
+       enum sci_status status;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_DEV_STOPPING:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_RESETTING:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_READY:
+               /* Attempt to start an io request for this device object. The
+                * remote device object issues the start request for the io
+                * and, if successful, starts the request for the port object
+                * and then increments its own request count.
+                */
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               status = sci_request_start(ireq);
+               break;
+       case SCI_STP_DEV_IDLE: {
+               /* Handle the start io operation for a sata device that is in
+                * the command idle state:
+                * - Evaluate the type of IO request to be started.
+                * - If it is an NCQ request, change to the NCQ substate.
+                * - If it is any other command, change to the CMD substate.
+                *
+                * If this is a softreset we may want to have a different
+                * substate.
+                */
+               enum sci_remote_device_states new_state;
+               struct sas_task *task = isci_request_access_task(ireq);
+
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               status = sci_request_start(ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               if (task->ata_task.use_ncq)
+                       new_state = SCI_STP_DEV_NCQ;
+               else {
+                       idev->working_request = ireq;
+                       new_state = SCI_STP_DEV_CMD;
+               }
+               sci_change_state(sm, new_state);
+               break;
+       }
+       case SCI_STP_DEV_NCQ: {
+               struct sas_task *task = isci_request_access_task(ireq);
+
+               if (task->ata_task.use_ncq) {
+                       status = sci_port_start_io(iport, idev, ireq);
+                       if (status != SCI_SUCCESS)
+                               return status;
+
+                       status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+                       if (status != SCI_SUCCESS)
+                               break;
+
+                       status = sci_request_start(ireq);
+               } else
+                       return SCI_FAILURE_INVALID_STATE;
+               break;
+       }
+       case SCI_STP_DEV_AWAIT_RESET:
+               return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+       case SCI_SMP_DEV_IDLE:
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               status = sci_request_start(ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               idev->working_request = ireq;
+               sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
+               break;
+       case SCI_STP_DEV_CMD:
+       case SCI_SMP_DEV_CMD:
+               /* The device is already handling a command; it cannot accept
+                * new commands until this one is complete.
+                */
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_remote_device_start_request(idev, ireq, status);
+       return status;
+}
+
+static enum sci_status common_complete_io(struct isci_port *iport,
+                                         struct isci_remote_device *idev,
+                                         struct isci_request *ireq)
+{
+       enum sci_status status;
+
+       status = sci_request_complete(ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       status = sci_port_complete_io(iport, idev, ireq);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       sci_remote_device_decrement_request_count(idev);
+       return status;
+}
+
+enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
+                                                  struct isci_remote_device *idev,
+                                                  struct isci_request *ireq)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_port *iport = idev->owning_port;
+       enum sci_status status;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_STP_DEV_IDLE:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_DEV_READY:
+       case SCI_STP_DEV_AWAIT_RESET:
+       case SCI_DEV_RESETTING:
+               status = common_complete_io(iport, idev, ireq);
+               break;
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+               status = common_complete_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+                       /* This request caused a hardware error; the device
+                        * needs a LUN reset.  Force the state machine into the
+                        * AWAIT_RESET state so that the remaining IOs reach
+                        * the RNC state handler and are completed by the RNC
+                        * with a status of "DEVICE_RESET_REQUIRED" instead of
+                        * "INVALID STATE".
+                        */
+                       sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
+               } else if (idev->started_request_count == 0)
+                       sci_change_state(sm, SCI_STP_DEV_IDLE);
+               break;
+       case SCI_SMP_DEV_CMD:
+               status = common_complete_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+               sci_change_state(sm, SCI_SMP_DEV_IDLE);
+               break;
+       case SCI_DEV_STOPPING:
+               status = common_complete_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               if (idev->started_request_count == 0)
+                       sci_remote_node_context_destruct(&idev->rnc,
+                                                        rnc_destruct_done,
+                                                        idev);
+               break;
+       }
+
+       if (status != SCI_SUCCESS)
+               dev_err(scirdev_to_dev(idev),
+                       "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
+                       "could not complete\n", __func__, iport,
+                       idev, ireq, status);
+       else
+               isci_put_device(idev);
+
+       return status;
+}
+
+static void sci_remote_device_continue_request(void *dev)
+{
+       struct isci_remote_device *idev = dev;
+
+       /* we need to check if this request is still valid to continue. */
+       if (idev->working_request)
+               sci_controller_continue_io(idev->working_request);
+}
+
+enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
+                                                 struct isci_remote_device *idev,
+                                                 struct isci_request *ireq)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_port *iport = idev->owning_port;
+       enum sci_status status;
+
+       switch (state) {
+       case SCI_DEV_INITIAL:
+       case SCI_DEV_STOPPED:
+       case SCI_DEV_STARTING:
+       case SCI_SMP_DEV_IDLE:
+       case SCI_SMP_DEV_CMD:
+       case SCI_DEV_STOPPING:
+       case SCI_DEV_FAILED:
+       case SCI_DEV_RESETTING:
+       case SCI_DEV_FINAL:
+       default:
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       case SCI_STP_DEV_IDLE:
+       case SCI_STP_DEV_CMD:
+       case SCI_STP_DEV_NCQ:
+       case SCI_STP_DEV_NCQ_ERROR:
+       case SCI_STP_DEV_AWAIT_RESET:
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       goto out;
+
+               status = sci_request_start(ireq);
+               if (status != SCI_SUCCESS)
+                       goto out;
+
+               /* Note: If the remote device state is not IDLE this will
+                * replace the request that probably resulted in the task
+                * management request.
+                */
+               idev->working_request = ireq;
+               sci_change_state(sm, SCI_STP_DEV_CMD);
+
+               /* The remote node context must cleanup the TCi to NCQ mapping
+                * table.  The only way to do this correctly is to either write
+                * to the TLCR register or to invalidate and repost the RNC. In
+                * either case the remote node context state machine will take
+                * the correct action when the remote node context is suspended
+                * and later resumed.
+                */
+               sci_remote_node_context_suspend(&idev->rnc,
+                               SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+               sci_remote_node_context_resume(&idev->rnc,
+                               sci_remote_device_continue_request,
+                                                   idev);
+
+       out:
+               sci_remote_device_start_request(idev, ireq, status);
+               /* We need to let the controller start request handler know that
+                * it can't post TC yet. We will provide a callback function to
+                * post TC when RNC gets resumed.
+                */
+               return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
+       case SCI_DEV_READY:
+               status = sci_port_start_io(iport, idev, ireq);
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+               if (status != SCI_SUCCESS)
+                       break;
+
+               status = sci_request_start(ireq);
+               break;
+       }
+       sci_remote_device_start_request(idev, ireq, status);
+
+       return status;
+}
+
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
+{
+       struct isci_port *iport = idev->owning_port;
+       u32 context;
+
+       context = request |
+                 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+                 (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+                 idev->rnc.remote_node_index;
+
+       sci_controller_post_request(iport->owning_controller, context);
+}
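+
+/*
+ * Illustrative example (assuming ISCI_PEG is 0 for a single-PEG part): with
+ * physical_port_index == 1 and remote_node_index == 5, the posted context
+ * reduces to:
+ *
+ *	request | (1 << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 5
+ */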
+
+/* Called once the remote node context has transitioned to a
+ * ready state.  This is the indication that the remote device object can also
+ * transition to ready.
+ */
+static void remote_device_resume_done(void *_dev)
+{
+       struct isci_remote_device *idev = _dev;
+
+       if (is_remote_device_ready(idev))
+               return;
+
+       /* go 'ready' if we are not already in a ready state */
+       sci_change_state(&idev->sm, SCI_DEV_READY);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
+{
+       struct isci_remote_device *idev = _dev;
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       /* For NCQ operation we do not issue an isci_remote_device_not_ready().
+        * As a result, avoid sending the ready notification.
+        */
+       if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
+               isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       /* Initial state is a transitional state to the stopped state */
+       sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+/**
+ * sci_remote_device_destruct() - free remote node context and destruct
+ * @idev: This parameter specifies the remote device to be destructed.
+ *
+ * Remote device objects are a limited resource.  As such, they must be
+ * protected.  Thus calls to construct and destruct are mutually exclusive and
+ * non-reentrant.  The return value indicates whether the device was
+ * successfully destructed or some failure occurred: SCI_SUCCESS is returned
+ * if the device is successfully destructed;
+ * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied device isn't
+ * valid (e.g. it's already been destroyed, the handle isn't valid, etc.).
+ */
+static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       struct isci_host *ihost;
+
+       if (state != SCI_DEV_STOPPED) {
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       ihost = idev->owning_port->owning_controller;
+       sci_controller_free_remote_node_context(ihost, idev,
+                                                    idev->rnc.remote_node_index);
+       idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+       sci_change_state(sm, SCI_DEV_FINAL);
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device to be freed.
+ *
+ */
+static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       dev_dbg(&ihost->pdev->dev,
+               "%s: isci_device = %p\n", __func__, idev);
+
+       /* There should not be any outstanding io's. All paths to
+        * here should go through isci_remote_device_nuke_requests.
+        * If we hit this condition, we will need a way to complete
+        * io requests in process. */
+       BUG_ON(!list_empty(&idev->reqs_in_process));
+
+       sci_remote_device_destruct(idev);
+       list_del_init(&idev->node);
+       isci_put_device(idev);
+}
+
+static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+       u32 prev_state;
+
+       /* If we are entering from the stopping state let the SCI User know that
+        * the stop operation has completed.
+        */
+       prev_state = idev->sm.previous_state_id;
+       if (prev_state == SCI_DEV_STOPPING)
+               isci_remote_device_deconstruct(ihost, idev);
+
+       sci_controller_remote_device_stopped(ihost, idev);
+}
+
+static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       isci_remote_device_not_ready(ihost, idev,
+                                    SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
+}
+
+static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+       struct domain_device *dev = idev->domain_dev;
+
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+               sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
+       } else if (dev_is_expander(dev)) {
+               sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
+       } else
+               isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct domain_device *dev = idev->domain_dev;
+
+       if (dev->dev_type == SAS_END_DEV) {
+               struct isci_host *ihost = idev->owning_port->owning_controller;
+
+               isci_remote_device_not_ready(ihost, idev,
+                                            SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
+       }
+}
+
+static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       sci_remote_node_context_suspend(
+               &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+}
+
+static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       idev->working_request = NULL;
+       if (sci_remote_node_context_is_ready(&idev->rnc)) {
+               /*
+                * Since the RNC is ready, it's alright to finish completion
+                * processing (e.g. signal the remote device is ready).
+                */
+               sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
+       } else {
+               sci_remote_node_context_resume(&idev->rnc,
+                       sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
+                       idev);
+       }
+}
+
+static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       BUG_ON(idev->working_request == NULL);
+
+       isci_remote_device_not_ready(ihost, idev,
+                                    SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
+}
+
+static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
+               isci_remote_device_not_ready(ihost, idev,
+                                            idev->not_ready_reason);
+}
+
+static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       BUG_ON(idev->working_request == NULL);
+
+       isci_remote_device_not_ready(ihost, idev,
+                                    SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
+{
+       struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+       idev->working_request = NULL;
+}
+
+static const struct sci_base_state sci_remote_device_state_table[] = {
+       [SCI_DEV_INITIAL] = {
+               .enter_state = sci_remote_device_initial_state_enter,
+       },
+       [SCI_DEV_STOPPED] = {
+               .enter_state = sci_remote_device_stopped_state_enter,
+       },
+       [SCI_DEV_STARTING] = {
+               .enter_state = sci_remote_device_starting_state_enter,
+       },
+       [SCI_DEV_READY] = {
+               .enter_state = sci_remote_device_ready_state_enter,
+               .exit_state  = sci_remote_device_ready_state_exit
+       },
+       [SCI_STP_DEV_IDLE] = {
+               .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
+       },
+       [SCI_STP_DEV_CMD] = {
+               .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
+       },
+       [SCI_STP_DEV_NCQ] = { },
+       [SCI_STP_DEV_NCQ_ERROR] = {
+               .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
+       },
+       [SCI_STP_DEV_AWAIT_RESET] = { },
+       [SCI_SMP_DEV_IDLE] = {
+               .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
+       },
+       [SCI_SMP_DEV_CMD] = {
+               .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
+               .exit_state  = sci_smp_remote_device_ready_cmd_substate_exit,
+       },
+       [SCI_DEV_STOPPING] = { },
+       [SCI_DEV_FAILED] = { },
+       [SCI_DEV_RESETTING] = {
+               .enter_state = sci_remote_device_resetting_state_enter,
+               .exit_state  = sci_remote_device_resetting_state_exit
+       },
+       [SCI_DEV_FINAL] = { },
+};
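+
+/*
+ * Illustrative note: sci_change_state() runs the .exit_state handler of the
+ * current state and then the .enter_state handler of the new state, so e.g.
+ * a READY -> RESETTING transition invokes
+ * sci_remote_device_ready_state_exit() followed by
+ * sci_remote_device_resetting_state_enter().
+ */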
+
+/**
+ * sci_remote_device_construct() - common construction
+ * @iport: SAS/SATA port through which this device is accessed.
+ * @idev: remote device to construct
+ *
+ * This routine just performs benign initialization and does not
+ * allocate the remote_node_context which is left to
+ * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
+ * frees the remote_node_context(s) for the device.
+ */
+static void sci_remote_device_construct(struct isci_port *iport,
+                                 struct isci_remote_device *idev)
+{
+       idev->owning_port = iport;
+       idev->started_request_count = 0;
+
+       sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
+
+       sci_remote_node_context_construct(&idev->rnc,
+                                              SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+/**
+ * sci_remote_device_da_construct() - construct direct attached device.
+ * @iport: SAS/SATA port through which this device is accessed.
+ * @idev: remote device to construct.
+ *
+ * The information (e.g. IAF, Signature FIS, etc.) necessary to build
+ * the device is known to the SCI Core since it is contained in the
+ * sci_phy object.  Remote node context(s) is/are a global resource
+ * allocated by this routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
+                                                      struct isci_remote_device *idev)
+{
+       enum sci_status status;
+       struct domain_device *dev = idev->domain_dev;
+
+       sci_remote_device_construct(iport, idev);
+
+       /*
+        * This information is required to determine how many remote node
+        * context entries will be needed to store the remote node.
+        */
+       idev->is_direct_attached = true;
+       status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+                                                                 idev,
+                                                                 &idev->rnc.remote_node_index);
+
+       if (status != SCI_SUCCESS)
+               return status;
+
+       if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+           (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+               /* pass */;
+       else
+               return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+       idev->connection_rate = sci_port_get_max_allowed_speed(iport);
+
+       /* @todo Should the port width be assigned by reading all of the phys on the port? */
+       idev->device_port_width = 1;
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_ea_construct() - construct expander attached device
+ * @iport: SAS/SATA port through which this device is accessed.
+ * @idev: remote device to construct.
+ *
+ * Remote node context(s) is/are a global resource allocated by this
+ * routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
+                                                      struct isci_remote_device *idev)
+{
+       struct domain_device *dev = idev->domain_dev;
+       enum sci_status status;
+
+       sci_remote_device_construct(iport, idev);
+
+       status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+                                                                 idev,
+                                                                 &idev->rnc.remote_node_index);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+           (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+               /* pass */;
+       else
+               return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+       /*
+        * For SAS-2 the physical link rate is actually a logical link
+        * rate that incorporates multiplexing.  The SCU doesn't
+        * incorporate multiplexing and for the purposes of the
+        * connection the logical link rate is the same as the
+        * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
+        * one another, so this code works for both situations. */
+       idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
+                                        dev->linkrate);
+
+       /* @todo Should the port width be assigned by reading all of the phys on the port? */
+       idev->device_port_width = 1;
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_start() - This method will start the supplied remote
+ *    device.  This method enables normal IO requests to flow through to the
+ *    remote device.
+ * @idev: This parameter specifies the device to be started.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    start operation should complete.
+ *
+ * Returns an indication of whether the device was successfully started:
+ * SCI_SUCCESS is returned if the device was successfully started;
+ * SCI_FAILURE_INVALID_PHY is returned if the user attempts to start the
+ * device when no phys have been added to it.
+ */
+static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
+                                               u32 timeout)
+{
+       struct sci_base_state_machine *sm = &idev->sm;
+       enum sci_remote_device_states state = sm->current_state_id;
+       enum sci_status status;
+
+       if (state != SCI_DEV_STOPPED) {
+               dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+                        __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       status = sci_remote_node_context_resume(&idev->rnc,
+                                                    remote_device_resume_done,
+                                                    idev);
+       if (status != SCI_SUCCESS)
+               return status;
+
+       sci_change_state(sm, SCI_DEV_STARTING);
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status isci_remote_device_construct(struct isci_port *iport,
+                                                   struct isci_remote_device *idev)
+{
+       struct isci_host *ihost = iport->isci_host;
+       struct domain_device *dev = idev->domain_dev;
+       enum sci_status status;
+
+       if (dev->parent && dev_is_expander(dev->parent))
+               status = sci_remote_device_ea_construct(iport, idev);
+       else
+               status = sci_remote_device_da_construct(iport, idev);
+
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
+                       __func__, status);
+
+               return status;
+       }
+
+       /* start the device. */
+       status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
+
+       if (status != SCI_SUCCESS)
+               dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
+                        status);
+
+       return status;
+}
+
+void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: idev = %p\n", __func__, idev);
+
+       /* Cleanup all requests pending for this device. */
+       isci_terminate_pending_requests(ihost, idev);
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: idev = %p, done\n", __func__, idev);
+}
+
+/**
+ * isci_remote_device_alloc() - This function allocates the isci_remote_device
+ *    when a libsas dev_found message is received.
+ * @ihost: This parameter specifies the isci host object.
+ * @iport: This parameter specifies the isci_port connected to this device.
+ *
+ * Returns a pointer to a new isci_remote_device, or NULL if none is
+ * available.
+ */
+static struct isci_remote_device *
+isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
+{
+       struct isci_remote_device *idev;
+       int i;
+
+       for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+               idev = &ihost->devices[i];
+               if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
+                       break;
+       }
+
+       if (i >= SCI_MAX_REMOTE_DEVICES) {
+               dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
+               return NULL;
+       }
+
+       if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
+               return NULL;
+
+       if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
+               return NULL;
+
+       return idev;
+}
+
+void isci_remote_device_release(struct kref *kref)
+{
+       struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+       struct isci_host *ihost = idev->isci_port->isci_host;
+
+       idev->domain_dev = NULL;
+       idev->isci_port = NULL;
+       clear_bit(IDEV_START_PENDING, &idev->flags);
+       clear_bit(IDEV_STOP_PENDING, &idev->flags);
+       clear_bit(IDEV_IO_READY, &idev->flags);
+       clear_bit(IDEV_GONE, &idev->flags);
+       clear_bit(IDEV_EH, &idev->flags);
+       smp_mb__before_clear_bit();
+       clear_bit(IDEV_ALLOCATED, &idev->flags);
+       wake_up(&ihost->eventq);
+}
+
+/**
+ * isci_remote_device_stop() - This function is called internally to stop the
+ *    remote device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
+ *
+ * Returns the status of the ihost request to stop.
+ */
+enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       enum sci_status status;
+       unsigned long flags;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: isci_device = %p\n", __func__, idev);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+       set_bit(IDEV_GONE, &idev->flags);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* Kill all outstanding requests. */
+       isci_remote_device_nuke_requests(ihost, idev);
+
+       set_bit(IDEV_STOP_PENDING, &idev->flags);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       status = sci_remote_device_stop(idev, 50);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* Wait for the stop complete callback. */
+       if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+               /* nothing to wait for */;
+       else
+               wait_for_device_stop(ihost, idev);
+
+       return status;
+}
+
+/**
+ * isci_remote_device_gone() - This function is called by libsas when a domain
+ *    device is removed.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ */
+void isci_remote_device_gone(struct domain_device *dev)
+{
+       struct isci_host *ihost = dev_to_ihost(dev);
+       struct isci_remote_device *idev = dev->lldd_dev;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
+               __func__, dev, idev, idev->isci_port);
+
+       isci_remote_device_stop(ihost, idev);
+}
+
+/**
+ * isci_remote_device_found() - This function is called by libsas when a remote
+ *    device is discovered. A remote device object is created and started. The
+ *    function then sleeps until the sci core device started message is
+ *    received.
+ * @domain_dev: This parameter specifies the libsas domain device.
+ *
+ * Returns a status; zero indicates success.
+ */
+int isci_remote_device_found(struct domain_device *domain_dev)
+{
+       struct isci_host *isci_host = dev_to_ihost(domain_dev);
+       struct isci_port *isci_port;
+       struct isci_phy *isci_phy;
+       struct asd_sas_port *sas_port;
+       struct asd_sas_phy *sas_phy;
+       struct isci_remote_device *isci_device;
+       enum sci_status status;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: domain_device = %p\n", __func__, domain_dev);
+
+       wait_for_start(isci_host);
+
+       sas_port = domain_dev->port;
+       sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
+                                  port_phy_el);
+       isci_phy = to_iphy(sas_phy);
+       isci_port = isci_phy->isci_port;
+
+       /* we are being called for a device on this port,
+        * so it has to come up eventually
+        */
+       wait_for_completion(&isci_port->start_complete);
+
+       if ((isci_stopping == isci_port_get_state(isci_port)) ||
+           (isci_stopped == isci_port_get_state(isci_port)))
+               return -ENODEV;
+
+       isci_device = isci_remote_device_alloc(isci_host, isci_port);
+       if (!isci_device)
+               return -ENODEV;
+
+       kref_init(&isci_device->kref);
+       INIT_LIST_HEAD(&isci_device->node);
+
+       spin_lock_irq(&isci_host->scic_lock);
+       isci_device->domain_dev = domain_dev;
+       isci_device->isci_port = isci_port;
+       list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
+
+       set_bit(IDEV_START_PENDING, &isci_device->flags);
+       status = isci_remote_device_construct(isci_port, isci_device);
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_device = %p\n",
+               __func__, isci_device);
+
+       if (status == SCI_SUCCESS) {
+               /* device came up, advertise it to the world */
+               domain_dev->lldd_dev = isci_device;
+       } else
+               isci_put_device(isci_device);
+       spin_unlock_irq(&isci_host->scic_lock);
+
+       /* wait for the device ready callback. */
+       wait_for_device_start(isci_host, isci_device);
+
+       return status == SCI_SUCCESS ? 0 : -ENODEV;
+}
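
The function above shows the driver's recurring handshake: set a *_PENDING flag, kick off the operation, then sleep until the completion callback clears the flag and wakes the event queue. A small pthread-based analog of that wait-for-callback pattern (purely illustrative; the kernel side uses wait_event()/wake_up(), not condition variables):

    /* build: cc wait.c -pthread */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool start_pending;

    /* Stand-in for the hardware's "device started" callback. */
    static void *start_complete_cb(void *arg)
    {
            usleep(1000);               /* pretend the hardware takes a moment */
            pthread_mutex_lock(&lock);
            start_pending = false;      /* like clearing IDEV_START_PENDING */
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t cb;

            start_pending = true;       /* set before starting the device */
            pthread_create(&cb, NULL, start_complete_cb, NULL);

            /* wait_for_device_start() analog: sleep until the flag clears */
            pthread_mutex_lock(&lock);
            while (start_pending)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);

            printf("device start acknowledged\n");
            pthread_join(cb, NULL);
            return 0;
    }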
+
+/**
+ * isci_device_is_reset_pending() - This function will check if there is any
+ *    pending reset condition on the device.
+ * @isci_host: This parameter is the isci host object.
+ * @isci_device: This parameter is the isci_device object.
+ *
+ * Returns true if there is a reset pending for the device.
+ */
+bool isci_device_is_reset_pending(
+       struct isci_host *isci_host,
+       struct isci_remote_device *isci_device)
+{
+       struct isci_request *isci_request;
+       struct isci_request *tmp_req;
+       bool reset_is_pending = false;
+       unsigned long flags;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_device = %p\n", __func__, isci_device);
+
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+       /* Check for reset on all pending requests. */
+       list_for_each_entry_safe(isci_request, tmp_req,
+                                &isci_device->reqs_in_process, dev_node) {
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s: isci_device = %p request = %p\n",
+                       __func__, isci_device, isci_request);
+
+               if (isci_request->ttype == io_task) {
+                       struct sas_task *task = isci_request_access_task(
+                               isci_request);
+
+                       spin_lock(&task->task_state_lock);
+                       if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+                               reset_is_pending = true;
+                       spin_unlock(&task->task_state_lock);
+               }
+       }
+
+       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_device = %p reset_is_pending = %d\n",
+               __func__, isci_device, reset_is_pending);
+
+       return reset_is_pending;
+}
+
+/**
+ * isci_device_clear_reset_pending() - This function will clear any pending
+ *    reset condition flags on the device.
+ * @ihost: This parameter is the isci host object.
+ * @idev: This parameter is the isci_device object.
+ */
+void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+       struct isci_request *isci_request;
+       struct isci_request *tmp_req;
+       unsigned long flags = 0;
+
+       dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
+                __func__, idev, ihost);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       /* Clear reset pending on all pending requests. */
+       list_for_each_entry_safe(isci_request, tmp_req,
+                                &idev->reqs_in_process, dev_node) {
+               dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
+                        __func__, idev, isci_request);
+
+               if (isci_request->ttype == io_task) {
+
+                       unsigned long flags2;
+                       struct sas_task *task = isci_request_access_task(
+                               isci_request);
+
+                       spin_lock_irqsave(&task->task_state_lock, flags2);
+                       task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+                       spin_unlock_irqrestore(&task->task_state_lock, flags2);
+               }
+       }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
new file mode 100644 (file)
index 0000000..57ccfc3
--- /dev/null
@@ -0,0 +1,352 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REMOTE_DEVICE_H_
+#define _ISCI_REMOTE_DEVICE_H_
+#include <scsi/libsas.h>
+#include <linux/kref.h>
+#include "scu_remote_node_context.h"
+#include "remote_node_context.h"
+#include "port.h"
+
+enum sci_remote_device_not_ready_reason_code {
+       SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
+       SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
+       SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
+       SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
+       SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
+       SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
+};
+
+/**
+ * isci_remote_device - isci representation of a sas expander / end point
+ * @device_port_width: hw setting for number of simultaneous connections
+ * @connection_rate: per-taskcontext connection rate for this device
+ * @working_request: SATA requests have no tag, so for unaccelerated
+ *                   protocols we need a method to associate unsolicited
+ *                   frames with a pending request
+ */
+struct isci_remote_device {
+       #define IDEV_START_PENDING 0
+       #define IDEV_STOP_PENDING 1
+       #define IDEV_ALLOCATED 2
+       #define IDEV_EH 3
+       #define IDEV_GONE 4
+       #define IDEV_IO_READY 5
+       #define IDEV_IO_NCQERROR 6
+       unsigned long flags;
+       struct kref kref;
+       struct isci_port *isci_port;
+       struct domain_device *domain_dev;
+       struct list_head node;
+       struct list_head reqs_in_process;
+       struct sci_base_state_machine sm;
+       u32 device_port_width;
+       enum sas_linkrate connection_rate;
+       bool is_direct_attached;
+       struct isci_port *owning_port;
+       struct sci_remote_node_context rnc;
+       /* XXX unify with device reference counting and delete */
+       u32 started_request_count;
+       struct isci_request *working_request;
+       u32 not_ready_reason;
+};
+
+#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
+
+/* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
+{
+       struct isci_remote_device *idev = dev->lldd_dev;
+
+       if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
+               kref_get(&idev->kref);
+               return idev;
+       }
+
+       return NULL;
+}
+
+void isci_remote_device_release(struct kref *kref);
+static inline void isci_put_device(struct isci_remote_device *idev)
+{
+       if (idev)
+               kref_put(&idev->kref, isci_remote_device_release);
+}
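
These two helpers encode the lifetime rule for remote devices: a lookup only succeeds under the sci lock while IDEV_GONE is clear, and every successful lookup must be balanced by isci_put_device(), whose final kref_put() fires isci_remote_device_release(). A userspace sketch of that get/put discipline, with a bare atomic counter standing in for struct kref (names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            atomic_int refcount;
            int gone;                   /* plays the role of IDEV_GONE */
    };

    /* Caller must hold the lock that serializes against teardown,
     * mirroring the sci_lock rule above. */
    static struct obj *obj_lookup(struct obj *o)
    {
            if (o && !o->gone) {
                    atomic_fetch_add(&o->refcount, 1);  /* kref_get() */
                    return o;
            }
            return NULL;
    }

    static void obj_put(struct obj *o)
    {
            if (o && atomic_fetch_sub(&o->refcount, 1) == 1) {
                    printf("last reference dropped, releasing\n");
                    free(o);            /* release callback analog */
            }
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            atomic_init(&o->refcount, 1);       /* kref_init() */
            obj_put(obj_lookup(o));             /* transient user: +1 then -1 */
            obj_put(o);                         /* final put frees the object */
            return 0;
    }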
+
+enum sci_status isci_remote_device_stop(struct isci_host *ihost,
+                                       struct isci_remote_device *idev);
+void isci_remote_device_nuke_requests(struct isci_host *ihost,
+                                     struct isci_remote_device *idev);
+void isci_remote_device_gone(struct domain_device *domain_dev);
+int isci_remote_device_found(struct domain_device *domain_dev);
+bool isci_device_is_reset_pending(struct isci_host *ihost,
+                                 struct isci_remote_device *idev);
+void isci_device_clear_reset_pending(struct isci_host *ihost,
+                                    struct isci_remote_device *idev);
+/**
+ * sci_remote_device_stop() - This method will stop both transmission and
+ *    reception of link activity for the supplied remote device.  This method
+ *    disables normal IO requests from flowing through to the remote device.
+ * @remote_device: This parameter specifies the device to be stopped.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    stop operation should complete.
+ *
+ * An indication of whether the device was successfully stopped. SCI_SUCCESS
+ * This value is returned if the transmission and reception for the device was
+ * successfully stopped.
+ */
+enum sci_status sci_remote_device_stop(
+       struct isci_remote_device *idev,
+       u32 timeout);
+
+/**
+ * sci_remote_device_reset() - This method will reset the device making it
+ *    ready for operation. This method must be called anytime the device is
+ *    reset either through a SMP phy control or a port hard reset request.
+ * @remote_device: This parameter specifies the device to be reset.
+ *
+ * This method does not actually cause the device hardware to be reset. This
+ * method resets the software object so that it will be operational after a
+ * device hardware reset completes. An indication of whether the device reset
+ * was accepted. SCI_SUCCESS This value is returned if the device reset is
+ * started.
+ */
+enum sci_status sci_remote_device_reset(
+       struct isci_remote_device *idev);
+
+/**
+ * sci_remote_device_reset_complete() - This method informs the device object
+ *    that the reset operation is complete and the device can resume operation
+ *    again.
+ * @remote_device: This parameter specifies the device which is to be informed
+ *    of the reset complete operation.
+ *
+ * An indication that the device is resuming operation. SCI_SUCCESS the device
+ * is resuming operation.
+ */
+enum sci_status sci_remote_device_reset_complete(
+       struct isci_remote_device *idev);
+
+/**
+ * enum sci_remote_device_states - This enumeration depicts all the states
+ *    for the common remote device state machine.
+ *
+ *
+ */
+enum sci_remote_device_states {
+       /**
+        * Simply the initial state for the base remote device state machine.
+        */
+       SCI_DEV_INITIAL,
+
+       /**
+        * This state indicates that the remote device has successfully been
+        * stopped.  In this state no new IO operations are permitted.
+        * This state is entered from the INITIAL state.
+        * This state is entered from the STOPPING state.
+        */
+       SCI_DEV_STOPPED,
+
+       /**
+        * This state indicates that the remote device is in the process of
+        * becoming ready (i.e. starting).  In this state no new IO operations
+        * are permitted.
+        * This state is entered from the STOPPED state.
+        */
+       SCI_DEV_STARTING,
+
+       /**
+        * This state indicates the remote device is now ready.  Thus, the user
+        * is able to perform IO operations on the remote device.
+        * This state is entered from the STARTING state.
+        */
+       SCI_DEV_READY,
+
+       /**
+        * This is the idle substate for the stp remote device.  When there are no
+        * active IOs for the device it is in this state.
+        */
+       SCI_STP_DEV_IDLE,
+
+       /**
+        * This is the command state for the STP remote device.  This state is
+        * entered when the device is processing a non-NCQ command.  The device object
+        * will fail any new start IO requests until this command is complete.
+        */
+       SCI_STP_DEV_CMD,
+
+       /**
+        * This is the NCQ state for the STP remote device.  This state is entered
+        * when the device is processing an NCQ request.  It will remain in this state
+        * so long as there is one or more NCQ requests being processed.
+        */
+       SCI_STP_DEV_NCQ,
+
+       /**
+        * This is the NCQ error state for the STP remote device.  This state is
+        * entered when an SDB error FIS is received by the device object while in the
+        * NCQ state.  The device object will only accept a READ LOG command while in
+        * this state.
+        */
+       SCI_STP_DEV_NCQ_ERROR,
+
+       /**
+        * This READY substate indicates the device is waiting for a RESET task
+        * to recover from a certain hardware-specific error.
+        */
+       SCI_STP_DEV_AWAIT_RESET,
+
+       /**
+        * This is the idle substate for the SMP remote device.  When there are
+        * no active IOs for the device it is in this state.
+        */
+       SCI_SMP_DEV_IDLE,
+
+       /**
+        * This is the command state for the SMP remote device.  This state is
+        * entered while the device is processing an SMP request; the device
+        * object will fail any new start IO requests until it completes.
+        */
+       SCI_SMP_DEV_CMD,
+
+       /**
+        * This state indicates that the remote device is in the process of
+        * stopping.  In this state no new IO operations are permitted, but
+        * existing IO operations are allowed to complete.
+        * This state is entered from the READY state.
+        * This state is entered from the FAILED state.
+        */
+       SCI_DEV_STOPPING,
+
+       /**
+        * This state indicates that the remote device has failed.
+        * In this state no new IO operations are permitted.
+        * This state is entered from the INITIALIZING state.
+        * This state is entered from the READY state.
+        */
+       SCI_DEV_FAILED,
+
+       /**
+        * This state indicates the device is being reset.
+        * In this state no new IO operations are permitted.
+        * This state is entered from the READY state.
+        */
+       SCI_DEV_RESETTING,
+
+       /**
+        * Simply the final state for the base remote device state machine.
+        */
+       SCI_DEV_FINAL,
+};
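
Only a handful of these states accept new IO; the rest sequence start/stop, reset, and error recovery. As a reader's aid, here is a hypothetical helper (not part of the driver) summarizing which states permit new requests according to the comments above:

    /* Hypothetical summary helper, not part of the driver. */
    static inline bool sci_dev_state_accepts_io(enum sci_remote_device_states s)
    {
            switch (s) {
            case SCI_DEV_READY:         /* normal operation */
            case SCI_STP_DEV_IDLE:      /* STP device with no active IO */
            case SCI_STP_DEV_NCQ:       /* further NCQ requests may be queued */
            case SCI_SMP_DEV_IDLE:      /* SMP device with no active request */
                    return true;
            default:
                    return false;       /* starting/stopping/suspended/failed */
            }
    }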
+
+static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
+{
+       struct isci_remote_device *idev;
+
+       idev = container_of(rnc, typeof(*idev), rnc);
+
+       return idev;
+}
+
+static inline bool dev_is_expander(struct domain_device *dev)
+{
+       return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+}
+
+static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
+{
+       /* XXX delete this voodoo when converting to the top-level device
+        * reference count
+        */
+       if (WARN_ONCE(idev->started_request_count == 0,
+                     "%s: tried to decrement started_request_count past 0!?",
+                       __func__))
+               /* pass */;
+       else
+               idev->started_request_count--;
+}
+
+enum sci_status sci_remote_device_frame_handler(
+       struct isci_remote_device *idev,
+       u32 frame_index);
+
+enum sci_status sci_remote_device_event_handler(
+       struct isci_remote_device *idev,
+       u32 event_code);
+
+enum sci_status sci_remote_device_start_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_remote_device_start_task(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_remote_device_complete_io(
+       struct isci_host *ihost,
+       struct isci_remote_device *idev,
+       struct isci_request *ireq);
+
+enum sci_status sci_remote_device_suspend(
+       struct isci_remote_device *idev,
+       u32 suspend_type);
+
+void sci_remote_device_post_request(
+       struct isci_remote_device *idev,
+       u32 request);
+
+#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
new file mode 100644 (file)
index 0000000..748e833
--- /dev/null
@@ -0,0 +1,627 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "isci.h"
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "scu_task_context.h"
+
+/**
+ * sci_remote_node_context_is_ready() - check the state of an RNC
+ * @sci_rnc: The remote node context object to check.
+ *
+ * This method returns true if the remote node context is in the READY state
+ * and false otherwise.
+ */
+bool sci_remote_node_context_is_ready(
+       struct sci_remote_node_context *sci_rnc)
+{
+       return sci_rnc->sm.current_state_id == SCI_RNC_READY;
+}
+
+static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
+{
+       if (id < ihost->remote_node_entries &&
+           ihost->device_table[id])
+               return &ihost->remote_node_context_table[id];
+
+       return NULL;
+}
+
+static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
+{
+       struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+       struct domain_device *dev = idev->domain_dev;
+       int rni = sci_rnc->remote_node_index;
+       union scu_remote_node_context *rnc;
+       struct isci_host *ihost;
+       __le64 sas_addr;
+
+       ihost = idev->owning_port->owning_controller;
+       rnc = sci_rnc_by_id(ihost, rni);
+
+       memset(rnc, 0, sizeof(union scu_remote_node_context)
+               * sci_remote_device_node_count(idev));
+
+       rnc->ssp.remote_node_index = rni;
+       rnc->ssp.remote_node_port_width = idev->device_port_width;
+       rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
+
+       /* sas address is __be64, context ram format is __le64 */
+       sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
+       rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
+       rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
+
+       rnc->ssp.nexus_loss_timer_enable = true;
+       rnc->ssp.check_bit               = false;
+       rnc->ssp.is_valid                = false;
+       rnc->ssp.is_remote_node_context  = true;
+       rnc->ssp.function_number         = 0;
+
+       rnc->ssp.arbitration_wait_time = 0;
+
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+               rnc->ssp.connection_occupancy_timeout =
+                       ihost->user_parameters.stp_max_occupancy_timeout;
+               rnc->ssp.connection_inactivity_timeout =
+                       ihost->user_parameters.stp_inactivity_timeout;
+       } else {
+               rnc->ssp.connection_occupancy_timeout  =
+                       ihost->user_parameters.ssp_max_occupancy_timeout;
+               rnc->ssp.connection_inactivity_timeout =
+                       ihost->user_parameters.ssp_inactivity_timeout;
+       }
+
+       rnc->ssp.initial_arbitration_wait_time = 0;
+
+       /* Open Address Frame Parameters */
+       rnc->ssp.oaf_connection_rate = idev->connection_rate;
+       rnc->ssp.oaf_features = 0;
+       rnc->ssp.oaf_source_zone_group = 0;
+       rnc->ssp.oaf_more_compatibility_features = 0;
+}
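
One subtle step above: the SAS address arrives in wire (big-endian) order, is converted by cpu_to_le64() into the byte layout the context RAM expects, and is then split into two 32-bit register words. A standalone sketch of the hi/lo split, with made-up data and local macros standing in for the kernel's upper_32_bits()/lower_32_bits():

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-ins for the kernel helpers. */
    #define upper_32_bits(n)    ((uint32_t)((n) >> 32))
    #define lower_32_bits(n)    ((uint32_t)(n))

    int main(void)
    {
            /* A made-up SAS address, as SAS_ADDR() would load it from the
             * eight wire bytes. */
            uint64_t sas_addr = 0x5000c50012345678ULL;

            /* cpu_to_le64() is a no-op on little-endian hosts and a byte
             * swap on big-endian ones; either way the context RAM ends up
             * holding the little-endian byte layout of the address. */
            printf("remote_sas_address_hi = 0x%08x\n", upper_32_bits(sas_addr));
            printf("remote_sas_address_lo = 0x%08x\n", lower_32_bits(sas_addr));
            return 0;
    }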
+
+/**
+ * sci_remote_node_context_setup_to_resume() - arm an RNC for resume
+ * @sci_rnc: The remote node context object to set up.
+ * @callback: The user callback to invoke once the transition completes.
+ * @callback_parameter: The cookie passed back to the callback.
+ *
+ * This method will set up the remote node context object so it will
+ * transition to its ready state.  If the remote node context is already set
+ * up to transition to its final state then this function does nothing.
+ */
+static void sci_remote_node_context_setup_to_resume(
+       struct sci_remote_node_context *sci_rnc,
+       scics_sds_remote_node_context_callback callback,
+       void *callback_parameter)
+{
+       if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
+               sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
+               sci_rnc->user_callback     = callback;
+               sci_rnc->user_cookie       = callback_parameter;
+       }
+}
+
+static void sci_remote_node_context_setup_to_destroy(
+       struct sci_remote_node_context *sci_rnc,
+       scics_sds_remote_node_context_callback callback,
+       void *callback_parameter)
+{
+       sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
+       sci_rnc->user_callback     = callback;
+       sci_rnc->user_cookie       = callback_parameter;
+}
+
+/**
+ * sci_remote_node_context_notify_user() - fire and clear the user callback
+ * @rnc: The remote node context whose callback is to be invoked.
+ *
+ * This method just calls the user callback function and then resets the
+ * callback.
+ */
+static void sci_remote_node_context_notify_user(
+       struct sci_remote_node_context *rnc)
+{
+       if (rnc->user_callback != NULL) {
+               (*rnc->user_callback)(rnc->user_cookie);
+
+               rnc->user_callback = NULL;
+               rnc->user_cookie = NULL;
+       }
+}
+
+static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
+{
+       if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+               sci_remote_node_context_resume(rnc, rnc->user_callback,
+                                                   rnc->user_cookie);
+}
+
+static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+       union scu_remote_node_context *rnc_buffer;
+       struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+       struct domain_device *dev = idev->domain_dev;
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+       rnc_buffer->ssp.is_valid = true;
+
+       if (!idev->is_direct_attached &&
+           (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
+               sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
+       } else {
+               sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
+
+               if (idev->is_direct_attached)
+                       sci_port_setup_transports(idev->owning_port,
+                                                 sci_rnc->remote_node_index);
+       }
+}
+
+static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+       union scu_remote_node_context *rnc_buffer;
+       struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+       struct isci_host *ihost = idev->owning_port->owning_controller;
+
+       rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+       rnc_buffer->ssp.is_valid = false;
+
+       sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+                                      SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
+}
+
+static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       /* Check to see if we have gotten back to the initial state because
+        * someone requested to destroy the remote node context object.
+        */
+       if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
+               rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+               sci_remote_node_context_notify_user(rnc);
+       }
+}
+
+static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
+
+       sci_remote_node_context_validate_context_buffer(sci_rnc);
+}
+
+static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       sci_remote_node_context_invalidate_context_buffer(rnc);
+}
+
+static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+       struct isci_remote_device *idev;
+       struct domain_device *dev;
+
+       idev = rnc_to_dev(rnc);
+       dev = idev->domain_dev;
+
+       /*
+        * For direct attached SATA devices we need to clear the TLCR
+        * NCQ to TCi tag mapping on the phy and in cases where we
+        * resume because of a target reset we also need to update
+        * the STPTLDARNI register with the RNi of the device
+        */
+       if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+           idev->is_direct_attached)
+               sci_port_setup_transports(idev->owning_port,
+                                              rnc->remote_node_index);
+
+       sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
+}
+
+static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+       if (rnc->user_callback)
+               sci_remote_node_context_notify_user(rnc);
+}
+
+static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+       struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+       sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static const struct sci_base_state sci_remote_node_context_state_table[] = {
+       [SCI_RNC_INITIAL] = {
+               .enter_state = sci_remote_node_context_initial_state_enter,
+       },
+       [SCI_RNC_POSTING] = {
+               .enter_state = sci_remote_node_context_posting_state_enter,
+       },
+       [SCI_RNC_INVALIDATING] = {
+               .enter_state = sci_remote_node_context_invalidating_state_enter,
+       },
+       [SCI_RNC_RESUMING] = {
+               .enter_state = sci_remote_node_context_resuming_state_enter,
+       },
+       [SCI_RNC_READY] = {
+               .enter_state = sci_remote_node_context_ready_state_enter,
+       },
+       [SCI_RNC_TX_SUSPENDED] = {
+               .enter_state = sci_remote_node_context_tx_suspended_state_enter,
+       },
+       [SCI_RNC_TX_RX_SUSPENDED] = {
+               .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
+       },
+       [SCI_RNC_AWAIT_SUSPENSION] = { },
+};
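
The RNC state machine is table-driven: sci_change_state() records the transition and invokes the optional .enter_state hook from this table, which is what triggers the posting/invalidating/resuming side effects implemented above. A self-contained sketch of that dispatch pattern under assumed semantics (the sci_init_sm()/sci_change_state() internals are not part of this hunk):

    #include <stdio.h>

    enum rnc_state { ST_INITIAL, ST_POSTING, ST_READY, ST_MAX };

    /* Mirrors struct sci_base_state: one optional enter hook per state. */
    struct state_entry {
            void (*enter_state)(void);
    };

    static void enter_posting(void) { printf("posting RNC to hardware\n"); }
    static void enter_ready(void)   { printf("RNC ready for IO\n"); }

    static const struct state_entry state_table[ST_MAX] = {
            /* [ST_INITIAL] has no enter action and stays zeroed */
            [ST_POSTING] = { .enter_state = enter_posting },
            [ST_READY]   = { .enter_state = enter_ready },
    };

    static enum rnc_state current_state = ST_INITIAL;

    /* sci_change_state() analog: record the transition, run the hook. */
    static void change_state(enum rnc_state next)
    {
            current_state = next;
            if (state_table[next].enter_state)
                    state_table[next].enter_state();
    }

    int main(void)
    {
            change_state(ST_POSTING);
            change_state(ST_READY);
            return 0;
    }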
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+                                           u16 remote_node_index)
+{
+       memset(rnc, 0, sizeof(struct sci_remote_node_context));
+
+       rnc->remote_node_index = remote_node_index;
+       rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+       sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
+}
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+                                                          u32 event_code)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       switch (state) {
+       case SCI_RNC_POSTING:
+               switch (scu_get_event_code(event_code)) {
+               case SCU_EVENT_POST_RNC_COMPLETE:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+                       break;
+               default:
+                       goto out;
+               }
+               break;
+       case SCI_RNC_INVALIDATING:
+               if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
+                       if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
+                               state = SCI_RNC_INITIAL;
+                       else
+                               state = SCI_RNC_POSTING;
+                       sci_change_state(&sci_rnc->sm, state);
+               } else {
+                       switch (scu_get_event_type(event_code)) {
+                       case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+                       case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+                               /* We really don't care if the hardware is going to suspend
+                                * the device since it's being invalidated anyway */
+                               dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                                       "%s: SCIC Remote Node Context 0x%p was "
+                                       "suspended by hardware while being "
+                                       "invalidated.\n", __func__, sci_rnc);
+                               break;
+                       default:
+                               goto out;
+                       }
+               }
+               break;
+       case SCI_RNC_RESUMING:
+               if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+               } else {
+                       switch (scu_get_event_type(event_code)) {
+                       case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+                       case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+                               /* We really don't care if the hardware is going to suspend
+                                * the device since it's being resumed anyway */
+                               dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                                       "%s: SCIC Remote Node Context 0x%p was "
+                                       "suspended by hardware while being resumed.\n",
+                                       __func__, sci_rnc);
+                               break;
+                       default:
+                               goto out;
+                       }
+               }
+               break;
+       case SCI_RNC_READY:
+               switch (scu_get_event_type(event_code)) {
+               case SCU_EVENT_TL_RNC_SUSPEND_TX:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+                       sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+                       break;
+               case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+                       sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+                       break;
+               default:
+                       goto out;
+               }
+               break;
+       case SCI_RNC_AWAIT_SUSPENSION:
+               switch (scu_get_event_type(event_code)) {
+               case SCU_EVENT_TL_RNC_SUSPEND_TX:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+                       sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+                       break;
+               case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+                       sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+                       break;
+               default:
+                       goto out;
+               }
+               break;
+       default:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+       return SCI_SUCCESS;
+
+ out:
+       dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                "%s: code: %#x state: %d\n", __func__, event_code, state);
+       return SCI_FAILURE;
+
+}
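
Structurally the handler above is a two-level dispatch: an outer switch on the current state, an inner check of the decoded event, and anything unexpected funneled to the shared warning at the out label. A compact standalone analog of that shape (states, events, and return codes invented for illustration):

    #include <stdio.h>

    enum state { RNC_POSTING, RNC_READY };
    enum event { EV_POST_COMPLETE, EV_SUSPEND_TX };

    /* Two-level dispatch: state first, then the event within that state. */
    static int handle_event(enum state *s, enum event ev)
    {
            switch (*s) {
            case RNC_POSTING:
                    if (ev == EV_POST_COMPLETE) {
                            *s = RNC_READY;
                            return 0;
                    }
                    break;              /* unexpected event for this state */
            case RNC_READY:
                    if (ev == EV_SUSPEND_TX)
                            return 0;   /* would enter a suspended state */
                    break;
            }
            /* 'out' label analog: one shared complaint for bad pairings */
            fprintf(stderr, "invalid event %d in state %d\n", ev, *s);
            return -1;
    }

    int main(void)
    {
            enum state s = RNC_POSTING;

            handle_event(&s, EV_POST_COMPLETE);
            return handle_event(&s, EV_SUSPEND_TX) ? 1 : 0;
    }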
+
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+                                                     scics_sds_remote_node_context_callback cb_fn,
+                                                     void *cb_p)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       switch (state) {
+       case SCI_RNC_INVALIDATING:
+               sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+               return SCI_SUCCESS;
+       case SCI_RNC_POSTING:
+       case SCI_RNC_RESUMING:
+       case SCI_RNC_READY:
+       case SCI_RNC_TX_SUSPENDED:
+       case SCI_RNC_TX_RX_SUSPENDED:
+       case SCI_RNC_AWAIT_SUSPENSION:
+               sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+               sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+               return SCI_SUCCESS;
+       case SCI_RNC_INITIAL:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               /* We have decided that the destruct request on the remote node
+                * context cannot fail since it is either in the initial/destroyed
+                * state or can be destroyed.
+                */
+               return SCI_SUCCESS;
+       default:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+                                                    u32 suspend_type,
+                                                    scics_sds_remote_node_context_callback cb_fn,
+                                                    void *cb_p)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       if (state != SCI_RNC_READY) {
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       sci_rnc->user_callback   = cb_fn;
+       sci_rnc->user_cookie     = cb_p;
+       sci_rnc->suspension_code = suspend_type;
+
+       if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
+               sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+                                                   SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
+       }
+
+       sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+                                                   scics_sds_remote_node_context_callback cb_fn,
+                                                   void *cb_p)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       switch (state) {
+       case SCI_RNC_INITIAL:
+               if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+                       return SCI_FAILURE_INVALID_STATE;
+
+               sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+               sci_remote_node_context_construct_buffer(sci_rnc);
+               sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+               return SCI_SUCCESS;
+       case SCI_RNC_POSTING:
+       case SCI_RNC_INVALIDATING:
+       case SCI_RNC_RESUMING:
+               if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+                       return SCI_FAILURE_INVALID_STATE;
+
+               sci_rnc->user_callback = cb_fn;
+               sci_rnc->user_cookie   = cb_p;
+               return SCI_SUCCESS;
+       case SCI_RNC_TX_SUSPENDED: {
+               struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+               struct domain_device *dev = idev->domain_dev;
+
+               sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+
+               /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
+               if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
+                       sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+               else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+                       if (idev->is_direct_attached) {
+                               /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
+                               sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+                       } else {
+                               sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+                       }
+               } else
+                       return SCI_FAILURE;
+               return SCI_SUCCESS;
+       }
+       case SCI_RNC_TX_RX_SUSPENDED:
+               sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+               sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+               return SCI_SUCCESS;
+       case SCI_RNC_AWAIT_SUSPENSION:
+               sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+                                                            struct isci_request *ireq)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+
+       switch (state) {
+       case SCI_RNC_READY:
+               return SCI_SUCCESS;
+       case SCI_RNC_TX_SUSPENDED:
+       case SCI_RNC_TX_RX_SUSPENDED:
+       case SCI_RNC_AWAIT_SUSPENSION:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+       default:
+               break;
+       }
+       dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+               "%s: requested to start IO while still resuming, %d\n",
+               __func__, state);
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+                                                       struct isci_request *ireq)
+{
+       enum scis_sds_remote_node_context_states state;
+
+       state = sci_rnc->sm.current_state_id;
+       switch (state) {
+       case SCI_RNC_RESUMING:
+       case SCI_RNC_READY:
+       case SCI_RNC_AWAIT_SUSPENSION:
+               return SCI_SUCCESS;
+       case SCI_RNC_TX_SUSPENDED:
+       case SCI_RNC_TX_RX_SUSPENDED:
+               sci_remote_node_context_resume(sci_rnc, NULL, NULL);
+               return SCI_SUCCESS;
+       default:
+               dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+                        "%s: invalid state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
new file mode 100644 (file)
index 0000000..41580ad
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+
+/**
+ * This file contains the structures, constants, and prototypes used to model
+ *    and manage the remote node context in the silicon.
+ */
+
+#include "isci.h"
+
+/**
+ * This constant represents an invalid remote device id; it is used to program
+ * the STPDARNI register so the driver knows when it has received a SIGNATURE
+ * FIS from the SCU.
+ */
+#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX    0x0FFF
+
+#define SCU_HARDWARE_SUSPENSION  (0)
+#define SCI_SOFTWARE_SUSPENSION  (1)
+
+struct isci_request;
+struct isci_remote_device;
+struct sci_remote_node_context;
+
+typedef void (*scics_sds_remote_node_context_callback)(void *);
+
+/**
+ * This is the enumeration of the remote node context states.
+ */
+enum scis_sds_remote_node_context_states {
+       /**
+        * This state is the initial state for a remote node context.  On a resume
+        * request the remote node context will transition to the posting state.
+        */
+       SCI_RNC_INITIAL,
+
+       /**
+        * This is a transition state that posts the RNi to the hardware. Once the RNC
+        * is posted the remote node context will be made ready.
+        */
+       SCI_RNC_POSTING,
+
+       /**
+        * This is a transition state that will post an RNC invalidate to the
+        * hardware.  Once the invalidate is complete the remote node context will
+        * transition to the posting state.
+        */
+       SCI_RNC_INVALIDATING,
+
+       /**
+        * This is a transition state that will post an RNC resume to the hardware.
+        * Once the event notification of resume complete is received the remote node
+        * context will transition to the ready state.
+        */
+       SCI_RNC_RESUMING,
+
+       /**
+        * This is the state that the remote node context must be in to accept io
+        * request operations.
+        */
+       SCI_RNC_READY,
+
+       /**
+        * This is the state that the remote node context transitions to when it gets
+        * a TX suspend notification from the hardware.
+        */
+       SCI_RNC_TX_SUSPENDED,
+
+       /**
+        * This is the state that the remote node context transitions to when it gets
+        * a TX RX suspend notification from the hardware.
+        */
+       SCI_RNC_TX_RX_SUSPENDED,
+
+       /**
+        * This state is a wait state for the remote node context that waits for a
+        * suspend notification from the hardware.  This state is entered when either
+        * there is a request to suspend the remote node context or when there is a TC
+        * completion where the remote node will be suspended by the hardware.
+        */
+       SCI_RNC_AWAIT_SUSPENSION
+};
+
+/**
+ *
+ *
+ * This enumeration is used to define the end destination state for the remote
+ * node context.
+ */
+enum sci_remote_node_context_destination_state {
+       SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
+       SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
+       SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
+};
+
+/**
+ * struct sci_remote_node_context - This structure contains the data
+ *    associated with the remote node context object.  The remote node context
+ *    (RNC) object models the remote device information necessary to manage
+ *    the silicon RNC.
+ */
+struct sci_remote_node_context {
+       /**
+        * This field indicates the remote node index (RNI) associated with
+        * this RNC.
+        */
+       u16 remote_node_index;
+
+       /**
+        * This field is the recorded suspension code or the reason for the remote node
+        * context suspension.
+        */
+       u32 suspension_code;
+
+       /**
+        * This field records the destination state for the remote node
+        * context, i.e. the state it should end up in once the current
+        * transition completes.  A READY destination can cause an automatic
+        * resume on receiving a suspension notification.
+        */
+       enum sci_remote_node_context_destination_state destination_state;
+
+       /**
+        * This field contains the callback function that the user requested to be
+        * called when the requested state transition is complete.
+        */
+       scics_sds_remote_node_context_callback user_callback;
+
+       /**
+        * This field contains the parameter that is called when the user requested
+        * state transition is completed.
+        */
+       void *user_cookie;
+
+       /**
+        * This field contains the data for the object's state machine.
+        */
+       struct sci_base_state_machine sm;
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+                                           u16 remote_node_index);
+
+
+bool sci_remote_node_context_is_ready(
+       struct sci_remote_node_context *sci_rnc);
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+                                                          u32 event_code);
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+                                                     scics_sds_remote_node_context_callback callback,
+                                                     void *callback_parameter);
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+                                                    u32 suspend_type,
+                                                    scics_sds_remote_node_context_callback cb_fn,
+                                                    void *cb_p);
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+                                                   scics_sds_remote_node_context_callback cb_fn,
+                                                   void *cb_p);
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+                                                       struct isci_request *ireq);
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+                                                     struct isci_request *ireq);
+
+#endif  /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
new file mode 100644 (file)
index 0000000..301b314
--- /dev/null
@@ -0,0 +1,598 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
+ * public, protected, and private methods.
+ */
+#include "remote_node_table.h"
+#include "remote_node_context.h"
+
+/**
+ *
+ * @remote_node_table: This is the remote node index table from which the
+ *    selection will be made.
+ * @group_table_index: This is the index to the group table from which to
+ *    search for an available selection.
+ *
+ * This routine scans the selected group table for the first available group
+ * and returns its absolute bit position, i.e. (dword index * 32) + bit index.
+ * Returns SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if no group is available.
+ */
+static u32 sci_remote_node_table_get_group_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index)
+{
+       u32 dword_index;
+       u32 *group_table;
+       u32 bit_index;
+
+       group_table = remote_node_table->remote_node_groups[group_table_index];
+
+       for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
+               if (group_table[dword_index] != 0) {
+                       for (bit_index = 0; bit_index < 32; bit_index++) {
+                               if ((group_table[dword_index] & (1 << bit_index)) != 0) {
+                                       return (dword_index * 32) + bit_index;
+                               }
+                       }
+               }
+       }
+
+       return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
+}
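+
+/*
+ * For example (illustrative values): if group_table[0] == 0x00000028, bits 3
+ * and 5 are set, so the scan above returns 3, the lowest available group
+ * index in the selected table.
+ */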
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table in which to clear
+ *    the selector.
+ * @group_table_index: This is the remote node selector in which the change
+ *    will be made.
+ * @group_index: This is the bit index in the table to be modified.
+ *
+ * This method will clear the group index entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_clear_group_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index,
+       u32 group_index)
+{
+       u32 dword_index;
+       u32 bit_index;
+       u32 *group_table;
+
+       BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+       BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+       dword_index = group_index / 32;
+       bit_index   = group_index % 32;
+       group_table = remote_node_table->remote_node_groups[group_table_index];
+
+       group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table in which to set
+ *    the selector.
+ * @group_table_index: This is the remote node selector in which the change
+ *    will be made.
+ * @group_index: This is the bit position in the table to be modified.
+ *
+ * This method will set the group index bit entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_set_group_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index,
+       u32 group_index)
+{
+       u32 dword_index;
+       u32 bit_index;
+       u32 *group_table;
+
+       BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+       BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+       dword_index = group_index / 32;
+       bit_index   = group_index % 32;
+       group_table = remote_node_table->remote_node_groups[group_table_index];
+
+       group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table in which to modify
+ *    the remote node availability.
+ * @remote_node_index: This is the remote node index that is being returned to
+ *    the table.
+ *
+ * This method will set the remote node index to available in the remote node
+ * allocation table.
+ */
+static void sci_remote_node_table_set_node_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 slot_normalized;
+       u32 slot_position;
+
+       BUG_ON(
+               (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+               <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+               );
+
+       dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+       dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+       slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+       slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+       remote_node_table->available_remote_nodes[dword_location] |=
+               1 << (slot_normalized + slot_position);
+}
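+
+/*
+ * Worked example (illustrative): with SCU_STP_REMOTE_NODE_COUNT == 3 and
+ * SCIC_SDS_REMOTE_NODES_PER_DWORD == 24, remote_node_index 13 gives
+ * dword_location == 0, dword_remainder == 13, slot_normalized ==
+ * (13 / 3) * 4 == 16 and slot_position == 13 % 3 == 1, so bit 17 of
+ * available_remote_nodes[0] is set: the middle slot of nibble 4, which
+ * tracks RNis 12..14.
+ */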
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table from which to clear
+ *    the available remote node bit.
+ * @remote_node_index: This is the remote node index which is to be cleared
+ *    from the table.
+ *
+ * This method clears the remote node index from the table of available remote
+ * nodes.
+ */
+static void sci_remote_node_table_clear_node_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 slot_position;
+       u32 slot_normalized;
+
+       BUG_ON(
+               (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+               <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+               );
+
+       dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+       dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+       slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+       slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+       remote_node_table->available_remote_nodes[dword_location] &=
+               ~(1 << (slot_normalized + slot_position));
+}
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table from which the slot will be
+ *    cleared.
+ * @group_index: The index for the slot that is to be cleared.
+ *
+ * This method clears the entire table slot at the specified slot index.
+ */
+static void sci_remote_node_table_clear_group(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 dword_value;
+
+       BUG_ON(
+               (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+               <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+               );
+
+       dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+       dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+       dword_value = remote_node_table->available_remote_nodes[dword_location];
+       dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+       remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table in which the slot will be
+ *    set.
+ * @group_index: The index for the slot that is to be set.
+ *
+ * This method sets an entire remote node group in the remote node table.
+ */
+static void sci_remote_node_table_set_group(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 dword_value;
+
+       BUG_ON(
+               (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+               <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+               );
+
+       dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+       dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+       dword_value = remote_node_table->available_remote_nodes[dword_location];
+       dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+       remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table for which the group
+ *    value is to be returned.
+ * @group_index: This is the group index to use to find the group value.
+ *
+ * This method returns the group value, i.e. the bit values of the nibble at
+ * the specified remote node group index.
+ */
+static u8 sci_remote_node_table_get_group_value(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_index)
+{
+       u32 dword_location;
+       u32 dword_remainder;
+       u32 dword_value;
+
+       dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+       dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+       dword_value = remote_node_table->available_remote_nodes[dword_location];
+       dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+       dword_value = dword_value >> (dword_remainder * 4);
+
+       return (u8)dword_value;
+}
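+
+/*
+ * For example (illustrative): if available_remote_nodes[0] == 0x377, then
+ * group_index 2 masks with 0x7 << 8 and shifts right by 8, returning 0x3,
+ * i.e. two of the three slots in that group are available.
+ */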
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table which is to be initialized.
+ * @remote_node_entries: The number of entries to put in the table.
+ *
+ * This method will initialize the remote node table for use.
+ */
+void sci_remote_node_table_initialize(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_entries)
+{
+       u32 index;
+
+       /*
+        * Initialize the raw data.  We could improve the speed by only
+        * initializing those entries that are actually going to be used. */
+       memset(
+               remote_node_table->available_remote_nodes,
+               0x00,
+               sizeof(remote_node_table->available_remote_nodes)
+               );
+
+       memset(
+               remote_node_table->remote_node_groups,
+               0x00,
+               sizeof(remote_node_table->remote_node_groups)
+               );
+
+       /* Initialize the available remote node sets */
+       remote_node_table->available_nodes_array_size = (u16)
+                                                       (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+                                                       + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
+
+
+       /* Initialize each full DWORD to a FULL SET of remote nodes */
+       for (index = 0; index < remote_node_entries; index++) {
+               sci_remote_node_table_set_node_index(remote_node_table, index);
+       }
+
+       remote_node_table->group_array_size = (u16)
+                                             (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
+                                             + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
+
+       for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
+               /*
+                * These are all guaranteed to be full slot values so fill them in the
+                * available sets of 3 remote nodes */
+               sci_remote_node_table_set_group_index(remote_node_table, 2, index);
+       }
+
+       /* Now fill in any remainders that we may find */
+       if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
+               sci_remote_node_table_set_group_index(remote_node_table, 1, index);
+       } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
+               sci_remote_node_table_set_group_index(remote_node_table, 0, index);
+       }
+}
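+
+/*
+ * Illustrative example: initializing with remote_node_entries == 8 leaves
+ * available_remote_nodes[0] == 0x377 (nibbles 0 and 1 full at 0x7 and the
+ * two-entry remainder 0x3 in nibble 2), marks groups 0 and 1 in the triple
+ * selector (table 2) and group 2 in the dual selector (table 1).
+ */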
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table from which to allocate a
+ *    remote node.
+ * @group_table_index: The group table index that is to be used for the search.
+ *
+ * This method will allocate a single RNi from the remote node table.  The
+ * group table index determines which remote node group table to search.
+ * This search may fail, in which case another group table can be specified.
+ * The function is designed to allow a search from the available single remote
+ * node group up to the triple remote node group.  If an entry is found in the
+ * specified table the remote node is removed and the remote node groups are
+ * updated.  Returns the RNi value, or an invalid remote node context index if
+ * no RNi can be found.
+ */
+static u16 sci_remote_node_table_allocate_single_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index)
+{
+       u8 index;
+       u8 group_value;
+       u32 group_index;
+       u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+       group_index = sci_remote_node_table_get_group_index(
+               remote_node_table, group_table_index);
+
+       /* Proceed only if an available group was found in the selected table */
+       if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+               group_value = sci_remote_node_table_get_group_value(
+                       remote_node_table, group_index);
+
+               for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
+                       if (((1 << index) & group_value) != 0) {
+                               /* We have selected a bit now clear it */
+                               remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
+                                                         + index);
+
+                               sci_remote_node_table_clear_group_index(
+                                       remote_node_table, group_table_index, group_index
+                                       );
+
+                               sci_remote_node_table_clear_node_index(
+                                       remote_node_table, remote_node_index
+                                       );
+
+                               if (group_table_index > 0) {
+                                       sci_remote_node_table_set_group_index(
+                                               remote_node_table, group_table_index - 1, group_index
+                                               );
+                               }
+
+                               break;
+                       }
+               }
+       }
+
+       return remote_node_index;
+}
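+
+/*
+ * Note on the bookkeeping above: allocating one RNi from a group found via
+ * selector group_table_index clears the group from that selector and, when
+ * group_table_index > 0, re-files it one selector lower, since the group now
+ * has one fewer free slot (e.g. a fully free triple taken from selector 2
+ * moves to selector 1).
+ */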
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which to allocate the
+ *    remote node entries.
+ * @group_table_index: This is the group table index, which must equal two (2)
+ *    for this operation.
+ *
+ * This method will allocate three consecutive remote node context entries. If
+ * there are no remaining triple entries the function will return a failure.
+ * Returns the remote node index that represents three consecutive remote node
+ * entries, or an invalid remote node context index if none can be found.
+ */
+static u16 sci_remote_node_table_allocate_triple_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u32 group_table_index)
+{
+       u32 group_index;
+       u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+       group_index = sci_remote_node_table_get_group_index(
+               remote_node_table, group_table_index);
+
+       if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+               remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
+
+               sci_remote_node_table_clear_group_index(
+                       remote_node_table, group_table_index, group_index
+                       );
+
+               sci_remote_node_table_clear_group(
+                       remote_node_table, group_index
+                       );
+       }
+
+       return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which the remote node
+ *    allocation is to take place.
+ * @remote_node_count: This is the remote node count, which is one of
+ *    SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
+ *
+ * This method will allocate a remote node that matches the remote node count
+ * specified by the caller.  Valid values for the remote node count are
+ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).  Returns the
+ * allocated remote node index, or an invalid remote node context index.
+ */
+u16 sci_remote_node_table_allocate_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_count)
+{
+       u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+       if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+               remote_node_index =
+                       sci_remote_node_table_allocate_single_remote_node(
+                               remote_node_table, 0);
+
+               if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+                       remote_node_index =
+                               sci_remote_node_table_allocate_single_remote_node(
+                                       remote_node_table, 1);
+               }
+
+               if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+                       remote_node_index =
+                               sci_remote_node_table_allocate_single_remote_node(
+                                       remote_node_table, 2);
+               }
+       } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+               remote_node_index =
+                       sci_remote_node_table_allocate_triple_remote_node(
+                               remote_node_table, 2);
+       }
+
+       return remote_node_index;
+}
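+
+/*
+ * Allocation policy (as implemented above): SSP allocations probe the
+ * single-slot selector first, then the dual, then the triple, so partially
+ * used groups are consumed before a fully free triple is broken up; full
+ * triples therefore remain available for STP devices, which need three
+ * consecutive RNis.
+ */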
+
+/**
+ *
+ * @remote_node_table: This is the remote node table to which the remote node
+ *    index is to be freed.
+ * @remote_node_index: This is the remote node index that is being freed.
+ *
+ * This method will free a single remote node index back to the remote node
+ * table.  This routine will update the remote node groups.
+ */
+static void sci_remote_node_table_release_single_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u16 remote_node_index)
+{
+       u32 group_index;
+       u8 group_value;
+
+       group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+       group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
+
+       /*
+        * Assert that we are not trying to add an entry to a slot that is already
+        * full. */
+       BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
+
+       if (group_value == 0x00) {
+               /*
+                * There are no entries in this slot so it must be added to the single
+                * slot table. */
+               sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
+       } else if ((group_value & (group_value - 1)) == 0) {
+               /*
+                * There is only one entry in this slot so it must be moved from the
+                * single slot table to the dual slot table */
+               sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
+               sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
+       } else {
+               /*
+                * There are two entries in the slot so it must be moved from the dual
+                * slot table to the triple slot table. */
+               sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
+               sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
+       }
+
+       sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table to which the remote node
+ *    index is to be freed.
+ * @remote_node_index: This is the first of the three consecutive remote node
+ *    indices that are being freed.
+ *
+ * This method will release a group of three consecutive remote nodes back to
+ * the free remote nodes.
+ */
+static void sci_remote_node_table_release_triple_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u16 remote_node_index)
+{
+       u32 group_index;
+
+       group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+       sci_remote_node_table_set_group_index(
+               remote_node_table, 2, group_index
+               );
+
+       sci_remote_node_table_set_group(remote_node_table, group_index);
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index is
+ *    to be freed.
+ * @remote_node_count: This is the count of consecutive remote nodes that are
+ *    to be freed.
+ *
+ * This method will release the remote node index back into the remote node
+ * table free pool.
+ */
+void sci_remote_node_table_release_remote_node_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_count,
+       u16 remote_node_index)
+{
+       if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+               sci_remote_node_table_release_single_remote_node(
+                       remote_node_table, remote_node_index);
+       } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+               sci_remote_node_table_release_triple_remote_node(
+                       remote_node_table, remote_node_index);
+       }
+}
+
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
new file mode 100644 (file)
index 0000000..721ab98
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
+#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * Remote node sets are sets of remote node indices in the remote node table.
+ * The SCU hardware requires that STP remote node entries take three
+ * consecutive remote node indices, so the table is arranged in sets of three.
+ * The bits are used as 0111 0111 to make a byte, and the bits define the set
+ * of three remote nodes to use as a sequence.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
+
+/**
+ *
+ *
+ * Since the remote node table is organized as DWORDs, take the remote node
+ * sets in bytes and represent them in DWORDs. The lowest-ordered bits are the
+ * ones used in case a full DWORD is not being used, i.e. 0000 0000 0000 0000
+ * 0111 0111 0111 0111 if only a single WORD of the DWORD is in use.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
+       (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * byte.
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_BYTE \
+       (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * DWORD.
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_DWORD        \
+       (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
+
+/**
+ *
+ *
+ * This is the number of bits in a remote node group
+ */
+#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP   4
+
+#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX      (0xFFFFFFFF)
+#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE    (0x07)
+#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE   (0x00)
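+
+/*
+ * Layout sketch (illustrative): each dword of the available remote node
+ * array holds eight nibbles, one per set of three RNis:
+ *
+ *     0111 0111 0111 0111 0111 0111 0111 0111
+ *     set7 set6 set5 set4 set3 set2 set1 set0
+ *
+ * A nibble reading 0x7 (the full slot value) has all three RNis free and
+ * 0x0 (the empty slot value) has none; single allocations clear one bit,
+ * while an STP triple allocation clears the whole nibble.
+ */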
+
+/**
+ *
+ *
+ * Expander-attached STP remote node count
+ */
+#define SCU_STP_REMOTE_NODE_COUNT        3
+
+/**
+ *
+ *
+ * Expander or direct attached SSP remote node count
+ */
+#define SCU_SSP_REMOTE_NODE_COUNT        1
+
+/**
+ *
+ *
+ * Direct attached STP remote node count
+ */
+#define SCU_SATA_REMOTE_NODE_COUNT       1
+
+/**
+ * struct sci_remote_node_table - This structure tracks which remote node
+ *    indices are available for allocation, together with the group selector
+ *    tables used to locate one, two or three consecutive free entries.
+ */
+struct sci_remote_node_table {
+       /**
+        * This field contains the size of the available_remote_nodes array,
+        * in dwords.
+        */
+       u16 available_nodes_array_size;
+
+       /**
+        * This field contains the size, in dwords, of each remote node group
+        * selector array.
+        */
+       u16 group_array_size;
+
+       /**
+        * This field is the array of available remote node entries in bits.
+        * Because of the way STP remote node data is allocated on the SCU hardware
+        * the remote nodes must occupy three consecutive remote node context
+        * entries.  For ease of allocation and de-allocation we have broken the
+        * sets of three into a single nibble.  When an STP RNi is allocated all
+        * of the bits in its nibble are cleared.  This results in a table size
+        * of MAX_REMOTE_NODES / (consecutive RNi entries for STP) at two sets
+        * per byte.
+        */
+       u32 available_remote_nodes[
+               (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+               + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
+
+       /**
+        * This field is the nibble selector for the above table.  There are three
+        * possible selectors each for fast lookup when trying to find one, two or
+        * three remote node entries.
+        */
+       u32 remote_node_groups[
+               SCU_STP_REMOTE_NODE_COUNT][
+               (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
+               + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
+
+};
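+
+/*
+ * Sizing example (illustrative; assumes SCI_MAX_REMOTE_DEVICES == 256):
+ * available_remote_nodes would then span 256 / 24 + 1 == 11 dwords, and each
+ * of the three remote_node_groups selectors 256 / 96 + 1 == 3 dwords.
+ */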
+
+/* --------------------------------------------------------------------------- */
+
+void sci_remote_node_table_initialize(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_entries);
+
+u16 sci_remote_node_table_allocate_remote_node(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_count);
+
+void sci_remote_node_table_release_remote_node_index(
+       struct sci_remote_node_table *remote_node_table,
+       u32 remote_node_count,
+       u16 remote_node_index);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
new file mode 100644 (file)
index 0000000..a46e07a
--- /dev/null
@@ -0,0 +1,3391 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "task.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "sas.h"
+
+static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
+                                                       int idx)
+{
+       if (idx == 0)
+               return &ireq->tc->sgl_pair_ab;
+       else if (idx == 1)
+               return &ireq->tc->sgl_pair_cd;
+       else if (idx < 0)
+               return NULL;
+       else
+               return &ireq->sg_table[idx - 2];
+}
+
+static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
+                                         struct isci_request *ireq, u32 idx)
+{
+       u32 offset;
+
+       if (idx == 0) {
+               offset = (void *) &ireq->tc->sgl_pair_ab -
+                        (void *) &ihost->task_context_table[0];
+               return ihost->task_context_dma + offset;
+       } else if (idx == 1) {
+               offset = (void *) &ireq->tc->sgl_pair_cd -
+                        (void *) &ihost->task_context_table[0];
+               return ihost->task_context_dma + offset;
+       }
+
+       return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
+}
+
+static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
+{
+       e->length = sg_dma_len(sg);
+       e->address_upper = upper_32_bits(sg_dma_address(sg));
+       e->address_lower = lower_32_bits(sg_dma_address(sg));
+       e->address_modifier = 0;
+}
+
+static void sci_request_build_sgl(struct isci_request *ireq)
+{
+       struct isci_host *ihost = ireq->isci_host;
+       struct sas_task *task = isci_request_access_task(ireq);
+       struct scatterlist *sg = NULL;
+       dma_addr_t dma_addr;
+       u32 sg_idx = 0;
+       struct scu_sgl_element_pair *scu_sg   = NULL;
+       struct scu_sgl_element_pair *prev_sg  = NULL;
+
+       if (task->num_scatter > 0) {
+               sg = task->scatter;
+
+               while (sg) {
+                       scu_sg = to_sgl_element_pair(ireq, sg_idx);
+                       init_sgl_element(&scu_sg->A, sg);
+                       sg = sg_next(sg);
+                       if (sg) {
+                               init_sgl_element(&scu_sg->B, sg);
+                               sg = sg_next(sg);
+                       } else
+                               memset(&scu_sg->B, 0, sizeof(scu_sg->B));
+
+                       if (prev_sg) {
+                               dma_addr = to_sgl_element_pair_dma(ihost,
+                                                                  ireq,
+                                                                  sg_idx);
+
+                               prev_sg->next_pair_upper =
+                                       upper_32_bits(dma_addr);
+                               prev_sg->next_pair_lower =
+                                       lower_32_bits(dma_addr);
+                       }
+
+                       prev_sg = scu_sg;
+                       sg_idx++;
+               }
+       } else {        /* handle when no sg */
+               scu_sg = to_sgl_element_pair(ireq, sg_idx);
+
+               dma_addr = dma_map_single(&ihost->pdev->dev,
+                                         task->scatter,
+                                         task->total_xfer_len,
+                                         task->data_dir);
+
+               ireq->zero_scatter_daddr = dma_addr;
+
+               scu_sg->A.length = task->total_xfer_len;
+               scu_sg->A.address_upper = upper_32_bits(dma_addr);
+               scu_sg->A.address_lower = lower_32_bits(dma_addr);
+       }
+
+       if (scu_sg) {
+               scu_sg->next_pair_upper = 0;
+               scu_sg->next_pair_lower = 0;
+       }
+}
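+
+/*
+ * Chaining sketch for the routine above: the first two SGL element pairs
+ * live in the task context itself (sgl_pair_ab and sgl_pair_cd); any further
+ * pairs come from ireq->sg_table.  Each pair's next_pair_{upper,lower} holds
+ * the DMA address of the following pair, and the final pair's link is
+ * zeroed.
+ */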
+
+static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
+{
+       struct ssp_cmd_iu *cmd_iu;
+       struct sas_task *task = isci_request_access_task(ireq);
+
+       cmd_iu = &ireq->ssp.cmd;
+
+       memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
+       cmd_iu->add_cdb_len = 0;
+       cmd_iu->_r_a = 0;
+       cmd_iu->_r_b = 0;
+       cmd_iu->en_fburst = 0; /* unsupported */
+       cmd_iu->task_prio = task->ssp_task.task_prio;
+       cmd_iu->task_attr = task->ssp_task.task_attr;
+       cmd_iu->_r_c = 0;
+
+       sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
+                      sizeof(task->ssp_task.cdb) / sizeof(u32));
+}
+
+static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
+{
+       struct ssp_task_iu *task_iu;
+       struct sas_task *task = isci_request_access_task(ireq);
+       struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+       task_iu = &ireq->ssp.tmf;
+
+       memset(task_iu, 0, sizeof(struct ssp_task_iu));
+
+       memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
+
+       task_iu->task_func = isci_tmf->tmf_code;
+       task_iu->task_tag =
+               (ireq->ttype == tmf_task) ?
+               isci_tmf->io_tag :
+               SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SSP request.
+ * @sci_req: The SSP request for which the task context is constructed.
+ * @task_context: The SCU task context to be filled in.
+ *
+ */
+static void scu_ssp_reqeust_construct_task_context(
+       struct isci_request *ireq,
+       struct scu_task_context *task_context)
+{
+       dma_addr_t dma_addr;
+       struct isci_remote_device *idev;
+       struct isci_port *iport;
+
+       idev = ireq->target_device;
+       iport = idev->owning_port;
+
+       /* Fill in the TC with its required data */
+       task_context->abort = 0;
+       task_context->priority = 0;
+       task_context->initiator_request = 1;
+       task_context->connection_rate = idev->connection_rate;
+       task_context->protocol_engine_index = ISCI_PEG;
+       task_context->logical_port_index = iport->physical_port_index;
+       task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+       task_context->valid = SCU_TASK_CONTEXT_VALID;
+       task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+       task_context->remote_node_index = idev->rnc.remote_node_index;
+       task_context->command_code = 0;
+
+       task_context->link_layer_control = 0;
+       task_context->do_not_dma_ssp_good_response = 1;
+       task_context->strict_ordering = 0;
+       task_context->control_frame = 0;
+       task_context->timeout_enable = 0;
+       task_context->block_guard_enable = 0;
+
+       task_context->address_modifier = 0;
+
+       /* task_context->type.ssp.tag = ireq->io_tag; */
+       task_context->task_phase = 0x01;
+
+       ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+                             (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+                             (iport->physical_port_index <<
+                              SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+                             ISCI_TAG_TCI(ireq->io_tag));
+
+       /*
+        * Copy the physical address for the command buffer to the
+        * SCU Task Context
+        */
+       dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
+
+       task_context->command_iu_upper = upper_32_bits(dma_addr);
+       task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+       /*
+        * Copy the physical address for the response buffer to the
+        * SCU Task Context
+        */
+       dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+
+       task_context->response_iu_upper = upper_32_bits(dma_addr);
+       task_context->response_iu_lower = lower_32_bits(dma_addr);
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP IO request.
+ * @sci_req: The SSP IO request for which the task context is constructed.
+ *
+ */
+static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
+                                                     enum dma_data_direction dir,
+                                                     u32 len)
+{
+       struct scu_task_context *task_context = ireq->tc;
+
+       scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+       task_context->ssp_command_iu_length =
+               sizeof(struct ssp_cmd_iu) / sizeof(u32);
+       task_context->type.ssp.frame_type = SSP_COMMAND;
+
+       switch (dir) {
+       case DMA_FROM_DEVICE:
+       case DMA_NONE:
+       default:
+               task_context->task_type = SCU_TASK_TYPE_IOREAD;
+               break;
+       case DMA_TO_DEVICE:
+               task_context->task_type = SCU_TASK_TYPE_IOWRITE;
+               break;
+       }
+
+       task_context->transfer_length_bytes = len;
+
+       if (task_context->transfer_length_bytes > 0)
+               sci_request_build_sgl(ireq);
+}
+
+/**
+ * This method will fill in the SCU Task Context for a SSP Task request.  The
+ *    following important settings are utilized: -# priority ==
+ *    SCU_TASK_PRIORITY_HIGH.  This ensures that the task request is issued
+ *    ahead of other tasks destined for the same Remote Node. -# task_type ==
+ *    SCU_TASK_TYPE_RAW_FRAME.  This indicates that a raw task frame is being
+ *    utilized to perform task management. -#
+ *    control_frame == 1.  This ensures that the proper endianness is set so
+ *    that the bytes are transmitted in the right order for a task frame.
+ * @sci_req: This parameter specifies the task request object being
+ *    constructed.
+ *
+ */
+static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
+{
+       struct scu_task_context *task_context = ireq->tc;
+
+       scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+       task_context->control_frame                = 1;
+       task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
+       task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
+       task_context->transfer_length_bytes        = 0;
+       task_context->type.ssp.frame_type          = SSP_TASK;
+       task_context->ssp_command_iu_length =
+               sizeof(struct ssp_task_iu) / sizeof(u32);
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SATA
+ *    request.  This is called from the various SATA constructors.
+ * @sci_req: The general IO request object which is to be used in
+ *    constructing the SCU task context.
+ * @task_context: The buffer pointer for the SCU task context which is being
+ *    constructed.
+ *
+ * On return, the general IO request construction and the buffer assignment
+ * for the command buffer are complete.  TODO: revisit task context
+ * construction to determine what is common for SSP/SMP/STP task context
+ * structures.
+ */
+static void scu_sata_reqeust_construct_task_context(
+       struct isci_request *ireq,
+       struct scu_task_context *task_context)
+{
+       dma_addr_t dma_addr;
+       struct isci_remote_device *idev;
+       struct isci_port *iport;
+
+       idev = ireq->target_device;
+       iport = idev->owning_port;
+
+       /* Fill in the TC with its required data */
+       task_context->abort = 0;
+       task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+       task_context->initiator_request = 1;
+       task_context->connection_rate = idev->connection_rate;
+       task_context->protocol_engine_index = ISCI_PEG;
+       task_context->logical_port_index = iport->physical_port_index;
+       task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
+       task_context->valid = SCU_TASK_CONTEXT_VALID;
+       task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+       task_context->remote_node_index = idev->rnc.remote_node_index;
+       task_context->command_code = 0;
+
+       task_context->link_layer_control = 0;
+       task_context->do_not_dma_ssp_good_response = 1;
+       task_context->strict_ordering = 0;
+       task_context->control_frame = 0;
+       task_context->timeout_enable = 0;
+       task_context->block_guard_enable = 0;
+
+       task_context->address_modifier = 0;
+       task_context->task_phase = 0x01;
+
+       task_context->ssp_command_iu_length =
+               (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
+
+       /* Set the first word of the H2D REG FIS */
+       task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
+
+       ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+                             (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+                             (iport->physical_port_index <<
+                              SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+                             ISCI_TAG_TCI(ireq->io_tag));
+       /*
+        * Copy the physical address for the command buffer to the SCU Task
+        * Context. We must offset the command buffer by 4 bytes because the
+        * first 4 bytes are transferred in the body of the TC.
+        */
+       dma_addr = sci_io_request_get_dma_addr(ireq,
+                                               ((char *) &ireq->stp.cmd) +
+                                               sizeof(u32));
+
+       task_context->command_iu_upper = upper_32_bits(dma_addr);
+       task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+       /* SATA Requests do not have a response buffer */
+       task_context->response_iu_upper = 0;
+       task_context->response_iu_lower = 0;
+}
+
+static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
+{
+       struct scu_task_context *task_context = ireq->tc;
+
+       scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+       task_context->control_frame         = 0;
+       task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
+       task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
+       task_context->type.stp.fis_type     = FIS_REGH2D;
+       task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
+}
+
+static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
+                                                         bool copy_rx_frame)
+{
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+
+       scu_stp_raw_request_construct_task_context(ireq);
+
+       stp_req->status = 0;
+       stp_req->sgl.offset = 0;
+       stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
+
+       if (copy_rx_frame) {
+               sci_request_build_sgl(ireq);
+               stp_req->sgl.index = 0;
+       } else {
+               /* The user does not want the data copied to the SGL buffer location */
+               stp_req->sgl.index = -1;
+       }
+
+       return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @sci_req: This parameter specifies the request to be constructed as an
+ *    optimized request.
+ * @optimized_task_type: This parameter specifies whether the request is to be
+ *    a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
+ *    value of 1 indicates NCQ.
+ *
+ * This method performs request construction common to all types of STP
+ * requests that are optimized by the silicon (i.e. UDMA, NCQ).
+ */
+static void sci_stp_optimized_request_construct(struct isci_request *ireq,
+                                                    u8 optimized_task_type,
+                                                    u32 len,
+                                                    enum dma_data_direction dir)
+{
+       struct scu_task_context *task_context = ireq->tc;
+
+       /* Build the STP task context structure */
+       scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+       /* Copy over the SGL elements */
+       sci_request_build_sgl(ireq);
+
+       /* Copy over the number of bytes to be transferred */
+       task_context->transfer_length_bytes = len;
+
+       if (dir == DMA_TO_DEVICE) {
+               /*
+                * The difference between the DMA IN and DMA OUT request task type
+                * values are consistent with the difference between FPDMA READ
+                * and FPDMA WRITE values.  Add the supplied task type parameter
+                * to this difference to set the task type properly for this
+                * DATA OUT (WRITE) case. */
+               task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
+                                                                - SCU_TASK_TYPE_DMA_IN);
+       } else {
+               /*
+                * For the DATA IN (READ) case, simply save the supplied
+                * optimized task type. */
+               task_context->task_type = optimized_task_type;
+       }
+}
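+
+/*
+ * Example (assuming the SCU task type encoding keeps the read/write pairs at
+ * a constant offset, as the comment above relies on): for a DATA OUT request
+ * constructed with SCU_TASK_TYPE_FPDMAQ_READ, adding (SCU_TASK_TYPE_DMA_OUT -
+ * SCU_TASK_TYPE_DMA_IN) yields the corresponding FPDMA write task type, so a
+ * single constructor serves both directions.
+ */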
+
+
+
+static enum sci_status
+sci_io_request_construct_sata(struct isci_request *ireq,
+                              u32 len,
+                              enum dma_data_direction dir,
+                              bool copy)
+{
+       struct sas_task *task = isci_request_access_task(ireq);
+
+       /* check for management protocols */
+       if (ireq->ttype == tmf_task) {
+               struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+               if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+                   tmf->tmf_code == isci_tmf_sata_srst_low) {
+                       scu_stp_raw_request_construct_task_context(ireq);
+                       return SCI_SUCCESS;
+               } else {
+                       dev_err(&ireq->owning_controller->pdev->dev,
+                               "%s: Request 0x%p received un-handled SAT "
+                               "management protocol 0x%x.\n",
+                               __func__, ireq, tmf->tmf_code);
+
+                       return SCI_FAILURE;
+               }
+       }
+
+       if (!sas_protocol_ata(task->task_proto)) {
+               dev_err(&ireq->owning_controller->pdev->dev,
+                       "%s: Non-ATA protocol in SATA path: 0x%x\n",
+                       __func__,
+                       task->task_proto);
+               return SCI_FAILURE;
+
+       }
+
+       /* non data */
+       if (task->data_dir == DMA_NONE) {
+               scu_stp_raw_request_construct_task_context(ireq);
+               return SCI_SUCCESS;
+       }
+
+       /* NCQ */
+       if (task->ata_task.use_ncq) {
+               sci_stp_optimized_request_construct(ireq,
+                                                        SCU_TASK_TYPE_FPDMAQ_READ,
+                                                        len, dir);
+               return SCI_SUCCESS;
+       }
+
+       /* DMA */
+       if (task->ata_task.dma_xfer) {
+               sci_stp_optimized_request_construct(ireq,
+                                                        SCU_TASK_TYPE_DMA_IN,
+                                                        len, dir);
+               return SCI_SUCCESS;
+       } else /* PIO */
+               return sci_stp_pio_request_construct(ireq, copy);
+}
+
+static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
+{
+       struct sas_task *task = isci_request_access_task(ireq);
+
+       ireq->protocol = SCIC_SSP_PROTOCOL;
+
+       scu_ssp_io_request_construct_task_context(ireq,
+                                                 task->data_dir,
+                                                 task->total_xfer_len);
+
+       sci_io_request_build_ssp_command_iu(ireq);
+
+       sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_task_request_construct_ssp(
+       struct isci_request *ireq)
+{
+       /* Construct the SSP Task SCU Task Context */
+       scu_ssp_task_request_construct_task_context(ireq);
+
+       /* Fill in the SSP Task IU */
+       sci_task_request_build_ssp_task_iu(ireq);
+
+       sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
+{
+       enum sci_status status;
+       bool copy = false;
+       struct sas_task *task = isci_request_access_task(ireq);
+
+       ireq->protocol = SCIC_STP_PROTOCOL;
+
+       copy = (task->data_dir != DMA_NONE);
+
+       status = sci_io_request_construct_sata(ireq,
+                                               task->total_xfer_len,
+                                               task->data_dir,
+                                               copy);
+
+       if (status == SCI_SUCCESS)
+               sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return status;
+}
+
+enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
+{
+       enum sci_status status = SCI_SUCCESS;
+
+       /* check for management protocols */
+       if (ireq->ttype == tmf_task) {
+               struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+               if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+                   tmf->tmf_code == isci_tmf_sata_srst_low) {
+                       scu_stp_raw_request_construct_task_context(ireq);
+               } else {
+                       dev_err(&ireq->owning_controller->pdev->dev,
+                               "%s: Request 0x%p received un-handled SAT "
+                               "Protocol 0x%x.\n",
+                               __func__, ireq, tmf->tmf_code);
+
+                       return SCI_FAILURE;
+               }
+       }
+
+       sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return status;
+}
+
+/**
+ * sci_req_tx_bytes - bytes transferred when reply underruns request
+ * @sci_req: request that was terminated early
+ */
+#define SCU_TASK_CONTEXT_SRAM 0x200000
+static u32 sci_req_tx_bytes(struct isci_request *ireq)
+{
+       struct isci_host *ihost = ireq->owning_controller;
+       u32 ret_val = 0;
+
+       if (readl(&ihost->smu_registers->address_modifier) == 0) {
+               void __iomem *scu_reg_base = ihost->scu_registers;
+
+               /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
+                *   BAR1 is the scu_registers
+                *   0x20002C = 0x200000 + 0x2c
+                *            = start of task context SRAM + offset of (type.ssp.data_offset)
+                *   TCi is the io_tag of struct sci_request
+                */
+               ret_val = readl(scu_reg_base +
+                               (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
+                               ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
+       }
+
+       return ret_val;
+}
+
+enum sci_status sci_request_start(struct isci_request *ireq)
+{
+       enum sci_base_request_states state;
+       struct scu_task_context *tc = ireq->tc;
+       struct isci_host *ihost = ireq->owning_controller;
+
+       state = ireq->sm.current_state_id;
+       if (state != SCI_REQ_CONSTRUCTED) {
+               dev_warn(&ihost->pdev->dev,
+                       "%s: SCIC IO Request requested to start while in wrong "
+                        "state %d\n", __func__, state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
+
+       switch (tc->protocol_type) {
+       case SCU_TASK_CONTEXT_PROTOCOL_SMP:
+       case SCU_TASK_CONTEXT_PROTOCOL_SSP:
+               /* SSP/SMP Frame */
+               tc->type.ssp.tag = ireq->io_tag;
+               tc->type.ssp.target_port_transfer_tag = 0xFFFF;
+               break;
+
+       case SCU_TASK_CONTEXT_PROTOCOL_STP:
+               /* STP/SATA Frame
+                * tc->type.stp.ncq_tag = ireq->ncq_tag;
+                */
+               break;
+
+       case SCU_TASK_CONTEXT_PROTOCOL_NONE:
+               /* TODO: When do we set no protocol type? */
+               break;
+
+       default:
+               /* This should never happen since we build the IO
+                * requests */
+               break;
+       }
+
+       /* Add the io tag value to the post_context */
+       ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
+
+       /* Everything is good; go ahead and change state */
+       sci_change_state(&ireq->sm, SCI_REQ_STARTED);
+
+       return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_terminate(struct isci_request *ireq)
+{
+       enum sci_base_request_states state;
+
+       state = ireq->sm.current_state_id;
+
+       switch (state) {
+       case SCI_REQ_CONSTRUCTED:
+               ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+               ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               return SCI_SUCCESS;
+       case SCI_REQ_STARTED:
+       case SCI_REQ_TASK_WAIT_TC_COMP:
+       case SCI_REQ_SMP_WAIT_RESP:
+       case SCI_REQ_SMP_WAIT_TC_COMP:
+       case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+       case SCI_REQ_STP_UDMA_WAIT_D2H:
+       case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+       case SCI_REQ_STP_NON_DATA_WAIT_D2H:
+       case SCI_REQ_STP_PIO_WAIT_H2D:
+       case SCI_REQ_STP_PIO_WAIT_FRAME:
+       case SCI_REQ_STP_PIO_DATA_IN:
+       case SCI_REQ_STP_PIO_DATA_OUT:
+       case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+       case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+       case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
+               sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+               return SCI_SUCCESS;
+       case SCI_REQ_TASK_WAIT_TC_RESP:
+               sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               return SCI_SUCCESS;
+       case SCI_REQ_ABORTING:
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               return SCI_SUCCESS;
+       case SCI_REQ_COMPLETED:
+       default:
+               dev_warn(&ireq->owning_controller->pdev->dev,
+                        "%s: SCIC IO Request requested to abort while in wrong "
+                        "state %d\n",
+                        __func__,
+                        ireq->sm.current_state_id);
+               break;
+       }
+
+       return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_request_complete(struct isci_request *ireq)
+{
+       enum sci_base_request_states state;
+       struct isci_host *ihost = ireq->owning_controller;
+
+       state = ireq->sm.current_state_id;
+       if (WARN_ONCE(state != SCI_REQ_COMPLETED,
+                     "isci: request completion from wrong state (%d)\n", state))
+               return SCI_FAILURE_INVALID_STATE;
+
+       if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
+               sci_controller_release_frame(ihost,
+                                                 ireq->saved_rx_frame_index);
+
+       /* XXX can we just stop the machine and remove the 'final' state? */
+       sci_change_state(&ireq->sm, SCI_REQ_FINAL);
+       return SCI_SUCCESS;
+}
+
+enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
+                                                 u32 event_code)
+{
+       enum sci_base_request_states state;
+       struct isci_host *ihost = ireq->owning_controller;
+
+       state = ireq->sm.current_state_id;
+
+       if (state != SCI_REQ_STP_PIO_DATA_IN) {
+               dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
+                        __func__, event_code, state);
+
+               return SCI_FAILURE_INVALID_STATE;
+       }
+
+       switch (scu_get_event_specifier(event_code)) {
+       case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
+               /* We are waiting for data and the SCU has R_ERR'd the data frame.
+                * Go back to waiting for the D2H Register FIS.
+                */
+               sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+               return SCI_SUCCESS;
+       default:
+               dev_err(&ihost->pdev->dev,
+                       "%s: pio request unexpected event %#x\n",
+                       __func__, event_code);
+
+               /* TODO Should we fail the PIO request when we get an
+                * unexpected event?
+                */
+               return SCI_FAILURE;
+       }
+}
+
+/*
+ * This function copies response data for requests returning response data
+ *    instead of sense data.
+ * @ireq: This parameter specifies the request object for which to copy
+ *    the response data.
+ */
+static void sci_io_request_copy_response(struct isci_request *ireq)
+{
+       void *resp_buf;
+       u32 len;
+       struct ssp_response_iu *ssp_response;
+       struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+       ssp_response = &ireq->ssp.rsp;
+
+       resp_buf = &isci_tmf->resp.resp_iu;
+
+       len = min_t(u32,
+                   SSP_RESP_IU_MAX_SIZE,
+                   be32_to_cpu(ssp_response->response_data_len));
+
+       memcpy(resp_buf, ssp_response->resp_data, len);
+}
+
+static enum sci_status
+request_started_state_tc_event(struct isci_request *ireq,
+                              u32 completion_code)
+{
+       struct ssp_response_iu *resp_iu;
+       u8 datapres;
+
+       /* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
+        * to determine the SDMA status
+        */
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               break;
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
+               /* There are times when the SCU hardware will return an early
+                * response because the io request specified more data than is
+                * returned by the target device (mode pages, inquiry data,
+                * etc.).  We must check the response status to see if this is
+                * truly a failed request or a good request that just got
+                * completed early.
+                */
+               struct ssp_response_iu *resp = &ireq->ssp.rsp;
+               ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+               sci_swab32_cpy(&ireq->ssp.rsp,
+                              &ireq->ssp.rsp,
+                              word_cnt);
+
+               if (resp->status == 0) {
+                       ireq->scu_status = SCU_TASK_DONE_GOOD;
+                       ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+               } else {
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+               }
+               break;
+       }
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
+               ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+               sci_swab32_cpy(&ireq->ssp.rsp,
+                              &ireq->ssp.rsp,
+                              word_cnt);
+
+               ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+               ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+               break;
+       }
+
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
+               /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
+                * guaranteed to be received before this completion status is
+                * posted?
+                */
+               resp_iu = &ireq->ssp.rsp;
+               datapres = resp_iu->datapres;
+
+               if (datapres == 1 || datapres == 2) {
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+               } else {
+                       ireq->scu_status = SCU_TASK_DONE_GOOD;
+                       ireq->sci_status = SCI_SUCCESS;
+               }
+               break;
+       /* Only the STP device gets suspended. */
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
+               if (ireq->protocol == SCIC_STP_PROTOCOL) {
+                       ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+                                          SCU_COMPLETION_TL_STATUS_SHIFT;
+                       ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+               } else {
+                       ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+                                          SCU_COMPLETION_TL_STATUS_SHIFT;
+                       ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               }
+               break;
+
+       /* Both the STP and SSP devices get suspended. */
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
+               ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+                                  SCU_COMPLETION_TL_STATUS_SHIFT;
+               ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+               break;
+
+       /* Neither SSP nor STP gets suspended. */
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
+       default:
+               ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+                                  SCU_COMPLETION_TL_STATUS_SHIFT;
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               break;
+       }
+
+       /*
+        * TODO: This is probably wrong for ACK/NAK timeout conditions
+        */
+
+       /* In all cases we will treat this as the completion of the IO req. */
+       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+       return SCI_SUCCESS;
+}
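+
+/* A minimal sketch of how the completion-code macros above are assumed to
+ * relate, based only on their use in this file: SCU_GET_COMPLETION_TL_STATUS()
+ * appears to extract the TL status field without shifting it down,
+ * SCU_MAKE_COMPLETION_STATUS() shifts a SCU_TASK_DONE_* value up into that
+ * field, and SCU_NORMALIZE_COMPLETION_STATUS() recovers the raw value:
+ *
+ *     u32 tl = SCU_GET_COMPLETION_TL_STATUS(completion_code);
+ *
+ *     if (tl == SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD))
+ *             scu_status = SCU_TASK_DONE_GOOD;
+ *     else
+ *             scu_status = tl >> SCU_COMPLETION_TL_STATUS_SHIFT;
+ *
+ * which is why the error paths above shift the masked field back down before
+ * storing it in ireq->scu_status.
+ */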
+
+static enum sci_status
+request_aborting_state_tc_event(struct isci_request *ireq,
+                               u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+       case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
+               ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+               ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+
+       default:
+               /* Unless we get some strange error, wait for the task abort to complete.
+                * TODO: Should there be a state change for this completion?
+                */
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
+                                                      u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+               break;
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+               /* Currently, the decision is to simply allow the task request
+                * to timeout if the task IU wasn't received successfully.
+                * There is a potential for receiving multiple task responses if
+                * we decide to send the task IU again.
+                */
+               dev_warn(&ireq->owning_controller->pdev->dev,
+                        "%s: TaskRequest:0x%p CompletionCode:%x - "
+                        "ACK/NAK timeout\n", __func__, ireq,
+                        completion_code);
+
+               sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+               break;
+       default:
+               /*
+                * All other completion status cause the IO to be complete.
+                * If a NAK was received, then it is up to the user to retry
+                * the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_response_tc_event(struct isci_request *ireq,
+                                   u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               /* In the AWAIT RESPONSE state, any TC completion is
+                * unexpected, but if the TC has success status, we
+                * complete the IO anyway.
+                */
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+               /* These statuses have been seen with a specific LSI
+                * expander, which sometimes is not able to send an SMP
+                * response within 2 ms.  This causes our hardware to break
+                * the connection and set the TC completion with one of
+                * these SMP_XXX_XX_ERR statuses.  For this type of error,
+                * we ask the ihost user to retry the request.
+                */
+               ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
+               ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       default:
+               /* All other completion status cause the IO to be complete.  If a NAK
+                * was received, then it is up to the user to retry the request
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_tc_event(struct isci_request *ireq,
+                          u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       default:
+               /* All other completion status cause the IO to be
+                * complete.  If a NAK was received, then it is up to
+                * the user to retry the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
+{
+       struct scu_sgl_element *sgl;
+       struct scu_sgl_element_pair *sgl_pair;
+       struct isci_request *ireq = to_ireq(stp_req);
+       struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
+
+       sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+       if (!sgl_pair)
+               sgl = NULL;
+       else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
+               if (sgl_pair->B.address_lower == 0 &&
+                   sgl_pair->B.address_upper == 0) {
+                       sgl = NULL;
+               } else {
+                       pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
+                       sgl = &sgl_pair->B;
+               }
+       } else {
+               if (sgl_pair->next_pair_lower == 0 &&
+                   sgl_pair->next_pair_upper == 0) {
+                       sgl = NULL;
+               } else {
+                       pio_sgl->index++;
+                       pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
+                       sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+                       sgl = &sgl_pair->A;
+               }
+       }
+
+       return sgl;
+}
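+
+/* Traversal sketch: pio_sgl_next() walks the SGL element pairs in the order
+ * pair[0].A, pair[0].B, pair[1].A, pair[1].B, ... and returns NULL when the
+ * next element is empty (a zero B address within a pair, or a zero next-pair
+ * pointer between pairs).  For example, starting from the saved cursor
+ * { index = 0, set = PAIR_A }, three successive calls return pair[0].B,
+ * pair[1].A and pair[1].B, leaving the cursor at { index = 1, set = PAIR_B }.
+ */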
+
+static enum sci_status
+stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
+                                       u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
+               break;
+
+       default:
+               /* All other completion status cause the IO to be
+                * complete.  If a NAK was received, then it is up to
+                * the user to retry the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */
+
+/* Transmit a DATA FIS from (current sgl + offset) for the input
+ * parameter length.  The current sgl and offset are already stored in the IO request.
+ */
+static enum sci_status sci_stp_request_pio_data_out_transmit_data_frame(
+       struct isci_request *ireq,
+       u32 length)
+{
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+       struct scu_task_context *task_context = ireq->tc;
+       struct scu_sgl_element_pair *sgl_pair;
+       struct scu_sgl_element *current_sgl;
+
+       /* Recycle the TC and reconstruct it to send out a DATA FIS containing
+        * the data from current_sgl + offset for the input length
+        */
+       sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+       if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
+               current_sgl = &sgl_pair->A;
+       else
+               current_sgl = &sgl_pair->B;
+
+       /* update the TC */
+       task_context->command_iu_upper = current_sgl->address_upper;
+       task_context->command_iu_lower = current_sgl->address_lower;
+       task_context->transfer_length_bytes = length;
+       task_context->type.stp.fis_type = FIS_DATA;
+
+       /* send the new TC out. */
+       return sci_controller_continue_io(ireq);
+}
+
+static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
+{
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+       struct scu_sgl_element_pair *sgl_pair;
+       struct scu_sgl_element *sgl;
+       enum sci_status status;
+       u32 offset;
+       u32 len = 0;
+
+       offset = stp_req->sgl.offset;
+       sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+       if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
+               return SCI_FAILURE;
+
+       if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
+               sgl = &sgl_pair->A;
+               len = sgl_pair->A.length - offset;
+       } else {
+               sgl = &sgl_pair->B;
+               len = sgl_pair->B.length - offset;
+       }
+
+       if (stp_req->pio_len == 0)
+               return SCI_SUCCESS;
+
+       if (stp_req->pio_len >= len) {
+               status = sci_stp_request_pio_data_out_transmit_data_frame(ireq, len);
+               if (status != SCI_SUCCESS)
+                       return status;
+               stp_req->pio_len -= len;
+
+               /* update the current sgl, offset and save for future */
+               sgl = pio_sgl_next(stp_req);
+               offset = 0;
+       } else {
+               status = sci_stp_request_pio_data_out_transmit_data_frame(ireq,
+                                                                         stp_req->pio_len);
+
+               /* Sgl offset will be adjusted and saved for future */
+               offset += stp_req->pio_len;
+               sgl->address_lower += stp_req->pio_len;
+               stp_req->pio_len = 0;
+       }
+
+       stp_req->sgl.offset = offset;
+
+       return status;
+}
+
+/**
+ * sci_stp_request_pio_data_in_copy_data_buffer() - This function copies the
+ *    data from the buffer for the length specified to the IO request SGL
+ *    specified data region.
+ * @stp_req: The request that is used for the SGL processing.
+ * @data_buf: The buffer of data to be copied.
+ * @len: The length of the data transfer.
+ *
+ * Return: enum sci_status indicating the result of the copy.
+ */
+static enum sci_status
+sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
+                                                 u8 *data_buf, u32 len)
+{
+       struct isci_request *ireq;
+       u8 *src_addr;
+       int copy_len;
+       struct sas_task *task;
+       struct scatterlist *sg;
+       void *kaddr;
+       int total_len = len;
+
+       ireq = to_ireq(stp_req);
+       task = isci_request_access_task(ireq);
+       src_addr = data_buf;
+
+       if (task->num_scatter > 0) {
+               sg = task->scatter;
+
+               while (total_len > 0) {
+                       struct page *page = sg_page(sg);
+
+                       copy_len = min_t(int, total_len, sg_dma_len(sg));
+                       kaddr = kmap_atomic(page, KM_IRQ0);
+                       memcpy(kaddr + sg->offset, src_addr, copy_len);
+                       kunmap_atomic(kaddr, KM_IRQ0);
+                       total_len -= copy_len;
+                       src_addr += copy_len;
+                       sg = sg_next(sg);
+               }
+       } else {
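+               /* No scatterlist: task->scatter is treated here as a pointer
+                * to a plain (non-sg) data buffer.
+                */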
+               BUG_ON(task->total_xfer_len < total_len);
+               memcpy(task->scatter, src_addr, total_len);
+       }
+
+       return SCI_SUCCESS;
+}
+
+/**
+ * sci_stp_request_pio_data_in_copy_data() - This function copies the data
+ *    buffer to the IO request data region.
+ * @stp_req: The PIO DATA IN request that is to receive the data.
+ * @data_buffer: The buffer to copy from.
+ *
+ * Return: enum sci_status indicating the result of the copy.
+ */
+static enum sci_status sci_stp_request_pio_data_in_copy_data(
+       struct isci_stp_request *stp_req,
+       u8 *data_buffer)
+{
+       enum sci_status status;
+
+       /*
+        * If there is less than 1K remaining in the transfer request,
+        * copy just the data for the transfer.
+        */
+       if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
+               status = sci_stp_request_pio_data_in_copy_data_buffer(
+                       stp_req, data_buffer, stp_req->pio_len);
+
+               if (status == SCI_SUCCESS)
+                       stp_req->pio_len = 0;
+       } else {
+               /* We are transferring the whole frame so copy */
+               status = sci_stp_request_pio_data_in_copy_data_buffer(
+                       stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
+
+               if (status == SCI_SUCCESS)
+                       stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
+       }
+
+       return status;
+}
+
+static enum sci_status
+stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
+                                             u32 completion_code)
+{
+       enum sci_status status = SCI_SUCCESS;
+
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+               break;
+
+       default:
+               /* All other completion status cause the IO to be
+                * complete.  If a NAK was received, then it is up to
+                * the user to retry the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return status;
+}
+
+static enum sci_status
+pio_data_out_tx_done_tc_event(struct isci_request *ireq,
+                             u32 completion_code)
+{
+       enum sci_status status = SCI_SUCCESS;
+       bool all_frames_transferred = false;
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               /* Transmit data */
+               if (stp_req->pio_len != 0) {
+                       status = sci_stp_request_pio_data_out_transmit_data(ireq);
+                       if (status == SCI_SUCCESS) {
+                               if (stp_req->pio_len == 0)
+                                       all_frames_transferred = true;
+                       }
+               } else if (stp_req->pio_len == 0) {
+                       /*
+                        * this will happen if all the data is written the
+                        * first time after the PIO setup FIS is received
+                        */
+                       all_frames_transferred  = true;
+               }
+
+               /* all data transferred. */
+               if (all_frames_transferred) {
+                       /*
+                        * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
+                        * and wait for a PIO_SETUP or D2H Register FIS. */
+                       sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+               }
+               break;
+
+       default:
+               /*
+                * All other completion status cause the IO to be complete.
+                * If a NAK was received, then it is up to the user to retry
+                * the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return status;
+}
+
+static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
+                                                                      u32 frame_index)
+{
+       struct isci_host *ihost = ireq->owning_controller;
+       struct dev_to_host_fis *frame_header;
+       enum sci_status status;
+       u32 *frame_buffer;
+
+       status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                              frame_index,
+                                                              (void **)&frame_header);
+
+       if ((status == SCI_SUCCESS) &&
+           (frame_header->fis_type == FIS_REGD2H)) {
+               sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                             frame_index,
+                                                             (void **)&frame_buffer);
+
+               sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                      frame_header,
+                                                      frame_buffer);
+       }
+
+       sci_controller_release_frame(ihost, frame_index);
+
+       return status;
+}
+
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+                                 u32 frame_index)
+{
+       struct isci_host *ihost = ireq->owning_controller;
+       struct isci_stp_request *stp_req = &ireq->stp.req;
+       enum sci_base_request_states state;
+       enum sci_status status;
+       ssize_t word_cnt;
+
+       state = ireq->sm.current_state_id;
+       switch (state)  {
+       case SCI_REQ_STARTED: {
+               struct ssp_frame_hdr ssp_hdr;
+               void *frame_header;
+
+               sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                             frame_index,
+                                                             &frame_header);
+
+               word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
+               sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
+
+               if (ssp_hdr.frame_type == SSP_RESPONSE) {
+                       struct ssp_response_iu *resp_iu;
+                       ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&resp_iu);
+
+                       sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
+
+                       resp_iu = &ireq->ssp.rsp;
+
+                       if (resp_iu->datapres == 0x01 ||
+                           resp_iu->datapres == 0x02) {
+                               ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+                       } else {
+                               ireq->scu_status = SCU_TASK_DONE_GOOD;
+                               ireq->sci_status = SCI_SUCCESS;
+                       }
+               } else {
+                       /* not a response frame, why did it get forwarded? */
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p received unexpected "
+                               "frame %d type 0x%02x\n", __func__, ireq,
+                               frame_index, ssp_hdr.frame_type);
+               }
+
+               /*
+                * In any case we are done with this frame buffer; return it to
+                * the controller.
+                */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return SCI_SUCCESS;
+       }
+
+       case SCI_REQ_TASK_WAIT_TC_RESP:
+               sci_io_request_copy_response(ireq);
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               sci_controller_release_frame(ihost, frame_index);
+               return SCI_SUCCESS;
+
+       case SCI_REQ_SMP_WAIT_RESP: {
+               struct smp_resp *rsp_hdr = &ireq->smp.rsp;
+               void *frame_header;
+
+               sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                             frame_index,
+                                                             &frame_header);
+
+               /* byte swap the header. */
+               word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
+               sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
+
+               if (rsp_hdr->frame_type == SMP_RESPONSE) {
+                       void *smp_resp;
+
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     &smp_resp);
+
+                       word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
+                               sizeof(u32);
+
+                       sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
+                                      smp_resp, word_cnt);
+
+                       ireq->scu_status = SCU_TASK_DONE_GOOD;
+                       ireq->sci_status = SCI_SUCCESS;
+                       sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
+               } else {
+                       /*
+                        * This was not a response frame; why did it get
+                        * forwarded?
+                        */
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC SMP Request 0x%p received unexpected "
+                               "frame %d type 0x%02x\n",
+                               __func__,
+                               ireq,
+                               frame_index,
+                               rsp_hdr->frame_type);
+
+                       ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
+                       ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               }
+
+               sci_controller_release_frame(ihost, frame_index);
+
+               return SCI_SUCCESS;
+       }
+
+       case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+               return sci_stp_request_udma_general_frame_handler(ireq,
+                                                                      frame_index);
+
+       case SCI_REQ_STP_UDMA_WAIT_D2H:
+               /* Use the general frame handler to copy the response data */
+               status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
+
+               if (status != SCI_SUCCESS)
+                       return status;
+
+               ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+               ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               return SCI_SUCCESS;
+
+       case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
+               struct dev_to_host_fis *frame_header;
+               u32 *frame_buffer;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&frame_header);
+
+               if (status != SCI_SUCCESS) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p could not get frame "
+                               "header for frame index %d, status %x\n",
+                               __func__,
+                               stp_req,
+                               frame_index,
+                               status);
+
+                       return status;
+               }
+
+               switch (frame_header->fis_type) {
+               case FIS_REGD2H:
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                              frame_header,
+                                                              frame_buffer);
+
+                       /* The command has completed with error */
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       break;
+
+               default:
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: IO Request:0x%p Frame Id:%d protocol "
+                                 "violation occurred\n", __func__, stp_req,
+                                 frame_index);
+
+                       ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+                       ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+                       break;
+               }
+
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+               /* Frame has been decoded; return it to the controller */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return status;
+       }
+
+       case SCI_REQ_STP_PIO_WAIT_FRAME: {
+               struct sas_task *task = isci_request_access_task(ireq);
+               struct dev_to_host_fis *frame_header;
+               u32 *frame_buffer;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&frame_header);
+
+               if (status != SCI_SUCCESS) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p could not get frame "
+                               "header for frame index %d, status %x\n",
+                               __func__, stp_req, frame_index, status);
+                       return status;
+               }
+
+               switch (frame_header->fis_type) {
+               case FIS_PIO_SETUP:
+                       /* Get from the frame buffer the PIO Setup Data */
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       /* Get the data from the PIO Setup.  The SCU hardware
+                        * returns the first word in the frame_header and the
+                        * rest of the data in the frame buffer, so we need to
+                        * back up one dword
+                        */
+
+                       /* transfer_count: first 16 bits in the 4th dword */
+                       stp_req->pio_len = frame_buffer[3] & 0xffff;
+
+                       /* status: 4th byte in the 3rd dword */
+                       stp_req->status = (frame_buffer[2] >> 24) & 0xff;
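+
+                       /* Worked example (illustrative values only): with
+                        * frame_buffer[2] == 0x58000000 and
+                        * frame_buffer[3] == 0x00002000, the decode above
+                        * yields status == 0x58 and pio_len == 0x2000 bytes.
+                        */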
+
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                              frame_header,
+                                                              frame_buffer);
+
+                       ireq->stp.rsp.status = stp_req->status;
+
+                       /* The next state is dependent on whether the
+                        * request was PIO Data-in or Data out
+                        */
+                       if (task->data_dir == DMA_FROM_DEVICE) {
+                               sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
+                       } else if (task->data_dir == DMA_TO_DEVICE) {
+                               /* Transmit data */
+                               status = sci_stp_request_pio_data_out_transmit_data(ireq);
+                               if (status != SCI_SUCCESS)
+                                       break;
+                               sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
+                       }
+                       break;
+
+               case FIS_SETDEVBITS:
+                       sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+                       break;
+
+               case FIS_REGD2H:
+                       if (frame_header->status & ATA_BUSY) {
+                               /*
+                                * Now why is the drive sending a D2H Register
+                                * FIS when it is still busy?  Do nothing since
+                                * we are still in the right state.
+                                */
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SCIC PIO Request 0x%p received "
+                                       "D2H Register FIS with BSY status "
+                                       "0x%x\n",
+                                       __func__,
+                                       stp_req,
+                                       frame_header->status);
+                               break;
+                       }
+
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                              frame_header,
+                                                              frame_buffer);
+
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+                       break;
+
+               default:
+                       /* FIXME: what do we do here? */
+                       break;
+               }
+
+               /* Frame is decoded; return it to the controller */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return status;
+       }
+
+       case SCI_REQ_STP_PIO_DATA_IN: {
+               struct dev_to_host_fis *frame_header;
+               struct sata_fis_data *frame_buffer;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&frame_header);
+
+               if (status != SCI_SUCCESS) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p could not get frame "
+                               "header for frame index %d, status %x\n",
+                               __func__,
+                               stp_req,
+                               frame_index,
+                               status);
+                       return status;
+               }
+
+               if (frame_header->fis_type != FIS_DATA) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC PIO Request 0x%p received frame %d "
+                               "with fis type 0x%02x when expecting a data "
+                               "fis.\n",
+                               __func__,
+                               stp_req,
+                               frame_index,
+                               frame_header->fis_type);
+
+                       ireq->scu_status = SCU_TASK_DONE_GOOD;
+                       ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+                       /* Frame is decoded; return it to the controller */
+                       sci_controller_release_frame(ihost, frame_index);
+                       return status;
+               }
+
+               if (stp_req->sgl.index < 0) {
+                       ireq->saved_rx_frame_index = frame_index;
+                       stp_req->pio_len = 0;
+               } else {
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       status = sci_stp_request_pio_data_in_copy_data(stp_req,
+                                                                           (u8 *)frame_buffer);
+
+                       /* Frame is decoded; return it to the controller */
+                       sci_controller_release_frame(ihost, frame_index);
+               }
+
+               /* Check for the end of the transfer: are there more
+                * bytes remaining for this data transfer?
+                */
+               if (status != SCI_SUCCESS || stp_req->pio_len != 0)
+                       return status;
+
+               if ((stp_req->status & ATA_BUSY) == 0) {
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               } else {
+                       sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+               }
+               return status;
+       }
+
+       case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
+               struct dev_to_host_fis *frame_header;
+               u32 *frame_buffer;
+
+               status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+                                                                      frame_index,
+                                                                      (void **)&frame_header);
+               if (status != SCI_SUCCESS) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: SCIC IO Request 0x%p could not get frame "
+                               "header for frame index %d, status %x\n",
+                               __func__,
+                               stp_req,
+                               frame_index,
+                               status);
+                       return status;
+               }
+
+               switch (frame_header->fis_type) {
+               case FIS_REGD2H:
+                       sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+                                                                     frame_index,
+                                                                     (void **)&frame_buffer);
+
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
+                                                              frame_header,
+                                                              frame_buffer);
+
+                       /* The command has completed with error */
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       break;
+
+               default:
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: IO Request:0x%p Frame Id:%d protocol "
+                                "violation occurred\n",
+                                __func__,
+                                stp_req,
+                                frame_index);
+
+                       ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+                       ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+                       break;
+               }
+
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+               /* Frame has been decoded; return it to the controller */
+               sci_controller_release_frame(ihost, frame_index);
+
+               return status;
+       }
+       case SCI_REQ_ABORTING:
+               /*
+                * TODO: Is it even possible to get an unsolicited frame in the
+                * aborting state?
+                */
+               sci_controller_release_frame(ihost, frame_index);
+               return SCI_SUCCESS;
+
+       default:
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC IO Request given unexpected frame %x while "
+                        "in state %d\n",
+                        __func__,
+                        frame_index,
+                        state);
+
+               sci_controller_release_frame(ihost, frame_index);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
+                                                      u32 completion_code)
+{
+       enum sci_status status = SCI_SUCCESS;
+
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+               /* We must check the response buffer to see if the D2H
+                * Register FIS was received before we got the TC
+                * completion.
+                */
+               if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
+                       sci_remote_device_suspend(ireq->target_device,
+                               SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+
+                       ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+                       ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+                       sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               } else {
+                       /* If we have an error completion status for the
+                        * TC, then we can expect a D2H register FIS from
+                        * the device, so we must change state to wait
+                        * for it.
+                        */
+                       sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
+               }
+               break;
+
+       /* TODO Check to see if any of these completion status need to
+        * wait for the device to host register fis.
+        */
+       /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
+        * - this comes only for B0
+        */
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
+               sci_remote_device_suspend(ireq->target_device,
+                       SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+       /* Fall through to the default case */
+       default:
+               /* All other completion status cause the IO to be complete. */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return status;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
+                                                  u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
+               break;
+
+       default:
+               /*
+                * All other completion status cause the IO to be complete.
+                * If a NAK was received, then it is up to the user to retry
+                * the request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
+                                                    u32 completion_code)
+{
+       switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+       case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+               ireq->scu_status = SCU_TASK_DONE_GOOD;
+               ireq->sci_status = SCI_SUCCESS;
+               sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
+               break;
+
+       default:
+               /* All other completion status cause the IO to be complete.  If
+                * a NAK was received, then it is up to the user to retry the
+                * request.
+                */
+               ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+               ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+               sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+               break;
+       }
+
+       return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq,
+                                 u32 completion_code)
+{
+       enum sci_base_request_states state;
+       struct isci_host *ihost = ireq->owning_controller;
+
+       state = ireq->sm.current_state_id;
+
+       switch (state) {
+       case SCI_REQ_STARTED:
+               return request_started_state_tc_event(ireq, completion_code);
+
+       case SCI_REQ_TASK_WAIT_TC_COMP:
+               return ssp_task_request_await_tc_event(ireq,
+                                                      completion_code);
+
+       case SCI_REQ_SMP_WAIT_RESP:
+               return smp_request_await_response_tc_event(ireq,
+                                                          completion_code);
+
+       case SCI_REQ_SMP_WAIT_TC_COMP:
+               return smp_request_await_tc_event(ireq, completion_code);
+
+       case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+               return stp_request_udma_await_tc_event(ireq,
+                                                      completion_code);
+
+       case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+               return stp_request_non_data_await_h2d_tc_event(ireq,
+                                                              completion_code);
+
+       case SCI_REQ_STP_PIO_WAIT_H2D:
+               return stp_request_pio_await_h2d_completion_tc_event(ireq,
+                                                                    completion_code);
+
+       case SCI_REQ_STP_PIO_DATA_OUT:
+               return pio_data_out_tx_done_tc_event(ireq, completion_code);
+
+       case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+               return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
+                                                                         completion_code);
+
+       case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+               return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
+                                                                           completion_code);
+
+       case SCI_REQ_ABORTING:
+               return request_aborting_state_tc_event(ireq,
+                                                      completion_code);
+
+       default:
+               dev_warn(&ihost->pdev->dev,
+                        "%s: SCIC IO Request given task completion "
+                        "notification %x while in wrong state %d\n",
+                        __func__,
+                        completion_code,
+                        state);
+               return SCI_FAILURE_INVALID_STATE;
+       }
+}
+
+/**
+ * isci_request_process_response_iu() - This function sets the status and
+ *    response iu, in the task struct, from the request object for the upper
+ *    layer driver.
+ * @task: This parameter is the task struct from the upper layer driver.
+ * @resp_iu: This parameter points to the response iu of the completed request.
+ * @dev: This parameter specifies the linux device struct.
+ *
+ * none.
+ */
+static void isci_request_process_response_iu(
+       struct sas_task *task,
+       struct ssp_response_iu *resp_iu,
+       struct device *dev)
+{
+       dev_dbg(dev,
+               "%s: resp_iu = %p "
+               "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
+               "resp_iu->response_data_len = %x, "
+               "resp_iu->sense_data_len = %x\nresponse data: ",
+               __func__,
+               resp_iu,
+               resp_iu->status,
+               resp_iu->datapres,
+               resp_iu->response_data_len,
+               resp_iu->sense_data_len);
+
+       task->task_status.stat = resp_iu->status;
+
+       /* libsas updates the task status fields based on the response iu. */
+       sas_ssp_task_response(dev, task, resp_iu);
+}
+
+/**
+ * isci_request_set_open_reject_status() - This function prepares the I/O
+ *    completion for OPEN_REJECT conditions.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the sas_task associated with the completed request.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ *    the LLDD with respect to completing this request or forcing an abort
+ *    condition on the I/O.
+ * @open_rej_reason: This parameter specifies the encoded reason for the
+ *    abandon-class reject.
+ *
+ * none.
+ */
+static void isci_request_set_open_reject_status(
+       struct isci_request *request,
+       struct sas_task *task,
+       enum service_response *response_ptr,
+       enum exec_status *status_ptr,
+       enum isci_completion_selection *complete_to_host_ptr,
+       enum sas_open_rej_reason open_rej_reason)
+{
+       /* Task in the target is done. */
+       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+       *response_ptr                     = SAS_TASK_UNDELIVERED;
+       *status_ptr                       = SAS_OPEN_REJECT;
+       *complete_to_host_ptr             = isci_perform_normal_io_completion;
+       task->task_status.open_rej_reason = open_rej_reason;
+}
+
+/**
+ * isci_request_handle_controller_specific_errors() - This function decodes
+ *    controller-specific I/O completion error conditions.
+ * @idev: This parameter is the remote device associated with the request.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the sas_task associated with the completed request.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ *    the LLDD with respect to completing this request or forcing an abort
+ *    condition on the I/O.
+ *
+ * none.
+ */
+static void isci_request_handle_controller_specific_errors(
+       struct isci_remote_device *idev,
+       struct isci_request *request,
+       struct sas_task *task,
+       enum service_response *response_ptr,
+       enum exec_status *status_ptr,
+       enum isci_completion_selection *complete_to_host_ptr)
+{
+       unsigned int cstatus;
+
+       cstatus = request->scu_status;
+
+       dev_dbg(&request->isci_host->pdev->dev,
+               "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
+               "- controller status = 0x%x\n",
+               __func__, request, cstatus);
+
+       /* Decode the controller-specific errors; most
+        * important is to recognize those conditions in which
+        * the target may still have a task outstanding that
+        * must be aborted.
+        *
+        * Note that there are SCU completion codes being
+        * named in the decode below for which SCIC has already
+        * done work to handle them in a way other than as
+        * a controller-specific completion code; these are left
+                * in the decode below for completeness' sake.
+        */
+       switch (cstatus) {
+       case SCU_TASK_DONE_DMASETUP_DIRERR:
+       /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
+       case SCU_TASK_DONE_XFERCNT_ERR:
+               /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
+               if (task->task_proto == SAS_PROTOCOL_SMP) {
+                       /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
+                       *response_ptr = SAS_TASK_COMPLETE;
+
+                       /* See if the device has been/is being stopped. Note
+                        * that we ignore the quiesce state, since we are
+                        * concerned about the actual device state.
+                        */
+                       if (!idev)
+                               *status_ptr = SAS_DEVICE_UNKNOWN;
+                       else
+                               *status_ptr = SAS_ABORTED_TASK;
+
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       *complete_to_host_ptr =
+                               isci_perform_normal_io_completion;
+               } else {
+                       /* Task in the target is not done. */
+                       *response_ptr = SAS_TASK_UNDELIVERED;
+
+                       if (!idev)
+                               *status_ptr = SAS_DEVICE_UNKNOWN;
+                       else
+                               *status_ptr = SAM_STAT_TASK_ABORTED;
+
+                       clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       *complete_to_host_ptr =
+                               isci_perform_error_io_completion;
+               }
+
+               break;
+
+       case SCU_TASK_DONE_CRC_ERR:
+       case SCU_TASK_DONE_NAK_CMD_ERR:
+       case SCU_TASK_DONE_EXCESS_DATA:
+       case SCU_TASK_DONE_UNEXP_FIS:
+       /* Also SCU_TASK_DONE_UNEXP_RESP: */
+       case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
+       case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
+       case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
+               /* These are conditions in which the target
+                * has completed the task, so that no cleanup
+                * is necessary.
+                */
+               *response_ptr = SAS_TASK_COMPLETE;
+
+               /* See if the device has been/is being stopped. Note
+                * that we ignore the quiesce state, since we are
+                * concerned about the actual device state.
+                */
+               if (!idev)
+                       *status_ptr = SAS_DEVICE_UNKNOWN;
+               else
+                       *status_ptr = SAS_ABORTED_TASK;
+
+               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+               *complete_to_host_ptr = isci_perform_normal_io_completion;
+               break;
+
+       /* Note that the only open reject completion codes seen here will be
+        * abandon-class codes; all others are automatically retried in the SCU.
+        */
+       case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+
+               /* Note - the return of AB0 will change when
+                * libsas implements detection of zone violations.
+                */
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_RESV_AB0);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_RESV_AB1);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_RESV_AB2);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_RESV_AB3);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_BAD_DEST);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_STP_NORES);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_EPROTO);
+               break;
+
+       case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+
+               isci_request_set_open_reject_status(
+                       request, task, response_ptr, status_ptr,
+                       complete_to_host_ptr, SAS_OREJ_CONN_RATE);
+               break;
+
+       case SCU_TASK_DONE_LL_R_ERR:
+       /* Also SCU_TASK_DONE_ACK_NAK_TO: */
+       case SCU_TASK_DONE_LL_PERR:
+       case SCU_TASK_DONE_LL_SY_TERM:
+       /* Also SCU_TASK_DONE_NAK_ERR:*/
+       case SCU_TASK_DONE_LL_LF_TERM:
+       /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
+       case SCU_TASK_DONE_LL_ABORT_ERR:
+       case SCU_TASK_DONE_SEQ_INV_TYPE:
+       /* Also SCU_TASK_DONE_UNEXP_XR: */
+       case SCU_TASK_DONE_XR_IU_LEN_ERR:
+       case SCU_TASK_DONE_INV_FIS_LEN:
+       /* Also SCU_TASK_DONE_XR_WD_LEN: */
+       case SCU_TASK_DONE_SDMA_ERR:
+       case SCU_TASK_DONE_OFFSET_ERR:
+       case SCU_TASK_DONE_MAX_PLD_ERR:
+       case SCU_TASK_DONE_LF_ERR:
+       case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
+       case SCU_TASK_DONE_SMP_LL_RX_ERR:
+       case SCU_TASK_DONE_UNEXP_DATA:
+       case SCU_TASK_DONE_UNEXP_SDBFIS:
+       case SCU_TASK_DONE_REG_ERR:
+       case SCU_TASK_DONE_SDB_ERR:
+       case SCU_TASK_DONE_TASK_ABORT:
+       default:
+               /* Task in the target is not done. */
+               *response_ptr = SAS_TASK_UNDELIVERED;
+               *status_ptr = SAM_STAT_TASK_ABORTED;
+
+               if (task->task_proto == SAS_PROTOCOL_SMP) {
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       *complete_to_host_ptr = isci_perform_normal_io_completion;
+               } else {
+                       clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       *complete_to_host_ptr = isci_perform_error_io_completion;
+               }
+               break;
+       }
+}
+
+/**
+ * isci_task_save_for_upper_layer_completion() - This function saves the
+ *    request for later completion to the upper layer driver.
+ * @host: This parameter is a pointer to the host on which the request
+ *    should be queued (either as an error or success).
+ * @request: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ * @task_notification_selection: This parameter specifies the completion
+ *    path (normal, aborted, or error) to be used for the task.
+ *
+ * none.
+ */
+static void isci_task_save_for_upper_layer_completion(
+       struct isci_host *host,
+       struct isci_request *request,
+       enum service_response response,
+       enum exec_status status,
+       enum isci_completion_selection task_notification_selection)
+{
+       struct sas_task *task = isci_request_access_task(request);
+
+       task_notification_selection
+               = isci_task_set_completion_status(task, response, status,
+                                                 task_notification_selection);
+
+       /* Tasks aborted specifically by a call to the lldd_abort_task
+        * function should not be completed to the host in the regular path.
+        */
+       switch (task_notification_selection) {
+
+       case isci_perform_normal_io_completion:
+
+               /* Normal notification (task_done) */
+               dev_dbg(&host->pdev->dev,
+                       "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
+                       __func__,
+                       task,
+                       task->task_status.resp, response,
+                       task->task_status.stat, status);
+               /* Add to the completed list. */
+               list_add(&request->completed_node,
+                        &host->requests_to_complete);
+
+               /* Take the request off the device's pending request list. */
+               list_del_init(&request->dev_node);
+               break;
+
+       case isci_perform_aborted_io_completion:
+               /* No notification to libsas because this request is
+                * already in the abort path.
+                */
+               dev_dbg(&host->pdev->dev,
+                        "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
+                        __func__,
+                        task,
+                        task->task_status.resp, response,
+                        task->task_status.stat, status);
+
+               /* Wake up whatever process was waiting for this
+                * request to complete.
+                */
+               WARN_ON(request->io_request_completion == NULL);
+
+               if (request->io_request_completion != NULL) {
+
+                       /* Signal whoever is waiting that this
+                        * request is complete.
+                        */
+                       complete(request->io_request_completion);
+               }
+               break;
+
+       case isci_perform_error_io_completion:
+               /* Use sas_task_abort */
+               dev_dbg(&host->pdev->dev,
+                        "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
+                        __func__,
+                        task,
+                        task->task_status.resp, response,
+                        task->task_status.stat, status);
+               /* Add to the aborted list. */
+               list_add(&request->completed_node,
+                        &host->requests_to_errorback);
+               break;
+
+       default:
+               dev_dbg(&host->pdev->dev,
+                        "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
+                        __func__,
+                        task,
+                        task->task_status.resp, response,
+                        task->task_status.stat, status);
+
+               /* Add to the error to libsas list. */
+               list_add(&request->completed_node,
+                        &host->requests_to_errorback);
+               break;
+       }
+}
+
+static void isci_request_process_stp_response(struct sas_task *task,
+                                             void *response_buffer)
+{
+       struct dev_to_host_fis *d2h_reg_fis = response_buffer;
+       struct task_status_struct *ts = &task->task_status;
+       struct ata_task_resp *resp = (void *)&ts->buf[0];
+
+       resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
+       memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
+       ts->buf_valid_size = sizeof(*resp);
+
+       /*
+        * If the device fault bit is set in the status register, then
+        * report a protocol response so that the error can be decoded.
+        */
+       if (d2h_reg_fis->status & ATA_DF)
+               ts->stat = SAS_PROTO_RESPONSE;
+       else
+               ts->stat = SAM_STAT_GOOD;
+
+       ts->resp = SAS_TASK_COMPLETE;
+}
+
+static void isci_request_io_request_complete(struct isci_host *ihost,
+                                            struct isci_request *request,
+                                            enum sci_io_status completion_status)
+{
+       struct sas_task *task = isci_request_access_task(request);
+       struct ssp_response_iu *resp_iu;
+       void *resp_buf;
+       unsigned long task_flags;
+       struct isci_remote_device *idev = isci_lookup_device(task->dev);
+       enum service_response response       = SAS_TASK_UNDELIVERED;
+       enum exec_status status         = SAS_ABORTED_TASK;
+       enum isci_request_status request_status;
+       enum isci_completion_selection complete_to_host
+               = isci_perform_normal_io_completion;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: request = %p, task = %p,\n"
+               "task->data_dir = %d completion_status = 0x%x\n",
+               __func__,
+               request,
+               task,
+               task->data_dir,
+               completion_status);
+
+       spin_lock(&request->state_lock);
+       request_status = request->status;
+
+       /* Decode the request status.  Note that if the request has been
+        * aborted by a task management function, we don't care
+        * what the status is.
+        */
+       switch (request_status) {
+
+       case aborted:
+               /* "aborted" indicates that the request was aborted by a task
+                * management function, since once a task management request is
+                * performed by the device, the request only completes because
+                * of the subsequent driver terminate.
+                *
+                * Aborted also means an external thread is explicitly managing
+                * this request, so that we do not complete it up the stack.
+                *
+                * The target is still there (since the TMF was successful).
+                */
+               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+               response = SAS_TASK_COMPLETE;
+
+               /* See if the device has been/is being stopped. Note
+                * that we ignore the quiesce state, since we are
+                * concerned about the actual device state.
+                */
+               if (!idev)
+                       status = SAS_DEVICE_UNKNOWN;
+               else
+                       status = SAS_ABORTED_TASK;
+
+               complete_to_host = isci_perform_aborted_io_completion;
+               /* This was an aborted request. */
+
+               spin_unlock(&request->state_lock);
+               break;
+
+       case aborting:
+               /* aborting means that the task management function tried and
+                * failed to abort the request. We need to note the request
+                * as SAS_TASK_UNDELIVERED, so that the SCSI mid-layer marks the
+                * target as down.
+                *
+                * Aborting also means an external thread is explicitly managing
+                * this request, so that we do not complete it up the stack.
+                */
+               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+               response = SAS_TASK_UNDELIVERED;
+
+               if (!idev)
+                       /* The device has been /is being stopped. Note that
+                        * we ignore the quiesce state, since we are
+                        * concerned about the actual device state.
+                        */
+                       status = SAS_DEVICE_UNKNOWN;
+               else
+                       status = SAS_PHY_DOWN;
+
+               complete_to_host = isci_perform_aborted_io_completion;
+
+               /* This was an aborted request. */
+
+               spin_unlock(&request->state_lock);
+               break;
+
+       case terminating:
+
+               /* This was a terminated request.  This happens when
+                * the I/O is being terminated because of an action on
+                * the device (reset, tear down, etc.), and the I/O needs
+                * to be completed up the stack.
+                */
+               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+               response = SAS_TASK_UNDELIVERED;
+
+               /* See if the device has been/is being stopped. Note
+                * that we ignore the quiesce state, since we are
+                * concerned about the actual device state.
+                */
+               if (!idev)
+                       status = SAS_DEVICE_UNKNOWN;
+               else
+                       status = SAS_ABORTED_TASK;
+
+               complete_to_host = isci_perform_aborted_io_completion;
+
+               /* This was a terminated request. */
+
+               spin_unlock(&request->state_lock);
+               break;
+
+       case dead:
+               /* This was a terminated request that timed out during the
+                * termination process.  There is no task to complete to
+                * libsas.
+                */
+               complete_to_host = isci_perform_normal_io_completion;
+               spin_unlock(&request->state_lock);
+               break;
+
+       default:
+
+               /* The request is done from an SCU HW perspective. */
+               request->status = completed;
+
+               spin_unlock(&request->state_lock);
+
+               /* This is an active request being completed from the core. */
+               switch (completion_status) {
+
+               case SCI_IO_FAILURE_RESPONSE_VALID:
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+                               __func__,
+                               request,
+                               task);
+
+                       if (sas_protocol_ata(task->task_proto)) {
+                               resp_buf = &request->stp.rsp;
+                               isci_request_process_stp_response(task,
+                                                                 resp_buf);
+                       } else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+                               /* crack the iu response buffer. */
+                               resp_iu = &request->ssp.rsp;
+                               isci_request_process_response_iu(task, resp_iu,
+                                                                &ihost->pdev->dev);
+
+                       } else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+                               dev_err(&ihost->pdev->dev,
+                                       "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+                                       "SAS_PROTOCOL_SMP protocol\n",
+                                       __func__);
+
+                       } else
+                               dev_err(&ihost->pdev->dev,
+                                       "%s: unknown protocol\n", __func__);
+
+                       /* use the task status set in the task struct by the
+                        * response processing above.
+                        */
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                       response = task->task_status.resp;
+                       status = task->task_status.stat;
+                       break;
+
+               case SCI_IO_SUCCESS:
+               case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
+                       response = SAS_TASK_COMPLETE;
+                       status   = SAM_STAT_GOOD;
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+                       if (task->task_proto == SAS_PROTOCOL_SMP) {
+                               void *rsp = &request->smp.rsp;
+
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SMP protocol completion\n",
+                                       __func__);
+
+                               sg_copy_from_buffer(
+                                       &task->smp_task.smp_resp, 1,
+                                       rsp, sizeof(struct smp_resp));
+                       } else if (completion_status
+                                  == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+
+                               /* This was an SSP / STP / SATA transfer.
+                                * There is a possibility that less data than
+                                * the maximum was transferred.
+                                */
+                               u32 transferred_length = sci_req_tx_bytes(request);
+
+                               task->task_status.residual
+                                       = task->total_xfer_len - transferred_length;
+
+                               /* If there were residual bytes, call this an
+                                * underrun.
+                                */
+                               if (task->task_status.residual != 0)
+                                       status = SAS_DATA_UNDERRUN;
+
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+                                       __func__,
+                                       status);
+
+                       } else
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SCI_IO_SUCCESS\n",
+                                       __func__);
+
+                       break;
+
+               case SCI_IO_FAILURE_TERMINATED:
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+                               __func__,
+                               request,
+                               task);
+
+                       /* The request was terminated explicitly.  No handling
+                        * is needed in the SCSI error handler path.
+                        */
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                       response = SAS_TASK_UNDELIVERED;
+
+                       /* See if the device has been/is being stopped. Note
+                        * that we ignore the quiesce state, since we are
+                        * concerned about the actual device state.
+                        */
+                       if (!idev)
+                               status = SAS_DEVICE_UNKNOWN;
+                       else
+                               status = SAS_ABORTED_TASK;
+
+                       complete_to_host = isci_perform_normal_io_completion;
+                       break;
+
+               case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
+
+                       isci_request_handle_controller_specific_errors(
+                               idev, request, task, &response, &status,
+                               &complete_to_host);
+
+                       break;
+
+               case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+                       /* This is a special case, in that the I/O completion
+                        * is telling us that the device needs a reset.
+                        * In order for the device reset condition to be
+                        * noticed, the I/O has to be handled in the error
+                        * handler.  Set the reset flag and cause the
+                        * SCSI error thread to be scheduled.
+                        */
+                       spin_lock_irqsave(&task->task_state_lock, task_flags);
+                       task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+                       spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+                       /* Fail the I/O. */
+                       response = SAS_TASK_UNDELIVERED;
+                       status = SAM_STAT_TASK_ABORTED;
+
+                       complete_to_host = isci_perform_error_io_completion;
+                       clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                       break;
+
+               case SCI_FAILURE_RETRY_REQUIRED:
+
+                       /* Fail the I/O so it can be retried. */
+                       response = SAS_TASK_UNDELIVERED;
+                       if (!idev)
+                               status = SAS_DEVICE_UNKNOWN;
+                       else
+                               status = SAS_ABORTED_TASK;
+
+                       complete_to_host = isci_perform_normal_io_completion;
+                       set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                       break;
+
+               default:
+                       /* Catch any otherwise unhandled error codes here. */
+                       dev_dbg(&ihost->pdev->dev,
+                                "%s: invalid completion code: 0x%x - "
+                                "isci_request = %p\n",
+                                __func__, completion_status, request);
+
+                       response = SAS_TASK_UNDELIVERED;
+
+                       /* See if the device has been/is being stopped. Note
+                        * that we ignore the quiesce state, since we are
+                        * concerned about the actual device state.
+                        */
+                       if (!idev)
+                               status = SAS_DEVICE_UNKNOWN;
+                       else
+                               status = SAS_ABORTED_TASK;
+
+                       if (SAS_PROTOCOL_SMP == task->task_proto) {
+                               set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                               complete_to_host = isci_perform_normal_io_completion;
+                       } else {
+                               clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+                               complete_to_host = isci_perform_error_io_completion;
+                       }
+                       break;
+               }
+               break;
+       }
+
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SSP:
+               if (task->data_dir == DMA_NONE)
+                       break;
+               if (task->num_scatter == 0)
+                       /* 0 indicates a single dma address */
+                       dma_unmap_single(&ihost->pdev->dev,
+                                        request->zero_scatter_daddr,
+                                        task->total_xfer_len, task->data_dir);
+               else  /* unmap the sgl dma addresses */
+                       dma_unmap_sg(&ihost->pdev->dev, task->scatter,
+                                    request->num_sg_entries, task->data_dir);
+               break;
+       case SAS_PROTOCOL_SMP: {
+               struct scatterlist *sg = &task->smp_task.smp_req;
+               struct smp_req *smp_req;
+               void *kaddr;
+
+               dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
+
+               /* need to swab it back in case the command buffer is re-used */
+               kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+               smp_req = kaddr + sg->offset;
+               sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+               kunmap_atomic(kaddr, KM_IRQ0);
+               break;
+       }
+       default:
+               break;
+       }
+
+       /* Put the completed request on the correct list */
+       isci_task_save_for_upper_layer_completion(ihost, request, response,
+                                                 status, complete_to_host);
+
+       /* complete the io request to the core. */
+       sci_controller_complete_io(ihost, request->target_device, request);
+       isci_put_device(idev);
+
+       /* set terminated handle so it cannot be completed or
+        * terminated again, and to cause any calls into abort
+        * task to recognize the already completed case.
+        */
+       set_bit(IREQ_TERMINATED, &request->flags);
+}
+
+static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+       struct domain_device *dev = ireq->target_device->domain_dev;
+       struct sas_task *task;
+
+       /* XXX as hch said always creating an internal sas_task for tmf
+        * requests would simplify the driver
+        */
+       task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
+
+       /* All unaccelerated request types (non-SSP and non-NCQ) are
+        * handled with substates.
+        */
+       if (!task && dev->dev_type == SAS_END_DEV) {
+               sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
+       } else if (!task &&
+                  (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
+                   isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
+               sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
+       } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
+               sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
+       } else if (task && sas_protocol_ata(task->task_proto) &&
+                  !task->ata_task.use_ncq) {
+               u32 state;
+
+               if (task->data_dir == DMA_NONE)
+                       state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
+               else if (task->ata_task.dma_xfer)
+                       state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
+               else /* PIO */
+                       state = SCI_REQ_STP_PIO_WAIT_H2D;
+
+               sci_change_state(sm, state);
+       }
+}
+
+static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+       struct isci_host *ihost = ireq->owning_controller;
+
+       /* Tell the SCI_USER that the IO request is complete */
+       if (!test_bit(IREQ_TMF, &ireq->flags))
+               isci_request_io_request_complete(ihost, ireq,
+                                                ireq->sci_status);
+       else
+               isci_task_request_complete(ihost, ireq, ireq->sci_status);
+}
+
+static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+       /* Setting the abort bit in the Task Context is required by the silicon. */
+       ireq->tc->abort = 1;
+}
+
+static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+       ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+       ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+       ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
+{
+       struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+       struct scu_task_context *tc = ireq->tc;
+       struct host_to_dev_fis *h2d_fis;
+       enum sci_status status;
+
+       /* Clear the SRST bit */
+       h2d_fis = &ireq->stp.cmd;
+       h2d_fis->control = 0;
+
+       /* Clear the TC control bit */
+       tc->control_frame = 0;
+
+       status = sci_controller_continue_io(ireq);
+       WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
+}
+
+static const struct sci_base_state sci_request_state_table[] = {
+       [SCI_REQ_INIT] = { },
+       [SCI_REQ_CONSTRUCTED] = { },
+       [SCI_REQ_STARTED] = {
+               .enter_state = sci_request_started_state_enter,
+       },
+       [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
+               .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
+       },
+       [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
+       [SCI_REQ_STP_PIO_WAIT_H2D] = {
+               .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
+       },
+       [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
+       [SCI_REQ_STP_PIO_DATA_IN] = { },
+       [SCI_REQ_STP_PIO_DATA_OUT] = { },
+       [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
+       [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
+       [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
+               .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
+       },
+       [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
+               .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
+       },
+       [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
+       [SCI_REQ_TASK_WAIT_TC_COMP] = { },
+       [SCI_REQ_TASK_WAIT_TC_RESP] = { },
+       [SCI_REQ_SMP_WAIT_RESP] = { },
+       [SCI_REQ_SMP_WAIT_TC_COMP] = { },
+       [SCI_REQ_COMPLETED] = {
+               .enter_state = sci_request_completed_state_enter,
+       },
+       [SCI_REQ_ABORTING] = {
+               .enter_state = sci_request_aborting_state_enter,
+       },
+       [SCI_REQ_FINAL] = { },
+};
+
+static void
+sci_general_request_construct(struct isci_host *ihost,
+                                  struct isci_remote_device *idev,
+                                  struct isci_request *ireq)
+{
+       sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
+
+       ireq->target_device = idev;
+       ireq->protocol = SCIC_NO_PROTOCOL;
+       ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
+
+       ireq->sci_status   = SCI_SUCCESS;
+       ireq->scu_status   = 0;
+       ireq->post_context = 0xFFFFFFFF;
+}
+
+static enum sci_status
+sci_io_request_construct(struct isci_host *ihost,
+                         struct isci_remote_device *idev,
+                         struct isci_request *ireq)
+{
+       struct domain_device *dev = idev->domain_dev;
+       enum sci_status status = SCI_SUCCESS;
+
+       /* Build the common part of the request */
+       sci_general_request_construct(ihost, idev, ireq);
+
+       if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+               return SCI_FAILURE_INVALID_REMOTE_DEVICE;
+
+       if (dev->dev_type == SAS_END_DEV)
+               /* pass */;
+       else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+               memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+       else if (dev_is_expander(dev))
+               /* pass */;
+       else
+               return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
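+       /* Zero the task context up to (but not including) the embedded
+        * SGL pairs, which are built during protocol-specific construction.
+        */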
+       memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
+
+       return status;
+}
+
+enum sci_status sci_task_request_construct(struct isci_host *ihost,
+                                           struct isci_remote_device *idev,
+                                           u16 io_tag, struct isci_request *ireq)
+{
+       struct domain_device *dev = idev->domain_dev;
+       enum sci_status status = SCI_SUCCESS;
+
+       /* Build the common part of the request */
+       sci_general_request_construct(ihost, idev, ireq);
+
+       if (dev->dev_type == SAS_END_DEV ||
+           dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+               set_bit(IREQ_TMF, &ireq->flags);
+               memset(ireq->tc, 0, sizeof(struct scu_task_context));
+       } else
+               status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+       return status;
+}
+
+static enum sci_status isci_request_ssp_request_construct(
+       struct isci_request *request)
+{
+       enum sci_status status;
+
+       dev_dbg(&request->isci_host->pdev->dev,
+               "%s: request = %p\n",
+               __func__,
+               request);
+       status = sci_io_request_construct_basic_ssp(request);
+       return status;
+}
+
+static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
+{
+       struct sas_task *task = isci_request_access_task(ireq);
+       struct host_to_dev_fis *fis = &ireq->stp.cmd;
+       struct ata_queued_cmd *qc = task->uldd_task;
+       enum sci_status status;
+
+       dev_dbg(&ireq->isci_host->pdev->dev,
+               "%s: ireq = %p\n",
+               __func__,
+               ireq);
+
+       memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
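+       /* Set the C bit to mark a command FIS (unless this is a device
+        * control register update) and clear the PM port field.
+        */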
+       if (!task->ata_task.device_control_reg_update)
+               fis->flags |= 0x80;
+       fis->flags &= 0xF0;
+
+       status = sci_io_request_construct_basic_sata(ireq);
+
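+       /* For NCQ commands the tag rides in bits 7:3 of the FIS sector
+        * count field in addition to the task context.
+        */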
+       if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+                  qc->tf.command == ATA_CMD_FPDMA_READ)) {
+               fis->sector_count = qc->tag << 3;
+               ireq->tc->type.stp.ncq_tag = qc->tag;
+       }
+
+       return status;
+}
+
+static enum sci_status
+sci_io_request_construct_smp(struct device *dev,
+                             struct isci_request *ireq,
+                             struct sas_task *task)
+{
+       struct scatterlist *sg = &task->smp_task.smp_req;
+       struct isci_remote_device *idev;
+       struct scu_task_context *task_context;
+       struct isci_port *iport;
+       struct smp_req *smp_req;
+       void *kaddr;
+       u8 req_len;
+       u32 cmd;
+
+       kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+       smp_req = kaddr + sg->offset;
+       /*
+        * Look at the SMP request's header fields; for certain SAS 1.x SMP
+        * functions under SAS 2.0, a zero request length really indicates
+        * a non-zero default length.
+        */
+       if (smp_req->req_len == 0) {
+               switch (smp_req->func) {
+               case SMP_DISCOVER:
+               case SMP_REPORT_PHY_ERR_LOG:
+               case SMP_REPORT_PHY_SATA:
+               case SMP_REPORT_ROUTE_INFO:
+                       smp_req->req_len = 2;
+                       break;
+               case SMP_CONF_ROUTE_INFO:
+               case SMP_PHY_CONTROL:
+               case SMP_PHY_TEST_FUNCTION:
+                       smp_req->req_len = 9;
+                       break;
+                       /* Default - zero is a valid default for 2.0. */
+               }
+       }
+       req_len = smp_req->req_len;
+       sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+       cmd = *(u32 *) smp_req;
+       kunmap_atomic(kaddr, KM_IRQ0);
+
+       if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
+               return SCI_FAILURE;
+
+       ireq->protocol = SCIC_SMP_PROTOCOL;
+
+       /* The SMP request was byte swapped above, prior to DMA mapping. */
+
+       task_context = ireq->tc;
+
+       idev = ireq->target_device;
+       iport = idev->owning_port;
+
+       /*
+        * Fill in the TC with its required data
+        * 00h
+        */
+       task_context->priority = 0;
+       task_context->initiator_request = 1;
+       task_context->connection_rate = idev->connection_rate;
+       task_context->protocol_engine_index = ISCI_PEG;
+       task_context->logical_port_index = iport->physical_port_index;
+       task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+       task_context->abort = 0;
+       task_context->valid = SCU_TASK_CONTEXT_VALID;
+       task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+       /* 04h */
+       task_context->remote_node_index = idev->rnc.remote_node_index;
+       task_context->command_code = 0;
+       task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+       /* 08h */
+       task_context->link_layer_control = 0;
+       task_context->do_not_dma_ssp_good_response = 1;
+       task_context->strict_ordering = 0;
+       task_context->control_frame = 1;
+       task_context->timeout_enable = 0;
+       task_context->block_guard_enable = 0;
+
+       /* 0ch */
+       task_context->address_modifier = 0;
+
+       /* 10h */
+       task_context->ssp_command_iu_length = req_len;
+
+       /* 14h */
+       task_context->transfer_length_bytes = 0;
+
+       /*
+        * 18h ~ 30h, protocol specific
+        * since the command IU has been built by the framework at this
+        * point, we just copy the first DWord from it to this location. */
+       memcpy(&task_context->type.smp, &cmd, sizeof(u32));
+
+       /*
+        * 40h
+        * "For SMP you could program it to zero. We would prefer that way
+        * so that done code will be consistent." - Venki
+        */
+       task_context->task_phase = 0;
+
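+       /* Compose the post context: post-TC command type, protocol engine
+        * group, logical port, and the task context index for this io tag.
+        */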
+       ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+                             (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+                              (iport->physical_port_index <<
+                               SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+                             ISCI_TAG_TCI(ireq->io_tag));
+       /*
+        * Copy the physical address of the command buffer to the SCU Task
+        * Context; the command buffer should not contain the command header.
+        */
+       task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
+       task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
+
+       /* SMP response comes as UF, so no need to set response IU address. */
+       task_context->response_iu_upper = 0;
+       task_context->response_iu_lower = 0;
+
+       sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+       return SCI_SUCCESS;
+}
+
+/*
+ * isci_smp_request_build() - This function builds the smp request.
+ * @ireq: This parameter points to the isci_request allocated in the
+ *    request construct function.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */
+static enum sci_status isci_smp_request_build(struct isci_request *ireq)
+{
+       struct sas_task *task = isci_request_access_task(ireq);
+       struct device *dev = &ireq->isci_host->pdev->dev;
+       enum sci_status status = SCI_FAILURE;
+
+       status = sci_io_request_construct_smp(dev, ireq, task);
+       if (status != SCI_SUCCESS)
+               dev_dbg(&ireq->isci_host->pdev->dev,
+                        "%s: failed with status = %d\n",
+                        __func__,
+                        status);
+
+       return status;
+}
+
+/**
+ * isci_io_request_build() - This function builds the io request object.
+ * @ihost: This parameter specifies the ISCI host object
+ * @request: This parameter points to the isci_request object allocated in the
+ *    request construct function.
+ * @idev: This parameter is the handle for the sci core's remote device
+ *    object that is the destination for this request.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */
+static enum sci_status isci_io_request_build(struct isci_host *ihost,
+                                            struct isci_request *request,
+                                            struct isci_remote_device *idev)
+{
+       enum sci_status status = SCI_SUCCESS;
+       struct sas_task *task = isci_request_access_task(request);
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: idev = 0x%p; request = %p, "
+               "num_scatter = %d\n",
+               __func__,
+               idev,
+               request,
+               task->num_scatter);
+
+       /* map the sgl addresses, if present.
+        * libata does the mapping for sata devices
+        * before we get the request.
+        */
+       if (task->num_scatter &&
+           !sas_protocol_ata(task->task_proto) &&
+           !(SAS_PROTOCOL_SMP & task->task_proto)) {
+
+               request->num_sg_entries = dma_map_sg(
+                       &ihost->pdev->dev,
+                       task->scatter,
+                       task->num_scatter,
+                       task->data_dir
+                       );
+
+               if (request->num_sg_entries == 0)
+                       return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+       }
+
+       status = sci_io_request_construct(ihost, idev, request);
+
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: failed request construct\n",
+                        __func__);
+               return SCI_FAILURE;
+       }
+
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SMP:
+               status = isci_smp_request_build(request);
+               break;
+       case SAS_PROTOCOL_SSP:
+               status = isci_request_ssp_request_construct(request);
+               break;
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+               status = isci_request_stp_request_construct(request);
+               break;
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: unknown protocol\n", __func__);
+               return SCI_FAILURE;
+       }
+
+       return status;
+}
+
+static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
+{
+       struct isci_request *ireq;
+
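+       /* Request objects are preallocated per tag; recover the one
+        * corresponding to this tag's task context index.
+        */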
+       ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
+       ireq->io_tag = tag;
+       ireq->io_request_completion = NULL;
+       ireq->flags = 0;
+       ireq->num_sg_entries = 0;
+       INIT_LIST_HEAD(&ireq->completed_node);
+       INIT_LIST_HEAD(&ireq->dev_node);
+       isci_request_change_state(ireq, allocated);
+
+       return ireq;
+}
+
+static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+                                                    struct sas_task *task,
+                                                    u16 tag)
+{
+       struct isci_request *ireq;
+
+       ireq = isci_request_from_tag(ihost, tag);
+       ireq->ttype_ptr.io_task_ptr = task;
+       ireq->ttype = io_task;
+       task->lldd_task = ireq;
+
+       return ireq;
+}
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+                                              struct isci_tmf *isci_tmf,
+                                              u16 tag)
+{
+       struct isci_request *ireq;
+
+       ireq = isci_request_from_tag(ihost, tag);
+       ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
+       ireq->ttype = tmf_task;
+
+       return ireq;
+}
+
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+                        struct sas_task *task, u16 tag)
+{
+       enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+       struct isci_request *ireq;
+       unsigned long flags;
+       int ret = 0;
+
+       /* do common allocation and init of request object. */
+       ireq = isci_io_request_from_tag(ihost, task, tag);
+
+       status = isci_io_request_build(ihost, ireq, idev);
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: request_construct failed - status = 0x%x\n",
+                        __func__,
+                        status);
+               return status;
+       }
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
+
+               if (isci_task_is_ncq_recovery(task)) {
+
+                       /* The device is in an NCQ recovery state.  Issue the
+                        * request on the task side.  Note that it will
+                        * complete on the I/O request side because the
+                        * request was built that way (i.e. the IREQ_TMF
+                        * flag is not set in ireq->flags).
+                        */
+                       status = sci_controller_start_task(ihost,
+                                                           idev,
+                                                           ireq);
+               } else {
+                       status = SCI_FAILURE;
+               }
+       } else {
+               /* send the request to the core; the io tag was assigned
+                * when the request was allocated.
+                */
+               status = sci_controller_start_io(ihost, idev,
+                                                 ireq);
+       }
+
+       if (status != SCI_SUCCESS &&
+           status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: failed request start (0x%x)\n",
+                        __func__, status);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               return status;
+       }
+
+       /* Either I/O started OK, or the core has signaled that
+        * the device needs a target reset.
+        *
+        * In either case, hold onto the I/O for later.
+        *
+        * Update its status and add it to the list in the
+        * remote device object.
+        */
+       list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+       if (status == SCI_SUCCESS) {
+               isci_request_change_state(ireq, started);
+       } else {
+               /* The request did not really start in the
+                * hardware, so clear the request handle
+                * here so no terminations will be done.
+                */
+               set_bit(IREQ_TERMINATED, &ireq->flags);
+               isci_request_change_state(ireq, completed);
+       }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (status ==
+           SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+               /* Signal libsas that we need the SCSI error
+                * handler thread to work on this I/O and that
+                * we want a device reset.
+                */
+               spin_lock_irqsave(&task->task_state_lock, flags);
+               task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+               /* Cause this task to be scheduled in the SCSI error
+                * handler thread.
+                */
+               isci_execpath_callback(ihost, task,
+                                      sas_task_abort);
+
+               /* Change the status, since we are holding
+                * the I/O until it is managed by the SCSI
+                * error handler.
+                */
+               status = SCI_SUCCESS;
+       }
+
+       return ret;
+}
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
new file mode 100644 (file)
index 0000000..7a1d5a9
--- /dev/null
@@ -0,0 +1,448 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REQUEST_H_
+#define _ISCI_REQUEST_H_
+
+#include "isci.h"
+#include "host.h"
+#include "scu_task_context.h"
+
+/**
+ * enum isci_request_status - This enum defines the possible states of an I/O
+ *    request.
+ */
+enum isci_request_status {
+       unallocated = 0x00,
+       allocated   = 0x01,
+       started     = 0x02,
+       completed   = 0x03,
+       aborting    = 0x04,
+       aborted     = 0x05,
+       terminating = 0x06,
+       dead        = 0x07
+};
+
+enum task_type {
+       io_task  = 0,
+       tmf_task = 1
+};
+
+enum sci_request_protocol {
+       SCIC_NO_PROTOCOL,
+       SCIC_SMP_PROTOCOL,
+       SCIC_SSP_PROTOCOL,
+       SCIC_STP_PROTOCOL
+}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
+
+/**
+ * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
+ * @pio_len: number of bytes requested at PIO setup
+ * @status: pio setup ending status value to tell us if we need
+ *         to wait for another fis or if the transfer is complete.  Upon
+ *         receipt of a d2h fis this will be the status field of that fis.
+ * @sgl: track pio transfer progress as we iterate through the sgl
+ * @device_cdb_len: atapi device advertises its transfer constraints at setup
+ */
+struct isci_stp_request {
+       u32 pio_len;
+       u8 status;
+
+       struct isci_stp_pio_sgl {
+               int index;
+               u8 set;
+               u32 offset;
+       } sgl;
+       u32 device_cdb_len;
+};
+
+struct isci_request {
+       enum isci_request_status status;
+       #define IREQ_COMPLETE_IN_TARGET 0
+       #define IREQ_TERMINATED 1
+       #define IREQ_TMF 2
+       #define IREQ_ACTIVE 3
+       unsigned long flags;
+       /* XXX kill ttype and ttype_ptr, allocate full sas_task */
+       enum task_type ttype;
+       union ttype_ptr_union {
+               struct sas_task *io_task_ptr;   /* When ttype==io_task  */
+               struct isci_tmf *tmf_task_ptr;  /* When ttype==tmf_task */
+       } ttype_ptr;
+       struct isci_host *isci_host;
+       /* For use in the requests_to_{complete|abort} lists: */
+       struct list_head completed_node;
+       /* For use in the reqs_in_process list: */
+       struct list_head dev_node;
+       spinlock_t state_lock;
+       dma_addr_t request_daddr;
+       dma_addr_t zero_scatter_daddr;
+       unsigned int num_sg_entries;
+       /* Note: "io_request_completion" is completed in two different ways
+        * depending on whether this is a TMF or regular request.
+        * - TMF requests are completed in the thread that started them;
+        * - regular requests are completed in the request completion callback
+        *   function.
+        * This difference in operation allows the aborter of a TMF request
+        * to be sure that once the TMF request completes, the I/O that the
+        * TMF was aborting is guaranteed to have completed.
+        *
+        * XXX kill io_request_completion
+        */
+       struct completion *io_request_completion;
+       struct sci_base_state_machine sm;
+       struct isci_host *owning_controller;
+       struct isci_remote_device *target_device;
+       u16 io_tag;
+       enum sci_request_protocol protocol;
+       u32 scu_status; /* hardware result */
+       u32 sci_status; /* upper layer disposition */
+       u32 post_context;
+       struct scu_task_context *tc;
+       /* could be larger with sg chaining */
+       #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
+       struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
+       /* This field is the index of the stored rx frame data.  It is used in
+        * STP internal requests and SMP response frames.  If this field holds
+        * a valid frame index, the saved frame must be released on IO request
+        * completion.
+        */
+       u32 saved_rx_frame_index;
+
+       union {
+               struct {
+                       union {
+                               struct ssp_cmd_iu cmd;
+                               struct ssp_task_iu tmf;
+                       };
+                       union {
+                               struct ssp_response_iu rsp;
+                               u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+                       };
+               } ssp;
+               struct {
+                       struct smp_resp rsp;
+               } smp;
+               struct {
+                       struct isci_stp_request req;
+                       struct host_to_dev_fis cmd;
+                       struct dev_to_host_fis rsp;
+               } stp;
+       };
+};
+
+static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
+{
+       struct isci_request *ireq;
+
+       ireq = container_of(stp_req, typeof(*ireq), stp.req);
+       return ireq;
+}
+
+/**
+ * enum sci_base_request_states - This enumeration depicts all the states for
+ *    the common request state machine.
+ *
+ *
+ */
+enum sci_base_request_states {
+       /*
+        * Simply the initial state for the base request state machine.
+        */
+       SCI_REQ_INIT,
+
+       /*
+        * This state indicates that the request has been constructed.
+        * This state is entered from the INITIAL state.
+        */
+       SCI_REQ_CONSTRUCTED,
+
+       /*
+        * This state indicates that the request has been started. This state
+        * is entered from the CONSTRUCTED state.
+        */
+       SCI_REQ_STARTED,
+
+       SCI_REQ_STP_UDMA_WAIT_TC_COMP,
+       SCI_REQ_STP_UDMA_WAIT_D2H,
+
+       SCI_REQ_STP_NON_DATA_WAIT_H2D,
+       SCI_REQ_STP_NON_DATA_WAIT_D2H,
+
+       SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
+       SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
+       SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
+
+       /*
+        * While in this state the IO request object is waiting for the TC
+        * completion notification for the H2D Register FIS
+        */
+       SCI_REQ_STP_PIO_WAIT_H2D,
+
+       /*
+        * While in this state the IO request object is waiting for either a
+        * PIO Setup FIS or a D2H register FIS.  The type of frame received is
+        * based on the result of the prior frame and line conditions.
+        */
+       SCI_REQ_STP_PIO_WAIT_FRAME,
+
+       /*
+        * While in this state the IO request object is waiting for a DATA
+        * frame from the device.
+        */
+       SCI_REQ_STP_PIO_DATA_IN,
+
+       /*
+        * While in this state the IO request object is waiting to transmit
+        * the next data frame to the device.
+        */
+       SCI_REQ_STP_PIO_DATA_OUT,
+
+       /*
+        * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
+        * task management request is waiting for the transmission of the
+        * initial frame (i.e. command, task, etc.).
+        */
+       SCI_REQ_TASK_WAIT_TC_COMP,
+
+       /*
+        * This sub-state indicates that the started task management request
+        * is waiting for the reception of an unsolicited frame
+        * (i.e. response IU).
+        */
+       SCI_REQ_TASK_WAIT_TC_RESP,
+
+       /*
+        * This sub-state indicates that the started SMP request is
+        * waiting for the reception of an unsolicited frame
+        * (i.e. the SMP response frame).
+        */
+       SCI_REQ_SMP_WAIT_RESP,
+
+       /*
+        * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
+        * request is waiting for the transmission of the initial frame
+        * (i.e. command, task, etc.).
+        */
+       SCI_REQ_SMP_WAIT_TC_COMP,
+
+       /*
+        * This state indicates that the request has completed.
+        * This state is entered from the STARTED state. This state is entered
+        * from the ABORTING state.
+        */
+       SCI_REQ_COMPLETED,
+
+       /*
+        * This state indicates that the request is in the process of being
+        * terminated/aborted.
+        * This state is entered from the CONSTRUCTED state.
+        * This state is entered from the STARTED state.
+        */
+       SCI_REQ_ABORTING,
+
+       /*
+        * Simply the final state for the base request state machine.
+        */
+       SCI_REQ_FINAL,
+};
+
+enum sci_status sci_request_start(struct isci_request *ireq);
+enum sci_status sci_io_request_terminate(struct isci_request *ireq);
+enum sci_status
+sci_io_request_event_handler(struct isci_request *ireq,
+                                 u32 event_code);
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+                                 u32 frame_index);
+enum sci_status
+sci_task_request_terminate(struct isci_request *ireq);
+extern enum sci_status
+sci_request_complete(struct isci_request *ireq);
+extern enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
+
+/* XXX open code in caller */
+static inline dma_addr_t
+sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
+{
+       char *requested_addr = (char *)virt_addr;
+       char *base_addr = (char *)ireq;
+
+       BUG_ON(requested_addr < base_addr);
+       BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
+
+       return ireq->request_daddr + (requested_addr - base_addr);
+}
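+
+/* Usage sketch (illustrative): a construction routine can resolve the bus
+ * address of any member embedded in the request, e.g. the SSP command IU:
+ *
+ *     dma_addr_t cmd_iu_daddr =
+ *             sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
+ */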
+
+/**
+ * isci_request_change_state() - This function sets the status of the request
+ *    object.
+ * @isci_request: This parameter points to the isci_request object
+ * @status: This parameter is the new status of the object
+ *
+ * Return: the state of the request prior to the change.
+ */
+static inline enum isci_request_status
+isci_request_change_state(struct isci_request *isci_request,
+                         enum isci_request_status status)
+{
+       enum isci_request_status old_state;
+       unsigned long flags;
+
+       BUG_ON(isci_request == NULL);
+
+       dev_dbg(&isci_request->isci_host->pdev->dev,
+               "%s: isci_request = %p, state = 0x%x\n",
+               __func__,
+               isci_request,
+               status);
+
+       spin_lock_irqsave(&isci_request->state_lock, flags);
+       old_state = isci_request->status;
+       isci_request->status = status;
+       spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+       return old_state;
+}
+
+/**
+ * isci_request_change_started_to_newstate() - This function sets the status of
+ *    the request object, but only if the request is currently started or
+ *    aborting.
+ * @isci_request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ *    signalled when the old request completes.
+ * @newstate: This parameter is the new status of the object
+ *
+ * Return: the state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_newstate(struct isci_request *isci_request,
+                                       struct completion *completion_ptr,
+                                       enum isci_request_status newstate)
+{
+       enum isci_request_status old_state;
+       unsigned long flags;
+
+       spin_lock_irqsave(&isci_request->state_lock, flags);
+
+       old_state = isci_request->status;
+
+       if (old_state == started || old_state == aborting) {
+               BUG_ON(isci_request->io_request_completion != NULL);
+
+               isci_request->io_request_completion = completion_ptr;
+               isci_request->status = newstate;
+       }
+
+       spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+       dev_dbg(&isci_request->isci_host->pdev->dev,
+               "%s: isci_request = %p, old_state = 0x%x\n",
+               __func__,
+               isci_request,
+               old_state);
+
+       return old_state;
+}
+
+/**
+ * isci_request_change_started_to_aborted() - This function sets the status of
+ *    the request object to "aborted", but only if the request is currently
+ *    started or aborting.
+ * @isci_request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ *    signalled when the old request completes.
+ *
+ * Return: the state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_aborted(struct isci_request *isci_request,
+                                      struct completion *completion_ptr)
+{
+       return isci_request_change_started_to_newstate(isci_request,
+                                                      completion_ptr,
+                                                      aborted);
+}
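+
+/* Illustrative caller pattern (hypothetical abort path): the aborter sets
+ * up a completion, attempts the state change, and waits only if the
+ * transition out of "started"/"aborting" actually happened:
+ *
+ *     DECLARE_COMPLETION_ONSTACK(aborted_io);
+ *     enum isci_request_status old;
+ *
+ *     old = isci_request_change_started_to_aborted(ireq, &aborted_io);
+ *     if (old == started || old == aborting)
+ *             wait_for_completion(&aborted_io);
+ */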
+
+#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
+
+#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+                                              struct isci_tmf *isci_tmf,
+                                              u16 tag);
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+                        struct sas_task *task, u16 tag);
+void isci_terminate_pending_requests(struct isci_host *ihost,
+                                    struct isci_remote_device *idev);
+enum sci_status
+sci_task_request_construct(struct isci_host *ihost,
+                           struct isci_remote_device *idev,
+                           u16 io_tag,
+                           struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_ssp(struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_sata(struct isci_request *ireq);
+void sci_smp_request_copy_response(struct isci_request *ireq);
+
+static inline int isci_task_is_ncq_recovery(struct sas_task *task)
+{
+       return (sas_protocol_ata(task->task_proto) &&
+               task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
+               task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
+}
+
+#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
new file mode 100644 (file)
index 0000000..462b151
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCI_SAS_H_
+#define _SCI_SAS_H_
+
+#include <linux/kernel.h>
+
+/*
+ * SATA FIS types: these constants depict the various SATA FIS types defined in
+ * the Serial ATA specification.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+#define FIS_REGH2D          0x27
+#define FIS_REGD2H          0x34
+#define FIS_SETDEVBITS      0xA1
+#define FIS_DMA_ACTIVATE    0x39
+#define FIS_DMA_SETUP       0x41
+#define FIS_BIST_ACTIVATE   0x58
+#define FIS_PIO_SETUP       0x5F
+#define FIS_DATA            0x46
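+
+/* Example (illustrative): a frame handler can key off the leading FIS type
+ * byte, e.g. for a struct dev_to_host_fis *fis from <scsi/sas.h>:
+ *
+ *     if (fis->fis_type == FIS_REGD2H)
+ *             stp_req->status = fis->status;
+ */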
+
+/**************************************************************************/
+#define SSP_RESP_IU_MAX_SIZE   280
+
+/*
+ * The contents of the SSP COMMAND INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_cmd_iu {
+       u8 LUN[8];
+       u8 add_cdb_len:6;
+       u8 _r_a:2;
+       u8 _r_b;
+       u8 en_fburst:1;
+       u8 task_prio:4;
+       u8 task_attr:3;
+       u8 _r_c;
+
+       u8 cdb[16];
+}  __packed;
+
+/*
+ * The contents of the SSP TASK INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_task_iu {
+       u8 LUN[8];
+       u8 _r_a;
+       u8 task_func;
+       u8 _r_b[4];
+       u16 task_tag;
+       u8 _r_c[12];
+}  __packed;
+
+
+/*
+ * struct smp_req_phy_id - This structure defines the contents of
+ *    an SMP Request that consists of the struct smp_request_header and a
+ *    phy identifier.
+ *    Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phy_id {
+       u8 _r_a[4];             /* bytes 4-7 */
+
+       u8 ign_zone_grp:1;      /* byte 8 */
+       u8 _r_b:7;
+
+       u8 phy_id;              /* byte 9 */
+       u8 _r_c;                /* byte 10 */
+       u8 _r_d;                /* byte 11 */
+}  __packed;
+
+/*
+ * struct smp_req_config_route_info - This structure defines the
+ *    contents of an SMP Configure Route Information request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_conf_rtinfo {
+       u16 exp_change_cnt;             /* bytes 4-5 */
+       u8 exp_rt_idx_hi;               /* byte 6 */
+       u8 exp_rt_idx;                  /* byte 7 */
+
+       u8 _r_a;                        /* byte 8 */
+       u8 phy_id;                      /* byte 9 */
+       u16 _r_b;                       /* bytes 10-11 */
+
+       u8 _r_c:7;                      /* byte 12 */
+       u8 dis_rt_entry:1;
+       u8 _r_d[3];                     /* bytes 13-15 */
+
+       u8 rt_sas_addr[8];              /* bytes 16-23 */
+       u8 _r_e[16];                    /* bytes 24-39 */
+}  __packed;
+
+/*
+ * struct smp_req_phycntl - This structure defines the contents of an
+ *    SMP Phy Controller request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phycntl {
+       u16 exp_change_cnt;             /* byte 4-5 */
+
+       u8 _r_a[3];                     /* bytes 6-8 */
+
+       u8 phy_id;                      /* byte 9 */
+       u8 phy_op;                      /* byte 10 */
+
+       u8 upd_pathway:1;               /* byte 11 */
+       u8 _r_b:7;
+
+       u8 _r_c[12];                    /* byte 12-23 */
+
+       u8 att_dev_name[8];             /* byte 24-31 */
+
+       u8 _r_d:4;                      /* byte 32 */
+       u8 min_linkrate:4;
+
+       u8 _r_e:4;                      /* byte 33 */
+       u8 max_linkrate:4;
+
+       u8 _r_f[2];                     /* byte 34-35 */
+
+       u8 pathway:4;                   /* byte 36 */
+       u8 _r_g:4;
+
+       u8 _r_h[3];                     /* bytes 37-39 */
+}  __packed;
+
+/*
+ * struct smp_req - This structure defines the common fixed header shared by
+ *    the SMP request types above; request-specific data follows in req_data.
+ *
+ * XXX: This data structure may need to go to scsi/sas.h
+ */
+struct smp_req {
+       u8 type;                /* byte 0 */
+       u8 func;                /* byte 1 */
+       u8 alloc_resp_len;      /* byte 2 */
+       u8 req_len;             /* byte 3 */
+       u8 req_data[0];
+}  __packed;
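+
+/* Fill-in sketch (illustrative; SMP_REQUEST and SMP_DISCOVER are the frame
+ * type and function codes from <scsi/sas.h>, 'buf' is a caller-provided
+ * buffer):
+ *
+ *     struct smp_req *req = buf;
+ *
+ *     req->type = SMP_REQUEST;
+ *     req->func = SMP_DISCOVER;
+ *
+ * The length fields and req_data are then filled in per the SAS spec.
+ */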
+
+#define SMP_RESP_HDR_SZ        4
+
+/*
+ * struct sci_sas_address - This structure depicts how a SAS address is
+ *    represented by SCI.
+ * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
+ *
+ */
+struct sci_sas_address {
+       u32 high;
+       u32 low;
+};
+#endif
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
new file mode 100644 (file)
index 0000000..c8b329c
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_COMPLETION_CODES_HEADER_
+#define _SCU_COMPLETION_CODES_HEADER_
+
+/**
+ * This file contains the constants and macros for the SCU hardware completion
+ *    codes.
+ *
+ *
+ */
+
+#define SCU_COMPLETION_TYPE_SHIFT      28
+#define SCU_COMPLETION_TYPE_MASK       0x70000000
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * This macro constructs an SCU completion type
+ */
+#define SCU_COMPLETION_TYPE(type) \
+       ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
+
+/**
+ * SCU_COMPLETION_TYPE_TASK, et al. -
+ *
+ * These constants are the SCU completion types, built with SCU_COMPLETION_TYPE()
+ */
+#define SCU_COMPLETION_TYPE_TASK       SCU_COMPLETION_TYPE(0)
+#define SCU_COMPLETION_TYPE_SDMA       SCU_COMPLETION_TYPE(1)
+#define SCU_COMPLETION_TYPE_UFI        SCU_COMPLETION_TYPE(2)
+#define SCU_COMPLETION_TYPE_EVENT      SCU_COMPLETION_TYPE(3)
+#define SCU_COMPLETION_TYPE_NOTIFY     SCU_COMPLETION_TYPE(4)
+
+/**
+ *
+ *
+ * These constants provide the shift and mask values for the various parts of
+ * an SCU completion code.
+ */
+#define SCU_COMPLETION_STATUS_MASK       0x0FFC0000
+#define SCU_COMPLETION_TL_STATUS_MASK    0x0FC00000
+#define SCU_COMPLETION_TL_STATUS_SHIFT   22
+#define SCU_COMPLETION_SDMA_STATUS_MASK  0x003C0000
+#define SCU_COMPLETION_PEG_MASK          0x00010000
+#define SCU_COMPLETION_PORT_MASK         0x00007000
+#define SCU_COMPLETION_PE_MASK           SCU_COMPLETION_PORT_MASK
+#define SCU_COMPLETION_PE_SHIFT          12
+#define SCU_COMPLETION_INDEX_MASK        0x00000FFF
+
+/**
+ * SCU_GET_COMPLETION_TYPE() -
+ *
+ * This macro returns the SCU completion type.
+ */
+#define SCU_GET_COMPLETION_TYPE(completion_code) \
+       ((completion_code) & SCU_COMPLETION_TYPE_MASK)
+
+/**
+ * SCU_GET_COMPLETION_STATUS() -
+ *
+ * This macro returns the SCU completion status.
+ */
+#define SCU_GET_COMPLETION_STATUS(completion_code) \
+       ((completion_code) & SCU_COMPLETION_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_TL_STATUS() -
+ *
+ * This macro returns the transport layer completion status.
+ */
+#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
+       ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
+
+/**
+ * SCU_MAKE_COMPLETION_STATUS() -
+ *
+ * This macro takes a completion code and performs the shift and mask
+ * operations to turn it into a value that can be compared against the
+ * result of SCU_GET_COMPLETION_TL_STATUS().
+ */
+#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
+       ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
+
+/**
+ * SCU_NORMALIZE_COMPLETION_STATUS() -
+ *
+ * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
+ * return code.
+ */
+#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
+       (\
+               ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
+               >> SCU_COMPLETION_TL_STATUS_SHIFT \
+       )
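+
+/* Comparison sketch (illustrative): the MAKE/GET pair lets a normalized
+ * status (see the SCU_TASK_DONE_* constants below) be checked directly
+ * against a raw completion code:
+ *
+ *     if (SCU_GET_COMPLETION_TL_STATUS(completion_code) ==
+ *         SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD))
+ *             ...
+ */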
+
+/**
+ * SCU_GET_COMPLETION_SDMA_STATUS() -
+ *
+ * This macro returns the SDMA completion status.
+ */
+#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code)        \
+       ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PEG() -
+ *
+ * This macro returns the Protocol Engine Group from the completion code.
+ */
+#define SCU_GET_COMPLETION_PEG(completion_code)        \
+       ((completion_code) & SCU_COMPLETION_PEG_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PORT() -
+ *
+ * This macro returns the logical port index from the completion code.
+ */
+#define SCU_GET_COMPLETION_PORT(completion_code) \
+       ((completion_code) & SCU_COMPLETION_PORT_MASK)
+
+/**
+ * SCU_GET_PROTOCOL_ENGINE_INDEX() -
+ *
+ * This macro returns the PE index from the completion code.
+ */
+#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
+       (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
+
+/**
+ * SCU_GET_COMPLETION_INDEX() -
+ *
+ * This macro returns the index of the completion which is either a TCi or an
+ * RNi depending on the completion type.
+ */
+#define SCU_GET_COMPLETION_INDEX(completion_code) \
+       ((completion_code) & SCU_COMPLETION_INDEX_MASK)
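+
+/* Decode sketch (illustrative; 'code' is a hypothetical raw 32-bit value
+ * read from the hardware completion queue):
+ *
+ *     if (SCU_GET_COMPLETION_TYPE(code) == SCU_COMPLETION_TYPE_TASK) {
+ *             u32 tci = SCU_GET_COMPLETION_INDEX(code);
+ *             ...
+ *     }
+ */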
+
+#define SCU_UNSOLICITED_FRAME_MASK     0x0FFF0000
+#define SCU_UNSOLICITED_FRAME_SHIFT    16
+
+/**
+ * SCU_GET_FRAME_INDEX() -
+ *
+ * This macro returns a normalized frame index from an unsolicited frame
+ * completion.
+ */
+#define SCU_GET_FRAME_INDEX(completion_code) \
+       (\
+               ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
+               >> SCU_UNSOLICITED_FRAME_SHIFT \
+       )
+
+#define SCU_UNSOLICITED_FRAME_ERROR_MASK  0x00008000
+
+/**
+ * SCU_GET_FRAME_ERROR() -
+ *
+ * This macro returns a zero (0) value if there is no frame error; otherwise
+ * it returns non-zero (!0).
+ */
+#define SCU_GET_FRAME_ERROR(completion_code) \
+       ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
+
+/**
+ *
+ *
+ * These constants represent normalized completion codes which must be shifted
+ * 18 bits to match them with the hardware completion codes. On a compiler
+ * where int is 16 bits, immediate constants are 16-bit values; shifting
+ * those by 18 bits would completely lose the value. To ensure each value is
+ * the 32-bit value we want, each immediate value must be cast to a u32.
+ */
+#define SCU_TASK_DONE_GOOD                                  ((u32)0x00)
+#define SCU_TASK_DONE_CRC_ERR                               ((u32)0x14)
+#define SCU_TASK_DONE_CHECK_RESPONSE                        ((u32)0x14)
+#define SCU_TASK_DONE_GEN_RESPONSE                          ((u32)0x15)
+#define SCU_TASK_DONE_NAK_CMD_ERR                           ((u32)0x16)
+#define SCU_TASK_DONE_CMD_LL_R_ERR                          ((u32)0x16)
+#define SCU_TASK_DONE_LL_R_ERR                              ((u32)0x17)
+#define SCU_TASK_DONE_ACK_NAK_TO                            ((u32)0x17)
+#define SCU_TASK_DONE_LL_PERR                               ((u32)0x18)
+#define SCU_TASK_DONE_LL_SY_TERM                            ((u32)0x19)
+#define SCU_TASK_DONE_NAK_ERR                               ((u32)0x19)
+#define SCU_TASK_DONE_LL_LF_TERM                            ((u32)0x1A)
+#define SCU_TASK_DONE_DATA_LEN_ERR                          ((u32)0x1A)
+#define SCU_TASK_DONE_LL_CL_TERM                            ((u32)0x1B)
+#define SCU_TASK_DONE_LL_ABORT_ERR                          ((u32)0x1B)
+#define SCU_TASK_DONE_SEQ_INV_TYPE                          ((u32)0x1C)
+#define SCU_TASK_DONE_UNEXP_XR                              ((u32)0x1C)
+#define SCU_TASK_DONE_INV_FIS_TYPE                          ((u32)0x1D)
+#define SCU_TASK_DONE_XR_IU_LEN_ERR                         ((u32)0x1D)
+#define SCU_TASK_DONE_INV_FIS_LEN                           ((u32)0x1E)
+#define SCU_TASK_DONE_XR_WD_LEN                             ((u32)0x1E)
+#define SCU_TASK_DONE_SDMA_ERR                              ((u32)0x1F)
+#define SCU_TASK_DONE_OFFSET_ERR                            ((u32)0x20)
+#define SCU_TASK_DONE_MAX_PLD_ERR                           ((u32)0x21)
+#define SCU_TASK_DONE_EXCESS_DATA                           ((u32)0x22)
+#define SCU_TASK_DONE_LF_ERR                                ((u32)0x23)
+#define SCU_TASK_DONE_UNEXP_FIS                             ((u32)0x24)
+#define SCU_TASK_DONE_UNEXP_RESP                            ((u32)0x24)
+#define SCU_TASK_DONE_EARLY_RESP                            ((u32)0x25)
+#define SCU_TASK_DONE_SMP_RESP_TO_ERR                       ((u32)0x26)
+#define SCU_TASK_DONE_DMASETUP_DIRERR                       ((u32)0x27)
+#define SCU_TASK_DONE_SMP_UFI_ERR                           ((u32)0x27)
+#define SCU_TASK_DONE_XFERCNT_ERR                           ((u32)0x28)
+#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR                      ((u32)0x28)
+#define SCU_TASK_DONE_SMP_LL_RX_ERR                         ((u32)0x29)
+#define SCU_TASK_DONE_RESP_LEN_ERR                          ((u32)0x2A)
+#define SCU_TASK_DONE_UNEXP_DATA                            ((u32)0x2B)
+#define SCU_TASK_DONE_OPEN_FAIL                             ((u32)0x2C)
+#define SCU_TASK_DONE_UNEXP_SDBFIS                          ((u32)0x2D)
+#define SCU_TASK_DONE_REG_ERR                               ((u32)0x2E)
+#define SCU_TASK_DONE_SDB_ERR                               ((u32)0x2F)
+#define SCU_TASK_DONE_TASK_ABORT                            ((u32)0x30)
+#define SCU_TASK_DONE_CMD_SDMA_ERR                          ((u32)0x32)
+#define SCU_TASK_DONE_CMD_LL_ABORT_ERR                      ((u32)0x33)
+#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION              ((u32)0x34)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1             ((u32)0x35)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2             ((u32)0x36)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3             ((u32)0x37)
+#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION                ((u32)0x38)
+#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION                 ((u32)0x39)
+#define SCU_TASK_DONE_VIIT_ENTRY_NV                         ((u32)0x3A)
+#define SCU_TASK_DONE_IIT_ENTRY_NV                          ((u32)0x3B)
+#define SCU_TASK_DONE_RNCNV_OUTBOUND                        ((u32)0x3C)
+#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY             ((u32)0x3D)
+#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED         ((u32)0x3E)
+#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED  ((u32)0x3F)
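+
+/* Dispatch sketch (illustrative): completion handling typically switches
+ * on the normalized transport-layer status:
+ *
+ *     switch (SCU_NORMALIZE_COMPLETION_STATUS(completion_code)) {
+ *     case SCU_TASK_DONE_GOOD:
+ *             break;
+ *     case SCU_TASK_DONE_LL_ABORT_ERR:
+ *             ...
+ *     }
+ */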
+
+#endif /* _SCU_COMPLETION_CODES_HEADER_ */
diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
new file mode 100644 (file)
index 0000000..36a945a
--- /dev/null
@@ -0,0 +1,336 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_EVENT_CODES_HEADER__
+#define __SCU_EVENT_CODES_HEADER__
+
+/**
+ * This file contains the constants and macros for the SCU event codes.
+ *
+ *
+ */
+
+#define SCU_EVENT_TYPE_CODE_SHIFT      24
+#define SCU_EVENT_TYPE_CODE_MASK       0x0F000000
+
+#define SCU_EVENT_SPECIFIC_CODE_SHIFT  18
+#define SCU_EVENT_SPECIFIC_CODE_MASK   0x00FC0000
+
+#define SCU_EVENT_CODE_MASK \
+       (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * This macro constructs an SCU event type from the type value.
+ */
+#define SCU_EVENT_TYPE(type) \
+       ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_SPECIFIC() -
+ *
+ * This macro constructs an SCU event specifier from the code value.
+ */
+#define SCU_EVENT_SPECIFIC(code) \
+       ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_MESSAGE() -
+ *
+ * This macro combines an SCU event type and SCU event specifier
+ * from the type and code values.
+ */
+#define SCU_EVENT_MESSAGE(type, code) \
+       ((type) | SCU_EVENT_SPECIFIC(code))
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * SCU_EVENT_TYPES
+ */
+#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR  SCU_EVENT_TYPE(0x08)
+#define SCU_EVENT_TYPE_SMU_PCQ_ERROR      SCU_EVENT_TYPE(0x09)
+#define SCU_EVENT_TYPE_SMU_ERROR          SCU_EVENT_TYPE(0x00)
+#define SCU_EVENT_TYPE_TRANSPORT_ERROR    SCU_EVENT_TYPE(0x01)
+#define SCU_EVENT_TYPE_BROADCAST_CHANGE   SCU_EVENT_TYPE(0x02)
+#define SCU_EVENT_TYPE_OSSP_EVENT         SCU_EVENT_TYPE(0x03)
+#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX     SCU_EVENT_TYPE(0x04)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX  SCU_EVENT_TYPE(0x05)
+#define SCU_EVENT_TYPE_RNC_OPS_MISC       SCU_EVENT_TYPE(0x06)
+#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
+#define SCU_EVENT_TYPE_ERR_CNT_EVENT      SCU_EVENT_TYPE(0x0A)
+
+/**
+ *
+ *
+ * SCU_EVENT_SPECIFIERS
+ */
+#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
+#define SCU_EVENT_SPECIFIER_RNC_RELEASE    0x00
+
+/**
+ *
+ *
+ * SMU_COMMAND_EVENTS
+ */
+#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_PCQ_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
+#define SCU_EVENT_PCIE_INTERFACE_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
+#define SCU_EVENT_FUNCTION_LEVEL_RESET \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
+
+/**
+ *
+ *
+ * TRANSPORT_LEVEL_ERRORS
+ */
+#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR        \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
+
+/**
+ *
+ *
+ * BROADCAST_CHANGE_EVENTS
+ */
+#define SCU_EVENT_BROADCAST_CHANGE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
+#define SCU_EVENT_BROADCAST_RESERVED0 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
+#define SCU_EVENT_BROADCAST_RESERVED1 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
+#define SCU_EVENT_BROADCAST_SES        \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
+#define SCU_EVENT_BROADCAST_EXPANDER \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
+#define SCU_EVENT_BROADCAST_AEN        \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
+#define SCU_EVENT_BROADCAST_RESERVED3 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
+#define SCU_EVENT_BROADCAST_RESERVED4 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
+#define SCU_EVENT_PE_SUSPENDED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
+
+/**
+ *
+ *
+ * OSSP_EVENTS
+ */
+#define SCU_EVENT_PORT_SELECTOR_DETECTED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
+#define SCU_EVENT_SENT_PORT_SELECTION \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
+#define SCU_EVENT_HARD_RESET_TRANSMITTED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
+#define SCU_EVENT_HARD_RESET_RECEIVED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
+#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
+#define SCU_EVENT_LINK_FAILURE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
+#define SCU_EVENT_SATA_SPINUP_HOLD \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
+#define SCU_EVENT_SAS_15_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
+#define SCU_EVENT_SAS_15 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
+#define SCU_EVENT_SAS_30_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
+#define SCU_EVENT_SAS_30 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
+#define SCU_EVENT_SAS_60_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
+#define SCU_EVENT_SAS_60 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
+#define SCU_EVENT_SATA_15_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
+#define SCU_EVENT_SATA_15 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
+#define SCU_EVENT_SATA_30_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
+#define SCU_EVENT_SATA_30 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
+#define SCU_EVENT_SATA_60_SSC \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
+#define SCU_EVENT_SATA_60 \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
+#define SCU_EVENT_SAS_PHY_DETECTED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
+#define SCU_EVENT_SATA_PHY_DETECTED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
+
+/**
+ *
+ *
+ * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
+ */
+#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR,  0x00)
+#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR,  0x01)
+#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR,  0x02)
+
+/**
+ *
+ *
+ * REMOTE_NODE_SUSPEND_EVENTS
+ */
+#define SCU_EVENT_TL_RNC_SUSPEND_TX \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
+#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX        \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
+
+/**
+ *
+ *
+ * REMOTE_NODE_MISC_EVENTS
+ */
+#define SCU_EVENT_POST_RCN_RELEASE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
+#define SCU_EVENT_POST_RNC_COMPLETE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
+#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
+
+/**
+ *
+ *
+ * ERROR_COUNT_EVENT
+ */
+#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
+#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
+#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
+
+/**
+ * scu_get_event_type() -
+ *
+ * This macro returns the SCU event type from the event code.
+ */
+#define scu_get_event_type(event_code) \
+       ((event_code) & SCU_EVENT_TYPE_CODE_MASK)
+
+/**
+ * scu_get_event_specifier() -
+ *
+ * This macro returns the SCU event specifier from the event code.
+ */
+#define scu_get_event_specifier(event_code) \
+       ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * scu_get_event_code() -
+ *
+ * This macro returns the combined SCU event type and SCU event specifier from
+ * the event code.
+ */
+#define scu_get_event_code(event_code) \
+       ((event_code) & SCU_EVENT_CODE_MASK)
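+
+/* Classification sketch (illustrative; 'event_code' is a hypothetical raw
+ * value from the hardware event queue):
+ *
+ *     switch (scu_get_event_type(event_code)) {
+ *     case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+ *             ...
+ *             break;
+ *     }
+ */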
+
+
+/**
+ *
+ *
+ * PTS_SCHEDULE_EVENT
+ */
+#define SCU_EVENT_SMP_RESPONSE_NO_PE \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
+#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
+       scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
+
+#define SCU_EVENT_TASK_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
+#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT        \
+       scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
+
+#define SCU_EVENT_IT_NEXUS_TIMEOUT \
+       SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
+#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
+       scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
+
+
+#endif /* __SCU_EVENT_CODES_HEADER__ */
diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
new file mode 100644 (file)
index 0000000..33745ad
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
+#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
+
+/**
+ * This file contains the structures and constants used by the SCU hardware to
+ *    describe a remote node context.
+ *
+ *
+ */
+
+/**
+ * struct ssp_remote_node_context - This structure contains the SCU hardware
+ *    definition for an SSP remote node.
+ *
+ *
+ */
+struct ssp_remote_node_context {
+       /* WORD 0 */
+
+       /**
+        * This field is the remote node index assigned for this remote node. All
+        * remote nodes must have a unique remote node index. The value of the remote
+        * node index cannot exceed the maximum number of remote nodes reported in
+        * the SCU device context capacity register.
+        */
+       u32 remote_node_index:12;
+       u32 reserved0_1:4;
+
+       /**
+        * This field tells the SCU hardware how many simultaneous connections
+        * this remote node will support.
+        */
+       u32 remote_node_port_width:4;
+
+       /**
+        * This field tells the SCU hardware which logical port to associate with this
+        * remote node.
+        */
+       u32 logical_port_index:3;
+       u32 reserved0_2:5;
+
+       /**
+        * This field will enable the I_T nexus loss timer for this remote node.
+        */
+       u32 nexus_loss_timer_enable:1;
+
+       /**
+        * This field is for driver debug only and is not used.
+        */
+       u32 check_bit:1;
+
+       /**
+        * This field must be set to true when the hardware DMAs the remote node
+        * context to the hardware SRAM.  When the remote node is being invalidated
+        * this field must be set to false.
+        */
+       u32 is_valid:1;
+
+       /**
+        * This field must be set to true.
+        */
+       u32 is_remote_node_context:1;
+
+       /* WORD 1 - 2 */
+
+       /**
+        * This is the low word of the remote device SAS Address
+        */
+       u32 remote_sas_address_lo;
+
+       /**
+        * This field is the high word of the remote device SAS Address
+        */
+       u32 remote_sas_address_hi;
+
+       /* WORD 3 */
+       /**
+        * This field represents the function number assigned to this remote device.
+        * This value must match the virtual function number that is being used to
+        * communicate to the device.
+        */
+       u32 function_number:8;
+       u32 reserved3_1:8;
+
+       /**
+        * This field provides the driver a way to cheat on the arbitration wait time
+        * for this remote node.
+        */
+       u32 arbitration_wait_time:16;
+
+       /* WORD 4 */
+       /**
+        * This field tells the SCU hardware how long this device may occupy the
+        * connection before it must be closed.
+        */
+       u32 connection_occupancy_timeout:16;
+
+       /**
+        * This field tells the SCU hardware how long to maintain a connection when
+        * there are no frames being transmitted on the link.
+        */
+       u32 connection_inactivity_timeout:16;
+
+       /* WORD  5 */
+       /**
+        * This field allows the driver to cheat on the arbitration wait time for this
+        * remote node.
+        */
+       u32 initial_arbitration_wait_time:16;
+
+       /**
+        * This field tells the hardware what to program for the connection rate in
+        * the open address frame.  See the SAS spec for valid values.
+        */
+       u32 oaf_connection_rate:4;
+
+       /**
+        * This field tells the SCU hardware what to program for the features in the
+        * open address frame.  See the SAS spec for valid values.
+        */
+       u32 oaf_features:4;
+
+       /**
+        * This field tells the SCU hardware what to use for the source zone group in
+        * the open address frame.  See the SAS spec for more details on zoning.
+        */
+       u32 oaf_source_zone_group:8;
+
+       /* WORD 6 */
+       /**
+        * This field tells the SCU hardware what to use as the more compatibility
+        * features in the open address frame. See the SAS spec for details.
+        */
+       u32 oaf_more_compatibility_features;
+
+       /* WORD 7 */
+       u32 reserved7;
+
+};
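+
+/* Initialization sketch (illustrative; 'rnc' and 'rni' are hypothetical
+ * names): the invariant bits that must be programmed before the context
+ * is posted; is_valid is raised only once the context is DMA'd to SRAM:
+ *
+ *     rnc->ssp.is_remote_node_context = 1;
+ *     rnc->ssp.is_valid = 0;
+ *     rnc->ssp.remote_node_index = rni;
+ */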
+
+/**
+ * struct stp_remote_node_context - This structure contains the SCU hardware
+ *    definition for a STP remote node.
+ *
+ * STP targets are not yet supported, so this definition is a placeholder until
+ * we do support them.
+ */
+struct stp_remote_node_context {
+       /**
+        * Placeholder data for the STP remote node.
+        */
+       u32 data[8];
+
+};
+
+/**
+ * This union combines the SAS and SATA remote node definitions.
+ *
+ * union scu_remote_node_context
+ */
+union scu_remote_node_context {
+       /**
+        * SSP Remote Node
+        */
+       struct ssp_remote_node_context ssp;
+
+       /**
+        * STP Remote Node
+        */
+       struct stp_remote_node_context stp;
+
+};
+
+#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
new file mode 100644 (file)
index 0000000..7df87d9
--- /dev/null
@@ -0,0 +1,942 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_TASK_CONTEXT_H_
+#define _SCU_TASK_CONTEXT_H_
+
+/**
+ * This file contains the structures and constants for the SCU hardware task
+ *    context.
+ */
+
+
+/**
+ * enum scu_ssp_task_type - This enumeration defines the various SSP task
+ *    types the SCU hardware will accept. The definitions for these task types
+ *    can be found in the DS specification.
+ */
+typedef enum {
+	SCU_TASK_TYPE_IOREAD,		/* IO READ direction or no direction */
+	SCU_TASK_TYPE_IOWRITE,		/* IO Write direction */
+	SCU_TASK_TYPE_SMP_REQUEST,	/* SMP Request type */
+	SCU_TASK_TYPE_RESPONSE,		/* Driver generated response frame (target mode) */
+	SCU_TASK_TYPE_RAW_FRAME,	/* Raw frame request type */
+	SCU_TASK_TYPE_PRIMITIVE		/* Request for a primitive to be transmitted */
+} scu_ssp_task_type;
+
+/**
+ * enum scu_sata_task_type - This enumeration defines the various SATA task
+ *    types the SCU hardware will accept. The definitions for these task types
+ *    can be found in the DS specification.
+ */
+typedef enum {
+	SCU_TASK_TYPE_DMA_IN,		/* Read request */
+	SCU_TASK_TYPE_FPDMAQ_READ,	/* NCQ read request */
+	SCU_TASK_TYPE_PACKET_DMA_IN,	/* Packet read request */
+	SCU_TASK_TYPE_SATA_RAW_FRAME,	/* Raw frame request */
+	RESERVED_4,
+	RESERVED_5,
+	RESERVED_6,
+	RESERVED_7,
+	SCU_TASK_TYPE_DMA_OUT,		/* Write request */
+	SCU_TASK_TYPE_FPDMAQ_WRITE,	/* NCQ write request */
+	SCU_TASK_TYPE_PACKET_DMA_OUT	/* Packet write request */
+} scu_sata_task_type;
+
+
+/**
+ * SCU_CONTEXT_TYPE
+ */
+#define SCU_TASK_CONTEXT_TYPE  0
+#define SCU_RNC_CONTEXT_TYPE   1
+
+/**
+ * SCU_TASK_CONTEXT_VALIDITY
+ */
+#define SCU_TASK_CONTEXT_INVALID          0
+#define SCU_TASK_CONTEXT_VALID            1
+
+/**
+ * SCU_COMMAND_CODE
+ */
+#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK   0
+#define SCU_COMMAND_CODE_ACTIVE_TASK          1
+#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK   2
+#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES    3
+
+/**
+ * SCU_TASK_PRIORITY
+ */
+/**
+ * This priority is used when no specific priority is requested for the task.
+ */
+#define SCU_TASK_PRIORITY_NORMAL          0
+
+/**
+ * This priority indicates that the task should be scheduled to the head of the
+ * queue.  The task will NOT be executed if the TX is suspended for the remote
+ * node.
+ */
+#define SCU_TASK_PRIORITY_HEAD_OF_Q       1
+
+/**
+ * This priority indicates that the task will be executed before all
+ * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
+ * WILL be executed if the TX is suspended for the remote node.
+ */
+#define SCU_TASK_PRIORITY_HIGH            2
+
+/**
+ * This task priority is reserved and should not be used.
+ */
+#define SCU_TASK_PRIORITY_RESERVED        3
+
+#define SCU_TASK_INITIATOR_MODE           1
+#define SCU_TASK_TARGET_MODE              0
+
+#define SCU_TASK_REGULAR                  0
+#define SCU_TASK_ABORTED                  1
+
+/* direction bit definition */
+/**
+ * SATA_DIRECTION
+ */
+#define SCU_SATA_WRITE_DATA_DIRECTION     0
+#define SCU_SATA_READ_DATA_DIRECTION      1
+
+/**
+ * SCU_COMMAND_CONTEXT_MACROS - These macros provide the mask and shift
+ * operations used to construct the various SCU commands.
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT           21
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK            0x00E00000
+#define scu_get_command_request_type(x)        \
+       ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT        18
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK         0x001C0000
+#define scu_get_command_request_subtype(x) \
+       ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK       \
+       (\
+               SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK             \
+               | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK          \
+       )
+#define scu_get_command_request_full_type(x) \
+       ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT  16
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK   0x00010000
+#define scu_get_command_protocl_engine_group(x)        \
+       ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
+
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT           12
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK            0x00007000
+#define scu_get_command_reqeust_logical_port(x)        \
+       ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
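+
+/*
+ * Illustrative sketch (not part of the original header): the getter macros
+ * above mask but do not shift, so a decoded field remains at its bit
+ * position.  To recover the numeric value, shift it down as well, e.g.:
+ *
+ *	u32 type = scu_get_command_request_type(cmd) >>
+ *		   SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT;
+ */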
+
+
+#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
+       ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
+
+/**
+ * SCU_COMMAND_TYPES - These constants provide the grouping of the different
+ * SCU command types.
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC    MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC    MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC   MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC   MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC  MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
+
+#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command)        \
+       ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
+
+/**
+ * SCU_REQUEST_TYPES - These constants are the various request types that can
+ * be posted to the SCU hardware.
+ */
+#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
+
+#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_32        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_96        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_32        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_96        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE        \
+       (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
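+
+/*
+ * Illustrative sketch (not part of the original header): the request
+ * constants above only set the type and subtype bits (bit 18 and up), so
+ * the low bits are free to carry an index.  Assuming the hardware expects
+ * the 12-bit task context or remote node index in those low bits, a
+ * post-RNC command could be composed as:
+ *
+ *	u32 request = SCU_CONTEXT_COMMAND_POST_RNC_32 | rni;
+ *
+ * where 'rni' is a remote node index.
+ */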
+
+/**
+ * SCU_TASK_CONTEXT_PROTOCOL - SCU task context protocol types; these are used
+ * to program the SCU task context protocol field in word 0x00.
+ */
+#define SCU_TASK_CONTEXT_PROTOCOL_SMP    0x00
+#define SCU_TASK_CONTEXT_PROTOCOL_SSP    0x01
+#define SCU_TASK_CONTEXT_PROTOCOL_STP    0x02
+#define SCU_TASK_CONTEXT_PROTOCOL_NONE   0x07
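+
+/*
+ * Illustrative sketch (not part of the original header): word 0x00 of the
+ * task context carries this protocol code, e.g. for an SSP request, with
+ * 'tc' a pointer to the struct scu_task_context defined below:
+ *
+ *	tc->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ */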
+
+/**
+ * struct ssp_task_context - This is the SCU hardware definition for an SSP
+ *    request.
+ */
+struct ssp_task_context {
+       /* OFFSET 0x18 */
+       u32 reserved00:24;
+       u32 frame_type:8;
+
+       /* OFFSET 0x1C */
+       u32 reserved01;
+
+       /* OFFSET 0x20 */
+       u32 fill_bytes:2;
+       u32 reserved02:6;
+       u32 changing_data_pointer:1;
+       u32 retransmit:1;
+       u32 retry_data_frame:1;
+       u32 tlr_control:2;
+       u32 reserved03:19;
+
+       /* OFFSET 0x24 */
+       u32 uiRsvd4;
+
+       /* OFFSET 0x28 */
+       u32 target_port_transfer_tag:16;
+       u32 tag:16;
+
+       /* OFFSET 0x2C */
+       u32 data_offset;
+};
+
+/**
+ * struct stp_task_context - This is the SCU hardware definition for an STP
+ *    request.
+ */
+struct stp_task_context {
+       /* OFFSET 0x18 */
+       u32 fis_type:8;
+       u32 pm_port:4;
+       u32 reserved0:3;
+       u32 control:1;
+       u32 command:8;
+       u32 features:8;
+
+       /* OFFSET 0x1C */
+       u32 reserved1;
+
+       /* OFFSET 0x20 */
+       u32 reserved2;
+
+       /* OFFSET 0x24 */
+       u32 reserved3;
+
+       /* OFFSET 0x28 */
+       u32 ncq_tag:5;
+       u32 reserved4:27;
+
+       /* OFFSET 0x2C */
+       u32 data_offset; /* TODO: What is this used for? */
+};
+
+/**
+ * struct smp_task_context - This is the SCU hardware definition for an SMP
+ *    request.
+ */
+struct smp_task_context {
+       /* OFFSET 0x18 */
+       u32 response_length:8;
+       u32 function_result:8;
+       u32 function:8;
+       u32 frame_type:8;
+
+       /* OFFSET 0x1C */
+       u32 smp_response_ufi:12;
+       u32 reserved1:20;
+
+       /* OFFSET 0x20 */
+       u32 reserved2;
+
+       /* OFFSET 0x24 */
+       u32 reserved3;
+
+       /* OFFSET 0x28 */
+       u32 reserved4;
+
+       /* OFFSET 0x2C */
+       u32 reserved5;
+};
+
+/**
+ * struct primitive_task_context - This is the SCU hardware definition used
+ *    when the driver wants to send a primitive on the link.
+ */
+struct primitive_task_context {
+       /* OFFSET 0x18 */
+       /**
+        * This field is the control word and it must be 0.
+        */
+	u32 control; /* must be set to 0 */
+
+       /* OFFSET 0x1C */
+       /**
+        * This field specifies the primitive that is to be transmitted.
+        */
+       u32 sequence;
+
+       /* OFFSET 0x20 */
+       u32 reserved0;
+
+       /* OFFSET 0x24 */
+       u32 reserved1;
+
+       /* OFFSET 0x28 */
+       u32 reserved2;
+
+       /* OFFSET 0x2C */
+       u32 reserved3;
+};
+
+/**
+ * union protocol_context - The union of the protocols that can be selected in
+ *    the SCU task context field.
+ */
+union protocol_context {
+       struct ssp_task_context ssp;
+       struct stp_task_context stp;
+       struct smp_task_context smp;
+       struct primitive_task_context primitive;
+       u32 words[6];
+};
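+
+/*
+ * Illustrative sketch (not part of the original header): the protocol area
+ * spans task context offsets 0x18-0x2C, i.e. six 32-bit words, which is why
+ * the union carries a u32 words[6] overlay.  A compile-time check could
+ * assert this:
+ *
+ *	BUILD_BUG_ON(sizeof(union protocol_context) != 6 * sizeof(u32));
+ */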
+
+/**
+ * struct scu_sgl_element - This structure represents a single SCU defined SGL
+ *    element. SCU SGLs contain a 64 bit address with the maximum data transfer
+ *    being 24 bits in size.  The SGL cannot cross a 4GB boundary.
+ */
+struct scu_sgl_element {
+       /**
+        * This field is the upper 32 bits of the 64 bit physical address.
+        */
+       u32 address_upper;
+
+       /**
+        * This field is the lower 32 bits of the 64 bit physical address.
+        */
+       u32 address_lower;
+
+       /**
+        * This field is the number of bytes to transfer.
+        */
+       u32 length:24;
+
+       /**
+        * This field is the address modifier to be used when a virtual function is
+        * requesting a data transfer.
+        */
+       u32 address_modifier:8;
+
+};
+
+#define SCU_SGL_ELEMENT_PAIR_A   0
+#define SCU_SGL_ELEMENT_PAIR_B   1
+
+/**
+ * struct scu_sgl_element_pair - This structure is the SCU hardware definition
+ *    of a pair of SGL elements. The SCU hardware always works on SGL pairs.
+ *    They are referred to in the DS specification as SGL A and SGL B.  Each
+ *    SGL pair is followed by the address of the next pair.
+ */
+struct scu_sgl_element_pair {
+       /* OFFSET 0x60-0x68 */
+       /**
+        * This field is the SGL element A of the SGL pair.
+        */
+       struct scu_sgl_element A;
+
+       /* OFFSET 0x6C-0x74 */
+       /**
+        * This field is the SGL element B of the SGL pair.
+        */
+       struct scu_sgl_element B;
+
+       /* OFFSET 0x78-0x7C */
+       /**
+        * This field is the upper 32 bits of the 64 bit address to the next SGL
+        * element pair.
+        */
+       u32 next_pair_upper;
+
+       /**
+        * This field is the lower 32 bits of the 64 bit address to the next SGL
+        * element pair.
+        */
+       u32 next_pair_lower;
+
+};
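+
+/*
+ * Illustrative sketch (not part of the original header): filling one SGL
+ * element from a DMA-mapped buffer, given the upper/lower address split and
+ * the 24-bit length limit described above.  upper_32_bits()/lower_32_bits()
+ * are the generic kernel helpers; checking that 'len' fits in 24 bits is
+ * left out here.
+ *
+ *	static void scu_sgl_fill(struct scu_sgl_element *e,
+ *				 dma_addr_t addr, u32 len)
+ *	{
+ *		e->address_upper = upper_32_bits(addr);
+ *		e->address_lower = lower_32_bits(addr);
+ *		e->length = len;
+ *		e->address_modifier = 0;
+ *	}
+ */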
+
+/**
+ * struct transport_snapshot - This structure is the SCU hardware scratch area
+ *    for the task context. This is set to 0 by the driver but can be read by
+ *    issuing a dump TC request to the SCU.
+ */
+struct transport_snapshot {
+       /* OFFSET 0x48 */
+       u32 xfer_rdy_write_data_length;
+
+       /* OFFSET 0x4C */
+       u32 data_offset;
+
+       /* OFFSET 0x50 */
+       u32 data_transfer_size:24;
+       u32 reserved_50_0:8;
+
+       /* OFFSET 0x54 */
+       u32 next_initiator_write_data_offset;
+
+       /* OFFSET 0x58 */
+       u32 next_initiator_write_data_xfer_size:24;
+       u32 reserved_58_0:8;
+};
+
+/**
+ * struct scu_task_context - This structure defines the contents of the SCU
+ *    silicon task context. It lays out all of the fields according to the
+ *    expected order and location for the Storage Controller unit.
+ */
+struct scu_task_context {
+       /* OFFSET 0x00 ------ */
+       /**
+        * This field must be encoded to one of the valid SCU task priority values
+        *    - SCU_TASK_PRIORITY_NORMAL
+        *    - SCU_TASK_PRIORITY_HEAD_OF_Q
+        *    - SCU_TASK_PRIORITY_HIGH
+        */
+       u32 priority:2;
+
+       /**
+        * This field must be set to true if this is an initiator generated request.
+        * Until target mode is supported all task requests are initiator requests.
+        */
+       u32 initiator_request:1;
+
+	/**
+	 * This field must be set to one of the valid connection rates; the valid
+	 * values are 0x8 (1.5 Gbps), 0x9 (3.0 Gbps), and 0xA (6.0 Gbps).
+	 */
+       u32 connection_rate:4;
+
+       /**
+        * This field must be programmed when generating an SMP response since the SMP
+        * connection remains open until the SMP response is generated.
+        */
+       u32 protocol_engine_index:3;
+
+       /**
+        * This field must contain the logical port for the task request.
+        */
+       u32 logical_port_index:3;
+
+       /**
+        * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
+        *    - SCU_TASK_CONTEXT_PROTOCOL_SMP
+        *    - SCU_TASK_CONTEXT_PROTOCOL_SSP
+        *    - SCU_TASK_CONTEXT_PROTOCOL_STP
+        *    - SCU_TASK_CONTEXT_PROTOCOL_NONE
+        */
+       u32 protocol_type:3;
+
+       /**
+        * This field must be set to the TCi allocated for this task.
+        */
+       u32 task_index:12;
+
+       /**
+        * This field is reserved and must be set to 0x00
+        */
+       u32 reserved_00_0:1;
+
+       /**
+        * For a normal task request this must be set to 0.  If this is an abort of
+        * this task request it must be set to 1.
+        */
+       u32 abort:1;
+
+       /**
+        * This field must be set to true for the SCU hardware to process the task.
+        */
+       u32 valid:1;
+
+       /**
+        * This field must be set to SCU_TASK_CONTEXT_TYPE
+        */
+       u32 context_type:1;
+
+       /* OFFSET 0x04 */
+       /**
+        * This field contains the RNi that is the target of this request.
+        */
+       u32 remote_node_index:12;
+
+       /**
+        * This field is programmed if this is a mirrored request, which we are not
+        * using, in which case it is the RNi for the mirrored target.
+        */
+       u32 mirrored_node_index:12;
+
+       /**
+        * This field is programmed with the direction of the SATA request
+        *    - SCU_SATA_WRITE_DATA_DIRECTION
+        *    - SCU_SATA_READ_DATA_DIRECTION
+        */
+       u32 sata_direction:1;
+
+       /**
+        * This field is programmed with one of the following SCU_COMMAND_CODE values
+        *    - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
+        *    - SCU_COMMAND_CODE_ACTIVE_TASK
+        *    - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
+        *    - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
+        */
+       u32 command_code:2;
+
+       /**
+        * This field is set to true if the remote node should be suspended.
+        * This bit is only valid for SSP & SMP target devices.
+        */
+       u32 suspend_node:1;
+
+       /**
+        * This field is programmed with one of the following command type codes
+        *
+        * For SAS requests use the scu_ssp_task_type
+        *    - SCU_TASK_TYPE_IOREAD
+        *    - SCU_TASK_TYPE_IOWRITE
+        *    - SCU_TASK_TYPE_SMP_REQUEST
+        *    - SCU_TASK_TYPE_RESPONSE
+        *    - SCU_TASK_TYPE_RAW_FRAME
+        *    - SCU_TASK_TYPE_PRIMITIVE
+        *
+        * For SATA requests use the scu_sata_task_type
+        *    - SCU_TASK_TYPE_DMA_IN
+        *    - SCU_TASK_TYPE_FPDMAQ_READ
+        *    - SCU_TASK_TYPE_PACKET_DMA_IN
+        *    - SCU_TASK_TYPE_SATA_RAW_FRAME
+        *    - SCU_TASK_TYPE_DMA_OUT
+        *    - SCU_TASK_TYPE_FPDMAQ_WRITE
+        *    - SCU_TASK_TYPE_PACKET_DMA_OUT
+        */
+       u32 task_type:4;
+
+       /* OFFSET 0x08 */
+       /**
+        * This field is reserved and must be set to 0x00
+        */
+       u32 link_layer_control:8; /* presently all reserved */
+
+       /**
+        * This field is set to true when TLR is to be enabled
+        */
+       u32 ssp_tlr_enable:1;
+
+       /**
+        * This field specifies whether the SCU DMAs a response frame to host
+        * memory for good response frames when operating in target mode.
+        */
+       u32 dma_ssp_target_good_response:1;
+
+       /**
+        * This field indicates if the SCU should DMA the response frame to
+        * host memory.
+        */
+       u32 do_not_dma_ssp_good_response:1;
+
+       /**
+        * This field is set to true when strict ordering is to be enabled
+        */
+       u32 strict_ordering:1;
+
+       /**
+        * This field indicates the type of endianness to be utilized for the
+        * frame.  Command, task, and response frames use control_frame
+        * set to 1.
+        */
+       u32 control_frame:1;
+
+       /**
+        * This field is reserved and the driver should set it to 0x00
+        */
+       u32 tl_control_reserved:3;
+
+       /**
+        * This field is set to true when the SCU hardware task timeout control is to
+        * be enabled
+        */
+       u32 timeout_enable:1;
+
+       /**
+        * This field is reserved and the driver should set it to 0x00
+        */
+       u32 pts_control_reserved:7;
+
+       /**
+        * This field should be set to true when block guard is to be enabled
+        */
+       u32 block_guard_enable:1;
+
+       /**
+        * This field is reserved and the driver should set it to 0x00
+        */
+       u32 sdma_control_reserved:7;
+
+       /* OFFSET 0x0C */
+       /**
+        * This field is the address modifier for this IO request; it should be
+        * programmed with the virtual function that is making the request.
+        */
+       u32 address_modifier:16;
+
+       /**
+        * @todo Do we support mirrored SMP response frames?
+        */
+       u32 mirrored_protocol_engine:3;  /* mirrored protocol Engine Index */
+
+       /**
+        * If this is a mirrored request the logical port index for the mirrored RNi
+        * must be programmed.
+        */
+       u32 mirrored_logical_port:4;  /* mirrored local port index */
+
+       /**
+        * This field is reserved and the driver must set it to 0x00
+        */
+       u32 reserved_0C_0:8;
+
+       /**
+        * This field must be set to true if the mirrored request processing is to be
+        * enabled.
+        */
+       u32 mirror_request_enable:1;  /* Mirrored request Enable */
+
+       /* OFFSET 0x10 */
+       /**
+        * This field is the command iu length in dwords
+        */
+       u32 ssp_command_iu_length:8;
+
+       /**
+        * This is the target TLR enable bit; it must be set to 0 when creating the
+        * task context.
+        */
+       u32 xfer_ready_tlr_enable:1;
+
+       /**
+        * This field is reserved and the driver must set it to 0x00
+        */
+       u32 reserved_10_0:7;
+
+       /**
+        * This is the maximum burst size that the SCU hardware will send in one
+        * connection.  Its value is (N x 512) bytes, where N must be a multiple of
+        * 2.  If the value is 0x00 then the maximum burst size is disabled.
+        */
+       u32 ssp_max_burst_size:16;
+
+       /* OFFSET 0x14 */
+       /**
+        * This field is set to the number of bytes to be transferred in the request.
+        */
+       u32 transfer_length_bytes:24; /* In terms of bytes */
+
+       /**
+        * This field is reserved and the driver should set it to 0x00
+        */
+       u32 reserved_14_0:8;
+
+       /* OFFSET 0x18-0x2C */
+       /**
+        * This union provides the protocol specific part of the SCU task context.
+        */
+       union protocol_context type;
+
+       /* OFFSET 0x30-0x34 */
+       /**
+        * This field is the upper 32 bits of the 64 bit physical address of the
+        * command iu buffer
+        */
+       u32 command_iu_upper;
+
+       /**
+        * This field is the lower 32 bits of the 64 bit physical address of the
+        * command iu buffer
+        */
+       u32 command_iu_lower;
+
+       /* OFFSET 0x38-0x3C */
+       /**
+        * This field is the upper 32 bits of the 64 bit physical address of the
+        * response iu buffer
+        */
+       u32 response_iu_upper;
+
+       /**
+        * This field is the lower 32 bits of the 64 bit physical address of the
+        * response iu buffer
+        */
+       u32 response_iu_lower;
+
+       /* OFFSET 0x40 */
+       /**
+        * This field is set to the task phase of the SCU hardware. The driver must
+        * set this to 0x01
+        */
+       u32 task_phase:8;
+
+       /**
+        * This field is set to the transport layer task status.  The driver must set
+        * this to 0x00
+        */
+       u32 task_status:8;
+
+       /**
+        * This field is used during initiator write TLR
+        */
+       u32 previous_extended_tag:4;
+
+       /**
+        * This field is set to the maximum number of retries for a STP non-data FIS
+        */
+       u32 stp_retry_count:2;
+
+       /**
+        * This field is reserved and the driver must set it to 0x00
+        */
+       u32 reserved_40_1:2;
+
+       /**
+        * This field is used by the SCU TL to determine when to take a snapshot when
+        * transmitting read data frames.
+        *    - 0x00 The entire IO
+        *    - 0x01 32k
+        *    - 0x02 64k
+        *    - 0x04 128k
+        *    - 0x08 256k
+        */
+       u32 ssp_tlr_threshold:4;
+
+       /**
+        * This field is reserved and the driver must set it to 0x00
+        */
+       u32 reserved_40_2:4;
+
+       /* OFFSET 0x44 */
+       u32 write_data_length; /* read only set to 0 */
+
+       /* OFFSET 0x48-0x58 */
+       struct transport_snapshot snapshot; /* read only set to 0 */
+
+       /* OFFSET 0x5C */
+       u32 block_protection_enable:1;
+       u32 block_size:2;
+       u32 block_protection_function:2;
+       u32 reserved_5C_0:9;
+       u32 active_sgl_element:2;  /* read only set to 0 */
+       u32 sgl_exhausted:1;  /* read only set to 0 */
+       u32 payload_data_transfer_error:4;  /* read only set to 0 */
+       u32 frame_buffer_offset:11; /* read only set to 0 */
+
+       /* OFFSET 0x60-0x7C */
+       /**
+        * This field is the first SGL element pair found in the TC data structure.
+        */
+       struct scu_sgl_element_pair sgl_pair_ab;
+       /* OFFSET 0x80-0x9C */
+       /**
+        * This field is the second SGL element pair found in the TC data structure.
+        */
+       struct scu_sgl_element_pair sgl_pair_cd;
+
+       /* OFFSET 0xA0-BC */
+       struct scu_sgl_element_pair sgl_snapshot_ac;
+
+       /* OFFSET 0xC0 */
+       u32 active_sgl_element_pair; /* read only set to 0 */
+
+       /* OFFSET 0xC4-0xCC */
+       u32 reserved_C4_CC[3];
+
+       /* OFFSET 0xD0 */
+       u32 intermediate_crc_value:16;
+       u32 initial_crc_seed:16;
+
+       /* OFFSET 0xD4 */
+       u32 application_tag_for_verify:16;
+       u32 application_tag_for_generate:16;
+
+       /* OFFSET 0xD8 */
+       u32 reference_tag_seed_for_verify_function;
+
+       /* OFFSET 0xDC */
+       u32 reserved_DC;
+
+       /* OFFSET 0xE0 */
+       u32 reserved_E0_0:16;
+       u32 application_tag_mask_for_generate:16;
+
+       /* OFFSET 0xE4 */
+       u32 block_protection_control:16;
+       u32 application_tag_mask_for_verify:16;
+
+       /* OFFSET 0xE8 */
+       u32 block_protection_error:8;
+       u32 reserved_E8_0:24;
+
+       /* OFFSET 0xEC */
+       u32 reference_tag_seed_for_verify;
+
+       /* OFFSET 0xF0 */
+       u32 intermediate_crc_valid_snapshot:16;
+       u32 reserved_F0_0:16;
+
+       /* OFFSET 0xF4 */
+       u32 reference_tag_seed_for_verify_function_snapshot;
+
+       /* OFFSET 0xF8 */
+       u32 snapshot_of_reserved_dword_DC_of_tc;
+
+       /* OFFSET 0xFC */
+       u32 reference_tag_seed_for_generate_function_snapshot;
+
+};
+
+#endif /* _SCU_TASK_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
new file mode 100644 (file)
index 0000000..d6bcdd0
--- /dev/null
@@ -0,0 +1,1676 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/completion.h>
+#include <linux/irqflags.h>
+#include "sas.h"
+#include <scsi/libsas.h>
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "isci.h"
+#include "request.h"
+#include "task.h"
+#include "host.h"
+
+/**
+ * isci_task_refuse() - complete the request to the upper layer driver in
+ *     the case where an I/O needs to be completed back in the submit path.
+ * @ihost: host on which the request was queued
+ * @task: request to complete
+ * @response: response code for the completed task.
+ * @status: status code for the completed task.
+ */
+static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
+                            enum service_response response,
+                            enum exec_status status)
+{
+       enum isci_completion_selection disposition;
+
+       disposition = isci_perform_normal_io_completion;
+       disposition = isci_task_set_completion_status(task, response, status,
+                                                     disposition);
+
+       /* Tasks aborted specifically by a call to the lldd_abort_task
+        * function should not be completed to the host in the regular path.
+        */
+       switch (disposition) {
+       case isci_perform_normal_io_completion:
+               /* Normal notification (task_done) */
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: Normal - task = %p, response=%d, "
+                       "status=%d\n",
+                       __func__, task, response, status);
+
+               task->lldd_task = NULL;
+
+               isci_execpath_callback(ihost, task, task->task_done);
+               break;
+
+       case isci_perform_aborted_io_completion:
+               /*
+                * No notification because this request is already in the
+                * abort path.
+                */
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: Aborted - task = %p, response=%d, "
+                       "status=%d\n",
+                       __func__, task, response, status);
+               break;
+
+       case isci_perform_error_io_completion:
+               /* Use sas_task_abort */
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: Error - task = %p, response=%d, "
+                       "status=%d\n",
+                       __func__, task, response, status);
+
+               isci_execpath_callback(ihost, task, sas_task_abort);
+               break;
+
+       default:
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: isci task notification default case!",
+                       __func__);
+               sas_task_abort(task);
+               break;
+       }
+}
+
+#define for_each_sas_task(num, task) \
+       for (; num > 0; num--,\
+            task = list_entry(task->list.next, struct sas_task, list))
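+
+/*
+ * Usage sketch (illustrative, mirroring isci_task_execute_task() below):
+ * libsas hands over the first of 'num' tasks linked through task->list,
+ * and the macro above walks each of them in turn:
+ *
+ *	for_each_sas_task(num, task) {
+ *		... look up the device and submit 'task' ...
+ *	}
+ */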
+
+
+static inline int isci_device_io_ready(struct isci_remote_device *idev,
+                                      struct sas_task *task)
+{
+       return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
+                     (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
+                      isci_task_is_ncq_recovery(task))
+                   : 0;
+}
+/**
+ * isci_task_execute_task() - This function is one of the SAS Domain Template
+ *    functions. This function is called by libsas to send a task down to
+ *    hardware.
+ * @task: This parameter specifies the SAS task to send.
+ * @num: This parameter specifies the number of tasks to queue.
+ * @gfp_flags: This parameter specifies the context of this call.
+ *
+ * Return: status; zero indicates success.
+ */
+int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
+{
+       struct isci_host *ihost = dev_to_ihost(task->dev);
+       struct isci_remote_device *idev;
+       unsigned long flags;
+       bool io_ready;
+       u16 tag;
+
+       dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
+
+       for_each_sas_task(num, task) {
+               enum sci_status status = SCI_FAILURE;
+
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               idev = isci_lookup_device(task->dev);
+               io_ready = isci_device_io_ready(idev, task);
+               tag = isci_alloc_tag(ihost);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+               dev_dbg(&ihost->pdev->dev,
+                       "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
+                       task, num, task->dev, idev, idev ? idev->flags : 0,
+                       task->uldd_task);
+
+               if (!idev) {
+                       isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
+                                        SAS_DEVICE_UNKNOWN);
+               } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+                       /* Indicate QUEUE_FULL so that the scsi midlayer
+                        * retries.
+                        */
+                       isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
+                                        SAS_QUEUE_FULL);
+               } else {
+                       /* There is a device and it's ready for I/O. */
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+
+                       if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+                               /* The I/O was aborted. */
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                      flags);
+
+                               isci_task_refuse(ihost, task,
+                                                SAS_TASK_UNDELIVERED,
+                                                SAM_STAT_TASK_ABORTED);
+                       } else {
+                               task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+                               spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+                               /* build and send the request. */
+                               status = isci_request_execute(ihost, idev, task, tag);
+
+                               if (status != SCI_SUCCESS) {
+
+                                       spin_lock_irqsave(&task->task_state_lock, flags);
+                                       /* Did not really start this command. */
+                                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+					/* Indicate QUEUE_FULL so that the scsi
+					 * midlayer retries.  If the request
+					 * failed for remote device reasons,
+					 * it gets returned as
+					 * SAS_TASK_UNDELIVERED next time
+					 * through.
+					 */
+                                       isci_task_refuse(ihost, task,
+                                                        SAS_TASK_COMPLETE,
+                                                        SAS_QUEUE_FULL);
+                               }
+                       }
+               }
+               if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+                       spin_lock_irqsave(&ihost->scic_lock, flags);
+                       /* command never hit the device, so just free
+                        * the tci and skip the sequence increment
+                        */
+                       isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+                       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               }
+               isci_put_device(idev);
+       }
+       return 0;
+}
+
+static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
+{
+       struct isci_tmf *isci_tmf;
+       enum sci_status status;
+
+       if (tmf_task != ireq->ttype)
+               return SCI_FAILURE;
+
+       isci_tmf = isci_request_access_tmf(ireq);
+
+       switch (isci_tmf->tmf_code) {
+
+       case isci_tmf_sata_srst_high:
+       case isci_tmf_sata_srst_low: {
+               struct host_to_dev_fis *fis = &ireq->stp.cmd;
+
+               memset(fis, 0, sizeof(*fis));
+
+               fis->fis_type  =  0x27;
+               fis->flags     &= ~0x80;
+               fis->flags     &= 0xF0;
+               if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
+                       fis->control |= ATA_SRST;
+               else
+                       fis->control &= ~ATA_SRST;
+               break;
+       }
+	/* other management commands go here... */
+       default:
+               return SCI_FAILURE;
+       }
+
+       /* core builds the protocol specific request
+        *  based on the h2d fis.
+        */
+       status = sci_task_request_construct_sata(ireq);
+
+       return status;
+}
+
+static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+                                                   struct isci_remote_device *idev,
+                                                   u16 tag, struct isci_tmf *isci_tmf)
+{
+       enum sci_status status = SCI_FAILURE;
+       struct isci_request *ireq = NULL;
+       struct domain_device *dev;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: isci_tmf = %p\n", __func__, isci_tmf);
+
+       dev = idev->domain_dev;
+
+       /* do common allocation and init of request object. */
+       ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
+       if (!ireq)
+               return NULL;
+
+	/* let the core do its construction. */
+       status = sci_task_request_construct(ihost, idev, tag,
+                                            ireq);
+
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: sci_task_request_construct failed - "
+                        "status = 0x%x\n",
+                        __func__,
+                        status);
+               return NULL;
+       }
+
+       /* XXX convert to get this from task->tproto like other drivers */
+       if (dev->dev_type == SAS_END_DEV) {
+               isci_tmf->proto = SAS_PROTOCOL_SSP;
+               status = sci_task_request_construct_ssp(ireq);
+               if (status != SCI_SUCCESS)
+                       return NULL;
+       }
+
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+               isci_tmf->proto = SAS_PROTOCOL_SATA;
+               status = isci_sata_management_task_request_build(ireq);
+
+               if (status != SCI_SUCCESS)
+                       return NULL;
+       }
+       return ireq;
+}
+
+static int isci_task_execute_tmf(struct isci_host *ihost,
+                                struct isci_remote_device *idev,
+                                struct isci_tmf *tmf, unsigned long timeout_ms)
+{
+       DECLARE_COMPLETION_ONSTACK(completion);
+       enum sci_task_status status = SCI_TASK_FAILURE;
+       struct isci_request *ireq;
+       int ret = TMF_RESP_FUNC_FAILED;
+       unsigned long flags;
+       unsigned long timeleft;
+       u16 tag;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       tag = isci_alloc_tag(ihost);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+               return ret;
+
+       /* sanity check, return TMF_RESP_FUNC_FAILED
+        * if the device is not there and ready.
+        */
+       if (!idev ||
+           (!test_bit(IDEV_IO_READY, &idev->flags) &&
+            !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: idev = %p not ready (%#lx)\n",
+                       __func__,
+                       idev, idev ? idev->flags : 0);
+               goto err_tci;
+       } else
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: idev = %p\n",
+                       __func__, idev);
+
+       /* Assign the pointer to the TMF's completion kernel wait structure. */
+       tmf->complete = &completion;
+
+       ireq = isci_task_request_build(ihost, idev, tag, tmf);
+       if (!ireq)
+               goto err_tci;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       /* start the TMF io. */
+       status = sci_controller_start_task(ihost, idev, ireq);
+
+       if (status != SCI_TASK_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: start_io failed - status = 0x%x, request = %p\n",
+                        __func__,
+                        status,
+                        ireq);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               goto err_tci;
+       }
+
+       if (tmf->cb_state_func != NULL)
+               tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
+
+       isci_request_change_state(ireq, started);
+
+       /* add the request to the remote device request list. */
+       list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* Wait for the TMF to complete, or a timeout. */
+       timeleft = wait_for_completion_timeout(&completion,
+                                              msecs_to_jiffies(timeout_ms));
+
+       if (timeleft == 0) {
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+
+               if (tmf->cb_state_func != NULL)
+                       tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+
+               sci_controller_terminate_request(ihost,
+                                                 idev,
+                                                 ireq);
+
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+               wait_for_completion(tmf->complete);
+       }
+
+       isci_print_tmf(tmf);
+
+       if (tmf->status == SCI_SUCCESS)
+               ret =  TMF_RESP_FUNC_COMPLETE;
+       else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: tmf.status == "
+                       "SCI_FAILURE_IO_RESPONSE_VALID\n",
+                       __func__);
+               ret =  TMF_RESP_FUNC_COMPLETE;
+       }
+       /* Else - leave the default "failed" status alone. */
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: completed request = %p\n",
+               __func__,
+               ireq);
+
+       return ret;
+
+ err_tci:
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       return ret;
+}
+
+static void isci_task_build_tmf(struct isci_tmf *tmf,
+                               enum isci_tmf_function_codes code,
+                               void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+                                                   struct isci_tmf *,
+                                                   void *),
+                               void *cb_data)
+{
+       memset(tmf, 0, sizeof(*tmf));
+
+       tmf->tmf_code      = code;
+       tmf->cb_state_func = tmf_sent_cb;
+       tmf->cb_data       = cb_data;
+}
+
+static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
+                                          enum isci_tmf_function_codes code,
+                                          void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+                                                              struct isci_tmf *,
+                                                              void *),
+                                          struct isci_request *old_request)
+{
+       isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
+       tmf->io_tag = old_request->io_tag;
+}
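+
+/*
+ * Usage sketch (illustrative, not from the original source): a caller such
+ * as an abort-task handler would combine the helpers above, building the
+ * TMF from the request being aborted and then issuing it with a timeout in
+ * milliseconds:
+ *
+ *	struct isci_tmf tmf;
+ *
+ *	isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
+ *				       NULL, old_request);
+ *	ret = isci_task_execute_tmf(ihost, idev, &tmf, timeout_ms);
+ */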
+
+/**
+ * isci_task_validate_request_to_abort() - This function checks the given I/O
+ *    against the "started" state.  If the request is still "started", its
+ *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
+ *    BEFORE CALLING THIS FUNCTION.
+ * @isci_request: This parameter specifies the request object to control.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_device: This is the device to which the request is pending.
+ * @aborted_io_completion: This is a completion structure that will be added to
+ *    the request in case it is changed to aborting; this completion is
+ *    triggered when the request is fully completed.
+ *
+ * Either "started" on successful change of the task status to "aborted", or
+ * "unallocated" if the task cannot be controlled.
+ */
+static enum isci_request_status isci_task_validate_request_to_abort(
+       struct isci_request *isci_request,
+       struct isci_host *isci_host,
+       struct isci_remote_device *isci_device,
+       struct completion *aborted_io_completion)
+{
+       enum isci_request_status old_state = unallocated;
+
+       /* Only abort the task if it's in the
+        *  device's request_in_process list
+        */
+       if (isci_request && !list_empty(&isci_request->dev_node)) {
+               old_state = isci_request_change_started_to_aborted(
+                       isci_request, aborted_io_completion);
+
+       }
+
+       return old_state;
+}
+
+/**
+ * isci_request_cleanup_completed_loiterer() - This function will take care of
+ *    the final cleanup on any request which has been explicitly terminated.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_device: This is the device to which the request is pending.
+ * @isci_request: This parameter specifies the terminated request object.
+ * @task: This parameter is the libsas I/O request.
+ */
+static void isci_request_cleanup_completed_loiterer(
+       struct isci_host          *isci_host,
+       struct isci_remote_device *isci_device,
+       struct isci_request       *isci_request,
+       struct sas_task           *task)
+{
+       unsigned long flags;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_device=%p, request=%p, task=%p\n",
+               __func__, isci_device, isci_request, task);
+
+       if (task != NULL) {
+
+               spin_lock_irqsave(&task->task_state_lock, flags);
+               task->lldd_task = NULL;
+
+               task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+
+               isci_set_task_doneflags(task);
+
+               /* If this task is not in the abort path, call task_done. */
+               if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+                       task->task_done(task);
+               } else
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+       }
+
+       if (isci_request != NULL) {
+               spin_lock_irqsave(&isci_host->scic_lock, flags);
+               list_del_init(&isci_request->dev_node);
+               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+       }
+}
+
+/**
+ * isci_terminate_request_core() - This function will terminate the given
+ *    request, and wait for it to complete.  This function must only be called
+ *    from a thread that can wait.  Note that the request is terminated and
+ *    completed (back to the host, if started there).
+ * @ihost: This parameter specifies the SCU.
+ * @idev: This parameter specifies the target device.
+ * @isci_request: The I/O request to be terminated.
+ *
+ */
+static void isci_terminate_request_core(struct isci_host *ihost,
+                                       struct isci_remote_device *idev,
+                                       struct isci_request *isci_request)
+{
+       enum sci_status status      = SCI_SUCCESS;
+       bool was_terminated         = false;
+       bool needs_cleanup_handling = false;
+       enum isci_request_status request_status;
+       unsigned long     flags;
+       unsigned long     termination_completed = 1;
+       struct completion *io_request_completion;
+       struct sas_task   *task;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: device = %p; request = %p\n",
+               __func__, idev, isci_request);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+
+       io_request_completion = isci_request->io_request_completion;
+
+       task = (isci_request->ttype == io_task)
+               ? isci_request_access_task(isci_request)
+               : NULL;
+
+       /* Note that we are not going to control
+        * the target to abort the request.
+        */
+       set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
+
+       /* Make sure the request wasn't just sitting around signalling
+        * device condition (if the request handle is NULL, then the
+        * request completed but needed additional handling here).
+        */
+       if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+               was_terminated = true;
+               needs_cleanup_handling = true;
+               status = sci_controller_terminate_request(ihost,
+                                                          idev,
+                                                          isci_request);
+       }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /*
+        * The only time the request to terminate will
+        * fail is when the io request is completed and
+        * being aborted.
+        */
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: sci_controller_terminate_request"
+                       " returned = 0x%x\n",
+                       __func__, status);
+
+               isci_request->io_request_completion = NULL;
+
+       } else {
+               if (was_terminated) {
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: before completion wait (%p/%p)\n",
+                               __func__, isci_request, io_request_completion);
+
+                       /* Wait here for the request to complete. */
+                       #define TERMINATION_TIMEOUT_MSEC 500
+                       termination_completed
+                               = wait_for_completion_timeout(
+                                  io_request_completion,
+                                  msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
+
+                       if (!termination_completed) {
+
+                               /* The request to terminate has timed out.  */
+                               spin_lock_irqsave(&ihost->scic_lock,
+                                                 flags);
+
+                               /* Check for state changes. */
+                               if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+
+                                       /* The best we can do is to have the
+                                        * request die a silent death if it
+                                        * ever really completes.
+                                        *
+                                        * Set the request state to "dead",
+                                        * and clear the task pointer so that
+                                        * an actual completion event callback
+                                        * doesn't do anything.
+                                        */
+                                       isci_request->status = dead;
+                                       isci_request->io_request_completion
+                                               = NULL;
+
+                                       if (isci_request->ttype == io_task) {
+
+						/* Break links with the
+						 * sas_task.
+						 */
+                                               isci_request->ttype_ptr.io_task_ptr
+                                                       = NULL;
+                                       }
+                               } else
+                                       termination_completed = 1;
+
+                               spin_unlock_irqrestore(&ihost->scic_lock,
+                                                      flags);
+
+                               if (!termination_completed) {
+
+                                       dev_dbg(&ihost->pdev->dev,
+                                               "%s: *** Timeout waiting for "
+                                               "termination(%p/%p)\n",
+                                               __func__, io_request_completion,
+                                               isci_request);
+
+                                       /* The request can no longer be referenced
+                                        * safely since it may go away if the
+                                        * termination ever really does complete.
+                                        */
+                                       isci_request = NULL;
+                               }
+                       }
+                       if (termination_completed)
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: after completion wait (%p/%p)\n",
+                                       __func__, isci_request, io_request_completion);
+               }
+
+               if (termination_completed) {
+
+                       isci_request->io_request_completion = NULL;
+
+                       /* Peek at the status of the request.  This will tell
+                        * us if there was special handling on the request such that it
+                        * needs to be detached and freed here.
+                        */
+                       spin_lock_irqsave(&isci_request->state_lock, flags);
+                       request_status = isci_request->status;
+
+                       if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
+                           && ((request_status == aborted)
+                               || (request_status == aborting)
+                               || (request_status == terminating)
+                               || (request_status == completed)
+                               || (request_status == dead)
+                               )
+                           ) {
+
+                               /* The completion routine won't free a request in
+                                * the aborted/aborting/etc. states, so we do
+                                * it here.
+                                */
+                               needs_cleanup_handling = true;
+                       }
+                       spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+               }
+               if (needs_cleanup_handling)
+                       isci_request_cleanup_completed_loiterer(
+                               ihost, idev, isci_request, task);
+       }
+}
+
+/**
+ * isci_terminate_pending_requests() - This function will change the state of
+ *    all of the requests on the given device to "terminating", will terminate
+ *    the requests, and wait for them to complete.  This function must only be
+ *    called from a thread that can wait.  Note that the requests are all
+ *    terminated and completed (back to the host, if started there).
+ * @ihost: This parameter specifies the SCU.
+ * @idev: This parameter specifies the target.
+ *
+ */
+void isci_terminate_pending_requests(struct isci_host *ihost,
+                                    struct isci_remote_device *idev)
+{
+       struct completion request_completion;
+       enum isci_request_status old_state;
+       unsigned long flags;
+       LIST_HEAD(list);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       list_splice_init(&idev->reqs_in_process, &list);
+
+       /* assumes that isci_terminate_request_core deletes from the list */
+       while (!list_empty(&list)) {
+               struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
+
+               /* Change state to "terminating" if it is currently
+                * "started".
+                */
+               old_state = isci_request_change_started_to_newstate(ireq,
+                                                                   &request_completion,
+                                                                   terminating);
+               switch (old_state) {
+               case started:
+               case completed:
+               case aborting:
+                       break;
+               default:
+                       /* termination in progress, or otherwise dispositioned.
+                        * We know the request was on 'list' so should be safe
+                        * to move it back to reqs_in_process
+                        */
+                       list_move(&ireq->dev_node, &idev->reqs_in_process);
+                       ireq = NULL;
+                       break;
+               }
+
+               if (!ireq)
+                       continue;
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+               init_completion(&request_completion);
+
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: idev=%p request=%p; task=%p old_state=%d\n",
+                        __func__, idev, ireq,
+                       ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+                       old_state);
+
+               /* If the old_state is started:
+                * This request was not already being aborted. If it had been,
+                * then the aborting I/O (i.e. the TMF request) would not be in
+                * the aborting state, and thus would be terminated here.  Note
+                * that since the TMF completion's call to the kernel function
+                * "complete()" does not happen until the pending I/O request
+                * terminate fully completes, we do not have to implement a
+                * special wait here for already aborting requests - the
+                * termination of the TMF request will force the request
+                * to finish its already-started termination.
+                *
+                * If old_state == completed:
+                * This request completed from the SCU hardware perspective
+                * and now just needs cleaning up in terms of freeing the
+                * request and potentially calling up to libsas.
+                *
+                * If old_state == aborting:
+                * This request has already gone through a TMF timeout, but may
+                * not have been terminated; needs cleaning up at least.
+                */
+               isci_terminate_request_core(ihost, idev, ireq);
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+       }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
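
The drain loop above is the classic splice-then-walk idiom: steal the whole
reqs_in_process list in one short critical section, then do the slow per-entry
work without holding the lock. A minimal user-space sketch of the same idiom,
assuming POSIX threads; struct node, pending and work_on() are hypothetical
stand-ins, not driver code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;            /* shared list, protected by lock */

static void work_on(struct node *n)
{
        /* Slow work done with the lock dropped, playing the role of
         * isci_terminate_request_core() above. */
        printf("terminating request %d\n", n->id);
}

static void drain_pending(void)
{
        struct node *list;

        /* "Splice": take the whole list in one short critical section. */
        pthread_mutex_lock(&lock);
        list = pending;
        pending = NULL;
        pthread_mutex_unlock(&lock);

        while (list) {
                struct node *n = list;

                list = list->next;
                work_on(n);
                free(n);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        break;
                n->id = i;
                pthread_mutex_lock(&lock);
                n->next = pending;
                pending = n;
                pthread_mutex_unlock(&lock);
        }
        drain_pending();
        return 0;
}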
+
+/**
+ * isci_task_send_lu_reset_sas() - This function is called by one of the SAS
+ *    Domain Template functions to send a LUN reset to the given target.
+ * @isci_host: This parameter specifies the SCU.
+ * @isci_device: This parameter specifies the target.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+static int isci_task_send_lu_reset_sas(
+       struct isci_host *isci_host,
+       struct isci_remote_device *isci_device,
+       u8 *lun)
+{
+       struct isci_tmf tmf;
+       int ret = TMF_RESP_FUNC_FAILED;
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: isci_host = %p, isci_device = %p\n",
+               __func__, isci_host, isci_device);
+       /* Send the LUN reset to the target.  By the time the call returns,
+        * the TMF has fully executed in the target (in which case the return
+        * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or was
+        * otherwise unable to be executed (in which case the return value is
+        * "TMF_RESP_FUNC_FAILED").
+        */
+       isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
+
+       #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
+       ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+
+       if (ret == TMF_RESP_FUNC_COMPLETE)
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s: %p: TMF_LU_RESET passed\n",
+                       __func__, isci_device);
+       else
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s: %p: TMF_LU_RESET failed (%x)\n",
+                       __func__, isci_device, ret);
+
+       return ret;
+}
+
+static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
+                                struct isci_remote_device *idev, u8 *lun)
+{
+       int ret = TMF_RESP_FUNC_FAILED;
+       struct isci_tmf tmf;
+
+       /* Send the soft reset to the target */
+       #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
+       isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
+
+       ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
+
+       if (ret != TMF_RESP_FUNC_COMPLETE) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: Assert SRST failed (%p) = %x",
+                        __func__, idev, ret);
+
+               /* Return the failure so that the LUN reset is escalated
+                * to a target reset.
+                */
+       }
+       return ret;
+}
+
+/**
+ * isci_task_lu_reset() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas,
+ *    to reset the given lun. Note the assumption that while this call is
+ *    executing, no I/O will be sent by the host to the device.
+ * @domain_device: This parameter specifies the device whose lun is to be reset.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
+{
+       struct isci_host *isci_host = dev_to_ihost(domain_device);
+       struct isci_remote_device *isci_device;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+       isci_device = isci_lookup_device(domain_device);
+       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
+                __func__, domain_device, isci_host, isci_device);
+
+       if (isci_device)
+               set_bit(IDEV_EH, &isci_device->flags);
+
+       /* If there is a device reset pending on any request in the
+        * device's list, fail this LUN reset request in order to
+        * escalate to the device reset.
+        */
+       if (!isci_device ||
+           isci_device_is_reset_pending(isci_host, isci_device)) {
+               dev_dbg(&isci_host->pdev->dev,
+                        "%s: No dev (%p), or "
+                        "RESET PENDING: domain_device=%p\n",
+                        __func__, isci_device, domain_device);
+               ret = TMF_RESP_FUNC_FAILED;
+               goto out;
+       }
+
+       /* Send the task management part of the reset. */
+       if (sas_protocol_ata(domain_device->tproto)) {
+               ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
+       } else {
+               ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
+       }
+
+       /* If the LUN reset worked, all the I/O can now be terminated. */
+       if (ret == TMF_RESP_FUNC_COMPLETE)
+               /* Terminate all I/O now. */
+               isci_terminate_pending_requests(isci_host,
+                                               isci_device);
+
+ out:
+       isci_put_device(isci_device);
+       return ret;
+}
+
+
+/*      int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
+int isci_task_clear_nexus_port(struct asd_sas_port *port)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+/* Task Management Functions. Must be called from process context.      */
+
+/**
+ * isci_abort_task_process_cb() - This is a helper function for the abort task
+ *    TMF command.  It manages the request state with respect to the successful
+ *    transmission / completion of the abort task request.
+ * @cb_state: This parameter specifies when this function was called - either
+ *    after the TMF request has been started or after it has timed out.
+ * @tmf: This parameter specifies the TMF in progress.
+ * @cb_data: This parameter specifies the callback data; here, the request
+ *    being aborted.
+ *
+ */
+static void isci_abort_task_process_cb(
+       enum isci_tmf_cb_state cb_state,
+       struct isci_tmf *tmf,
+       void *cb_data)
+{
+       struct isci_request *old_request;
+
+       old_request = (struct isci_request *)cb_data;
+
+       dev_dbg(&old_request->isci_host->pdev->dev,
+               "%s: tmf=%p, old_request=%p\n",
+               __func__, tmf, old_request);
+
+       switch (cb_state) {
+
+       case isci_tmf_started:
+               /* The TMF has been started.  Nothing to do here, since the
+                * request state was already set to "aborted" by the abort
+                * task function.
+                */
+               if ((old_request->status != aborted)
+                       && (old_request->status != completed))
+                       dev_dbg(&old_request->isci_host->pdev->dev,
+                               "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
+                               __func__, old_request->status, tmf, old_request);
+               break;
+
+       case isci_tmf_timed_out:
+
+               /* Set the task's state to "aborting", since the abort task
+                * function thread set it to "aborted" (above) in anticipation
+                * of the task management request working correctly.  Since the
+                * timeout has now fired, the TMF request failed.  We set the
+                * state such that the request completion will indicate the
+                * device is no longer present.
+                */
+               isci_request_change_state(old_request, aborting);
+               break;
+
+       default:
+               dev_dbg(&old_request->isci_host->pdev->dev,
+                       "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
+                       __func__, cb_state, tmf, old_request);
+               break;
+       }
+}
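
The cb_state_func/cb_data pair used here is declared in struct isci_tmf
(task.h, below): the execute path calls back once the TMF is started and again
if it times out. A stripped-down user-space sketch of that callback contract;
struct tmf, my_cb() and execute_tmf() are hypothetical:

#include <stdio.h>

enum isci_tmf_cb_state {
        isci_tmf_init_state = 0,
        isci_tmf_started,
        isci_tmf_timed_out
};

struct tmf {            /* reduced stand-in for struct isci_tmf */
        void (*cb_state_func)(enum isci_tmf_cb_state, struct tmf *, void *);
        void *cb_data;
};

static void my_cb(enum isci_tmf_cb_state state, struct tmf *t, void *data)
{
        printf("tmf %p: cb_state %d, cb_data %p\n", (void *)t, state, data);
}

static void execute_tmf(struct tmf *t)
{
        /* The execute path notifies once the request is started ... */
        if (t->cb_state_func)
                t->cb_state_func(isci_tmf_started, t, t->cb_data);
        /* ... and again if the timeout fires. */
        if (t->cb_state_func)
                t->cb_state_func(isci_tmf_timed_out, t, t->cb_data);
}

int main(void)
{
        struct tmf t = { .cb_state_func = my_cb, .cb_data = (void *)0x1 };

        execute_tmf(&t);
        return 0;
}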
+
+/**
+ * isci_task_abort_task() - This function is one of the SAS Domain Template
+ *    functions. This function is called by libsas to abort a specified task.
+ * @task: This parameter specifies the SAS task to abort.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task(struct sas_task *task)
+{
+       struct isci_host *isci_host = dev_to_ihost(task->dev);
+       DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
+       struct isci_request       *old_request = NULL;
+       enum isci_request_status  old_state;
+       struct isci_remote_device *isci_device = NULL;
+       struct isci_tmf           tmf;
+       int                       ret = TMF_RESP_FUNC_FAILED;
+       unsigned long             flags;
+       bool                      any_dev_reset = false;
+
+       /* Get the isci_request reference from the task.  Note that
+        * this check does not depend on the pending request list
+        * in the device, because tasks driving resets may land here
+        * after completion in the core.
+        */
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+       spin_lock(&task->task_state_lock);
+
+       old_request = task->lldd_task;
+
+       /* If task is already done, the request isn't valid */
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+           (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+           old_request)
+               isci_device = isci_lookup_device(task->dev);
+
+       spin_unlock(&task->task_state_lock);
+       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: task = %p\n", __func__, task);
+
+       if (!isci_device || !old_request)
+               goto out;
+
+       set_bit(IDEV_EH, &isci_device->flags);
+
+       /* This version of the driver will fail abort requests for
+        * SATA/STP.  Failing the abort request this way will cause the
+        * SCSI error handler thread to escalate to LUN reset.
+        */
+       if (sas_protocol_ata(task->task_proto)) {
+               dev_dbg(&isci_host->pdev->dev,
+                           " task %p is for a STP/SATA device;"
+                           " returning TMF_RESP_FUNC_FAILED\n"
+                           " to cause a LUN reset...\n", task);
+               goto out;
+       }
+
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: old_request == %p\n", __func__, old_request);
+
+       any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
+
+       spin_lock_irqsave(&task->task_state_lock, flags);
+
+       any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+
+       /* If the request reference could not be extracted from the task,
+        * then the request has already completed.  Alternatively, if a
+        * reset is pending, this abort request must be failed in order
+        * to escalate to the target reset.
+        */
+       if ((old_request == NULL) || any_dev_reset) {
+
+               /* If the device reset task flag is set, fail the task
+                * management request.  Otherwise, the original request
+                * has completed.
+                */
+               if (any_dev_reset) {
+
+                       /* Turn off the task's DONE to make sure this
+                        * task is escalated to a target reset.
+                        */
+                       task->task_state_flags &= ~SAS_TASK_STATE_DONE;
+
+                       /* Make the reset happen as soon as possible. */
+                       task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+                       /* Fail the task management request in order to
+                        * escalate to the target reset.
+                        */
+                       ret = TMF_RESP_FUNC_FAILED;
+
+                       dev_dbg(&isci_host->pdev->dev,
+                               "%s: Failing task abort in order to "
+                               "escalate to target reset because\n"
+                               "SAS_TASK_NEED_DEV_RESET is set for "
+                               "task %p on dev %p\n",
+                               __func__, task, isci_device);
+
+
+               } else {
+                       /* The request has already completed and there
+                        * is nothing to do here other than to set the task
+                        * done bit, and indicate that the task abort function
+                        * was successful.
+                        */
+                       isci_set_task_doneflags(task);
+
+                       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+                       ret = TMF_RESP_FUNC_COMPLETE;
+
+                       dev_dbg(&isci_host->pdev->dev,
+                               "%s: abort task not needed for %p\n",
+                               __func__, task);
+               }
+               goto out;
+       } else {
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
+       }
+
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+       /* Check the request status and change it to "aborted" if it is
+        * currently "started"; if the state was changed, set the I/O kernel
+        * completion struct that will be triggered when the request
+        * completes.
+        */
+       old_state = isci_task_validate_request_to_abort(
+                               old_request, isci_host, isci_device,
+                               &aborted_io_completion);
+       if ((old_state != started) &&
+           (old_state != completed) &&
+           (old_state != aborting)) {
+
+               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+               /* The request was already being handled by someone else (because
+                * they got to set the state away from started).
+                */
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s:  device = %p; old_request %p already being aborted\n",
+                       __func__,
+                       isci_device, old_request);
+               ret = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       }
+       if (task->task_proto == SAS_PROTOCOL_SMP ||
+           test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+
+               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+               dev_dbg(&isci_host->pdev->dev,
+                       "%s: SMP request (%d)"
+                       " or complete_in_target (%d), thus no TMF\n",
+                       __func__, (task->task_proto == SAS_PROTOCOL_SMP),
+                       test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
+
+               /* Set the state on the task. */
+               isci_task_all_done(task);
+
+               ret = TMF_RESP_FUNC_COMPLETE;
+
+               /* Stopping and SMP devices are not sent a TMF, and are not
+                * reset, but the outstanding I/O request is terminated below.
+                */
+       } else {
+               /* Fill in the tmf structure */
+               isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
+                                              isci_abort_task_process_cb,
+                                              old_request);
+
+               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+               #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
+               ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+                                           ISCI_ABORT_TASK_TIMEOUT_MS);
+
+               if (ret != TMF_RESP_FUNC_COMPLETE)
+                       dev_dbg(&isci_host->pdev->dev,
+                               "%s: isci_task_send_tmf failed\n",
+                               __func__);
+       }
+       if (ret == TMF_RESP_FUNC_COMPLETE) {
+               set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
+
+               /* Clean up the request on our side, and wait for the aborted
+                * I/O to complete.
+                */
+               isci_terminate_request_core(isci_host, isci_device, old_request);
+       }
+
+       /* Make sure we do not leave a reference to aborted_io_completion */
+       old_request->io_request_completion = NULL;
+ out:
+       isci_put_device(isci_device);
+       return ret;
+}
+
+/**
+ * isci_task_abort_task_set() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas,
+ *    to abort all tasks for the given lun.
+ * @d_device: This parameter specifies the domain device associated with this
+ *    request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task_set(
+       struct domain_device *d_device,
+       u8 *lun)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_clear_aca() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ *    request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_aca(
+       struct domain_device *d_device,
+       u8 *lun)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+/**
+ * isci_task_clear_task_set() - This function is one of the SAS Domain Template
+ *    functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ *    request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_task_set(
+       struct domain_device *d_device,
+       u8 *lun)
+{
+       return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_query_task() - This function is implemented to cause libsas to
+ *    correctly escalate the failed abort to a LUN or target reset (this is
+ *    because the libsas function sas_scsi_find_task does not correctly
+ *    interpret all return codes from the abort task call).  When
+ *    TMF_RESP_FUNC_SUCC is returned, libsas turns this into a LUN reset;
+ *    when FUNC_FAILED is returned, libsas will turn this into a target reset.
+ * @task: This parameter specifies the sas task being queried.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_query_task(
+       struct sas_task *task)
+{
+       /* See if there is a pending device reset for this device. */
+       if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+               return TMF_RESP_FUNC_FAILED;
+       else
+               return TMF_RESP_FUNC_SUCC;
+}
+
+/*
+ * isci_task_request_complete() - This function is called by the sci core when
+ *    a task request completes.
+ * @ihost: This parameter specifies the ISCI host object
+ * @ireq: This parameter is the completed isci_request object.
+ * @completion_status: This parameter specifies the completion status from the
+ *    sci core.
+ *
+ * none.
+ */
+void
+isci_task_request_complete(struct isci_host *ihost,
+                          struct isci_request *ireq,
+                          enum sci_task_status completion_status)
+{
+       struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+       struct completion *tmf_complete;
+
+       dev_dbg(&ihost->pdev->dev,
+               "%s: request = %p, status=%d\n",
+               __func__, ireq, completion_status);
+
+       isci_request_change_state(ireq, completed);
+
+       tmf->status = completion_status;
+       set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
+
+       if (tmf->proto == SAS_PROTOCOL_SSP) {
+               memcpy(&tmf->resp.resp_iu,
+                      &ireq->ssp.rsp,
+                      SSP_RESP_IU_MAX_SIZE);
+       } else if (tmf->proto == SAS_PROTOCOL_SATA) {
+               memcpy(&tmf->resp.d2h_fis,
+                      &ireq->stp.rsp,
+                      sizeof(struct dev_to_host_fis));
+       }
+
+       /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+       tmf_complete = tmf->complete;
+
+       sci_controller_complete_io(ihost, ireq->target_device, ireq);
+       /* Set the 'terminated' flag to make sure the request cannot be
+        * terminated or completed again.
+        */
+       set_bit(IREQ_TERMINATED, &ireq->flags);
+
+       isci_request_change_state(ireq, unallocated);
+       list_del_init(&ireq->dev_node);
+
+       /* The task management part completes last. */
+       complete(tmf_complete);
+}
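
complete(tmf_complete) is the producer half of the rendezvous:
isci_task_execute_tmf() (referenced above) blocks in wait_for_completion()
until this runs. A user-space sketch of the same handshake using a condition
variable; the names are toy stand-ins for the kernel primitives, POSIX threads
assumed:

#include <pthread.h>
#include <stdio.h>

struct completion {             /* toy stand-in for the kernel's */
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
};

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static struct completion tmf_done = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void *irq_side(void *arg)   /* plays isci_task_request_complete() */
{
        complete(&tmf_done);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, irq_side, NULL);
        wait_for_completion(&tmf_done);   /* plays the TMF execute path */
        pthread_join(t, NULL);
        printf("TMF completed\n");
        return 0;
}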
+
+static void isci_smp_task_timedout(unsigned long _task)
+{
+       struct sas_task *task = (void *) _task;
+       unsigned long flags;
+
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+               task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+       complete(&task->completion);
+}
+
+static void isci_smp_task_done(struct sas_task *task)
+{
+       if (!del_timer(&task->timer))
+               return;
+       complete(&task->completion);
+}
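
isci_smp_task_done() and isci_smp_task_timedout() are written so that exactly
one of them signals the completion: del_timer() returns nonzero only if the
timer was still pending, so the done path completes only when it deactivated
the timer first; otherwise the already-running timer handler does. A rough
user-space analogue of that single-claim idiom, using a C11 atomic flag; the
names are hypothetical:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag claimed = ATOMIC_FLAG_INIT;

/* Whichever path sets the flag first gets to signal completion, mirroring
 * the del_timer() test in isci_smp_task_done(). */
static void try_complete(const char *who)
{
        if (!atomic_flag_test_and_set(&claimed))
                printf("%s signals the completion\n", who);
        else
                printf("%s lost the race and does nothing\n", who);
}

int main(void)
{
        try_complete("done callback");   /* cf. isci_smp_task_done() */
        try_complete("timer handler");   /* cf. isci_smp_task_timedout() */
        return 0;
}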
+
+static struct sas_task *isci_alloc_task(void)
+{
+       struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
+
+       if (task) {
+               INIT_LIST_HEAD(&task->list);
+               spin_lock_init(&task->task_state_lock);
+               task->task_state_flags = SAS_TASK_STATE_PENDING;
+               init_timer(&task->timer);
+               init_completion(&task->completion);
+       }
+
+       return task;
+}
+
+static void isci_free_task(struct isci_host *ihost, struct sas_task  *task)
+{
+       if (task) {
+               BUG_ON(!list_empty(&task->list));
+               kfree(task);
+       }
+}
+
+static int isci_smp_execute_task(struct isci_host *ihost,
+                                struct domain_device *dev, void *req,
+                                int req_size, void *resp, int resp_size)
+{
+       int res, retry;
+       struct sas_task *task = NULL;
+
+       for (retry = 0; retry < 3; retry++) {
+               task = isci_alloc_task();
+               if (!task)
+                       return -ENOMEM;
+
+               task->dev = dev;
+               task->task_proto = dev->tproto;
+               sg_init_one(&task->smp_task.smp_req, req, req_size);
+               sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
+
+               task->task_done = isci_smp_task_done;
+
+               task->timer.data = (unsigned long) task;
+               task->timer.function = isci_smp_task_timedout;
+               task->timer.expires = jiffies + 10*HZ;
+               add_timer(&task->timer);
+
+               res = isci_task_execute_task(task, 1, GFP_KERNEL);
+
+               if (res) {
+                       del_timer(&task->timer);
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: executing SMP task failed:%d\n",
+                               __func__, res);
+                       goto ex_err;
+               }
+
+               wait_for_completion(&task->completion);
+               res = -ECOMM;
+               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: smp task timed out or aborted\n",
+                               __func__);
+                       isci_task_abort_task(task);
+                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+                               dev_dbg(&ihost->pdev->dev,
+                                       "%s: SMP task aborted and not done\n",
+                                       __func__);
+                               goto ex_err;
+                       }
+               }
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                   task->task_status.stat == SAM_STAT_GOOD) {
+                       res = 0;
+                       break;
+               }
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_UNDERRUN) {
+                       /* no error, but return the number of bytes of
+                        * underrun */
+                       res = task->task_status.residual;
+                       break;
+               }
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_OVERRUN) {
+                       res = -EMSGSIZE;
+                       break;
+               } else {
+                       dev_dbg(&ihost->pdev->dev,
+                               "%s: task to dev %016llx response: 0x%x "
+                               "status 0x%x\n", __func__,
+                               SAS_ADDR(dev->sas_addr),
+                               task->task_status.resp,
+                               task->task_status.stat);
+                       isci_free_task(ihost, task);
+                       task = NULL;
+               }
+       }
+ex_err:
+       BUG_ON(retry == 3 && task != NULL);
+       isci_free_task(ihost, task);
+       return res;
+}
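
Stripped of the SAS details, the loop above is a bounded retry: up to three
attempts, the most recent error is kept, and the first clean completion breaks
out. The same control flow in isolation, with a hypothetical try_once()
standing in for task submission:

#include <errno.h>
#include <stdio.h>

static int try_once(int attempt)
{
        /* Hypothetical operation: fail twice, then succeed. */
        return attempt < 2 ? -ECOMM : 0;
}

int main(void)
{
        int res = -ECOMM;

        for (int retry = 0; retry < 3; retry++) {
                res = try_once(retry);
                if (res == 0)
                        break;          /* first success wins */
        }
        printf("result after retries: %d\n", res);
        return 0;
}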
+
+#define DISCOVER_REQ_SIZE  16
+#define DISCOVER_RESP_SIZE 56
+
+int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
+                                      struct domain_device *dev,
+                                      int phy_id, int *adt)
+{
+       struct smp_resp *disc_resp;
+       u8 *disc_req;
+       int res;
+
+       disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
+       if (!disc_resp)
+               return -ENOMEM;
+
+       disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
+       if (disc_req) {
+               disc_req[0] = SMP_REQUEST;
+               disc_req[1] = SMP_DISCOVER;
+               disc_req[9] = phy_id;
+       } else {
+               kfree(disc_resp);
+               return -ENOMEM;
+       }
+       res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
+                                   disc_resp, DISCOVER_RESP_SIZE);
+       if (!res) {
+               if (disc_resp->result != SMP_RESP_FUNC_ACC)
+                       res = disc_resp->result;
+               else
+                       *adt = disc_resp->disc.attached_dev_type;
+       }
+       kfree(disc_req);
+       kfree(disc_resp);
+
+       return res;
+}
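
For reference, the DISCOVER request built above has only three non-zero bytes
in its 16-byte frame. A standalone sketch of that layout; the two constants
carry their usual SAS values (0x40 for an SMP request frame, 0x10 for the
DISCOVER function), and the phy number is illustrative:

#include <stdio.h>
#include <string.h>

#define SMP_REQUEST  0x40    /* frame type: SMP request */
#define SMP_DISCOVER 0x10    /* SMP function code: DISCOVER */

int main(void)
{
        unsigned char disc_req[16];
        int phy_id = 3;                  /* phy to query (illustrative) */

        memset(disc_req, 0, sizeof(disc_req));
        disc_req[0] = SMP_REQUEST;
        disc_req[1] = SMP_DISCOVER;
        disc_req[9] = phy_id;

        for (size_t i = 0; i < sizeof(disc_req); i++)
                printf("%02x%s", disc_req[i],
                       i + 1 < sizeof(disc_req) ? " " : "\n");
        return 0;
}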
+
+static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
+{
+       struct domain_device *dev = idev->domain_dev;
+       struct isci_port *iport = idev->isci_port;
+       struct isci_host *ihost = iport->isci_host;
+       int res, iteration = 0, attached_device_type;
+       #define STP_WAIT_MSECS 25000
+       unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
+       unsigned long deadline = jiffies + tmo;
+       enum {
+               SMP_PHYWAIT_PHYDOWN,
+               SMP_PHYWAIT_PHYUP,
+               SMP_PHYWAIT_DONE
+       } phy_state = SMP_PHYWAIT_PHYDOWN;
+
+       /* While there is time, wait for the phy to go away and come back */
+       while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
+               int event = atomic_read(&iport->event);
+
+               ++iteration;
+
+               tmo = wait_event_timeout(ihost->eventq,
+                                        event != atomic_read(&iport->event) ||
+                                        !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
+                                        tmo);
+               /* link down, stop polling */
+               if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
+                       break;
+
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: iport %p, iteration %d,"
+                       " phase %d: time_remaining %lu, bcns = %d\n",
+                       __func__, iport, iteration, phy_state,
+                       tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
+
+               res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
+                                                        &attached_device_type);
+               tmo = deadline - jiffies;
+
+               if (res) {
+                       dev_dbg(&ihost->pdev->dev,
+                                "%s: iteration %d, phase %d:"
+                                " SMP error=%d, time_remaining=%lu\n",
+                                __func__, iteration, phy_state, res, tmo);
+                       break;
+               }
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: iport %p, iteration %d,"
+                       " phase %d: time_remaining %lu, bcns = %d, "
+                       "attdevtype = %x\n",
+                       __func__, iport, iteration, phy_state,
+                       tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
+                       attached_device_type);
+
+               switch (phy_state) {
+               case SMP_PHYWAIT_PHYDOWN:
+                       /* Has the device gone away? */
+                       if (!attached_device_type)
+                               phy_state = SMP_PHYWAIT_PHYUP;
+
+                       break;
+
+               case SMP_PHYWAIT_PHYUP:
+                       /* Has the device come back? */
+                       if (attached_device_type)
+                               phy_state = SMP_PHYWAIT_DONE;
+                       break;
+
+               case SMP_PHYWAIT_DONE:
+                       break;
+               }
+
+       }
+       dev_dbg(&ihost->pdev->dev, "%s: done\n",  __func__);
+}
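
The wait above is a two-phase poll under a single deadline: first observe the
link drop (SMP_PHYWAIT_PHYDOWN to SMP_PHYWAIT_PHYUP), then observe it return
(to SMP_PHYWAIT_DONE). The bare state machine, with a simulated link in place
of the SMP DISCOVER polling; link_present() is hypothetical:

#include <stdio.h>
#include <time.h>

enum { PHYWAIT_PHYDOWN, PHYWAIT_PHYUP, PHYWAIT_DONE };

static int link_present(int tick)
{
        /* Simulated link: up, then down for a while, then up again. */
        return tick < 2 || tick > 5;
}

int main(void)
{
        int state = PHYWAIT_PHYDOWN;
        int tick = 0;
        time_t deadline = time(NULL) + 10;   /* overall bound on the wait */

        while (time(NULL) < deadline && state != PHYWAIT_DONE) {
                int up = link_present(tick++);

                if (state == PHYWAIT_PHYDOWN && !up)
                        state = PHYWAIT_PHYUP;    /* device went away */
                else if (state == PHYWAIT_PHYUP && up)
                        state = PHYWAIT_DONE;     /* device came back */
        }
        printf("%s\n", state == PHYWAIT_DONE ?
               "phy reset observed" : "timed out");
        return 0;
}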
+
+static int isci_reset_device(struct isci_host *ihost,
+                            struct isci_remote_device *idev)
+{
+       struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
+       struct isci_port *iport = idev->isci_port;
+       enum sci_status status;
+       unsigned long flags;
+       int rc;
+
+       dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       status = sci_remote_device_reset(idev);
+       if (status != SCI_SUCCESS) {
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: sci_remote_device_reset(%p) returned %d!\n",
+                        __func__, idev, status);
+
+               return TMF_RESP_FUNC_FAILED;
+       }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* Make sure all pending requests are able to be fully terminated. */
+       isci_device_clear_reset_pending(ihost, idev);
+
+       /* If this is a device on an expander, disable BCN processing. */
+       if (!scsi_is_sas_phy_local(phy))
+               set_bit(IPORT_BCN_BLOCKED, &iport->flags);
+
+       rc = sas_phy_reset(phy, true);
+
+       /* Terminate in-progress I/O now. */
+       isci_remote_device_nuke_requests(ihost, idev);
+
+       /* Since all pending TCs have been cleaned, resume the RNC. */
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       status = sci_remote_device_reset_complete(idev);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* If this is a device on an expander, bring the phy back up. */
+       if (!scsi_is_sas_phy_local(phy)) {
+               /* A phy reset will cause the device to go away then reappear.
+                * Since libsas will take action on incoming BCNs (eg. remove
+                * a device going through an SMP phy-control driven reset),
+                * we need to wait until the phy comes back up before letting
+                * discovery proceed in libsas.
+                */
+               isci_wait_for_smp_phy_reset(idev, phy->number);
+
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               isci_port_bcn_enable(ihost, idev->isci_port);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+       }
+
+       if (status != SCI_SUCCESS) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: sci_remote_device_reset_complete(%p) "
+                        "returned %d!\n", __func__, idev, status);
+       }
+
+       dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
+
+       return rc;
+}
+
+int isci_task_I_T_nexus_reset(struct domain_device *dev)
+{
+       struct isci_host *ihost = dev_to_ihost(dev);
+       struct isci_remote_device *idev;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       idev = isci_lookup_device(dev);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+               ret = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       }
+
+       ret = isci_reset_device(ihost, idev);
+ out:
+       isci_put_device(idev);
+       return ret;
+}
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+       struct domain_device *dev = sdev_to_domain_dev(cmd->device);
+       struct isci_host *ihost = dev_to_ihost(dev);
+       struct isci_remote_device *idev;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       idev = isci_lookup_device(dev);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (!idev) {
+               ret = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       }
+
+       ret = isci_reset_device(ihost, idev);
+ out:
+       isci_put_device(idev);
+       return ret;
+}
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
new file mode 100644 (file)
index 0000000..4a7fa90
--- /dev/null
@@ -0,0 +1,367 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_TASK_H_
+#define _ISCI_TASK_H_
+
+#include <scsi/sas_ata.h>
+#include "host.h"
+
+struct isci_request;
+
+/**
+ * enum isci_tmf_cb_state - This enum defines the possible states in which the
+ *    TMF callback function is invoked during the TMF execution process.
+ *
+ *
+ */
+enum isci_tmf_cb_state {
+
+       isci_tmf_init_state = 0,
+       isci_tmf_started,
+       isci_tmf_timed_out
+};
+
+/**
+ * enum isci_tmf_function_codes - This enum defines the possible preparations
+ *    of task management requests.
+ *
+ *
+ */
+enum isci_tmf_function_codes {
+
+       isci_tmf_func_none      = 0,
+       isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
+       isci_tmf_ssp_lun_reset  = TMF_LU_RESET,
+       isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
+       isci_tmf_sata_srst_low  = TMF_LU_RESET + 0x101  /* Non SCSI */
+};
+/**
+ * struct isci_tmf - This class represents the task management object which
+ *    acts as an interface to libsas for processing task management requests
+ *
+ *
+ */
+struct isci_tmf {
+
+       struct completion *complete;
+       enum sas_protocol proto;
+       union {
+               struct ssp_response_iu resp_iu;
+               struct dev_to_host_fis d2h_fis;
+               u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+       } resp;
+       unsigned char lun[8];
+       u16 io_tag;
+       struct isci_remote_device *device;
+       enum isci_tmf_function_codes tmf_code;
+       int status;
+
+       /* The optional callback function allows the user process to
+        * track the TMF transmit / timeout conditions.
+        */
+       void (*cb_state_func)(
+               enum isci_tmf_cb_state,
+               struct isci_tmf *, void *);
+       void *cb_data;
+
+};
+
+static inline void isci_print_tmf(struct isci_tmf *tmf)
+{
+       if (SAS_PROTOCOL_SATA == tmf->proto)
+               dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+                       "%s: status = %x\n"
+                       "tmf->resp.d2h_fis.status = %x\n"
+                       "tmf->resp.d2h_fis.error = %x\n",
+                       __func__,
+                       tmf->status,
+                       tmf->resp.d2h_fis.status,
+                       tmf->resp.d2h_fis.error);
+       else
+               dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+                       "%s: status = %x\n"
+                       "tmf->resp.resp_iu.data_present = %x\n"
+                       "tmf->resp.resp_iu.status = %x\n"
+                       "tmf->resp.resp_iu.data_length = %x\n"
+                       "tmf->resp.resp_iu.data[0] = %x\n"
+                       "tmf->resp.resp_iu.data[1] = %x\n"
+                       "tmf->resp.resp_iu.data[2] = %x\n"
+                       "tmf->resp.resp_iu.data[3] = %x\n",
+                       __func__,
+                       tmf->status,
+                       tmf->resp.resp_iu.datapres,
+                       tmf->resp.resp_iu.status,
+                       be32_to_cpu(tmf->resp.resp_iu.response_data_len),
+                       tmf->resp.resp_iu.resp_data[0],
+                       tmf->resp.resp_iu.resp_data[1],
+                       tmf->resp.resp_iu.resp_data[2],
+                       tmf->resp.resp_iu.resp_data[3]);
+}
+
+
+int isci_task_execute_task(
+       struct sas_task *task,
+       int num,
+       gfp_t gfp_flags);
+
+int isci_task_abort_task(
+       struct sas_task *task);
+
+int isci_task_abort_task_set(
+       struct domain_device *d_device,
+       u8 *lun);
+
+int isci_task_clear_aca(
+       struct domain_device *d_device,
+       u8 *lun);
+
+int isci_task_clear_task_set(
+       struct domain_device *d_device,
+       u8 *lun);
+
+int isci_task_query_task(
+       struct sas_task *task);
+
+int isci_task_lu_reset(
+       struct domain_device *d_device,
+       u8 *lun);
+
+int isci_task_clear_nexus_port(
+       struct asd_sas_port *port);
+
+int isci_task_clear_nexus_ha(
+       struct sas_ha_struct *ha);
+
+int isci_task_I_T_nexus_reset(
+       struct domain_device *d_device);
+
+void isci_task_request_complete(
+       struct isci_host *isci_host,
+       struct isci_request *request,
+       enum sci_task_status completion_status);
+
+u16 isci_task_ssp_request_get_io_tag_to_manage(
+       struct isci_request *request);
+
+u8 isci_task_ssp_request_get_function(
+       struct isci_request *request);
+
+
+void *isci_task_ssp_request_get_response_data_address(
+       struct isci_request *request);
+
+u32 isci_task_ssp_request_get_response_data_length(
+       struct isci_request *request);
+
+int isci_queuecommand(
+       struct scsi_cmnd *scsi_cmd,
+       void (*donefunc)(struct scsi_cmnd *));
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd);
+
+/**
+ * enum isci_completion_selection - This enum defines the possible actions to
+ *    take with respect to a given request's notification back to libsas.
+ *
+ *
+ */
+enum isci_completion_selection {
+
+       isci_perform_normal_io_completion,      /* Normal notify (task_done) */
+       isci_perform_aborted_io_completion,     /* No notification.   */
+       isci_perform_error_io_completion        /* Use sas_task_abort */
+};
+
+static inline void isci_set_task_doneflags(
+       struct sas_task *task)
+{
+       /* Since no further action will be taken on this task,
+        * make sure to mark it complete from the lldd perspective.
+        */
+       task->task_state_flags |= SAS_TASK_STATE_DONE;
+       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+}
+/**
+ * isci_task_all_done() - This function clears the task bits to indicate the
+ *    LLDD is done with the task.
+ *
+ *
+ */
+static inline void isci_task_all_done(
+       struct sas_task *task)
+{
+       unsigned long flags;
+
+       /* Since no further action will be taken on this task,
+        * make sure to mark it complete from the lldd perspective.
+        */
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       isci_set_task_doneflags(task);
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+}
+
+/**
+ * isci_task_set_completion_status() - This function sets the completion status
+ *    for the request.
+ * @task: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ * @task_notification_selection: This parameter is the requested notification
+ *    mode for the request.
+ *
+ * @return The new notification mode for the request.
+ */
+static inline enum isci_completion_selection
+isci_task_set_completion_status(
+       struct sas_task *task,
+       enum service_response response,
+       enum exec_status status,
+       enum isci_completion_selection task_notification_selection)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&task->task_state_lock, flags);
+
+       /* If a device reset is being indicated, make sure the I/O
+        * is in the error path.
+        */
+       if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
+               /* Fail the I/O to make sure it goes into the error path. */
+               response = SAS_TASK_UNDELIVERED;
+               status = SAM_STAT_TASK_ABORTED;
+
+               task_notification_selection = isci_perform_error_io_completion;
+       }
+       task->task_status.resp = response;
+       task->task_status.stat = status;
+
+       switch (task_notification_selection) {
+
+       case isci_perform_error_io_completion:
+
+               if (task->task_proto == SAS_PROTOCOL_SMP) {
+                       /* There is no error escalation in the SMP case.
+                        * Convert to a normal completion to avoid the
+                        * timeout in the discovery path and to let the
+                        * next action take place quickly.
+                        */
+                       task_notification_selection
+                               = isci_perform_normal_io_completion;
+
+                       /* Fall through to the normal case... */
+               } else {
+                       /* Use sas_task_abort */
+                       /* Leave SAS_TASK_STATE_DONE clear
+                        * Leave SAS_TASK_AT_INITIATOR set.
+                        */
+                       break;
+               }
+
+       case isci_perform_aborted_io_completion:
+               /* This path can occur with task-managed requests as well as
+                * requests terminated because of LUN or device resets.
+                */
+               /* Fall through to the normal case... */
+       case isci_perform_normal_io_completion:
+               /* Normal notification (task_done) */
+               isci_set_task_doneflags(task);
+               break;
+       default:
+               WARN_ONCE(1, "unknown task_notification_selection: %d\n",
+                        task_notification_selection);
+               break;
+       }
+
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+       return task_notification_selection;
+
+}
+/**
+ * isci_execpath_callback() - This function is called from the task
+ *    execute path when the task needs to call back into libsas about the
+ *    submit-time task failure.  The callback occurs either through the task's
+ *    done function or through sas_task_abort.  In the case of regular
+ *    non-discovery SATA/STP I/O requests, libsas takes the host lock before
+ *    calling execute task.  Therefore in this situation the host lock must be
+ *    managed before calling the func.
+ * @ihost: This parameter is the controller to which the I/O request was sent.
+ * @task: This parameter is the I/O request.
+ * @func: This parameter is the function to call in the correct context.
+ *
+ */
+static inline void isci_execpath_callback(struct isci_host *ihost,
+                                         struct sas_task  *task,
+                                         void (*func)(struct sas_task *))
+{
+       struct domain_device *dev = task->dev;
+
+       if (dev_is_sata(dev) && task->uldd_task) {
+               unsigned long flags;
+
+               /* Since we are still in the submit path, and since
+                * libsas takes the host lock on behalf of SATA
+                * devices before I/O starts (in the non-discovery case),
+                * we need to unlock before we can call the callback function.
+                */
+               raw_local_irq_save(flags);
+               spin_unlock(dev->sata_dev.ap->lock);
+               func(task);
+               spin_lock(dev->sata_dev.ap->lock);
+               raw_local_irq_restore(flags);
+       } else
+               func(task);
+}
+#endif /* !defined(_ISCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
new file mode 100644 (file)
index 0000000..e9e1e2a
--- /dev/null
@@ -0,0 +1,225 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "unsolicited_frame_control.h"
+#include "registers.h"
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+{
+       struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
+       struct sci_unsolicited_frame *uf;
+       u32 buf_len, header_len, i;
+       dma_addr_t dma;
+       size_t size;
+       void *virt;
+
+       /*
+        * Prepare all of the memory sizes for the UF headers, UF address
+        * table, and UF buffers themselves.
+        */
+       buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+       header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
+       size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
+
+       /*
+        * The Unsolicited Frame buffers are set at the start of the UF
+        * memory descriptor entry. The headers and address table will be
+        * placed after the buffers.
+        */
+       virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
+       if (!virt)
+               return -ENOMEM;
+
+       /*
+        * Program the location of the UF header table into the SCU.
+        * Notes:
+        * - The address must align on a 64-byte boundary. Guaranteed to be
+        *   on a 64-byte boundary already, since the unsolicited frame
+        *   buffers are aligned on a 1KB boundary.
+        * - Program unused header entries to overlap with the last
+        *   unsolicited frame.  The silicon will never DMA to these unused
+        *   headers, since we program the UF address table pointers to
+        *   NULL.
+        */
+       uf_control->headers.physical_address = dma + buf_len;
+       uf_control->headers.array = virt + buf_len;
+
+       /*
+        * Program the location of the UF address table into the SCU.
+        * Notes:
+        * - The address must align on a 64-bit boundary. Guaranteed to be on
+        *   a 64-byte boundary already, since the headers programmed above
+        *   start on a 64-byte boundary and each header is 64 bytes in size.
+        */
+       uf_control->address_table.physical_address = dma + buf_len + header_len;
+       uf_control->address_table.array = virt + buf_len + header_len;
+       uf_control->get = 0;
+
+       /*
+        * UF buffer requirements are:
+        * - The last entry in the UF queue is not NULL.
+        * - There is a power of 2 number of entries (NULL or not-NULL)
+        *   programmed into the queue.
+        * - Aligned on a 1KB boundary.
+        */
+
+       /*
+        * Program the actual used UF buffers into the UF address table and
+        * the controller's array of UFs.
+        */
+       for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
+               uf = &uf_control->buffers.array[i];
+
+               uf_control->address_table.array[i] = dma;
+
+               uf->buffer = virt;
+               uf->header = &uf_control->headers.array[i];
+               uf->state  = UNSOLICITED_FRAME_EMPTY;
+
+               /*
+                * Advance the physical and virtual memory pointers.
+                * Everything is aligned on a 1KB boundary and advances
+                * in 1KB increments.
+                */
+               virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+               dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+       }
+
+       return 0;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
+                                                        u32 frame_index,
+                                                        void **frame_header)
+{
+       if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+               /* Skip the first word in the frame since this is a control word used
+                * by the hardware.
+                */
+               *frame_header = &uf_control->buffers.array[frame_index].header->data;
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
+                                                        u32 frame_index,
+                                                        void **frame_buffer)
+{
+       if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+               *frame_buffer = uf_control->buffers.array[frame_index].buffer;
+
+               return SCI_SUCCESS;
+       }
+
+       return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
+                                                u32 frame_index)
+{
+       u32 frame_get;
+       u32 frame_cycle;
+
+       frame_get   = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
+       frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
+
+       /*
+        * In the event there are NULL entries in the UF table, we need to
+        * advance the get pointer past them in order to determine whether
+        * releasing this frame should also update the hardware get pointer.
+        */
+       while (frame_get < SCU_MAX_UNSOLICITED_FRAMES &&
+              lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+              upper_32_bits(uf_control->address_table.array[frame_get]) == 0)
+               frame_get++;
+
+       /*
+        * A table whose last element is NULL is illegal, so running off
+        * the end of the table here is a bug.
+        */
+       BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
+       if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
+               return false;
+
+       uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
+
+       if (frame_get != frame_index) {
+               /*
+                * Frames remain in use until we advance the get pointer,
+                * so there is nothing we can do here.
+                */
+               return false;
+       }
+
+       /*
+        * The frame index is equal to the current get pointer, so we
+        * can now free up every consecutive frame entry that has been
+        * released.
+        */
+       while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
+               uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
+
+               if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
+                       frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
+                       frame_get = 0;
+               } else
+                       frame_get++;
+       }
+
+       uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
+
+       return true;
+}
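
For context: the construct routine above carves three regions out of a single coherent allocation, 1KB frame buffers first, then 64-byte headers, then the 64-bit address table. A minimal standalone sketch of that layout arithmetic follows; the SCU_* constants are replaced by hypothetical stand-ins, not the driver's real values.

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the driver's SCU_* constants. */
    #define MAX_FRAMES      32      /* SCU_MAX_UNSOLICITED_FRAMES */
    #define FRAME_BUF_SIZE  1024    /* 1KB unsolicited frame buffer */
    #define HEADER_SIZE     64      /* one scu_unsolicited_frame_header */

    int main(void)
    {
            size_t buf_len    = MAX_FRAMES * FRAME_BUF_SIZE;
            size_t header_len = MAX_FRAMES * HEADER_SIZE;
            size_t table_len  = MAX_FRAMES * sizeof(uint64_t);

            /* One allocation: buffers, then headers, then address table. */
            printf("buffers at offset 0x%zx\n", (size_t)0);
            printf("headers at offset 0x%zx\n", buf_len);
            printf("table   at offset 0x%zx\n", buf_len + header_len);
            printf("total   size      0x%zx\n", buf_len + header_len + table_len);
            return 0;
    }

Because the buffers are 1KB-aligned and sized, the header and table regions inherit the alignment guarantees noted in the comments above.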
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
new file mode 100644 (file)
index 0000000..31cb950
--- /dev/null
@@ -0,0 +1,278 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+
+#include "isci.h"
+
+#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
+
+/**
+ * struct scu_unsolicited_frame_header -
+ *
+ * This structure delineates the format of an unsolicited frame header. The
+ * first DWORD contains UF attributes defined by the silicon architecture;
+ * the data field holds the actual header information received on the link.
+ */
+struct scu_unsolicited_frame_header {
+       /**
+        * This field indicates if there is an Initiator Index Table entry with
+        * which this header is associated.
+        */
+       u32 iit_exists:1;
+
+       /**
+        * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
+        */
+       u32 protocol_type:3;
+
+       /**
+        * This field indicates if the frame is an address frame (IAF or OAF)
+        * or if it is an information unit frame.
+        */
+       u32 is_address_frame:1;
+
+       /**
+        * This field simply indicates the connection rate at which the frame
+        * was received.
+        */
+       u32 connection_rate:4;
+
+       u32 reserved:23;
+
+       /**
+        * This field represents the actual header data received on the link.
+        */
+       u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
+
+};
+
+
+
+/**
+ * enum unsolicited_frame_state -
+ *
+ * This enumeration represents the current unsolicited frame state.  The
+ * controller object cannot update the hardware unsolicited frame put pointer
+ * unless it has already processed the prior unsolicited frames.
+ */
+enum unsolicited_frame_state {
+       /**
+        * This state is when the frame is empty and not in use.  It is
+        * different from the released state in that the hardware could DMA
+        * data to this frame buffer.
+        */
+       UNSOLICITED_FRAME_EMPTY,
+
+       /**
+        * This state is set when the frame buffer is in use by some
+        * object in the system.
+        */
+       UNSOLICITED_FRAME_IN_USE,
+
+       /**
+        * This state is set when the frame is returned to the free pool
+        * but one or more frames prior to this one are still in use.
+        * Once all of the frames before this one are freed, it will go to
+        * the empty state.
+        */
+       UNSOLICITED_FRAME_RELEASED,
+
+       UNSOLICITED_FRAME_MAX_STATES
+};
+
+/**
+ * struct sci_unsolicited_frame -
+ *
+ * This is the unsolicited frame data structure; it acts as the container for
+ * the current frame state, frame header, and frame buffer.
+ */
+struct sci_unsolicited_frame {
+       /**
+        * This field contains the current frame state
+        */
+       enum unsolicited_frame_state state;
+
+       /**
+        * This field points to the frame header data.
+        */
+       struct scu_unsolicited_frame_header *header;
+
+       /**
+        * This field points to the frame buffer data.
+        */
+       void *buffer;
+
+};
+
+/**
+ * struct sci_uf_header_array -
+ *
+ * This structure contains all of the unsolicited frame header information.
+ */
+struct sci_uf_header_array {
+       /**
+        * This field represents a virtual pointer to the start
+        * address of the UF header array.
+        */
+       struct scu_unsolicited_frame_header *array;
+
+       /**
+        * This field specifies the physical address location for the UF
+        * header array.
+        */
+       dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_uf_buffer_array -
+ *
+ * This structure contains all of the unsolicited frame buffer (actual payload)
+ * information.
+ */
+struct sci_uf_buffer_array {
+       /**
+        * This field holds the unsolicited frame objects used to manage
+        * the data for the unsolicited frame requests.  It also represents
+        * the virtual address location that corresponds to the
+        * physical_address field.
+        */
+       struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
+
+       /**
+        * This field specifies the physical address location for the UF
+        * buffer array.
+        */
+       dma_addr_t physical_address;
+};
+
+/**
+ * struct sci_uf_address_table_array -
+ *
+ * This object maintains all of the unsolicited frame address table specific
+ * data.  The address table is a collection of 64-bit pointers that point to
+ * 1KB buffers into which the silicon will DMA unsolicited frames.
+ */
+struct sci_uf_address_table_array {
+       /**
+        * This field represents a virtual pointer that refers to the
+        * starting address of the UF address table.
+        * The table holds 64-bit pointers, as required by the hardware.
+        */
+       dma_addr_t *array;
+
+       /**
+        * This field specifies the physical address location for the UF
+        * address table.
+        */
+       dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_unsolicited_frame_control -
+ *
+ * This object contains all of the data necessary to handle unsolicited frames.
+ */
+struct sci_unsolicited_frame_control {
+       /**
+        * This field is the software copy of the unsolicited frame queue
+        * get pointer.  The controller object writes this value to the
+        * hardware to let it reuse freed entries for more unsolicited frames.
+        */
+       u32 get;
+
+       /**
+        * This field contains all of the unsolicited frame header
+        * specific fields.
+        */
+       struct sci_uf_header_array headers;
+
+       /**
+        * This field contains all of the unsolicited frame buffer
+        * specific fields.
+        */
+       struct sci_uf_buffer_array buffers;
+
+       /**
+        * This field contains all of the unsolicited frame address table
+        * specific fields.
+        */
+       struct sci_uf_address_table_array address_table;
+
+};
+
+struct isci_host;
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+
+enum sci_status sci_unsolicited_frame_control_get_header(
+       struct sci_unsolicited_frame_control *uf_control,
+       u32 frame_index,
+       void **frame_header);
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(
+       struct sci_unsolicited_frame_control *uf_control,
+       u32 frame_index,
+       void **frame_buffer);
+
+bool sci_unsolicited_frame_control_release_frame(
+       struct sci_unsolicited_frame_control *uf_control,
+       u32 frame_index);
+
+#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
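
For context: sci_unsolicited_frame_control_release_frame() in the .c file above splits uf_control->get into an index (get & (N - 1)) and a cycle bit (get & N); the cycle bit flips on every wrap so producer and consumer can distinguish a full queue from an empty one. A small standalone sketch of that decomposition, assuming a hypothetical power-of-two queue size:

    #include <stdio.h>

    #define NFRAMES 32u     /* hypothetical; must be a power of two */

    int main(void)
    {
            /* Walk a raw pointer through one full wrap of the queue. */
            for (unsigned int raw = 0; raw < 2 * NFRAMES; raw += 8) {
                    unsigned int index = raw & (NFRAMES - 1); /* slot     */
                    unsigned int cycle = raw & NFRAMES;       /* wrap bit */
                    printf("raw=%2u index=%2u cycle=%u\n",
                           raw, index, cycle ? 1 : 0);
            }
            return 0;
    }

In the driver, the composite cycle|index value is additionally OR'd with SCU_UFQGP_GEN_BIT(ENABLE_BIT) before being written back to the hardware.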
index f706dba165cf6812fc364271d3086424ce12aa36..cc880c95e7de7dfee978319ebfb7aecd74a4f355 100644 (file)
@@ -681,13 +681,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
        drv_data->cs_change = transfer->cs_change;
 
        /* Bits per word setup */
-       bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
-       if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) {
+       bits_per_word = transfer->bits_per_word ? :
+               message->spi->bits_per_word ? : 8;
+       if (bits_per_word % 16 == 0) {
                drv_data->n_bytes = bits_per_word/8;
                drv_data->len = (transfer->len) >> 1;
                cr_width = BIT_CTL_WORDSIZE;
                drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
-       } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) {
+       } else if (bits_per_word % 8 == 0) {
                drv_data->n_bytes = bits_per_word/8;
                drv_data->len = transfer->len;
                cr_width = 0;
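
For context: the bfin_spi hunk above replaces the `bits_per_word > 0` guards with an explicit fallback chain, so a transfer that leaves bits_per_word at zero inherits the device default and ultimately 8 bits. A runnable sketch of the GNU `? :` (elvis) chain it relies on, with hypothetical names:

    #include <stdio.h>

    /* GNU C extension: "a ? : b" evaluates to a when a is non-zero, else b. */
    static unsigned int pick_bits(unsigned int xfer_bpw, unsigned int dev_bpw)
    {
            return xfer_bpw ? : dev_bpw ? : 8;
    }

    int main(void)
    {
            printf("%u\n", pick_bits(16, 8));  /* 16: per-transfer override */
            printf("%u\n", pick_bits(0, 12));  /* 12: device default        */
            printf("%u\n", pick_bits(0, 0));   /*  8: final fallback        */
            return 0;
    }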
index 795828b90f45ebe4eef243d4abccfebee503f2f7..8945e201e42eb5a57d66d4a4f8ab762314f601a6 100644 (file)
                                        (((i)->fifo_lvl_mask + 1))) \
                                        ? 1 : 0)
 
-#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
-                                       (((i)->fifo_lvl_mask + 1) << 1)) \
-                                       ? 1 : 0)
+#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & (1 << (i)->tx_st_done)) ? 1 : 0)
 #define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
 #define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
 
index 2a20dabec76d722d1c311aad9e1e34a13382b6d2..d6620ad309ce9489f35358cf6644bd20b8e96888 100644 (file)
@@ -516,8 +516,17 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
 
 static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
+       ssb_pcicore_fix_sprom_core_index(pc);
+
        /* Disable PCI interrupts. */
        ssb_write32(pc->dev, SSB_INTVEC, 0);
+
+       /* Additional PCIe always once-executed workarounds */
+       if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+               ssb_pcicore_serdes_workaround(pc);
+               /* TODO: ASPM */
+               /* TODO: Clock Request Update */
+       }
 }
 
 void ssb_pcicore_init(struct ssb_pcicore *pc)
@@ -529,8 +538,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
        if (!ssb_device_is_enabled(dev))
                ssb_device_enable(dev, 0);
 
-       ssb_pcicore_fix_sprom_core_index(pc);
-
 #ifdef CONFIG_SSB_PCICORE_HOSTMODE
        pc->hostmode = pcicore_is_in_hostmode(pc);
        if (pc->hostmode)
@@ -538,13 +545,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
 #endif /* CONFIG_SSB_PCICORE_HOSTMODE */
        if (!pc->hostmode)
                ssb_pcicore_init_clientmode(pc);
-
-       /* Additional PCIe always once-executed workarounds */
-       if (dev->id.coreid == SSB_DEV_PCIE) {
-               ssb_pcicore_serdes_workaround(pc);
-               /* TODO: ASPM */
-               /* TODO: Clock Request Update */
-       }
 }
 
 static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
index f4cf9b23481e0ffad692f3158ded5fec6a069151..379cf16e89f7a29a28a9f951d14c0c32c05f15fd 100644 (file)
@@ -7,6 +7,7 @@ config BRCMSMAC
        default n
        depends on PCI
        depends on WLAN && MAC80211
+       depends on X86 || MIPS
        select BRCMUTIL
        select FW_LOADER
        select CRC_CCITT
@@ -20,6 +21,7 @@ config BRCMFMAC
        default n
        depends on MMC
        depends on WLAN && CFG80211
+       depends on X86 || MIPS
        select BRCMUTIL
        select FW_LOADER
        select WIRELESS_EXT
index 1502d80f6f787c316acac00f3a54d80af181a721..20008a4376e8924895a2b245a165098c12857d13 100644 (file)
@@ -2,6 +2,7 @@ config COMEDI
        tristate "Data acquisition support (comedi)"
        default N
        depends on m
+       depends on BROKEN || FRV || M32R || MN10300 || SUPERH || TILE || X86
        ---help---
          Enable support a wide range of data acquisition devices
          for Linux.
@@ -160,6 +161,7 @@ config COMEDI_PCL730
 
 config COMEDI_PCL812
        tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
@@ -171,6 +173,7 @@ config COMEDI_PCL812
 
 config COMEDI_PCL816
        tristate "Advantech PCL-814 and PCL-816 ISA card support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -180,6 +183,7 @@ config COMEDI_PCL816
 
 config COMEDI_PCL818
        tristate "Advantech PCL-718 and PCL-818 ISA card support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for Advantech PCL-818 ISA cards
@@ -269,6 +273,7 @@ config COMEDI_DAS800
 
 config COMEDI_DAS1800
        tristate "DAS1800 and compatible ISA card support"
+       depends on VIRT_TO_BUS
        select COMEDI_FC
        default N
        ---help---
@@ -340,6 +345,7 @@ config COMEDI_DT2817
 config COMEDI_DT282X
        tristate "Data Translation DT2821 series and DT-EZ ISA card support"
        select COMEDI_FC
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for Data Translation DT2821 series including DT-EZ
@@ -419,6 +425,7 @@ config COMEDI_ADQ12B
 config COMEDI_NI_AT_A2150
        tristate "NI AT-A2150 ISA card support"
        depends on COMEDI_NI_COMMON
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for National Instruments AT-A2150 cards
@@ -536,6 +543,7 @@ if COMEDI_PCI_DRIVERS && PCI
 
 config COMEDI_ADDI_APCI_035
        tristate "ADDI-DATA APCI_035 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_035 cards
@@ -545,6 +553,7 @@ config COMEDI_ADDI_APCI_035
 
 config COMEDI_ADDI_APCI_1032
        tristate "ADDI-DATA APCI_1032 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_1032 cards
@@ -554,6 +563,7 @@ config COMEDI_ADDI_APCI_1032
 
 config COMEDI_ADDI_APCI_1500
        tristate "ADDI-DATA APCI_1500 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_1500 cards
@@ -563,6 +573,7 @@ config COMEDI_ADDI_APCI_1500
 
 config COMEDI_ADDI_APCI_1516
        tristate "ADDI-DATA APCI_1516 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_1516 cards
@@ -572,6 +583,7 @@ config COMEDI_ADDI_APCI_1516
 
 config COMEDI_ADDI_APCI_1564
        tristate "ADDI-DATA APCI_1564 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_1564 cards
@@ -581,6 +593,7 @@ config COMEDI_ADDI_APCI_1564
 
 config COMEDI_ADDI_APCI_16XX
        tristate "ADDI-DATA APCI_16xx support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_16xx cards
@@ -590,6 +603,7 @@ config COMEDI_ADDI_APCI_16XX
 
 config COMEDI_ADDI_APCI_2016
        tristate "ADDI-DATA APCI_2016 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_2016 cards
@@ -599,6 +613,7 @@ config COMEDI_ADDI_APCI_2016
 
 config COMEDI_ADDI_APCI_2032
        tristate "ADDI-DATA APCI_2032 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_2032 cards
@@ -608,6 +623,7 @@ config COMEDI_ADDI_APCI_2032
 
 config COMEDI_ADDI_APCI_2200
        tristate "ADDI-DATA APCI_2200 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_2200 cards
@@ -617,6 +633,7 @@ config COMEDI_ADDI_APCI_2200
 
 config COMEDI_ADDI_APCI_3001
        tristate "ADDI-DATA APCI_3001 support"
+       depends on VIRT_TO_BUS
        select COMEDI_FC
        default N
        ---help---
@@ -627,6 +644,7 @@ config COMEDI_ADDI_APCI_3001
 
 config COMEDI_ADDI_APCI_3120
        tristate "ADDI-DATA APCI_3520 support"
+       depends on VIRT_TO_BUS
        select COMEDI_FC
        default N
        ---help---
@@ -637,6 +655,7 @@ config COMEDI_ADDI_APCI_3120
 
 config COMEDI_ADDI_APCI_3501
        tristate "ADDI-DATA APCI_3501 support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_3501 cards
@@ -646,6 +665,7 @@ config COMEDI_ADDI_APCI_3501
 
 config COMEDI_ADDI_APCI_3XXX
        tristate "ADDI-DATA APCI_3xxx support"
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADDI-DATA APCI_3xxx cards
@@ -712,6 +732,7 @@ config COMEDI_ADL_PCI9111
 config COMEDI_ADL_PCI9118
        tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
        select COMEDI_FC
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@@ -1287,6 +1308,7 @@ config COMEDI_NI_LABPC
        depends on COMEDI_MITE
        select COMEDI_8255
        select COMEDI_FC
+       depends on VIRT_TO_BUS
        default N
        ---help---
          Enable support for National Instruments Lab-PC and compatibles
index f96d5b5d5141cabe75c79a722b6811135ed9b90e..d329635fb5c4f970cace5bb91468db327a4b4326 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig IIO
        tristate "Industrial I/O support"
-       depends on !S390
+       depends on GENERIC_HARDIRQS
        help
          The industrial I/O subsystem provides a unified framework for
          drivers for many different types of embedded sensors using a
index 5310a4297688ba93bfa2a350024e9e8f8457fb4c..1690c0d15690d13e5c4340f1ca5f93ca59b3aa4b 100644 (file)
@@ -84,7 +84,6 @@ struct adis16204_state {
 
 int adis16204_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
 enum adis16204_scan {
        ADIS16204_SCAN_SUPPLY,
        ADIS16204_SCAN_ACC_X,
@@ -93,6 +92,7 @@ enum adis16204_scan {
        ADIS16204_SCAN_TEMP,
 };
 
+#ifdef CONFIG_IIO_RING_BUFFER
 void adis16204_remove_trigger(struct iio_dev *indio_dev);
 int adis16204_probe_trigger(struct iio_dev *indio_dev);
 
index 58d08db6f9b50488526dc651923ec6958e6950bc..3153cbee0957d6956f9a698f10f0eb3d02fbbe74 100644 (file)
@@ -121,8 +121,6 @@ struct adis16209_state {
 
 int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
-
 #define ADIS16209_SCAN_SUPPLY  0
 #define ADIS16209_SCAN_ACC_X   1
 #define ADIS16209_SCAN_ACC_Y   2
@@ -132,6 +130,8 @@ int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
 #define ADIS16209_SCAN_INCLI_Y 6
 #define ADIS16209_SCAN_ROT     7
 
+#ifdef CONFIG_IIO_RING_BUFFER
+
 void adis16209_remove_trigger(struct iio_dev *indio_dev);
 int adis16209_probe_trigger(struct iio_dev *indio_dev);
 
index 702dc982f62f9e25cc351ca88688f08f3a1fbd42..24bf70e4b29bb0adde776a7e653acc29f8ab9f80 100644 (file)
@@ -104,7 +104,6 @@ struct adis16260_state {
 
 int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
 /* At the moment triggers are only used for ring buffer
  * filling. This may change!
  */
@@ -115,6 +114,7 @@ int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
 #define ADIS16260_SCAN_TEMP    3
 #define ADIS16260_SCAN_ANGL    4
 
+#ifdef CONFIG_IIO_RING_BUFFER
 void adis16260_remove_trigger(struct iio_dev *indio_dev);
 int adis16260_probe_trigger(struct iio_dev *indio_dev);
 
index db184d11dfc0ff85d12ee311cdb7d829d5f9334b..e87715b9acc60fa9de8e27a7c66f386f65312dfa 100644 (file)
@@ -158,7 +158,6 @@ struct adis16400_state {
 
 int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
 
-#ifdef CONFIG_IIO_RING_BUFFER
 /* At the moment triggers are only used for ring buffer
  * filling. This may change!
  */
@@ -182,6 +181,7 @@ int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
 #define ADIS16300_SCAN_INCLI_X 12
 #define ADIS16300_SCAN_INCLI_Y 13
 
+#ifdef CONFIG_IIO_RING_BUFFER
 void adis16400_remove_trigger(struct iio_dev *indio_dev);
 int adis16400_probe_trigger(struct iio_dev *indio_dev);
 
index 4039eda2a15ba9cb769b1d5588e9a6a09aa4f56a..4a9e563f40fa17ce45db1b591a511b7d2aae5b96 100644 (file)
@@ -672,8 +672,6 @@ static void imon_incoming_packet(struct imon_context *context,
 static void usb_rx_callback(struct urb *urb)
 {
        struct imon_context *context;
-       unsigned char *buf;
-       int len;
        int intfnum = 0;
 
        if (!urb)
@@ -683,9 +681,6 @@ static void usb_rx_callback(struct urb *urb)
        if (!context)
                return;
 
-       buf = urb->transfer_buffer;
-       len = urb->actual_length;
-
        switch (urb->status) {
        case -ENOENT:           /* usbcore unlink successful! */
                return;
@@ -728,7 +723,6 @@ static int imon_probe(struct usb_interface *interface,
        int ir_ep_found = 0;
        int alloc_status = 0;
        int vfd_proto_6p = 0;
-       int code_length;
        struct imon_context *context = NULL;
        int i;
        u16 vendor, product;
@@ -749,8 +743,6 @@ static int imon_probe(struct usb_interface *interface,
        else
                context->display = 1;
 
-       code_length = BUF_CHUNK_SIZE * 8;
-
        usbdev     = usb_get_dev(interface_to_usbdev(interface));
        iface_desc = interface->cur_altsetting;
        num_endpts = iface_desc->desc.bNumEndpoints;
@@ -856,7 +848,7 @@ static int imon_probe(struct usb_interface *interface,
 
        strcpy(driver->name, MOD_NAME);
        driver->minor = -1;
-       driver->code_length = sizeof(int) * 8;
+       driver->code_length = BUF_CHUNK_SIZE * 8;
        driver->sample_rate = 0;
        driver->features = LIRC_CAN_REC_MODE2;
        driver->data = context;
index 4a3cca03224a8a114176a9ea4f09600d3f245600..805df913bb6e164f68ae0c2fdc6efe933040da9d 100644 (file)
@@ -838,7 +838,23 @@ static int hardware_init_port(void)
 
 static int init_port(void)
 {
-       int i, nlow, nhigh;
+       int i, nlow, nhigh, result;
+
+       result = request_irq(irq, irq_handler,
+                            IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
+                            LIRC_DRIVER_NAME, (void *)&hardware);
+
+       switch (result) {
+       case -EBUSY:
+               printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
+               return -EBUSY;
+       case -EINVAL:
+               printk(KERN_ERR LIRC_DRIVER_NAME
+                      ": Bad irq number or handler\n");
+               return -EINVAL;
+       default:
+               break;
+       }
 
        /* Reserve io region. */
        /*
@@ -893,34 +909,17 @@ static int init_port(void)
                printk(KERN_INFO LIRC_DRIVER_NAME  ": Manually using active "
                       "%s receiver\n", sense ? "low" : "high");
 
+       dprintk("Interrupt %d, port %04x obtained\n", irq, io);
        return 0;
 }
 
 static int set_use_inc(void *data)
 {
-       int result;
        unsigned long flags;
 
        /* initialize timestamp */
        do_gettimeofday(&lasttv);
 
-       result = request_irq(irq, irq_handler,
-                            IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
-                            LIRC_DRIVER_NAME, (void *)&hardware);
-
-       switch (result) {
-       case -EBUSY:
-               printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
-               return -EBUSY;
-       case -EINVAL:
-               printk(KERN_ERR LIRC_DRIVER_NAME
-                      ": Bad irq number or handler\n");
-               return -EINVAL;
-       default:
-               dprintk("Interrupt %d, port %04x obtained\n", irq, io);
-               break;
-       }
-
        spin_lock_irqsave(&hardware[type].lock, flags);
 
        /* Set DLAB 0. */
@@ -945,10 +944,6 @@ static void set_use_dec(void *data)
        soutp(UART_IER, sinp(UART_IER) &
              (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
        spin_unlock_irqrestore(&hardware[type].lock, flags);
-
-       free_irq(irq, (void *)&hardware);
-
-       dprintk("freed IRQ %d\n", irq);
 }
 
 static ssize_t lirc_write(struct file *file, const char *buf,
@@ -1256,6 +1251,9 @@ exit_serial_exit:
 static void __exit lirc_serial_exit_module(void)
 {
        lirc_serial_exit();
+
+       free_irq(irq, (void *)&hardware);
+
        if (iommap != 0)
                release_mem_region(iommap, 8 << ioshift);
        else
index a7b46f24f24e242e61cf5e7c0dea2b9650bef7d8..0d3864594b12abf28545f4b52cc511ae5ce17378 100644 (file)
@@ -739,23 +739,16 @@ static void send_space(unsigned long len)
 static void send_pulse(unsigned long len)
 {
        long bytes_out = len / TIME_CONST;
-       long time_left;
 
-       time_left = (long)len - (long)bytes_out * (long)TIME_CONST;
-       if (bytes_out == 0) {
+       if (bytes_out == 0)
                bytes_out++;
-               time_left = 0;
-       }
+
        while (bytes_out--) {
                outb(PULSE, io + UART_TX);
                /* FIXME: proper waiting is needed, as in char/serial.c */
                while (!(inb(io + UART_LSR) & UART_LSR_THRE))
                        ;
        }
-#if 0
-       if (time_left > 0)
-               safe_udelay(time_left);
-#endif
 }
 #endif
 
index dd6a57c3c3a3149bac53eb36fd4ddeabed804050..4e051f6b52dba96c57ddb96e9207ad5a97a67ee7 100644 (file)
@@ -475,14 +475,14 @@ static int lirc_thread(void *arg)
        dprintk("poll thread started\n");
 
        while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+
                /* if device not opened, we can sleep half a second */
                if (atomic_read(&ir->open_count) == 0) {
                        schedule_timeout(HZ/2);
                        continue;
                }
 
-               set_current_state(TASK_INTERRUPTIBLE);
-
                /*
                 * This is ~113*2 + 24 + jitter (2*repeat gap + code length).
                 * We use this interval as the chip resets every time you poll
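
For context: the lirc_zilog hunk above moves set_current_state(TASK_INTERRUPTIBLE) ahead of the open-count test. Marking the task sleeping first means a wakeup arriving between the test and schedule_timeout() flips the task back to TASK_RUNNING and the sleep returns immediately, instead of being lost. A sketch of the canonical pattern (kernel context assumed; the two helpers are hypothetical):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static bool work_pending_somehow(void *arg);    /* hypothetical */
    static void do_work(void *arg);                 /* hypothetical */

    static int poll_thread(void *arg)
    {
            while (!kthread_should_stop()) {
                    /* Mark ourselves sleeping *before* checking the
                     * condition; a concurrent waker now sets us back
                     * to TASK_RUNNING rather than waking nobody. */
                    set_current_state(TASK_INTERRUPTIBLE);

                    if (!work_pending_somehow(arg)) {
                            schedule_timeout(HZ / 2);
                            continue;
                    }

                    /* Condition held: cancel the sleep, do the work. */
                    __set_current_state(TASK_RUNNING);
                    do_work(arg);
            }
            return 0;
    }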
index d1ffa32cd141105b1f9fe4f064ef8b06a53b01e1..685fcf63964494cefb8cf93bd472478c5b8f301a 100644 (file)
@@ -189,7 +189,7 @@ int mei_hw_init(struct mei_device *dev)
                mutex_lock(&dev->device_lock);
        }
 
-       if (!err && !dev->recvd_msg) {
+       if (err <= 0 && !dev->recvd_msg) {
                dev->mei_state = MEI_DISABLED;
                dev_dbg(&dev->pdev->dev,
                        "wait_event_interruptible_timeout failed"
index 2564b038636ad6e79ad1e52d3b8a83749e8ce1ca..fff53d0b5c6efed99aba827b0f68979092f5bb44 100644 (file)
@@ -169,10 +169,15 @@ int mei_wd_stop(struct mei_device *dev, bool preserve)
        ret = wait_event_interruptible_timeout(dev->wait_stop_wd,
                                        dev->wd_stopped, 10 * HZ);
        mutex_lock(&dev->device_lock);
-       if (!dev->wd_stopped)
-               dev_dbg(&dev->pdev->dev, "stop wd failed to complete.\n");
-       else
-               dev_dbg(&dev->pdev->dev, "stop wd complete.\n");
+       if (dev->wd_stopped) {
+               dev_dbg(&dev->pdev->dev, "stop wd complete ret=%d.\n", ret);
+               ret = 0;
+       } else {
+               if (!ret)
+                       ret = -ETIMEDOUT;
+               dev_warn(&dev->pdev->dev,
+                       "stop wd failed to complete ret=%d.\n", ret);
+       }
 
        if (preserve)
                dev->wd_timeout = wd_timeout;
index dee2a2c909f5386671f593428cbfa837f37e65a6..70c2e7fa66643f7a7cdf040c62d74e977d7b9d1b 100644 (file)
@@ -386,7 +386,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
         */
        se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
                                TMR_LUN_RESET);
-       if (!se_cmd->se_tmr_req)
+       if (IS_ERR(se_cmd->se_tmr_req))
                goto release;
        /*
         * Locate the underlying TCM struct se_lun from sc->device->lun
@@ -1017,6 +1017,7 @@ static int tcm_loop_make_nexus(
        struct se_portal_group *se_tpg;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
        struct tcm_loop_nexus *tl_nexus;
+       int ret = -ENOMEM;
 
        if (tl_tpg->tl_hba->tl_nexus) {
                printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
@@ -1033,8 +1034,10 @@ static int tcm_loop_make_nexus(
         * Initialize the struct se_session pointer
         */
        tl_nexus->se_sess = transport_init_session();
-       if (!tl_nexus->se_sess)
+       if (IS_ERR(tl_nexus->se_sess)) {
+               ret = PTR_ERR(tl_nexus->se_sess);
                goto out;
+       }
        /*
         * Since we are running in 'demo mode' this call will generate a
         * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
@@ -1060,7 +1063,7 @@ static int tcm_loop_make_nexus(
 
 out:
        kfree(tl_nexus);
-       return -ENOMEM;
+       return ret;
 }
 
 static int tcm_loop_drop_nexus(
@@ -1140,7 +1143,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
         * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
         * tcm_loop_make_nexus()
         */
-       if (strlen(page) > TL_WWN_ADDR_LEN) {
+       if (strlen(page) >= TL_WWN_ADDR_LEN) {
                printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
                                " max: %d\n", page, TL_WWN_ADDR_LEN);
                return -EINVAL;
@@ -1321,7 +1324,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
        return ERR_PTR(-EINVAL);
 
 check_len:
-       if (strlen(name) > TL_WWN_ADDR_LEN) {
+       if (strlen(name) >= TL_WWN_ADDR_LEN) {
                printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
                        " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
                        TL_WWN_ADDR_LEN);
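
For context: several tcm_loop hunks above share one pattern: transport_init_session() and core_tmr_alloc_req() report failure through ERR_PTR(), so an error return is non-NULL and a `!ptr` test silently accepts it. The idiomatic check, sketched with a hypothetical ERR_PTR-returning call:

    #include <linux/err.h>

    void *make_session(void);       /* hypothetical ERR_PTR-returning call */

    static int use_session(void)
    {
            void *sess = make_session();

            if (IS_ERR(sess))               /* a NULL test would miss this */
                    return PTR_ERR(sess);   /* recover the encoded errno   */

            /* ... use sess ... */
            return 0;
    }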
index ee6fad979b50c82fa9ff2fc9f8121a62f32d7bb5..25c1f49a7d8bae7ee61f20df5830ba5a782277c7 100644 (file)
@@ -304,7 +304,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
                printk(KERN_ERR "Unable to locate passed fabric name\n");
                return NULL;
        }
-       if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
+       if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
                printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
                        "_NAME_SIZE\n", name);
                return NULL;
@@ -312,7 +312,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
 
        tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
        if (!(tf))
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        INIT_LIST_HEAD(&tf->tf_list);
        atomic_set(&tf->tf_access_cnt, 0);
@@ -851,7 +851,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
                return -EOPNOTSUPP;
        }
 
-       if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
+       if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
                printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
                " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
                return -EOVERFLOW;
@@ -917,7 +917,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
 
                transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
 
-               if ((len + strlen(buf) > PAGE_SIZE))
+               if ((len + strlen(buf) >= PAGE_SIZE))
                        break;
 
                len += sprintf(page+len, "%s", buf);
@@ -962,19 +962,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name(                     \
                                                                        \
                memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
                transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);   \
-               if ((len + strlen(buf) > PAGE_SIZE))                    \
+               if ((len + strlen(buf) >= PAGE_SIZE))                   \
                        break;                                          \
                len += sprintf(page+len, "%s", buf);                    \
                                                                        \
                memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
                transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
-               if ((len + strlen(buf) > PAGE_SIZE))                    \
+               if ((len + strlen(buf) >= PAGE_SIZE))                   \
                        break;                                          \
                len += sprintf(page+len, "%s", buf);                    \
                                                                        \
                memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
                transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
-               if ((len + strlen(buf) > PAGE_SIZE))                    \
+               if ((len + strlen(buf) >= PAGE_SIZE))                   \
                        break;                                          \
                len += sprintf(page+len, "%s", buf);                    \
        }                                                               \
@@ -1299,7 +1299,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
                        &i_buf[0] : "", pr_reg->pr_res_key,
                        pr_reg->pr_res_generation);
 
-               if ((len + strlen(buf) > PAGE_SIZE))
+               if ((len + strlen(buf) >= PAGE_SIZE))
                        break;
 
                len += sprintf(page+len, "%s", buf);
@@ -1496,7 +1496,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                                ret = -ENOMEM;
                                goto out;
                        }
-                       if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
+                       if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
                                printk(KERN_ERR "APTPL metadata initiator_node="
                                        " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
                                        PR_APTPL_MAX_IPORT_LEN);
@@ -1510,7 +1510,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                                ret = -ENOMEM;
                                goto out;
                        }
-                       if (strlen(isid) > PR_REG_ISID_LEN) {
+                       if (strlen(isid) >= PR_REG_ISID_LEN) {
                                printk(KERN_ERR "APTPL metadata initiator_isid"
                                        "= exceeds PR_REG_ISID_LEN: %d\n",
                                        PR_REG_ISID_LEN);
@@ -1571,7 +1571,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                                ret = -ENOMEM;
                                goto out;
                        }
-                       if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
+                       if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
                                printk(KERN_ERR "APTPL metadata target_node="
                                        " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
                                        PR_APTPL_MAX_TPORT_LEN);
@@ -3052,7 +3052,7 @@ static struct config_group *target_core_call_addhbatotarget(
        int ret;
 
        memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
-       if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
+       if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
                printk(KERN_ERR "Passed *name strlen(): %d exceeds"
                        " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
                        TARGET_CORE_NAME_MAX_LEN);
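
For context: the strlen() comparisons above all move from `>` to `>=` for the same reason: a string of strlen(s) characters needs strlen(s) + 1 bytes once the terminating NUL is counted, so a name exactly as long as the buffer already overflows it. A runnable sketch with a hypothetical 8-byte buffer:

    #include <stdio.h>
    #include <string.h>

    #define NAME_LEN 8      /* hypothetical destination buffer size */

    /* strlen(name) == NAME_LEN means name plus NUL needs NAME_LEN + 1
     * bytes, which no longer fits -- hence '>=' in the reject tests. */
    static int name_fits(const char *name)
    {
            return strlen(name) < NAME_LEN;
    }

    int main(void)
    {
            printf("%d\n", name_fits("sevench"));  /* 7 chars + NUL: fits */
            printf("%d\n", name_fits("eightch_")); /* 8 chars + NUL: no   */
            return 0;
    }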
index 8407f9ca2b3163f69dc5770ad4d2ff804516a95e..ba698ea62bb2a52c19d87a6d1df7de10fcba9ecd 100644 (file)
@@ -192,7 +192,7 @@ int transport_get_lun_for_tmr(
                        &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
-               dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+               dev = se_lun->lun_se_dev;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
@@ -216,6 +216,7 @@ int transport_get_lun_for_tmr(
                se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -1;
        }
+       se_tmr->tmr_dev = dev;
 
        spin_lock(&dev->se_tmr_lock);
        list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
@@ -1430,7 +1431,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_lun_acl *lacl;
        struct se_node_acl *nacl;
 
-       if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
+       if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
                printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
                        TPG_TFO(tpg)->get_fabric_name());
                *ret = -EOVERFLOW;
index a79f518ca6e2b76e12312d55f536cdb732640802..b662db3a320bb96f272cbf9a831c1c8813e89952 100644 (file)
@@ -1916,7 +1916,7 @@ static int __core_scsi3_update_aptpl_buf(
                                pr_reg->pr_res_mapped_lun);
                }
 
-               if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+               if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
                        printk(KERN_ERR "Unable to update renaming"
                                " APTPL metadata\n");
                        spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1934,7 +1934,7 @@ static int __core_scsi3_update_aptpl_buf(
                        TPG_TFO(tpg)->tpg_get_tag(tpg),
                        lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 
-               if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+               if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
                        printk(KERN_ERR "Unable to update renaming"
                                " APTPL metadata\n");
                        spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1986,7 +1986,7 @@ static int __core_scsi3_write_aptpl_to_file(
        memset(iov, 0, sizeof(struct iovec));
        memset(path, 0, 512);
 
-       if (strlen(&wwn->unit_serial[0]) > 512) {
+       if (strlen(&wwn->unit_serial[0]) >= 512) {
                printk(KERN_ERR "WWN value for struct se_device does not fit"
                        " into path buffer\n");
                return -1;
index 59b8b9c5ad72a272e1c228474f6b241b6a70913b..179063d81cdd6f30f1af19ef3cb20c2e12b923a4 100644 (file)
@@ -75,10 +75,16 @@ void core_tmr_release_req(
 {
        struct se_device *dev = tmr->tmr_dev;
 
+       if (!dev) {
+               kmem_cache_free(se_tmr_req_cache, tmr);
+               return;
+       }
+
        spin_lock(&dev->se_tmr_lock);
        list_del(&tmr->tmr_list);
-       kmem_cache_free(se_tmr_req_cache, tmr);
        spin_unlock(&dev->se_tmr_lock);
+
+       kmem_cache_free(se_tmr_req_cache, tmr);
 }
 
 static void core_tmr_handle_tas_abort(
index 4dafeb8b56381173b733653ce0e9b1c6c9755efd..4b9b7169bdd96957d9a6054ea68ec99c897a2381 100644 (file)
@@ -536,13 +536,13 @@ EXPORT_SYMBOL(transport_register_session);
 void transport_deregister_session_configfs(struct se_session *se_sess)
 {
        struct se_node_acl *se_nacl;
-
+       unsigned long flags;
        /*
         * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
         */
        se_nacl = se_sess->se_node_acl;
        if ((se_nacl)) {
-               spin_lock_irq(&se_nacl->nacl_sess_lock);
+               spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
                list_del(&se_sess->sess_acl_list);
                /*
                 * If the session list is empty, then clear the pointer.
@@ -556,7 +556,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
                                        se_nacl->acl_sess_list.prev,
                                        struct se_session, sess_acl_list);
                }
-               spin_unlock_irq(&se_nacl->nacl_sess_lock);
+               spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
        }
 }
 EXPORT_SYMBOL(transport_deregister_session_configfs);
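
For context: the switch to spin_lock_irqsave() above matters because spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if the caller entered with interrupts already disabled; the irqsave/irqrestore pair preserves whatever state the caller had. A sketch of the context-agnostic form (kernel context assumed; the lock and list entry are hypothetical):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);      /* hypothetical */

    static void remove_entry_any_context(struct list_head *entry)
    {
            unsigned long flags;

            /* Stash the caller's IRQ state in 'flags'... */
            spin_lock_irqsave(&demo_lock, flags);
            list_del(entry);
            /* ...and restore it verbatim, instead of forcing IRQs on. */
            spin_unlock_irqrestore(&demo_lock, flags);
    }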
index defff32b7880faf26c32732c4898a87ec6ba80f4..7b82f1b7fef814b595f8cc266f182baa3f031642 100644 (file)
@@ -144,7 +144,7 @@ enum ft_cmd_state {
  */
 struct ft_cmd {
        enum ft_cmd_state state;
-       u16 lun;                        /* LUN from request */
+       u32 lun;                        /* LUN from request */
        struct ft_sess *sess;           /* session held for cmd */
        struct fc_seq *seq;             /* sequence in exchange mgr */
        struct se_cmd se_cmd;           /* Local TCM I/O descriptor */
index c056a1132ae133a5836613bb357ee24badfc3202..b2a106729d4914eca2c275adb956a944939d9b99 100644 (file)
@@ -94,29 +94,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
                16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
 }
 
-/*
- * Get LUN from CDB.
- */
-static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
-{
-       u64 lun;
-
-       lun = lunp[1];
-       switch (lunp[0] >> 6) {
-       case 0:
-               break;
-       case 1:
-               lun |= (lunp[0] & 0x3f) << 8;
-               break;
-       default:
-               return -1;
-       }
-       if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
-               return -1;
-       cmd->lun = lun;
-       return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
-}
-
 static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
 {
        struct se_queue_obj *qobj;
@@ -418,6 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
 {
        struct se_tmr_req *tmr;
        struct fcp_cmnd *fcp;
+       struct ft_sess *sess;
        u8 tm_func;
 
        fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
@@ -425,13 +403,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
        switch (fcp->fc_tm_flags) {
        case FCP_TMF_LUN_RESET:
                tm_func = TMR_LUN_RESET;
-               if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
-                       ft_dump_cmd(cmd, __func__);
-                       transport_send_check_condition_and_sense(&cmd->se_cmd,
-                               cmd->se_cmd.scsi_sense_reason, 0);
-                       ft_sess_put(cmd->sess);
-                       return;
-               }
                break;
        case FCP_TMF_TGT_RESET:
                tm_func = TMR_TARGET_WARM_RESET;
@@ -463,6 +434,36 @@ static void ft_send_tm(struct ft_cmd *cmd)
                return;
        }
        cmd->se_cmd.se_tmr_req = tmr;
+
+       switch (fcp->fc_tm_flags) {
+       case FCP_TMF_LUN_RESET:
+               cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+               if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
+                       /*
+                        * Make sure to clean up the newly allocated TMR
+                        * request, since we failed to resolve the LUN and
+                        * cannot handle the TMR request.
+                        */
+                       FT_TM_DBG("Failed to get LUN for TMR func %d, "
+                                 "se_cmd %p, unpacked_lun %d\n",
+                                 tm_func, &cmd->se_cmd, cmd->lun);
+                       ft_dump_cmd(cmd, __func__);
+                       sess = cmd->sess;
+                       transport_send_check_condition_and_sense(&cmd->se_cmd,
+                               cmd->se_cmd.scsi_sense_reason, 0);
+                       transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+                       ft_sess_put(sess);
+                       return;
+               }
+               break;
+       case FCP_TMF_TGT_RESET:
+       case FCP_TMF_CLR_TASK_SET:
+       case FCP_TMF_ABT_TASK_SET:
+       case FCP_TMF_CLR_ACA:
+               break;
+       default:
+               return;
+       }
        transport_generic_handle_tmr(&cmd->se_cmd);
 }
 
@@ -635,7 +636,8 @@ static void ft_send_cmd(struct ft_cmd *cmd)
 
        fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
 
-       ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
+       cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+       ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
        if (ret < 0) {
                ft_dump_cmd(cmd, __func__);
                transport_send_check_condition_and_sense(&cmd->se_cmd,
index 4c3c0efbe13f5b97158f92c51d17d46a5001ab40..8c4a24077d9d701fc6902501f9ad9f53775c24b2 100644 (file)
@@ -203,7 +203,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
                        /* XXX For now, initiator will retry */
                        if (printk_ratelimit())
                                printk(KERN_ERR "%s: Failed to send frame %p, "
-                                               "xid <0x%x>, remaining <0x%x>, "
+                                               "xid <0x%x>, remaining %zu, "
                                                "lso_max <0x%x>\n",
                                                __func__, fp, ep->xid,
                                                remaining, lport->lso_max);
index a3bd57f2ea3204d5c079f38aba247f40aae47e25..7491e21cc6ae4c35629614b687a424ac2917d87a 100644 (file)
@@ -229,7 +229,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
                return NULL;
 
        sess->se_sess = transport_init_session();
-       if (!sess->se_sess) {
+       if (IS_ERR(sess->se_sess)) {
                kfree(sess);
                return NULL;
        }
@@ -332,7 +332,7 @@ void ft_sess_close(struct se_session *se_sess)
        lport = sess->tport->lport;
        port_id = sess->port_id;
        if (port_id == -1) {
-               mutex_lock(&ft_lport_lock);
+               mutex_unlock(&ft_lport_lock);
                return;
        }
        FT_SESS_DBG("port_id %x\n", port_id);
index 09e8c7d53af3e7d72ebfe25b45100789f4f5185a..19b4ae052af8eaf2f803bb7bd8b9e49e3e4151ca 100644 (file)
@@ -875,7 +875,8 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
                *dp++ = last << 7 | first << 6 | 1;     /* EA */
                len--;
        }
-       memcpy(dp, skb_pull(dlci->skb, len), len);
+       memcpy(dp, dlci->skb->data, len);
+       skb_pull(dlci->skb, len);
        __gsm_data_queue(dlci, msg);
        if (last)
                dlci->skb = NULL;
@@ -984,10 +985,22 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
  */
 
 static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
-                                                       u32 modem)
+                                                       u32 modem, int clen)
 {
        int  mlines = 0;
-       u8 brk = modem >> 6;
+       u8 brk = 0;
+
+       /* The modem status command can either contain one octet (v.24 signals)
+          or two octets (v.24 signals + break signals). The length field will
+          either be 2 or 3 respectively. This is specified in section
+          5.4.6.3.7 of the 27.010 mux spec. */
+
+       if (clen == 2)
+               modem = modem & 0x7f;
+       else {
+               brk = modem & 0x7f;
+               modem = (modem >> 7) & 0x7f;
+       }
 
        /* Flow control/ready to communicate */
        if (modem & MDM_FC) {
@@ -1061,7 +1074,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
                        return;
        }
        tty = tty_port_tty_get(&dlci->port);
-       gsm_process_modem(tty, dlci, modem);
+       gsm_process_modem(tty, dlci, modem, clen);
        if (tty) {
                tty_wakeup(tty);
                tty_kref_put(tty);
@@ -1482,12 +1495,13 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
  *     open we shovel the bits down it, if not we drop them.
  */
 
-static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
+static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
 {
        /* krefs .. */
        struct tty_port *port = &dlci->port;
        struct tty_struct *tty = tty_port_tty_get(port);
        unsigned int modem = 0;
+       int len = clen;
 
        if (debug & 16)
                pr_debug("%d bytes for tty %p\n", len, tty);
@@ -1507,7 +1521,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
                                if (len == 0)
                                        return;
                        }
-                       gsm_process_modem(tty, dlci, modem);
+                       gsm_process_modem(tty, dlci, modem, clen);
                /* Line state will go via DLCI 0 controls only */
                case 1:
                default:
index 0ad32888091c16c1c27de4c032517df583ea4c9f..c3954fbf6ac402d7d8e8a92389c4f1ecdc065e53 100644 (file)
@@ -1815,6 +1815,7 @@ do_it_again:
                        /* FIXME: does n_tty_set_room need locking ? */
                        n_tty_set_room(tty);
                        timeout = schedule_timeout(timeout);
+                       BUG_ON(!tty->read_buf);
                        continue;
                }
                __set_current_state(TASK_RUNNING);
index b40f7b90c81da53925c420f7d8e37a18a739d4dd..b4129f53fb1b1c7a6fe5d18e7fd2ed8097fcdfda 100644 (file)
@@ -3318,6 +3318,7 @@ void serial8250_unregister_port(int line)
                uart->port.flags &= ~UPF_BOOT_AUTOCONF;
                uart->port.type = PORT_UNKNOWN;
                uart->port.dev = &serial8250_isa_devs->dev;
+               uart->capabilities = uart_config[uart->port.type].flags;
                uart_add_one_port(&serial8250_reg, &uart->port);
        } else {
                uart->port.dev = NULL;
index 4b4968a294b293d0b17f43024487447b40bd450a..f41b4259ecddea3e6fc49c858c77a070ba400bcc 100644 (file)
@@ -973,7 +973,7 @@ ce4100_serial_setup(struct serial_private *priv,
 
 static int
 pci_omegapci_setup(struct serial_private *priv,
-                     struct pciserial_board *board,
+                     const struct pciserial_board *board,
                      struct uart_port *port, int idx)
 {
        return setup_port(priv, port, 2, idx * 8, 0);
@@ -994,6 +994,15 @@ static int skip_tx_en_setup(struct serial_private *priv,
        return pci_default_setup(priv, board, port, idx);
 }
 
+static int pci_eg20t_init(struct pci_dev *dev)
+{
+#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
+       return -ENODEV;
+#else
+       return 0;
+#endif
+}
+
 /* This should be in linux/pci_ids.h */
 #define PCI_VENDOR_ID_SBSMODULARIO     0x124B
 #define PCI_SUBVENDOR_ID_SBSMODULARIO  0x124B
@@ -1446,6 +1455,56 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .init                   = pci_oxsemi_tornado_init,
                .setup          = pci_default_setup,
        },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = 0x8811,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = 0x8812,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = 0x8813,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = 0x8814,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x8027,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x8028,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x8029,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x800C,
+               .init           = pci_eg20t_init,
+       },
+       {
+               .vendor         = 0x10DB,
+               .device         = 0x800D,
+               .init           = pci_eg20t_init,
+       },
        /*
         * Cronyx Omega PCI (PLX-chip based)
         */
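
The EG20T/ML7213 entries above work because the 8250 PCI core runs the matching quirk's ->init() before registering any ports, and a negative return aborts the generic probe. A condensed sketch of that flow (simplified from pciserial_init_ports() in this file; the error path is abbreviated):

	quirk = find_quirk(dev);
	if (quirk->init) {
		rc = quirk->init(dev);
		if (rc < 0)
			goto err_out;	/* -ENODEV: leave the device alone */
	}

With CONFIG_SERIAL_PCH_UART enabled, pci_eg20t_init() returns -ENODEV for these IDs so the dedicated pch_uart driver can claim them instead.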
index 8dc0541feecc18c2c185cead4394f5357d25eee1..f5f6831b0a640671d9e91c3d3e17a4d682e35578 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
+#include <linux/delay.h>
 
 #include <asm/io.h>
 #include <asm/sizes.h>
 #define UART_DR_ERROR          (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
 #define UART_DUMMY_DR_RX       (1 << 16)
 
+
+#define UART_WA_SAVE_NR 14
+
+static void pl011_lockup_wa(unsigned long data);
+static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
+       ST_UART011_DMAWM,
+       ST_UART011_TIMEOUT,
+       ST_UART011_LCRH_RX,
+       UART011_IBRD,
+       UART011_FBRD,
+       ST_UART011_LCRH_TX,
+       UART011_IFLS,
+       ST_UART011_XFCR,
+       ST_UART011_XON1,
+       ST_UART011_XON2,
+       ST_UART011_XOFF1,
+       ST_UART011_XOFF2,
+       UART011_CR,
+       UART011_IMSC
+};
+
+static u32 uart_wa_regdata[UART_WA_SAVE_NR];
+static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
+
 /* There is by now at least one vendor with differing details, so handle it */
 struct vendor_data {
        unsigned int            ifls;
@@ -72,6 +97,7 @@ struct vendor_data {
        unsigned int            lcrh_tx;
        unsigned int            lcrh_rx;
        bool                    oversampling;
+       bool                    interrupt_may_hang;   /* vendor-specific */
        bool                    dma_threshold;
 };
 
@@ -90,9 +116,12 @@ static struct vendor_data vendor_st = {
        .lcrh_tx                = ST_UART011_LCRH_TX,
        .lcrh_rx                = ST_UART011_LCRH_RX,
        .oversampling           = true,
+       .interrupt_may_hang     = true,
        .dma_threshold          = true,
 };
 
+static struct uart_amba_port *amba_ports[UART_NR];
+
 /* Deals with DMA transactions */
 
 struct pl011_sgbuf {
@@ -132,6 +161,7 @@ struct uart_amba_port {
        unsigned int            lcrh_rx;        /* vendor-specific */
        bool                    autorts;
        char                    type[12];
+       bool                    interrupt_may_hang; /* vendor-specific */
 #ifdef CONFIG_DMA_ENGINE
        /* DMA stuff */
        bool                    using_tx_dma;
@@ -1008,6 +1038,68 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
 #endif
 
 
+/*
+ * pl011_lockup_wa
+ * This workaround aims to break the deadlock that occurs when,
+ * after a long transfer over the UART with hardware flow
+ * control, the UART interrupt registers cannot be cleared and
+ * the transfer therefore stalls.
+ *
+ * In this deadlocked state the ICR does not clear even after
+ * multiple writes, so pass_counter keeps decreasing until it
+ * reaches zero. Reaching zero is taken as the trigger point
+ * for running this workaround (UART_BT_WA).
+ *
+ */
+static void pl011_lockup_wa(unsigned long data)
+{
+       struct uart_amba_port *uap = amba_ports[0];
+       void __iomem *base = uap->port.membase;
+       struct circ_buf *xmit = &uap->port.state->xmit;
+       struct tty_struct *tty = uap->port.state->port.tty;
+       int buf_empty_retries = 200;
+       int loop;
+
+       /* Stop HCI layer from submitting data for tx */
+       tty->hw_stopped = 1;
+       while (!uart_circ_empty(xmit)) {
+               if (buf_empty_retries-- == 0)
+                       break;
+               udelay(100);
+       }
+
+       /* Backup registers */
+       for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+               uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);
+
+       /* Disable UART so that FIFO data is flushed out */
+       writew(0x00, uap->port.membase + UART011_CR);
+
+       /* Soft reset UART module */
+       if (uap->port.dev->platform_data) {
+               struct amba_pl011_data *plat;
+
+               plat = uap->port.dev->platform_data;
+               if (plat->reset)
+                       plat->reset();
+       }
+
+       /* Restore registers */
+       for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+               writew(uart_wa_regdata[loop],
+                      uap->port.membase + uart_wa_reg[loop]);
+
+       /* Initialise the old status of the modem signals */
+       uap->old_status = readw(uap->port.membase + UART01x_FR) &
+               UART01x_FR_MODEM_ANY;
+
+       if (readl(base + UART011_MIS) & 0x2)
+               printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");
+
+       /* Start Tx/Rx */
+       tty->hw_stopped = 0;
+}
+
 static void pl011_stop_tx(struct uart_port *port)
 {
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1158,8 +1250,11 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
                        if (status & UART011_TXIS)
                                pl011_tx_chars(uap);
 
-                       if (pass_counter-- == 0)
+                       if (pass_counter-- == 0) {
+                               if (uap->interrupt_may_hang)
+                                       tasklet_schedule(&pl011_lockup_tlet);
                                break;
+                       }
 
                        status = readw(uap->port.membase + UART011_MIS);
                } while (status != 0);
@@ -1339,6 +1434,14 @@ static int pl011_startup(struct uart_port *port)
        writew(uap->im, uap->port.membase + UART011_IMSC);
        spin_unlock_irq(&uap->port.lock);
 
+       if (uap->port.dev->platform_data) {
+               struct amba_pl011_data *plat;
+
+               plat = uap->port.dev->platform_data;
+               if (plat->init)
+                       plat->init();
+       }
+
        return 0;
 
  clk_dis:
@@ -1394,6 +1497,15 @@ static void pl011_shutdown(struct uart_port *port)
         * Shut down the clock producer
         */
        clk_disable(uap->clk);
+
+       if (uap->port.dev->platform_data) {
+               struct amba_pl011_data *plat;
+
+               plat = uap->port.dev->platform_data;
+               if (plat->exit)
+                       plat->exit();
+       }
+
 }
 
 static void
@@ -1700,6 +1812,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
        if (!uap)
                return -ENODEV;
 
+       if (uap->port.dev->platform_data) {
+               struct amba_pl011_data *plat;
+
+               plat = uap->port.dev->platform_data;
+               if (plat->init)
+                       plat->init();
+       }
+
        uap->port.uartclk = clk_get_rate(uap->clk);
 
        if (options)
@@ -1774,6 +1894,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
        uap->lcrh_rx = vendor->lcrh_rx;
        uap->lcrh_tx = vendor->lcrh_tx;
        uap->fifosize = vendor->fifosize;
+       uap->interrupt_may_hang = vendor->interrupt_may_hang;
        uap->port.dev = &dev->dev;
        uap->port.mapbase = dev->res.start;
        uap->port.membase = base;
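
The plat->init()/plat->exit()/plat->reset() calls added in this file assume the board supplies hooks through struct amba_pl011_data. A minimal sketch of such platform data (the board_uart0_* names are hypothetical, not part of this series):

	static void board_uart0_init(void)
	{
		/* e.g. enable a board-level clock or level shifter */
	}

	static struct amba_pl011_data uart0_plat = {
		.init	= board_uart0_init,
		.exit	= board_uart0_exit,		/* hypothetical */
		.reset	= board_uart0_soft_reset,	/* used by pl011_lockup_wa() */
	};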
index 6d5d6e679fc7f59c568a0a1afbac800e51f91e49..af9b7814965a461921d337047c068aed9bb5d051 100644 (file)
@@ -1709,12 +1709,13 @@ static int atmel_serial_resume(struct platform_device *pdev)
 static int __devinit atmel_serial_probe(struct platform_device *pdev)
 {
        struct atmel_uart_port *port;
+       struct atmel_uart_data *pdata = pdev->dev.platform_data;
        void *data;
        int ret;
 
        BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
 
-       port = &atmel_ports[pdev->id];
+       port = &atmel_ports[pdata->num];
        port->backup_imr = 0;
 
        atmel_init_port(port, pdev);
index a1a0e55d0807761b59ae9a9374c3de5c03f57007..c0b68b9cad911f66652e2e9cdd5f41600a3fc21c 100644 (file)
@@ -250,6 +250,20 @@ static void bcm_uart_do_rx(struct uart_port *port)
                /* get overrun/fifo empty information from ier
                 * register */
                iestat = bcm_uart_readl(port, UART_IR_REG);
+
+               if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
+                       unsigned int val;
+
+                       /* fifo reset is required to clear
+                        * interrupt */
+                       val = bcm_uart_readl(port, UART_CTL_REG);
+                       val |= UART_CTL_RSTRXFIFO_MASK;
+                       bcm_uart_writel(port, val, UART_CTL_REG);
+
+                       port->icount.overrun++;
+                       tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+               }
+
                if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
                        break;
 
@@ -284,10 +298,6 @@ static void bcm_uart_do_rx(struct uart_port *port)
                if (uart_handle_sysrq_char(port, c))
                        continue;
 
-               if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
-                       port->icount.overrun++;
-                       tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-               }
 
                if ((cstat & port->ignore_status_mask) == 0)
                        tty_insert_flip_char(tty, c, flag);
index 18f548449c63c886a753ca569121abe94c26f3db..96da17868cf3c46bad25d743c4a304e37fca2c30 100644 (file)
@@ -125,7 +125,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
        brd->bd_uart_offset = 0x200;
        brd->bd_dividend = 921600;
 
-       brd->re_map_membase = ioremap(brd->membase, 0x1000);
+       brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0));
        if (!brd->re_map_membase) {
                dev_err(&pdev->dev,
                        "card has no PCI Memory resources, "
index 1bd28450ca40913f87503d7f75aa4ed5cd149904..a764bf99743b0b5c6be4d46fed2952cbca278d94 100644 (file)
@@ -421,7 +421,6 @@ static int max3110_main_thread(void *_max)
        int ret = 0;
        struct circ_buf *xmit = &max->con_xmit;
 
-       init_waitqueue_head(wq);
        pr_info(PR_FMT "start main thread\n");
 
        do {
@@ -823,7 +822,7 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
        res = RC_TAG;
        ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
        if (ret < 0 || res == 0 || res == 0xffff) {
-               printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)",
+               dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
                                                                        res);
                ret = -ENODEV;
                goto err_get_page;
@@ -838,6 +837,8 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
        max->con_xmit.head = 0;
        max->con_xmit.tail = 0;
 
+       init_waitqueue_head(&max->wq);
+
        max->main_thread = kthread_run(max3110_main_thread,
                                        max, "max3110_main");
        if (IS_ERR(max->main_thread)) {
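
The two max3110 hunks above are one fix: the waitqueue initialization moves out of the worker thread and into probe, before kthread_run(), so the thread can never sleep on an uninitialized queue. The ordering the fix enforces, in sketch form:

	init_waitqueue_head(&max->wq);		/* shared state first ... */
	max->main_thread = kthread_run(max3110_main_thread,
					max, "max3110_main");	/* ... then its user */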
index fb2619f93d84e25424dc551def7a33ae56710041..dd194dc80ee9a4f0adbbeae30e4c222756b20d19 100644 (file)
@@ -30,7 +30,7 @@ static int s5pv210_serial_setsource(struct uart_port *port,
        struct s3c2410_uartcfg *cfg = port->dev->platform_data;
        unsigned long ucon = rd_regl(port, S3C2410_UCON);
 
-       if ((cfg->clocks_size) == 1)
+       if (cfg->flags & NO_NEED_CHECK_CLKSRC)
                return 0;
 
        if (strcmp(clk->name, "pclk") == 0)
@@ -55,7 +55,7 @@ static int s5pv210_serial_getsource(struct uart_port *port,
 
        clk->divisor = 1;
 
-       if ((cfg->clocks_size) == 1)
+       if (cfg->flags & NO_NEED_CHECK_CLKSRC)
                return 0;
 
        switch (ucon & S5PV210_UCON_CLKMASK) {
index 5d01d32e2cf072fa26699fd7a7b19e77c7ff4f39..ef925d5817139417764e7b3d6dfc2108d86971c5 100644 (file)
@@ -555,7 +555,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
 static int tty_ldisc_wait_idle(struct tty_struct *tty)
 {
        int ret;
-       ret = wait_event_interruptible_timeout(tty_ldisc_idle,
+       ret = wait_event_timeout(tty_ldisc_idle,
                        atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
        if (ret < 0)
                return ret;
@@ -763,6 +763,8 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
        if (IS_ERR(ld))
                return -1;
 
+       WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
+
        tty_ldisc_close(tty, tty->ldisc);
        tty_ldisc_put(tty->ldisc);
        tty->ldisc = NULL;
index e35a17687c05f77b9ecc55b4002d0b13386081b6..34e3da5aa72a2463863b04e3b3befb4aba02505d 100644 (file)
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
                 * Just re-enable it without affecting the endpoint toggles.
                 */
                usb_enable_interface(udev, intf, false);
-       } else if (!error && !intf->dev.power.in_suspend) {
+       } else if (!error && !intf->dev.power.is_prepared) {
                r = usb_set_interface(udev, intf->altsetting[0].
                                desc.bInterfaceNumber, 0);
                if (r < 0)
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
        }
 
        /* Try to rebind the interface */
-       if (!intf->dev.power.in_suspend) {
+       if (!intf->dev.power.is_prepared) {
                intf->needs_binding = 0;
                rc = device_attach(&intf->dev);
                if (rc < 0)
@@ -1107,7 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
        if (intf->condition == USB_INTERFACE_UNBOUND) {
 
                /* Carry out a deferred switch to altsetting 0 */
-               if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) {
+               if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) {
                        usb_set_interface(udev, intf->altsetting[0].
                                        desc.bInterfaceNumber, 0);
                        intf->needs_altsetting0 = 0;
@@ -1187,13 +1187,22 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
                for (i = n - 1; i >= 0; --i) {
                        intf = udev->actconfig->interface[i];
                        status = usb_suspend_interface(udev, intf, msg);
+
+                       /* Ignore errors during system sleep transitions */
+                       if (!(msg.event & PM_EVENT_AUTO))
+                               status = 0;
                        if (status != 0)
                                break;
                }
        }
-       if (status == 0)
+       if (status == 0) {
                status = usb_suspend_device(udev, msg);
 
+               /* Again, ignore errors during system sleep transitions */
+               if (!(msg.event & PM_EVENT_AUTO))
+                       status = 0;
+       }
+
        /* If the suspend failed, resume interfaces that did get suspended */
        if (status != 0) {
                msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
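
The PM_EVENT_AUTO tests above are what distinguish runtime suspend from system sleep in this file: only runtime suspend may be aborted by a failing interface. Sketched below (the helper name is hypothetical; the flag semantics come from the PM core):

	/* msg.event carries PM_EVENT_AUTO only for runtime (auto)suspend */
	static bool is_runtime_suspend(pm_message_t msg)
	{
		return (msg.event & PM_EVENT_AUTO) != 0;
	}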
index 90ae1753dda16bab13f0cebb759d961fb5598cbc..a428aa080a365e219857cd0b465aab5e67a4887b 100644 (file)
@@ -1634,6 +1634,7 @@ void usb_disconnect(struct usb_device **pdev)
 {
        struct usb_device       *udev = *pdev;
        int                     i;
+       struct usb_hcd          *hcd = bus_to_hcd(udev->bus);
 
        if (!udev) {
                pr_debug ("%s nodev\n", __func__);
@@ -1661,7 +1662,9 @@ void usb_disconnect(struct usb_device **pdev)
         * so that the hardware is now fully quiesced.
         */
        dev_dbg (&udev->dev, "unregistering device\n");
+       mutex_lock(hcd->bandwidth_mutex);
        usb_disable_device(udev, 0);
+       mutex_unlock(hcd->bandwidth_mutex);
        usb_hcd_synchronize_unlinks(udev);
 
        usb_remove_ep_devs(&udev->ep0);
@@ -2362,6 +2365,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
                                USB_DEVICE_REMOTE_WAKEUP, 0,
                                NULL, 0,
                                USB_CTRL_SET_TIMEOUT);
+
+               /* System sleep transitions should never fail */
+               if (!(msg.event & PM_EVENT_AUTO))
+                       status = 0;
        } else {
                /* device has up to 10 msec to fully suspend */
                dev_dbg(&udev->dev, "usb %ssuspend\n",
@@ -2611,16 +2618,15 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
        struct usb_device       *hdev = hub->hdev;
        unsigned                port1;
 
-       /* fail if children aren't already suspended */
+       /* Warn if children aren't already suspended */
        for (port1 = 1; port1 <= hdev->maxchild; port1++) {
                struct usb_device       *udev;
 
                udev = hdev->children [port1-1];
                if (udev && udev->can_submit) {
-                       if (!(msg.event & PM_EVENT_AUTO))
-                               dev_dbg(&intf->dev, "port %d nyet suspended\n",
-                                               port1);
-                       return -EBUSY;
+                       dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
+                       if (msg.event & PM_EVENT_AUTO)
+                               return -EBUSY;
                }
        }
 
index 5701e857392bc2846bb7e2b4741b31fb2e30b333..0b5ec234c787ff907cf677c25dbf4f407a418642 100644 (file)
@@ -1135,15 +1135,26 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
  * Deallocates hcd/hardware state for the endpoints (nuking all or most
  * pending urbs) and usbcore state for the interfaces, so that
  * usb_set_configuration() must be called before any interface can be used.
+ *
+ * Must be called with hcd->bandwidth_mutex held.
  */
 void usb_disable_device(struct usb_device *dev, int skip_ep0)
 {
        int i;
+       struct usb_hcd *hcd = bus_to_hcd(dev->bus);
 
        /* getting rid of interfaces will disconnect
         * any drivers bound to them (a key side effect)
         */
        if (dev->actconfig) {
+               /*
+                * FIXME: In order to avoid self-deadlock involving the
+                * bandwidth_mutex, we have to mark all the interfaces
+                * before unregistering any of them.
+                */
+               for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++)
+                       dev->actconfig->interface[i]->unregistering = 1;
+
                for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
                        struct usb_interface    *interface;
 
@@ -1153,7 +1164,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
                                continue;
                        dev_dbg(&dev->dev, "unregistering interface %s\n",
                                dev_name(&interface->dev));
-                       interface->unregistering = 1;
                        remove_intf_ep_devs(interface);
                        device_del(&interface->dev);
                }
@@ -1172,6 +1182,16 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
 
        dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
                skip_ep0 ? "non-ep0" : "all");
+       if (hcd->driver->check_bandwidth) {
+               /* First pass: Cancel URBs, leave endpoint pointers intact. */
+               for (i = skip_ep0; i < 16; ++i) {
+                       usb_disable_endpoint(dev, i, false);
+                       usb_disable_endpoint(dev, i + USB_DIR_IN, false);
+               }
+               /* Remove endpoints from the host controller internal state */
+               usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+               /* Second pass: remove endpoint pointers */
+       }
        for (i = skip_ep0; i < 16; ++i) {
                usb_disable_endpoint(dev, i, true);
                usb_disable_endpoint(dev, i + USB_DIR_IN, true);
@@ -1273,6 +1293,8 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
                        interface);
                return -EINVAL;
        }
+       if (iface->unregistering)
+               return -ENODEV;
 
        alt = usb_altnum_to_altsetting(iface, alternate);
        if (!alt) {
@@ -1727,6 +1749,7 @@ free_interfaces:
        /* if it's already configured, clear out old state first.
         * getting rid of old interfaces means unbinding their drivers.
         */
+       mutex_lock(hcd->bandwidth_mutex);
        if (dev->state != USB_STATE_ADDRESS)
                usb_disable_device(dev, 1);     /* Skip ep0 */
 
@@ -1739,7 +1762,6 @@ free_interfaces:
         * host controller will not allow submissions to dropped endpoints.  If
         * this call fails, the device state is unchanged.
         */
-       mutex_lock(hcd->bandwidth_mutex);
        ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
        if (ret < 0) {
                mutex_unlock(hcd->bandwidth_mutex);
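
Taken together with the hub.c hunk earlier, the locking change means teardown of the old configuration and bandwidth allocation for the new one now happen under a single bandwidth_mutex hold. Condensed shape of usb_set_configuration() after this patch (abbreviated from the hunks above):

	mutex_lock(hcd->bandwidth_mutex);
	if (dev->state != USB_STATE_ADDRESS)
		usb_disable_device(dev, 1);	/* now called with the mutex held */
	ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
	if (ret < 0) {
		mutex_unlock(hcd->bandwidth_mutex);
		/* ... */
	}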
index 2cd9a60c7f3a55d5872395aba45d4bc0bb1d977b..4e4833168087565215274dd1dab672628cd303b9 100644 (file)
@@ -46,7 +46,6 @@
 #include <asm/system.h>
 #include <asm/unaligned.h>
 #include <asm/dma.h>
-#include <asm/cacheflush.h>
 
 #include "fsl_usb2_udc.h"
 
@@ -118,6 +117,17 @@ static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
 #define fsl_readl(p)           (*_fsl_readl)((p))
 #define fsl_writel(v, p)       (*_fsl_writel)((v), (p))
 
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata)
+{
+       if (pdata->big_endian_mmio) {
+               _fsl_readl = _fsl_readl_be;
+               _fsl_writel = _fsl_writel_be;
+       } else {
+               _fsl_readl = _fsl_readl_le;
+               _fsl_writel = _fsl_writel_le;
+       }
+}
+
 static inline u32 cpu_to_hc32(const u32 x)
 {
        return udc_controller->pdata->big_endian_desc
@@ -132,6 +142,8 @@ static inline u32 hc32_to_cpu(const u32 x)
                : le32_to_cpu((__force __le32)x);
 }
 #else /* !CONFIG_PPC32 */
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata) {}
+
 #define fsl_readl(addr)                readl(addr)
 #define fsl_writel(val32, addr) writel(val32, addr)
 #define cpu_to_hc32(x)         cpu_to_le32(x)
@@ -1277,6 +1289,11 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
        req->req.complete = NULL;
        req->dtd_count = 0;
 
+       req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+                       req->req.buf, req->req.length,
+                       ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       req->mapped = 1;
+
        if (fsl_req_to_dtd(req) == 0)
                fsl_queue_td(ep, req);
        else
@@ -1348,9 +1365,6 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
        /* Fill in the request structure */
        *((u16 *) req->req.buf) = cpu_to_le16(tmp);
 
-       /* flush cache for the req buffer */
-       flush_dcache_range((u32)req->req.buf, (u32)req->req.buf + 8);
-
        req->ep = ep;
        req->req.length = 2;
        req->req.status = -EINPROGRESS;
@@ -1358,6 +1372,11 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
        req->req.complete = NULL;
        req->dtd_count = 0;
 
+       req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+                               req->req.buf, req->req.length,
+                               ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       req->mapped = 1;
+
        /* prime the data phase */
        if ((fsl_req_to_dtd(req) == 0))
                fsl_queue_td(ep, req);
@@ -2354,7 +2373,6 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
                        struct fsl_req, req);
        /* allocate a small amount of memory to get valid address */
        udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
-       udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
 
        udc->resume_state = USB_STATE_NOTATTACHED;
        udc->usb_state = USB_STATE_POWERED;
@@ -2470,13 +2488,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
        }
 
        /* Set accessors only after pdata->init() ! */
-       if (pdata->big_endian_mmio) {
-               _fsl_readl = _fsl_readl_be;
-               _fsl_writel = _fsl_writel_be;
-       } else {
-               _fsl_readl = _fsl_readl_le;
-               _fsl_writel = _fsl_writel_le;
-       }
+       fsl_set_accessors(pdata);
 
 #ifndef CONFIG_ARCH_MXC
        if (pdata->have_sysif_regs)
index 98cc8a13169c9aab288803ecba0d88062a04e727..aa248c2f2c60d6a7de7996e48fa0ecffc1691519 100644 (file)
@@ -44,7 +44,6 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
        struct platform_device *pdev = to_platform_device(hcd->self.controller);
        const struct platform_device_id *id;
-       int hclength;
        int ret;
 
        id = platform_get_device_id(pdev);
@@ -53,20 +52,23 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
                return -EINVAL;
        }
 
-       hclength = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
        switch (id->driver_data) {
        case EHCI_ATH79_IP_V1:
                ehci->has_synopsys_hc_bug = 1;
 
                ehci->caps = hcd->regs;
-               ehci->regs = hcd->regs + hclength;
+               ehci->regs = hcd->regs +
+                       HC_LENGTH(ehci,
+                                 ehci_readl(ehci, &ehci->caps->hc_capbase));
                break;
 
        case EHCI_ATH79_IP_V2:
                hcd->has_tt = 1;
 
                ehci->caps = hcd->regs + 0x100;
-               ehci->regs = hcd->regs + 0x100 + hclength;
+               ehci->regs = hcd->regs + 0x100 +
+                       HC_LENGTH(ehci,
+                                 ehci_readl(ehci, &ehci->caps->hc_capbase));
                break;
 
        default:
index b435ed67dd5c4e0649185c228648709e30c4d0f7..f8030ee928e8ccb86c4abcd4262f1467476e2a80 100644 (file)
@@ -1,4 +1,8 @@
 /*
+ * Enhanced Host Controller Interface (EHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
  * Copyright (c) 2000-2004 by David Brownell
  *
  * This program is free software; you can redistribute it and/or modify it
index c9e6e454c625248e61d093666f562c01ea33db53..55d3d5859ac5667da2332a3a782588e4541bbdc9 100644 (file)
@@ -1555,7 +1555,7 @@ static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
 
        /* We need to forcefully reclaim the slot since some transfers never
           return, e.g. interrupt transfers and NAKed bulk transfers. */
-       if (usb_pipebulk(urb->pipe)) {
+       if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
                skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
                skip_map |= (1 << qh->slot);
                reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
index 9aa10bdf39188e4a0a7892684b7d877d7550731a..f9cf3f04b7424bd2299dc52f1662cdd6443e050b 100644 (file)
@@ -1,5 +1,7 @@
 /*
- * OHCI HCD (Host Controller Driver) for USB.
+ * Open Host Controller Interface (OHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
  *
  * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
  * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
index db6f8b9c19b6d5a1a9768f0bb50bae3bdf8ae5ee..4586369dda00beebaf9c47d73200bbdc4c66c319 100644 (file)
@@ -2517,6 +2517,7 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&r8a66597->child_device);
 
        hcd->rsrc_start = res->start;
+       hcd->has_tt = 1;
 
        ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
        if (ret != 0) {
index 0f8e1d29a858e6c5e00e287a11f8ff1ba2b212e1..fcb7f7efc86db903544aa8601fb20e8e874f4b33 100644 (file)
@@ -1215,8 +1215,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
                /* dig out max burst from ep companion desc */
                max_packet = ep->ss_ep_comp.bMaxBurst;
-               if (!max_packet)
-                       xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
                break;
        case USB_SPEED_HIGH:
index 17541d09eabbc9b867c5be1dcdeba349b45bfa82..cb16de213f6491bb062c41e7cd91277f9fc0b605 100644 (file)
@@ -29,6 +29,9 @@
 #define PCI_VENDOR_ID_FRESCO_LOGIC     0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
 
+#define PCI_VENDOR_ID_ETRON            0x1b6f
+#define PCI_DEVICE_ID_ASROCK_P67       0x7023
+
 static const char hcd_name[] = "xhci_hcd";
 
 /* called after powerup, by probe or system-pm "wakeup" */
@@ -134,6 +137,11 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
                xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
                xhci->limit_active_eps = 64;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+                       pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+               xhci->quirks |= XHCI_RESET_ON_RESUME;
+               xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+       }
 
        /* Make sure the HC is halted. */
        retval = xhci_halt(xhci);
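
A note on XHCI_RESET_ON_RESUME: as the xhci.c hunk further down shows, the quirk simply forces the resume path to treat every resume as a resume from hibernation, so the Etron controller is fully re-initialized instead of restored from saved state.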
index 800f417c730900271a98ccf0410d7617afc3aa20..70cacbbe7fb9b4b0ec16b8d040d9c062ab83d96f 100644 (file)
@@ -1733,6 +1733,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                frame->status = -EOVERFLOW;
                skip_td = true;
                break;
+       case COMP_DEV_ERR:
        case COMP_STALL:
                frame->status = -EPROTO;
                skip_td = true;
@@ -1767,9 +1768,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                }
        }
 
-       if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
-               *status = 0;
-
        return finish_td(xhci, td, event_trb, event, ep, status, false);
 }
 
@@ -1787,8 +1785,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        idx = urb_priv->td_cnt;
        frame = &td->urb->iso_frame_desc[idx];
 
-       /* The transfer is partly done */
-       *status = -EXDEV;
+       /* The transfer is partly done. */
        frame->status = -EXDEV;
 
        /* calc actual length */
@@ -2016,6 +2013,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
                                 ep_index);
                goto cleanup;
+       case COMP_DEV_ERR:
+               xhci_warn(xhci, "WARN: detected an incompatible device\n");
+               status = -EPROTO;
+               break;
        case COMP_MISSED_INT:
                /*
                 * When encounter missed service error, one or more isoc tds
@@ -2063,6 +2064,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                /* Is this a TRB in the currently executing TD? */
                event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
                                td->last_trb, event_dma);
+
+               /*
+                * Skip the Force Stopped Event. The event_trb(event_dma) of
+                * the FSE is not in the current TD pointed to by
+                * ep_ring->dequeue, because the hardware dequeue pointer is
+                * still at a TRB before the current TD: either a Link TRB or
+                * the last TRB of the previous TD. The command completion
+                * handler will take care of the rest.
+                */
+               if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
+                       ret = 0;
+                       goto cleanup;
+               }
+
                if (!event_seg) {
                        if (!ep->skip ||
                            !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
@@ -2158,6 +2173,11 @@ cleanup:
                                                urb->transfer_buffer_length,
                                                status);
                        spin_unlock(&xhci->lock);
+                       /* EHCI, UHCI, and OHCI always unconditionally set the
+                        * urb->status of an isochronous endpoint to 0.
+                        */
+                       if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+                               status = 0;
                        usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
                        spin_lock(&xhci->lock);
                }
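
The added giveback code enforces the usual USB convention for isochronous transfers: the URB itself completes with status 0 and per-frame results live in iso_frame_desc[]. A hypothetical driver-side helper would then inspect frames individually:

	/* sketch: per-frame status checking for a completed isoc URB */
	static int isoc_errors(struct urb *urb)
	{
		int i, errs = 0;

		for (i = 0; i < urb->number_of_packets; i++)
			if (urb->iso_frame_desc[i].status)
				errs++;	/* urb->status alone is not enough */
		return errs;
	}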
index 06e7023258d0f4e3e4b603d6c2ae68b1fbd38f4f..f5fe1ac301ab43b69a3d22e930a4d06c85ee9cd4 100644 (file)
@@ -759,6 +759,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                msleep(100);
 
        spin_lock_irq(&xhci->lock);
+       if (xhci->quirks & XHCI_RESET_ON_RESUME)
+               hibernated = true;
 
        if (!hibernated) {
                /* step 1: restore register */
@@ -1401,6 +1403,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
        u32 added_ctxs;
        unsigned int last_ctx;
        u32 new_add_flags, new_drop_flags, new_slot_info;
+       struct xhci_virt_device *virt_dev;
        int ret = 0;
 
        ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
@@ -1425,11 +1428,25 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                return 0;
        }
 
-       in_ctx = xhci->devs[udev->slot_id]->in_ctx;
-       out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+       virt_dev = xhci->devs[udev->slot_id];
+       in_ctx = virt_dev->in_ctx;
+       out_ctx = virt_dev->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+
+       /* If this endpoint is already in use, and the upper layers are trying
+        * to add it again without dropping it, reject the addition.
+        */
+       if (virt_dev->eps[ep_index].ring &&
+                       !(le32_to_cpu(ctrl_ctx->drop_flags) &
+                               xhci_get_endpoint_flag(&ep->desc))) {
+               xhci_warn(xhci, "Trying to add endpoint 0x%x "
+                               "without dropping it.\n",
+                               (unsigned int) ep->desc.bEndpointAddress);
+               return -EINVAL;
+       }
+
        /* If the HCD has already noted the endpoint is enabled,
         * ignore this request.
         */
@@ -1445,8 +1462,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
         * process context, not interrupt context (or so documentation
         * for usb_set_interface() and usb_set_configuration() claim).
         */
-       if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
-                               udev, ep, GFP_NOIO) < 0) {
+       if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
                dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
                                __func__, ep->desc.bEndpointAddress);
                return -ENOMEM;
@@ -1537,6 +1553,11 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
                                "and endpoint is not disabled.\n");
                ret = -EINVAL;
                break;
+       case COMP_DEV_ERR:
+               dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
+                               "configure command.\n");
+               ret = -ENODEV;
+               break;
        case COMP_SUCCESS:
                dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
                ret = 0;
@@ -1571,6 +1592,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
                ret = -EINVAL;
                break;
+       case COMP_DEV_ERR:
+               dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
+                               "context command.\n");
+               ret = -ENODEV;
+               break;
        case COMP_MEL_ERR:
                /* Max Exit Latency too large error */
                dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
@@ -2853,6 +2879,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
                dev_warn(&udev->dev, "Device not responding to set address.\n");
                ret = -EPROTO;
                break;
+       case COMP_DEV_ERR:
+               dev_warn(&udev->dev, "ERROR: Incompatible device for address "
+                               "device command.\n");
+               ret = -ENODEV;
+               break;
        case COMP_SUCCESS:
                xhci_dbg(xhci, "Successful Address Device command\n");
                break;
index 7d1ea3bf5e1fa0187210f6c89ad87a9f53f1f8e2..d8bbf5ccb10d97872fb79aeb4b8aa00f6dad2cea 100644 (file)
@@ -874,6 +874,8 @@ struct xhci_transfer_event {
 #define COMP_PING_ERR  20
 /* Event Ring is full */
 #define COMP_ER_FULL   21
+/* Incompatible Device Error */
+#define COMP_DEV_ERR   22
 /* Missed Service Error - HC couldn't service an isoc ep within interval */
 #define COMP_MISSED_INT        23
 /* Successfully stopped command ring */
@@ -1308,6 +1310,7 @@ struct xhci_hcd {
  */
 #define XHCI_EP_LIMIT_QUIRK    (1 << 5)
 #define XHCI_BROKEN_MSI                (1 << 6)
+#define XHCI_RESET_ON_RESUME   (1 << 7)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
index 0a50a35e185351f0732602c7d5b04e4f4173d3d3..6aeb363e63e7c143fdfb35ae52f167fe8f37cf3a 100644 (file)
@@ -1524,6 +1524,12 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
                csr = musb_readw(epio, MUSB_TXCSR);
                if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
                        csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+                       /*
+                        * Setting both TXPKTRDY and FLUSHFIFO makes the
+                        * controller interrupt the current FIFO loading, but
+                        * not flush the data that has already been loaded.
+                        */
+                       csr &= ~MUSB_TXCSR_TXPKTRDY;
                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
                        musb_writew(epio, MUSB_TXCSR, csr);
index 7295e316bdfcf82e2af402118ce93e5af2dec465..8b2473fa0f47387c9647944d6e45b362539a8ab5 100644 (file)
@@ -1575,7 +1575,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                        /* even if there was an error, we did the dma
                         * for iso_frame_desc->length
                         */
-                       if (d->status != EILSEQ && d->status != -EOVERFLOW)
+                       if (d->status != -EILSEQ && d->status != -EOVERFLOW)
                                d->status = 0;
 
                        if (++qh->iso_idx >= urb->number_of_packets)
index 1627289775538eaa2b1967e2547c54717c82baa3..2e06b90aa1f8f50139c8873011f1c34956077b63 100644 (file)
@@ -179,6 +179,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -848,7 +849,8 @@ static const char *ftdi_chip_name[] = {
        [FT2232C] = "FT2232C",
        [FT232RL] = "FT232RL",
        [FT2232H] = "FT2232H",
-       [FT4232H] = "FT4232H"
+       [FT4232H] = "FT4232H",
+       [FT232H]  = "FT232H"
 };
 
 
@@ -1168,6 +1170,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
                break;
        case FT2232H: /* FT2232H chip */
        case FT4232H: /* FT4232H chip */
+       case FT232H:  /* FT232H chip */
                if ((baud <= 12000000) & (baud >= 1200)) {
                        div_value = ftdi_2232h_baud_to_divisor(baud);
                } else if (baud < 1200) {
@@ -1429,9 +1432,12 @@ static void ftdi_determine_type(struct usb_serial_port *port)
        } else if (version < 0x600) {
                /* Assume it's an FT232BM (or FT245BM) */
                priv->chip_type = FT232BM;
-       } else {
-               /* Assume it's an FT232R */
+       } else if (version < 0x900) {
+               /* Assume it's an FT232RL */
                priv->chip_type = FT232RL;
+       } else {
+               /* Assume it's an FT232H */
+               priv->chip_type = FT232H;
        }
        dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
 }
@@ -1559,7 +1565,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
                     priv->chip_type == FT2232C ||
                     priv->chip_type == FT232RL ||
                     priv->chip_type == FT2232H ||
-                    priv->chip_type == FT4232H)) {
+                    priv->chip_type == FT4232H ||
+                    priv->chip_type == FT232H)) {
                        retval = device_create_file(&port->dev,
                                                    &dev_attr_latency_timer);
                }
@@ -1580,7 +1587,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
                    priv->chip_type == FT2232C ||
                    priv->chip_type == FT232RL ||
                    priv->chip_type == FT2232H ||
-                   priv->chip_type == FT4232H) {
+                   priv->chip_type == FT4232H ||
+                   priv->chip_type == FT232H) {
                        device_remove_file(&port->dev, &dev_attr_latency_timer);
                }
        }
@@ -2212,6 +2220,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
        case FT232RL:
        case FT2232H:
        case FT4232H:
+       case FT232H:
                len = 2;
                break;
        default:
index 213fe3d61282478270e757518d4453a3dca1c69d..19584faa86f92c35ce94fcdf345c5d591da58187 100644 (file)
@@ -156,7 +156,8 @@ enum ftdi_chip_type {
        FT2232C = 4,
        FT232RL = 5,
        FT2232H = 6,
-       FT4232H = 7
+       FT4232H = 7,
+       FT232H  = 8
 };
 
 enum ftdi_sio_baudrate {
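
For reference, the detection thresholds used by ftdi_determine_type() in the hunk above map the device's bcdDevice version as follows:

	/*
	 *   version < 0x600  ->  FT232BM (or FT245BM)
	 *   version < 0x900  ->  FT232RL
	 *   otherwise        ->  FT232H
	 */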
index ab1fcdf3c378e954882a8a7db9bc1e4cb672aed8..19156d1049fe22134e745158a6dc64da1a9d0a1b 100644 (file)
@@ -22,6 +22,7 @@
 #define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
 #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
 #define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
+#define FTDI_232H_PID  0x6014 /* Single channel hi-speed device */
 #define FTDI_SIO_PID   0x8372  /* Product Id SIO application of 8U100AX */
 #define FTDI_232RL_PID  0xFBFA  /* Product ID for FT232RL */
 
index c6d92a5300869ce17aa0ce584778e484d94384bd..ea8445689c8501f82795a9fe29c95b9fbcd73183 100644 (file)
@@ -1745,6 +1745,7 @@ static int ti_download_firmware(struct ti_device *tdev)
        }
        if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
                dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
+               release_firmware(fw_p);
                return -ENOENT;
        }
 
index 5fc983c5b92cb6677026fb8e5ddb18349150bee0..cf03ad0671472f2aaecbcd4fec7d462cf29d01e5 100644 (file)
@@ -447,6 +447,8 @@ static int clcdfb_register(struct clcd_fb *fb)
                goto out;
        }
 
+       fb->fb.device           = &fb->dev->dev;
+
        fb->fb.fix.mmio_start   = fb->dev->res.start;
        fb->fb.fix.mmio_len     = resource_size(&fb->dev->res);
 
index bedf5be27f05352bf01f35f945e0d07ebd5e3fcf..0acc7d65aeaade67e5ce3d1188d030d5d842d159 100644 (file)
@@ -555,8 +555,6 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
 static int fsl_diu_check_var(struct fb_var_screeninfo *var,
                                struct fb_info *info)
 {
-       unsigned long htotal, vtotal;
-
        pr_debug("check_var xres: %d\n", var->xres);
        pr_debug("check_var yres: %d\n", var->yres);
 
@@ -635,20 +633,6 @@ static int fsl_diu_check_var(struct fb_var_screeninfo *var,
 
                break;
        }
-       /* If the pixclock is below the minimum spec'd value then set to
-        * refresh rate for 60Hz since this is supported by most monitors.
-        * Refer to Documentation/fb/ for calculations.
-        */
-       if ((var->pixclock < MIN_PIX_CLK) || (var->pixclock > MAX_PIX_CLK)) {
-               htotal = var->xres + var->right_margin + var->hsync_len +
-                   var->left_margin;
-               vtotal = var->yres + var->lower_margin + var->vsync_len +
-                   var->upper_margin;
-               var->pixclock = (vtotal * htotal * 6UL) / 100UL;
-               var->pixclock = KHZ2PICOS(var->pixclock);
-               pr_debug("pixclock set for 60Hz refresh = %u ps\n",
-                       var->pixclock);
-       }
 
        var->height = -1;
        var->width = -1;
index c6b554f72c6d255b61e1925e32f6effb9f300978..5a5d0928df33cf94355f39a73b803d4c1ab86522 100644 (file)
@@ -29,7 +29,7 @@ static int  crt_option = 1;
 static char panel_option[32] = "";
 
 /* Modes relevant to the GX1 (taken from modedb.c) */
-static const struct fb_videomode __initdata gx1_modedb[] = {
+static const struct fb_videomode __devinitdata gx1_modedb[] = {
        /* 640x480-60 VESA */
        { NULL, 60, 640, 480, 39682,  48, 16, 33, 10, 96, 2,
          0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -195,7 +195,7 @@ static int gx1fb_blank(int blank_mode, struct fb_info *info)
        return par->vid_ops->blank_display(info, blank_mode);
 }
 
-static int __init gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
+static int __devinit gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
 {
        struct geodefb_par *par = info->par;
        unsigned gx_base;
@@ -268,7 +268,7 @@ static struct fb_ops gx1fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
+static struct fb_info * __devinit gx1fb_init_fbinfo(struct device *dev)
 {
        struct geodefb_par *par;
        struct fb_info *info;
@@ -318,7 +318,7 @@ static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
        return info;
 }
 
-static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct geodefb_par *par;
        struct fb_info *info;
@@ -382,7 +382,7 @@ static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *
        return ret;
 }
 
-static void gx1fb_remove(struct pci_dev *pdev)
+static void __devexit gx1fb_remove(struct pci_dev *pdev)
 {
        struct fb_info *info = pci_get_drvdata(pdev);
        struct geodefb_par *par = info->par;
@@ -441,7 +441,7 @@ static struct pci_driver gx1fb_driver = {
        .name           = "gx1fb",
        .id_table       = gx1fb_id_table,
        .probe          = gx1fb_probe,
-       .remove         = gx1fb_remove,
+       .remove         = __devexit_p(gx1fb_remove),
 };
 
 static int __init gx1fb_init(void)
@@ -456,7 +456,7 @@ static int __init gx1fb_init(void)
        return pci_register_driver(&gx1fb_driver);
 }
 
-static void __exit gx1fb_cleanup(void)
+static void __devexit gx1fb_cleanup(void)
 {
        pci_unregister_driver(&gx1fb_driver);
 }
index fbef15f7a21803051e778b89436cd08c2f0f0561..614251a9af912953e867802e2ddbd9a495fdee86 100644 (file)
@@ -233,7 +233,7 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
 
        videomemory = vzalloc(videomemorysize);
        if (!videomemory)
-               return retval;
+               goto err_videomem_alloc;
 
        info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
        if (!info)
@@ -275,6 +275,7 @@ err_fbreg:
        framebuffer_release(info);
 err_fballoc:
        vfree(videomemory);
+err_videomem_alloc:
        module_put(board->owner);
        return retval;
 }
index 9170c82b495ca7a84642b0d8a74a3bc803e45609..cc7d7329dc151af821153feb999998e42460267e 100644 (file)
@@ -218,7 +218,7 @@ static inline void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
                icb_offset = 0xc0000000 | (cfg->current_reg << 23);
 
        *icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24);
-       if ((*icb_addr_c) && is_nvcolor(cfg->pixelformat))
+       if (is_nvcolor(cfg->pixelformat))
                *icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24);
 }
 
index 87f0be1e78b555e52297ed7d399f3bfe9364a1a5..6294dca955005988f384fbf0bdae104f43c8037d 100644 (file)
@@ -1664,7 +1664,7 @@ static void sm501fb_stop(struct sm501fb_info *info)
                           resource_size(info->regs_res));
 }
 
-static int sm501fb_init_fb(struct fb_info *fb,
+static int __devinit sm501fb_init_fb(struct fb_info *fb,
                           enum sm501_controller head,
                           const char *fbname)
 {
index 52b0f3e8ccac694743aa4ba6b2d3db0c86448a0e..816a4fda04f5b9c0637721054a5c7aba1a456932 100644 (file)
@@ -1233,8 +1233,9 @@ static int dlfb_setup_modes(struct dlfb_data *dev,
                        if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
                                fb_add_videomode(&info->monspecs.modedb[i],
                                        &info->modelist);
-                       else /* if we've removed top/best mode */
-                               info->monspecs.misc &= ~FB_MISC_1ST_DETAIL;
+                       else if (i == 0)
+                               /* if we've removed top/best mode */
+                               info->monspecs.misc &= ~FB_MISC_1ST_DETAIL;
                }
 
                default_vmode = fb_find_best_display(&info->monspecs,
index a99bbe86db13d77183ac258333435c6be56ce187..501b3406c6d5584361676c0543d573f88e3f497a 100644 (file)
@@ -175,6 +175,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static void vesafb_destroy(struct fb_info *info)
 {
+       fb_dealloc_cmap(&info->cmap);
        if (info->screen_base)
                iounmap(info->screen_base);
        release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
index ad57593d224a1f3200a0ef818b36bcb22f6fdf75..a0c8965c1a793cb00f157eaa506355dedbd8ca00 100644 (file)
@@ -109,6 +109,7 @@ struct ds1wm_data {
        /* byte to write that makes all intr disabled, */
        /* considering active_state (IAS) (optimization) */
        u8       int_en_reg_none;
+       unsigned int reset_recover_delay; /* see ds1wm.h */
 };
 
 static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
@@ -187,6 +188,9 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
                return 1;
        }
 
+       if (ds1wm_data->reset_recover_delay)
+               msleep(ds1wm_data->reset_recover_delay);
+
        return 0;
 }
 
@@ -490,6 +494,7 @@ static int ds1wm_probe(struct platform_device *pdev)
        }
        ds1wm_data->irq = res->start;
        ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
+       ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
 
        if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
                irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
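
reset_recover_delay reaches the driver through the platform data, letting boards with slow slaves insert a settling delay after every bus reset. A sketch of such platform data (assuming the ds1wm_driver_data struct from linux/mfd/ds1wm.h; the value is hypothetical):

	static struct ds1wm_driver_data ds1wm_pdata = {
		.active_high		= 0,
		.reset_recover_delay	= 1,	/* ms; board-specific tuning */
	};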
index 022f9eb0b7bf01546479d8da03a98394f73299c6..21d816e9dfa51d7166d61c9b2272192040b56ad3 100644 (file)
@@ -535,8 +535,7 @@ config I6300ESB_WDT
 
 config INTEL_SCU_WATCHDOG
        bool "Intel SCU Watchdog for Mobile Platforms"
-       depends on WATCHDOG
-       depends on INTEL_SCU_IPC
+       depends on X86_MRST
        ---help---
          Hardware driver for the watchdog timer built into the Intel SCU
          for Intel Mobile Platforms.
@@ -600,8 +599,7 @@ config IT87_WDT
 
 config HP_WATCHDOG
        tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
-       depends on X86
-       default m
+       depends on X86 && PCI
        help
          A software monitoring watchdog and NMI sourcing driver. This driver
          will detect lockups and provide a stack trace. This is a driver that
index 750bc5281d79d18ec7c3fca85e31254aae412251..4ca5d40304b255a7b9f810ba4a27b8dfd7b69e20 100644 (file)
@@ -448,7 +448,7 @@ static void __exit at32_wdt_exit(void)
 }
 module_exit(at32_wdt_exit);
 
-MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
 MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
index 29a7cd4b90c839cdb8986dee2bc526a15a07f733..b146082bd85a977410bc240c9bf041f41d63e8b4 100644 (file)
@@ -329,4 +329,4 @@ MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
 MODULE_DESCRIPTION("GE watchdog driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS("platform: gef_wdt");
+MODULE_ALIAS("platform:gef_wdt");
index 919bdd16136fddd27674d544f11ec4feb382152d..ba4386066a42f28d2d813d377123d3bd978b839d 100644 (file)
@@ -42,7 +42,6 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/sfi.h>
-#include <linux/types.h>
 #include <asm/irq.h>
 #include <asm/atomic.h>
 #include <asm/intel_scu_ipc.h>
index 1479dc4d612922327e5c2314862ace3b99922a74..0430e093b1a0e39d8b97d62eebfbb8fa62ac8fb5 100644 (file)
@@ -66,23 +66,18 @@ static struct {
        int default_ticks;
        unsigned long inuse;
        unsigned gpio;
-       int gstate;
+       unsigned int gstate;
 } mtx1_wdt_device;
 
 static void mtx1_wdt_trigger(unsigned long unused)
 {
-       u32 tmp;
-
        spin_lock(&mtx1_wdt_device.lock);
        if (mtx1_wdt_device.running)
                ticks--;
 
        /* toggle wdt gpio */
-       mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate;
-       if (mtx1_wdt_device.gstate)
-               gpio_direction_output(mtx1_wdt_device.gpio, 1);
-       else
-               gpio_direction_input(mtx1_wdt_device.gpio);
+       mtx1_wdt_device.gstate = !mtx1_wdt_device.gstate;
+       gpio_set_value(mtx1_wdt_device.gpio, mtx1_wdt_device.gstate);
 
        if (mtx1_wdt_device.queue && ticks)
                mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
@@ -105,7 +100,7 @@ static void mtx1_wdt_start(void)
        if (!mtx1_wdt_device.queue) {
                mtx1_wdt_device.queue = 1;
                mtx1_wdt_device.gstate = 1;
-               gpio_direction_output(mtx1_wdt_device.gpio, 1);
+               gpio_set_value(mtx1_wdt_device.gpio, 1);
                mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
        }
        mtx1_wdt_device.running++;
@@ -120,7 +115,7 @@ static int mtx1_wdt_stop(void)
        if (mtx1_wdt_device.queue) {
                mtx1_wdt_device.queue = 0;
                mtx1_wdt_device.gstate = 0;
-               gpio_direction_output(mtx1_wdt_device.gpio, 0);
+               gpio_set_value(mtx1_wdt_device.gpio, 0);
        }
        ticks = mtx1_wdt_device.default_ticks;
        spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);
@@ -214,6 +209,12 @@ static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
        int ret;
 
        mtx1_wdt_device.gpio = pdev->resource[0].start;
+       ret = gpio_request_one(mtx1_wdt_device.gpio,
+                               GPIOF_OUT_INIT_HIGH, "mtx1-wdt");
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to request gpio\n");
+               return ret;
+       }
 
        spin_lock_init(&mtx1_wdt_device.lock);
        init_completion(&mtx1_wdt_device.stop);
@@ -239,11 +240,13 @@ static int __devexit mtx1_wdt_remove(struct platform_device *pdev)
                mtx1_wdt_device.queue = 0;
                wait_for_completion(&mtx1_wdt_device.stop);
        }
+
+       gpio_free(mtx1_wdt_device.gpio);
        misc_deregister(&mtx1_wdt_misc);
        return 0;
 }
 
-static struct platform_driver mtx1_wdt = {
+static struct platform_driver mtx1_wdt_driver = {
        .probe = mtx1_wdt_probe,
        .remove = __devexit_p(mtx1_wdt_remove),
        .driver.name = "mtx1-wdt",
@@ -252,12 +255,12 @@ static struct platform_driver mtx1_wdt = {
 
 static int __init mtx1_wdt_init(void)
 {
-       return platform_driver_register(&mtx1_wdt);
+       return platform_driver_register(&mtx1_wdt_driver);
 }
 
 static void __exit mtx1_wdt_exit(void)
 {
-       platform_driver_unregister(&mtx1_wdt);
+       platform_driver_unregister(&mtx1_wdt_driver);
 }
 
 module_init(mtx1_wdt_init);
index 8c4b2d5bb7da542a364d77ac7deb9e8c5a07f1c0..871caea4e1c62d7a868cfb24593d1de7c1f244c2 100644 (file)
@@ -320,6 +320,11 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
        struct wm831x_watchdog_pdata *pdata;
        int reg, ret;
 
+       if (wm831x) {
+               dev_err(&pdev->dev, "wm831x watchdog already registered\n");
+               return -EBUSY;
+       }
+
        wm831x = dev_get_drvdata(pdev->dev.parent);
 
        ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
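
The guard added above relies on the driver's file-scope device pointer doubling as an "already bound" flag: a second probe is refused with -EBUSY instead of silently clobbering the first instance. The idiom, as a hedged sketch (names illustrative, not the full wm831x probe):

    #include <linux/platform_device.h>

    struct wm831x;                      /* opaque here; the mfd core owns it */
    static struct wm831x *wm831x;       /* file scope: one instance supported */

    static int example_probe(struct platform_device *pdev)
    {
            if (wm831x) {
                    dev_err(&pdev->dev, "already registered\n");
                    return -EBUSY;      /* reject a second bind */
            }
            wm831x = dev_get_drvdata(pdev->dev.parent);
            return 0;
    }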
index 553da68bd510c8ef0c7fecc2f5676778d6525954..30df85d8fca860d69a75079951feb9bb725944f1 100644 (file)
@@ -395,9 +395,9 @@ static void unmask_evtchn(int port)
 static void xen_irq_init(unsigned irq)
 {
        struct irq_info *info;
+#ifdef CONFIG_SMP
        struct irq_desc *desc = irq_to_desc(irq);
 
-#ifdef CONFIG_SMP
        /* By default all event channels notify CPU#0. */
        cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
 #endif
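
Moving the desc declaration under CONFIG_SMP means UP builds, which never touch it, no longer declare an unused variable. The general shape of the fix, sketched:

    #include <linux/irq.h>
    #include <linux/cpumask.h>

    static void example_irq_init(unsigned irq)
    {
    #ifdef CONFIG_SMP
            /* declared only where it is used, so !SMP builds stay warning-free */
            struct irq_desc *desc = irq_to_desc(irq);

            cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
    #endif
            /* initialisation common to UP and SMP continues here */
    }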
index 0d15a3d113a2c77bb119913f6fa2229a7c0605fa..5f43bfba3c7a76906244b47fce383ab310c08c5b 100644 (file)
@@ -82,6 +82,7 @@ fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
 fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
 fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
                                      advansys/3550.bin advansys/38C0800.bin
+fw-shipped-$(CONFIG_SCSI_ISCI) += isci/isci_firmware.bin
 fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
                                         qlogic/12160.bin
 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
diff --git a/firmware/isci/isci_firmware.bin.ihex b/firmware/isci/isci_firmware.bin.ihex
new file mode 100644 (file)
index 0000000..2e66195
--- /dev/null
@@ -0,0 +1,16 @@
+:10000000495343554F454D42E80018100002000087
+:1000100000000000000000000101000000000000DE
+:10002000FFFFCF5F0100000008DD0B0000FC0F00A8
+:10003000097C0B006EFC0A00FFFFCF5F010000008F
+:1000400008DD0B0000FC0F00097C0B006EFC0A00B1
+:10005000FFFFCF5F0100000008DD0B0000FC0F0078
+:10006000097C0B006EFC0A00FFFFCF5F010000005F
+:1000700008DD0B0000FC0F00097C0B006EFC0A0081
+:100080000101000000000000FFFFCF5F0200000040
+:1000900008DD0B0000FC0F00097C0B006EFC0A0061
+:1000A000FFFFCF5F0200000008DD0B0000FC0F0027
+:1000B000097C0B006EFC0A00FFFFCF5F020000000E
+:1000C00008DD0B0000FC0F00097C0B006EFC0A0031
+:1000D000FFFFCF5F0200000008DD0B0000FC0F00F7
+:0800E000097C0B006EFC0A0014
+:00000001FF
index 9ad2369d9e35e0651885a78dc3648ef9dbe8c412..bfcb18feb1df32b91a9db144f3f74f5873c85866 100644 (file)
@@ -231,9 +231,6 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
 
 static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags)
 {
-       if (flags & IPERM_FLAG_RCU)
-               return -ECHILD;
-
        return -EIO;
 }
 
index 63039ed9576f75c396ed0dae71bec1e4abf53058..2bc5dc644b4cb82c21300a925276a861644de5bd 100644 (file)
@@ -1864,6 +1864,7 @@ cleanup:
        kfree(psinfo);
        kfree(notes);
        kfree(fpu);
+       kfree(shdr4extnum);
 #ifdef ELF_CORE_COPY_XFPREGS
        kfree(xfpu);
 #endif
index 1a2421f908f0a471f028d7e0f320caeed18592bd..610e8e0b04b88946721eb58b78791bab0e72440b 100644 (file)
@@ -762,7 +762,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
        if (!disk)
                return ERR_PTR(-ENXIO);
 
-       whole = bdget_disk(disk, 0);
+       /*
+        * Normally, @bdev should equal what's returned from bdget_disk()
+        * if partno is 0; however, some drivers (floppy) use multiple
+        * bdevs for the same physical device and @bdev may be one of the
+        * aliases.  Keep @bdev if partno is 0.  This means claimer
+        * tracking is broken for those devices but it has always been that
+        * way.
+        */
+       if (partno)
+               whole = bdget_disk(disk, 0);
+       else
+               whole = bdgrab(bdev);
+
        module_put(disk->fops->owner);
        put_disk(disk);
        if (!whole)
index 378b5b4443f3a6991e1313d4009b89160d16539e..3b859a3e6a0e9354a653e324f8c08ba85d45f959 100644 (file)
@@ -19,7 +19,6 @@
 #ifndef __BTRFS_CTREE__
 #define __BTRFS_CTREE__
 
-#include <linux/version.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/fs.h>
@@ -967,6 +966,12 @@ struct btrfs_fs_info {
        struct srcu_struct subvol_srcu;
 
        spinlock_t trans_lock;
+       /*
+        * the reloc mutex goes with the trans lock; it is taken
+        * during commit to protect us from the relocation code
+        */
+       struct mutex reloc_mutex;
+
        struct list_head trans_list;
        struct list_head hashers;
        struct list_head dead_roots;
@@ -1172,6 +1177,14 @@ struct btrfs_root {
        u32 type;
 
        u64 highest_objectid;
+
+       /* btrfs_record_root_in_trans is a multi-step process,
+        * and it can race with the balancing code.  But the
+        * window is very small, and it only exists the first time
+        * the root is added to each transaction, so in_trans_setup
+        * is used to tell us when more checks are required.
+        */
+       unsigned long in_trans_setup;
        int ref_cows;
        int track_dirty;
        int in_radix;
@@ -1181,7 +1194,6 @@ struct btrfs_root {
        struct btrfs_key defrag_max;
        int defrag_running;
        char *name;
-       int in_sysfs;
 
        /* the dirty list is only used by non-reference counted roots */
        struct list_head dirty_list;
@@ -1323,6 +1335,11 @@ struct btrfs_ioctl_defrag_range_args {
  */
 #define BTRFS_STRING_ITEM_KEY  253
 
+/*
+ * Flags for mount options.
+ *
+ * Note: don't forget to add new options to btrfs_show_options()
+ */
 #define BTRFS_MOUNT_NODATASUM          (1 << 0)
 #define BTRFS_MOUNT_NODATACOW          (1 << 1)
 #define BTRFS_MOUNT_NOBARRIER          (1 << 2)
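
The comment added above is a maintenance contract: every BTRFS_MOUNT_* bit needs a matching branch in btrfs_show_options(), or the option silently disappears from /proc/mounts; the super.c hunk later in this merge closes exactly that gap. The pairing in generic form, with illustrative flag names:

    #include <linux/seq_file.h>

    #define EXAMPLE_MOUNT_FOO   (1 << 0)
    #define EXAMPLE_MOUNT_BAR   (1 << 1)

    /* keep this in sync with the flag list above */
    static int example_show_options(struct seq_file *seq, unsigned long opts)
    {
            if (opts & EXAMPLE_MOUNT_FOO)
                    seq_puts(seq, ",foo");
            if (opts & EXAMPLE_MOUNT_BAR)
                    seq_puts(seq, ",bar");
            return 0;
    }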
index 6462c29d2d37fcc8ec779f1d6d3b6817003a68c2..98c68e658a9b2eb08a8ba99d0b5691f16fc8a67d 100644 (file)
@@ -82,19 +82,16 @@ static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
        return root->fs_info->delayed_root;
 }
 
-static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
-                                                       struct inode *inode)
+static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
 {
-       struct btrfs_delayed_node *node;
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(inode);
-       int ret;
+       struct btrfs_delayed_node *node;
 
-again:
        node = ACCESS_ONCE(btrfs_inode->delayed_node);
        if (node) {
-               atomic_inc(&node->refs);        /* can be accessed */
+               atomic_inc(&node->refs);
                return node;
        }
 
@@ -102,8 +99,10 @@ again:
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
        if (node) {
                if (btrfs_inode->delayed_node) {
+                       atomic_inc(&node->refs);        /* can be accessed */
+                       BUG_ON(btrfs_inode->delayed_node != node);
                        spin_unlock(&root->inode_lock);
-                       goto again;
+                       return node;
                }
                btrfs_inode->delayed_node = node;
                atomic_inc(&node->refs);        /* can be accessed */
@@ -113,6 +112,23 @@ again:
        }
        spin_unlock(&root->inode_lock);
 
+       return NULL;
+}
+
+static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
+                                                       struct inode *inode)
+{
+       struct btrfs_delayed_node *node;
+       struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+       struct btrfs_root *root = btrfs_inode->root;
+       u64 ino = btrfs_ino(inode);
+       int ret;
+
+again:
+       node = btrfs_get_delayed_node(inode);
+       if (node)
+               return node;
+
        node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
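
The refactor above leaves btrfs_get_delayed_node() as a pure lookup: take root->inode_lock, and if the radix tree still holds the node, bump node->refs before dropping the lock, so the caller always receives either NULL or a counted reference. That invariant in generic form (simplified types, illustrative names):

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>
    #include <asm/atomic.h>

    struct example_node {
            atomic_t refs;
    };

    static struct example_node *lookup_and_hold(struct radix_tree_root *tree,
                                                spinlock_t *lock,
                                                unsigned long ino)
    {
            struct example_node *node;

            spin_lock(lock);
            node = radix_tree_lookup(tree, ino);
            if (node)
                    atomic_inc(&node->refs);    /* caller must do the matching put */
            spin_unlock(lock);
            return node;
    }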
@@ -297,7 +313,6 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
                item->data_len = data_len;
                item->ins_or_del = 0;
                item->bytes_reserved = 0;
-               item->block_rsv = NULL;
                item->delayed_node = NULL;
                atomic_set(&item->refs, 1);
        }
@@ -549,19 +564,6 @@ struct btrfs_delayed_item *__btrfs_next_delayed_item(
        return next;
 }
 
-static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
-                                                       struct inode *inode)
-{
-       struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
-       struct btrfs_delayed_node *delayed_node;
-
-       delayed_node = btrfs_inode->delayed_node;
-       if (delayed_node)
-               atomic_inc(&delayed_node->refs);
-
-       return delayed_node;
-}
-
 static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
                                                   u64 root_id)
 {
@@ -593,10 +595,8 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 
        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
-       if (!ret) {
+       if (!ret)
                item->bytes_reserved = num_bytes;
-               item->block_rsv = dst_rsv;
-       }
 
        return ret;
 }
@@ -604,10 +604,13 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_item *item)
 {
+       struct btrfs_block_rsv *rsv;
+
        if (!item->bytes_reserved)
                return;
 
-       btrfs_block_rsv_release(root, item->block_rsv,
+       rsv = &root->fs_info->global_block_rsv;
+       btrfs_block_rsv_release(root, rsv,
                                item->bytes_reserved);
 }
 
@@ -1014,6 +1017,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
+       struct btrfs_block_rsv *block_rsv;
        int ret = 0;
 
        path = btrfs_alloc_path();
@@ -1021,6 +1025,9 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        path->leave_spinning = 1;
 
+       block_rsv = trans->block_rsv;
+       trans->block_rsv = &root->fs_info->global_block_rsv;
+
        delayed_root = btrfs_get_delayed_root(root);
 
        curr_node = btrfs_first_delayed_node(delayed_root);
@@ -1045,6 +1052,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
        }
 
        btrfs_free_path(path);
+       trans->block_rsv = block_rsv;
        return ret;
 }
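
Each of the delayed-items hunks applies the same discipline to trans->block_rsv: remember the caller's reservation, point the handle at the global reserve while the delayed items run, and put the original back on every exit path. The shape as a generic sketch (struct and function names illustrative):

    struct example_rsv { long reserved; };
    struct example_trans { struct example_rsv *block_rsv; };

    /* run work() against the global reserve, then restore the caller's rsv */
    static int with_global_rsv(struct example_trans *trans,
                               struct example_rsv *global,
                               int (*work)(struct example_trans *))
    {
            struct example_rsv *saved = trans->block_rsv;
            int ret;

            trans->block_rsv = global;
            ret = work(trans);
            trans->block_rsv = saved;   /* restored on success and failure alike */
            return ret;
    }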
 
@@ -1052,6 +1060,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                              struct btrfs_delayed_node *node)
 {
        struct btrfs_path *path;
+       struct btrfs_block_rsv *block_rsv;
        int ret;
 
        path = btrfs_alloc_path();
@@ -1059,6 +1068,9 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        path->leave_spinning = 1;
 
+       block_rsv = trans->block_rsv;
+       trans->block_rsv = &node->root->fs_info->global_block_rsv;
+
        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (!ret)
                ret = btrfs_delete_delayed_items(trans, path, node->root, node);
@@ -1066,6 +1078,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                ret = btrfs_update_delayed_inode(trans, node->root, path, node);
        btrfs_free_path(path);
 
+       trans->block_rsv = block_rsv;
        return ret;
 }
 
@@ -1116,6 +1129,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
        struct btrfs_path *path;
        struct btrfs_delayed_node *delayed_node = NULL;
        struct btrfs_root *root;
+       struct btrfs_block_rsv *block_rsv;
        unsigned long nr = 0;
        int need_requeue = 0;
        int ret;
@@ -1134,6 +1148,9 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
        if (IS_ERR(trans))
                goto free_path;
 
+       block_rsv = trans->block_rsv;
+       trans->block_rsv = &root->fs_info->global_block_rsv;
+
        ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
        if (!ret)
                ret = btrfs_delete_delayed_items(trans, path, root,
@@ -1176,6 +1193,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
 
        nr = trans->blocks_used;
 
+       trans->block_rsv = block_rsv;
        btrfs_end_transaction_dmeta(trans, root);
        __btrfs_btree_balance_dirty(root, nr);
 free_path:
@@ -1222,6 +1240,13 @@ again:
        return 0;
 }
 
+void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
+{
+       struct btrfs_delayed_root *delayed_root;
+       delayed_root = btrfs_get_delayed_root(root);
+       WARN_ON(btrfs_first_delayed_node(delayed_root));
+}
+
 void btrfs_balance_delayed_items(struct btrfs_root *root)
 {
        struct btrfs_delayed_root *delayed_root;
@@ -1382,8 +1407,7 @@ end:
 
 int btrfs_inode_delayed_dir_index_count(struct inode *inode)
 {
-       struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
-       int ret = 0;
+       struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
 
        if (!delayed_node)
                return -ENOENT;
@@ -1393,11 +1417,14 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
         * a new directory index is added into the delayed node and index_cnt
         * is updated now. So we needn't lock the delayed node.
         */
-       if (!delayed_node->index_cnt)
+       if (!delayed_node->index_cnt) {
+               btrfs_release_delayed_node(delayed_node);
                return -EINVAL;
+       }
 
        BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
-       return ret;
+       btrfs_release_delayed_node(delayed_node);
+       return 0;
 }
 
 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
@@ -1591,6 +1618,57 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                      inode->i_ctime.tv_nsec);
 }
 
+int btrfs_fill_inode(struct inode *inode, u32 *rdev)
+{
+       struct btrfs_delayed_node *delayed_node;
+       struct btrfs_inode_item *inode_item;
+       struct btrfs_timespec *tspec;
+
+       delayed_node = btrfs_get_delayed_node(inode);
+       if (!delayed_node)
+               return -ENOENT;
+
+       mutex_lock(&delayed_node->mutex);
+       if (!delayed_node->inode_dirty) {
+               mutex_unlock(&delayed_node->mutex);
+               btrfs_release_delayed_node(delayed_node);
+               return -ENOENT;
+       }
+
+       inode_item = &delayed_node->inode_item;
+
+       inode->i_uid = btrfs_stack_inode_uid(inode_item);
+       inode->i_gid = btrfs_stack_inode_gid(inode_item);
+       btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
+       inode->i_mode = btrfs_stack_inode_mode(inode_item);
+       inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
+       inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
+       BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
+       BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
+       inode->i_rdev = 0;
+       *rdev = btrfs_stack_inode_rdev(inode_item);
+       BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
+
+       tspec = btrfs_inode_atime(inode_item);
+       inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
+       inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+       tspec = btrfs_inode_mtime(inode_item);
+       inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
+       inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+       tspec = btrfs_inode_ctime(inode_item);
+       inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
+       inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+
+       inode->i_generation = BTRFS_I(inode)->generation;
+       BTRFS_I(inode)->index_cnt = (u64)-1;
+
+       mutex_unlock(&delayed_node->mutex);
+       btrfs_release_delayed_node(delayed_node);
+       return 0;
+}
+
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, struct inode *inode)
 {
index eb7d240aa6485db61c8d13299519abc9b81d3cc0..8d27af4bd8b99f00f073d18125589fa39e2269fa 100644 (file)
@@ -75,7 +75,6 @@ struct btrfs_delayed_item {
        struct list_head tree_list;     /* used for batch insert/delete items */
        struct list_head readdir_list;  /* used for readdir items */
        u64 bytes_reserved;
-       struct btrfs_block_rsv *block_rsv;
        struct btrfs_delayed_node *delayed_node;
        atomic_t refs;
        int ins_or_del;
@@ -120,6 +119,7 @@ void btrfs_kill_delayed_inode_items(struct inode *inode);
 
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, struct inode *inode);
+int btrfs_fill_inode(struct inode *inode, u32 *rdev);
 
 /* Used for drop dead root */
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
@@ -138,4 +138,8 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
 /* for init */
 int __init btrfs_delayed_inode_init(void);
 void btrfs_delayed_inode_exit(void);
+
+/* for debugging */
+void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
+
 #endif
index 9f68c68986535fbdbd21f01abad64906e19fde68..1ac8db5dc0a31b9a742099956b121cd75ba0a1a6 100644 (file)
@@ -1044,7 +1044,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->name = NULL;
-       root->in_sysfs = 0;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
@@ -1300,19 +1299,21 @@ again:
                return root;
 
        root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
-       if (!root->free_ino_ctl)
-               goto fail;
        root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
                                        GFP_NOFS);
-       if (!root->free_ino_pinned)
+       if (!root->free_ino_pinned || !root->free_ino_ctl) {
+               ret = -ENOMEM;
                goto fail;
+       }
 
        btrfs_init_free_ino_ctl(root);
        mutex_init(&root->fs_commit_mutex);
        spin_lock_init(&root->cache_lock);
        init_waitqueue_head(&root->cache_wait);
 
-       set_anon_super(&root->anon_super, NULL);
+       ret = set_anon_super(&root->anon_super, NULL);
+       if (ret)
+               goto fail;
 
        if (btrfs_root_refs(&root->root_item) == 0) {
                ret = -ENOENT;
@@ -1618,6 +1619,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->fs_roots_radix_lock);
        spin_lock_init(&fs_info->delayed_iput_lock);
        spin_lock_init(&fs_info->defrag_inodes_lock);
+       mutex_init(&fs_info->reloc_mutex);
 
        init_completion(&fs_info->kobj_unregister);
        fs_info->tree_root = tree_root;
index b42efc2ded513ec10c38eb7ebcee9247d2f2c825..71cd456fdb60360d44edd113ab7c593bdacfa658 100644 (file)
@@ -3314,10 +3314,6 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
        if (reserved == 0)
                return 0;
 
-       /* nothing to shrink - nothing to reclaim */
-       if (root->fs_info->delalloc_bytes == 0)
-               return 0;
-
        max_reclaim = min(reserved, to_reclaim);
 
        while (loops < 1024) {
@@ -4846,7 +4842,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                                     u64 num_bytes, u64 empty_size,
                                     u64 search_start, u64 search_end,
                                     u64 hint_byte, struct btrfs_key *ins,
-                                    int data)
+                                    u64 data)
 {
        int ret = 0;
        struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -4873,7 +4869,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 
        space_info = __find_space_info(root->fs_info, data);
        if (!space_info) {
-               printk(KERN_ERR "No space info for %d\n", data);
+               printk(KERN_ERR "No space info for %llu\n", (unsigned long long)data);
                return -ENOSPC;
        }
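
Widening data to u64 is what forces the printk fix above; kernel convention prints u64 with %llu plus an explicit cast, since u64 may be unsigned long rather than unsigned long long on 64-bit targets. In miniature:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void print_space_info_id(u64 data)
    {
            /* the cast keeps the format portable across 32- and 64-bit builds */
            printk(KERN_ERR "No space info for %llu\n", (unsigned long long)data);
    }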
 
index 9f985a429877fdd5d04100514f3ff4a5f2abeb18..bf0d61567f3d65a24e9d11802acdd0c8c5abf32b 100644 (file)
@@ -1893,9 +1893,12 @@ void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
 
        while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, offset_index);
-               unlink_free_space(ctl, info);
-               kfree(info->bitmap);
-               kmem_cache_free(btrfs_free_space_cachep, info);
+               if (!info->bitmap) {
+                       unlink_free_space(ctl, info);
+                       kmem_cache_free(btrfs_free_space_cachep, info);
+               } else {
+                       free_bitmap(ctl, info);
+               }
                if (need_resched()) {
                        spin_unlock(&ctl->tree_lock);
                        cond_resched();
index 751ddf8fc58a152fd442979cb758637cbcf01bc0..3601f0aebddf61931906f2cbe49d5559778e5e51 100644 (file)
@@ -2509,6 +2509,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
        int maybe_acls;
        u32 rdev;
        int ret;
+       bool filled = false;
+
+       ret = btrfs_fill_inode(inode, &rdev);
+       if (!ret)
+               filled = true;
 
        path = btrfs_alloc_path();
        BUG_ON(!path);
@@ -2520,6 +2525,10 @@ static void btrfs_read_locked_inode(struct inode *inode)
                goto make_bad;
 
        leaf = path->nodes[0];
+
+       if (filled)
+               goto cache_acl;
+
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        if (!leaf->map_token)
@@ -2556,7 +2565,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
 
        BTRFS_I(inode)->index_cnt = (u64)-1;
        BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
-
+cache_acl:
        /*
         * try to precache a NULL acl entry for files that don't have
         * any xattrs or acls
@@ -2572,7 +2581,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
        }
 
        btrfs_free_path(path);
-       inode_item = NULL;
 
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
@@ -2670,12 +2678,14 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
        int ret;
 
        /*
-        * If root is tree root, it means this inode is used to
-        * store free space information. And these inodes are updated
-        * when committing the transaction, so they needn't delaye to
-        * be updated, or deadlock will occured.
+        * If the inode is a free space inode, we can deadlock during commit
+        * if we put it into the delayed code.
+        *
+        * The data relocation inode should also be directly updated
+        * without delay.
         */
-       if (!is_free_space_inode(root, inode)) {
+       if (!is_free_space_inode(root, inode) &&
+           root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
                ret = btrfs_delayed_update_inode(trans, root, inode);
                if (!ret)
                        btrfs_set_inode_last_trans(trans, inode);
@@ -3076,6 +3086,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
        ret = btrfs_update_inode(trans, root, dir);
        BUG_ON(ret);
 
+       btrfs_free_path(path);
        return 0;
 }
 
@@ -4519,6 +4530,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        inode_tree_add(inode);
 
        trace_btrfs_inode_new(inode);
+       btrfs_set_inode_last_trans(trans, inode);
 
        return inode;
 fail:
index b793d112d1f65c80b95e06be70233cd5f5aa2654..a3c4751e07db0d7704e8ac1aa3269d7c75e3c48a 100644 (file)
@@ -482,8 +482,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
        ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
        BUG_ON(ret);
 
+       spin_lock(&root->fs_info->trans_lock);
        list_add(&pending_snapshot->list,
                 &trans->transaction->pending_snapshots);
+       spin_unlock(&root->fs_info->trans_lock);
        if (async_transid) {
                *async_transid = trans->transid;
                ret = btrfs_commit_transaction_async(trans,
index b1ef27cc673b8abc9e135b05e0da24ff7a0c7bb2..5e0a3dc79a453f3930e9c749c1cf08c63e5c7c6a 100644 (file)
@@ -1368,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
        int ret;
 
        if (!root->reloc_root)
-               return 0;
+               goto out;
 
        reloc_root = root->reloc_root;
        root_item = &reloc_root->root_item;
@@ -1390,6 +1390,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
        ret = btrfs_update_root(trans, root->fs_info->tree_root,
                                &reloc_root->root_key, root_item);
        BUG_ON(ret);
+
+out:
        return 0;
 }
 
@@ -2142,10 +2144,11 @@ int prepare_to_merge(struct reloc_control *rc, int err)
        u64 num_bytes = 0;
        int ret;
 
-       spin_lock(&root->fs_info->trans_lock);
+       mutex_lock(&root->fs_info->reloc_mutex);
        rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
        rc->merging_rsv_size += rc->nodes_relocated * 2;
-       spin_unlock(&root->fs_info->trans_lock);
+       mutex_unlock(&root->fs_info->reloc_mutex);
+
 again:
        if (!err) {
                num_bytes = rc->merging_rsv_size;
@@ -2214,9 +2217,16 @@ int merge_reloc_roots(struct reloc_control *rc)
        int ret;
 again:
        root = rc->extent_root;
-       spin_lock(&root->fs_info->trans_lock);
+
+       /*
+        * this serializes us with btrfs_record_root_in_trans;
+        * we have to make sure nobody is in the middle of
+        * adding their roots to the list while we are
+        * doing this splice
+        */
+       mutex_lock(&root->fs_info->reloc_mutex);
        list_splice_init(&rc->reloc_roots, &reloc_roots);
-       spin_unlock(&root->fs_info->trans_lock);
+       mutex_unlock(&root->fs_info->reloc_mutex);
 
        while (!list_empty(&reloc_roots)) {
                found = 1;
@@ -3590,17 +3600,19 @@ next:
 static void set_reloc_control(struct reloc_control *rc)
 {
        struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
-       spin_lock(&fs_info->trans_lock);
+
+       mutex_lock(&fs_info->reloc_mutex);
        fs_info->reloc_ctl = rc;
-       spin_unlock(&fs_info->trans_lock);
+       mutex_unlock(&fs_info->reloc_mutex);
 }
 
 static void unset_reloc_control(struct reloc_control *rc)
 {
        struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
-       spin_lock(&fs_info->trans_lock);
+
+       mutex_lock(&fs_info->reloc_mutex);
        fs_info->reloc_ctl = NULL;
-       spin_unlock(&fs_info->trans_lock);
+       mutex_unlock(&fs_info->reloc_mutex);
 }
 
 static int check_extent_flags(u64 flags)
index 0bb4ebbb71b7b0bf6861b7a5efde6f5cf2f5b97e..15634d4648d719922bc36313657c31b60066973c 100644 (file)
@@ -723,6 +723,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",clear_cache");
        if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
                seq_puts(seq, ",user_subvol_rm_allowed");
+       if (btrfs_test_opt(root, ENOSPC_DEBUG))
+               seq_puts(seq, ",enospc_debug");
+       if (btrfs_test_opt(root, AUTO_DEFRAG))
+               seq_puts(seq, ",autodefrag");
+       if (btrfs_test_opt(root, INODE_MAP_CACHE))
+               seq_puts(seq, ",inode_cache");
        return 0;
 }
 
index c3c223ae66918d9e244fc909c4d6b7836490ef95..daac9ae6d7319b4f2cc68c0893a86c7d779e6516 100644 (file)
 #include "disk-io.h"
 #include "transaction.h"
 
-static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-               (unsigned long long)btrfs_root_used(&root->root_item));
-}
-
-static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-               (unsigned long long)btrfs_root_limit(&root->root_item));
-}
-
-static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf)
-{
-
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-               (unsigned long long)btrfs_super_bytes_used(&fs->super_copy));
-}
-
-static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-               (unsigned long long)btrfs_super_total_bytes(&fs->super_copy));
-}
-
-static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-               (unsigned long long)btrfs_super_sectorsize(&fs->super_copy));
-}
-
-/* this is for root attrs (subvols/snapshots) */
-struct btrfs_root_attr {
-       struct attribute attr;
-       ssize_t (*show)(struct btrfs_root *, char *);
-       ssize_t (*store)(struct btrfs_root *, const char *, size_t);
-};
-
-#define ROOT_ATTR(name, mode, show, store) \
-static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
-                                                             show, store)
-
-ROOT_ATTR(blocks_used, 0444,   root_blocks_used_show,  NULL);
-ROOT_ATTR(block_limit, 0644,   root_block_limit_show,  NULL);
-
-static struct attribute *btrfs_root_attrs[] = {
-       &btrfs_root_attr_blocks_used.attr,
-       &btrfs_root_attr_block_limit.attr,
-       NULL,
-};
-
-/* this is for super attrs (actual full fs) */
-struct btrfs_super_attr {
-       struct attribute attr;
-       ssize_t (*show)(struct btrfs_fs_info *, char *);
-       ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t);
-};
-
-#define SUPER_ATTR(name, mode, show, store) \
-static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
-                                                               show, store)
-
-SUPER_ATTR(blocks_used,                0444,   super_blocks_used_show,         NULL);
-SUPER_ATTR(total_blocks,       0444,   super_total_blocks_show,        NULL);
-SUPER_ATTR(blocksize,          0444,   super_blocksize_show,           NULL);
-
-static struct attribute *btrfs_super_attrs[] = {
-       &btrfs_super_attr_blocks_used.attr,
-       &btrfs_super_attr_total_blocks.attr,
-       &btrfs_super_attr_blocksize.attr,
-       NULL,
-};
-
-static ssize_t btrfs_super_attr_show(struct kobject *kobj,
-                                   struct attribute *attr, char *buf)
-{
-       struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
-                                               super_kobj);
-       struct btrfs_super_attr *a = container_of(attr,
-                                                 struct btrfs_super_attr,
-                                                 attr);
-
-       return a->show ? a->show(fs, buf) : 0;
-}
-
-static ssize_t btrfs_super_attr_store(struct kobject *kobj,
-                                    struct attribute *attr,
-                                    const char *buf, size_t len)
-{
-       struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
-                                               super_kobj);
-       struct btrfs_super_attr *a = container_of(attr,
-                                                 struct btrfs_super_attr,
-                                                 attr);
-
-       return a->store ? a->store(fs, buf, len) : 0;
-}
-
-static ssize_t btrfs_root_attr_show(struct kobject *kobj,
-                                   struct attribute *attr, char *buf)
-{
-       struct btrfs_root *root = container_of(kobj, struct btrfs_root,
-                                               root_kobj);
-       struct btrfs_root_attr *a = container_of(attr,
-                                                struct btrfs_root_attr,
-                                                attr);
-
-       return a->show ? a->show(root, buf) : 0;
-}
-
-static ssize_t btrfs_root_attr_store(struct kobject *kobj,
-                                    struct attribute *attr,
-                                    const char *buf, size_t len)
-{
-       struct btrfs_root *root = container_of(kobj, struct btrfs_root,
-                                               root_kobj);
-       struct btrfs_root_attr *a = container_of(attr,
-                                                struct btrfs_root_attr,
-                                                attr);
-       return a->store ? a->store(root, buf, len) : 0;
-}
-
-static void btrfs_super_release(struct kobject *kobj)
-{
-       struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
-                                               super_kobj);
-       complete(&fs->kobj_unregister);
-}
-
-static void btrfs_root_release(struct kobject *kobj)
-{
-       struct btrfs_root *root = container_of(kobj, struct btrfs_root,
-                                               root_kobj);
-       complete(&root->kobj_unregister);
-}
-
-static const struct sysfs_ops btrfs_super_attr_ops = {
-       .show   = btrfs_super_attr_show,
-       .store  = btrfs_super_attr_store,
-};
-
-static const struct sysfs_ops btrfs_root_attr_ops = {
-       .show   = btrfs_root_attr_show,
-       .store  = btrfs_root_attr_store,
-};
-
 /* /sys/fs/btrfs/ entry */
 static struct kset *btrfs_kset;
 
index 2b3590b9fe98a6107efc9a7b13ce053e499c049e..51dcec86757f071654bc3866123e65157ef0286b 100644 (file)
@@ -126,28 +126,85 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
  * to make sure the old root from before we joined the transaction is deleted
  * when the transaction commits
  */
-int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
+static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
 {
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);
 
+               /*
+                * see below for in_trans_setup usage rules
+                * we have the reloc mutex held now, so there
+                * is only one writer in this function
+                */
+               root->in_trans_setup = 1;
+
+               /* make sure readers find in_trans_setup before
+                * they find our root->last_trans update
+                */
+               smp_wmb();
+
                spin_lock(&root->fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid) {
                        spin_unlock(&root->fs_info->fs_roots_radix_lock);
                        return 0;
                }
-               root->last_trans = trans->transid;
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
+               root->last_trans = trans->transid;
+
+               /* this is pretty tricky.  We don't want to
+                * take the relocation lock in btrfs_record_root_in_trans
+                * unless we're really doing the first setup for this root in
+                * this transaction.
+                *
+                * Normally we'd use root->last_trans as a flag to decide
+                * if we want to take the expensive mutex.
+                *
+                * But, we have to set root->last_trans before we
+                * init the relocation root, otherwise, we trip over warnings
+                * in ctree.c.  The solution used here is to flag ourselves
+                * with root->in_trans_setup.  When this is 1, we're still
+                * fixing up the reloc trees and everyone must wait.
+                *
+                * When this is zero, they can trust root->last_trans and fly
+                * through btrfs_record_root_in_trans without having to take the
+                * lock.  smp_wmb() makes sure that all the writes above are
+                * done before we pop in the zero below
+                */
                btrfs_init_reloc_root(trans, root);
+               smp_wmb();
+               root->in_trans_setup = 0;
        }
        return 0;
 }
 
+
+int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *root)
+{
+       if (!root->ref_cows)
+               return 0;
+
+       /*
+        * see record_root_in_trans for comments about in_trans_setup usage
+        * and barriers
+        */
+       smp_rmb();
+       if (root->last_trans == trans->transid &&
+           !root->in_trans_setup)
+               return 0;
+
+       mutex_lock(&root->fs_info->reloc_mutex);
+       record_root_in_trans(trans, root);
+       mutex_unlock(&root->fs_info->reloc_mutex);
+
+       return 0;
+}
+
 /* wait for commit against the current transaction to become unblocked
  * when this is done, it is safe to start a new transaction, but the current
  * transaction might not be fully on disk.
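
The in_trans_setup machinery introduced above is a publish pattern: the single writer (serialized by reloc_mutex) raises the flag, orders its stores with smp_wmb(), and lowers the flag again; lockless readers pair that with smp_rmb() before trusting last_trans. Condensed from the hunk itself:

    /* writer: holds reloc_mutex, so there is exactly one at a time */
    root->in_trans_setup = 1;
    smp_wmb();                      /* flag visible before last_trans update */
    root->last_trans = trans->transid;
    /* ... reloc-root setup ... */
    smp_wmb();                      /* setup visible before the flag drops */
    root->in_trans_setup = 0;

    /* reader: the lockless fast path in btrfs_record_root_in_trans() */
    smp_rmb();
    if (root->last_trans == trans->transid && !root->in_trans_setup)
            return 0;               /* setup already finished this transaction */
    /* otherwise take reloc_mutex and record the root */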
@@ -882,7 +939,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        parent = dget_parent(dentry);
        parent_inode = parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
-       btrfs_record_root_in_trans(trans, parent_root);
+       record_root_in_trans(trans, parent_root);
 
        /*
         * insert the directory item
@@ -900,7 +957,16 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        ret = btrfs_update_inode(trans, parent_root, parent_inode);
        BUG_ON(ret);
 
-       btrfs_record_root_in_trans(trans, root);
+       /*
+        * pull in the delayed directory update and the delayed
+        * inode item; otherwise we corrupt the FS during snapshot
+        */
+       ret = btrfs_run_delayed_items(trans, root);
+       BUG_ON(ret);
+
+       record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);
@@ -961,14 +1027,6 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
        int ret;
 
        list_for_each_entry(pending, head, list) {
-               /*
-                * We must deal with the delayed items before creating
-                * snapshots, or we will create a snapthot with inconsistent
-                * information.
-               */
-               ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
-               BUG_ON(ret);
-
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
        }
@@ -1241,21 +1299,42 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                        schedule_timeout(1);
 
                finish_wait(&cur_trans->writer_wait, &wait);
-               spin_lock(&root->fs_info->trans_lock);
-               root->fs_info->trans_no_join = 1;
-               spin_unlock(&root->fs_info->trans_lock);
        } while (atomic_read(&cur_trans->num_writers) > 1 ||
                 (should_grow && cur_trans->num_joined != joined));
 
-       ret = create_pending_snapshots(trans, root->fs_info);
-       BUG_ON(ret);
+       /*
+        * Ok now we need to make sure to block out any other joins while we
+        * commit the transaction.  We could have started a join before setting
+        * no_join, so make sure to wait for num_writers to == 1 again.
+        */
+       spin_lock(&root->fs_info->trans_lock);
+       root->fs_info->trans_no_join = 1;
+       spin_unlock(&root->fs_info->trans_lock);
+       wait_event(cur_trans->writer_wait,
+                  atomic_read(&cur_trans->num_writers) == 1);
+
+       /*
+        * the reloc mutex makes sure that we stop
+        * the balancing code from coming in and moving
+        * extents around in the middle of the commit
+        */
+       mutex_lock(&root->fs_info->reloc_mutex);
 
        ret = btrfs_run_delayed_items(trans, root);
        BUG_ON(ret);
 
+       ret = create_pending_snapshots(trans, root->fs_info);
+       BUG_ON(ret);
+
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);
 
+       /*
+        * make sure none of the code above managed to slip in a
+        * delayed item
+        */
+       btrfs_assert_delayed_root_empty(root);
+
        WARN_ON(cur_trans != trans->transaction);
 
        btrfs_scrub_pause(root);
@@ -1312,6 +1391,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        root->fs_info->running_transaction = NULL;
        root->fs_info->trans_no_join = 0;
        spin_unlock(&root->fs_info->trans_lock);
+       mutex_unlock(&root->fs_info->reloc_mutex);
 
        wake_up(&root->fs_info->transaction_wait);
 
index 592396c6dc47465bccf4422d6259d0dfe2885e0c..4ce8a9f41d1ec3916753bd610183a82b4dc6aa21 100644 (file)
@@ -3177,7 +3177,7 @@ again:
                tmp_key.offset = (u64)-1;
 
                wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
-               BUG_ON(!wc.replay_dest);
+               BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));
 
                wc.replay_dest->log_root = log;
                btrfs_record_root_in_trans(trans, wc.replay_dest);
index 1efa56e18f9b905ceac4dbcb3faa0221ab331ce2..19450bc536327c77f7add37e723b668da410eedd 100644 (file)
@@ -2098,7 +2098,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
-               BUG_ON(ret && ret != -ENOSPC);
+               if (ret && ret != -ENOSPC)
+                       goto error;
                key.offset = found_key.offset - 1;
        }
        ret = 0;
index 9542f07d0b9306774e7172afed25b6f809503c06..4698a5c553dc010fa0aedabe3f3f4e0fb68b65c8 100644 (file)
@@ -290,7 +290,6 @@ static int striped_read(struct inode *inode,
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len;
        int io_align, page_align;
-       int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
        int left, pages_left;
        int read;
        struct page **page_pos;
@@ -326,12 +325,11 @@ more:
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
 
        if (ret > 0) {
-               int didpages =
-                       ((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;
+               int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
 
                if (read < pos - off) {
                        dout(" zero gap %llu to %llu\n", off + read, pos);
-                       ceph_zero_page_vector_range(page_off + read,
+                       ceph_zero_page_vector_range(page_align + read,
                                                    pos - off - read, pages);
                }
                pos += ret;
@@ -356,7 +354,7 @@ more:
                                left = inode->i_size - pos;
 
                        dout("zero tail %d\n", left);
-                       ceph_zero_page_vector_range(page_off + read, left,
+                       ceph_zero_page_vector_range(page_align + read, left,
                                                    pages);
                        read += left;
                }
@@ -478,9 +476,6 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
        else
                pos = *offset;
 
-       io_align = pos & ~PAGE_MASK;
-       buf_align = (unsigned long)data & ~PAGE_MASK;
-
        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
        if (ret < 0)
                return ret;
@@ -504,6 +499,8 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
         * boundary.  this isn't atomic, unfortunately.  :(
         */
 more:
+       io_align = pos & ~PAGE_MASK;
+       buf_align = (unsigned long)data & ~PAGE_MASK;
        len = left;
        if (file->f_flags & O_DIRECT) {
                /* write from beginning of first page, regardless of
@@ -593,6 +590,7 @@ out:
                pos += len;
                written += len;
                left -= len;
+               data += len;
                if (left)
                        goto more;
 
index 79743d146be69ec8ce3f279a032e1a5459eb462c..0c1d91756528969d409b7f4480b1653fc508fd5c 100644 (file)
@@ -1438,12 +1438,15 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
        struct dentry *temp;
        char *path;
        int len, pos;
+       unsigned seq;
 
        if (dentry == NULL)
                return ERR_PTR(-EINVAL);
 
 retry:
        len = 0;
+       seq = read_seqbegin(&rename_lock);
+       rcu_read_lock();
        for (temp = dentry; !IS_ROOT(temp);) {
                struct inode *inode = temp->d_inode;
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
@@ -1455,10 +1458,12 @@ retry:
                        len += 1 + temp->d_name.len;
                temp = temp->d_parent;
                if (temp == NULL) {
+                       rcu_read_unlock();
                        pr_err("build_path corrupt dentry %p\n", dentry);
                        return ERR_PTR(-EINVAL);
                }
        }
+       rcu_read_unlock();
        if (len)
                len--;  /* no leading '/' */
 
@@ -1467,9 +1472,12 @@ retry:
                return ERR_PTR(-ENOMEM);
        pos = len;
        path[pos] = 0;  /* trailing null */
+       rcu_read_lock();
        for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
-               struct inode *inode = temp->d_inode;
+               struct inode *inode;
 
+               spin_lock(&temp->d_lock);
+               inode = temp->d_inode;
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
                        dout("build_path path+%d: %p SNAPDIR\n",
                             pos, temp);
@@ -1478,21 +1486,26 @@ retry:
                        break;
                } else {
                        pos -= temp->d_name.len;
-                       if (pos < 0)
+                       if (pos < 0) {
+                               spin_unlock(&temp->d_lock);
                                break;
+                       }
                        strncpy(path + pos, temp->d_name.name,
                                temp->d_name.len);
                }
+               spin_unlock(&temp->d_lock);
                if (pos)
                        path[--pos] = '/';
                temp = temp->d_parent;
                if (temp == NULL) {
+                       rcu_read_unlock();
                        pr_err("build_path corrupt dentry\n");
                        kfree(path);
                        return ERR_PTR(-EINVAL);
                }
        }
-       if (pos != 0) {
+       rcu_read_unlock();
+       if (pos != 0 || read_seqretry(&rename_lock, seq)) {
                pr_err("build_path did not end path lookup where "
                       "expected, namelen is %d, pos is %d\n", len, pos);
                /* presumably this is only possible if racing with a
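
The rename_lock bracketing added above is the standard seqlock retry idiom: sample the sequence count, do the lockless d_parent walk under RCU (taking d_lock only around each dentry whose name is read), and restart if a rename bumped the count meanwhile. Reduced to its skeleton:

    #include <linux/seqlock.h>
    #include <linux/rcupdate.h>

    extern seqlock_t rename_lock;       /* VFS-global, defined in fs/dcache.c */

    static void example_walk(void)
    {
            unsigned seq;

    retry:
            seq = read_seqbegin(&rename_lock);
            rcu_read_lock();
            /* ... lockless walk toward the root, d_lock per dentry ... */
            rcu_read_unlock();
            if (read_seqretry(&rename_lock, seq))
                    goto retry;         /* a rename raced with the walk */
    }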
index 53ed1ad2c112808ea27c92dce5fff5ebefad8651..f66cc1625150839244870bb6bbb3d27893b7ec82 100644 (file)
@@ -156,6 +156,6 @@ config CIFS_ACL
 
 config CIFS_NFSD_EXPORT
          bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)"
-         depends on CIFS && EXPERIMENTAL
+         depends on CIFS && EXPERIMENTAL && BROKEN
          help
           Allows NFS server to export a CIFS mounted share (nfsd over cifs)
index ffb1459dc6ecf04f2da417a85c734374177f45ce..7260e11e21f8429304b519b2ec0f5aeb477ce0b9 100644 (file)
@@ -42,6 +42,7 @@
 #define CIFS_MOUNT_MULTIUSER   0x20000 /* multiuser mount */
 #define CIFS_MOUNT_STRICT_IO   0x40000 /* strict cache mode */
 #define CIFS_MOUNT_RWPIDFORWARD        0x80000 /* use pid forwarding for rw */
+#define CIFS_MOUNT_POSIXACL    0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
 
 struct cifs_sb_info {
        struct rb_root tlink_tree;
index e9def996e3835610c6f8cc94dfd1a6579f2a4c39..bc4b12ca537bfaf1a6556cf0b1df5a4bc8af25ac 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/namei.h>
 #include <net/ipv6.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -104,8 +105,7 @@ cifs_sb_deactive(struct super_block *sb)
 }
 
 static int
-cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
-               const char *devname, int silent)
+cifs_read_super(struct super_block *sb)
 {
        struct inode *inode;
        struct cifs_sb_info *cifs_sb;
@@ -113,22 +113,16 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
 
        cifs_sb = CIFS_SB(sb);
 
-       spin_lock_init(&cifs_sb->tlink_tree_lock);
-       cifs_sb->tlink_tree = RB_ROOT;
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
+               sb->s_flags |= MS_POSIXACL;
 
-       rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
-       if (rc)
-               return rc;
-
-       cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
+       if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
+               sb->s_maxbytes = MAX_LFS_FILESIZE;
+       else
+               sb->s_maxbytes = MAX_NON_LFS;
 
-       rc = cifs_mount(sb, cifs_sb, volume_info, devname);
-
-       if (rc) {
-               if (!silent)
-                       cERROR(1, "cifs_mount failed w/return code = %d", rc);
-               goto out_mount_failed;
-       }
+       /* BB FIXME fix time_gran to be larger for LANMAN sessions */
+       sb->s_time_gran = 100;
 
        sb->s_magic = CIFS_MAGIC_NUMBER;
        sb->s_op = &cifs_super_ops;
@@ -170,37 +164,14 @@ out_no_root:
        if (inode)
                iput(inode);
 
-       cifs_umount(sb, cifs_sb);
-
-out_mount_failed:
-       bdi_destroy(&cifs_sb->bdi);
        return rc;
 }
 
-static void
-cifs_put_super(struct super_block *sb)
+static void cifs_kill_sb(struct super_block *sb)
 {
-       int rc = 0;
-       struct cifs_sb_info *cifs_sb;
-
-       cFYI(1, "In cifs_put_super");
-       cifs_sb = CIFS_SB(sb);
-       if (cifs_sb == NULL) {
-               cFYI(1, "Empty cifs superblock info passed to unmount");
-               return;
-       }
-
-       rc = cifs_umount(sb, cifs_sb);
-       if (rc)
-               cERROR(1, "cifs_umount failed with return code %d", rc);
-       if (cifs_sb->mountdata) {
-               kfree(cifs_sb->mountdata);
-               cifs_sb->mountdata = NULL;
-       }
-
-       unload_nls(cifs_sb->local_nls);
-       bdi_destroy(&cifs_sb->bdi);
-       kfree(cifs_sb);
+       struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+       kill_anon_super(sb);
+       cifs_umount(cifs_sb);
 }
 
 static int
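
With .put_super gone, ->kill_sb becomes the single teardown entry point: run the generic VFS shutdown first via kill_anon_super(), then unwind the filesystem-private state that mount established. The ordering, sketched with an illustrative info struct rather than cifs_sb_info:

    #include <linux/fs.h>
    #include <linux/slab.h>

    struct example_sb_info { int placeholder; };

    static void example_kill_sb(struct super_block *sb)
    {
            struct example_sb_info *info = sb->s_fs_info;

            kill_anon_super(sb);        /* VFS side first */
            kfree(info);                /* then the private mount state */
    }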
@@ -257,9 +228,6 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags)
 {
        struct cifs_sb_info *cifs_sb;
 
-       if (flags & IPERM_FLAG_RCU)
-               return -ECHILD;
-
        cifs_sb = CIFS_SB(inode->i_sb);
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
@@ -551,7 +519,6 @@ static int cifs_drop_inode(struct inode *inode)
 }
 
 static const struct super_operations cifs_super_ops = {
-       .put_super = cifs_put_super,
        .statfs = cifs_statfs,
        .alloc_inode = cifs_alloc_inode,
        .destroy_inode = cifs_destroy_inode,
@@ -576,91 +543,55 @@ static const struct super_operations cifs_super_ops = {
 static struct dentry *
 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
 {
-       int xid, rc;
-       struct inode *inode;
-       struct qstr name;
-       struct dentry *dparent = NULL, *dchild = NULL, *alias;
+       struct dentry *dentry;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
-       unsigned int i, full_len, len;
-       char *full_path = NULL, *pstart;
+       char *full_path = NULL;
+       char *s, *p;
        char sep;
+       int xid;
 
        full_path = cifs_build_path_to_root(vol, cifs_sb,
                                            cifs_sb_master_tcon(cifs_sb));
        if (full_path == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        cFYI(1, "Get root dentry for %s", full_path);
 
        xid = GetXid();
        sep = CIFS_DIR_SEP(cifs_sb);
-       dparent = dget(sb->s_root);
-       full_len = strlen(full_path);
-       full_path[full_len] = sep;
-       pstart = full_path + 1;
-
-       for (i = 1, len = 0; i <= full_len; i++) {
-               if (full_path[i] != sep || !len) {
-                       len++;
-                       continue;
-               }
-
-               full_path[i] = 0;
-               cFYI(1, "get dentry for %s", pstart);
-
-               name.name = pstart;
-               name.len = len;
-               name.hash = full_name_hash(pstart, len);
-               dchild = d_lookup(dparent, &name);
-               if (dchild == NULL) {
-                       cFYI(1, "not exists");
-                       dchild = d_alloc(dparent, &name);
-                       if (dchild == NULL) {
-                               dput(dparent);
-                               dparent = NULL;
-                               goto out;
-                       }
-               }
-
-               cFYI(1, "get inode");
-               if (dchild->d_inode == NULL) {
-                       cFYI(1, "not exists");
-                       inode = NULL;
-                       if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
-                               rc = cifs_get_inode_info_unix(&inode, full_path,
-                                                             sb, xid);
-                       else
-                               rc = cifs_get_inode_info(&inode, full_path,
-                                                        NULL, sb, xid, NULL);
-                       if (rc) {
-                               dput(dchild);
-                               dput(dparent);
-                               dparent = NULL;
-                               goto out;
-                       }
-                       alias = d_materialise_unique(dchild, inode);
-                       if (alias != NULL) {
-                               dput(dchild);
-                               if (IS_ERR(alias)) {
-                                       dput(dparent);
-                                       dparent = NULL;
-                                       goto out;
-                               }
-                               dchild = alias;
-                       }
-               }
-               cFYI(1, "parent %p, child %p", dparent, dchild);
-
-               dput(dparent);
-               dparent = dchild;
-               len = 0;
-               pstart = full_path + i + 1;
-               full_path[i] = sep;
-       }
-out:
+       dentry = dget(sb->s_root);
+       p = s = full_path;
+
+       do {
+               struct inode *dir = dentry->d_inode;
+               struct dentry *child;
+
+               /* skip separators */
+               while (*s == sep)
+                       s++;
+               if (!*s)
+                       break;
+               p = s++;
+               /* next separator */
+               while (*s && *s != sep)
+                       s++;
+
+               mutex_lock(&dir->i_mutex);
+               child = lookup_one_len(p, dentry, s - p);
+               mutex_unlock(&dir->i_mutex);
+               dput(dentry);
+               dentry = child;
+       } while (!IS_ERR(dentry));
        _FreeXid(xid);
        kfree(full_path);
-       return dparent;
+       return dentry;
+}
+
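
A note on the rewritten walk: instead of hashing each name and
materialising dentries by hand, cifs_get_root() now just slices the
path and hands every component to lookup_one_len() under the parent's
i_mutex. The slicing in isolation, as a user-space sketch (demo only,
not the kernel code):

#include <stdio.h>

int main(void)
{
	const char *s = "\\srv\\share\\dir\\file", *p;
	char sep = '\\';

	for (;;) {
		while (*s == sep)		/* skip separators */
			s++;
		if (!*s)
			break;
		p = s++;
		while (*s && *s != sep)		/* find next separator */
			s++;
		printf("lookup component: %.*s\n", (int)(s - p), p);
	}
	return 0;
}
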
+static int cifs_set_super(struct super_block *sb, void *data)
+{
+       struct cifs_mnt_data *mnt_data = data;
+       sb->s_fs_info = mnt_data->cifs_sb;
+       return set_anon_super(sb, NULL);
 }
 
 static struct dentry *
@@ -676,82 +607,80 @@ cifs_do_mount(struct file_system_type *fs_type,
 
        cFYI(1, "Devname: %s flags: %d ", dev_name, flags);
 
-       rc = cifs_setup_volume_info(&volume_info, (char *)data, dev_name);
-       if (rc)
-               return ERR_PTR(rc);
+       volume_info = cifs_get_volume_info((char *)data, dev_name);
+       if (IS_ERR(volume_info))
+               return ERR_CAST(volume_info);
 
        cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
        if (cifs_sb == NULL) {
                root = ERR_PTR(-ENOMEM);
-               goto out;
+               goto out_nls;
+       }
+
+       cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
+       if (cifs_sb->mountdata == NULL) {
+               root = ERR_PTR(-ENOMEM);
+               goto out_cifs_sb;
        }
 
        cifs_setup_cifs_sb(volume_info, cifs_sb);
 
+       rc = cifs_mount(cifs_sb, volume_info);
+       if (rc) {
+               if (!(flags & MS_SILENT))
+                       cERROR(1, "cifs_mount failed w/return code = %d", rc);
+               root = ERR_PTR(rc);
+               goto out_mountdata;
+       }
+
        mnt_data.vol = volume_info;
        mnt_data.cifs_sb = cifs_sb;
        mnt_data.flags = flags;
 
-       sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data);
+       sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
        if (IS_ERR(sb)) {
                root = ERR_CAST(sb);
-               goto out_cifs_sb;
+               cifs_umount(cifs_sb);
+               goto out;
        }
 
-       if (sb->s_fs_info) {
+       if (sb->s_root) {
                cFYI(1, "Use existing superblock");
-               goto out_shared;
-       }
-
-       /*
-        * Copy mount params for use in submounts. Better to do
-        * the copy here and deal with the error before cleanup gets
-        * complicated post-mount.
-        */
-       cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
-       if (cifs_sb->mountdata == NULL) {
-               root = ERR_PTR(-ENOMEM);
-               goto out_super;
-       }
-
-       sb->s_flags = flags;
-       /* BB should we make this contingent on mount parm? */
-       sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
-       sb->s_fs_info = cifs_sb;
+               cifs_umount(cifs_sb);
+       } else {
+               sb->s_flags = flags;
+               /* BB should we make this contingent on mount parm? */
+               sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
+
+               rc = cifs_read_super(sb);
+               if (rc) {
+                       root = ERR_PTR(rc);
+                       goto out_super;
+               }
 
-       rc = cifs_read_super(sb, volume_info, dev_name,
-                            flags & MS_SILENT ? 1 : 0);
-       if (rc) {
-               root = ERR_PTR(rc);
-               goto out_super;
+               sb->s_flags |= MS_ACTIVE;
        }
 
-       sb->s_flags |= MS_ACTIVE;
-
        root = cifs_get_root(volume_info, sb);
-       if (root == NULL)
+       if (IS_ERR(root))
                goto out_super;
 
        cFYI(1, "dentry root is: %p", root);
        goto out;
 
-out_shared:
-       root = cifs_get_root(volume_info, sb);
-       if (root)
-               cFYI(1, "dentry root is: %p", root);
-       goto out;
-
 out_super:
-       kfree(cifs_sb->mountdata);
        deactivate_locked_super(sb);
+out:
+       cifs_cleanup_volume_info(volume_info);
+       return root;
 
+out_mountdata:
+       kfree(cifs_sb->mountdata);
 out_cifs_sb:
-       unload_nls(cifs_sb->local_nls);
        kfree(cifs_sb);
-
-out:
-       cifs_cleanup_volume_info(&volume_info);
-       return root;
+out_nls:
+       unload_nls(volume_info->local_nls);
+       goto out;
 }
 
 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -840,7 +769,7 @@ struct file_system_type cifs_fs_type = {
        .owner = THIS_MODULE,
        .name = "cifs",
        .mount = cifs_do_mount,
-       .kill_sb = kill_anon_super,
+       .kill_sb = cifs_kill_sb,
        /*  .fs_flags */
 };
 const struct inode_operations cifs_dir_inode_ops = {
index 0900e1658c967de0fc2f4d70cc7645c9363a5d30..036ca83e5f461c2ff3e807ded5b7b797e5b6f836 100644 (file)
@@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "1.73"
+#define CIFS_VERSION   "1.74"
 #endif                         /* _CIFSFS_H */
index 953f84413c771b1cb0e3bb404a4349f7d90b08fa..8df28e925e5b180240ad35ce2b304dec18ed5188 100644 (file)
@@ -154,12 +154,11 @@ extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
 extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
                               struct cifs_sb_info *cifs_sb);
 extern int cifs_match_super(struct super_block *, void *);
-extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
-extern int cifs_setup_volume_info(struct smb_vol **pvolume_info,
-                                 char *mount_data, const char *devname);
-extern int cifs_mount(struct super_block *, struct cifs_sb_info *,
-                     struct smb_vol *, const char *);
-extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
+extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
+extern struct smb_vol *cifs_get_volume_info(char *mount_data,
+                                           const char *devname);
+extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
+extern void cifs_umount(struct cifs_sb_info *);
 extern void cifs_dfs_release_automount_timer(void);
 void cifs_proc_init(void);
 void cifs_proc_clean(void);
@@ -218,7 +217,8 @@ extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
                        struct dfs_info3_param **preferrals,
                        int remap);
 extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
-                                struct super_block *sb, struct smb_vol *vol);
+                                struct cifs_sb_info *cifs_sb,
+                                struct smb_vol *vol);
 extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
                        struct kstatfs *FSData);
 extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
index 12cf72dd0c42963b75f674edbb651bb32ccb486c..ccc1afa0bf3b697eaccc92351884042d549b7ca4 100644 (file)
@@ -65,6 +65,8 @@ static int ip_connect(struct TCP_Server_Info *server);
 static int generic_ip_connect(struct TCP_Server_Info *server);
 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
 static void cifs_prune_tlinks(struct work_struct *work);
+static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
+                                       const char *devname);
 
 /*
  * cifs tcp session reconnection
@@ -2240,8 +2242,8 @@ cifs_match_super(struct super_block *sb, void *data)
 
        rc = compare_mount_options(sb, mnt_data);
 out:
-       cifs_put_tlink(tlink);
        spin_unlock(&cifs_tcp_ses_lock);
+       cifs_put_tlink(tlink);
        return rc;
 }
 
@@ -2474,14 +2476,6 @@ generic_ip_connect(struct TCP_Server_Info *server)
        if (rc < 0)
                return rc;
 
-       rc = socket->ops->connect(socket, saddr, slen, 0);
-       if (rc < 0) {
-               cFYI(1, "Error %d connecting to server", rc);
-               sock_release(socket);
-               server->ssocket = NULL;
-               return rc;
-       }
-
        /*
         * Eventually check for other socket options to change from
         * the default. sock_setsockopt not used because it expects
@@ -2510,6 +2504,14 @@ generic_ip_connect(struct TCP_Server_Info *server)
                 socket->sk->sk_sndbuf,
                 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
 
+       rc = socket->ops->connect(socket, saddr, slen, 0);
+       if (rc < 0) {
+               cFYI(1, "Error %d connecting to server", rc);
+               sock_release(socket);
+               server->ssocket = NULL;
+               return rc;
+       }
+
        if (sport == htons(RFC1001_PORT))
                rc = ip_rfc1001_connect(server);
 
@@ -2546,7 +2548,7 @@ ip_connect(struct TCP_Server_Info *server)
 }
 
 void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
-                         struct super_block *sb, struct smb_vol *vol_info)
+                         struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info)
 {
        /* if we are reconnecting then should we check to see if
         * any requested capabilities changed locally e.g. via
@@ -2600,22 +2602,23 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
                        cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
                else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
                        cFYI(1, "negotiated posix acl support");
-                       if (sb)
-                               sb->s_flags |= MS_POSIXACL;
+                       if (cifs_sb)
+                               cifs_sb->mnt_cifs_flags |=
+                                       CIFS_MOUNT_POSIXACL;
                }
 
                if (vol_info && vol_info->posix_paths == 0)
                        cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
                else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
                        cFYI(1, "negotiate posix pathnames");
-                       if (sb)
-                               CIFS_SB(sb)->mnt_cifs_flags |=
+                       if (cifs_sb)
+                               cifs_sb->mnt_cifs_flags |=
                                        CIFS_MOUNT_POSIX_PATHS;
                }
 
-               if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
+               if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
                        if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
-                               CIFS_SB(sb)->rsize = 127 * 1024;
+                               cifs_sb->rsize = 127 * 1024;
                                cFYI(DBG2, "larger reads not supported by srv");
                        }
                }
@@ -2662,6 +2665,9 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 {
        INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
 
+       spin_lock_init(&cifs_sb->tlink_tree_lock);
+       cifs_sb->tlink_tree = RB_ROOT;
+
        if (pvolume_info->rsize > CIFSMaxBufSize) {
                cERROR(1, "rsize %d too large, using MaxBufSize",
                        pvolume_info->rsize);
@@ -2750,21 +2756,21 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 
 /*
  * When the server supports very large writes via POSIX extensions, we can
- * allow up to 2^24 - PAGE_CACHE_SIZE.
+ * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including
+ * the RFC1001 length.
  *
  * Note that this might make for "interesting" allocation problems during
- * writeback however (as we have to allocate an array of pointers for the
- * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * writeback however as we have to allocate an array of pointers for the
+ * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
  */
-#define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE)
+#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
 
 /*
- * When the server doesn't allow large posix writes, default to a wsize of
- * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size
- * described in RFC1001. This allows space for the header without going over
- * that by default.
+ * When the server doesn't allow large posix writes, only allow a wsize of
+ * 128k minus the size of the WRITE_AND_X header. That allows for a write up
+ * to the maximum size described by RFC1002.
  */
-#define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE)
+#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4)
 
 /*
  * The default wsize is 1M. find_get_pages seems to return a maximum of 256
@@ -2783,11 +2789,18 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 
        /* can server support 24-bit write sizes? (via UNIX extensions) */
        if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
-               wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE);
+               wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE);
 
-       /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */
-       if (!(server->capabilities & CAP_LARGE_WRITE_X))
-               wsize = min_t(unsigned int, wsize, USHRT_MAX);
+       /*
+        * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set?
+        * Limit it to max buffer offered by the server, minus the size of the
+        * WRITEX header, not including the 4 byte RFC1001 length.
+        */
+       if (!(server->capabilities & CAP_LARGE_WRITE_X) ||
+           (!(server->capabilities & CAP_UNIX) &&
+            (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED))))
+               wsize = min_t(unsigned int, wsize,
+                               server->maxBuf - sizeof(WRITE_REQ) + 4);
 
        /* hard limit of CIFS_MAX_WSIZE */
        wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
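
Laid out in one place, the negotiation clamps a requested wsize three
times. A sketch, collapsing the several UNIX-extension capability bits
into a single unix_cap flag and writing hdr for sizeof(WRITE_REQ) - 4,
i.e. the WRITE_AND_X header without the 4-byte RFC1001 length (not the
kernel code):

#define MIN(a, b)	((a) < (b) ? (a) : (b))

static unsigned int negotiate_wsize(unsigned int requested,
				    unsigned int max_buf, unsigned int hdr,
				    int large_write_cap, int unix_cap,
				    int signing)
{
	unsigned int wsize = requested;

	if (!unix_cap)				/* no large POSIX writes */
		wsize = MIN(wsize, 128U * 1024 - hdr);
	if (!large_write_cap || (!unix_cap && signing))
		wsize = MIN(wsize, max_buf - hdr);
	return MIN(wsize, (1U << 24) - 1 - hdr);	/* hard cap */
}
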
@@ -2819,15 +2832,9 @@ is_path_accessible(int xid, struct cifs_tcon *tcon,
        return rc;
 }
 
-void
-cifs_cleanup_volume_info(struct smb_vol **pvolume_info)
+static void
+cleanup_volume_info_contents(struct smb_vol *volume_info)
 {
-       struct smb_vol *volume_info;
-
-       if (!pvolume_info || !*pvolume_info)
-               return;
-
-       volume_info = *pvolume_info;
        kfree(volume_info->username);
        kzfree(volume_info->password);
        kfree(volume_info->UNC);
@@ -2835,28 +2842,44 @@ cifs_cleanup_volume_info(struct smb_vol **pvolume_info)
        kfree(volume_info->domainname);
        kfree(volume_info->iocharset);
        kfree(volume_info->prepath);
+}
+
+void
+cifs_cleanup_volume_info(struct smb_vol *volume_info)
+{
+       if (!volume_info)
+               return;
+       cleanup_volume_info_contents(volume_info);
        kfree(volume_info);
-       *pvolume_info = NULL;
-       return;
 }
 
+
 #ifdef CONFIG_CIFS_DFS_UPCALL
 /* build_path_to_root returns full path to root when
  * we do not have an existing connection (tcon) */
 static char *
-build_unc_path_to_root(const struct smb_vol *volume_info,
+build_unc_path_to_root(const struct smb_vol *vol,
                const struct cifs_sb_info *cifs_sb)
 {
-       char *full_path;
+       char *full_path, *pos;
+       unsigned int pplen = vol->prepath ? strlen(vol->prepath) : 0;
+       unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
 
-       int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1);
-       full_path = kmalloc(unc_len + 1, GFP_KERNEL);
+       full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
        if (full_path == NULL)
                return ERR_PTR(-ENOMEM);
 
-       strncpy(full_path, volume_info->UNC, unc_len);
-       full_path[unc_len] = 0; /* add trailing null */
+       strncpy(full_path, vol->UNC, unc_len);
+       pos = full_path + unc_len;
+
+       if (pplen) {
+               strncpy(pos, vol->prepath, pplen);
+               pos += pplen;
+       }
+
+       *pos = '\0'; /* add trailing null */
        convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
+       cFYI(1, "%s: full_path=%s", __func__, full_path);
        return full_path;
 }
 
@@ -2899,15 +2922,18 @@ expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
                                                   &fake_devname);
 
                free_dfs_info_array(referrals, num_referrals);
-               kfree(fake_devname);
-
-               if (cifs_sb->mountdata != NULL)
-                       kfree(cifs_sb->mountdata);
 
                if (IS_ERR(mdata)) {
                        rc = PTR_ERR(mdata);
                        mdata = NULL;
+               } else {
+                       cleanup_volume_info_contents(volume_info);
+                       memset(volume_info, '\0', sizeof(*volume_info));
+                       rc = cifs_setup_volume_info(volume_info, mdata,
+                                                       fake_devname);
                }
+               kfree(fake_devname);
+               kfree(cifs_sb->mountdata);
                cifs_sb->mountdata = mdata;
        }
        kfree(full_path);
@@ -2915,29 +2941,20 @@ expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
 }
 #endif
 
-int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
-                          const char *devname)
+static int
+cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
+                       const char *devname)
 {
-       struct smb_vol *volume_info;
        int rc = 0;
 
-       *pvolume_info = NULL;
-
-       volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL);
-       if (!volume_info) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
-       if (cifs_parse_mount_options(mount_data, devname,
-                                    volume_info)) {
-               rc = -EINVAL;
-               goto out;
-       }
+       if (cifs_parse_mount_options(mount_data, devname, volume_info))
+               return -EINVAL;
 
        if (volume_info->nullauth) {
                cFYI(1, "null user");
-               volume_info->username = "";
+               volume_info->username = kzalloc(1, GFP_KERNEL);
+               if (volume_info->username == NULL)
+                       return -ENOMEM;
        } else if (volume_info->username) {
                /* BB fixme parse for domain name here */
                cFYI(1, "Username: %s", volume_info->username);
@@ -2945,8 +2962,7 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
                cifserror("No username specified");
        /* In userspace mount helper we can get user name from alternate
           locations such as env variables and files on disk */
-               rc = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        /* this is needed for ASCII cp to Unicode converts */
@@ -2958,21 +2974,34 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
                if (volume_info->local_nls == NULL) {
                        cERROR(1, "CIFS mount error: iocharset %s not found",
                                 volume_info->iocharset);
-                       rc = -ELIBACC;
-                       goto out;
+                       return -ELIBACC;
                }
        }
 
-       *pvolume_info = volume_info;
-       return rc;
-out:
-       cifs_cleanup_volume_info(&volume_info);
        return rc;
 }
 
+struct smb_vol *
+cifs_get_volume_info(char *mount_data, const char *devname)
+{
+       int rc;
+       struct smb_vol *volume_info;
+
+       volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL);
+       if (!volume_info)
+               return ERR_PTR(-ENOMEM);
+
+       rc = cifs_setup_volume_info(volume_info, mount_data, devname);
+       if (rc) {
+               cifs_cleanup_volume_info(volume_info);
+               volume_info = ERR_PTR(rc);
+       }
+
+       return volume_info;
+}
+
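
Returning the structure (or an ERR_PTR) instead of filling an
out-parameter removes the rc-plus-pointer juggling from every caller.
Caller shape as a sketch; cifs_do_mount() earlier in this patch does
the same with ERR_CAST():

static int demo_parse(char *mount_data, const char *devname)
{
	struct smb_vol *vol = cifs_get_volume_info(mount_data, devname);

	if (IS_ERR(vol))
		return PTR_ERR(vol);	/* the error travels in the pointer */
	/* ... use vol ... */
	cifs_cleanup_volume_info(vol);
	return 0;
}
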
 int
-cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
-          struct smb_vol *volume_info, const char *devname)
+cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
 {
        int rc = 0;
        int xid;
@@ -2983,6 +3012,15 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
        struct tcon_link *tlink;
 #ifdef CONFIG_CIFS_DFS_UPCALL
        int referral_walks_count = 0;
+#endif
+
+       rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
+       if (rc)
+               return rc;
+
+       cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
+
+#ifdef CONFIG_CIFS_DFS_UPCALL
 try_mount_again:
        /* cleanup activities if we're chasing a referral */
        if (referral_walks_count) {
@@ -2991,7 +3029,6 @@ try_mount_again:
                else if (pSesInfo)
                        cifs_put_smb_ses(pSesInfo);
 
-               cifs_cleanup_volume_info(&volume_info);
                FreeXid(xid);
        }
 #endif
@@ -3007,6 +3044,7 @@ try_mount_again:
        srvTcp = cifs_get_tcp_session(volume_info);
        if (IS_ERR(srvTcp)) {
                rc = PTR_ERR(srvTcp);
+               bdi_destroy(&cifs_sb->bdi);
                goto out;
        }
 
@@ -3018,14 +3056,6 @@ try_mount_again:
                goto mount_fail_check;
        }
 
-       if (pSesInfo->capabilities & CAP_LARGE_FILES)
-               sb->s_maxbytes = MAX_LFS_FILESIZE;
-       else
-               sb->s_maxbytes = MAX_NON_LFS;
-
-       /* BB FIXME fix time_gran to be larger for LANMAN sessions */
-       sb->s_time_gran = 100;
-
        /* search for existing tcon to this server share */
        tcon = cifs_get_tcon(pSesInfo, volume_info);
        if (IS_ERR(tcon)) {
@@ -3038,7 +3068,7 @@ try_mount_again:
        if (tcon->ses->capabilities & CAP_UNIX) {
                /* reset of caps checks mount to see if unix extensions
                   disabled for just this mount */
-               reset_cifs_unix_caps(xid, tcon, sb, volume_info);
+               reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info);
                if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
                    (le64_to_cpu(tcon->fsUnixInfo.Capability) &
                     CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
@@ -3161,6 +3191,7 @@ mount_fail_check:
                        cifs_put_smb_ses(pSesInfo);
                else
                        cifs_put_tcp_session(srvTcp);
+               bdi_destroy(&cifs_sb->bdi);
                goto out;
        }
 
@@ -3335,8 +3366,8 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
        return rc;
 }
 
-int
-cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
+void
+cifs_umount(struct cifs_sb_info *cifs_sb)
 {
        struct rb_root *root = &cifs_sb->tlink_tree;
        struct rb_node *node;
@@ -3357,7 +3388,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
        }
        spin_unlock(&cifs_sb->tlink_tree_lock);
 
-       return 0;
+       bdi_destroy(&cifs_sb->bdi);
+       kfree(cifs_sb->mountdata);
+       unload_nls(cifs_sb->local_nls);
+       kfree(cifs_sb);
 }
 
 int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
@@ -3451,7 +3485,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
                goto out;
        }
 
-       snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", fsuid);
+       snprintf(username, sizeof(username), "krb50x%x", fsuid);
        vol_info->username = username;
        vol_info->local_nls = cifs_sb->local_nls;
        vol_info->linux_uid = fsuid;
index 81914df47ef1612c1ab742d228503bbf5e598046..fa8c21d913bc5b212191d2c444cb5ce74e1d4058 100644 (file)
@@ -55,6 +55,7 @@ build_path_from_dentry(struct dentry *direntry)
        char dirsep;
        struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
        struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+       unsigned seq;
 
        if (direntry == NULL)
                return NULL;  /* not much we can do if dentry is freed and
@@ -68,22 +69,29 @@ build_path_from_dentry(struct dentry *direntry)
                dfsplen = 0;
 cifs_bp_rename_retry:
        namelen = dfsplen;
+       seq = read_seqbegin(&rename_lock);
+       rcu_read_lock();
        for (temp = direntry; !IS_ROOT(temp);) {
                namelen += (1 + temp->d_name.len);
                temp = temp->d_parent;
                if (temp == NULL) {
                        cERROR(1, "corrupt dentry");
+                       rcu_read_unlock();
                        return NULL;
                }
        }
+       rcu_read_unlock();
 
        full_path = kmalloc(namelen+1, GFP_KERNEL);
        if (full_path == NULL)
                return full_path;
        full_path[namelen] = 0; /* trailing null */
+       rcu_read_lock();
        for (temp = direntry; !IS_ROOT(temp);) {
+               spin_lock(&temp->d_lock);
                namelen -= 1 + temp->d_name.len;
                if (namelen < 0) {
+                       spin_unlock(&temp->d_lock);
                        break;
                } else {
                        full_path[namelen] = dirsep;
@@ -91,14 +99,17 @@ cifs_bp_rename_retry:
                                temp->d_name.len);
                        cFYI(0, "name: %s", full_path + namelen);
                }
+               spin_unlock(&temp->d_lock);
                temp = temp->d_parent;
                if (temp == NULL) {
                        cERROR(1, "corrupt dentry");
+                       rcu_read_unlock();
                        kfree(full_path);
                        return NULL;
                }
        }
-       if (namelen != dfsplen) {
+       rcu_read_unlock();
+       if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
                cERROR(1, "did not end path lookup where expected namelen is %d",
                        namelen);
                /* presumably this is only possible if racing with a rename
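
The two rcu_read_lock() passes above are bracketed by the rename_lock
sequence counter: if any rename ran while the path was being measured
or copied, read_seqretry() reports it and the function restarts at
cifs_bp_rename_retry. The reader-side shape in isolation (a sketch;
assemble_under_rcu() is a hypothetical helper standing in for the two
walks):

static char *demo_build_path(struct dentry *direntry)
{
	char *path;
	unsigned seq;

retry:
	seq = read_seqbegin(&rename_lock);
	path = assemble_under_rcu(direntry);
	if (path && read_seqretry(&rename_lock, seq)) {
		kfree(path);		/* a rename raced with us: redo */
		goto retry;
	}
	return path;
}
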
index 816696621ec9ea1be2d5b351ac4fc17d0e8f41aa..42e5363b41024c8c34d399398dfb77d1216bcd03 100644 (file)
@@ -92,6 +92,7 @@ static void cifs_fscache_disable_inode_cookie(struct inode *inode)
 
        if (cifsi->fscache) {
                cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
+               fscache_uncache_all_inode_pages(cifsi->fscache, inode);
                fscache_relinquish_cookie(cifsi->fscache, 1);
                cifsi->fscache = NULL;
        }
index 3892ab817a36407975d6738a1b20d7c90c846574..d3e619692ee0f0437e26d9d91d61545859f248bc 100644 (file)
@@ -428,8 +428,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
                        (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                flags |= NTLMSSP_NEGOTIATE_SIGN;
                if (!ses->server->session_estab)
-                       flags |= NTLMSSP_NEGOTIATE_KEY_XCH |
-                               NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+                       flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
        }
 
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -465,10 +464,11 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
                NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
        if (ses->server->sec_mode &
-          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                flags |= NTLMSSP_NEGOTIATE_SIGN;
-       if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
-               flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
+               if (!ses->server->session_estab)
+                       flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+       }
 
        tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
index 1525d5e662b65cd11649587458013bd47dc7fc2a..1c5b770c314135a3ed932c88260470f13edd6008 100644 (file)
@@ -90,12 +90,10 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
        sg_init_one(&sgout, out, 8);
 
        rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8);
-       if (rc) {
+       if (rc)
                cERROR(1, "could not encrypt crypt key rc: %d\n", rc);
-               crypto_free_blkcipher(tfm_des);
-               goto smbhash_err;
-       }
 
+       crypto_free_blkcipher(tfm_des);
 smbhash_err:
        return rc;
 }
index 6cbb3afb36dc2b28b9aa3f2b585c195330894699..cb140ef293e46e8739669f2e69eea701d8213874 100644 (file)
@@ -43,8 +43,6 @@ const struct file_operations coda_ioctl_operations = {
 /* the coda pioctl inode ops */
 static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags)
 {
-       if (flags & IPERM_FLAG_RCU)
-               return -ECHILD;
        return (mask & MAY_EXEC) ? -EACCES : 0;
 }
 
index e141939080f0d6db65c525305cab5a74b9dd552c..739fb59bcdc25123f3bdc620246045ddabdac34d 100644 (file)
@@ -37,7 +37,7 @@ static DEFINE_MUTEX(read_mutex);
 /* These macros may change in future, to provide better st_ino semantics. */
 #define OFFSET(x)      ((x)->i_ino)
 
-static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
+static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
 {
        if (!cino->offset)
                return offset + 1;
@@ -61,7 +61,7 @@ static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
 }
 
 static struct inode *get_cramfs_inode(struct super_block *sb,
-       struct cramfs_inode *cramfs_inode, unsigned int offset)
+       const struct cramfs_inode *cramfs_inode, unsigned int offset)
 {
        struct inode *inode;
        static struct timespec zerotime;
@@ -317,7 +317,7 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
        /* Set it all up.. */
        sb->s_op = &cramfs_ops;
        root = get_cramfs_inode(sb, &super.root, 0);
-       if (!root)
+       if (IS_ERR(root))
                goto out;
        sb->s_root = d_alloc_root(root);
        if (!sb->s_root) {
@@ -423,6 +423,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
        unsigned int offset = 0;
+       struct inode *inode = NULL;
        int sorted;
 
        mutex_lock(&read_mutex);
@@ -449,8 +450,8 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 
                for (;;) {
                        if (!namelen) {
-                               mutex_unlock(&read_mutex);
-                               return ERR_PTR(-EIO);
+                               inode = ERR_PTR(-EIO);
+                               goto out;
                        }
                        if (name[namelen-1])
                                break;
@@ -462,17 +463,18 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
                if (retval > 0)
                        continue;
                if (!retval) {
-                       struct cramfs_inode entry = *de;
-                       mutex_unlock(&read_mutex);
-                       d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off));
-                       return NULL;
+                       inode = get_cramfs_inode(dir->i_sb, de, dir_off);
+                       break;
                }
                /* else (retval < 0) */
                if (sorted)
                        break;
        }
+out:
        mutex_unlock(&read_mutex);
-       d_add(dentry, NULL);
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
+       d_add(dentry, inode);
        return NULL;
 }
 
index 37f72ee5bf7c9577f164ce4a472d7d4d8f0c4aaa..6e4ea6d87774b6b31a212309fde5c3bed55a30c3 100644 (file)
@@ -2213,14 +2213,15 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry,
  * The hash value has to match the hash queue that the dentry is on..
  */
 /*
- * d_move - move a dentry
+ * __d_move - move a dentry
  * @dentry: entry to move
  * @target: new dentry
  *
  * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
+ * dcache entries should not be moved in this way.  Caller hold
+ * rename_lock.
  */
-void d_move(struct dentry * dentry, struct dentry * target)
+static void __d_move(struct dentry * dentry, struct dentry * target)
 {
        if (!dentry->d_inode)
                printk(KERN_WARNING "VFS: moving negative dcache entry\n");
@@ -2228,8 +2229,6 @@ void d_move(struct dentry * dentry, struct dentry * target)
        BUG_ON(d_ancestor(dentry, target));
        BUG_ON(d_ancestor(target, dentry));
 
-       write_seqlock(&rename_lock);
-
        dentry_lock_for_move(dentry, target);
 
        write_seqcount_begin(&dentry->d_seq);
@@ -2275,6 +2274,20 @@ void d_move(struct dentry * dentry, struct dentry * target)
        spin_unlock(&target->d_lock);
        fsnotify_d_move(dentry);
        spin_unlock(&dentry->d_lock);
+}
+
+/*
+ * d_move - move a dentry
+ * @dentry: entry to move
+ * @target: new dentry
+ *
+ * Update the dcache to reflect the move of a file name. Negative
+ * dcache entries should not be moved in this way.
+ */
+void d_move(struct dentry *dentry, struct dentry *target)
+{
+       write_seqlock(&rename_lock);
+       __d_move(dentry, target);
        write_sequnlock(&rename_lock);
 }
 EXPORT_SYMBOL(d_move);
@@ -2302,7 +2315,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
  * This helper attempts to cope with remotely renamed directories
  *
  * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
+ * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
@@ -2317,11 +2330,6 @@ static struct dentry *__d_unalias(struct inode *inode,
        if (alias->d_parent == dentry->d_parent)
                goto out_unalias;
 
-       /* Check for loops */
-       ret = ERR_PTR(-ELOOP);
-       if (d_ancestor(alias, dentry))
-               goto out_err;
-
        /* See lock_rename() */
        ret = ERR_PTR(-EBUSY);
        if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
@@ -2331,7 +2339,7 @@ static struct dentry *__d_unalias(struct inode *inode,
                goto out_err;
        m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-       d_move(alias, dentry);
+       __d_move(alias, dentry);
        ret = alias;
 out_err:
        spin_unlock(&inode->i_lock);
@@ -2416,15 +2424,24 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                alias = __d_find_alias(inode, 0);
                if (alias) {
                        actual = alias;
-                       /* Is this an anonymous mountpoint that we could splice
-                        * into our tree? */
-                       if (IS_ROOT(alias)) {
+                       write_seqlock(&rename_lock);
+
+                       if (d_ancestor(alias, dentry)) {
+                               /* Check for loops */
+                               actual = ERR_PTR(-ELOOP);
+                       } else if (IS_ROOT(alias)) {
+                               /* Is this an anonymous mountpoint that we
+                                * could splice into our tree? */
                                __d_materialise_dentry(dentry, alias);
+                               write_sequnlock(&rename_lock);
                                __d_drop(alias);
                                goto found;
+                       } else {
+                               /* Nope, but we must(!) avoid directory
+                                * aliasing */
+                               actual = __d_unalias(inode, dentry, alias);
                        }
-                       /* Nope, but we must(!) avoid directory aliasing */
-                       actual = __d_unalias(inode, dentry, alias);
+                       write_sequnlock(&rename_lock);
                        if (IS_ERR(actual))
                                dput(alias);
                        goto out_nolock;
index 97e0d52d72fdd65a365f3cab7027ae22c230582e..6075a1e727aee13dd3cd492b61d55edd81ee258e 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1996,7 +1996,7 @@ static void wait_for_dump_helpers(struct file *file)
  * is a special value that we use to trap recursive
  * core dumps
  */
-static int umh_pipe_setup(struct subprocess_info *info)
+static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
 {
        struct file *rp, *wp;
        struct fdtable *fdt;
index 06065bd37fc339070948a141cd8063c9d39af8ad..c57beddcc217e3e3592fe977f8237f7f46ddf16f 100644 (file)
@@ -913,7 +913,7 @@ struct dentry *exofs_get_parent(struct dentry *child)
        unsigned long ino = exofs_parent_ino(child);
 
        if (!ino)
-               return NULL;
+               return ERR_PTR(-ESTALE);
 
        return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino));
 }
index 2e29abb30f7629903f11d493bc685e3877f49917..095c36f3b6129e97db937f30a969174c4facacf1 100644 (file)
@@ -125,7 +125,7 @@ struct ext4_ext_path {
  * positive retcode - signal for ext4_ext_walk_space(), see below
  * callback must return valid extent (passed or newly created)
  */
-typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
+typedef int (*ext_prepare_callback)(struct inode *, ext4_lblk_t,
                                        struct ext4_ext_cache *,
                                        struct ext4_extent *, void *);
 
@@ -133,8 +133,11 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
 #define EXT_BREAK      1
 #define EXT_REPEAT     2
 
-/* Maximum logical block in a file; ext4_extent's ee_block is __le32 */
-#define EXT_MAX_BLOCK  0xffffffff
+/*
+ * Maximum number of logical blocks in a file; ext4_extent's ee_block is
+ * __le32.
+ */
+#define EXT_MAX_BLOCKS 0xffffffff
 
 /*
  * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
index 5199bac7fc625d5a19ad5934417f724fb699fe22..f815cc81e7a287bd7198dc874d7954938c188464 100644 (file)
@@ -1408,7 +1408,7 @@ got_index:
 
 /*
  * ext4_ext_next_allocated_block:
- * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
+ * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
  * NOTE: it considers block number from index entry as
  * allocated block. Thus, index entries have to be consistent
  * with leaves.
@@ -1422,7 +1422,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
        depth = path->p_depth;
 
        if (depth == 0 && path->p_ext == NULL)
-               return EXT_MAX_BLOCK;
+               return EXT_MAX_BLOCKS;
 
        while (depth >= 0) {
                if (depth == path->p_depth) {
@@ -1439,12 +1439,12 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
                depth--;
        }
 
-       return EXT_MAX_BLOCK;
+       return EXT_MAX_BLOCKS;
 }
 
 /*
  * ext4_ext_next_leaf_block:
- * returns first allocated block from next leaf or EXT_MAX_BLOCK
+ * returns first allocated block from next leaf or EXT_MAX_BLOCKS
  */
 static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
                                        struct ext4_ext_path *path)
@@ -1456,7 +1456,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
 
        /* zero-tree has no leaf blocks at all */
        if (depth == 0)
-               return EXT_MAX_BLOCK;
+               return EXT_MAX_BLOCKS;
 
        /* go to index block */
        depth--;
@@ -1469,7 +1469,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
                depth--;
        }
 
-       return EXT_MAX_BLOCK;
+       return EXT_MAX_BLOCKS;
 }
 
 /*
@@ -1677,13 +1677,13 @@ static unsigned int ext4_ext_check_overlap(struct inode *inode,
         */
        if (b2 < b1) {
                b2 = ext4_ext_next_allocated_block(path);
-               if (b2 == EXT_MAX_BLOCK)
+               if (b2 == EXT_MAX_BLOCKS)
                        goto out;
        }
 
        /* check for wrap through zero on extent logical start block*/
        if (b1 + len1 < b1) {
-               len1 = EXT_MAX_BLOCK - b1;
+               len1 = EXT_MAX_BLOCKS - b1;
                newext->ee_len = cpu_to_le16(len1);
                ret = 1;
        }
@@ -1767,7 +1767,7 @@ repeat:
        fex = EXT_LAST_EXTENT(eh);
        next = ext4_ext_next_leaf_block(inode, path);
        if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
-           && next != EXT_MAX_BLOCK) {
+           && next != EXT_MAX_BLOCKS) {
                ext_debug("next leaf block - %d\n", next);
                BUG_ON(npath != NULL);
                npath = ext4_ext_find_extent(inode, next, NULL);
@@ -1887,7 +1887,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
        BUG_ON(func == NULL);
        BUG_ON(inode == NULL);
 
-       while (block < last && block != EXT_MAX_BLOCK) {
+       while (block < last && block != EXT_MAX_BLOCKS) {
                num = last - block;
                /* find extent for this block */
                down_read(&EXT4_I(inode)->i_data_sem);
@@ -1958,7 +1958,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
                        err = -EIO;
                        break;
                }
-               err = func(inode, path, &cbex, ex, cbdata);
+               err = func(inode, next, &cbex, ex, cbdata);
                ext4_ext_drop_refs(path);
 
                if (err < 0)
@@ -2020,7 +2020,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
        if (ex == NULL) {
                /* there is no extent yet, so gap is [0;-] */
                lblock = 0;
-               len = EXT_MAX_BLOCK;
+               len = EXT_MAX_BLOCKS;
                ext_debug("cache gap(whole file):");
        } else if (block < le32_to_cpu(ex->ee_block)) {
                lblock = block;
@@ -2350,7 +2350,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                         * never happen because at least one of the end points
                         * needs to be on the edge of the extent.
                         */
-                       if (end == EXT_MAX_BLOCK) {
+                       if (end == EXT_MAX_BLOCKS - 1) {
                                ext_debug("  bad truncate %u:%u\n",
                                                start, end);
                                block = 0;
@@ -2398,7 +2398,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                         * If this is a truncate, this condition
                         * should never happen
                         */
-                       if (end == EXT_MAX_BLOCK) {
+                       if (end == EXT_MAX_BLOCKS - 1) {
                                ext_debug("  bad truncate %u:%u\n",
                                        start, end);
                                err = -EIO;
@@ -2478,7 +2478,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                 * we need to remove it from the leaf
                 */
                if (num == 0) {
-                       if (end != EXT_MAX_BLOCK) {
+                       if (end != EXT_MAX_BLOCKS - 1) {
                                /*
                                 * For hole punching, we need to scoot all the
                                 * extents up when an extent is removed so that
@@ -3699,7 +3699,7 @@ void ext4_ext_truncate(struct inode *inode)
 
        last_block = (inode->i_size + sb->s_blocksize - 1)
                        >> EXT4_BLOCK_SIZE_BITS(sb);
-       err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK);
+       err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
 
        /* In a multi-transaction truncate, we only make the final
         * transaction synchronous.
@@ -3914,14 +3914,13 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 /*
  * Callback function called for each extent to gather FIEMAP information.
  */
-static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
+static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
                       struct ext4_ext_cache *newex, struct ext4_extent *ex,
                       void *data)
 {
        __u64   logical;
        __u64   physical;
        __u64   length;
-       loff_t  size;
        __u32   flags = 0;
        int             ret = 0;
        struct fiemap_extent_info *fieinfo = data;
@@ -4103,8 +4102,7 @@ found_delayed_extent:
        if (ex && ext4_ext_is_uninitialized(ex))
                flags |= FIEMAP_EXTENT_UNWRITTEN;
 
-       size = i_size_read(inode);
-       if (logical + length >= size)
+       if (next == EXT_MAX_BLOCKS)
                flags |= FIEMAP_EXTENT_LAST;
 
        ret = fiemap_fill_next_extent(fieinfo, logical, physical,
@@ -4347,8 +4345,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
                start_blk = start >> inode->i_sb->s_blocksize_bits;
                last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
-               if (last_blk >= EXT_MAX_BLOCK)
-                       last_blk = EXT_MAX_BLOCK-1;
+               if (last_blk >= EXT_MAX_BLOCKS)
+                       last_blk = EXT_MAX_BLOCKS-1;
                len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
 
                /*
index a5763e3505ba59cc330e2cecbcf54c5eb23d8410..e3126c0510066fec7fe8bd0d46123aa1af16388c 100644 (file)
@@ -2634,7 +2634,7 @@ static int ext4_writepage(struct page *page,
        struct buffer_head *page_bufs = NULL;
        struct inode *inode = page->mapping->host;
 
-       trace_ext4_writepage(inode, page);
+       trace_ext4_writepage(page);
        size = i_size_read(inode);
        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
index 859f2ae8864e6af2b85dc62135f7f89cb0258d4f..6ed859d56850494d440dbbacc56967c4538fc659 100644 (file)
@@ -3578,8 +3578,8 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
                free += next - bit;
 
                trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
-               trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa,
-                                              grp_blk_start + bit, next - bit);
+               trace_ext4_mb_release_inode_pa(pa, grp_blk_start + bit,
+                                              next - bit);
                mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
                bit = next + 1;
        }
@@ -3608,7 +3608,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
        ext4_group_t group;
        ext4_grpblk_t bit;
 
-       trace_ext4_mb_release_group_pa(sb, pa);
+       trace_ext4_mb_release_group_pa(pa);
        BUG_ON(pa->pa_deleted == 0);
        ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
        BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
@@ -4448,7 +4448,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
  * @inode:             inode
  * @block:             start physical block to free
  * @count:             number of blocks to count
- * @metadata:          Are these metadata blocks
+ * @flags:             flags used by ext4_free_blocks
  */
 void ext4_free_blocks(handle_t *handle, struct inode *inode,
                      struct buffer_head *bh, ext4_fsblk_t block,
index 2b8304bf3c50e1d1363287e79a66d5641fccceaa..f57455a1b1b281bdf21e12f63bc46087abe58194 100644 (file)
@@ -1002,12 +1002,12 @@ mext_check_arguments(struct inode *orig_inode,
                return -EINVAL;
        }
 
-       if ((orig_start > EXT_MAX_BLOCK) ||
-           (donor_start > EXT_MAX_BLOCK) ||
-           (*len > EXT_MAX_BLOCK) ||
-           (orig_start + *len > EXT_MAX_BLOCK))  {
+       if ((orig_start >= EXT_MAX_BLOCKS) ||
+           (donor_start >= EXT_MAX_BLOCKS) ||
+           (*len > EXT_MAX_BLOCKS) ||
+           (orig_start + *len >= EXT_MAX_BLOCKS))  {
                ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
-                       "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCK,
+                       "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }
index cc5c157aa11df4cef77167e870dd64c6273cd4fa..9ea71aa864b3a620667aed2c7fbbe5348341d312 100644 (file)
@@ -2243,6 +2243,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
  * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
  * so that won't be a limiting factor.
  *
+ * However, there is another limiting factor. We store extents in the
+ * form of a starting block and a length, hence the resulting length of
+ * the extent covering the maximum file size must fit into the on-disk
+ * format containers as well. Given that a length is always one unit
+ * bigger than the highest block it covers (because we count block 0 as
+ * well), we have to lower s_maxbytes by one fs block.
+ *
  * Note, this does *not* consider any metadata overhead for vfs i_blocks.
  */
 static loff_t ext4_max_size(int blkbits, int has_huge_files)
@@ -2264,10 +2270,13 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
                upper_limit <<= blkbits;
        }
 
-       /* 32-bit extent-start container, ee_block */
-       res = 1LL << 32;
+       /*
+        * 32-bit extent-start container, ee_block. We lower the maxbytes
+        * by one fs block, so ee_len can cover the extent of maximum file
+        * size
+        */
+       res = (1LL << 32) - 1;
        res <<= blkbits;
-       res -= 1;
 
        /* Sanity check against vm- & vfs- imposed limits */
        if (res > upper_limit)
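
Numerically the change costs exactly one block of maximum file size:
the old bound was (1LL << 32) << blkbits, minus one byte; the new one
is 2^32 - 1 whole blocks. A worked example assuming 4 KiB blocks
(blkbits == 12):

#include <stdio.h>

int main(void)
{
	int blkbits = 12;			/* assume 4 KiB blocks */
	long long res = (1LL << 32) - 1;	/* 2^32 - 1 blocks */

	res <<= blkbits;
	printf("s_maxbytes = %lld\n", res);	/* 17592186040320 = 2^44 - 4096 */
	return 0;
}
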
index a2a5d19ece6adc92b3deda283bd6f5db1f2c701c..2f343b4d7a7d87b6792aad379b91ebffee9e15bc 100644 (file)
@@ -954,3 +954,47 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
        pagevec_reinit(pagevec);
 }
 EXPORT_SYMBOL(fscache_mark_pages_cached);
+
+/*
+ * Uncache all the pages in an inode that are marked PG_fscache, assuming them
+ * to be associated with the given cookie.
+ */
+void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+                                      struct inode *inode)
+{
+       struct address_space *mapping = inode->i_mapping;
+       struct pagevec pvec;
+       pgoff_t next;
+       int i;
+
+       _enter("%p,%p", cookie, inode);
+
+       if (!mapping || mapping->nrpages == 0) {
+               _leave(" [no pages]");
+               return;
+       }
+
+       pagevec_init(&pvec, 0);
+       next = 0;
+       while (next <= (loff_t)-1 &&
+              pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)
+              ) {
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
+                       pgoff_t page_index = page->index;
+
+                       ASSERTCMP(page_index, >=, next);
+                       next = page_index + 1;
+
+                       if (PageFsCache(page)) {
+                               __fscache_wait_on_page_write(cookie, page);
+                               __fscache_uncache_page(cookie, page);
+                       }
+               }
+               pagevec_release(&pvec);
+               cond_resched();
+       }
+
+       _leave("");
+}
+EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
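
Netfs code is expected to call this through the
fscache_uncache_all_inode_pages() wrapper, as the cifs hunk earlier
does, before relinquishing the cookie, so no page is left marked
PG_fscache against a dead cookie. Caller shape as a sketch:

static void demo_disable_cookie(struct fscache_cookie *cookie,
				struct inode *inode)
{
	if (cookie) {
		fscache_uncache_all_inode_pages(cookie, inode);
		fscache_relinquish_cookie(cookie, 1);	/* 1 == retire */
	}
}
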
index 802ac5eeba28e2c77c8618ba457a179fa8babb73..f9fbbe96c222860374840a825f2168d14b340f83 100644 (file)
@@ -1069,6 +1069,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
                return 0;
 
        gfs2_log_lock(sdp);
+       spin_lock(&sdp->sd_ail_lock);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
@@ -1080,6 +1081,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
                        goto not_possible;
                bh = bh->b_this_page;
        } while(bh != head);
+       spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
 
        head = bh = page_buffers(page);
@@ -1112,6 +1114,7 @@ not_possible: /* Should never happen */
        WARN_ON(buffer_dirty(bh));
        WARN_ON(buffer_pinned(bh));
 cannot_release:
+       spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        return 0;
 }
index 8ef70f464731eec2e1a707b0bd1dcd12d24aa2f1..2cca29316bd624b30802550931e569eacc65b7ac 100644 (file)
@@ -47,10 +47,10 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl)
                                bd_ail_gl_list);
                bh = bd->bd_bh;
                gfs2_remove_from_ail(bd);
-               spin_unlock(&sdp->sd_ail_lock);
-
                bd->bd_bh = NULL;
                bh->b_private = NULL;
+               spin_unlock(&sdp->sd_ail_lock);
+
                bd->bd_blkno = bh->b_blocknr;
                gfs2_log_lock(sdp);
                gfs2_assert_withdraw(sdp, !buffer_busy(bh));
@@ -221,8 +221,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
                }
        }
 
-       if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
+       if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
+               gfs2_log_flush(gl->gl_sbd, NULL);
                gl->gl_sbd->sd_rindex_uptodate = 0;
+       }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);
 }
index 0a064e91ac7071e6f5570acf50666aaf59b59bc2..81206e70cbf69485d19d27a7ae7515cc6b158586 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/buffer_head.h>
 #include <linux/rcupdate.h>
 #include <linux/rculist_bl.h>
+#include <linux/completion.h>
 
 #define DIO_WAIT       0x00000010
 #define DIO_METADATA   0x00000020
@@ -546,6 +547,7 @@ struct gfs2_sbd {
        struct gfs2_glock *sd_trans_gl;
        wait_queue_head_t sd_glock_wait;
        atomic_t sd_glock_disposal;
+       struct completion sd_locking_init;
 
        /* Inode Stuff */
 
index 903115f2bb34849d8df402d57723bec403857204..85c62923ee292d9d663119854f9115ed26fff36b 100644 (file)
@@ -903,6 +903,7 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
                if (gfs2_ail1_empty(sdp))
                        break;
        }
+       gfs2_log_flush(sdp, NULL);
 }
 
 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
index 8ac9ae189b535cfe91fa8922fdf091e8057766f2..2a77071fb7b68df78c3ff41041bf1d7fb330985c 100644 (file)
@@ -72,6 +72,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
        init_waitqueue_head(&sdp->sd_glock_wait);
        atomic_set(&sdp->sd_glock_disposal, 0);
+       init_completion(&sdp->sd_locking_init);
        spin_lock_init(&sdp->sd_statfs_spin);
 
        spin_lock_init(&sdp->sd_rindex_spin);
@@ -1017,11 +1018,13 @@ hostdata_error:
                fsname++;
        if (lm->lm_mount == NULL) {
                fs_info(sdp, "Now mounting FS...\n");
+               complete(&sdp->sd_locking_init);
                return 0;
        }
        ret = lm->lm_mount(sdp, fsname);
        if (ret == 0)
                fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+       complete(&sdp->sd_locking_init);
        return ret;
 }
 
index ed540e7018beee30c058b185d6599cfff30dd9ba..fb0edf735483174932e569ab2c318311d88cb6e2 100644 (file)
@@ -757,13 +757,17 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
        struct timespec atime;
        struct gfs2_dinode *di;
        int ret = -EAGAIN;
+       int unlock_required = 0;
 
        /* Skip timestamp update, if this is from a memalloc */
        if (current->flags & PF_MEMALLOC)
                goto do_flush;
-       ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-       if (ret)
-               goto do_flush;
+       if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+               ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+               if (ret)
+                       goto do_flush;
+               unlock_required = 1;
+       }
        ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (ret)
                goto do_unlock;
@@ -780,7 +784,8 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
        }
        gfs2_trans_end(sdp);
 do_unlock:
-       gfs2_glock_dq_uninit(&gh);
+       if (unlock_required)
+               gfs2_glock_dq_uninit(&gh);
 do_flush:
        if (wbc->sync_mode == WB_SYNC_ALL)
                gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
@@ -1427,7 +1432,20 @@ out:
        return error;
 }
 
-/*
+/**
+ * gfs2_evict_inode - Remove an inode from cache
+ * @inode: The inode to evict
+ *
+ * There are three cases to consider:
+ * 1. i_nlink == 0, we are the final opener (and must deallocate)
+ * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
+ * 3. i_nlink > 0
+ *
+ * If the fs is read-only, then we have to treat all cases as per #3
+ * since we are unable to do any deallocation. The inode will be
+ * deallocated by the next read/write node to attempt an allocation
+ * in the same resource group.
+ *
  * We have to (at the moment) hold the inode's main lock to cover
  * the gap between unlocking the shared lock on the iopen lock and
  * taking the exclusive lock. I'd rather do a shared -> exclusive
@@ -1470,6 +1488,8 @@ static void gfs2_evict_inode(struct inode *inode)
        if (error)
                goto out_truncate;
 
+       /* Case 1 starts here */
+
        if (S_ISDIR(inode->i_mode) &&
            (ip->i_diskflags & GFS2_DIF_EXHASH)) {
                error = gfs2_dir_exhash_dealloc(ip);
@@ -1493,13 +1513,16 @@ static void gfs2_evict_inode(struct inode *inode)
        goto out_unlock;
 
 out_truncate:
+       /* Case 2 starts here */
        error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
        if (error)
                goto out_unlock;
-       gfs2_final_release_pages(ip);
+       /* Needs to be done before glock release & also in a transaction */
+       truncate_inode_pages(&inode->i_data, 0);
        gfs2_trans_end(sdp);
 
 out_unlock:
+       /* Error path for case 1 */
        if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
                gfs2_glock_dq(&ip->i_iopen_gh);
        gfs2_holder_uninit(&ip->i_iopen_gh);
@@ -1507,6 +1530,7 @@ out_unlock:
        if (error && error != GLR_TRYFAILED && error != -EROFS)
                fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
 out:
+       /* Case 3 starts here */
        truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);
 
index e20eab37bc80c3fa6629c38dd79c1b9dc6491c2f..443cabcfcd23f834f64bc800c6c4cd518a0e58db 100644 (file)
@@ -338,6 +338,9 @@ static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
        rv = sscanf(buf, "%u", &first);
        if (rv != 1 || first > 1)
                return -EINVAL;
+       rv = wait_for_completion_killable(&sdp->sd_locking_init);
+       if (rv)
+               return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EBUSY;
        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
@@ -414,7 +417,9 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
        rv = sscanf(buf, "%d", &jid);
        if (rv != 1)
                return -EINVAL;
-
+       rv = wait_for_completion_killable(&sdp->sd_locking_init);
+       if (rv)
+               return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EINVAL;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
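
Taken together, the three GFS2 hunks above (init_completion() in init_sbd(), complete() at the end of the mount path, and wait_for_completion_killable() in the two sysfs store handlers) order sysfs writes after lock-module initialisation. A userspace sketch of that ordering, modelling struct completion with a condition variable; the names mirror the kernel API but this is not the kernel implementation:

#include <pthread.h>
#include <stdio.h>

struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

static void init_completion(struct completion *c)
{
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
}

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static struct completion locking_init;

static void *store(void *arg)
{
        (void)arg;
        wait_for_completion(&locking_init); /* like lkfirst_store()/jid_store() */
        printf("store: locking layer ready, safe to proceed\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        init_completion(&locking_init);
        pthread_create(&t, NULL, store, NULL);
        /* ... mount work: lm_mount() etc. ... */
        complete(&locking_init);            /* like the mount path above */
        pthread_join(t, NULL);
        return 0;
}
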
index b49b55584c844e8578b1aecb090affa73991d0cc..84a47b709f51835de4dc368e30318abddbbce226 100644 (file)
@@ -500,7 +500,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 out_put_hidden_dir:
        iput(sbi->hidden_dir);
 out_put_root:
-       iput(sbi->alloc_file);
+       iput(root);
 out_put_alloc_file:
        iput(sbi->alloc_file);
 out_close_cat_tree:
index 3031d81f5f0f585b17ecc3928da80b19b8a9eee3..4ac88ff79aa6671ef37b66fdbb96d7e6ff5609f7 100644 (file)
@@ -36,6 +36,7 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
 {
        DECLARE_COMPLETION_ONSTACK(wait);
        struct bio *bio;
+       int ret = 0;
 
        bio = bio_alloc(GFP_NOIO, 1);
        bio->bi_sector = sector;
@@ -54,8 +55,10 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
        wait_for_completion(&wait);
 
        if (!bio_flagged(bio, BIO_UPTODATE))
-               return -EIO;
-       return 0;
+               ret = -EIO;
+
+       bio_put(bio);
+       return ret;
 }
 
 static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
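
The fix above converts hfsplus_submit_bio() to a single exit path so the bio reference is dropped on the error path too. A toy sketch of the idiom, with malloc()/free() standing in for bio_alloc()/bio_put():

#include <stdio.h>
#include <stdlib.h>

static int submit(int fail)
{
        int ret = 0;
        char *buf = malloc(64);         /* stand-in for bio_alloc() */

        if (!buf)
                return -12;             /* -ENOMEM */

        /* ... fill buf, submit, wait for completion ... */
        if (fail)
                ret = -5;               /* -EIO, but no early return */

        free(buf);                      /* stand-in for bio_put(): runs on all paths */
        return ret;
}

int main(void)
{
        printf("ok=%d err=%d\n", submit(0), submit(1));
        return 0;
}
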
index 87ed48e0343d701fe621fa1751390bfdfbec06f7..85c098a499f33ce858bdfaf85f76f053bd1b9376 100644 (file)
@@ -139,7 +139,8 @@ static int file_removed(struct dentry *dentry, const char *file)
 static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
                                   struct nameidata *nd)
 {
-       struct dentry *proc_dentry, *new, *parent;
+       struct dentry *proc_dentry, *parent;
+       struct qstr *name = &dentry->d_name;
        struct inode *inode;
        int err, deleted;
 
@@ -149,23 +150,9 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
        else if (deleted)
                return ERR_PTR(-ENOENT);
 
-       err = -ENOMEM;
        parent = HPPFS_I(ino)->proc_dentry;
        mutex_lock(&parent->d_inode->i_mutex);
-       proc_dentry = d_lookup(parent, &dentry->d_name);
-       if (proc_dentry == NULL) {
-               proc_dentry = d_alloc(parent, &dentry->d_name);
-               if (proc_dentry == NULL) {
-                       mutex_unlock(&parent->d_inode->i_mutex);
-                       goto out;
-               }
-               new = (*parent->d_inode->i_op->lookup)(parent->d_inode,
-                                                      proc_dentry, NULL);
-               if (new) {
-                       dput(proc_dentry);
-                       proc_dentry = new;
-               }
-       }
+       proc_dentry = lookup_one_len(name->name, parent, name->len);
        mutex_unlock(&parent->d_inode->i_mutex);
 
        if (IS_ERR(proc_dentry))
@@ -174,13 +161,11 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
        err = -ENOMEM;
        inode = get_inode(ino->i_sb, proc_dentry);
        if (!inode)
-               goto out_dput;
+               goto out;
 
        d_add(dentry, inode);
        return NULL;
 
- out_dput:
-       dput(proc_dentry);
  out:
        return ERR_PTR(err);
 }
@@ -690,8 +675,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
        struct inode *proc_ino = dentry->d_inode;
        struct inode *inode = new_inode(sb);
 
-       if (!inode)
+       if (!inode) {
+               dput(dentry);
                return ERR_PTR(-ENOMEM);
+       }
 
        if (S_ISDIR(dentry->d_inode->i_mode)) {
                inode->i_op = &hppfs_dir_iops;
@@ -704,7 +691,7 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
                inode->i_fop = &hppfs_file_fops;
        }
 
-       HPPFS_I(inode)->proc_dentry = dget(dentry);
+       HPPFS_I(inode)->proc_dentry = dentry;
 
        inode->i_uid = proc_ino->i_uid;
        inode->i_gid = proc_ino->i_gid;
@@ -737,7 +724,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
        sb->s_fs_info = proc_mnt;
 
        err = -ENOMEM;
-       root_inode = get_inode(sb, proc_mnt->mnt_sb->s_root);
+       root_inode = get_inode(sb, dget(proc_mnt->mnt_sb->s_root));
        if (!root_inode)
                goto out_mntput;
 
index 0f7e88a7803f39e52b778eb30652654d7d61d2d1..43566d17d1b85643820f0c943f35f866869b9a80 100644 (file)
@@ -423,7 +423,14 @@ EXPORT_SYMBOL(remove_inode_hash);
 void end_writeback(struct inode *inode)
 {
        might_sleep();
+       /*
+        * We have to cycle tree_lock here because reclaim can still be in the
+        * process of removing the last page (in __delete_from_page_cache())
+        * and we must not free mapping under it.
+        */
+       spin_lock_irq(&inode->i_data.tree_lock);
        BUG_ON(inode->i_data.nrpages);
+       spin_unlock_irq(&inode->i_data.tree_lock);
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
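
The tree_lock cycling above is the classic lock-handshake idiom: briefly acquiring and releasing a lock guarantees that any critical section which began before us has completed. A userspace sketch of the idiom (the lock name follows the hunk; the helper is illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static void wait_out_concurrent_removal(void)
{
        /* Any thread still removing a page holds tree_lock; once we can
         * take it, that critical section has finished and the mapping
         * may be torn down safely. */
        pthread_mutex_lock(&tree_lock);
        pthread_mutex_unlock(&tree_lock);
}

int main(void)
{
        wait_out_concurrent_removal();
        printf("safe to free mapping\n");
        return 0;
}
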
index 3db5ba4568fc8efd30025a9e9906eb01a47f9c45..b3cc8586984e9748ff3c66e6c8703fa84c822c1c 100644 (file)
@@ -974,7 +974,7 @@ out_no_inode:
 out_no_read:
        printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n",
                __func__, s->s_id, iso_blknum, block);
-       goto out_freesbi;
+       goto out_freebh;
 out_bad_zone_size:
        printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n",
                sbi->s_log_zone_size);
@@ -989,6 +989,7 @@ out_unknown_format:
 
 out_freebh:
        brelse(bh);
+       brelse(pri_bh);
 out_freesbi:
        kfree(opt.iocharset);
        kfree(sbi);
index 6a79fd0a1a32cdd018fea9879a6026e4dcf9c8b6..2c62c5aae82ff8936ae30036fe595aff68ab6877 100644 (file)
@@ -97,10 +97,14 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
 
        if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
            !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
+               /*
+                * Get our reference so that bh cannot be freed before
+                * we unlock it
+                */
+               get_bh(bh);
                JBUFFER_TRACE(jh, "remove from checkpoint list");
                ret = __jbd2_journal_remove_checkpoint(jh) + 1;
                jbd_unlock_bh_state(bh);
-               jbd2_journal_remove_journal_head(bh);
                BUFFER_TRACE(bh, "release");
                __brelse(bh);
        } else {
@@ -223,8 +227,8 @@ restart:
                        spin_lock(&journal->j_list_lock);
                        goto restart;
                }
+               get_bh(bh);
                if (buffer_locked(bh)) {
-                       atomic_inc(&bh->b_count);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        wait_on_buffer(bh);
@@ -243,7 +247,6 @@ restart:
                 */
                released = __jbd2_journal_remove_checkpoint(jh);
                jbd_unlock_bh_state(bh);
-               jbd2_journal_remove_journal_head(bh);
                __brelse(bh);
        }
 
@@ -284,7 +287,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
        int ret = 0;
 
        if (buffer_locked(bh)) {
-               atomic_inc(&bh->b_count);
+               get_bh(bh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
                wait_on_buffer(bh);
@@ -316,12 +319,12 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
                ret = 1;
                if (unlikely(buffer_write_io_error(bh)))
                        ret = -EIO;
+               get_bh(bh);
                J_ASSERT_JH(jh, !buffer_jbddirty(bh));
                BUFFER_TRACE(bh, "remove from checkpoint");
                __jbd2_journal_remove_checkpoint(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
-               jbd2_journal_remove_journal_head(bh);
                __brelse(bh);
        } else {
                /*
@@ -554,7 +557,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
 /*
  * journal_clean_one_cp_list
  *
- * Find all the written-back checkpoint buffers in the given list and release them.
+ * Find all the written-back checkpoint buffers in the given list and
+ * release them.
  *
  * Called with the journal locked.
  * Called with j_list_lock held.
@@ -663,8 +667,8 @@ out:
  * checkpoint lists.
  *
  * The function returns 1 if it frees the transaction, 0 otherwise.
+ * The function can free jh and bh.
  *
- * This function is called with the journal locked.
  * This function is called with j_list_lock held.
  * This function is called with jbd_lock_bh_state(jh2bh(jh))
  */
@@ -684,13 +688,14 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
        }
        journal = transaction->t_journal;
 
+       JBUFFER_TRACE(jh, "removing from transaction");
        __buffer_unlink(jh);
        jh->b_cp_transaction = NULL;
+       jbd2_journal_put_journal_head(jh);
 
        if (transaction->t_checkpoint_list != NULL ||
            transaction->t_checkpoint_io_list != NULL)
                goto out;
-       JBUFFER_TRACE(jh, "transaction has no more buffers");
 
        /*
         * There is one special case to worry about: if we have just pulled the
@@ -701,10 +706,8 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
         * The locking here around t_state is a bit sleazy.
         * See the comment at the end of jbd2_journal_commit_transaction().
         */
-       if (transaction->t_state != T_FINISHED) {
-               JBUFFER_TRACE(jh, "belongs to running/committing transaction");
+       if (transaction->t_state != T_FINISHED)
                goto out;
-       }
 
        /* OK, that was the last buffer for the transaction: we can now
           safely remove this transaction from the log */
@@ -723,7 +726,6 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
        wake_up(&journal->j_wait_logspace);
        ret = 1;
 out:
-       JBUFFER_TRACE(jh, "exit");
        return ret;
 }
 
@@ -742,6 +744,8 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
        J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
        J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
 
+       /* Get reference for checkpointing transaction */
+       jbd2_journal_grab_journal_head(jh2bh(jh));
        jh->b_cp_transaction = transaction;
 
        if (!transaction->t_checkpoint_list) {
index 7f21cf3aaf92e66d892259289cae31553423312b..eef6979821a4c8db91f48854c46f81c291ef4c27 100644 (file)
@@ -848,10 +848,16 @@ restart_loop:
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;
+               int try_to_free = 0;
 
                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
+               /*
+                * Get a reference so that bh cannot be freed before we are
+                * done with it.
+                */
+               get_bh(bh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
 
@@ -914,28 +920,27 @@ restart_loop:
                        __jbd2_journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
-                       JBUFFER_TRACE(jh, "refile for checkpoint writeback");
-                       __jbd2_journal_refile_buffer(jh);
-                       jbd_unlock_bh_state(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
-                       /* The buffer on BJ_Forget list and not jbddirty means
+                       /*
+                        * The buffer on BJ_Forget list and not jbddirty means
                         * it has been freed by this transaction and hence it
                         * could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on BJ_Forget
-                        * list. */
-                       JBUFFER_TRACE(jh, "refile or unfile freed buffer");
-                       __jbd2_journal_refile_buffer(jh);
-                       if (!jh->b_transaction) {
-                               jbd_unlock_bh_state(bh);
-                                /* needs a brelse */
-                               jbd2_journal_remove_journal_head(bh);
-                               release_buffer_page(bh);
-                       } else
-                               jbd_unlock_bh_state(bh);
+                        * list.
+                        */
+                       if (!jh->b_next_transaction)
+                               try_to_free = 1;
                }
+               JBUFFER_TRACE(jh, "refile or unfile buffer");
+               __jbd2_journal_refile_buffer(jh);
+               jbd_unlock_bh_state(bh);
+               if (try_to_free)
+                       release_buffer_page(bh);        /* Drops bh reference */
+               else
+                       __brelse(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
index 9a78269903041934f0896148e1999e8467fd0872..0dfa5b598e68fa3f358f043c442eb39558bf6f52 100644 (file)
@@ -2078,10 +2078,9 @@ static void journal_free_journal_head(struct journal_head *jh)
  * When a buffer has its BH_JBD bit set it is immune from being released by
  * core kernel code, mainly via ->b_count.
  *
- * A journal_head may be detached from its buffer_head when the journal_head's
- * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
- * Various places in JBD call jbd2_journal_remove_journal_head() to indicate that the
- * journal_head can be dropped if needed.
+ * A journal_head is detached from its buffer_head when the journal_head's
+ * b_jcount reaches zero. The running transaction (b_transaction) and the
+ * checkpoint transaction (b_cp_transaction) each hold a b_jcount reference.
  *
  * Various places in the kernel want to attach a journal_head to a buffer_head
  * _before_ attaching the journal_head to a transaction.  To protect the
@@ -2094,17 +2093,16 @@ static void journal_free_journal_head(struct journal_head *jh)
  *     (Attach a journal_head if needed.  Increments b_jcount)
  *     struct journal_head *jh = jbd2_journal_add_journal_head(bh);
  *     ...
+ *     (Get another reference for the transaction)
+ *     jbd2_journal_grab_journal_head(bh);
  *     jh->b_transaction = xxx;
+ *     (Put original reference)
  *     jbd2_journal_put_journal_head(jh);
- *
- * Now, the journal_head's b_jcount is zero, but it is safe from being released
- * because it has a non-zero b_transaction.
  */
 
 /*
  * Give a buffer_head a journal_head.
  *
- * Doesn't need the journal lock.
  * May sleep.
  */
 struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
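
The rewritten comment above describes the new lifetime rule: the journal_head lives exactly as long as b_jcount is non-zero, and each user (the caller, the running transaction, the checkpoint transaction) holds its own count. A toy refcount model of that protocol; the function names echo the jbd2 ones but the implementation is illustrative:

#include <stdio.h>
#include <stdlib.h>

struct jh {
        int b_jcount;
};

static struct jh *add_journal_head(void)
{
        struct jh *jh = calloc(1, sizeof(*jh));

        if (!jh)
                return NULL;
        jh->b_jcount = 1;               /* caller's transient reference */
        return jh;
}

static void grab_journal_head(struct jh *jh)
{
        jh->b_jcount++;                 /* e.g. a transaction's reference */
}

static void put_journal_head(struct jh *jh)
{
        if (--jh->b_jcount == 0) {      /* last put frees, unconditionally */
                printf("journal_head freed\n");
                free(jh);
        }
}

int main(void)
{
        struct jh *jh = add_journal_head();

        if (!jh)
                return 1;
        grab_journal_head(jh);          /* jh->b_transaction = xxx; */
        put_journal_head(jh);           /* caller drops its reference */
        put_journal_head(jh);           /* transaction's put frees jh */
        return 0;
}
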
@@ -2168,61 +2166,29 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
        struct journal_head *jh = bh2jh(bh);
 
        J_ASSERT_JH(jh, jh->b_jcount >= 0);
-
-       get_bh(bh);
-       if (jh->b_jcount == 0) {
-               if (jh->b_transaction == NULL &&
-                               jh->b_next_transaction == NULL &&
-                               jh->b_cp_transaction == NULL) {
-                       J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
-                       J_ASSERT_BH(bh, buffer_jbd(bh));
-                       J_ASSERT_BH(bh, jh2bh(jh) == bh);
-                       BUFFER_TRACE(bh, "remove journal_head");
-                       if (jh->b_frozen_data) {
-                               printk(KERN_WARNING "%s: freeing "
-                                               "b_frozen_data\n",
-                                               __func__);
-                               jbd2_free(jh->b_frozen_data, bh->b_size);
-                       }
-                       if (jh->b_committed_data) {
-                               printk(KERN_WARNING "%s: freeing "
-                                               "b_committed_data\n",
-                                               __func__);
-                               jbd2_free(jh->b_committed_data, bh->b_size);
-                       }
-                       bh->b_private = NULL;
-                       jh->b_bh = NULL;        /* debug, really */
-                       clear_buffer_jbd(bh);
-                       __brelse(bh);
-                       journal_free_journal_head(jh);
-               } else {
-                       BUFFER_TRACE(bh, "journal_head was locked");
-               }
+       J_ASSERT_JH(jh, jh->b_transaction == NULL);
+       J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+       J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
+       J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
+       J_ASSERT_BH(bh, buffer_jbd(bh));
+       J_ASSERT_BH(bh, jh2bh(jh) == bh);
+       BUFFER_TRACE(bh, "remove journal_head");
+       if (jh->b_frozen_data) {
+               printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
+               jbd2_free(jh->b_frozen_data, bh->b_size);
        }
+       if (jh->b_committed_data) {
+               printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
+               jbd2_free(jh->b_committed_data, bh->b_size);
+       }
+       bh->b_private = NULL;
+       jh->b_bh = NULL;        /* debug, really */
+       clear_buffer_jbd(bh);
+       journal_free_journal_head(jh);
 }
 
 /*
- * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction
- * and has a zero b_jcount then remove and release its journal_head.   If we did
- * see that the buffer is not used by any transaction we also "logically"
- * decrement ->b_count.
- *
- * We in fact take an additional increment on ->b_count as a convenience,
- * because the caller usually wants to do additional things with the bh
- * after calling here.
- * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some
- * time.  Once the caller has run __brelse(), the buffer is eligible for
- * reaping by try_to_free_buffers().
- */
-void jbd2_journal_remove_journal_head(struct buffer_head *bh)
-{
-       jbd_lock_bh_journal_head(bh);
-       __journal_remove_journal_head(bh);
-       jbd_unlock_bh_journal_head(bh);
-}
-
-/*
- * Drop a reference on the passed journal_head.  If it fell to zero then try to
+ * Drop a reference on the passed journal_head.  If it fell to zero then
  * release the journal_head from the buffer_head.
  */
 void jbd2_journal_put_journal_head(struct journal_head *jh)
@@ -2232,11 +2198,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
        jbd_lock_bh_journal_head(bh);
        J_ASSERT_JH(jh, jh->b_jcount > 0);
        --jh->b_jcount;
-       if (!jh->b_jcount && !jh->b_transaction) {
+       if (!jh->b_jcount) {
                __journal_remove_journal_head(bh);
+               jbd_unlock_bh_journal_head(bh);
                __brelse(bh);
-       }
-       jbd_unlock_bh_journal_head(bh);
+       } else
+               jbd_unlock_bh_journal_head(bh);
 }
 
 /*
index 3eec82d32fd4c6886fdf823e7d3690ca2e665162..2d7109414cdd6b7a4d21bdb2e738ff20581523a4 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 
 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
+static void __jbd2_journal_unfile_buffer(struct journal_head *jh);
 
 /*
  * jbd2_get_transaction: obtain a new transaction_t object.
@@ -764,7 +765,6 @@ repeat:
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
-               jh->b_transaction = transaction;
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
@@ -814,7 +814,6 @@ out:
  * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
  * @handle: transaction to add buffer modifications to
  * @bh:     bh to be used for metadata writes
- * @credits: variable that will receive credits for the buffer
  *
  * Returns an error code or 0 on success.
  *
@@ -896,8 +895,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
                 * committed and so it's safe to clear the dirty bit.
                 */
                clear_buffer_dirty(jh2bh(jh));
-               jh->b_transaction = transaction;
-
                /* first access by this transaction */
                jh->b_modified = 0;
 
@@ -932,7 +929,6 @@ out:
  *     non-rewindable consequences
  * @handle: transaction
  * @bh: buffer to undo
- * @credits: store the number of taken credits here (if not NULL)
  *
  * Sometimes there is a need to distinguish between metadata which has
  * been committed to disk and that which has not.  The ext3fs code uses
@@ -1232,8 +1228,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
                        __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                } else {
                        __jbd2_journal_unfile_buffer(jh);
-                       jbd2_journal_remove_journal_head(bh);
-                       __brelse(bh);
                        if (!buffer_jbd(bh)) {
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
@@ -1556,19 +1550,32 @@ void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
                mark_buffer_dirty(bh);  /* Expose it to the VM */
 }
 
-void __jbd2_journal_unfile_buffer(struct journal_head *jh)
+/*
+ * Remove buffer from all transactions.
+ *
+ * Called with the bh_state lock and j_list_lock held.
+ *
+ * jh and bh may already be freed when this function returns.
+ */
+static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
 {
        __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = NULL;
+       jbd2_journal_put_journal_head(jh);
 }
 
 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
 {
-       jbd_lock_bh_state(jh2bh(jh));
+       struct buffer_head *bh = jh2bh(jh);
+
+       /* Get reference so that buffer cannot be freed before we unlock it */
+       get_bh(bh);
+       jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_unfile_buffer(jh);
        spin_unlock(&journal->j_list_lock);
-       jbd_unlock_bh_state(jh2bh(jh));
+       jbd_unlock_bh_state(bh);
+       __brelse(bh);
 }
 
 /*
@@ -1595,8 +1602,6 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
                if (jh->b_jlist == BJ_None) {
                        JBUFFER_TRACE(jh, "remove from checkpoint list");
                        __jbd2_journal_remove_checkpoint(jh);
-                       jbd2_journal_remove_journal_head(bh);
-                       __brelse(bh);
                }
        }
        spin_unlock(&journal->j_list_lock);
@@ -1659,7 +1664,6 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
                /*
                 * We take our own ref against the journal_head here to avoid
                 * having to add tons of locking around each instance of
-                * jbd2_journal_remove_journal_head() and
                 * jbd2_journal_put_journal_head().
                 */
                jh = jbd2_journal_grab_journal_head(bh);
@@ -1697,10 +1701,9 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
        int may_free = 1;
        struct buffer_head *bh = jh2bh(jh);
 
-       __jbd2_journal_unfile_buffer(jh);
-
        if (jh->b_cp_transaction) {
                JBUFFER_TRACE(jh, "on running+cp transaction");
+               __jbd2_journal_temp_unlink_buffer(jh);
                /*
                 * We don't want to write the buffer anymore, clear the
                 * bit so that we don't confuse checks in
@@ -1711,8 +1714,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
                may_free = 0;
        } else {
                JBUFFER_TRACE(jh, "on running transaction");
-               jbd2_journal_remove_journal_head(bh);
-               __brelse(bh);
+               __jbd2_journal_unfile_buffer(jh);
        }
        return may_free;
 }
@@ -1990,6 +1992,8 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
 
        if (jh->b_transaction)
                __jbd2_journal_temp_unlink_buffer(jh);
+       else
+               jbd2_journal_grab_journal_head(bh);
        jh->b_transaction = transaction;
 
        switch (jlist) {
@@ -2041,9 +2045,10 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
  * already started to be used by a subsequent transaction, refile the
  * buffer on that transaction's metadata list.
  *
- * Called under journal->j_list_lock
- *
+ * Called under j_list_lock
  * Called under jbd_lock_bh_state(jh2bh(jh))
+ *
+ * jh and bh may already be freed when this function returns
  */
 void __jbd2_journal_refile_buffer(struct journal_head *jh)
 {
@@ -2067,6 +2072,11 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
 
        was_dirty = test_clear_buffer_jbddirty(bh);
        __jbd2_journal_temp_unlink_buffer(jh);
+       /*
+        * We set b_transaction here because b_next_transaction will inherit
+        * our jh reference and thus __jbd2_journal_file_buffer() must not
+        * take a new one.
+        */
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
        if (buffer_freed(bh))
@@ -2083,30 +2093,21 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
 }
 
 /*
- * For the unlocked version of this call, also make sure that any
- * hanging journal_head is cleaned up if necessary.
- *
- * __jbd2_journal_refile_buffer is usually called as part of a single locked
- * operation on a buffer_head, in which the caller is probably going to
- * be hooking the journal_head onto other lists.  In that case it is up
- * to the caller to remove the journal_head if necessary.  For the
- * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
- * doing anything else to the buffer so we need to do the cleanup
- * ourselves to avoid a jh leak.
- *
- * *** The journal_head may be freed by this call! ***
+ * __jbd2_journal_refile_buffer() with necessary locking added. We take our
+ * bh reference so that we can safely unlock bh.
+ *
+ * The jh and bh may be freed by this call.
  */
 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
 {
        struct buffer_head *bh = jh2bh(jh);
 
+       /* Get reference so that buffer cannot be freed before we unlock it */
+       get_bh(bh);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
-
        __jbd2_journal_refile_buffer(jh);
        jbd_unlock_bh_state(bh);
-       jbd2_journal_remove_journal_head(bh);
-
        spin_unlock(&journal->j_list_lock);
        __brelse(bh);
 }
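
Several hunks in this file add a get_bh() before a section that may drop the last long-lived reference, with a matching __brelse() afterwards: the buffer is pinned so it cannot be freed while we still touch its lock. A toy refcount sketch of that bracket:

#include <stdio.h>
#include <stdlib.h>

struct buf { int count; };

static void get(struct buf *b)  { b->count++; }
static void put(struct buf *b)
{
        if (--b->count == 0) {
                printf("buf freed\n");
                free(b);
        }
}

static void refile(struct buf *b)
{
        put(b);         /* may drop what was the last reference... */
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));

        if (!b)
                return 1;
        b->count = 1;
        get(b);         /* pin: like get_bh() before unlocking */
        refile(b);      /* internal put cannot free b under us */
        printf("still safe to inspect b: count=%d\n", b->count);
        put(b);         /* our pin: like the final __brelse() */
        return 0;
}
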
index c5ce6c1d1ff406c567d0b59d0ffe03ed11b8daa5..2f3f531f36064772dfd4e934847dd5c33fd1cbe4 100644 (file)
@@ -66,9 +66,9 @@ static int jfs_open(struct inode *inode, struct file *file)
                struct jfs_inode_info *ji = JFS_IP(inode);
                spin_lock_irq(&ji->ag_lock);
                if (ji->active_ag == -1) {
-                       ji->active_ag = ji->agno;
-                       atomic_inc(
-                           &JFS_SBI(inode->i_sb)->bmap->db_active[ji->agno]);
+                       struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
+                       ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
+                       atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]);
                }
                spin_unlock_irq(&ji->ag_lock);
        }
index ed53a4740168bc2d7692ea07531663049d991dfe..b78b2f978f043dae0f18c24713532218b3201582 100644 (file)
@@ -397,7 +397,7 @@ int diRead(struct inode *ip)
        release_metapage(mp);
 
        /* set the ag for the inode */
-       JFS_IP(ip)->agno = BLKTOAG(agstart, sbi);
+       JFS_IP(ip)->agstart = agstart;
        JFS_IP(ip)->active_ag = -1;
 
        return (rc);
@@ -901,7 +901,7 @@ int diFree(struct inode *ip)
 
        /* get the allocation group for this ino.
         */
-       agno = JFS_IP(ip)->agno;
+       agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb));
 
        /* Lock the AG specific inode map information
         */
@@ -1315,12 +1315,11 @@ int diFree(struct inode *ip)
 static inline void
 diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
 {
-       struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
        struct jfs_inode_info *jfs_ip = JFS_IP(ip);
 
        ip->i_ino = (iagno << L2INOSPERIAG) + ino;
        jfs_ip->ixpxd = iagp->inoext[extno];
-       jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
+       jfs_ip->agstart = le64_to_cpu(iagp->agstart);
        jfs_ip->active_ag = -1;
 }
 
@@ -1379,7 +1378,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
         */
 
        /* get the ag number of this iag */
-       agno = JFS_IP(pip)->agno;
+       agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
 
        if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
                /*
@@ -2921,10 +2920,9 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
                        continue;
                }
 
-               /* agstart that computes to the same ag is treated as same; */
                agstart = le64_to_cpu(iagp->agstart);
-               /* iagp->agstart = agstart & ~(mp->db_agsize - 1); */
                n = agstart >> mp->db_agl2size;
+               iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size);
 
                /* compute backed inodes */
                numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts))
index 1439f119ec830876ded18b7c5bf66b2ed1fdc80d..584a4a1a6e81478a2e212b2f20ec08400159e946 100644 (file)
@@ -50,8 +50,9 @@ struct jfs_inode_info {
        short   btindex;        /* btpage entry index*/
        struct inode *ipimap;   /* inode map                    */
        unsigned long cflag;    /* commit flags         */
+       u64     agstart;        /* agstart of the containing IAG */
        u16     bxflag;         /* xflag of pseudo buffer?      */
-       unchar  agno;           /* ag number                    */
+       unchar  pad;
        signed char active_ag;  /* ag currently allocating from */
        lid_t   blid;           /* lid of pseudo buffer?        */
        lid_t   atlhead;        /* anonymous tlock list head    */
index 8ea5efb5a34e3c5ccc01d24d4764469b20a71ddc..8d0c1c7c08204177e5f6f6127daa5caa7c545757 100644 (file)
@@ -80,7 +80,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
        int log_formatted = 0;
        struct inode *iplist[1];
        struct jfs_superblock *j_sb, *j_sb2;
-       uint old_agsize;
+       s64 old_agsize;
        int agsizechanged = 0;
        struct buffer_head *bh, *bh2;
 
index c88eab55aec95f4ab26427da882fd88c8b3ef512..275ca4749a2ee3280fd0544680c1df4f23baf3d5 100644 (file)
@@ -822,7 +822,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
                goto out;
 
        attr->set_buf[size] = '\0';
-       val = simple_strtol(attr->set_buf, NULL, 0);
+       val = simple_strtoll(attr->set_buf, NULL, 0);
        ret = attr->set(attr->data, val);
        if (ret == 0)
                ret = len; /* on success, claim we got the whole input */
index adb45ec9038cc95530e5d1be50d521a927110ac2..e374050a911c6141342bf5b464bb04fca396ce3e 100644 (file)
@@ -708,7 +708,13 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
 
        if (task->tk_status < 0) {
                dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
-               goto retry_rebind;
+               switch (task->tk_status) {
+               case -EACCES:
+               case -EIO:
+                       goto die;
+               default:
+                       goto retry_rebind;
+               }
        }
        if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
                rpc_delay(task, NLMCLNT_GRACE_WAIT);
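
The change above stops the unlock RPC from rebinding forever on errors that cannot succeed on retry. A small sketch of the same dispatch, using errno-style negative codes; the classification mirrors the hunk, the helper name is made up:

#include <errno.h>
#include <stdio.h>

static int handle_status(int status)
{
        if (status >= 0)
                return 0;               /* success */

        switch (status) {
        case -EACCES:
        case -EIO:
                return -1;              /* permanent: give up ("die") */
        default:
                return 1;               /* transient: rebind and retry */
        }
}

int main(void)
{
        printf("%d %d %d\n", handle_status(0),
               handle_status(-EIO), handle_status(-EAGAIN));
        return 0;
}
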
index 0a4f50dfadfbf672ff3bc0bcba0581a3e94b83bb..b286539d547a5a11db6760688fcc8f1a37360ec3 100644 (file)
@@ -160,10 +160,28 @@ EXPORT_SYMBOL_GPL(unlock_flocks);
 
 static struct kmem_cache *filelock_cache __read_mostly;
 
+static void locks_init_lock_always(struct file_lock *fl)
+{
+       fl->fl_next = NULL;
+       fl->fl_fasync = NULL;
+       fl->fl_owner = NULL;
+       fl->fl_pid = 0;
+       fl->fl_nspid = NULL;
+       fl->fl_file = NULL;
+       fl->fl_flags = 0;
+       fl->fl_type = 0;
+       fl->fl_start = fl->fl_end = 0;
+}
+
 /* Allocate an empty lock structure. */
 struct file_lock *locks_alloc_lock(void)
 {
-       return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
+       struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
+
+       if (fl)
+               locks_init_lock_always(fl);
+
+       return fl;
 }
 EXPORT_SYMBOL_GPL(locks_alloc_lock);
 
@@ -200,17 +218,9 @@ void locks_init_lock(struct file_lock *fl)
        INIT_LIST_HEAD(&fl->fl_link);
        INIT_LIST_HEAD(&fl->fl_block);
        init_waitqueue_head(&fl->fl_wait);
-       fl->fl_next = NULL;
-       fl->fl_fasync = NULL;
-       fl->fl_owner = NULL;
-       fl->fl_pid = 0;
-       fl->fl_nspid = NULL;
-       fl->fl_file = NULL;
-       fl->fl_flags = 0;
-       fl->fl_type = 0;
-       fl->fl_start = fl->fl_end = 0;
        fl->fl_ops = NULL;
        fl->fl_lmops = NULL;
+       locks_init_lock_always(fl);
 }
 
 EXPORT_SYMBOL(locks_init_lock);
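
The refactor above moves the per-field initialisation into locks_init_lock_always() so that locks_alloc_lock() can also run it, guaranteeing a freshly allocated lock never carries stale slab contents to callers that skip the full init. A toy version of the split, with an invented struct:

#include <stdio.h>
#include <stdlib.h>

struct lock {
        int type;
        long start, end;
        void *owner;
};

static void lock_init_always(struct lock *fl)
{
        fl->type = 0;
        fl->start = fl->end = 0;
        fl->owner = NULL;
}

static struct lock *lock_alloc(void)
{
        struct lock *fl = malloc(sizeof(*fl));

        if (fl)
                lock_init_always(fl);   /* no uninitialised fields escape */
        return fl;
}

int main(void)
{
        struct lock *fl = lock_alloc();

        if (!fl)
                return 1;
        printf("type=%d start=%ld\n", fl->type, fl->start);
        free(fl);
        return 0;
}
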
index 9ed89d1663f839c86b84e133617a47f4d855ee25..1afae26cf2364936d3053801c41f12cbb06f29d9 100644 (file)
@@ -555,13 +555,6 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry,
        return __logfs_create(dir, dentry, inode, target, destlen);
 }
 
-static int logfs_permission(struct inode *inode, int mask, unsigned int flags)
-{
-       if (flags & IPERM_FLAG_RCU)
-               return -ECHILD;
-       return generic_permission(inode, mask, flags, NULL);
-}
-
 static int logfs_link(struct dentry *old_dentry, struct inode *dir,
                struct dentry *dentry)
 {
@@ -820,7 +813,6 @@ const struct inode_operations logfs_dir_iops = {
        .mknod          = logfs_mknod,
        .rename         = logfs_rename,
        .rmdir          = logfs_rmdir,
-       .permission     = logfs_permission,
        .symlink        = logfs_symlink,
        .unlink         = logfs_unlink,
 };
index 9e425e7e6c8fbdcb2229fb9516480666c2ac0880..14ab8d3f2f0c8f7fc3e829ed26404e53a2420028 100644 (file)
@@ -238,7 +238,8 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
 
        /*
         * Read/write DACs are always overridable.
-        * Executable DACs are overridable if at least one exec bit is set.
+        * Executable DACs are overridable for all directories and
+        * for non-directories that have at least one exec bit set.
         */
        if (!(mask & MAY_EXEC) || execute_ok(inode))
                if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
@@ -432,6 +433,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
                        goto err_parent;
                BUG_ON(nd->inode != parent->d_inode);
        } else {
+               if (dentry->d_parent != parent)
+                       goto err_parent;
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
                if (!__d_rcu_to_refcount(dentry, nd->seq))
                        goto err_child;
@@ -939,7 +942,6 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                 * Don't forget we might have a non-mountpoint managed dentry
                 * that wants to block transit.
                 */
-               *inode = path->dentry->d_inode;
                if (unlikely(managed_dentry_might_block(path->dentry)))
                        return false;
 
@@ -952,6 +954,12 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                path->mnt = mounted;
                path->dentry = mounted->mnt_root;
                nd->seq = read_seqcount_begin(&path->dentry->d_seq);
+               /*
+                * Update the inode too. We don't need to re-check the
+                * dentry sequence number here after this d_inode read,
+                * because a mount-point is always pinned.
+                */
+               *inode = path->dentry->d_inode;
        }
        return true;
 }
@@ -1011,9 +1019,6 @@ failed:
  * Follow down to the covering mount currently visible to userspace.  At each
  * point, the filesystem owning that dentry may be queried as to whether the
  * caller is permitted to proceed or not.
- *
- * Care must be taken as namespace_sem may be held (indicated by mounting_here
- * being true).
  */
 int follow_down(struct path *path)
 {
index ce153a6b3aecb42ee0567e6087f4e9597a55be76..419119c371bf81d3a5487448a289cf5e3e219314 100644 (file)
@@ -259,12 +259,10 @@ static void nfs_fscache_disable_inode_cookie(struct inode *inode)
                dfprintk(FSCACHE,
                         "NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));
 
-               /* Need to invalidate any mapped pages that were read in before
-                * turning off the cache.
+               /* Need to uncache any pages attached to this inode that
+                * fscache knows about before turning off the cache.
                 */
-               if (inode->i_mapping && inode->i_mapping->nrpages)
-                       invalidate_inode_pages2(inode->i_mapping);
-
+               fscache_uncache_all_inode_pages(NFS_I(inode)->fscache, inode);
                nfs_fscache_zap_inode_cookie(inode);
        }
 }
index 144f2a3c718575cf0b3448118cd881600339e5b6..6f4850deb272857ae5829251d64bf7aabf702130 100644 (file)
@@ -256,7 +256,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
 
        nfs_attr_check_mountpoint(sb, fattr);
 
-       if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0 && (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0)
+       if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
+           !nfs_attr_use_mounted_on_fileid(fattr))
                goto out_no_inode;
        if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
                goto out_no_inode;
@@ -1294,7 +1295,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                if (new_isize != cur_isize) {
                        /* Do we perhaps have any outstanding writes, or has
                         * the file grown beyond our last write? */
-                       if (nfsi->npages == 0 || new_isize > cur_isize) {
+                       if ((nfsi->npages == 0 && !test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) ||
+                            new_isize > cur_isize) {
                                i_size_write(inode, new_isize);
                                invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
                        }
index b9056cbe68d6c0f8f6c9db7b22c5563d5fc1f7d7..2a55347a2daa85f66aa24b44d7bb7c93efe1d409 100644 (file)
@@ -45,6 +45,17 @@ static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct
                fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT;
 }
 
+static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
+{
+       if (((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) == 0) ||
+           (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
+            ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
+               return 0;
+
+       fattr->fileid = fattr->mounted_on_fileid;
+       return 1;
+}
+
 struct nfs_clone_mount {
        const struct super_block *sb;
        const struct dentry *dentry;
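
The helper added above substitutes mounted_on_fileid for fileid, but only when that attribute is valid and the object is a mountpoint or referral. A standalone sketch of the same gating with invented flag values:

#include <stdio.h>

#define FATTR_FILEID            0x1
#define FATTR_MOUNTED_ON_FILEID 0x2
#define FATTR_MOUNTPOINT        0x4
#define FATTR_REFERRAL          0x8

struct fattr { unsigned valid; unsigned long long fileid, mounted_on_fileid; };

static int use_mounted_on_fileid(struct fattr *f)
{
        if (!(f->valid & FATTR_MOUNTED_ON_FILEID) ||
            !(f->valid & (FATTR_MOUNTPOINT | FATTR_REFERRAL)))
                return 0;
        f->fileid = f->mounted_on_fileid;       /* fall back to it */
        return 1;
}

int main(void)
{
        struct fattr f = { FATTR_MOUNTED_ON_FILEID | FATTR_REFERRAL, 0, 42 };

        if (use_mounted_on_fileid(&f))
                printf("fileid=%llu\n", f.fileid);
        return 0;
}
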
index 426908809c9738601f00cbf25fd220512f2141c1..f9d03abcd04cd803f7ec69fcd81a84eba2d7f66b 100644 (file)
@@ -30,6 +30,7 @@
  */
 
 #include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
 
 #include "internal.h"
 #include "nfs4filelayout.h"
@@ -397,7 +398,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
         * this offset and save the original offset.
         */
        data->args.offset = filelayout_get_dserver_offset(lseg, offset);
-       data->mds_offset = offset;
 
        /* Perform an asynchronous write */
        status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
@@ -552,13 +552,18 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                __func__, nfl_util, fl->num_fh, fl->first_stripe_index,
                fl->pattern_offset);
 
-       if (!fl->num_fh)
+       /* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
+        * Further checking is done in filelayout_check_layout */
+       if (fl->num_fh < 0 || fl->num_fh >
+           max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
                goto out_err;
 
-       fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
-                              gfp_flags);
-       if (!fl->fh_array)
-               goto out_err;
+       if (fl->num_fh > 0) {
+               fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
+                                      gfp_flags);
+               if (!fl->fh_array)
+                       goto out_err;
+       }
 
        for (i = 0; i < fl->num_fh; i++) {
                /* Do we want to use a mempool here? */
@@ -661,8 +666,9 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
        u64 p_stripe, r_stripe;
        u32 stripe_unit;
 
-       if (!pnfs_generic_pg_test(pgio, prev, req))
-               return 0;
+       if (!pnfs_generic_pg_test(pgio, prev, req) ||
+           !nfs_generic_pg_test(pgio, prev, req))
+               return false;
 
        if (!pgio->pg_lseg)
                return 1;
index d2c4b59c896d3dda94980f789d91601f184cc53d..5879b23e0c99a1b0dfcff8c072bb0432ade497bf 100644 (file)
@@ -2265,12 +2265,14 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
        return nfs4_map_errors(status);
 }
 
+static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
 /*
  * Get locations and (maybe) other attributes of a referral.
  * Note that we'll actually follow the referral later when
  * we detect fsid mismatch in inode revalidation
  */
-static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle)
+static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
+                            struct nfs_fattr *fattr, struct nfs_fh *fhandle)
 {
        int status = -ENOMEM;
        struct page *page = NULL;
@@ -2288,15 +2290,16 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct
                goto out;
        /* Make sure server returned a different fsid for the referral */
        if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
-               dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name);
+               dprintk("%s: server did not return a different fsid for"
+                       " a referral at %s\n", __func__, name->name);
                status = -EIO;
                goto out;
        }
+       /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
+       nfs_fixup_referral_attributes(&locations->fattr);
 
+       /* replace the lookup nfs_fattr with the locations nfs_fattr */
        memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
-       fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL;
-       if (!fattr->mode)
-               fattr->mode = S_IFDIR;
        memset(fhandle, 0, sizeof(struct nfs_fh));
 out:
        if (page)
@@ -4667,11 +4670,15 @@ static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
        return len;
 }
 
+/*
+ * nfs_fhget will use either the mounted_on_fileid or the fileid
+ */
 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
 {
-       if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) &&
-               (fattr->valid & NFS_ATTR_FATTR_FSID) &&
-               (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
+       if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
+              (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
+             (fattr->valid & NFS_ATTR_FATTR_FSID) &&
+             (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
                return;
 
        fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
@@ -4686,7 +4693,6 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
        struct nfs_server *server = NFS_SERVER(dir);
        u32 bitmask[2] = {
                [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
-               [1] = FATTR4_WORD1_MOUNTED_ON_FILEID,
        };
        struct nfs4_fs_locations_arg args = {
                .dir_fh = NFS_FH(dir),
@@ -4705,11 +4711,18 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
        int status;
 
        dprintk("%s: start\n", __func__);
+
+       /* Ask for the fileid of the absent filesystem if mounted_on_fileid
+        * is not supported */
+       if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
+               bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
+       else
+               bitmask[0] |= FATTR4_WORD0_FILEID;
+
        nfs_fattr_init(&fs_locations->fattr);
        fs_locations->server = server;
        fs_locations->nlocations = 0;
        status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
-       nfs_fixup_referral_attributes(&fs_locations->fattr);
        dprintk("%s: returned status = %d\n", __func__, status);
        return status;
 }
@@ -5098,7 +5111,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
        if (mxresp_sz == 0)
                mxresp_sz = NFS_MAX_FILE_IO_SIZE;
        /* Fore channel attributes */
-       args->fc_attrs.headerpadsz = 0;
        args->fc_attrs.max_rqst_sz = mxrqst_sz;
        args->fc_attrs.max_resp_sz = mxresp_sz;
        args->fc_attrs.max_ops = NFS4_MAX_OPS;
@@ -5111,7 +5123,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
                args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
 
        /* Back channel attributes */
-       args->bc_attrs.headerpadsz = 0;
        args->bc_attrs.max_rqst_sz = PAGE_SIZE;
        args->bc_attrs.max_resp_sz = PAGE_SIZE;
        args->bc_attrs.max_resp_sz_cached = 0;
@@ -5131,8 +5142,6 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args
        struct nfs4_channel_attrs *sent = &args->fc_attrs;
        struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
 
-       if (rcvd->headerpadsz > sent->headerpadsz)
-               return -EINVAL;
        if (rcvd->max_resp_sz > sent->max_resp_sz)
                return -EINVAL;
        /*
@@ -5697,6 +5706,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
 {
        struct nfs4_layoutreturn *lrp = calldata;
        struct nfs_server *server;
+       struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
 
        dprintk("--> %s\n", __func__);
 
@@ -5708,16 +5718,15 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
                nfs_restart_rpc(task, lrp->clp);
                return;
        }
+       spin_lock(&lo->plh_inode->i_lock);
        if (task->tk_status == 0) {
-               struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
-
                if (lrp->res.lrs_present) {
-                       spin_lock(&lo->plh_inode->i_lock);
                        pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
-                       spin_unlock(&lo->plh_inode->i_lock);
                } else
                        BUG_ON(!list_empty(&lo->plh_segs));
        }
+       lo->plh_block_lgets--;
+       spin_unlock(&lo->plh_inode->i_lock);
        dprintk("<-- %s\n", __func__);
 }
 
index d869a5e5464b46cf0276e8df3466dcfafd83acda..e6e8f3b9a1dea29908a5179a38293dfbefce0ad8 100644 (file)
@@ -91,7 +91,7 @@ static int nfs4_stat_to_errno(int);
 #define encode_getfh_maxsz      (op_encode_hdr_maxsz)
 #define decode_getfh_maxsz      (op_decode_hdr_maxsz + 1 + \
                                ((3+NFS4_FHSIZE) >> 2))
-#define nfs4_fattr_bitmap_maxsz 3
+#define nfs4_fattr_bitmap_maxsz 4
 #define encode_getattr_maxsz    (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
 #define nfs4_name_maxsz                (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
 #define nfs4_path_maxsz                (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
@@ -255,7 +255,7 @@ static int nfs4_stat_to_errno(int);
 #define decode_fs_locations_maxsz \
                                (0)
 #define encode_secinfo_maxsz   (op_encode_hdr_maxsz + nfs4_name_maxsz)
-#define decode_secinfo_maxsz   (op_decode_hdr_maxsz + 4 + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)))
+#define decode_secinfo_maxsz   (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4))
 
 #if defined(CONFIG_NFS_V4_1)
 #define NFS4_MAX_MACHINE_NAME_LEN (64)
@@ -1725,7 +1725,7 @@ static void encode_create_session(struct xdr_stream *xdr,
        *p++ = cpu_to_be32(args->flags);                        /*flags */
 
        /* Fore Channel */
-       *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+       *p++ = cpu_to_be32(0);                          /* header padding size */
        *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
        *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
        *p++ = cpu_to_be32(max_resp_sz_cached);         /* Max resp sz cached */
@@ -1734,7 +1734,7 @@ static void encode_create_session(struct xdr_stream *xdr,
        *p++ = cpu_to_be32(0);                          /* rdmachannel_attrs */
 
        /* Back Channel */
-       *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+       *p++ = cpu_to_be32(0);                          /* header padding size */
        *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
        *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
        *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached);  /* Max resp sz cached */
@@ -3098,7 +3098,7 @@ out_overflow:
        return -EIO;
 }
 
-static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
+static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res)
 {
        __be32 *p;
 
@@ -3109,7 +3109,7 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
                if (unlikely(!p))
                        goto out_overflow;
                bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
-               return -be32_to_cpup(p);
+               *res = -be32_to_cpup(p);
        }
        return 0;
 out_overflow:
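
The signature change above separates two things the old code conflated in one return value: whether decoding succeeded, and the RDATTR_ERROR value that was decoded. A toy decoder showing the out-parameter split; buffer layout and error numbers are illustrative:

#include <stdio.h>

static int decode_error(const int *buf, int len, int *res)
{
        if (len < 1)
                return -5;      /* -EIO: malformed stream, decode failed */
        *res = -buf[0];         /* decoded value, may itself be an error */
        return 0;               /* decoding itself succeeded */
}

int main(void)
{
        int err = 0;
        int wire[] = { 10016 }; /* e.g. NFS4ERR_WRONGSEC on the wire */

        if (decode_error(wire, 1, &err) == 0)
                printf("decoded err=%d\n", err);
        return 0;
}
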
@@ -4070,6 +4070,7 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
        int status;
        umode_t fmode = 0;
        uint32_t type;
+       int32_t err;
 
        status = decode_attr_type(xdr, bitmap, &type);
        if (status < 0)
@@ -4095,13 +4096,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
                goto xdr_error;
        fattr->valid |= status;
 
-       status = decode_attr_error(xdr, bitmap);
-       if (status == -NFS4ERR_WRONGSEC) {
-               nfs_fixup_secinfo_attributes(fattr, fh);
-               status = 0;
-       }
+       err = 0;
+       status = decode_attr_error(xdr, bitmap, &err);
        if (status < 0)
                goto xdr_error;
+       if (err == -NFS4ERR_WRONGSEC)
+               nfs_fixup_secinfo_attributes(fattr, fh);
 
        status = decode_attr_filehandle(xdr, bitmap, fh);
        if (status < 0)
@@ -4997,12 +4997,14 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
                             struct nfs4_channel_attrs *attrs)
 {
        __be32 *p;
-       u32 nr_attrs;
+       u32 nr_attrs, val;
 
        p = xdr_inline_decode(xdr, 28);
        if (unlikely(!p))
                goto out_overflow;
-       attrs->headerpadsz = be32_to_cpup(p++);
+       val = be32_to_cpup(p++);        /* headerpadsz */
+       if (val)
+               return -EINVAL;         /* no support for header padding yet */
        attrs->max_rqst_sz = be32_to_cpup(p++);
        attrs->max_resp_sz = be32_to_cpup(p++);
        attrs->max_resp_sz_cached = be32_to_cpup(p++);
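
Rather than recording a header padding size it can never honour, decode_chan_attrs() now rejects any non-zero value. A compact sketch of the validate-and-reject shape with an assumed word layout:

#include <stdio.h>

struct chan_attrs { unsigned max_rqst_sz, max_resp_sz; };

static int decode_chan_attrs(const unsigned *p, struct chan_attrs *attrs)
{
        unsigned headerpadsz = *p++;

        if (headerpadsz)
                return -22;     /* -EINVAL: no support for header padding */
        attrs->max_rqst_sz = *p++;
        attrs->max_resp_sz = *p++;
        return 0;
}

int main(void)
{
        struct chan_attrs a;
        unsigned wire[] = { 0, 4096, 8192 };

        printf("rc=%d max_rqst=%u\n", decode_chan_attrs(wire, &a), a.max_rqst_sz);
        return 0;
}
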
index 9cf208df1f2594b0ecbfc61e673e97c32844156a..8ff2ea3f10ef865ecd2b60ee89122afb159f6cdc 100644 (file)
@@ -108,7 +108,6 @@ _dev_list_add(const struct nfs_server *nfss,
                de = n;
        }
 
-       atomic_inc(&de->id_node.ref);
        return de;
 }
 
@@ -1001,6 +1000,9 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
        if (!pnfs_generic_pg_test(pgio, prev, req))
                return false;
 
+       if (pgio->pg_lseg == NULL)
+               return true;
+
        return pgio->pg_count + req->wb_bytes <=
                        OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
 }
index dc3956c0de80df2c6360c97f54e75663b6505924..1d06f8e2adeaec53fd8dc71c4e1a9cae2baef9c3 100644 (file)
@@ -291,7 +291,7 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
        struct nfs_read_data *rdata;
 
        state->status = status;
-       dprintk("%s: Begin status=%ld eof=%d\n", __func__, status, eof);
+       dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof);
        rdata = state->rpcdata;
        rdata->task.tk_status = status;
        if (status >= 0) {
index 7913961aff225995fda011973ed048276ae826e9..009855716286bf098eba9ac20a6c04db201d3d06 100644 (file)
@@ -204,7 +204,7 @@ nfs_wait_on_request(struct nfs_page *req)
                        TASK_UNINTERRUPTIBLE);
 }
 
-static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
+bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
 {
        /*
         * FIXME: ideally we should be able to coalesce all requests
@@ -218,6 +218,7 @@ static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_p
 
        return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
 }
+EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
 
 /**
  * nfs_pageio_init - initialise a page io descriptor
index 8c1309d852a6f4709a53c4fb6e7470371aaf1ea8..29c0ca7fc347cdf8cebf6bd83960de18942ca4e4 100644 (file)
@@ -634,14 +634,16 @@ _pnfs_return_layout(struct inode *ino)
 
        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
-       if (!lo || !mark_matching_lsegs_invalid(lo, &tmp_list, NULL)) {
+       if (!lo) {
                spin_unlock(&ino->i_lock);
-               dprintk("%s: no layout segments to return\n", __func__);
-               goto out;
+               dprintk("%s: no layout to return\n", __func__);
+               return status;
        }
        stateid = nfsi->layout->plh_stateid;
        /* Reference matched in nfs4_layoutreturn_release */
        get_layout_hdr(lo);
+       mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+       lo->plh_block_lgets++;
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
 
@@ -650,6 +652,9 @@ _pnfs_return_layout(struct inode *ino)
        lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
+               set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
+               set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
+               put_layout_hdr(lo);
                goto out;
        }
 
@@ -887,7 +892,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
                        ret = get_lseg(lseg);
                        break;
                }
-               if (cmp_layout(range, &lseg->pls_range) > 0)
+               if (lseg->pls_range.offset > range->offset)
                        break;
        }
 
@@ -1059,23 +1064,36 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                gfp_flags = GFP_NOFS;
        }
 
-       if (pgio->pg_count == prev->wb_bytes) {
+       if (pgio->pg_lseg == NULL) {
+               if (pgio->pg_count != prev->wb_bytes)
+                       return true;
                /* This is the first coalesce call for a series of nfs_pages */
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   prev->wb_context,
-                                                  req_offset(req),
+                                                  req_offset(prev),
                                                   pgio->pg_count,
                                                   access_type,
                                                   gfp_flags);
-               return true;
+               if (pgio->pg_lseg == NULL)
+                       return true;
        }
 
-       if (pgio->pg_lseg &&
-           req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset,
-                                        pgio->pg_lseg->pls_range.length))
-               return false;
-
-       return true;
+       /*
+        * Test if an nfs_page is fully contained in the pnfs_layout_range.
+        * Note that this test makes several assumptions:
+        * - the previous nfs_page in the struct nfs_pageio_descriptor
+        *   is known to lie within the range.
+        * - the nfs_page being tested is known to be contiguous with the
+        *   previous nfs_page.
+        * - layout ranges are page aligned, so we only have to test the
+        *   start offset of the request.
+        *
+        * Please also note that 'end_offset' is actually the offset of the
+        * first byte that lies outside the pnfs_layout_range. FIXME?
+        */
+       return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
+                                        pgio->pg_lseg->pls_range.length);
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
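For reference, the half-open test above reduces to the following stand-alone predicate (an illustrative sketch only, assuming end_offset(off, len) returns off + len, i.e. the first byte outside the layout range):

	/* illustrative sketch, not part of the patch */
	static inline bool req_in_lseg_range(u64 req_off, u64 range_off,
					     u64 range_len)
	{
		/* accept the request only if it starts strictly before
		 * the first byte outside the layout range */
		return req_off < range_off + range_len;
	}

A request starting exactly at range_off + range_len is rejected, which ends the current coalescing series.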
 
index 48d0a8e4d06212264469df6b4c21acba110597f0..96bf4e6f45beda6d9646c5b19a48d13ef1ec3f5d 100644 (file)
@@ -186,6 +186,7 @@ int pnfs_ld_read_done(struct nfs_read_data *);
 /* pnfs_dev.c */
 struct nfs4_deviceid_node {
        struct hlist_node               node;
+       struct hlist_node               tmpnode;
        const struct pnfs_layoutdriver_type *ld;
        const struct nfs_client         *nfs_client;
        struct nfs4_deviceid            deviceid;
index c65e133ce9c071d17e26729fe767b23f4271b445..f0f8e1e22f6c945119ff359491dce07b79a0cf69 100644 (file)
@@ -174,6 +174,7 @@ nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
                        const struct nfs4_deviceid *id)
 {
        INIT_HLIST_NODE(&d->node);
+       INIT_HLIST_NODE(&d->tmpnode);
        d->ld = ld;
        d->nfs_client = nfs_client;
        d->deviceid = *id;
@@ -208,6 +209,7 @@ nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
 
        hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
        spin_unlock(&nfs4_deviceid_lock);
+       atomic_inc(&new->ref);
 
        return new;
 }
@@ -238,24 +240,29 @@ static void
 _deviceid_purge_client(const struct nfs_client *clp, long hash)
 {
        struct nfs4_deviceid_node *d;
-       struct hlist_node *n, *next;
+       struct hlist_node *n;
        HLIST_HEAD(tmp);
 
+       spin_lock(&nfs4_deviceid_lock);
        rcu_read_lock();
        hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
                if (d->nfs_client == clp && atomic_read(&d->ref)) {
                        hlist_del_init_rcu(&d->node);
-                       hlist_add_head(&d->node, &tmp);
+                       hlist_add_head(&d->tmpnode, &tmp);
                }
        rcu_read_unlock();
+       spin_unlock(&nfs4_deviceid_lock);
 
        if (hlist_empty(&tmp))
                return;
 
        synchronize_rcu();
-       hlist_for_each_entry_safe(d, n, next, &tmp, node)
+       while (!hlist_empty(&tmp)) {
+               d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
+               hlist_del(&d->tmpnode);
                if (atomic_dec_and_test(&d->ref))
                        d->ld->free_deviceid_node(d);
+       }
 }
 
 void
@@ -263,8 +270,8 @@ nfs4_deviceid_purge_client(const struct nfs_client *clp)
 {
        long h;
 
-       spin_lock(&nfs4_deviceid_lock);
+       if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
+               return;
        for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
                _deviceid_purge_client(clp, h);
-       spin_unlock(&nfs4_deviceid_lock);
 }
index e268e3b23497282f02e7cb9d209f189a886c60a7..727168059684e92b22b0c1015fc229668a4bf07d 100644 (file)
@@ -864,6 +864,8 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
 
        data->args.fh     = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
+       /* pnfs_set_layoutcommit needs this */
+       data->mds_offset = data->args.offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages  = data->pagevec;
        data->args.count  = count;
index 18b3e8975fe05c2364dd507d50b364b4c13b0dfd..fbb2a5ef5817cfa8c3800c0a1205562ae47549db 100644 (file)
@@ -82,6 +82,7 @@ config NFSD_V4
        select NFSD_V3
        select FS_POSIX_ACL
        select SUNRPC_GSS
+       select CRYPTO
        help
          This option enables support in your system's NFS server for
          version 4 of the NFS protocol (RFC 3530).
index 1f5eae40f34ef4aa57174be24445a39854906e71..2b1449dd2f495b4c6389399b8e29e0da7165d2c2 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/lockd/lockd.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/gss_krb5_enctypes.h>
 
 #include "idmap.h"
 #include "nfsd.h"
@@ -189,18 +190,10 @@ static struct file_operations export_features_operations = {
        .release        = single_release,
 };
 
-#ifdef CONFIG_SUNRPC_GSS
+#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
 static int supported_enctypes_show(struct seq_file *m, void *v)
 {
-       struct gss_api_mech *k5mech;
-
-       k5mech = gss_mech_get_by_name("krb5");
-       if (k5mech == NULL)
-               goto out;
-       if (k5mech->gm_upcall_enctypes != NULL)
-               seq_printf(m, k5mech->gm_upcall_enctypes);
-       gss_mech_put(k5mech);
-out:
+       seq_printf(m, KRB5_SUPPORTED_ENCTYPES);
        return 0;
 }
 
@@ -215,7 +208,7 @@ static struct file_operations supported_enctypes_ops = {
        .llseek         = seq_lseek,
        .release        = single_release,
 };
-#endif /* CONFIG_SUNRPC_GSS */
+#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
 
 extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
 extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
@@ -1427,9 +1420,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
                [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
                [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
                [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
-#ifdef CONFIG_SUNRPC_GSS
+#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
                [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
-#endif /* CONFIG_SUNRPC_GSS */
+#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
 #ifdef CONFIG_NFSD_V4
                [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
                [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
index d5718273bb32f216922c474635ad479c7912064d..fd0acca5370a5da699fe62a5d94d0b6e81f67b78 100644 (file)
@@ -696,7 +696,15 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
 }
 #endif /* CONFIG_NFSD_V3 */
 
+static int nfsd_open_break_lease(struct inode *inode, int access)
+{
+       unsigned int mode;
 
+       if (access & NFSD_MAY_NOT_BREAK_LEASE)
+               return 0;
+       mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
+       return break_lease(inode, mode | O_NONBLOCK);
+}
 
 /*
  * Open an existing file or directory.
@@ -744,12 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
        if (!inode->i_fop)
                goto out;
 
-       /*
-        * Check to see if there are any leases on this file.
-        * This may block while leases are broken.
-        */
-       if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
-               host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
+       host_err = nfsd_open_break_lease(inode, access);
        if (host_err) /* NOMEM or WOULDBLOCK */
                goto out_nfserr;
 
@@ -1660,8 +1663,10 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
        if (!dold->d_inode)
                goto out_drop_write;
        host_err = nfsd_break_lease(dold->d_inode);
-       if (host_err)
+       if (host_err) {
+               err = nfserrno(host_err);
                goto out_drop_write;
+       }
        host_err = vfs_link(dold, dirp, dnew);
        if (!host_err) {
                err = nfserrno(commit_metadata(ffhp));
index b954878ad6cef17094d7337f41823bd73fe48e14..b9b45fc2903e6c38f52410a23bbe258032edd872 100644 (file)
@@ -801,12 +801,7 @@ out_err:
 
 int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
-       struct nilfs_root *root;
-
-       if (flags & IPERM_FLAG_RCU)
-               return -ECHILD;
-
-       root = NILFS_I(inode)->i_root;
+       struct nilfs_root *root = NILFS_I(inode)->i_root;
        if ((mask & MAY_WRITE) && root &&
            root->cno != NILFS_CPTREE_CURRENT_CNO)
                return -EROFS; /* snapshot is not writable */
index d738a7e493ddc07ed1b4b1fb7a30198b3d477c5f..2c6d95257a4d5c7a61bf39b895c5ebbd647cc49b 100644 (file)
@@ -4,7 +4,6 @@
  * Released under GPL v2.
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
index 14def991d9dd1cc1ac18faa981f135f138ebb13b..fc5bc27676926cccbc27f3349886e02bbc047211 100644 (file)
@@ -2169,11 +2169,7 @@ static const struct file_operations proc_fd_operations = {
  */
 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
 {
-       int rv;
-
-       if (flags & IPERM_FLAG_RCU)
-               return -ECHILD;
-       rv = generic_permission(inode, mask, flags, NULL);
+       int rv = generic_permission(inode, mask, flags, NULL);
        if (rv == 0)
                return 0;
        if (task_pid(current) == proc_pid(inode))
@@ -2712,6 +2708,9 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
        struct task_io_accounting acct = task->ioac;
        unsigned long flags;
 
+       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+               return -EACCES;
+
        if (whole && lock_task_sighand(task, &flags)) {
                struct task_struct *t = task;
 
@@ -2843,7 +2842,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
 #endif
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-       INF("io",       S_IRUGO, proc_tgid_io_accounting),
+       INF("io",       S_IRUSR, proc_tgid_io_accounting),
 #endif
 #ifdef CONFIG_HARDWALL
        INF("hardwall",   S_IRUGO, proc_pid_hardwall),
@@ -3185,7 +3184,7 @@ static const struct pid_entry tid_base_stuff[] = {
        REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
 #endif
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-       INF("io",       S_IRUGO, proc_tid_io_accounting),
+       INF("io",       S_IRUSR, proc_tid_io_accounting),
 #endif
 #ifdef CONFIG_HARDWALL
        INF("hardwall",   S_IRUGO, proc_pid_hardwall),
index f50133c11c2458a223f4575aa3b04e884cafd60d..d167de365a8de0f52ae880e0b88c75750109d981 100644 (file)
@@ -304,9 +304,6 @@ static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags)
        struct ctl_table *table;
        int error;
 
-       if (flags & IPERM_FLAG_RCU)
-               return -ECHILD;
-
        /* Executable files are not allowed under /proc/sys/ */
        if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))
                return -EACCES;
index e8a62f41b458010b6922c763b6644b230b96fac4..d78089690965b3540344ea8fafbd9403fe825cf8 100644 (file)
@@ -954,8 +954,6 @@ static int xattr_mount_check(struct super_block *s)
 
 int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
-       if (flags & IPERM_FLAG_RCU)
-               return -ECHILD;
        /*
         * We don't do permission checks on the internal objects.
         * Permissions are determined by the "owning" object.
index f0511e8169679fa3872938a2a9e0e9c19998049b..eed99428f1046d6a3dbd6191340d9d70e18b7ce0 100644 (file)
@@ -27,14 +27,18 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
 {
        struct inode *inode = file->f_mapping->host;
        struct mtd_info *mtd = inode->i_sb->s_mtd;
-       unsigned long isize, offset;
+       unsigned long isize, offset, maxpages, lpages;
 
        if (!mtd)
                goto cant_map_directly;
 
+       /* the mapping mustn't extend beyond the EOF */
+       lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        isize = i_size_read(inode);
        offset = pgoff << PAGE_SHIFT;
-       if (offset > isize || len > isize || offset > isize - len)
+
+       maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if ((pgoff >= maxpages) || (maxpages - pgoff < lpages))
                return (unsigned long) -EINVAL;
 
        /* we need to call down to the MTD layer to do the actual mapping */
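Worked example for the page-granular bound above (illustrative, PAGE_SIZE = 4096): isize = 10000 gives maxpages = 3 and a len = 8192 request gives lpages = 2, so pgoff = 1 passes (3 - 1 >= 2) while pgoff = 2 is rejected. Rounding both sides up to whole pages keeps a request whose final page merely overlaps EOF mappable, while still refusing any mapping that would extend past the last page of the image.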
index f67acbdda5e8c13fce54e51f72fe0882f2fa94da..dffeb3795af1d4204f8554447dbb2d2c33992429 100644 (file)
@@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
 
 /*
  * Called when the clock was set to cancel the timers in the cancel
- * list.
+ * list. This will wake up processes waiting on these timers. The
+ * wake-up requires ctx->ticks to be non-zero, therefore we increment
+ * it before calling wake_up_locked().
  */
 void timerfd_clock_was_set(void)
 {
@@ -76,6 +78,7 @@ void timerfd_clock_was_set(void)
                spin_lock_irqsave(&ctx->wqh.lock, flags);
                if (ctx->moffs.tv64 != moffs.tv64) {
                        ctx->moffs.tv64 = KTIME_MAX;
+                       ctx->ticks++;
                        wake_up_locked(&ctx->wqh);
                }
                spin_unlock_irqrestore(&ctx->wqh.lock, flags);
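The reader side gates on ctx->ticks, which is why the increment is needed; a condensed, illustrative view of the wait in timerfd_read():

	/* illustrative condensation, not part of the patch */
	spin_lock_irq(&ctx->wqh.lock);
	res = wait_event_interruptible_locked_irq(ctx->wqh, ctx->ticks);
	/* a wake-up that leaves ctx->ticks at zero would simply be
	 * re-slept, hence ticks is bumped before wake_up_locked() */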
index 8c892c2d5300f2d6a44e8faf481b14ecb904d9fe..529be058202938cf1226ec8644401951df8ab032 100644 (file)
@@ -2146,6 +2146,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
        if (IS_ERR(sb)) {
                err = PTR_ERR(sb);
                kfree(c);
+               goto out_close;
        }
 
        if (sb->s_root) {
index 29309e25417fdf30209f69d301ea55e4daaed401..b57aab9a1184719a027715d16786a54cdbd43d70 100644 (file)
@@ -56,16 +56,12 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
 
        lock_ufs(dir->i_sb);
        ino = ufs_inode_by_name(dir, &dentry->d_name);
-       if (ino) {
+       if (ino)
                inode = ufs_iget(dir->i_sb, ino);
-               if (IS_ERR(inode)) {
-                       unlock_ufs(dir->i_sb);
-                       return ERR_CAST(inode);
-               }
-       }
        unlock_ufs(dir->i_sb);
-       d_add(dentry, inode);
-       return NULL;
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
+       return d_splice_alias(inode, dentry);
 }
 
 /*
index f4213ba1ff853dad53d16d27b6cd713f01784ea7..7f782af286bfa0edd73a125cdcb892339025913a 100644 (file)
@@ -131,19 +131,34 @@ xfs_file_fsync(
 {
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error = 0;
        int                     log_flushed = 0;
 
        trace_xfs_file_fsync(ip);
 
-       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+       if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);
 
        xfs_iflags_clear(ip, XFS_ITRUNCATED);
 
        xfs_ioend_wait(ip);
 
+       if (mp->m_flags & XFS_MOUNT_BARRIER) {
+               /*
+                * If we have an RT and/or log subvolume we need to make sure
+                * to flush the write cache the device used for file data
+                * first.  This is to ensure newly written file data make
+                * it to disk before logging the new inode size in case of
+                * an extending write.
+                */
+               if (XFS_IS_REALTIME_INODE(ip))
+                       xfs_blkdev_issue_flush(mp->m_rtdev_targp);
+               else if (mp->m_logdev_targp != mp->m_ddev_targp)
+                       xfs_blkdev_issue_flush(mp->m_ddev_targp);
+       }
+
        /*
         * We always need to make sure that the required inode state is safe on
         * disk.  The inode might be clean but we still might need to force the
@@ -175,9 +190,9 @@ xfs_file_fsync(
                 * updates.  The sync transaction will also force the log.
                 */
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
-               tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
+               tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
                error = xfs_trans_reserve(tp, 0,
-                               XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
+                               XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
                if (error) {
                        xfs_trans_cancel(tp, 0);
                        return -error;
@@ -209,28 +224,25 @@ xfs_file_fsync(
                 * force the log.
                 */
                if (xfs_ipincount(ip)) {
-                       error = _xfs_log_force_lsn(ip->i_mount,
+                       error = _xfs_log_force_lsn(mp,
                                        ip->i_itemp->ili_last_lsn,
                                        XFS_LOG_SYNC, &log_flushed);
                }
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
        }
 
-       if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
-               /*
-                * If the log write didn't issue an ordered tag we need
-                * to flush the disk cache for the data device now.
-                */
-               if (!log_flushed)
-                       xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);
-
-               /*
-                * If this inode is on the RT dev we need to flush that
-                * cache as well.
-                */
-               if (XFS_IS_REALTIME_INODE(ip))
-                       xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
-       }
+       /*
+        * If we only have a single device, and the log force above was
+        * a no-op, we might have to flush the data device cache here.
+        * This can only happen for fdatasync/O_DSYNC if we were overwriting
+        * an already allocated file and thus do not have any metadata to
+        * commit.
+        */
+       if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
+           mp->m_logdev_targp == mp->m_ddev_targp &&
+           !XFS_IS_REALTIME_INODE(ip) &&
+           !log_flushed)
+               xfs_blkdev_issue_flush(mp->m_ddev_targp);
 
        return -error;
 }
index dd21784525a8096ef76f6d6cd14c80a319918839..d44d92cd12b17c7645156b4754c39ea29b5b10e5 100644 (file)
@@ -182,7 +182,7 @@ xfs_vn_mknod(
        if (IS_POSIXACL(dir)) {
                default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
                if (IS_ERR(default_acl))
-                       return -PTR_ERR(default_acl);
+                       return PTR_ERR(default_acl);
 
                if (!default_acl)
                        mode &= ~current_umask();
index 1e3a7ce804dce2feb1956036eec54ac7ed27da36..a1a881e68a9aa86a1aa76c27931e02202a57a08d 100644 (file)
@@ -627,68 +627,6 @@ xfs_blkdev_put(
                blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
-/*
- * Try to write out the superblock using barriers.
- */
-STATIC int
-xfs_barrier_test(
-       xfs_mount_t     *mp)
-{
-       xfs_buf_t       *sbp = xfs_getsb(mp, 0);
-       int             error;
-
-       XFS_BUF_UNDONE(sbp);
-       XFS_BUF_UNREAD(sbp);
-       XFS_BUF_UNDELAYWRITE(sbp);
-       XFS_BUF_WRITE(sbp);
-       XFS_BUF_UNASYNC(sbp);
-       XFS_BUF_ORDERED(sbp);
-
-       xfsbdstrat(mp, sbp);
-       error = xfs_buf_iowait(sbp);
-
-       /*
-        * Clear all the flags we set and possible error state in the
-        * buffer.  We only did the write to try out whether barriers
-        * worked and shouldn't leave any traces in the superblock
-        * buffer.
-        */
-       XFS_BUF_DONE(sbp);
-       XFS_BUF_ERROR(sbp, 0);
-       XFS_BUF_UNORDERED(sbp);
-
-       xfs_buf_relse(sbp);
-       return error;
-}
-
-STATIC void
-xfs_mountfs_check_barriers(xfs_mount_t *mp)
-{
-       int error;
-
-       if (mp->m_logdev_targp != mp->m_ddev_targp) {
-               xfs_notice(mp,
-                 "Disabling barriers, not supported with external log device");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-
-       if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
-               xfs_notice(mp,
-                       "Disabling barriers, underlying device is readonly");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-
-       error = xfs_barrier_test(mp);
-       if (error) {
-               xfs_notice(mp,
-                       "Disabling barriers, trial barrier write failed");
-               mp->m_flags &= ~XFS_MOUNT_BARRIER;
-               return;
-       }
-}
-
 void
 xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
@@ -1240,14 +1178,6 @@ xfs_fs_remount(
                switch (token) {
                case Opt_barrier:
                        mp->m_flags |= XFS_MOUNT_BARRIER;
-
-                       /*
-                        * Test if barriers are actually working if we can,
-                        * else delay this check until the filesystem is
-                        * marked writeable.
-                        */
-                       if (!(mp->m_flags & XFS_MOUNT_RDONLY))
-                               xfs_mountfs_check_barriers(mp);
                        break;
                case Opt_nobarrier:
                        mp->m_flags &= ~XFS_MOUNT_BARRIER;
@@ -1282,8 +1212,6 @@ xfs_fs_remount(
        /* ro -> rw */
        if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
                mp->m_flags &= ~XFS_MOUNT_RDONLY;
-               if (mp->m_flags & XFS_MOUNT_BARRIER)
-                       xfs_mountfs_check_barriers(mp);
 
                /*
                 * If this is the first remount to writeable state we
@@ -1465,9 +1393,6 @@ xfs_fs_fill_super(
        if (error)
                goto out_free_sb;
 
-       if (mp->m_flags & XFS_MOUNT_BARRIER)
-               xfs_mountfs_check_barriers(mp);
-
        error = xfs_filestream_mount(mp);
        if (error)
                goto out_free_sb;
index c8637537881082a58fb30efbb56abef66d2b26f4..01d2072fb6d4580ca6ceb7f77a20eed67c94cf1c 100644 (file)
@@ -489,6 +489,13 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
        args.total = 0;
        args.whichfork = XFS_ATTR_FORK;
 
+       /*
+        * we have no control over the attribute names that userspace passes us
+        * to remove, so we have to allow the name lookup prior to attribute
+        * removal to fail.
+        */
+       args.op_flags = XFS_DA_OP_OKNOENT;
+
        /*
         * Attach the dquots to the inode.
         */
index cb9b6d1469f7579256061f6de61755c0d6b63df6..3631783b2b5385ee939a61e5cafd5b06ef6b0a45 100644 (file)
@@ -253,16 +253,21 @@ xfs_iget_cache_hit(
                        rcu_read_lock();
                        spin_lock(&ip->i_flags_lock);
 
-                       ip->i_flags &= ~XFS_INEW;
-                       ip->i_flags |= XFS_IRECLAIMABLE;
-                       __xfs_inode_set_reclaim_tag(pag, ip);
+                       ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+                       ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
                        trace_xfs_iget_reclaim_fail(ip);
                        goto out_error;
                }
 
                spin_lock(&pag->pag_ici_lock);
                spin_lock(&ip->i_flags_lock);
-               ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
+
+               /*
+                * Clear the per-lifetime state in the inode as we are now
+                * effectively a new inode and need to return to the initial
+                * state before reuse occurs.
+                */
+               ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
                ip->i_flags |= XFS_INEW;
                __xfs_inode_clear_reclaim_tag(mp, pag, ip);
                inode->i_state = I_NEW;
index 3ae6d58e54739b42b8ff5ff49dfaad7648201add..964cfea776868684afb26f818b8a761ce652b1b5 100644 (file)
@@ -383,6 +383,16 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
 #define XFS_ITRUNCATED         0x0020  /* truncated down so flush-on-close */
 #define XFS_IDIRTY_RELEASE     0x0040  /* dirty release already seen */
 
+/*
+ * Per-lifetime flags need to be reset when re-using a reclaimable inode during
+ * inode lookup. This prevents unintended behaviour on the new inode from
+ * occurring.
+ */
+#define XFS_IRECLAIM_RESET_FLAGS       \
+       (XFS_IRECLAIMABLE | XFS_IRECLAIM | \
+        XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | \
+        XFS_IFILESTREAM)
+
 /*
  * Flags for inode locking.
  * Bit ranges: 1<<1  - 1<<16-1 -- iolock/ilock modes (bitfield)
index 09983a3344a569094a1799dfa32990214cdb38b9..b1e88d56069caea9016cf907e6f4a0fd040b999b 100644 (file)
@@ -681,15 +681,15 @@ xfs_inode_item_unlock(
  * where the cluster buffer may be unpinned before the inode is inserted into
  * the AIL during transaction committed processing. If the buffer is unpinned
  * before the inode item has been committed and inserted, then it is possible
- * for the buffer to be written and IO completions before the inode is inserted
+ * for the buffer to be written and IO to complete before the inode is inserted
  * into the AIL. In that case, we'd be inserting a clean, stale inode into the
  * AIL which will never get removed. It will, however, get reclaimed which
  * triggers an assert in xfs_inode_free() complaining about freeing an inode
  * still in the AIL.
  *
- * To avoid this, return a lower LSN than the one passed in so that the
- * transaction committed code will not move the inode forward in the AIL but
- * will still unpin it properly.
+ * To avoid this, just unpin the inode directly and return an LSN of -1 so the
+ * transaction committed code knows that it does not need to do any further
+ * processing on the item.
  */
 STATIC xfs_lsn_t
 xfs_inode_item_committed(
@@ -699,8 +699,10 @@ xfs_inode_item_committed(
        struct xfs_inode_log_item *iip = INODE_ITEM(lip);
        struct xfs_inode        *ip = iip->ili_inode;
 
-       if (xfs_iflags_test(ip, XFS_ISTALE))
-               return lsn - 1;
+       if (xfs_iflags_test(ip, XFS_ISTALE)) {
+               xfs_inode_item_unpin(lip, 0);
+               return -1;
+       }
        return lsn;
 }
 
index 211930246f2073f4759a569936b79ab387003e21..41d5b8f2bf92d3fd3fae9773f667a0bf1cb42381 100644 (file)
@@ -1372,8 +1372,17 @@ xlog_sync(xlog_t         *log,
        XFS_BUF_ASYNC(bp);
        bp->b_flags |= XBF_LOG_BUFFER;
 
-       if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
+       if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
+               /*
+                * If we have an external log device, flush the data device
+                * before flushing the log to make sure all meta data
+                * written back from the AIL actually made it to disk
+                * before writing out the new log tail LSN in the log buffer.
+                */
+               if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
+                       xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
                XFS_BUF_ORDERED(bp);
+       }
 
        ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
        ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
index 7c7bc2b786bd47d6ec89e31bcf966f6dc5121ec1..c83f63b33aaed62ba6f1bf8b290091e6f5d398e6 100644 (file)
@@ -1361,7 +1361,7 @@ xfs_trans_item_committed(
                lip->li_flags |= XFS_LI_ABORTED;
        item_lsn = IOP_COMMITTED(lip, commit_lsn);
 
-       /* If the committed routine returns -1, item has been freed. */
+       /* item_lsn of -1 means the item needs no further processing */
        if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                return;
 
@@ -1474,7 +1474,7 @@ xfs_trans_committed_bulk(
                        lip->li_flags |= XFS_LI_ABORTED;
                item_lsn = IOP_COMMITTED(lip, commit_lsn);
 
-               /* item_lsn of -1 means the item was freed */
+               /* item_lsn of -1 means the item needs no further processing */
                if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                        continue;
 
index b7a5fe7c52c895776f125d53851d40c48795dce7..619720705bc6843e4624ce02c27829fa32256ba9 100644 (file)
@@ -960,8 +960,11 @@ xfs_release(
                 * be exposed to that problem.
                 */
                truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
-               if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
-                       xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
+               if (truncated) {
+                       xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
+                       if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
+                               xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
+               }
        }
 
        if (ip->i_d.di_nlink == 0)
index 3a10ef5914eb5ab67dc981e3f6d21ee4cf0b3908..6cd5b6403a7b9f37eac4317807c97003c79cd9ba 100644 (file)
@@ -210,7 +210,7 @@ struct acpi_device_power_state {
 struct acpi_device_power {
        int state;              /* Current state */
        struct acpi_device_power_flags flags;
-       struct acpi_device_power_state states[4];       /* Power states (D0-D3) */
+       struct acpi_device_power_state states[ACPI_D_STATE_COUNT];      /* Power states (D0-D3Cold) */
 };
 
 /* Performance Management */
index a756bc8d866db15af5fbe3c2813e873b48aa48d7..4543b6f75867dfa4e6695e52e764cdd20c8f778d 100644 (file)
@@ -98,8 +98,11 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
 /*
  * Spinlock primitives
  */
+
+#ifndef acpi_os_create_lock
 acpi_status
 acpi_os_create_lock(acpi_spinlock *out_handle);
+#endif
 
 void acpi_os_delete_lock(acpi_spinlock handle);
 
index 5d2a5e9544d9d4743202852cde379a60fb2f7600..2ce1be9f62918c38a52edf3d8424af1e4c48d25a 100644 (file)
@@ -159,6 +159,24 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
        } while (0)
 #endif
 
+/*
+ * When lockdep is enabled, the spin_lock_init() macro stringifies its
+ * argument and uses that as a name for the lock in debugging.
+ * By executing spin_lock_init() in a macro, the key changes from "lock" for
+ * all locks to the name of the argument of acpi_os_create_lock(), which
+ * prevents lockdep from reporting false positives for ACPICA locks.
+ */
+#define acpi_os_create_lock(__handle)                          \
+({                                                             \
+       spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock));        \
+                                                               \
+       if (lock) {                                             \
+               *(__handle) = lock;                             \
+               spin_lock_init(*(__handle));                    \
+       }                                                       \
+       lock ? AE_OK : AE_NO_MEMORY;                            \
+})
+
 #endif /* __KERNEL__ */
 
 #endif /* __ACLINUX_H__ */
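Call sites are unchanged; only the lock-class name that lockdep records differs. A hedged usage sketch (the variable name is hypothetical):

	static acpi_spinlock my_acpi_lock;	/* hypothetical */

	if (ACPI_FAILURE(acpi_os_create_lock(&my_acpi_lock)))
		return AE_NO_MEMORY;
	/* lockdep now keys this lock by the stringified call-site
	 * argument instead of one shared "lock" key for all of ACPICA */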
index fcdcb5d5c99539871c448411dbdb0fddd2be56b8..d494001b12260a2f56b9337e01226f17dcee3634 100644 (file)
@@ -170,16 +170,6 @@ extern int __gpio_cansleep(unsigned gpio);
 
 extern int __gpio_to_irq(unsigned gpio);
 
-#define GPIOF_DIR_OUT  (0 << 0)
-#define GPIOF_DIR_IN   (1 << 0)
-
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH        (1 << 1)
-
-#define GPIOF_IN               (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW     (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH    (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
-
 /**
  * struct gpio - a structure describing a GPIO with configuration
  * @gpio:      the GPIO number
index e08f344c6cffc546660506b37e8659679ac78211..3d53efd25ab906889e081acb8ae10f1065ef180a 100644 (file)
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
index 5479fdc849e9432d9b162c399205c59faf8250c6..514ed45c462eaf0516cbdd9f07d3f96bfada4cc7 100644 (file)
@@ -201,6 +201,9 @@ struct amba_pl011_data {
        bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
        void *dma_rx_param;
        void *dma_tx_param;
+       void (*init) (void);
+       void (*exit) (void);
+       void (*reset) (void);
 };
 #endif
 
index 2a7cea53ca0d3d047617d1fff5f6e3848493a60f..6395692b2e7a9dec9725dcf42dbf244585f7c83e 100644 (file)
@@ -167,7 +167,7 @@ enum rq_flag_bits {
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
        (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
-        REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+        REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
 #define REQ_CLONE_MASK         REQ_COMMON_MASK
 
 #define REQ_RAHEAD             (1 << __REQ_RAHEAD)
index b22fb0d3db0f5fb0fa364bbae9e12ee46b8e918a..8c7c2de7631a6fb59ab8f4e86334516a7f07e7d1 100644 (file)
@@ -169,7 +169,8 @@ extern void blk_trace_shutdown(struct request_queue *);
 extern int do_blk_trace_setup(struct request_queue *q, char *name,
                              dev_t dev, struct block_device *bdev,
                              struct blk_user_trace_setup *buts);
-extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
+extern __attribute__((format(printf, 2, 3)))
+void __trace_note_message(struct blk_trace *, const char *fmt, ...);
 
 /**
  * blk_add_trace_msg - Add a (simple) message to the blktrace stream
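With the format attribute in place, gcc can type-check message arguments at compile time; for instance (illustrative), a call that drops its integer argument:

	__trace_note_message(bt, "%s: queued %d requests", name);

now draws a -Wformat warning instead of reading garbage off the stack at runtime.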
index d4646b48dc4a7b0074dc7491e7f66186f398e70e..18a1baf31f2d531493526a48132dc38a5f2f0f03 100644 (file)
@@ -188,6 +188,7 @@ struct clocksource {
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
        /* Watchdog related data, used by the framework */
        struct list_head wd_list;
+       cycle_t cs_last;
        cycle_t wd_last;
 #endif
 } ____cacheline_aligned;
index ddcb7db38e67cc1f822d9cca89c7253a0883a63e..846bb1792572e3cb5cd88eae67e0150a4423d8c2 100644 (file)
@@ -467,6 +467,8 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
                                      char __user *optval, unsigned int optlen);
 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
                                   unsigned flags);
+asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+                                   unsigned vlen, unsigned int flags);
 asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
                                   unsigned int flags);
 asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len,
index 7c60d0942adb436b7ddb58d5132d952c0bd65541..f696bccd48cb614a1d459245c6ea8413b0299d0f 100644 (file)
@@ -44,7 +44,7 @@
 #define CN_VAL_DRBD                    0x1
 #define CN_KVP_IDX                     0x9     /* HyperV KVP */
 
-#define CN_NETLINK_USERS               9
+#define CN_NETLINK_USERS               10      /* Highest index + 1 */
 
 /*
  * Maximum connector's message size.
index c66111affca9a6e93f246b66f39ff5e4b998da58..e4f62d8896b7c9adf3cfb745cbf4c79ec5603d4d 100644 (file)
@@ -530,7 +530,6 @@ struct device_dma_parameters {
  * @dma_mem:   Internal for coherent mem override.
  * @archdata:  For arch-specific additions.
  * @of_node:   Associated device tree node.
- * @of_match:  Matching of_device_id from driver.
  * @devt:      For creating the sysfs "dev".
  * @devres_lock: Spinlock to protect the resource of the device.
  * @devres_head: The resources list of the device.
@@ -654,13 +653,13 @@ static inline int device_is_registered(struct device *dev)
 
 static inline void device_enable_async_suspend(struct device *dev)
 {
-       if (!dev->power.in_suspend)
+       if (!dev->power.is_prepared)
                dev->power.async_suspend = true;
 }
 
 static inline void device_disable_async_suspend(struct device *dev)
 {
-       if (!dev->power.in_suspend)
+       if (!dev->power.is_prepared)
                dev->power.async_suspend = false;
 }
 
index 0b0d9c39ed670d10c1ddf7d9863fbbb67cfd356b..7aad1f440867e796ed6f5d2c811771e700e7ad15 100644 (file)
@@ -2,8 +2,16 @@
 #include <linux/fs.h>
 
 #ifdef CONFIG_CGROUP_DEVICE
-extern int devcgroup_inode_permission(struct inode *inode, int mask);
+extern int __devcgroup_inode_permission(struct inode *inode, int mask);
 extern int devcgroup_inode_mknod(int mode, dev_t dev);
+static inline int devcgroup_inode_permission(struct inode *inode, int mask)
+{
+       if (likely(!inode->i_rdev))
+               return 0;
+       if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
+               return 0;
+       return __devcgroup_inode_permission(inode, mask);
+}
 #else
 static inline int devcgroup_inode_permission(struct inode *inode, int mask)
 { return 0; }
index 246f576c981d67c2d5430d036c0f39bab88b12f4..447c36752385a52bc661adcea0cec1ca37ffad7f 100644 (file)
 /* drbdsetup XY resize -d Z
  * you are free to reduce the device size to nothing, if you want to.
  * the upper limit with 64bit kernel, enough ram and flexible meta data
- * is 16 TB, currently. */
+ * is 1 PiB, currently. */
 /* DRBD_MAX_SECTORS */
 #define DRBD_DISK_SIZE_SECT_MIN  0
-#define DRBD_DISK_SIZE_SECT_MAX  (16 * (2LLU << 30))
+#define DRBD_DISK_SIZE_SECT_MAX  (1 * (2LLU << 40))
 #define DRBD_DISK_SIZE_SECT_DEF  0 /* = disabled = no user size... */
 
 #define DRBD_ON_IO_ERROR_DEF EP_PASS_ON
index 1c777878f1ea5d375be6f04f3364ac2fb9d6c8ba..b5b979247863718f10da2764b3eab280a3005e9f 100644 (file)
@@ -639,6 +639,7 @@ struct address_space {
        struct prio_tree_root   i_mmap;         /* tree of private and shared mappings */
        struct list_head        i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
        struct mutex            i_mmap_mutex;   /* protect tree, count, list */
+       /* Protected by tree_lock together with the radix tree */
        unsigned long           nrpages;        /* number of total pages */
        pgoff_t                 writeback_index;/* writeback starts here */
        const struct address_space_operations *a_ops;   /* methods */
@@ -744,7 +745,7 @@ struct inode {
 
        spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
        unsigned int            i_flags;
-       unsigned int            i_state;
+       unsigned long           i_state;
 #ifdef CONFIG_SECURITY
        void                    *i_security;
 #endif
index 7c4d72f5581f34ec81580edf108810e4319285e5..9ec20dec3353afa83478ef0509fb15309be5fca0 100644 (file)
@@ -204,6 +204,8 @@ extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
 extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
 extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
                                         gfp_t);
+extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
+                                             struct inode *);
 
 /**
  * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -643,4 +645,23 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
        return false;
 }
 
+/**
+ * fscache_uncache_all_inode_pages - Uncache all an inode's pages
+ * @cookie: The cookie representing the inode's cache object.
+ * @inode: The inode to uncache pages from.
+ *
+ * Uncache all the pages in an inode that are marked PG_fscache, assuming them
+ * to be associated with the given cookie.
+ *
+ * This function may sleep.  It will wait for pages that are being written out
+ * and will wait whilst the PG_fscache mark is removed by the cache.
+ */
+static inline
+void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+                                    struct inode *inode)
+{
+       if (fscache_cookie_valid(cookie))
+               __fscache_uncache_all_inode_pages(cookie, inode);
+}
+
 #endif /* _LINUX_FSCACHE_H */
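A hedged sketch of a typical call site, e.g. a netfs evict_inode handler (all names below are hypothetical):

	static void my_netfs_evict_inode(struct inode *inode)
	{
		struct my_netfs_inode *ni = MY_NETFS_I(inode);	/* hypothetical */

		truncate_inode_pages(&inode->i_data, 0);
		/* drop every PG_fscache page tied to this cookie; may sleep */
		fscache_uncache_all_inode_pages(ni->fscache_cookie, inode);
		end_writeback(inode);
	}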
index 781d4671415f9c5f25aaa50250a06190ea91f0bb..daa9952d2174ad811cab346e9842d932079dd87c 100644 (file)
  * See mpc8610fb_set_par(), map_video_memory(), and unmap_video_memory()
  */
 #define MEM_ALLOC_THRESHOLD (1024*768*4+32)
-/* Minimum value that the pixel clock can be set to in pico seconds
- * This is determined by platform clock/3 where the minimum platform
- * clock is 533MHz. This gives 5629 pico seconds.
- */
-#define MIN_PIX_CLK 5629
-#define MAX_PIX_CLK 96096
 
 #include <linux/types.h>
 
index 32d47e710661e55879b5473f9850316948f484fe..17b5a0d80e4239cc10fceb670be29dbf4e49a0f0 100644 (file)
@@ -3,6 +3,17 @@
 
 /* see Documentation/gpio.txt */
 
+/* make these flag values available regardless of GPIO kconfig options */
+#define GPIOF_DIR_OUT  (0 << 0)
+#define GPIOF_DIR_IN   (1 << 0)
+
+#define GPIOF_INIT_LOW (0 << 1)
+#define GPIOF_INIT_HIGH        (1 << 1)
+
+#define GPIOF_IN               (GPIOF_DIR_IN)
+#define GPIOF_OUT_INIT_LOW     (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
+#define GPIOF_OUT_INIT_HIGH    (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+
 #ifdef CONFIG_GENERIC_GPIO
 #include <asm/gpio.h>
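Because the flags are now visible regardless of the GPIO Kconfig options, board code can build GPIO tables unconditionally; an illustrative use with the struct gpio descriptor (pin numbers hypothetical):

	static const struct gpio board_gpios[] = {	/* hypothetical pins */
		{ 42, GPIOF_OUT_INIT_LOW, "reset"  },
		{ 43, GPIOF_IN,           "wakeup" },
	};

	err = gpio_request_array(board_gpios, ARRAY_SIZE(board_gpios));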
 
index 51932e5acf7ce8cd44b57c0a7fe2288013d1967d..fd0dc30c9f154af94155b8c8c47e0a228fbd2573 100644 (file)
@@ -135,6 +135,7 @@ struct hrtimer_sleeper {
  * @cpu_base:          per cpu clock base
  * @index:             clock type index for per_cpu support when moving a
  *                     timer to a base on another cpu.
+ * @clockid:           clock id for per_cpu support
  * @active:            red black tree root node for the active timers
  * @resolution:                the resolution of the clock, in nanoseconds
  * @get_time:          function to retrieve the current time of the clock
index 649dc7f12925d0ac7a9152e7e9f7e79b269b3ec6..5d253cd93691d2bec2a12b34fe2791f61e57a09f 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __SH_KEYSC_H__
 #define __SH_KEYSC_H__
 
-#define SH_KEYSC_MAXKEYS 49
+#define SH_KEYSC_MAXKEYS 64
 
 struct sh_keysc_info {
        enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3,
index 6c12989839d9093f970b9f90d8dffe44dec6f7a2..f6efed0039edfdb06cbe1430588e51bf2db07aca 100644 (file)
@@ -414,6 +414,7 @@ enum
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
        HRTIMER_SOFTIRQ,
+       RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */
 
        NR_SOFTIRQS
 };
index 8b4538446636da5fca09de70d5e6ea33c5fac28c..baa397eb9c335d5bf65b0858ed527a8f2d8e4801 100644 (file)
@@ -676,7 +676,8 @@ void irq_gc_mask_disable_reg(struct irq_data *d);
 void irq_gc_mask_set_bit(struct irq_data *d);
 void irq_gc_mask_clr_bit(struct irq_data *d);
 void irq_gc_unmask_enable_reg(struct irq_data *d);
-void irq_gc_ack(struct irq_data *d);
+void irq_gc_ack_set_bit(struct irq_data *d);
+void irq_gc_ack_clr_bit(struct irq_data *d);
 void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
 void irq_gc_eoi(struct irq_data *d);
 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
index 4ecb7b16b278061a280d8f29bdeb7c88eae24979..d087c2e7b2aa0303a22b181cdc617a67e99cff80 100644 (file)
@@ -1024,7 +1024,6 @@ struct journal_s
 
 /* Filing buffers */
 extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __jbd2_journal_unfile_buffer(struct journal_head *);
 extern void __jbd2_journal_refile_buffer(struct journal_head *);
 extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
 extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
@@ -1165,7 +1164,6 @@ extern void          jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_in
  */
 struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
 struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
-void jbd2_journal_remove_journal_head(struct buffer_head *bh);
 void jbd2_journal_put_journal_head(struct journal_head *jh);
 
 /*
index d4a5c84c503d7307a577474bc0eb4d75cb7265f3..0da38cf7db7bddc8841d14620a9e831866afcfd2 100644 (file)
@@ -45,7 +45,7 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
 #endif
 
 
-struct key;
+struct cred;
 struct file;
 
 enum umh_wait {
@@ -62,7 +62,7 @@ struct subprocess_info {
        char **envp;
        enum umh_wait wait;
        int retval;
-       int (*init)(struct subprocess_info *info);
+       int (*init)(struct subprocess_info *info, struct cred *new);
        void (*cleanup)(struct subprocess_info *info);
        void *data;
 };
@@ -73,7 +73,7 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
 
 /* Set various pieces of state into the subprocess_info structure */
 void call_usermodehelper_setfns(struct subprocess_info *info,
-                   int (*init)(struct subprocess_info *info),
+                   int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
                    void *data);
 
@@ -87,7 +87,7 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info);
 static inline int
 call_usermodehelper_fns(char *path, char **argv, char **envp,
                        enum umh_wait wait,
-                       int (*init)(struct subprocess_info *info),
+                       int (*init)(struct subprocess_info *info, struct cred *new),
                        void (*cleanup)(struct subprocess_info *), void *data)
 {
        struct subprocess_info *info;
index e1e3b2b84f85dbb0e6d6e803e99026edaa8d58bf..935699b30b7c0c5266282fa6fe2b45968e828cb0 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/compiler.h>
 #include <linux/mutex.h>
 
+#define MIN_MEMORY_BLOCK_SIZE     (1 << SECTION_SIZE_BITS)
+
 struct memory_block {
        unsigned long start_section_nr;
        unsigned long end_section_nr;
index be469a357cbbcf3cbbe774e20857bc068cd44c01..38a372a0e2854fed3f06b790f4f591ca052661c7 100644 (file)
@@ -3,4 +3,11 @@
 struct ds1wm_driver_data {
        int active_high;
        int clock_rate;
+       /*
+        * Time in milliseconds to sleep following a reset pulse. Zero
+        * should work if your bus devices' recovery time respects the
+        * 1-wire spec, since the ds1wm implements the precise timings
+        * of a reset pulse/presence detect sequence.
+        */
+       unsigned int reset_recover_delay;
 };
index c6927a4d157fd3cfb9f4a963065353c730364b99..6ad43554ac0521293772db9025261a854ca82593 100644 (file)
@@ -64,6 +64,19 @@ struct mmc_ext_csd {
        unsigned long long      enhanced_area_offset;   /* Units: Byte */
        unsigned int            enhanced_area_size;     /* Units: KB */
        unsigned int            boot_size;              /* in bytes */
+       u8                      raw_partition_support;  /* 160 */
+       u8                      raw_erased_mem_count;   /* 181 */
+       u8                      raw_ext_csd_structure;  /* 194 */
+       u8                      raw_card_type;          /* 196 */
+       u8                      raw_s_a_timeout;                /* 217 */
+       u8                      raw_hc_erase_gap_size;  /* 221 */
+       u8                      raw_erase_timeout_mult; /* 223 */
+       u8                      raw_hc_erase_grp_size;  /* 224 */
+       u8                      raw_sec_trim_mult;      /* 229 */
+       u8                      raw_sec_erase_mult;     /* 230 */
+       u8                      raw_sec_feature_support;/* 231 */
+       u8                      raw_trim_mult;          /* 232 */
+       u8                      raw_sectors[4];         /* 212 - 4 bytes */
 };
 
 struct sd_scr {
index c928dac6cad0c3b1022ebdca9d53ecef1865b797..9f7c3ebcbbad9d7d985c1d41a5a50adf8463854f 100644 (file)
@@ -647,6 +647,13 @@ typedef struct pglist_data {
 #endif
 #define nid_page_nr(nid, pagenr)       pgdat_page_nr(NODE_DATA(nid),(pagenr))
 
+#define node_start_pfn(nid)    (NODE_DATA(nid)->node_start_pfn)
+
+#define node_end_pfn(nid) ({\
+       pg_data_t *__pgdat = NODE_DATA(nid);\
+       __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
+})
+
 #include <linux/memory_hotplug.h>
 
 extern struct mutex zonelists_mutex;
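node_end_pfn() is a statement expression, so NODE_DATA(nid) is evaluated once per use, and the result is one past the last pfn the node spans; an illustrative walk (process_page() is hypothetical):

	unsigned long pfn;

	for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++)
		if (pfn_valid(pfn))
			process_page(pfn_to_page(pfn));	/* hypothetical */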
index 54b8b4d7b68f1a2a42f68de4128f563f9f86eb4e..9e19477991ad87e8f1b0421742a4243131755495 100644 (file)
@@ -1097,12 +1097,6 @@ struct net_device {
 #define NETIF_F_ALL_FCOE       (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
                                 NETIF_F_FSO)
 
-#define NETIF_F_ALL_TX_OFFLOADS        (NETIF_F_ALL_CSUM | NETIF_F_SG | \
-                                NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
-                                NETIF_F_HIGHDMA | \
-                                NETIF_F_SCTP_CSUM | \
-                                NETIF_F_ALL_FCOE)
-
        /*
         * If one device supports one of these features, then enable them
         * for all in netdev_increment_features.
index 3a34e80ae92fbf3d3920d172898ef3123c43708a..25311b3bedf855574eb79170ea7b25d98962b0d6 100644 (file)
@@ -92,6 +92,9 @@ extern        int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
                                   struct nfs_page *);
 extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
 extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
+extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
+                               struct nfs_page *prev,
+                               struct nfs_page *req);
 extern  int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern int nfs_set_page_tag_locked(struct nfs_page *req);
index 5e8444a11adf0a6cb7a545657df9a47b2376bd0b..00848d86ffb250fe50b7e5090986646aac43aa47 100644 (file)
@@ -158,7 +158,6 @@ struct nfs_seqid;
 
 /* nfs41 sessions channel attributes */
 struct nfs4_channel_attrs {
-       u32                     headerpadsz;
        u32                     max_rqst_sz;
        u32                     max_resp_sz;
        u32                     max_resp_sz_cached;
index a311008af5e1538c262990459fb2daefe7d8dddb..f8910e15556617e90fa7bb39365c47767e82d451 100644 (file)
 #define PCI_DEVICE_ID_RICOH_RL5C476    0x0476
 #define PCI_DEVICE_ID_RICOH_RL5C478    0x0478
 #define PCI_DEVICE_ID_RICOH_R5C822     0x0822
+#define PCI_DEVICE_ID_RICOH_R5CE823    0xe823
 #define PCI_DEVICE_ID_RICOH_R5C832     0x0832
 #define PCI_DEVICE_ID_RICOH_R5C843     0x0843
 
index 3160648ccdda1ffe47312d3b2e90b368f36898f3..411e4f4be52b984fa22f9a36502a8c2b79b18b4a 100644 (file)
@@ -425,7 +425,8 @@ struct dev_pm_info {
        pm_message_t            power_state;
        unsigned int            can_wakeup:1;
        unsigned int            async_suspend:1;
-       unsigned int            in_suspend:1;   /* Owned by the PM core */
+       bool                    is_prepared:1;  /* Owned by the PM core */
+       bool                    is_suspended:1; /* Ditto */
        spinlock_t              lock;
 #ifdef CONFIG_PM_SLEEP
        struct list_head        entry;
index a837b20ba190330c23b59ec5a3fb584fc9f6308f..14a6c7b545de5bf3ad51dea50d5d484cfc8bfbb5 100644 (file)
@@ -808,7 +808,7 @@ enum cpu_idle_type {
  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
  * increased costs.
  */
-#if BITS_PER_LONG > 32
+#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
 # define SCHED_LOAD_RESOLUTION 10
 # define scale_load(w)         ((w) << SCHED_LOAD_RESOLUTION)
 # define scale_load_down(w)    ((w) >> SCHED_LOAD_RESOLUTION)
@@ -844,6 +844,7 @@ enum cpu_idle_type {
 #define SD_SERIALIZE           0x0400  /* Only a single load balancing instance */
 #define SD_ASYM_PACKING                0x0800  /* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING      0x1000  /* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP             0x2000  /* sched_domains of this level overlap */
 
 enum powersavings_balance_level {
        POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
@@ -893,16 +894,21 @@ static inline int sd_power_saving_flags(void)
        return 0;
 }
 
-struct sched_group {
-       struct sched_group *next;       /* Must be a circular list */
+struct sched_group_power {
        atomic_t ref;
-
        /*
         * CPU power of this group, SCHED_LOAD_SCALE being max power for a
         * single CPU.
         */
-       unsigned int cpu_power, cpu_power_orig;
+       unsigned int power, power_orig;
+};
+
+struct sched_group {
+       struct sched_group *next;       /* Must be a circular list */
+       atomic_t ref;
+
        unsigned int group_weight;
+       struct sched_group_power *sgp;
 
        /*
         * The CPUs this group covers.
@@ -1254,6 +1260,9 @@ struct task_struct {
 #ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        char rcu_read_unlock_special;
+#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
+       int rcu_boosted;
+#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
        struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TREE_PREEMPT_RCU
index 564acd3a71c1a89b0dcc9175b8f3068cf9e086c7..9995c7fc3f60ce08e2b7474fc6cd955ce736e7a5 100644 (file)
@@ -112,11 +112,7 @@ struct sdla_dlci_conf {
    short Tb_max;
 };
 
-#ifndef __KERNEL__
-
-void sdla(void *cfg_info, char *dev, struct frad_conf *conf, int quiet);
-
-#else
+#ifdef __KERNEL__
 
 /* important Z80 window addresses */
 #define SDLA_CONTROL_WND               0xE000
index 2b7fec840517ff00ae46d14b0662bbf85bb4dc62..aa08fa8fd79b9c85bd3adaa68e22631b0f425e97 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/swap.h>
 #include <linux/mempolicy.h>
+#include <linux/pagemap.h>
 #include <linux/percpu_counter.h>
 
 /* inode in-kernel data */
@@ -45,7 +46,27 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
        return container_of(inode, struct shmem_inode_info, vfs_inode);
 }
 
+/*
+ * Functions in mm/shmem.c called directly from elsewhere:
+ */
 extern int init_tmpfs(void);
 extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
+extern struct file *shmem_file_setup(const char *name,
+                                       loff_t size, unsigned long flags);
+extern int shmem_zero_setup(struct vm_area_struct *);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+                                       pgoff_t index, gfp_t gfp_mask);
+extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+extern int shmem_unuse(swp_entry_t entry, struct page *page);
+extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+                                       struct page **pagep, swp_entry_t *ent);
+
+static inline struct page *shmem_read_mapping_page(
+                               struct address_space *mapping, pgoff_t index)
+{
+       return shmem_read_mapping_page_gfp(mapping, index,
+                                       mapping_gfp_mask(mapping));
+}
 
 #endif
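
Editorial note: the new inline simply reuses shmem_read_mapping_page_gfp() with the mapping's default gfp mask. A hedged usage sketch against this kernel's API (error handling trimmed; page_cache_release() is the era's name for dropping the reference):

    /* Sketch only: 'filp' is assumed to come from shmem_file_setup(). */
    static int read_one(struct file *filp, pgoff_t i)
    {
            struct page *page = shmem_read_mapping_page(filp->f_mapping, i);

            if (IS_ERR(page))
                    return PTR_ERR(page);
            /* ... use the page contents ... */
            page_cache_release(page);
            return 0;
    }
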
index 7ad824d510a2138bca6b7e3d1ec8aae4c0ac014a..8cc38d3bab0c57a79f68fe53e4915f9393335cba 100644 (file)
@@ -85,12 +85,15 @@ int smp_call_function_any(const struct cpumask *mask,
  * Generic and arch helpers
  */
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void __init call_function_init(void);
 void generic_smp_call_function_single_interrupt(void);
 void generic_smp_call_function_interrupt(void);
 void ipi_call_lock(void);
 void ipi_call_unlock(void);
 void ipi_call_lock_irq(void);
 void ipi_call_unlock_irq(void);
+#else
+static inline void call_function_init(void) { }
 #endif
 
 /*
@@ -134,7 +137,7 @@ static inline void smp_send_reschedule(int cpu) { }
 #define smp_prepare_boot_cpu()                 do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
-static inline void init_call_single_data(void) { }
+static inline void call_function_init(void) { }
 
 static inline int
 smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
new file mode 100644 (file)
index 0000000..ec6234e
--- /dev/null
@@ -0,0 +1,4 @@
+/*
+ * Dumb way to share this static piece of information with nfsd
+ */
+#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
index f73c482ec9c6080cc5201c047546928c53c8b723..fe2d8e6b923b416908220e2ef70bed01dee60897 100644 (file)
@@ -84,7 +84,8 @@ struct rpc_task {
 #endif
        unsigned char           tk_priority : 2,/* Task priority */
                                tk_garb_retry : 2,
-                               tk_cred_retry : 2;
+                               tk_cred_retry : 2,
+                               tk_rebind_retry : 2;
 };
 #define tk_xprt                        tk_client->cl_xprt
 
index e70564647039fdcbdf5516aa9f8a9ed1cffd4d6d..a273468f82858d6af3b61aa35b2db536385cbf05 100644 (file)
@@ -300,16 +300,6 @@ static inline void scan_unevictable_unregister_node(struct node *node)
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
 
-#ifdef CONFIG_MMU
-/* linux/mm/shmem.c */
-extern int shmem_unuse(swp_entry_t entry, struct page *page);
-#endif /* CONFIG_MMU */
-
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
-                                       struct page **pagep, swp_entry_t *ent);
-#endif
-
 #ifdef CONFIG_SWAP
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct page *);
index 630e702c9511466f46813c0552eed7371450a807..168dd0b1bae236ea3859e1fb384b6cc79766a1da 100644 (file)
@@ -9,7 +9,7 @@
 #ifndef _LINUX_LIRC_DEV_H
 #define _LINUX_LIRC_DEV_H
 
-#define MAX_IRCTL_DEVICES 4
+#define MAX_IRCTL_DEVICES 8
 #define BUFLEN            16
 
 #define mod(n, div) ((n) % (div))
index 2d7e7ca2313db8cff79b262f3ec03921a4abe931..aac2c0e06d5ee78c6795f23efdd99b6aedf4767e 100644 (file)
@@ -2,10 +2,10 @@
  * Driver header for M-5MOLS 8M Pixel camera sensor with ISP
  *
  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
  *
  * Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 1562c4ff3a650de87a1dc20581b2a389fd458808..2884e3e69cb15b31442624444111a3585ebc8e9c 100644 (file)
@@ -173,16 +173,20 @@ struct v4l2_subdev_core_ops {
                                 struct v4l2_event_subscription *sub);
 };
 
-/* s_mode: switch the tuner to a specific tuner mode. Replacement of s_radio.
+/* s_radio: v4l device was opened in radio mode.
 
-   s_radio: v4l device was opened in Radio mode, to be replaced by s_mode.
+   g_frequency: freq->type must be filled in. Normally done by video_ioctl2
+       or the bridge driver.
+
+   g_tuner:
+   s_tuner: vt->type must be filled in. Normally done by video_ioctl2 or the
+       bridge driver.
 
    s_type_addr: sets tuner type and its I2C addr.
 
    s_config: sets tda9887 specific stuff, like port1, port2 and qss
  */
 struct v4l2_subdev_tuner_ops {
-       int (*s_mode)(struct v4l2_subdev *sd, enum v4l2_tuner_type);
        int (*s_radio)(struct v4l2_subdev *sd);
        int (*s_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq);
        int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq);
index 0589f554788aea2dd5dc28c9a372ee10637e3eb6..396e8fc8910e5901a8ae7e3b0c976fb3435b7e2b 100644 (file)
@@ -2688,7 +2688,7 @@ void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
  * @dev: network device
  * @addr: The source MAC address of the frame
  * @key_type: The key type that the received frame used
- * @key_id: Key identifier (0..3)
+ * @key_id: Key identifier (0..3). Can be -1 if missing.
  * @tsc: The TSC value of the frame that generated the MIC failure (6 octets)
  * @gfp: allocation flags
  *
index 7d15d238b6ecc4f3d4c47af4389525ead0f7a2c7..e12ddfb9eb1652626b9d8cc1d0c0f62f177895d2 100644 (file)
@@ -77,6 +77,7 @@ struct dst_entry {
 #define DST_NOPOLICY           0x0004
 #define DST_NOHASH             0x0008
 #define DST_NOCACHE            0x0010
+#define DST_NOCOUNT            0x0020
        union {
                struct dst_entry        *next;
                struct rtable __rcu     *rt_next;
index c7c42e7acc31004d10ba3adaa0f42c1e9131b127..5d4f8e586e32e6489da5fb7ff5d1525a5b8fe1b1 100644 (file)
@@ -307,6 +307,12 @@ static inline int nf_ct_is_untracked(const struct nf_conn *ct)
        return test_bit(IPS_UNTRACKED_BIT, &ct->status);
 }
 
+/* Packet is received from loopback */
+static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
+{
+       return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
+}
+
 extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
 extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
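
Editorial note: nf_is_loopback_packet() combines three tests -- the skb still has a device, skb_iif is nonzero (the packet was actually received, not merely queued for output), and that device is flagged IFF_LOOPBACK. A hedged sketch of the kind of caller this enables (icmp_sanity_sketch() is a hypothetical function):

    /* Sketch: skip strict validation for traffic that never left the box. */
    static bool icmp_sanity_sketch(const struct sk_buff *skb)
    {
            if (nf_is_loopback_packet(skb))
                    return true;    /* loopback: trust it */
            /* ... checksum and header validation for real traffic ... */
            return true;
    }
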
index dd6847e5d6e46264ffe6db00ccb7128ce61483fa..6506458ccd33bbc3df02f76f8ae661c2fafd18d9 100644 (file)
@@ -63,6 +63,7 @@ typedef enum {
        SCTP_CMD_ECN_ECNE,      /* Do delayed ECNE processing. */
        SCTP_CMD_ECN_CWR,       /* Do delayed CWR processing.  */
        SCTP_CMD_TIMER_START,   /* Start a timer.  */
+       SCTP_CMD_TIMER_START_ONCE, /* Start a timer once. */
        SCTP_CMD_TIMER_RESTART, /* Restart a timer. */
        SCTP_CMD_TIMER_STOP,    /* Stop a timer. */
        SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */
index 99b027b2adce972e3df9c4b93e9800ae8fa820bf..ca4693b4e09e4bb879c0ef8e483247c913bffb6b 100644 (file)
@@ -80,7 +80,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
 
 void sctp_ulpevent_free(struct sctp_ulpevent *);
 int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
 
 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
        const struct sctp_association *asoc,
index f2046e404a610da5c095abfcc88872b5ac1c5c42..c0b938cb4b1ac196448494bfe1f8aed36a13caeb 100644 (file)
@@ -178,7 +178,6 @@ struct sock_common {
   *    @sk_dst_cache: destination cache
   *    @sk_dst_lock: destination cache lock
   *    @sk_policy: flow policy
-  *    @sk_rmem_alloc: receive queue bytes committed
   *    @sk_receive_queue: incoming packets
   *    @sk_wmem_alloc: transmit queue bytes committed
   *    @sk_write_queue: Packet sending queue
index 736eac71d053dd165ae065ded9beb0a11ca00c56..af1b49e982dfc0dcaa34ddbc0042998bddcd6b69 100644 (file)
@@ -99,7 +99,14 @@ struct snd_sb_csp_info {
 /* get CSP information */
 #define SNDRV_SB_CSP_IOCTL_INFO                _IOR('H', 0x10, struct snd_sb_csp_info)
 /* load microcode to CSP */
-#define SNDRV_SB_CSP_IOCTL_LOAD_CODE   _IOW('H', 0x11, struct snd_sb_csp_microcode)
+/* NOTE: struct snd_sb_csp_microcode overflows the max size (13 bits)
+ * defined for some architectures like MIPS, and it leads to build errors.
+ * (x86 and most others have a 14-bit size field, so it fits there.)
+ * As a workaround to skip the size-limit check, we don't use the
+ * normal _IOW() macro here but _IOC() with the size passed manually.
+ */
+#define SNDRV_SB_CSP_IOCTL_LOAD_CODE   \
+       _IOC(_IOC_WRITE, 'H', 0x11, sizeof(struct snd_sb_csp_microcode))
 /* unload microcode from CSP */
 #define SNDRV_SB_CSP_IOCTL_UNLOAD_CODE _IO('H', 0x12)
 /* start CSP */
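
Editorial note on the workaround above: an ioctl number packs direction, size, type:8 and nr:8 into 32 bits, and _IOW() adds a compile-time check that sizeof(arg) fits the size field. That field (_IOC_SIZEBITS) is 14 bits on x86 but 13 on MIPS, so a payload of 8 KiB or more cannot pass the check there; _IOC() composes the same number without the check, silently truncating the stored size. Since both kernel and userspace compute the same constant from the same macro, the truncation is harmless:

    /* Equivalent composition, shown for illustration: */
    #define LOAD_CODE \
            _IOC(_IOC_WRITE, 'H', 0x11, sizeof(struct snd_sb_csp_microcode))
    /* Userspace still calls ioctl(fd, LOAD_CODE, &mc) as usual. */
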
index f1de3e0c75bcad1da0cbed760272fe5e190bccdc..3a4bd3a3c68d25cb2ccd6896a2750a761668937a 100644 (file)
@@ -248,8 +248,7 @@ typedef int (*hw_write_t)(void *,const char* ,int);
 extern struct snd_ac97_bus_ops soc_ac97_ops;
 
 enum snd_soc_control_type {
-       SND_SOC_CUSTOM = 1,
-       SND_SOC_I2C,
+       SND_SOC_I2C = 1,
        SND_SOC_SPI,
 };
 
index e09592d2f916adfdf041272e3b7a672379f7eea1..5ce2b2f5f524de65de6902a97232886196d7575f 100644 (file)
@@ -26,7 +26,7 @@ TRACE_EVENT(ext4_free_inode,
                __field(        umode_t, mode                   )
                __field(        uid_t,  uid                     )
                __field(        gid_t,  gid                     )
-               __field(        blkcnt_t, blocks                )
+               __field(        __u64, blocks                   )
        ),
 
        TP_fast_assign(
@@ -40,9 +40,8 @@ TRACE_EVENT(ext4_free_inode,
 
        TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 __entry->mode, __entry->uid, __entry->gid,
-                 (unsigned long long) __entry->blocks)
+                 (unsigned long) __entry->ino, __entry->mode,
+                 __entry->uid, __entry->gid, __entry->blocks)
 );
 
 TRACE_EVENT(ext4_request_inode,
@@ -178,7 +177,7 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
        TP_printk("dev %d,%d ino %lu new_size %lld",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (long long) __entry->new_size)
+                 __entry->new_size)
 );
 
 DECLARE_EVENT_CLASS(ext4__write_begin,
@@ -204,7 +203,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
                __entry->flags  = flags;
        ),
 
-       TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
+       TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->pos, __entry->len, __entry->flags)
@@ -248,7 +247,7 @@ DECLARE_EVENT_CLASS(ext4__write_end,
                __entry->copied = copied;
        ),
 
-       TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
+       TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->pos, __entry->len, __entry->copied)
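
Editorial note: the %llu -> %lld switches in these TP_printk() strings match the signedness of loff_t, a signed 64-bit type; printing a signed value through %llu renders negatives as huge unsigned numbers. For example:

    static void fmt_demo(void)
    {
            loff_t pos = -1;

            printk("pos %lld\n", (long long)pos);          /* "-1" */
            printk("pos %llu\n", (unsigned long long)pos); /* "18446744073709551615" */
    }
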
@@ -286,29 +285,6 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
        TP_ARGS(inode, pos, len, copied)
 );
 
-TRACE_EVENT(ext4_writepage,
-       TP_PROTO(struct inode *inode, struct page *page),
-
-       TP_ARGS(inode, page),
-
-       TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
-               __field(        ino_t,  ino                     )
-               __field(        pgoff_t, index                  )
-
-       ),
-
-       TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
-               __entry->ino    = inode->i_ino;
-               __entry->index  = page->index;
-       ),
-
-       TP_printk("dev %d,%d ino %lu page_index %lu",
-                 MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino, __entry->index)
-);
-
 TRACE_EVENT(ext4_da_writepages,
        TP_PROTO(struct inode *inode, struct writeback_control *wbc),
 
@@ -341,7 +317,7 @@ TRACE_EVENT(ext4_da_writepages,
        ),
 
        TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
-                 "range_start %llu range_end %llu sync_mode %d"
+                 "range_start %lld range_end %lld sync_mode %d"
                  "for_kupdate %d range_cyclic %d writeback_index %lu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino, __entry->nr_to_write,
@@ -449,7 +425,14 @@ DECLARE_EVENT_CLASS(ext4__page_op,
        TP_printk("dev %d,%d ino %lu page_index %lu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->index)
+                 (unsigned long) __entry->index)
+);
+
+DEFINE_EVENT(ext4__page_op, ext4_writepage,
+
+       TP_PROTO(struct page *page),
+
+       TP_ARGS(page)
 );
 
 DEFINE_EVENT(ext4__page_op, ext4_readpage,
@@ -489,7 +472,7 @@ TRACE_EVENT(ext4_invalidatepage,
        TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->index, __entry->offset)
+                 (unsigned long) __entry->index, __entry->offset)
 );
 
 TRACE_EVENT(ext4_discard_blocks,
@@ -562,12 +545,10 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
 );
 
 TRACE_EVENT(ext4_mb_release_inode_pa,
-       TP_PROTO(struct super_block *sb,
-                struct inode *inode,
-                struct ext4_prealloc_space *pa,
+       TP_PROTO(struct ext4_prealloc_space *pa,
                 unsigned long long block, unsigned int count),
 
-       TP_ARGS(sb, inode, pa, block, count),
+       TP_ARGS(pa, block, count),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -578,8 +559,8 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
        ),
 
        TP_fast_assign(
-               __entry->dev            = sb->s_dev;
-               __entry->ino            = inode->i_ino;
+               __entry->dev            = pa->pa_inode->i_sb->s_dev;
+               __entry->ino            = pa->pa_inode->i_ino;
                __entry->block          = block;
                __entry->count          = count;
        ),
@@ -591,10 +572,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
 );
 
 TRACE_EVENT(ext4_mb_release_group_pa,
-       TP_PROTO(struct super_block *sb,
-                struct ext4_prealloc_space *pa),
+       TP_PROTO(struct ext4_prealloc_space *pa),
 
-       TP_ARGS(sb, pa),
+       TP_ARGS(pa),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -604,7 +584,7 @@ TRACE_EVENT(ext4_mb_release_group_pa,
        ),
 
        TP_fast_assign(
-               __entry->dev            = sb->s_dev;
+               __entry->dev            = pa->pa_inode->i_sb->s_dev;
                __entry->pa_pstart      = pa->pa_pstart;
                __entry->pa_len         = pa->pa_len;
        ),
@@ -666,10 +646,10 @@ TRACE_EVENT(ext4_request_blocks,
                __field(        ino_t,  ino                     )
                __field(        unsigned int, flags             )
                __field(        unsigned int, len               )
-               __field(        __u64,  logical                 )
+               __field(        __u32,  logical                 )
+               __field(        __u32,  lleft                   )
+               __field(        __u32,  lright                  )
                __field(        __u64,  goal                    )
-               __field(        __u64,  lleft                   )
-               __field(        __u64,  lright                  )
                __field(        __u64,  pleft                   )
                __field(        __u64,  pright                  )
        ),
@@ -687,17 +667,13 @@ TRACE_EVENT(ext4_request_blocks,
                __entry->pright = ar->pright;
        ),
 
-       TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu "
-                 "lleft %llu lright %llu pleft %llu pright %llu ",
+       TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
+                 "lleft %u lright %u pleft %llu pright %llu ",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 __entry->flags, __entry->len,
-                 (unsigned long long) __entry->logical,
-                 (unsigned long long) __entry->goal,
-                 (unsigned long long) __entry->lleft,
-                 (unsigned long long) __entry->lright,
-                 (unsigned long long) __entry->pleft,
-                 (unsigned long long) __entry->pright)
+                 (unsigned long) __entry->ino, __entry->flags,
+                 __entry->len, __entry->logical, __entry->goal,
+                 __entry->lleft, __entry->lright, __entry->pleft,
+                 __entry->pright)
 );
 
 TRACE_EVENT(ext4_allocate_blocks,
@@ -711,10 +687,10 @@ TRACE_EVENT(ext4_allocate_blocks,
                __field(        __u64,  block                   )
                __field(        unsigned int, flags             )
                __field(        unsigned int, len               )
-               __field(        __u64,  logical                 )
+               __field(        __u32,  logical                 )
+               __field(        __u32,  lleft                   )
+               __field(        __u32,  lright                  )
                __field(        __u64,  goal                    )
-               __field(        __u64,  lleft                   )
-               __field(        __u64,  lright                  )
                __field(        __u64,  pleft                   )
                __field(        __u64,  pright                  )
        ),
@@ -733,17 +709,13 @@ TRACE_EVENT(ext4_allocate_blocks,
                __entry->pright = ar->pright;
        ),
 
-       TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu "
-                 "goal %llu lleft %llu lright %llu pleft %llu pright %llu",
+       TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
+                 "goal %llu lleft %u lright %u pleft %llu pright %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 __entry->flags, __entry->len, __entry->block,
-                 (unsigned long long) __entry->logical,
-                 (unsigned long long) __entry->goal,
-                 (unsigned long long) __entry->lleft,
-                 (unsigned long long) __entry->lright,
-                 (unsigned long long) __entry->pleft,
-                 (unsigned long long) __entry->pright)
+                 (unsigned long) __entry->ino, __entry->flags,
+                 __entry->len, __entry->block, __entry->logical,
+                 __entry->goal,  __entry->lleft, __entry->lright,
+                 __entry->pleft, __entry->pright)
 );
 
 TRACE_EVENT(ext4_free_blocks,
@@ -755,10 +727,10 @@ TRACE_EVENT(ext4_free_blocks,
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(        ino_t,  ino                     )
-               __field(      umode_t, mode                     )
+               __field(        umode_t, mode                   )
                __field(        __u64,  block                   )
                __field(        unsigned long,  count           )
-               __field(         int,   flags                   )
+               __field(        int,    flags                   )
        ),
 
        TP_fast_assign(
@@ -798,7 +770,7 @@ TRACE_EVENT(ext4_sync_file_enter,
                __entry->parent         = dentry->d_parent->d_inode->i_ino;
        ),
 
-       TP_printk("dev %d,%d ino %ld parent %ld datasync %d ",
+       TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  (unsigned long) __entry->parent, __entry->datasync)
@@ -821,7 +793,7 @@ TRACE_EVENT(ext4_sync_file_exit,
                __entry->dev            = inode->i_sb->s_dev;
        ),
 
-       TP_printk("dev %d,%d ino %ld ret %d",
+       TP_printk("dev %d,%d ino %lu ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->ret)
@@ -1005,7 +977,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
                __entry->result_len     = len;
        ),
 
-       TP_printk("dev %d,%d inode %lu extent %u/%d/%u ",
+       TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->result_group, __entry->result_start,
@@ -1093,7 +1065,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
                  "allocated_meta_blocks %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->mode,  (unsigned long long) __entry->i_blocks,
+                 __entry->mode, __entry->i_blocks,
                  __entry->used_blocks, __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
 );
@@ -1127,7 +1099,7 @@ TRACE_EVENT(ext4_da_reserve_space,
                  "reserved_data_blocks %d reserved_meta_blocks %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->mode, (unsigned long long) __entry->i_blocks,
+                 __entry->mode, __entry->i_blocks,
                  __entry->md_needed, __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks)
 );
@@ -1164,7 +1136,7 @@ TRACE_EVENT(ext4_da_release_space,
                  "allocated_meta_blocks %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->mode, (unsigned long long) __entry->i_blocks,
+                 __entry->mode, __entry->i_blocks,
                  __entry->freed_blocks, __entry->reserved_data_blocks,
                  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
 );
@@ -1239,14 +1211,15 @@ TRACE_EVENT(ext4_direct_IO_enter,
                __entry->rw     = rw;
        ),
 
-       TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
+       TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned long long) __entry->pos, __entry->len, __entry->rw)
+                 __entry->pos, __entry->len, __entry->rw)
 );
 
 TRACE_EVENT(ext4_direct_IO_exit,
-       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw, int ret),
+       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
+                int rw, int ret),
 
        TP_ARGS(inode, offset, len, rw, ret),
 
@@ -1268,10 +1241,10 @@ TRACE_EVENT(ext4_direct_IO_exit,
                __entry->ret    = ret;
        ),
 
-       TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
+       TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned long long) __entry->pos, __entry->len,
+                 __entry->pos, __entry->len,
                  __entry->rw, __entry->ret)
 );
 
@@ -1296,15 +1269,15 @@ TRACE_EVENT(ext4_fallocate_enter,
                __entry->mode   = mode;
        ),
 
-       TP_printk("dev %d,%d ino %ld pos %llu len %llu mode %d",
+       TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 (unsigned long long) __entry->pos,
-                 (unsigned long long) __entry->len, __entry->mode)
+                 (unsigned long) __entry->ino, __entry->pos,
+                 __entry->len, __entry->mode)
 );
 
 TRACE_EVENT(ext4_fallocate_exit,
-       TP_PROTO(struct inode *inode, loff_t offset, unsigned int max_blocks, int ret),
+       TP_PROTO(struct inode *inode, loff_t offset,
+                unsigned int max_blocks, int ret),
 
        TP_ARGS(inode, offset, max_blocks, ret),
 
@@ -1312,7 +1285,7 @@ TRACE_EVENT(ext4_fallocate_exit,
                __field(        ino_t,  ino                     )
                __field(        dev_t,  dev                     )
                __field(        loff_t, pos                     )
-               __field(        unsigned,       blocks          )
+               __field(        unsigned int,   blocks          )
                __field(        int,    ret                     )
        ),
 
@@ -1324,10 +1297,10 @@ TRACE_EVENT(ext4_fallocate_exit,
                __entry->ret    = ret;
        ),
 
-       TP_printk("dev %d,%d ino %ld pos %llu blocks %d ret %d",
+       TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned long long) __entry->pos, __entry->blocks,
+                 __entry->pos, __entry->blocks,
                  __entry->ret)
 );
 
@@ -1350,7 +1323,7 @@ TRACE_EVENT(ext4_unlink_enter,
                __entry->dev            = dentry->d_inode->i_sb->s_dev;
        ),
 
-       TP_printk("dev %d,%d ino %ld size %lld parent %ld",
+       TP_printk("dev %d,%d ino %lu size %lld parent %lu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino, __entry->size,
                  (unsigned long) __entry->parent)
@@ -1373,7 +1346,7 @@ TRACE_EVENT(ext4_unlink_exit,
                __entry->ret            = ret;
        ),
 
-       TP_printk("dev %d,%d ino %ld ret %d",
+       TP_printk("dev %d,%d ino %lu ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->ret)
@@ -1387,7 +1360,7 @@ DECLARE_EVENT_CLASS(ext4__truncate,
        TP_STRUCT__entry(
                __field(        ino_t,          ino             )
                __field(        dev_t,          dev             )
-               __field(        blkcnt_t,       blocks          )
+               __field(        __u64,          blocks          )
        ),
 
        TP_fast_assign(
@@ -1396,9 +1369,9 @@ DECLARE_EVENT_CLASS(ext4__truncate,
                __entry->blocks = inode->i_blocks;
        ),
 
-       TP_printk("dev %d,%d ino %lu blocks %lu",
+       TP_printk("dev %d,%d ino %lu blocks %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
+                 (unsigned long) __entry->ino, __entry->blocks)
 );
 
 DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
@@ -1417,7 +1390,7 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
 
 DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
        TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
-                unsigned len, unsigned flags),
+                unsigned int len, unsigned int flags),
 
        TP_ARGS(inode, lblk, len, flags),
 
@@ -1425,8 +1398,8 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
                __field(        ino_t,          ino             )
                __field(        dev_t,          dev             )
                __field(        ext4_lblk_t,    lblk            )
-               __field(        unsigned,       len             )
-               __field(        unsigned,       flags           )
+               __field(        unsigned int,   len             )
+               __field(        unsigned int,   flags           )
        ),
 
        TP_fast_assign(
@@ -1440,7 +1413,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
        TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned) __entry->lblk, __entry->len, __entry->flags)
+                 __entry->lblk, __entry->len, __entry->flags)
 );
 
 DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
@@ -1459,7 +1432,7 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
 
 DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
        TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
-                ext4_fsblk_t pblk, unsigned len, int ret),
+                ext4_fsblk_t pblk, unsigned int len, int ret),
 
        TP_ARGS(inode, lblk, pblk, len, ret),
 
@@ -1468,7 +1441,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
                __field(        dev_t,          dev             )
                __field(        ext4_lblk_t,    lblk            )
                __field(        ext4_fsblk_t,   pblk            )
-               __field(        unsigned,       len             )
+               __field(        unsigned int,   len             )
                __field(        int,            ret             )
        ),
 
@@ -1484,7 +1457,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
        TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
+                 __entry->lblk, __entry->pblk,
                  __entry->len, __entry->ret)
 );
 
@@ -1524,7 +1497,7 @@ TRACE_EVENT(ext4_ext_load_extent,
        TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk)
+                 __entry->lblk, __entry->pblk)
 );
 
 TRACE_EVENT(ext4_load_inode,
index ae045ca7d356033d49c8a9650a618c206503f68c..1c09820df58564f8d0430d996998edc6f8d893c0 100644 (file)
@@ -20,7 +20,8 @@ struct softirq_action;
                         softirq_name(BLOCK_IOPOLL),    \
                         softirq_name(TASKLET),         \
                         softirq_name(SCHED),           \
-                        softirq_name(HRTIMER))
+                        softirq_name(HRTIMER),         \
+                        softirq_name(RCU))
 
 /**
  * irq_handler_entry - called immediately before the irq action handler
index 2568d22a304ecc667b302b0b5d17956ed6d143a8..aae2f40fea4cbea200f0658c7f6afddb60f1d934 100644 (file)
@@ -245,30 +245,32 @@ recalibrate:
 
 void __cpuinit calibrate_delay(void)
 {
+       unsigned long lpj;
        static bool printed;
 
        if (preset_lpj) {
-               loops_per_jiffy = preset_lpj;
+               lpj = preset_lpj;
                if (!printed)
                        pr_info("Calibrating delay loop (skipped) "
                                "preset value.. ");
        } else if ((!printed) && lpj_fine) {
-               loops_per_jiffy = lpj_fine;
+               lpj = lpj_fine;
                pr_info("Calibrating delay loop (skipped), "
                        "value calculated using timer frequency.. ");
-       } else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
+       } else if ((lpj = calibrate_delay_direct()) != 0) {
                if (!printed)
                        pr_info("Calibrating delay using timer "
                                "specific routine.. ");
        } else {
                if (!printed)
                        pr_info("Calibrating delay loop... ");
-               loops_per_jiffy = calibrate_delay_converge();
+               lpj = calibrate_delay_converge();
        }
        if (!printed)
                pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
-                       loops_per_jiffy/(500000/HZ),
-                       (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
+                       lpj/(500000/HZ),
+                       (lpj/(5000/HZ)) % 100, lpj);
 
+       loops_per_jiffy = lpj;
        printed = true;
 }
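
Editorial note: the point of the local lpj is that the old code used the global loops_per_jiffy as scratch space while calibrate_delay_direct() converged, so another CPU running udelay() concurrently could scale its delay by a half-calibrated value; publishing the final number in one store closes that window. A generic sketch of the consumer that motivates it (not any one architecture's __udelay()):

    /* udelay() derives its spin count from loops_per_jiffy; a torn or
     * intermediate value makes delays arbitrarily wrong. */
    static void udelay_sketch(unsigned long usecs)
    {
            unsigned long lpj = loops_per_jiffy;    /* one read of the global */
            unsigned long loops = usecs * (lpj / (1000000 / HZ));

            while (loops--)
                    cpu_relax();
    }
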
index cafba67c13bf8b57493e49b0501ef4265e76204b..d7211faed2adfb295caf46bbb9c70835f622eabe 100644 (file)
@@ -542,6 +542,7 @@ asmlinkage void __init start_kernel(void)
        timekeeping_init();
        time_init();
        profile_init();
+       call_function_init();
        if (!irqs_disabled())
                printk(KERN_CRIT "start_kernel(): bug: interrupts were "
                                 "enabled early\n");
index 31a9db711906f0a9e13302340c5b38c9753d7a73..3a2cab407b93fd77024c2dff4165034b04ca832f 100644 (file)
@@ -101,10 +101,10 @@ void irq_gc_unmask_enable_reg(struct irq_data *d)
 }
 
 /**
- * irq_gc_ack - Ack pending interrupt
+ * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
  * @d: irq_data
  */
-void irq_gc_ack(struct irq_data *d)
+void irq_gc_ack_set_bit(struct irq_data *d)
 {
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        u32 mask = 1 << (d->irq - gc->irq_base);
@@ -114,6 +114,20 @@ void irq_gc_ack(struct irq_data *d)
        irq_gc_unlock(gc);
 }
 
+/**
+ * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
+ * @d: irq_data
+ */
+void irq_gc_ack_clr_bit(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       u32 mask = ~(1 << (d->irq - gc->irq_base));
+
+       irq_gc_lock(gc);
+       irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+       irq_gc_unlock(gc);
+}
+
 /**
  * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
  * @d: irq_data
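
Editorial note: irq_gc_ack_set_bit() is the old irq_gc_ack() renamed for what it actually does (write a 1 to the pending bit); the new irq_gc_ack_clr_bit() serves controllers that ack by writing the register with that bit cleared and all other bits set. A hedged setup sketch (chip name and register offset are made up):

    /* Sketch: wiring a generic chip for write-0-to-ack hardware. */
    static void __init myintc_init_sketch(void __iomem *reg_base, int irq_base)
    {
            struct irq_chip_generic *gc;
            struct irq_chip_type *ct;

            gc = irq_alloc_generic_chip("myintc", 1, irq_base, reg_base,
                                        handle_edge_irq);
            if (!gc)
                    return;
            ct = gc->chip_types;
            ct->chip.irq_ack = irq_gc_ack_clr_bit;  /* writing 0 acks the bit */
            ct->regs.ack = 0x08;                    /* hypothetical offset */
            irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
    }
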
index fa27e750dbc0edb3dd5c502da1b5be38d83d8c8b..a8ce45097f3d21354c7ed8c18e5fcbc4e11792d1 100644 (file)
@@ -375,15 +375,19 @@ int jump_label_text_reserved(void *start, void *end)
 
 static void jump_label_update(struct jump_label_key *key, int enable)
 {
-       struct jump_entry *entry = key->entries;
-
-       /* if there are no users, entry can be NULL */
-       if (entry)
-               __jump_label_update(key, entry, __stop___jump_table, enable);
+       struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
 
 #ifdef CONFIG_MODULES
+       struct module *mod = __module_address((jump_label_t)key);
+
        __jump_label_mod_update(key, enable);
+
+       if (mod)
+               stop = mod->jump_entries + mod->num_jump_entries;
 #endif
+       /* if there are no users, entry can be NULL */
+       if (entry)
+               __jump_label_update(key, entry, stop, enable);
 }
 
 #endif
index ad6a81c58b44e2d5ae57c5dbe866da1cf1008a03..47613dfb7b28c340825493ed36a4c3347a39290d 100644 (file)
@@ -156,12 +156,6 @@ static int ____call_usermodehelper(void *data)
         */
        set_user_nice(current, 0);
 
-       if (sub_info->init) {
-               retval = sub_info->init(sub_info);
-               if (retval)
-                       goto fail;
-       }
-
        retval = -ENOMEM;
        new = prepare_kernel_cred(current);
        if (!new)
@@ -173,6 +167,14 @@ static int ____call_usermodehelper(void *data)
                                             new->cap_inheritable);
        spin_unlock(&umh_sysctl_lock);
 
+       if (sub_info->init) {
+               retval = sub_info->init(sub_info, new);
+               if (retval) {
+                       abort_creds(new);
+                       goto fail;
+               }
+       }
+
        commit_creds(new);
 
        retval = kernel_execve(sub_info->path,
@@ -388,7 +390,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
  * context in which call_usermodehelper_exec is called.
  */
 void call_usermodehelper_setfns(struct subprocess_info *info,
-                   int (*init)(struct subprocess_info *info),
+                   int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
                    void *data)
 {
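
Editorial note: with this reordering, init() observes (and may modify) the fully prepared but not yet committed credentials, and a failing init() aborts them instead of leaking. The motivating caller installs a session keyring; a hedged sketch modeled on that user (umh_keys_cleanup is assumed to put the key):

    /* Sketch, modeled on the request_key() usermode helper. */
    static int umh_keys_init_sketch(struct subprocess_info *info,
                                    struct cred *cred)
    {
            struct key *keyring = info->data;

            return install_session_keyring_to_cred(cred, keyring);
    }

    /* Wired up before exec, roughly:
     *   call_usermodehelper_setfns(info, umh_keys_init_sketch,
     *                              umh_keys_cleanup, key_get(keyring));
     */
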
index ace55889f7027b051acbaabdbce71389315418e5..06efa54f93d6686bfc2f218d92149ee56cf9e511 100644 (file)
@@ -1211,7 +1211,11 @@ static void free_unnecessary_pages(void)
                to_free_highmem = alloc_highmem - save;
        } else {
                to_free_highmem = 0;
-               to_free_normal -= save - alloc_highmem;
+               save -= alloc_highmem;
+               if (to_free_normal > save)
+                       to_free_normal -= save;
+               else
+                       to_free_normal = 0;
        }
 
        memory_bm_position_reset(&copy_bm);
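
Editorial note: the guard exists because these are unsigned page counts; when save - alloc_highmem exceeded to_free_normal, the old single subtraction wrapped around and the loop below then tried to free an absurd number of pages. Worked numbers:

    /* 10UL - 25UL wraps to ULONG_MAX - 14 on unsigned long; clamp to 0. */
    static unsigned long clamp_sub(unsigned long n, unsigned long excess)
    {
            return n > excess ? n - excess : 0;
    }
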
index 7d02d33be699f97d4956c76ea71b939bb0832d06..42ddbc6f0de6ffcf2d245dd9c8ec253c2780ffab 100644 (file)
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
                if (error)
                        pm_notifier_call_chain(PM_POST_RESTORE);
        }
-       if (error)
+       if (error) {
+               free_basic_memory_bitmaps();
                atomic_inc(&snapshot_device_available);
+       }
        data->frozen = 0;
        data->ready = 0;
        data->platform_support = 0;
index 89419ff92e996c1e52fade38475ba14604a8f7bd..ba06207b1dd3bf9f9d42bc8800998b80b909c02d 100644 (file)
@@ -84,9 +84,34 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
 
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned.  So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier().  When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods.  This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks.  So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one.  We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
+#ifdef CONFIG_RCU_BOOST
+
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
@@ -96,10 +121,12 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
-static void invoke_rcu_cpu_kthread(void);
+static void invoke_rcu_core(void);
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
 #define RCU_KTHREAD_PRIO 1     /* RT priority for per-CPU kthreads. */
 
@@ -1088,14 +1115,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
        int need_report = 0;
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp;
-       struct task_struct *t;
 
-       /* Stop the CPU's kthread. */
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t != NULL) {
-               per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-               kthread_stop(t);
-       }
+       rcu_stop_cpu_kthread(cpu);
 
        /* Exclude any attempts to start a new grace period. */
        raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1252,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
        /* Re-raise the RCU softirq if there are callbacks remaining. */
        if (cpu_has_callbacks_ready_to_invoke(rdp))
-               invoke_rcu_cpu_kthread();
+               invoke_rcu_core();
 }
 
 /*
@@ -1277,7 +1298,7 @@ void rcu_check_callbacks(int cpu, int user)
        }
        rcu_preempt_check_callbacks(cpu);
        if (rcu_pending(cpu))
-               invoke_rcu_cpu_kthread();
+               invoke_rcu_core();
 }
 
 #ifdef CONFIG_SMP
@@ -1442,13 +1463,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 
        /* If there are callbacks ready, invoke them. */
-       rcu_do_batch(rsp, rdp);
+       if (cpu_has_callbacks_ready_to_invoke(rdp))
+               invoke_rcu_callbacks(rsp, rdp);
 }
 
 /*
  * Do softirq processing for the current CPU.
  */
-static void rcu_process_callbacks(void)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
        __rcu_process_callbacks(&rcu_sched_state,
                                &__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1487,22 @@ static void rcu_process_callbacks(void)
  * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
  * cannot disappear out from under us.
  */
-static void invoke_rcu_cpu_kthread(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __this_cpu_write(rcu_cpu_has_work, 1);
-       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-               local_irq_restore(flags);
-               return;
-       }
-       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
-       local_irq_restore(flags);
-}
-
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-       struct task_struct *t;
-
-       t = rnp->node_kthread_task;
-       if (t != NULL)
-               wake_up_process(t);
-}
-
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument.  The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-       int policy;
-       struct sched_param sp;
-       struct task_struct *t;
-
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t == NULL)
-               return;
-       if (to_rt) {
-               policy = SCHED_FIFO;
-               sp.sched_priority = RCU_KTHREAD_PRIO;
-       } else {
-               policy = SCHED_NORMAL;
-               sp.sched_priority = 0;
-       }
-       sched_setscheduler_nocheck(t, policy, &sp);
-}
-
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-       struct rcu_node *rnp = rdp->mynode;
-
-       atomic_or(rdp->grpmask, &rnp->wakemask);
-       invoke_rcu_node_kthread(rnp);
-}
-
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted.  Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
-{
-       struct sched_param sp;
-       struct timer_list yield_timer;
-
-       setup_timer_on_stack(&yield_timer, f, arg);
-       mod_timer(&yield_timer, jiffies + 2);
-       sp.sched_priority = 0;
-       sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-       set_user_nice(current, 19);
-       schedule();
-       sp.sched_priority = RCU_KTHREAD_PRIO;
-       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-       del_timer(&yield_timer);
-}
-
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline.  We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh.  This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
-{
-       while (cpu_is_offline(cpu) ||
-              !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-              smp_processor_id() != cpu) {
-               if (kthread_should_stop())
-                       return 1;
-               per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-               per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-               local_bh_enable();
-               schedule_timeout_uninterruptible(1);
-               if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-                       set_cpus_allowed_ptr(current, cpumask_of(cpu));
-               local_bh_disable();
-       }
-       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-       return 0;
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * earlier RCU softirq.
- */
-static int rcu_cpu_kthread(void *arg)
-{
-       int cpu = (int)(long)arg;
-       unsigned long flags;
-       int spincnt = 0;
-       unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-       char work;
-       char *workp = &per_cpu(rcu_cpu_has_work, cpu);
-
-       for (;;) {
-               *statusp = RCU_KTHREAD_WAITING;
-               rcu_wait(*workp != 0 || kthread_should_stop());
-               local_bh_disable();
-               if (rcu_cpu_kthread_should_stop(cpu)) {
-                       local_bh_enable();
-                       break;
-               }
-               *statusp = RCU_KTHREAD_RUNNING;
-               per_cpu(rcu_cpu_kthread_loops, cpu)++;
-               local_irq_save(flags);
-               work = *workp;
-               *workp = 0;
-               local_irq_restore(flags);
-               if (work)
-                       rcu_process_callbacks();
-               local_bh_enable();
-               if (*workp != 0)
-                       spincnt++;
-               else
-                       spincnt = 0;
-               if (spincnt > 10) {
-                       *statusp = RCU_KTHREAD_YIELDING;
-                       rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
-                       spincnt = 0;
-               }
-       }
-       *statusp = RCU_KTHREAD_STOPPED;
-       return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task.  There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
-{
-       struct sched_param sp;
-       struct task_struct *t;
-
-       if (!rcu_kthreads_spawnable ||
-           per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-               return 0;
-       t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
-       if (IS_ERR(t))
-               return PTR_ERR(t);
-       kthread_bind(t, cpu);
-       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-       WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-       per_cpu(rcu_cpu_kthread_task, cpu) = t;
-       sp.sched_priority = RCU_KTHREAD_PRIO;
-       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-       return 0;
-}
-
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed.  We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       int cpu;
-       unsigned long flags;
-       unsigned long mask;
-       struct rcu_node *rnp = (struct rcu_node *)arg;
-       struct sched_param sp;
-       struct task_struct *t;
-
-       for (;;) {
-               rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-               rcu_wait(atomic_read(&rnp->wakemask) != 0);
-               rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               mask = atomic_xchg(&rnp->wakemask, 0);
-               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-                       if ((mask & 0x1) == 0)
-                               continue;
-                       preempt_disable();
-                       t = per_cpu(rcu_cpu_kthread_task, cpu);
-                       if (!cpu_online(cpu) || t == NULL) {
-                               preempt_enable();
-                               continue;
-                       }
-                       per_cpu(rcu_cpu_has_work, cpu) = 1;
-                       sp.sched_priority = RCU_KTHREAD_PRIO;
-                       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-                       preempt_enable();
-               }
-       }
-       /* NOTREACHED */
-       rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-       return 0;
-}
-
-/*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question.  The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU.  If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
-{
-       cpumask_var_t cm;
-       int cpu;
-       unsigned long mask = rnp->qsmaskinit;
-
-       if (rnp->node_kthread_task == NULL)
+       if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
                return;
-       if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+       if (likely(!rsp->boost)) {
+               rcu_do_batch(rsp, rdp);
                return;
-       cpumask_clear(cm);
-       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
-               if ((mask & 0x1) && cpu != outgoingcpu)
-                       cpumask_set_cpu(cpu, cm);
-       if (cpumask_weight(cm) == 0) {
-               cpumask_setall(cm);
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
-                       cpumask_clear_cpu(cpu, cm);
-               WARN_ON_ONCE(cpumask_weight(cm) == 0);
        }
-       set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-       rcu_boost_kthread_setaffinity(rnp, cm);
-       free_cpumask_var(cm);
+       invoke_rcu_callbacks_kthread();
 }
 
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held.  So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-                                               struct rcu_node *rnp)
+static void invoke_rcu_core(void)
 {
-       unsigned long flags;
-       int rnp_index = rnp - &rsp->node[0];
-       struct sched_param sp;
-       struct task_struct *t;
-
-       if (!rcu_kthreads_spawnable ||
-           rnp->qsmaskinit == 0)
-               return 0;
-       if (rnp->node_kthread_task == NULL) {
-               t = kthread_create(rcu_node_kthread, (void *)rnp,
-                                  "rcun%d", rnp_index);
-               if (IS_ERR(t))
-                       return PTR_ERR(t);
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               rnp->node_kthread_task = t;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               sp.sched_priority = 99;
-               sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-       }
-       return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+       raise_softirq(RCU_SOFTIRQ);
 }
 
-static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
-
-/*
- * Spawn all kthreads -- called as soon as the scheduler is running.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-       int cpu;
-       struct rcu_node *rnp;
-       struct task_struct *t;
-
-       rcu_kthreads_spawnable = 1;
-       for_each_possible_cpu(cpu) {
-               per_cpu(rcu_cpu_has_work, cpu) = 0;
-               if (cpu_online(cpu)) {
-                       (void)rcu_spawn_one_cpu_kthread(cpu);
-                       t = per_cpu(rcu_cpu_kthread_task, cpu);
-                       if (t)
-                               wake_up_process(t);
-               }
-       }
-       rnp = rcu_get_root(rcu_state);
-       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-       if (rnp->node_kthread_task)
-               wake_up_process(rnp->node_kthread_task);
-       if (NUM_RCU_NODES > 1) {
-               rcu_for_each_leaf_node(rcu_state, rnp) {
-                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-                       t = rnp->node_kthread_task;
-                       if (t)
-                               wake_up_process(t);
-                       rcu_wake_one_boost_kthread(rnp);
-               }
-       }
-       return 0;
-}
-early_initcall(rcu_spawn_kthreads);
-
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
           struct rcu_state *rsp)
@@ -2207,44 +1909,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
        rcu_preempt_init_percpu_data(cpu);
 }
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-       struct rcu_node *rnp = rdp->mynode;
-
-       /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-       if (rcu_kthreads_spawnable) {
-               (void)rcu_spawn_one_cpu_kthread(cpu);
-               if (rnp->node_kthread_task == NULL)
-                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-       }
-}
-
-/*
- * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
- * but the RCU threads are woken on demand, and if demand is low this
- * could be a while triggering the hung task watchdog.
- *
- * In order to avoid this, poke all tasks once the CPU is fully
- * up and running.
- */
-static void __cpuinit rcu_online_kthreads(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-       struct rcu_node *rnp = rdp->mynode;
-       struct task_struct *t;
-
-       t = per_cpu(rcu_cpu_kthread_task, cpu);
-       if (t)
-               wake_up_process(t);
-
-       t = rnp->node_kthread_task;
-       if (t)
-               wake_up_process(t);
-
-       rcu_wake_one_boost_kthread(rnp);
-}
-
 /*
  * Handle CPU online/offline notification events.
  */
@@ -2262,7 +1926,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                rcu_prepare_kthreads(cpu);
                break;
        case CPU_ONLINE:
-               rcu_online_kthreads(cpu);
        case CPU_DOWN_FAILED:
                rcu_node_kthread_setaffinity(rnp, -1);
                rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2073,7 @@ void __init rcu_init(void)
        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
        __rcu_init_preempt();
+       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
        /*
         * We don't need protection against CPU-hotplug here because
index 7b9a08b4aaea01d48eacb8781296a7be205fb898..01b2ccda26fbf82880cc1d4a50bf9f2877e91bba 100644 (file)
@@ -369,6 +369,7 @@ struct rcu_state {
                                                /*  period because */
                                                /*  force_quiescent_state() */
                                                /*  was running. */
+       u8      boost;                          /* Subject to priority boost. */
        unsigned long gpnum;                    /* Current gp number. */
        unsigned long completed;                /* # of last completed gp. */
 
@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
                                      unsigned long flags);
+static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+static void invoke_rcu_callbacks_kthread(void);
+#ifdef CONFIG_RCU_BOOST
+static void rcu_preempt_do_callbacks(void);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                                          cpumask_var_t cm);
-static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                 struct rcu_node *rnp,
                                                 int rnp_index);
+static void invoke_rcu_node_kthread(struct rcu_node *rnp);
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
+static void __cpuinit rcu_prepare_kthreads(int cpu);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
index c8bff3099a89eeeccf9e3d330a16f6c4d9a2c340..8aafbb80b8b093e1072f2fcc4dc66bf40f249b7f 100644 (file)
@@ -68,6 +68,7 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -147,7 +148,7 @@ static void rcu_preempt_note_context_switch(int cpu)
        struct rcu_data *rdp;
        struct rcu_node *rnp;
 
-       if (t->rcu_read_lock_nesting &&
+       if (t->rcu_read_lock_nesting > 0 &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
@@ -190,6 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       } else if (t->rcu_read_lock_nesting < 0 &&
+                  t->rcu_read_unlock_special) {
+
+               /*
+                * Complete exit from RCU read-side critical section on
+                * behalf of preempted instance of __rcu_read_unlock().
+                */
+               rcu_read_unlock_special(t);
        }
 
        /*
@@ -284,7 +293,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
        int empty;
        int empty_exp;
@@ -309,7 +318,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
        }
 
        /* Hardware IRQ handlers cannot block. */
-       if (in_irq()) {
+       if (in_irq() || in_serving_softirq()) {
                local_irq_restore(flags);
                return;
        }
@@ -342,6 +351,11 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
+               /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
+               if (t->rcu_boosted) {
+                       special |= RCU_READ_UNLOCK_BOOSTED;
+                       t->rcu_boosted = 0;
+               }
 #endif /* #ifdef CONFIG_RCU_BOOST */
                t->rcu_blocked_node = NULL;
 
@@ -358,7 +372,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
                if (special & RCU_READ_UNLOCK_BOOSTED) {
-                       t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
                        rt_mutex_unlock(t->rcu_boost_mutex);
                        t->rcu_boost_mutex = NULL;
                }
@@ -387,13 +400,22 @@ void __rcu_read_unlock(void)
        struct task_struct *t = current;
 
        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-       --t->rcu_read_lock_nesting;
-       barrier();  /* decrement before load of ->rcu_read_unlock_special */
-       if (t->rcu_read_lock_nesting == 0 &&
-           unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-               rcu_read_unlock_special(t);
+       if (t->rcu_read_lock_nesting != 1)
+               --t->rcu_read_lock_nesting;
+       else {
+               t->rcu_read_lock_nesting = INT_MIN;
+               barrier();  /* assign before ->rcu_read_unlock_special load */
+               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+                       rcu_read_unlock_special(t);
+               barrier();  /* ->rcu_read_unlock_special load before assign */
+               t->rcu_read_lock_nesting = 0;
+       }
 #ifdef CONFIG_PROVE_LOCKING
-       WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+       {
+               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+               WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+       }
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
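The rewritten __rcu_read_unlock() above parks ->rcu_read_lock_nesting at INT_MIN while the outermost unlock is in flight, so code that preempts this window (see the new nesting < 0 branch in rcu_preempt_note_context_switch()) can tell "unlock in progress" apart from both "inside a reader" (> 0) and "not in a reader" (== 0). A minimal standalone C sketch of the sentinel idea, with hypothetical names rather than kernel code:

#include <limits.h>
#include <stdio.h>

static __thread int nesting;    /* stand-in for t->rcu_read_lock_nesting */

static void read_lock(void)
{
        nesting++;
}

static void read_unlock(void)
{
        if (nesting != 1) {
                --nesting;              /* nested unlock: plain decrement */
        } else {
                nesting = INT_MIN;      /* outermost: park at the sentinel */
                /* Special processing runs here; anyone preempting this
                 * window sees nesting < 0 and knows an unlock is in flight. */
                nesting = 0;            /* fully outside the critical section */
        }
}

int main(void)
{
        read_lock();
        read_lock();
        read_unlock();
        printf("after inner unlock: %d\n", nesting);    /* prints 1 */
        read_unlock();
        printf("after outer unlock: %d\n", nesting);    /* prints 0 */
        return 0;
}

This is also why the PROVE_LOCKING check above warns only on negative counts well above INT_MIN: the parked sentinel itself is legitimate.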
@@ -589,7 +611,8 @@ static void rcu_preempt_check_callbacks(int cpu)
                rcu_preempt_qs(cpu);
                return;
        }
-       if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+       if (t->rcu_read_lock_nesting > 0 &&
+           per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -602,6 +625,15 @@ static void rcu_preempt_process_callbacks(void)
                                &__get_cpu_var(rcu_preempt_data));
 }
 
+#ifdef CONFIG_RCU_BOOST
+
+static void rcu_preempt_do_callbacks(void)
+{
+       rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+}
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Queue a preemptible-RCU callback for invocation after a grace period.
  */
@@ -686,9 +718,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
-               if (!sync_rcu_preempt_exp_done(rnp))
+               if (!sync_rcu_preempt_exp_done(rnp)) {
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        break;
+               }
                if (rnp->parent == NULL) {
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
@@ -698,7 +733,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
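The two unlocks added above fix the exit paths of a hand-over-hand walk up the rcu_node tree: by the time the loop breaks, rnp points at a different node than the one the walk started from, so a single unlock after the loop would release the wrong lock. A userspace pthread sketch of the same locking pattern (hypothetical node type):

#include <pthread.h>

struct node {
        struct node *parent;
        pthread_mutex_t lock;
        int done;
};

static void walk_up(struct node *n)
{
        pthread_mutex_lock(&n->lock);
        for (;;) {
                if (n->done || !n->parent) {
                        /* drop the lock we hold *now*, not the first one */
                        pthread_mutex_unlock(&n->lock);
                        break;
                }
                /* hand-over-hand: release this node, take its parent */
                pthread_mutex_unlock(&n->lock);
                n = n->parent;
                pthread_mutex_lock(&n->lock);
        }
}

int main(void)
{
        struct node root = { .parent = NULL, .lock = PTHREAD_MUTEX_INITIALIZER };
        struct node leaf = { .parent = &root, .lock = PTHREAD_MUTEX_INITIALIZER };

        root.done = 1;
        walk_up(&leaf);
        return 0;
}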
 
 /*
@@ -1165,7 +1199,7 @@ static int rcu_boost(struct rcu_node *rnp)
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&mtx, t);
        t->rcu_boost_mutex = &mtx;
-       t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
+       t->rcu_boosted = 1;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
        rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
@@ -1248,6 +1282,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        }
 }
 
+/*
+ * Wake up the per-CPU kthread to invoke RCU callbacks.
+ */
+static void invoke_rcu_callbacks_kthread(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __this_cpu_write(rcu_cpu_has_work, 1);
+       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
+               local_irq_restore(flags);
+               return;
+       }
+       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+       local_irq_restore(flags);
+}
+
 /*
  * Set the affinity of the boost kthread.  The CPU-hotplug locks are
  * held, so no one should be messing with the existence of the boost
@@ -1288,6 +1339,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
        if (&rcu_preempt_state != rsp)
                return 0;
+       rsp->boost = 1;
        if (rnp->boost_kthread_task != NULL)
                return 0;
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1299,13 +1351,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+       wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
        return 0;
 }
 
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Stop RCU's per-CPU kthread when its CPU goes offline.
+ */
+static void rcu_stop_cpu_kthread(int cpu)
+{
+       struct task_struct *t;
+
+       /* Stop the CPU's kthread. */
+       t = per_cpu(rcu_cpu_kthread_task, cpu);
+       if (t != NULL) {
+               per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
+               kthread_stop(t);
+       }
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_kthread_do_work(void)
 {
-       if (rnp->boost_kthread_task)
-               wake_up_process(rnp->boost_kthread_task);
+       rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
+       rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+       rcu_preempt_do_callbacks();
+}
+
+/*
+ * Wake up the specified per-rcu_node-structure kthread.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
+ */
+static void invoke_rcu_node_kthread(struct rcu_node *rnp)
+{
+       struct task_struct *t;
+
+       t = rnp->node_kthread_task;
+       if (t != NULL)
+               wake_up_process(t);
+}
+
+/*
+ * Set the specified CPU's kthread to run RT or not, as specified by
+ * the to_rt argument.  The CPU-hotplug locks are held, so the task
+ * is not going away.
+ */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+       int policy;
+       struct sched_param sp;
+       struct task_struct *t;
+
+       t = per_cpu(rcu_cpu_kthread_task, cpu);
+       if (t == NULL)
+               return;
+       if (to_rt) {
+               policy = SCHED_FIFO;
+               sp.sched_priority = RCU_KTHREAD_PRIO;
+       } else {
+               policy = SCHED_NORMAL;
+               sp.sched_priority = 0;
+       }
+       sched_setscheduler_nocheck(t, policy, &sp);
+}
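rcu_cpu_kthread_setrt() retunes a live thread between SCHED_FIFO and SCHED_NORMAL without stopping it. For comparison, a userspace equivalent using pthread_setschedparam() in place of sched_setscheduler_nocheck() (the priority value is illustrative; RT policies typically need CAP_SYS_NICE):

#include <pthread.h>
#include <sched.h>

static int thread_setrt(pthread_t t, int to_rt)
{
        struct sched_param sp;
        int policy;

        if (to_rt) {
                policy = SCHED_FIFO;            /* kernel: SCHED_FIFO */
                sp.sched_priority = 1;          /* illustrative RT priority */
        } else {
                policy = SCHED_OTHER;           /* kernel: SCHED_NORMAL */
                sp.sched_priority = 0;
        }
        return pthread_setschedparam(t, policy, &sp);
}

int main(void)
{
        return thread_setrt(pthread_self(), 0);
}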
+
+/*
+ * Timer handler to initiate waking up the per-CPU kthreads that
+ * have yielded the CPU due to an excess of RCU callbacks.
+ * We wake up the per-rcu_node kthread, which in turn will wake up
+ * the booster kthread.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
+       struct rcu_node *rnp = rdp->mynode;
+
+       atomic_or(rdp->grpmask, &rnp->wakemask);
+       invoke_rcu_node_kthread(rnp);
+}
+
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted.  Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+{
+       struct sched_param sp;
+       struct timer_list yield_timer;
+
+       setup_timer_on_stack(&yield_timer, f, arg);
+       mod_timer(&yield_timer, jiffies + 2);
+       sp.sched_priority = 0;
+       sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+       set_user_nice(current, 19);
+       schedule();
+       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+       del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline.  We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh.  This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+       while (cpu_is_offline(cpu) ||
+              !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+              smp_processor_id() != cpu) {
+               if (kthread_should_stop())
+                       return 1;
+               per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+               per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
+               local_bh_enable();
+               schedule_timeout_uninterruptible(1);
+               if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+                       set_cpus_allowed_ptr(current, cpumask_of(cpu));
+               local_bh_disable();
+       }
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+       return 0;
+}
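rcu_cpu_kthread_should_stop() is a parking loop: while the kthread finds itself off its CPU, it sleeps briefly and re-asserts its affinity until either the CPU comes up or the thread is told to stop. A userspace skeleton of that loop, with sched_getcpu() standing in for smp_processor_id() (needs _GNU_SOURCE):

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>

/* Returns 1 if told to stop while parked, 0 once running on 'cpu'. */
static int park_until_on_cpu(int cpu, volatile int *should_stop)
{
        cpu_set_t want;

        CPU_ZERO(&want);
        CPU_SET(cpu, &want);
        while (sched_getcpu() != cpu) {
                if (*should_stop)
                        return 1;
                usleep(1000);                              /* brief sleep */
                sched_setaffinity(0, sizeof(want), &want); /* re-assert */
        }
        return 0;
}

int main(void)
{
        int stop = 0;

        return park_until_on_cpu(0, &stop);
}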
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
+ * earlier RCU softirq.
+ */
+static int rcu_cpu_kthread(void *arg)
+{
+       int cpu = (int)(long)arg;
+       unsigned long flags;
+       int spincnt = 0;
+       unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
+       char work;
+       char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+       for (;;) {
+               *statusp = RCU_KTHREAD_WAITING;
+               rcu_wait(*workp != 0 || kthread_should_stop());
+               local_bh_disable();
+               if (rcu_cpu_kthread_should_stop(cpu)) {
+                       local_bh_enable();
+                       break;
+               }
+               *statusp = RCU_KTHREAD_RUNNING;
+               per_cpu(rcu_cpu_kthread_loops, cpu)++;
+               local_irq_save(flags);
+               work = *workp;
+               *workp = 0;
+               local_irq_restore(flags);
+               if (work)
+                       rcu_kthread_do_work();
+               local_bh_enable();
+               if (*workp != 0)
+                       spincnt++;
+               else
+                       spincnt = 0;
+               if (spincnt > 10) {
+                       *statusp = RCU_KTHREAD_YIELDING;
+                       rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                       spincnt = 0;
+               }
+       }
+       *statusp = RCU_KTHREAD_STOPPED;
+       return 0;
+}
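The heart of rcu_cpu_kthread() is a snapshot-and-clear loop: the work flag is sampled and zeroed with interrupts off so a wakeup racing with the batch is not lost, and more than ten consecutive busy passes trigger a yield. A compressed userspace analogue, using an atomic exchange where the kernel uses the irq-off window (names hypothetical):

#include <stdatomic.h>
#include <sched.h>

static atomic_int has_work;

static void do_batch(void)
{
        /* invoke queued callbacks */
}

static void worker_pass(int *spincnt)
{
        /* atomic snapshot-and-clear stands in for the irq-off window */
        int work = atomic_exchange(&has_work, 0);

        if (work)
                do_batch();
        /* count back-to-back passes that found more work waiting */
        *spincnt = atomic_load(&has_work) ? *spincnt + 1 : 0;
        if (*spincnt > 10) {
                sched_yield();          /* stands in for rcu_yield() */
                *spincnt = 0;
        }
}

int main(void)
{
        int spincnt = 0;

        atomic_store(&has_work, 1);
        worker_pass(&spincnt);
        return 0;
}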
+
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task.  There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ *
+ * Please note that we cannot simply refuse to wake up the per-CPU
+ * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
+ * which can result in softlockup complaints if the task ends up being
+ * idle for more than a couple of minutes.
+ *
+ * However, please note also that we cannot bind the per-CPU kthread to its
+ * CPU until that CPU is fully online.  We also cannot wait until the
+ * CPU is fully online before we create its per-CPU kthread, as this would
+ * deadlock the system when CPU notifiers tried waiting for grace
+ * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
+ * is online.  If its CPU is not yet fully online, then the code in
+ * rcu_cpu_kthread() will wait until it is fully online, and then do
+ * the binding.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+       struct sched_param sp;
+       struct task_struct *t;
+
+       if (!rcu_scheduler_fully_active ||
+           per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+               return 0;
+       t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+       if (cpu_online(cpu))
+               kthread_bind(t, cpu);
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+       WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+       per_cpu(rcu_cpu_kthread_task, cpu) = t;
+       wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
+       return 0;
+}
+
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed.  We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
+{
+       int cpu;
+       unsigned long flags;
+       unsigned long mask;
+       struct rcu_node *rnp = (struct rcu_node *)arg;
+       struct sched_param sp;
+       struct task_struct *t;
+
+       for (;;) {
+               rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+               rcu_wait(atomic_read(&rnp->wakemask) != 0);
+               rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               mask = atomic_xchg(&rnp->wakemask, 0);
+               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+                       if ((mask & 0x1) == 0)
+                               continue;
+                       preempt_disable();
+                       t = per_cpu(rcu_cpu_kthread_task, cpu);
+                       if (!cpu_online(cpu) || t == NULL) {
+                               preempt_enable();
+                               continue;
+                       }
+                       per_cpu(rcu_cpu_has_work, cpu) = 1;
+                       sp.sched_priority = RCU_KTHREAD_PRIO;
+                       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+                       preempt_enable();
+               }
+       }
+       /* NOTREACHED */
+       rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
+       return 0;
+}
+
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question.  The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set; use -1 if there is
+ * no outgoing CPU.  If there are no CPUs left in the affinity set,
+ * this function lets the kthread run on any CPU outside the group.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+       cpumask_var_t cm;
+       int cpu;
+       unsigned long mask = rnp->qsmaskinit;
+
+       if (rnp->node_kthread_task == NULL)
+               return;
+       if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+               return;
+       cpumask_clear(cm);
+       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+               if ((mask & 0x1) && cpu != outgoingcpu)
+                       cpumask_set_cpu(cpu, cm);
+       if (cpumask_weight(cm) == 0) {
+               cpumask_setall(cm);
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
+                       cpumask_clear_cpu(cpu, cm);
+               WARN_ON_ONCE(cpumask_weight(cm) == 0);
+       }
+       set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+       rcu_boost_kthread_setaffinity(rnp, cm);
+       free_cpumask_var(cm);
+}
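rcu_node_kthread_setaffinity() converts the qsmaskinit bitmask into a cpumask, skips the outgoing CPU, and, if that leaves nothing, falls back to every CPU outside the group. The same mask-building logic in userspace terms, as a sketch with cpu_set_t (needs _GNU_SOURCE; the wrapper is hypothetical):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

static int set_group_affinity(pthread_t t, unsigned long mask,
                              int grplo, int grphi, int outgoingcpu)
{
        cpu_set_t cm;
        int cpu;

        CPU_ZERO(&cm);
        for (cpu = grplo; cpu <= grphi; cpu++, mask >>= 1)
                if ((mask & 0x1) && cpu != outgoingcpu)
                        CPU_SET(cpu, &cm);
        if (CPU_COUNT(&cm) == 0) {
                /* empty: fall back to "anywhere outside the group" */
                for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
                        CPU_SET(cpu, &cm);
                for (cpu = grplo; cpu <= grphi; cpu++)
                        CPU_CLR(cpu, &cm);
        }
        return pthread_setaffinity_np(t, sizeof(cm), &cm);
}

int main(void)
{
        /* group holds CPUs 0-1 (bitmask 0x3); CPU 1 is going away */
        return set_group_affinity(pthread_self(), 0x3, 0, 1, 1);
}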
+
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ * Called during boot before online/offline can happen, or, if
+ * during runtime, with the main CPU-hotplug locks held.  So only
+ * one of these can be executing at a time.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+                                               struct rcu_node *rnp)
+{
+       unsigned long flags;
+       int rnp_index = rnp - &rsp->node[0];
+       struct sched_param sp;
+       struct task_struct *t;
+
+       if (!rcu_scheduler_fully_active ||
+           rnp->qsmaskinit == 0)
+               return 0;
+       if (rnp->node_kthread_task == NULL) {
+               t = kthread_create(rcu_node_kthread, (void *)rnp,
+                                  "rcun%d", rnp_index);
+               if (IS_ERR(t))
+                       return PTR_ERR(t);
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               rnp->node_kthread_task = t;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               sp.sched_priority = 99;
+               sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+               wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
+       }
+       return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+}
+
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+       int cpu;
+       struct rcu_node *rnp;
+
+       rcu_scheduler_fully_active = 1;
+       for_each_possible_cpu(cpu) {
+               per_cpu(rcu_cpu_has_work, cpu) = 0;
+               if (cpu_online(cpu))
+                       (void)rcu_spawn_one_cpu_kthread(cpu);
+       }
+       rnp = rcu_get_root(rcu_state);
+       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       if (NUM_RCU_NODES > 1) {
+               rcu_for_each_leaf_node(rcu_state, rnp)
+                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       }
+       return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+       struct rcu_node *rnp = rdp->mynode;
+
+       /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+       if (rcu_scheduler_fully_active) {
+               (void)rcu_spawn_one_cpu_kthread(cpu);
+               if (rnp->node_kthread_task == NULL)
+                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       }
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1315,23 +1726,39 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-                                         cpumask_var_t cm)
+static void invoke_rcu_callbacks_kthread(void)
 {
+       WARN_ON_ONCE(1);
 }
 
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-                                                struct rcu_node *rnp,
-                                                int rnp_index)
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void rcu_stop_cpu_kthread(int cpu)
+{
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+}
+
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+}
+
+static int __init rcu_scheduler_really_started(void)
 {
+       rcu_scheduler_fully_active = 1;
        return 0;
 }
+early_initcall(rcu_scheduler_really_started);
 
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
+static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
 
@@ -1509,7 +1936,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do an
- * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked
+ * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
  * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
@@ -1560,7 +1987,7 @@ int rcu_needs_cpu(int cpu)
 
        /* If RCU callbacks are still pending, RCU still needs this CPU. */
        if (c)
-               invoke_rcu_cpu_kthread();
+               invoke_rcu_core();
        return c;
 }
 
index 9678cc3650f5e9c8f4e3eb409d26e9c4e784e7d5..4e144876dc68208b89931518c2d64d1433def061 100644 (file)
@@ -46,6 +46,8 @@
 #define RCU_TREE_NONCORE
 #include "rcutree.h"
 
+#ifdef CONFIG_RCU_BOOST
+
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
        return "SRWOY"[kthread_status];
 }
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
        if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
        seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
-       seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld",
+       seq_printf(m, " ql=%ld qs=%c%c%c%c",
                   rdp->qlen,
                   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
                        rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                        rdp->nxttail[RCU_NEXT_READY_TAIL]],
                   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
                        rdp->nxttail[RCU_WAIT_TAIL]],
-                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+       seq_printf(m, " kt=%d/%c/%d ktl=%x",
                   per_cpu(rcu_cpu_has_work, rdp->cpu),
                   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
                                          rdp->cpu)),
                   per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
-                  per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff,
-                  rdp->blimit);
+                  per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+       seq_printf(m, " b=%ld", rdp->blimit);
        seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
                   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
                   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
        seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
-       seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen,
+       seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
                   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
                        rdp->nxttail[RCU_NEXT_TAIL]],
                   ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
                        rdp->nxttail[RCU_NEXT_READY_TAIL]],
                   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
                        rdp->nxttail[RCU_WAIT_TAIL]],
-                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
+                  ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+#ifdef CONFIG_RCU_BOOST
+       seq_printf(m, ",%d,\"%c\"",
                   per_cpu(rcu_cpu_has_work, rdp->cpu),
                   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
-                                         rdp->cpu)),
-                  rdp->blimit);
+                                         rdp->cpu)));
+#endif /* #ifdef CONFIG_RCU_BOOST */
+       seq_printf(m, ",%ld", rdp->blimit);
        seq_printf(m, ",%lu,%lu,%lu\n",
                   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
 }
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 #ifdef CONFIG_NO_HZ
        seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
-       seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
+       seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
+#ifdef CONFIG_RCU_BOOST
+       seq_puts(m, ",\"kt\",\"ktl\"");
+#endif /* #ifdef CONFIG_RCU_BOOST */
+       seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
 #ifdef CONFIG_TREE_PREEMPT_RCU
        seq_puts(m, "\"rcu_preempt:\"\n");
        PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
index 798e2fae2a069645c13853f4b658dada92af8f66..3ff40178dce77d21fdd1ec508fc2baab5e87db84 100644 (file)
@@ -38,6 +38,14 @@ struct resource iomem_resource = {
 };
 EXPORT_SYMBOL(iomem_resource);
 
+/* constraints to be met while allocating resources */
+struct resource_constraint {
+       resource_size_t min, max, align;
+       resource_size_t (*alignf)(void *, const struct resource *,
+                       resource_size_t, resource_size_t);
+       void *alignf_data;
+};
+
 static DEFINE_RWLOCK(resource_lock);
 
 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
@@ -384,16 +392,13 @@ static bool resource_contains(struct resource *res1, struct resource *res2)
 }
 
 /*
- * Find empty slot in the resource tree given range and alignment.
+ * Find empty slot in the resource tree with the given range and
+ * alignment constraints.
  */
-static int find_resource(struct resource *root, struct resource *new,
-                        resource_size_t size, resource_size_t min,
-                        resource_size_t max, resource_size_t align,
-                        resource_size_t (*alignf)(void *,
-                                                  const struct resource *,
-                                                  resource_size_t,
-                                                  resource_size_t),
-                        void *alignf_data)
+static int __find_resource(struct resource *root, struct resource *old,
+                        struct resource *new,
+                        resource_size_t  size,
+                        struct resource_constraint *constraint)
 {
        struct resource *this = root->child;
        struct resource tmp = *new, avail, alloc;
@@ -404,25 +409,26 @@ static int find_resource(struct resource *root, struct resource *new,
         * Skip past an allocated resource that starts at 0, since the assignment
         * of this->start - 1 to tmp->end below would cause an underflow.
         */
-       if (this && this->start == 0) {
-               tmp.start = this->end + 1;
+       if (this && this->start == root->start) {
+               tmp.start = (this == old) ? old->start : this->end + 1;
                this = this->sibling;
        }
        for(;;) {
                if (this)
-                       tmp.end = this->start - 1;
+                       tmp.end = (this == old) ?  this->end : this->start - 1;
                else
                        tmp.end = root->end;
 
-               resource_clip(&tmp, min, max);
+               resource_clip(&tmp, constraint->min, constraint->max);
                arch_remove_reservations(&tmp);
 
                /* Check for overflow after ALIGN() */
                avail = *new;
-               avail.start = ALIGN(tmp.start, align);
+               avail.start = ALIGN(tmp.start, constraint->align);
                avail.end = tmp.end;
                if (avail.start >= tmp.start) {
-                       alloc.start = alignf(alignf_data, &avail, size, align);
+                       alloc.start = constraint->alignf(constraint->alignf_data, &avail,
+                                       size, constraint->align);
                        alloc.end = alloc.start + size - 1;
                        if (resource_contains(&avail, &alloc)) {
                                new->start = alloc.start;
@@ -432,14 +438,75 @@ static int find_resource(struct resource *root, struct resource *new,
                }
                if (!this)
                        break;
-               tmp.start = this->end + 1;
+               if (this != old)
+                       tmp.start = this->end + 1;
                this = this->sibling;
        }
        return -EBUSY;
 }
 
+/*
+ * Find empty slot in the resource tree given range and alignment.
+ */
+static int find_resource(struct resource *root, struct resource *new,
+                       resource_size_t size,
+                       struct resource_constraint  *constraint)
+{
+       return  __find_resource(root, NULL, new, size, constraint);
+}
+
 /**
- * allocate_resource - allocate empty slot in the resource tree given range & alignment
+ * reallocate_resource - allocate a slot in the resource tree given range & alignment.
+ *     The resource will be relocated if the new size cannot be reallocated in the
+ *     current location.
+ *
+ * @root: root resource descriptor
+ * @old:  resource descriptor desired by caller
+ * @newsize: new size of the resource descriptor
+ * @constraint: the range and alignment constraints to be met.
+ */
+int reallocate_resource(struct resource *root, struct resource *old,
+                       resource_size_t newsize,
+                       struct resource_constraint  *constraint)
+{
+       int err = 0;
+       struct resource new = *old;
+       struct resource *conflict;
+
+       write_lock(&resource_lock);
+
+       if ((err = __find_resource(root, old, &new, newsize, constraint)))
+               goto out;
+
+       if (resource_contains(&new, old)) {
+               old->start = new.start;
+               old->end = new.end;
+               goto out;
+       }
+
+       if (old->child) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (resource_contains(old, &new)) {
+               old->start = new.start;
+               old->end = new.end;
+       } else {
+               __release_resource(old);
+               *old = new;
+               conflict = __request_resource(root, old);
+               BUG_ON(conflict);
+       }
+out:
+       write_unlock(&resource_lock);
+       return err;
+}
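Once __find_resource() returns a candidate window, reallocate_resource() distinguishes three outcomes: grow in place when the candidate contains the old range, shrink in place when the old range contains the candidate (only safe for childless resources), or release and re-request at the new location. A standalone sketch of that decision over plain intervals:

#include <stdio.h>

struct range {
        unsigned long start, end;
};

static int contains(const struct range *a, const struct range *b)
{
        return a->start <= b->start && a->end >= b->end;
}

/* Returns 0 to resize in place, 1 to relocate, -1 when children pin it. */
static int realloc_outcome(const struct range *old, const struct range *cand,
                           int has_children)
{
        if (contains(cand, old))
                return 0;       /* grow in place */
        if (has_children)
                return -1;      /* cannot move: children map the old range */
        if (contains(old, cand))
                return 0;       /* shrink in place */
        return 1;               /* release, then re-request elsewhere */
}

int main(void)
{
        struct range old = { 0x100, 0x1ff }, cand = { 0x100, 0x2ff };

        printf("%d\n", realloc_outcome(&old, &cand, 0));        /* 0: grow */
        return 0;
}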
+
+
+/**
+ * allocate_resource - allocate empty slot in the resource tree given range & alignment.
+ *     The resource will be reallocated with a new size if it was already allocated.
  * @root: root resource descriptor
  * @new: resource descriptor desired by caller
  * @size: requested resource region size
@@ -459,12 +526,25 @@ int allocate_resource(struct resource *root, struct resource *new,
                      void *alignf_data)
 {
        int err;
+       struct resource_constraint constraint;
 
        if (!alignf)
                alignf = simple_align_resource;
 
+       constraint.min = min;
+       constraint.max = max;
+       constraint.align = align;
+       constraint.alignf = alignf;
+       constraint.alignf_data = alignf_data;
+
+       if (new->parent) {
+               /* resource is already allocated; try reallocating with
+                  the new constraints */
+               return reallocate_resource(root, new, size, &constraint);
+       }
+
        write_lock(&resource_lock);
-       err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+       err = find_resource(root, new, size, &constraint);
        if (err >= 0 && __request_resource(root, new))
                err = -EBUSY;
        write_unlock(&resource_lock);
index 3f2e502d609bb7957e10b4ec6aa647c9b3069c6e..fde6ff90352583d65ff890a407200f2fb0c3073e 100644 (file)
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);
  * (The default weight is 1024 - so there's no practical
  *  limitation from this.)
  */
-#define MIN_SHARES     2
-#define MAX_SHARES     (1UL << (18 + SCHED_LOAD_RESOLUTION))
+#define MIN_SHARES     (1UL <<  1)
+#define MAX_SHARES     (1UL << 18)
 
 static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
@@ -2544,13 +2544,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
 {
        struct rq *rq = this_rq();
-       struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-       if (!list)
-               return;
 
        raw_spin_lock(&rq->lock);
 
@@ -2563,9 +2559,45 @@ static void sched_ttwu_pending(void)
        raw_spin_unlock(&rq->lock);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+       struct rq *rq = this_rq();
+       struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+       if (!list)
+               return;
+
+       sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 void scheduler_ipi(void)
 {
-       sched_ttwu_pending();
+       struct rq *rq = this_rq();
+       struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+       if (!list)
+               return;
+
+       /*
+        * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+        * traditionally all their work was done from the interrupt return
+        * path. Now that we actually do some work, we need to make sure
+        * we do call them.
+        *
+        * Some archs already do call them; luckily irq_enter/exit nest
+        * properly.
+        *
+        * Arguably we should visit all archs and update all handlers;
+        * however, a fair share of IPIs are still resched-only, so this
+        * would somewhat pessimize the simple resched case.
+        */
+       irq_enter();
+       sched_ttwu_do_pending(list);
+       irq_exit();
 }
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
@@ -6557,7 +6589,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               if (!group->cpu_power) {
+               if (!group->sgp->power) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: domain->cpu_power not "
                                        "set\n");
@@ -6581,9 +6613,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
                printk(KERN_CONT " %s", str);
-               if (group->cpu_power != SCHED_POWER_SCALE) {
+               if (group->sgp->power != SCHED_POWER_SCALE) {
                        printk(KERN_CONT " (cpu_power = %d)",
-                               group->cpu_power);
+                               group->sgp->power);
                }
 
                group = group->next;
@@ -6774,11 +6806,39 @@ static struct root_domain *alloc_rootdomain(void)
        return rd;
 }
 
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+       struct sched_group *tmp, *first;
+
+       if (!sg)
+               return;
+
+       first = sg;
+       do {
+               tmp = sg->next;
+
+               if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+                       kfree(sg->sgp);
+
+               kfree(sg);
+               sg = tmp;
+       } while (sg != first);
+}
+
 static void free_sched_domain(struct rcu_head *rcu)
 {
        struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-       if (atomic_dec_and_test(&sd->groups->ref))
+
+       /*
+        * If it's an overlapping domain it has private groups; iterate and
+        * nuke them all.
+        */
+       if (sd->flags & SD_OVERLAP) {
+               free_sched_groups(sd->groups, 1);
+       } else if (atomic_dec_and_test(&sd->groups->ref)) {
+               kfree(sd->groups->sgp);
                kfree(sd->groups);
+       }
        kfree(sd);
 }
 
@@ -6945,6 +7005,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
        struct sched_domain **__percpu sd;
        struct sched_group **__percpu sg;
+       struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -6964,15 +7025,73 @@ struct sched_domain_topology_level;
 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
+#define SDTL_OVERLAP   0x01
+
 struct sched_domain_topology_level {
        sched_domain_init_f init;
        sched_domain_mask_f mask;
+       int                 flags;
        struct sd_data      data;
 };
 
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+       struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+       const struct cpumask *span = sched_domain_span(sd);
+       struct cpumask *covered = sched_domains_tmpmask;
+       struct sd_data *sdd = sd->private;
+       struct sched_domain *child;
+       int i;
+
+       cpumask_clear(covered);
+
+       for_each_cpu(i, span) {
+               struct cpumask *sg_span;
+
+               if (cpumask_test_cpu(i, covered))
+                       continue;
+
+               sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                               GFP_KERNEL, cpu_to_node(i));
+
+               if (!sg)
+                       goto fail;
+
+               sg_span = sched_group_cpus(sg);
+
+               child = *per_cpu_ptr(sdd->sd, i);
+               if (child->child) {
+                       child = child->child;
+                       cpumask_copy(sg_span, sched_domain_span(child));
+               } else
+                       cpumask_set_cpu(i, sg_span);
+
+               cpumask_or(covered, covered, sg_span);
+
+               sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+               atomic_inc(&sg->sgp->ref);
+
+               if (cpumask_test_cpu(cpu, sg_span))
+                       groups = sg;
+
+               if (!first)
+                       first = sg;
+               if (last)
+                       last->next = sg;
+               last = sg;
+               last->next = first;
+       }
+       sd->groups = groups;
+
+       return 0;
+
+fail:
+       free_sched_groups(first, 0);
+
+       return -ENOMEM;
+}
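build_overlap_sched_groups() uses the first/last idiom and re-closes the ring on every insertion, so even a mid-loop allocation failure leaves a well-formed circular list for free_sched_groups() to walk. The idiom in isolation, as a sketch with a hypothetical group type:

#include <stdlib.h>

struct group {
        struct group *next;
        int id;
};

static struct group *build_ring(int n)
{
        struct group *first = NULL, *last = NULL, *g;
        int i;

        for (i = 0; i < n; i++) {
                g = calloc(1, sizeof(*g));
                if (!g)
                        return first;   /* partial ring is still closed */
                g->id = i;
                if (!first)
                        first = g;
                if (last)
                        last->next = g;
                last = g;
                last->next = first;     /* re-close the ring every step */
        }
        return first;
}

int main(void)
{
        struct group *first = build_ring(3), *sg = first;

        if (!first)
                return 1;
        do {
                sg = sg->next;          /* walk once around the ring */
        } while (sg != first);
        return 0;
}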
+
 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 {
        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -6981,24 +7100,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
        if (child)
                cpu = cpumask_first(sched_domain_span(child));
 
-       if (sg)
+       if (sg) {
                *sg = *per_cpu_ptr(sdd->sg, cpu);
+               (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+               atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+       }
 
        return cpu;
 }
 
 /*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
  */
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
 {
        struct sched_group *first = NULL, *last = NULL;
        struct sd_data *sdd = sd->private;
@@ -7006,6 +7125,12 @@ build_sched_groups(struct sched_domain *sd)
        struct cpumask *covered;
        int i;
 
+       get_group(cpu, sdd, &sd->groups);
+       atomic_inc(&sd->groups->ref);
+
+       if (cpu != cpumask_first(sched_domain_span(sd)))
+               return 0;
+
        lockdep_assert_held(&sched_domains_mutex);
        covered = sched_domains_tmpmask;
 
@@ -7020,7 +7145,7 @@ build_sched_groups(struct sched_domain *sd)
                        continue;
 
                cpumask_clear(sched_group_cpus(sg));
-               sg->cpu_power = 0;
+               sg->sgp->power = 0;
 
                for_each_cpu(j, span) {
                        if (get_group(j, sdd, NULL) != group)
@@ -7037,6 +7162,8 @@ build_sched_groups(struct sched_domain *sd)
                last = sg;
        }
        last->next = first;
+
+       return 0;
 }
 
 /*
@@ -7051,12 +7178,17 @@ build_sched_groups(struct sched_domain *sd)
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-       WARN_ON(!sd || !sd->groups);
+       struct sched_group *sg = sd->groups;
 
-       if (cpu != group_first_cpu(sd->groups))
-               return;
+       WARN_ON(!sd || !sg);
+
+       do {
+               sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+               sg = sg->next;
+       } while (sg != sd->groups);
 
-       sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+       if (cpu != group_first_cpu(sg))
+               return;
 
        update_group_power(sd, cpu);
 }
@@ -7177,15 +7309,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 static void claim_allocations(int cpu, struct sched_domain *sd)
 {
        struct sd_data *sdd = sd->private;
-       struct sched_group *sg = sd->groups;
 
        WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
        *per_cpu_ptr(sdd->sd, cpu) = NULL;
 
-       if (cpu == cpumask_first(sched_group_cpus(sg))) {
-               WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+       if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
                *per_cpu_ptr(sdd->sg, cpu) = NULL;
-       }
+
+       if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+               *per_cpu_ptr(sdd->sgp, cpu) = NULL;
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -7210,7 +7342,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #endif
        { sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_NUMA
-       { sd_init_NODE, cpu_node_mask, },
+       { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
        { sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
        { NULL, },
@@ -7234,9 +7366,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                if (!sdd->sg)
                        return -ENOMEM;
 
+               sdd->sgp = alloc_percpu(struct sched_group_power *);
+               if (!sdd->sgp)
+                       return -ENOMEM;
+
                for_each_cpu(j, cpu_map) {
                        struct sched_domain *sd;
                        struct sched_group *sg;
+                       struct sched_group_power *sgp;
 
                        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
@@ -7251,6 +7388,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                                return -ENOMEM;
 
                        *per_cpu_ptr(sdd->sg, j) = sg;
+
+                       sgp = kzalloc_node(sizeof(struct sched_group_power),
+                                       GFP_KERNEL, cpu_to_node(j));
+                       if (!sgp)
+                               return -ENOMEM;
+
+                       *per_cpu_ptr(sdd->sgp, j) = sgp;
                }
        }
 
@@ -7266,11 +7410,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
                struct sd_data *sdd = &tl->data;
 
                for_each_cpu(j, cpu_map) {
-                       kfree(*per_cpu_ptr(sdd->sd, j));
+                       struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+                       if (sd && (sd->flags & SD_OVERLAP))
+                               free_sched_groups(sd->groups, 0);
                        kfree(*per_cpu_ptr(sdd->sg, j));
+                       kfree(*per_cpu_ptr(sdd->sgp, j));
                }
                free_percpu(sdd->sd);
                free_percpu(sdd->sg);
+               free_percpu(sdd->sgp);
        }
 }
 
@@ -7316,8 +7464,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
                struct sched_domain_topology_level *tl;
 
                sd = NULL;
-               for (tl = sched_domain_topology; tl->init; tl++)
+               for (tl = sched_domain_topology; tl->init; tl++) {
                        sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+                       if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+                               sd->flags |= SD_OVERLAP;
+                       if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+                               break;
+               }
 
                while (sd->child)
                        sd = sd->child;
@@ -7329,13 +7482,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
        for_each_cpu(i, cpu_map) {
                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
                        sd->span_weight = cpumask_weight(sched_domain_span(sd));
-                       get_group(i, sd->private, &sd->groups);
-                       atomic_inc(&sd->groups->ref);
-
-                       if (i != cpumask_first(sched_domain_span(sd)))
-                               continue;
-
-                       build_sched_groups(sd);
+                       if (sd->flags & SD_OVERLAP) {
+                               if (build_overlap_sched_groups(sd, i))
+                                       goto error;
+                       } else {
+                               if (build_sched_groups(sd, i))
+                                       goto error;
+                       }
                }
        }
 
@@ -7757,6 +7910,9 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #endif
 #endif
        cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+       cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
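The min_vruntime_copy initialized here supports a seqcount-style lockless read of the 64-bit min_vruntime on 32-bit kernels elsewhere in this series: the writer updates the value, issues smp_wmb(), then updates the copy; a reader loads the copy, issues smp_rmb(), loads the value, and retries until the two agree, so a torn 64-bit load is always caught. A hedged C11 sketch of the protocol (fences stand in for the kernel barriers; the plain accesses are, strictly speaking, a data race in ISO C, just as they are in the kernel idiom):

#include <stdatomic.h>
#include <stdint.h>

static uint64_t val;            /* may tear on a 32-bit machine */
static uint64_t val_copy;       /* trailing copy for lockless readers */

static void write_val(uint64_t v)       /* writer is otherwise serialized */
{
        val = v;
        atomic_thread_fence(memory_order_release);      /* smp_wmb() */
        val_copy = v;
}

static uint64_t read_val(void)          /* lockless reader */
{
        uint64_t v, c;

        do {
                c = val_copy;
                atomic_thread_fence(memory_order_acquire);      /* smp_rmb() */
                v = val;
        } while (v != c);       /* a torn update makes the pair disagree */
        return v;
}

int main(void)
{
        write_val(42);
        return read_val() == 42 ? 0 : 1;
}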
 
 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
@@ -8450,10 +8606,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
        if (!tg->se[0])
                return -EINVAL;
 
-       if (shares < MIN_SHARES)
-               shares = MIN_SHARES;
-       else if (shares > MAX_SHARES)
-               shares = MAX_SHARES;
+       shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 
        mutex_lock(&shares_mutex);
        if (tg->shares == shares)
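This clamp pairs with the MIN_SHARES/MAX_SHARES redefinition earlier in the hunk set: the resolution shift moves out of MAX_SHARES and into scale_load() at the point of use. A small worked example, assuming the usual definition scale_load(w) == w << SCHED_LOAD_RESOLUTION with a resolution of 10:

#include <stdio.h>

#define SCHED_LOAD_RESOLUTION   10      /* assumption for illustration */
#define scale_load(w)           ((w) << SCHED_LOAD_RESOLUTION)
#define MIN_SHARES              (1UL << 1)
#define MAX_SHARES              (1UL << 18)

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
                              unsigned long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        unsigned long shares = 1;       /* below the floor */

        shares = clamp_ul(shares, scale_load(MIN_SHARES),
                          scale_load(MAX_SHARES));
        printf("%lu\n", shares);        /* 2048: the floor in scaled units */
        return 0;
}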
index 433491c2dc8f5c9952655de72958c7019dadd57f..c768588e180b5ae7a83bebad45ace3ac34da0d19 100644 (file)
@@ -1585,7 +1585,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                }
 
                /* Adjust by relative CPU power of the group */
-               avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
+               avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
 
                if (local_group) {
                        this_load = avg_load;
@@ -2631,7 +2631,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
                power >>= SCHED_POWER_SHIFT;
        }
 
-       sdg->cpu_power_orig = power;
+       sdg->sgp->power_orig = power;
 
        if (sched_feat(ARCH_POWER))
                power *= arch_scale_freq_power(sd, cpu);
@@ -2647,7 +2647,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
                power = 1;
 
        cpu_rq(cpu)->cpu_power = power;
-       sdg->cpu_power = power;
+       sdg->sgp->power = power;
 }
 
 static void update_group_power(struct sched_domain *sd, int cpu)
@@ -2665,11 +2665,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
        group = child->groups;
        do {
-               power += group->cpu_power;
+               power += group->sgp->power;
                group = group->next;
        } while (group != child->groups);
 
-       sdg->cpu_power = power;
+       sdg->sgp->power = power;
 }
 
 /*
@@ -2691,7 +2691,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
        /*
         * If ~90% of the cpu_power is still there, we're good.
         */
-       if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+       if (group->sgp->power * 32 > group->sgp->power_orig * 29)
                return 1;
 
        return 0;
@@ -2771,7 +2771,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        }
 
        /* Adjust by relative CPU power of the group */
-       sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
+       sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
 
        /*
         * Consider the group unbalanced when the imbalance is larger
@@ -2788,7 +2788,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
                sgs->group_imb = 1;
 
-       sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+       sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
                                                SCHED_POWER_SCALE);
        if (!sgs->group_capacity)
                sgs->group_capacity = fix_small_capacity(sd, group);
@@ -2877,7 +2877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                        return;
 
                sds->total_load += sgs.group_load;
-               sds->total_pwr += sg->cpu_power;
+               sds->total_pwr += sg->sgp->power;
 
                /*
                 * In case the child domain prefers tasks go to siblings
@@ -2962,7 +2962,7 @@ static int check_asym_packing(struct sched_domain *sd,
        if (this_cpu > busiest_cpu)
                return 0;
 
-       *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
+       *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
                                       SCHED_POWER_SCALE);
        return 1;
 }
@@ -2993,7 +2993,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 
        scaled_busy_load_per_task = sds->busiest_load_per_task
                                         * SCHED_POWER_SCALE;
-       scaled_busy_load_per_task /= sds->busiest->cpu_power;
+       scaled_busy_load_per_task /= sds->busiest->sgp->power;
 
        if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
                        (scaled_busy_load_per_task * imbn)) {
@@ -3007,28 +3007,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
         * moving them.
         */
 
-       pwr_now += sds->busiest->cpu_power *
+       pwr_now += sds->busiest->sgp->power *
                        min(sds->busiest_load_per_task, sds->max_load);
-       pwr_now += sds->this->cpu_power *
+       pwr_now += sds->this->sgp->power *
                        min(sds->this_load_per_task, sds->this_load);
        pwr_now /= SCHED_POWER_SCALE;
 
        /* Amount of load we'd subtract */
        tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-               sds->busiest->cpu_power;
+               sds->busiest->sgp->power;
        if (sds->max_load > tmp)
-               pwr_move += sds->busiest->cpu_power *
+               pwr_move += sds->busiest->sgp->power *
                        min(sds->busiest_load_per_task, sds->max_load - tmp);
 
        /* Amount of load we'd add */
-       if (sds->max_load * sds->busiest->cpu_power <
+       if (sds->max_load * sds->busiest->sgp->power <
                sds->busiest_load_per_task * SCHED_POWER_SCALE)
-               tmp = (sds->max_load * sds->busiest->cpu_power) /
-                       sds->this->cpu_power;
+               tmp = (sds->max_load * sds->busiest->sgp->power) /
+                       sds->this->sgp->power;
        else
                tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-                       sds->this->cpu_power;
-       pwr_move += sds->this->cpu_power *
+                       sds->this->sgp->power;
+       pwr_move += sds->this->sgp->power *
                        min(sds->this_load_per_task, sds->this_load + tmp);
        pwr_move /= SCHED_POWER_SCALE;
 
@@ -3074,7 +3074,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
                load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
 
-               load_above_capacity /= sds->busiest->cpu_power;
+               load_above_capacity /= sds->busiest->sgp->power;
        }
 
        /*
@@ -3090,8 +3090,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
        max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
        /* How much load to actually move to equalise the imbalance */
-       *imbalance = min(max_pull * sds->busiest->cpu_power,
-               (sds->avg_load - sds->this_load) * sds->this->cpu_power)
+       *imbalance = min(max_pull * sds->busiest->sgp->power,
+               (sds->avg_load - sds->this_load) * sds->this->sgp->power)
                        / SCHED_POWER_SCALE;
 
        /*
index be40f7371ee1ac2e2d91c6679bafbf798606e08e..1e7066d76c268c33bb17316b040e480389eac7b8 100644 (file)
@@ -70,3 +70,5 @@ SCHED_FEAT(NONIRQ_POWER, 1)
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
 SCHED_FEAT(TTWU_QUEUE, 1)
+
+SCHED_FEAT(FORCE_SD_OVERLAP, 0)
index ff7678603328b3ba5e00c74dc9fd08bcbf113987..415d85d6f6c637b099826d012e46f70832b1d557 100644 (file)
@@ -1178,18 +1178,25 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 {
        struct sighand_struct *sighand;
 
-       rcu_read_lock();
        for (;;) {
+               local_irq_save(*flags);
+               rcu_read_lock();
                sighand = rcu_dereference(tsk->sighand);
-               if (unlikely(sighand == NULL))
+               if (unlikely(sighand == NULL)) {
+                       rcu_read_unlock();
+                       local_irq_restore(*flags);
                        break;
+               }
 
-               spin_lock_irqsave(&sighand->siglock, *flags);
-               if (likely(sighand == tsk->sighand))
+               spin_lock(&sighand->siglock);
+               if (likely(sighand == tsk->sighand)) {
+                       rcu_read_unlock();
                        break;
-               spin_unlock_irqrestore(&sighand->siglock, *flags);
+               }
+               spin_unlock(&sighand->siglock);
+               rcu_read_unlock();
+               local_irq_restore(*flags);
        }
-       rcu_read_unlock();
 
        return sighand;
 }
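
The rework above trades spin_lock_irqsave() for an explicit irq-off, RCU
read section, plain spin_lock, revalidate, retry sequence. A userspace
sketch of the underlying lock-validate-retry shape; it assumes the
published object is never freed, which is the part the kernel covers with
RCU (and with disabling irqs before entering the read section):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct obj {
        pthread_mutex_t lock;
};

/* Shared, republishable pointer (stands in for tsk->sighand). */
static _Atomic(struct obj *) current_obj;

static struct obj *lock_current_obj(void)
{
        for (;;) {
                struct obj *o = atomic_load(&current_obj);

                if (o == NULL)
                        return NULL;    /* nothing published */
                pthread_mutex_lock(&o->lock);
                if (o == atomic_load(&current_obj))
                        return o;       /* still current: locked and valid */
                /* pointer moved while we waited for the lock: retry */
                pthread_mutex_unlock(&o->lock);
        }
}
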
index 73a1951935581f48915d77e8409a3049b78de8df..fb67dfa8394edc70174a51fd93b72965a3929b71 100644 (file)
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
        .notifier_call          = hotplug_cfd,
 };
 
-static int __cpuinit init_call_single_data(void)
+void __init call_function_init(void)
 {
        void *cpu = (void *)(long)smp_processor_id();
        int i;
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void)
 
        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
        register_cpu_notifier(&hotplug_cfd_notifier);
-
-       return 0;
 }
-early_initcall(init_call_single_data);
 
 /*
  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
index 13960170cad4d91f0ec00dba8e0132d64e7c68fd..fca82c32042b73133f2ab74838287c94cf8ad152 100644 (file)
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
-       "TASKLET", "SCHED", "HRTIMER"
+       "TASKLET", "SCHED", "HRTIMER", "RCU"
 };
 
 /*
@@ -315,16 +315,24 @@ static inline void invoke_softirq(void)
 {
        if (!force_irqthreads)
                __do_softirq();
-       else
+       else {
+               __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
                wakeup_softirqd();
+               __local_bh_enable(SOFTIRQ_OFFSET);
+       }
 }
 #else
 static inline void invoke_softirq(void)
 {
        if (!force_irqthreads)
                do_softirq();
-       else
+       else {
+               __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
                wakeup_softirqd();
+               __local_bh_enable(SOFTIRQ_OFFSET);
+       }
 }
 #endif
 
index 9ffea360a778a3ebf2890fdff5c1b79ddc5542c6..fc0f220054172bb4a0f8b1edcd0f3cf9b50202f2 100644 (file)
@@ -285,16 +285,18 @@ ret:
 static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
 {
        struct listener_list *listeners;
-       struct listener *s, *tmp;
+       struct listener *s, *tmp, *s2;
        unsigned int cpu;
 
        if (!cpumask_subset(mask, cpu_possible_mask))
                return -EINVAL;
 
+       s = NULL;
        if (isadd == REGISTER) {
                for_each_cpu(cpu, mask) {
-                       s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
-                                        cpu_to_node(cpu));
+                       if (!s)
+                               s = kmalloc_node(sizeof(struct listener),
+                                                GFP_KERNEL, cpu_to_node(cpu));
                        if (!s)
                                goto cleanup;
                        s->pid = pid;
@@ -303,9 +305,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
 
                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
+                       list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
+                               if (s2->pid == pid)
+                                       goto next_cpu;
+                       }
                        list_add(&s->list, &listeners->list);
+                       s = NULL;
+next_cpu:
                        up_write(&listeners->sem);
                }
+               kfree(s);
                return 0;
        }
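
The reworked registration path holds at most one spare listener at a
time, skips CPUs that already carry this pid, consumes the allocation
only on a successful insert, and frees any leftover spare at the end, so
a duplicate registration can no longer leak memory. A standalone sketch
of that reuse-or-insert shape, with hypothetical list plumbing:

#include <stdlib.h>

struct listener {
        int pid;
        struct listener *next;
};

#define NCPU 4
static struct listener *per_cpu_list[NCPU];

static int register_listener(int pid)
{
        struct listener *s = NULL;

        for (int cpu = 0; cpu < NCPU; cpu++) {
                struct listener *it;

                if (!s) {
                        s = malloc(sizeof(*s));
                        if (!s)
                                return -1;
                        s->pid = pid;
                }
                for (it = per_cpu_list[cpu]; it; it = it->next)
                        if (it->pid == pid)
                                goto next_cpu;  /* already registered here */
                s->next = per_cpu_list[cpu];
                per_cpu_list[cpu] = s;
                s = NULL;                       /* consumed: allocate anew */
next_cpu:
                ;
        }
        free(s);                                /* unused spare, if any */
        return 0;
}
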
 
index 2d966244ea60d0a8dcf963e1052b2dbfbfcac76d..59f369f98a04311f5bfa49d01e706d4b12ca97c4 100644 (file)
@@ -42,15 +42,75 @@ static struct alarm_base {
        clockid_t               base_clockid;
 } alarm_bases[ALARM_NUMTYPE];
 
+/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
+static ktime_t freezer_delta;
+static DEFINE_SPINLOCK(freezer_delta_lock);
+
 #ifdef CONFIG_RTC_CLASS
 /* rtc timer and device for setting alarm wakeups at suspend */
 static struct rtc_timer                rtctimer;
 static struct rtc_device       *rtcdev;
-#endif
+static DEFINE_SPINLOCK(rtcdev_lock);
 
-/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
-static ktime_t freezer_delta;
-static DEFINE_SPINLOCK(freezer_delta_lock);
+/**
+ * has_wakealarm - check rtc device has wakealarm ability
+ * @dev: current device
+ * @name_ptr: name to be returned
+ *
+ * This helper function checks to see if the rtc device can wake
+ * from suspend.
+ */
+static int has_wakealarm(struct device *dev, void *name_ptr)
+{
+       struct rtc_device *candidate = to_rtc_device(dev);
+
+       if (!candidate->ops->set_alarm)
+               return 0;
+       if (!device_may_wakeup(candidate->dev.parent))
+               return 0;
+
+       *(const char **)name_ptr = dev_name(dev);
+       return 1;
+}
+
+/**
+ * alarmtimer_get_rtcdev - Return selected rtc device
+ *
+ * This function returns the rtc device to use for wakealarms.
+ * If one has not already been chosen, it checks to see if a
+ * functional rtc device is available.
+ */
+static struct rtc_device *alarmtimer_get_rtcdev(void)
+{
+       struct device *dev;
+       char *str;
+       unsigned long flags;
+       struct rtc_device *ret;
+
+       spin_lock_irqsave(&rtcdev_lock, flags);
+       if (!rtcdev) {
+               /* Find an rtc device and init the rtc_timer */
+               dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
+               /* If we have a device then str is valid. See has_wakealarm() */
+               if (dev) {
+                       rtcdev = rtc_class_open(str);
+                       /*
+                        * Drop the reference we got in class_find_device,
+                        * rtc_open takes its own.
+                        */
+                       put_device(dev);
+                       rtc_timer_init(&rtctimer, NULL, NULL);
+               }
+       }
+       ret = rtcdev;
+       spin_unlock_irqrestore(&rtcdev_lock, flags);
+
+       return ret;
+}
+#else
+#define alarmtimer_get_rtcdev() (0)
+#define rtcdev (0)
+#endif
 
 
 /**
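
alarmtimer_get_rtcdev() replaces a one-shot late_initcall with lazy
resolution under a spinlock: every caller retries the lookup until an
RTC shows up, and a NULL result lets each entry point fail cleanly with
-ENOTSUPP. A userspace sketch of the same lazy-init shape, with a
hypothetical probe_device():

#include <pthread.h>
#include <stddef.h>

struct device;                          /* hypothetical opaque handle   */
struct device *probe_device(void);      /* hypothetical: NULL if absent */

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct device *cached_dev;

struct device *get_dev(void)
{
        struct device *ret;

        pthread_mutex_lock(&dev_lock);
        if (cached_dev == NULL)
                cached_dev = probe_device();    /* retried until found */
        ret = cached_dev;
        pthread_mutex_unlock(&dev_lock);
        return ret;                             /* NULL: not available yet */
}
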
@@ -166,6 +226,7 @@ static int alarmtimer_suspend(struct device *dev)
        struct rtc_time tm;
        ktime_t min, now;
        unsigned long flags;
+       struct rtc_device *rtc;
        int i;
 
        spin_lock_irqsave(&freezer_delta_lock, flags);
@@ -173,8 +234,9 @@ static int alarmtimer_suspend(struct device *dev)
        freezer_delta = ktime_set(0, 0);
        spin_unlock_irqrestore(&freezer_delta_lock, flags);
 
+       rtc = rtcdev;
        /* If we have no rtcdev, just return */
-       if (!rtcdev)
+       if (!rtc)
                return 0;
 
        /* Find the soonest timer to expire */
@@ -199,12 +261,12 @@ static int alarmtimer_suspend(struct device *dev)
        WARN_ON(min.tv64 < NSEC_PER_SEC);
 
        /* Setup an rtc timer to fire that far in the future */
-       rtc_timer_cancel(rtcdev, &rtctimer);
-       rtc_read_time(rtcdev, &tm);
+       rtc_timer_cancel(rtc, &rtctimer);
+       rtc_read_time(rtc, &tm);
        now = rtc_tm_to_ktime(tm);
        now = ktime_add(now, min);
 
-       rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0));
+       rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
 
        return 0;
 }
@@ -322,6 +384,9 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
        clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
 
+       if (!alarmtimer_get_rtcdev())
+               return -ENOTSUPP;
+
        return hrtimer_get_res(baseid, tp);
 }
 
@@ -336,6 +401,9 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
 {
        struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
 
+       if (!alarmtimer_get_rtcdev())
+               return -ENOTSUPP;
+
        *tp = ktime_to_timespec(base->gettime());
        return 0;
 }
@@ -351,6 +419,9 @@ static int alarm_timer_create(struct k_itimer *new_timer)
        enum  alarmtimer_type type;
        struct alarm_base *base;
 
+       if (!alarmtimer_get_rtcdev())
+               return -ENOTSUPP;
+
        if (!capable(CAP_WAKE_ALARM))
                return -EPERM;
 
@@ -385,6 +456,9 @@ static void alarm_timer_get(struct k_itimer *timr,
  */
 static int alarm_timer_del(struct k_itimer *timr)
 {
+       if (!rtcdev)
+               return -ENOTSUPP;
+
        alarm_cancel(&timr->it.alarmtimer);
        return 0;
 }
@@ -402,6 +476,9 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
                                struct itimerspec *new_setting,
                                struct itimerspec *old_setting)
 {
+       if (!rtcdev)
+               return -ENOTSUPP;
+
        /* Save old values */
        old_setting->it_interval =
                        ktime_to_timespec(timr->it.alarmtimer.period);
@@ -541,6 +618,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        int ret = 0;
        struct restart_block *restart;
 
+       if (!alarmtimer_get_rtcdev())
+               return -ENOTSUPP;
+
        if (!capable(CAP_WAKE_ALARM))
                return -EPERM;
 
@@ -638,65 +718,3 @@ static int __init alarmtimer_init(void)
 }
 device_initcall(alarmtimer_init);
 
-#ifdef CONFIG_RTC_CLASS
-/**
- * has_wakealarm - check rtc device has wakealarm ability
- * @dev: current device
- * @name_ptr: name to be returned
- *
- * This helper function checks to see if the rtc device can wake
- * from suspend.
- */
-static int __init has_wakealarm(struct device *dev, void *name_ptr)
-{
-       struct rtc_device *candidate = to_rtc_device(dev);
-
-       if (!candidate->ops->set_alarm)
-               return 0;
-       if (!device_may_wakeup(candidate->dev.parent))
-               return 0;
-
-       *(const char **)name_ptr = dev_name(dev);
-       return 1;
-}
-
-/**
- * alarmtimer_init_late - Late initializing of alarmtimer code
- *
- * This function locates a rtc device to use for wakealarms.
- * Run as late_initcall to make sure rtc devices have been
- * registered.
- */
-static int __init alarmtimer_init_late(void)
-{
-       struct device *dev;
-       char *str;
-
-       /* Find an rtc device and init the rtc_timer */
-       dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
-       /* If we have a device then str is valid. See has_wakealarm() */
-       if (dev) {
-               rtcdev = rtc_class_open(str);
-               /*
-                * Drop the reference we got in class_find_device,
-                * rtc_open takes its own.
-                */
-               put_device(dev);
-       }
-       if (!rtcdev) {
-               printk(KERN_WARNING "No RTC device found, ALARM timers will"
-                       " not wake from suspend");
-       }
-       rtc_timer_init(&rtctimer, NULL, NULL);
-
-       return 0;
-}
-#else
-static int __init alarmtimer_init_late(void)
-{
-       printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers"
-               " will not wake from suspend");
-       return 0;
-}
-#endif
-late_initcall(alarmtimer_init_late);
index 1c95fd677328e4fe3d0e582fc62284d9ec150a68..e0980f0d9a0ad2d559b98c12f26317b55044cad1 100644 (file)
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 static DEFINE_SPINLOCK(watchdog_lock);
-static cycle_t watchdog_last;
 static int watchdog_running;
 
 static int clocksource_watchdog_kthread(void *data);
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
        if (!watchdog_running)
                goto out;
 
-       wdnow = watchdog->read(watchdog);
-       wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
-                                    watchdog->mult, watchdog->shift);
-       watchdog_last = wdnow;
-
        list_for_each_entry(cs, &watchdog_list, wd_list) {
 
                /* Clocksource already marked unstable? */
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
                        continue;
                }
 
+               local_irq_disable();
                csnow = cs->read(cs);
+               wdnow = watchdog->read(watchdog);
+               local_irq_enable();
 
                /* Clocksource initialized ? */
                if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
                        cs->flags |= CLOCK_SOURCE_WATCHDOG;
-                       cs->wd_last = csnow;
+                       cs->wd_last = wdnow;
+                       cs->cs_last = csnow;
                        continue;
                }
 
-               /* Check the deviation from the watchdog clocksource. */
-               cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
+               wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
+                                            watchdog->mult, watchdog->shift);
+
+               cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
                                             cs->mask, cs->mult, cs->shift);
-               cs->wd_last = csnow;
+               cs->cs_last = csnow;
+               cs->wd_last = wdnow;
+
+               /* Check the deviation from the watchdog clocksource. */
                if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
                        clocksource_unstable(cs, cs_nsec - wd_nsec);
                        continue;
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
                return;
        init_timer(&watchdog_timer);
        watchdog_timer.function = clocksource_watchdog;
-       watchdog_last = watchdog->read(watchdog);
        watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
        add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
        watchdog_running = 1;
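
With wd_last moved into each clocksource, both counters are sampled back
to back with interrupts off and every interval is computed against a
per-clocksource snapshot. The wraparound-safe delta underneath is plain
masked subtraction; a small sketch:

#include <stdint.h>
#include <stdio.h>

/* Interval between two reads of a free-running counter whose width is
 * described by `mask`; masking makes the subtraction wrap correctly. */
static uint64_t counter_delta(uint64_t now, uint64_t last, uint64_t mask)
{
        return (now - last) & mask;
}

int main(void)
{
        uint64_t mask = 0xffffffffULL;  /* 32-bit counter */

        /* counter wrapped between the reads: delta is still 7 */
        printf("%llu\n", (unsigned long long)
               counter_delta(0x00000005, 0xfffffffe, mask));
        return 0;
}
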
index dff763b7baf172d9d7b90c8f0dddc0427a83a1c4..1f06468a10d7a65ad8c0bffa8e13c0eaf44a8ad4 100644 (file)
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos)
        const char **fmt = v;
        int start_index;
 
-       if (!fmt)
-               fmt = __start___trace_bprintk_fmt + *pos;
-
        start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
 
        if (*pos < start_index)
-               return fmt;
+               return __start___trace_bprintk_fmt + *pos;
 
        return find_next_mod_format(start_index, v, fmt, pos);
 }
index 9d86e45086f54c06bd39f7e989593f4b3ccfa086..a78b7c6e042c9f64fb76bcb252236c456bf8a6a0 100644 (file)
@@ -198,7 +198,7 @@ static void free_object(struct debug_obj *obj)
         * initialized:
         */
        if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
-               sched = !work_pending(&debug_obj_work);
+               sched = keventd_up() && !work_pending(&debug_obj_work);
        hlist_add_head(&obj->node, &obj_pool);
        obj_pool_free++;
        obj_pool_used--;
index cf7d027a8844b115bcc6d213264707d220c6c3d9..e013b8e57d25cd198c410743adcadb8446176e75 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/limits.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
+#include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -107,10 +108,12 @@ enum mem_cgroup_events_index {
 enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
+       MEM_CGROUP_TARGET_NUMAINFO,
        MEM_CGROUP_NTARGETS,
 };
 #define THRESHOLDS_EVENTS_TARGET (128)
 #define SOFTLIMIT_EVENTS_TARGET (1024)
+#define NUMAINFO_EVENTS_TARGET (1024)
 
 struct mem_cgroup_stat_cpu {
        long count[MEM_CGROUP_STAT_NSTATS];
@@ -236,7 +239,8 @@ struct mem_cgroup {
        int last_scanned_node;
 #if MAX_NUMNODES > 1
        nodemask_t      scan_nodes;
-       unsigned long   next_scan_node_update;
+       atomic_t        numainfo_events;
+       atomic_t        numainfo_updating;
 #endif
        /*
         * Should the accounting and control be hierarchical, per subtree?
@@ -576,15 +580,6 @@ static long mem_cgroup_read_stat(struct mem_cgroup *mem,
        return val;
 }
 
-static long mem_cgroup_local_usage(struct mem_cgroup *mem)
-{
-       long ret;
-
-       ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
-       ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
-       return ret;
-}
-
 static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
                                         bool charge)
 {
@@ -688,6 +683,9 @@ static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
        case MEM_CGROUP_TARGET_SOFTLIMIT:
                next = val + SOFTLIMIT_EVENTS_TARGET;
                break;
+       case MEM_CGROUP_TARGET_NUMAINFO:
+               next = val + NUMAINFO_EVENTS_TARGET;
+               break;
        default:
                return;
        }
@@ -706,11 +704,19 @@ static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
                mem_cgroup_threshold(mem);
                __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
                if (unlikely(__memcg_event_check(mem,
-                       MEM_CGROUP_TARGET_SOFTLIMIT))){
+                            MEM_CGROUP_TARGET_SOFTLIMIT))) {
                        mem_cgroup_update_tree(mem, page);
                        __mem_cgroup_target_update(mem,
-                               MEM_CGROUP_TARGET_SOFTLIMIT);
+                                                  MEM_CGROUP_TARGET_SOFTLIMIT);
+               }
+#if MAX_NUMNODES > 1
+               if (unlikely(__memcg_event_check(mem,
+                       MEM_CGROUP_TARGET_NUMAINFO))) {
+                       atomic_inc(&mem->numainfo_events);
+                       __mem_cgroup_target_update(mem,
+                               MEM_CGROUP_TARGET_NUMAINFO);
                }
+#endif
        }
 }
 
@@ -1128,7 +1134,6 @@ unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
        return MEM_CGROUP_ZSTAT(mz, lru);
 }
 
-#ifdef CONFIG_NUMA
 static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
                                                        int nid)
 {
@@ -1140,6 +1145,17 @@ static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
        return ret;
 }
 
+static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
+                                                       int nid)
+{
+       unsigned long ret;
+
+       ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
+               mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
+       return ret;
+}
+
+#if MAX_NUMNODES > 1
 static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
 {
        u64 total = 0;
@@ -1151,17 +1167,6 @@ static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
        return total;
 }
 
-static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
-                                                       int nid)
-{
-       unsigned long ret;
-
-       ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
-               mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
-
-       return ret;
-}
-
 static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg)
 {
        u64 total = 0;
@@ -1558,6 +1563,28 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
        return ret;
 }
 
+/**
+ * test_mem_cgroup_node_reclaimable - check whether a node has reclaimable pages
+ * @mem: the target memcg
+ * @nid: the node ID to be checked.
+ * @noswap: specify true here if the user wants file-only information.
+ *
+ * This function returns true if the specified memcg contains any
+ * reclaimable pages on the given node.
+ */
+static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
+               int nid, bool noswap)
+{
+       if (mem_cgroup_node_nr_file_lru_pages(mem, nid))
+               return true;
+       if (noswap || !total_swap_pages)
+               return false;
+       if (mem_cgroup_node_nr_anon_lru_pages(mem, nid))
+               return true;
+       return false;
+}
 #if MAX_NUMNODES > 1
 
 /*
@@ -1569,26 +1596,26 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
 {
        int nid;
-
-       if (time_after(mem->next_scan_node_update, jiffies))
+       /*
+        * numainfo_events > 0 means there have been at least NUMAINFO_EVENTS_TARGET
+        * pagein/pageout events since the last update.
+        */
+       if (!atomic_read(&mem->numainfo_events))
+               return;
+       if (atomic_inc_return(&mem->numainfo_updating) > 1)
                return;
 
-       mem->next_scan_node_update = jiffies + 10*HZ;
        /* make a nodemask where this memcg uses memory from */
        mem->scan_nodes = node_states[N_HIGH_MEMORY];
 
        for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
 
-               if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
-                   mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
-                       continue;
-
-               if (total_swap_pages &&
-                   (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
-                    mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
-                       continue;
-               node_clear(nid, mem->scan_nodes);
+               if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
+                       node_clear(nid, mem->scan_nodes);
        }
+
+       atomic_set(&mem->numainfo_events, 0);
+       atomic_set(&mem->numainfo_updating, 0);
 }
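
The jiffies-based rate limit gives way to an event counter plus an
atomic single-updater guard: atomic_inc_return() > 1 means another CPU
is already rebuilding scan_nodes, and losing that race is harmless
because further events re-arm the update. A userspace sketch of the
guard using C11 atomics (fetch_add returns the old value, so the
equivalent test is > 0):

#include <stdatomic.h>

static atomic_int pending_events;
static atomic_int updating;

static void maybe_refresh(void (*rebuild)(void))
{
        if (!atomic_load(&pending_events))
                return;                 /* nothing changed since last pass */
        if (atomic_fetch_add(&updating, 1) > 0)
                return;                 /* another thread is rebuilding */

        rebuild();                      /* sole updater */

        atomic_store(&pending_events, 0);
        atomic_store(&updating, 0);
}
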
 
 /*
@@ -1626,11 +1653,51 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
        return node;
 }
 
+/*
+ * Check whether any node contains reclaimable pages.
+ * For a quick scan, we make use of scan_nodes, which lets us skip
+ * unused nodes. But scan_nodes is lazily updated and may not contain
+ * enough fresh information, so we double-check the remaining nodes.
+ */
+bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
+{
+       int nid;
+
+       /*
+        * Quick check, making use of scan_nodes:
+        * we can skip unused nodes.
+        */
+       if (!nodes_empty(mem->scan_nodes)) {
+               for (nid = first_node(mem->scan_nodes);
+                    nid < MAX_NUMNODES;
+                    nid = next_node(nid, mem->scan_nodes)) {
+
+                       if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
+                               return true;
+               }
+       }
+       /*
+        * Check rest of nodes.
+        */
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               if (node_isset(nid, mem->scan_nodes))
+                       continue;
+               if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
+                       return true;
+       }
+       return false;
+}
+
 #else
 int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
 {
        return 0;
 }
+
+bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
+{
+       return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
+}
 #endif
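
mem_cgroup_reclaimable() leans on the possibly stale cached nodemask for
speed but re-checks the nodes that the cache skipped, so staleness can
only cost time, never produce a wrong "nothing reclaimable" answer. The
shape, as a small sketch:

#include <stdbool.h>

#define MAX_NODES 8

static bool any_reclaimable(const bool cached[MAX_NODES],
                            bool (*check)(int node))
{
        int n;

        /* fast path: only the nodes the cache believes are in use */
        for (n = 0; n < MAX_NODES; n++)
                if (cached[n] && check(n))
                        return true;
        /* slow path: double-check everything the cache skipped */
        for (n = 0; n < MAX_NODES; n++)
                if (!cached[n] && check(n))
                        return true;
        return false;
}
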
 
 /*
@@ -1701,7 +1768,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                                }
                        }
                }
-               if (!mem_cgroup_local_usage(victim)) {
+               if (!mem_cgroup_reclaimable(victim, noswap)) {
                        /* this cgroup's local usage == 0 */
                        css_put(&victim->css);
                        continue;
index eac0ba5614912e57c7b83b22382bc1165138ef2a..740c4f52059cef1bff55fb4293ec41218967fc02 100644 (file)
@@ -391,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        struct task_struct *tsk;
        struct anon_vma *av;
 
-       read_lock(&tasklist_lock);
        av = page_lock_anon_vma(page);
        if (av == NULL) /* Not actually mapped anymore */
-               goto out;
+               return;
+
+       read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
 
@@ -408,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
-       page_unlock_anon_vma(av);
-out:
        read_unlock(&tasklist_lock);
+       page_unlock_anon_vma(av);
 }
 
 /*
@@ -424,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        struct prio_tree_iter iter;
        struct address_space *mapping = page->mapping;
 
-       /*
-        * A note on the locking order between the two locks.
-        * We don't rely on this particular order.
-        * If you have some other code that needs a different order
-        * feel free to switch them around. Or add a reverse link
-        * from mm_struct to task_struct, then this could be all
-        * done without taking tasklist_lock and looping over all tasks.
-        */
-
-       read_lock(&tasklist_lock);
        mutex_lock(&mapping->i_mmap_mutex);
+       read_lock(&tasklist_lock);
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -454,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
-       mutex_unlock(&mapping->i_mmap_mutex);
        read_unlock(&tasklist_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 }
 
 /*
index 87d935333f0dda3d477c5cacd1219b78e7b910e7..9b8a01d941cbc77dc0b1674fd817b54bb4f281f4 100644 (file)
@@ -305,6 +305,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return 0;
+               batch = tlb->active;
        }
        VM_BUG_ON(batch->nr > batch->max);
 
@@ -2798,30 +2799,6 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
-{
-       struct address_space *mapping = inode->i_mapping;
-
-       /*
-        * If the underlying filesystem is not going to provide
-        * a way to truncate a range of blocks (punch a hole) -
-        * we should return failure right now.
-        */
-       if (!inode->i_op->truncate_range)
-               return -ENOSYS;
-
-       mutex_lock(&inode->i_mutex);
-       down_write(&inode->i_alloc_sem);
-       unmap_mapping_range(mapping, offset, (end - offset), 1);
-       truncate_inode_pages_range(mapping, offset, end);
-       unmap_mapping_range(mapping, offset, (end - offset), 1);
-       inode->i_op->truncate_range(inode, offset, end);
-       up_write(&inode->i_alloc_sem);
-       mutex_unlock(&inode->i_mutex);
-
-       return 0;
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
index 02159c755136ea0463f57a2a951f9cad223aceb1..c46887b5a11eaa5bbe48008d16ad71bd37f7302f 100644 (file)
@@ -498,7 +498,9 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
         * The node we allocated has no zone fallback lists. For avoiding
         * to access not-initialized zonelist, build here.
         */
+       mutex_lock(&zonelists_mutex);
        build_all_zonelists(NULL);
+       mutex_unlock(&zonelists_mutex);
 
        return pgdat;
 }
@@ -521,7 +523,7 @@ int mem_online_node(int nid)
 
        lock_memory_hotplug();
        pgdat = hotadd_new_pgdat(nid, 0);
-       if (pgdat) {
+       if (!pgdat) {
                ret = -ENOMEM;
                goto out;
        }
index 1fd0c51b10a63db69181fc6ba2f42127aaad6ef7..9edc897a3970e3a22753e6bbed4ce0bdde9d0f19 100644 (file)
@@ -1813,10 +1813,13 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
        return NULL;
 }
 
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-               unsigned long to, unsigned long size, pgprot_t prot)
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size, pgprot_t prot)
 {
-       vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
+       if (addr != (pfn << PAGE_SHIFT))
+               return -EINVAL;
+
+       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
        return 0;
 }
 EXPORT_SYMBOL(remap_pfn_range);
index 0eb463ea88dd71326b70342f8492df873a6b5df1..23295f65ae43386574f9058e30e28cdca9a8a708 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -38,9 +38,8 @@
  *                           in arch-dependent flush_dcache_mmap_lock,
  *                           within inode_wb_list_lock in __sync_single_inode)
  *
- * (code doesn't rely on that order so it could be switched around)
- * ->tasklist_lock
- *   anon_vma->mutex      (memory_failure, collect_procs_anon)
+ * anon_vma->mutex, mapping->i_mutex      (memory_failure, collect_procs_anon)
+ *   ->tasklist_lock
  *     pte map lock
  */
 
@@ -112,9 +111,9 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
        kmem_cache_free(anon_vma_cachep, anon_vma);
 }
 
-static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
+static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
 {
-       return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
+       return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
 }
 
 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
@@ -159,7 +158,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated;
 
-               avc = anon_vma_chain_alloc();
+               avc = anon_vma_chain_alloc(GFP_KERNEL);
                if (!avc)
                        goto out_enomem;
 
@@ -200,6 +199,32 @@ int anon_vma_prepare(struct vm_area_struct *vma)
        return -ENOMEM;
 }
 
+/*
+ * This is a useful helper function for locking the anon_vma root as
+ * we traverse the vma->anon_vma_chain, looping over anon_vma's that
+ * have the same vma.
+ *
+ * Such anon_vma's should have the same root, so you'd expect to see
+ * just a single mutex_lock for the whole traversal.
+ */
+static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
+{
+       struct anon_vma *new_root = anon_vma->root;
+       if (new_root != root) {
+               if (WARN_ON_ONCE(root))
+                       mutex_unlock(&root->mutex);
+               root = new_root;
+               mutex_lock(&root->mutex);
+       }
+       return root;
+}
+
+static inline void unlock_anon_vma_root(struct anon_vma *root)
+{
+       if (root)
+               mutex_unlock(&root->mutex);
+}
+
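
lock_anon_vma_root() amortizes locking across a traversal: keep the
current root's mutex held while consecutive items share it, and only
unlock/relock when the root actually changes. A userspace sketch of the
same batched-lock helper pair; a caller runs `held = lock_root(held, n)`
per item and finishes with `unlock_root(held)`:

#include <pthread.h>
#include <stddef.h>

struct node {
        pthread_mutex_t *root;  /* lock shared by a group of nodes */
};

static pthread_mutex_t *lock_root(pthread_mutex_t *held, struct node *n)
{
        if (n->root != held) {
                if (held)
                        pthread_mutex_unlock(held);
                held = n->root;
                pthread_mutex_lock(held);
        }
        return held;            /* usually unchanged: no lock traffic */
}

static void unlock_root(pthread_mutex_t *held)
{
        if (held)
                pthread_mutex_unlock(held);
}
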
 static void anon_vma_chain_link(struct vm_area_struct *vma,
                                struct anon_vma_chain *avc,
                                struct anon_vma *anon_vma)
@@ -208,13 +233,11 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
        avc->anon_vma = anon_vma;
        list_add(&avc->same_vma, &vma->anon_vma_chain);
 
-       anon_vma_lock(anon_vma);
        /*
         * It's critical to add new vmas to the tail of the anon_vma,
         * see comment in huge_memory.c:__split_huge_page().
         */
        list_add_tail(&avc->same_anon_vma, &anon_vma->head);
-       anon_vma_unlock(anon_vma);
 }
 
 /*
@@ -224,13 +247,24 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
        struct anon_vma_chain *avc, *pavc;
+       struct anon_vma *root = NULL;
 
        list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
-               avc = anon_vma_chain_alloc();
-               if (!avc)
-                       goto enomem_failure;
-               anon_vma_chain_link(dst, avc, pavc->anon_vma);
+               struct anon_vma *anon_vma;
+
+               avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
+               if (unlikely(!avc)) {
+                       unlock_anon_vma_root(root);
+                       root = NULL;
+                       avc = anon_vma_chain_alloc(GFP_KERNEL);
+                       if (!avc)
+                               goto enomem_failure;
+               }
+               anon_vma = pavc->anon_vma;
+               root = lock_anon_vma_root(root, anon_vma);
+               anon_vma_chain_link(dst, avc, anon_vma);
        }
+       unlock_anon_vma_root(root);
        return 0;
 
  enomem_failure:
@@ -263,7 +297,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        anon_vma = anon_vma_alloc();
        if (!anon_vma)
                goto out_error;
-       avc = anon_vma_chain_alloc();
+       avc = anon_vma_chain_alloc(GFP_KERNEL);
        if (!avc)
                goto out_error_free_anon_vma;
 
@@ -280,7 +314,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        get_anon_vma(anon_vma->root);
        /* Mark this anon_vma as the one where our new (COWed) pages go. */
        vma->anon_vma = anon_vma;
+       anon_vma_lock(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
+       anon_vma_unlock(anon_vma);
 
        return 0;
 
@@ -291,36 +327,43 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        return -ENOMEM;
 }
 
-static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
-{
-       struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
-       int empty;
-
-       /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
-       if (!anon_vma)
-               return;
-
-       anon_vma_lock(anon_vma);
-       list_del(&anon_vma_chain->same_anon_vma);
-
-       /* We must garbage collect the anon_vma if it's empty */
-       empty = list_empty(&anon_vma->head);
-       anon_vma_unlock(anon_vma);
-
-       if (empty)
-               put_anon_vma(anon_vma);
-}
-
 void unlink_anon_vmas(struct vm_area_struct *vma)
 {
        struct anon_vma_chain *avc, *next;
+       struct anon_vma *root = NULL;
 
        /*
         * Unlink each anon_vma chained to the VMA.  This list is ordered
         * from newest to oldest, ensuring the root anon_vma gets freed last.
         */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
-               anon_vma_unlink(avc);
+               struct anon_vma *anon_vma = avc->anon_vma;
+
+               root = lock_anon_vma_root(root, anon_vma);
+               list_del(&avc->same_anon_vma);
+
+               /*
+                * Leave empty anon_vmas on the list - we'll need
+                * to free them outside the lock.
+                */
+               if (list_empty(&anon_vma->head))
+                       continue;
+
+               list_del(&avc->same_vma);
+               anon_vma_chain_free(avc);
+       }
+       unlock_anon_vma_root(root);
+
+       /*
+        * Iterate the list once more, it now only contains empty and unlinked
+        * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
+        * needing to acquire the anon_vma->root->mutex.
+        */
+       list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+               struct anon_vma *anon_vma = avc->anon_vma;
+
+               put_anon_vma(anon_vma);
+
                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
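
unlink_anon_vmas() now works in two passes because put_anon_vma() may
itself need the root mutex: detach everything while the lock is held,
park the entries whose teardown would re-take it, and deal with those
only after unlocking. A userspace sketch of that two-pass shape:

#include <pthread.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
        int needs_teardown;
};

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

static void teardown(struct entry *e)
{
        pthread_mutex_lock(&shared_lock);       /* teardown needs the lock */
        /* ... final cleanup of e ... */
        pthread_mutex_unlock(&shared_lock);
        free(e);
}

static void unlink_all(struct entry **list)
{
        struct entry *deferred = NULL, *e, *next;

        pthread_mutex_lock(&shared_lock);
        for (e = *list; e; e = next) {          /* pass 1: unlink */
                next = e->next;
                if (e->needs_teardown) {
                        e->next = deferred;     /* park for pass 2 */
                        deferred = e;
                } else {
                        free(e);                /* safe under the lock */
                }
        }
        *list = NULL;
        pthread_mutex_unlock(&shared_lock);

        for (e = deferred; e; e = next) {       /* pass 2: lock dropped */
                next = e->next;
                teardown(e);
        }
}
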
index d221a1cfd7b196175ab9134aac147b22400c13e6..fcedf5464eb79dba02ad485681fcc67530bdb720 100644 (file)
@@ -539,7 +539,7 @@ static void shmem_free_pages(struct list_head *next)
        } while (next);
 }
 
-static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long idx;
@@ -562,6 +562,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
        spinlock_t *punch_lock;
        unsigned long upper_limit;
 
+       truncate_inode_pages_range(inode->i_mapping, start, end);
+
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (idx >= info->next_index)
@@ -738,16 +740,8 @@ done2:
                 * lowered next_index.  Also, though shmem_getpage checks
                 * i_size before adding to cache, no recheck after: so fix the
                 * narrow window there too.
-                *
-                * Recalling truncate_inode_pages_range and unmap_mapping_range
-                * every time for punch_hole (which never got a chance to clear
-                * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
-                * yet hardly ever necessary: try to optimize them out later.
                 */
                truncate_inode_pages_range(inode->i_mapping, start, end);
-               if (punch_hole)
-                       unmap_mapping_range(inode->i_mapping, start,
-                                                       end - start, 1);
        }
 
        spin_lock(&info->lock);
@@ -766,22 +760,23 @@ done2:
                shmem_free_pages(pages_to_free.next);
        }
 }
+EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
-static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
+static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 {
        struct inode *inode = dentry->d_inode;
-       loff_t newsize = attr->ia_size;
        int error;
 
        error = inode_change_ok(inode, attr);
        if (error)
                return error;
 
-       if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
-                                       && newsize != inode->i_size) {
+       if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
+               loff_t oldsize = inode->i_size;
+               loff_t newsize = attr->ia_size;
                struct page *page = NULL;
 
-               if (newsize < inode->i_size) {
+               if (newsize < oldsize) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
@@ -810,12 +805,19 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
                                spin_unlock(&info->lock);
                        }
                }
-
-               /* XXX(truncate): truncate_setsize should be called last */
-               truncate_setsize(inode, newsize);
+               if (newsize != oldsize) {
+                       i_size_write(inode, newsize);
+                       inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+               }
+               if (newsize < oldsize) {
+                       loff_t holebegin = round_up(newsize, PAGE_SIZE);
+                       unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+                       shmem_truncate_range(inode, newsize, (loff_t)-1);
+                       /* unmap again to remove racily COWed private pages */
+                       unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+               }
                if (page)
                        page_cache_release(page);
-               shmem_truncate_range(inode, newsize, (loff_t)-1);
        }
 
        setattr_copy(inode, attr);
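
Condensed, the shrink path that replaces truncate_setsize() runs in a
fixed order; the snippet below is a restatement of the hunk above, a
sketch rather than the literal diff:

if (newsize < oldsize) {
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        i_size_write(inode, newsize);           /* 1. publish new size     */
        unmap_mapping_range(inode->i_mapping,
                            holebegin, 0, 1);   /* 2. drop mappings        */
        shmem_truncate_range(inode, newsize,
                             (loff_t)-1);       /* 3. free the pages       */
        unmap_mapping_range(inode->i_mapping,
                            holebegin, 0, 1);   /* 4. catch racy COW pages */
}
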
@@ -832,7 +834,6 @@ static void shmem_evict_inode(struct inode *inode)
        struct shmem_xattr *xattr, *nxattr;
 
        if (inode->i_mapping->a_ops == &shmem_aops) {
-               truncate_inode_pages(inode->i_mapping, 0);
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
@@ -2706,7 +2707,7 @@ static const struct file_operations shmem_file_operations = {
 };
 
 static const struct inode_operations shmem_inode_operations = {
-       .setattr        = shmem_notify_change,
+       .setattr        = shmem_setattr,
        .truncate_range = shmem_truncate_range,
 #ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
@@ -2739,7 +2740,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
        .removexattr    = shmem_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-       .setattr        = shmem_notify_change,
+       .setattr        = shmem_setattr,
        .check_acl      = generic_check_acl,
 #endif
 };
@@ -2752,7 +2753,7 @@ static const struct inode_operations shmem_special_inode_operations = {
        .removexattr    = shmem_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-       .setattr        = shmem_notify_change,
+       .setattr        = shmem_setattr,
        .check_acl      = generic_check_acl,
 #endif
 };
@@ -2908,6 +2909,12 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
        return 0;
 }
 
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+{
+       truncate_inode_pages_range(inode->i_mapping, start, end);
+}
+EXPORT_SYMBOL_GPL(shmem_truncate_range);
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /**
  * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
@@ -3028,3 +3035,26 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
 }
+
+/**
+ * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:   the page's address_space
+ * @index:     the page index
+ * @gfp:       the page allocator flags to use if allocating
+ *
+ * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
+ * with any new page allocations done using the specified allocation flags.
+ * But read_cache_page_gfp() uses the ->readpage() method, which does not
+ * suit tmpfs, since it may have pages in swapcache and needs to find those
+ * for itself, although drivers/gpu/drm i915 and ttm rely upon this support.
+ *
+ * Provide a stub for those callers to start using now, then later
+ * flesh it out to call shmem_getpage() with additional gfp mask, when
+ * shmem_file_splice_read() is added and shmem_readpage() is removed.
+ */
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+                                        pgoff_t index, gfp_t gfp)
+{
+       return read_cache_page_gfp(mapping, index, gfp);
+}
+EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
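
A hedged sketch of a caller in the i915/ttm mould that the comment above
anticipates; get_object_page() is hypothetical, while the helper and its
signature come straight from the hunk:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

static struct page *get_object_page(struct address_space *mapping,
                                    pgoff_t index)
{
        struct page *page;

        page = shmem_read_mapping_page_gfp(mapping, index, GFP_HIGHUSER);
        if (IS_ERR(page))
                return page;            /* caller checks with IS_ERR() */
        /* the page comes back with a reference held; drop it with
         * page_cache_release() when done */
        return page;
}
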
index d537d29e9b7bb5d9364b531e17f972618a5cad4b..ff8dc1a18cb4fb6e15dac52b3dff0c491337f9af 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/namei.h>
-#include <linux/shm.h>
+#include <linux/shmem_fs.h>
 #include <linux/blkdev.h>
 #include <linux/random.h>
 #include <linux/writeback.h>
index 3a29a6180212d7bcf25ab0d6fd97acdb16fc8f42..e13f22efaad741bfb0268ba577c3d34807769201 100644 (file)
@@ -304,6 +304,11 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
  * @lstart: offset from which to truncate
  *
  * Called under (and serialised by) inode->i_mutex.
+ *
+ * Note: When this function returns, there can be a page in the process of
+ * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
+ * mapping->nrpages can be non-zero when this function returns even after
+ * truncation of the whole mapping.
  */
 void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
 {
@@ -603,3 +608,27 @@ int vmtruncate(struct inode *inode, loff_t offset)
        return 0;
 }
 EXPORT_SYMBOL(vmtruncate);
+
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+       struct address_space *mapping = inode->i_mapping;
+
+       /*
+        * If the underlying filesystem is not going to provide
+        * a way to truncate a range of blocks (punch a hole) -
+        * we should return failure right now.
+        */
+       if (!inode->i_op->truncate_range)
+               return -ENOSYS;
+
+       mutex_lock(&inode->i_mutex);
+       down_write(&inode->i_alloc_sem);
+       unmap_mapping_range(mapping, offset, (end - offset), 1);
+       inode->i_op->truncate_range(inode, offset, end);
+       /* unmap again to remove racily COWed private pages */
+       unmap_mapping_range(mapping, offset, (end - offset), 1);
+       up_write(&inode->i_alloc_sem);
+       mutex_unlock(&inode->i_mutex);
+
+       return 0;
+}
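
From userspace this path is reached through madvise(MADV_REMOVE) on a
filesystem whose inodes implement ->truncate_range (tmpfs here). A
hedged demo, assuming /dev/shm is a mounted tmpfs:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int punch_hole_demo(void)
{
        int ret = -1;
        int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);

        if (fd < 0)
                return -1;
        if (ftruncate(fd, 1 << 20) == 0) {
                void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, 0);
                if (p != MAP_FAILED) {
                        /* drop the middle 256 KiB of backing store */
                        ret = madvise((char *)p + (1 << 18), 1 << 18,
                                      MADV_REMOVE);
                        munmap(p, 1 << 20);
                }
        }
        close(fd);
        unlink("/dev/shm/demo");
        return ret;
}
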
index 8ff834e19c2460d33844e43154b058974a6c9849..d036e59d302b092bc4186dbb753066416e35c9f6 100644 (file)
@@ -1995,14 +1995,13 @@ restart:
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
        unsigned long nr_soft_reclaimed;
        unsigned long nr_soft_scanned;
-       unsigned long total_scanned = 0;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2017,19 +2016,23 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
                                continue;
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
+                       /*
+                        * This steals pages from memory cgroups over their soft limit
+                        * and returns the number of reclaimed pages and
+                        * scanned pages. This works for global memory pressure
+                        * and balancing, not for a memcg's limit.
+                        */
+                       nr_soft_scanned = 0;
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                               sc->order, sc->gfp_mask,
+                                               &nr_soft_scanned);
+                       sc->nr_reclaimed += nr_soft_reclaimed;
+                       sc->nr_scanned += nr_soft_scanned;
+                       /* need some check to avoid calling shrink_zone() again */
                }
 
-               nr_soft_scanned = 0;
-               nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
-                                                       sc->order, sc->gfp_mask,
-                                                       &nr_soft_scanned);
-               sc->nr_reclaimed += nr_soft_reclaimed;
-               total_scanned += nr_soft_scanned;
-
                shrink_zone(priority, zone, sc);
        }
-
-       return total_scanned;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2094,7 +2097,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token(sc->mem_cgroup);
-               total_scanned += shrink_zones(priority, zonelist, sc);
+               shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
@@ -2307,7 +2310,8 @@ static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
        for (i = 0; i <= classzone_idx; i++)
                present_pages += pgdat->node_zones[i].present_pages;
 
-       return balanced_pages > (present_pages >> 2);
+       /* A special case: a zone with no pages is considered balanced */
+       return balanced_pages >= (present_pages >> 2);
 }
 
 /* is kswapd sleeping prematurely? */
@@ -2323,7 +2327,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
                return true;
 
        /* Check the watermark levels */
-       for (i = 0; i < pgdat->nr_zones; i++) {
+       for (i = 0; i <= classzone_idx; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
                if (!populated_zone(zone))
@@ -2341,7 +2345,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
                }
 
                if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-                                                       classzone_idx, 0))
+                                                       i, 0))
                        all_zones_ok = false;
                else
                        balanced += zone->present_pages;
@@ -2448,7 +2452,6 @@ loop_again:
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
-                               *classzone_idx = i;
                                break;
                        }
                }
@@ -2507,18 +2510,18 @@ loop_again:
                                KSWAPD_ZONE_BALANCE_GAP_RATIO);
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone) + balance_gap,
-                                       end_zone, 0))
+                                       end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);
-                       reclaim_state->reclaimed_slab = 0;
-                       nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
-                       sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-                       total_scanned += sc.nr_scanned;
 
-                       if (zone->all_unreclaimable)
-                               continue;
-                       if (nr_slab == 0 &&
-                           !zone_reclaimable(zone))
-                               zone->all_unreclaimable = 1;
+                               reclaim_state->reclaimed_slab = 0;
+                               nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
+                               sc.nr_reclaimed += reclaim_state->reclaimed_slab;
+                               total_scanned += sc.nr_scanned;
+
+                               if (nr_slab == 0 && !zone_reclaimable(zone))
+                                       zone->all_unreclaimable = 1;
+                       }
+
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
@@ -2528,6 +2531,12 @@ loop_again:
                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
 
+                       if (zone->all_unreclaimable) {
+                               if (end_zone && end_zone == i)
+                                       end_zone--;
+                               continue;
+                       }
+
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), end_zone, 0)) {
                                all_zones_ok = 0;
@@ -2706,8 +2715,8 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
  */
 static int kswapd(void *p)
 {
-       unsigned long order;
-       int classzone_idx;
+       unsigned long order, new_order;
+       int classzone_idx, new_classzone_idx;
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
 
@@ -2737,17 +2746,23 @@ static int kswapd(void *p)
        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
        set_freezable();
 
-       order = 0;
-       classzone_idx = MAX_NR_ZONES - 1;
+       order = new_order = 0;
+       classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
        for ( ; ; ) {
-               unsigned long new_order;
-               int new_classzone_idx;
                int ret;
 
-               new_order = pgdat->kswapd_max_order;
-               new_classzone_idx = pgdat->classzone_idx;
-               pgdat->kswapd_max_order = 0;
-               pgdat->classzone_idx = MAX_NR_ZONES - 1;
+               /*
+                * If the last balance_pgdat was unsuccessful it's unlikely a
+                * new request of a similar or harder type will succeed soon,
+                * so consider going to sleep based on the order and classzone we reclaimed at.
+                */
+               if (classzone_idx >= new_classzone_idx && order == new_order) {
+                       new_order = pgdat->kswapd_max_order;
+                       new_classzone_idx = pgdat->classzone_idx;
+                       pgdat->kswapd_max_order = 0;
+                       pgdat->classzone_idx = pgdat->nr_zones - 1;
+               }
+
                if (order < new_order || classzone_idx > new_classzone_idx) {
                        /*
                         * Don't sleep if someone wants a larger 'order'
@@ -2760,7 +2775,7 @@ static int kswapd(void *p)
                        order = pgdat->kswapd_max_order;
                        classzone_idx = pgdat->classzone_idx;
                        pgdat->kswapd_max_order = 0;
-                       pgdat->classzone_idx = MAX_NR_ZONES - 1;
+                       pgdat->classzone_idx = pgdat->nr_zones - 1;
                }
 
                ret = try_to_freeze();
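
The kswapd rework above only picks up a new reclaim request once the previous balance_pgdat() run satisfied the order and classzone it was asked for. A minimal standalone sketch of that pickup rule, with hypothetical values standing in for the pgdat fields:

#include <stdio.h>

/* Sketch of the request-pickup rule from the kswapd loop above: only
 * read a new order/classzone request when the previous balance attempt
 * was not harder than what we actually achieved. */
static int should_take_new_request(int classzone_idx, int new_classzone_idx,
				   unsigned long order, unsigned long new_order)
{
	return classzone_idx >= new_classzone_idx && order == new_order;
}

int main(void)
{
	/* hypothetical: we balanced order-0 up to zone index 2 */
	printf("%d\n", should_take_new_request(2, 2, 0, 0)); /* 1: take new request */
	printf("%d\n", should_take_new_request(1, 2, 0, 0)); /* 0: last run fell short */
	return 0;
}
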
index c7a581a96894c7193ff75c48573976aad6133255..917ecb93ea28d477d75c729accac39eacbdfa752 100644 (file)
@@ -205,7 +205,7 @@ int register_vlan_dev(struct net_device *dev)
        grp->nr_vlans++;
 
        if (ngrp) {
-               if (ops->ndo_vlan_rx_register)
+               if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
                        ops->ndo_vlan_rx_register(real_dev, ngrp);
                rcu_assign_pointer(real_dev->vlgrp, ngrp);
        }
index 7ea5cf9ea08a9db7f3b6edaf083f14030914463c..6e82148edfc8c1009adec988e57c35a224111e3c 100644 (file)
@@ -528,7 +528,11 @@ static int vlan_dev_init(struct net_device *dev)
                                          (1<<__LINK_STATE_DORMANT))) |
                      (1<<__LINK_STATE_PRESENT);
 
-       dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
+       dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
+                          NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
+                          NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
+                          NETIF_F_ALL_FCOE;
+
        dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
        dev->gso_max_size = real_dev->gso_max_size;
 
@@ -586,9 +590,14 @@ static void vlan_dev_uninit(struct net_device *dev)
 static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
 {
        struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       u32 old_features = features;
 
        features &= real_dev->features;
        features &= real_dev->vlan_features;
+
+       if (old_features & NETIF_F_SOFT_FEATURES)
+               features |= old_features & NETIF_F_SOFT_FEATURES;
+
        if (dev_ethtool_get_rx_csum(real_dev))
                features |= NETIF_F_RXCSUM;
        features |= NETIF_F_LLTX;
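
vlan_dev_fix_features() now intersects the requested feature set with both real_dev->features and real_dev->vlan_features, then restores software features that need no hardware help. A sketch of that masking with made-up feature bits (the values below are placeholders, not the real NETIF_F_* constants; the conditional restore in the patch is equivalent to the unconditional OR-with-mask here):

#include <stdio.h>
#include <stdint.h>

#define F_HW_CSUM   0x1   /* placeholder for a hardware feature */
#define F_TSO       0x2   /* placeholder for a hardware feature */
#define F_SOFT_GSO  0x4   /* stands in for NETIF_F_SOFT_FEATURES */

static uint32_t vlan_fix_features(uint32_t requested,
				  uint32_t real_features,
				  uint32_t real_vlan_features)
{
	uint32_t old = requested;

	requested &= real_features;
	requested &= real_vlan_features;
	/* software features work regardless of the underlying device */
	requested |= old & F_SOFT_GSO;
	return requested;
}

int main(void)
{
	uint32_t f = vlan_fix_features(F_HW_CSUM | F_TSO | F_SOFT_GSO,
				       F_HW_CSUM, F_HW_CSUM);
	printf("0x%x\n", f); /* 0x5: csum kept, TSO dropped, soft GSO restored */
	return 0;
}
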
index 3163330cd4f1388e3c0b5d593b5e54ca75ba0c12..bcd158f40bb9e4d7a7fa5c167c1281779d60847d 100644 (file)
@@ -393,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn)
 
        hci_dev_put(hdev);
 
+       if (conn->handle == 0)
+               kfree(conn);
+
        return 0;
 }
 
@@ -608,11 +611,11 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                goto encrypt;
 
 auth:
-       if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+       if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;
 
-       hci_conn_auth(conn, sec_level, auth_type);
-       return 0;
+       if (!hci_conn_auth(conn, sec_level, auth_type))
+               return 0;
 
 encrypt:
        if (conn->link_mode & HCI_LM_ENCRYPT)
index f13ddbf858ba4e90dd92b891d1b13d3fece1d496..77930aa522e3456231d6482f0683d60598771937 100644 (file)
@@ -477,14 +477,16 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
         * command otherwise */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
 
-       /* Events for 1.2 and newer controllers */
-       if (hdev->lmp_ver > 1) {
-               events[4] |= 0x01; /* Flow Specification Complete */
-               events[4] |= 0x02; /* Inquiry Result with RSSI */
-               events[4] |= 0x04; /* Read Remote Extended Features Complete */
-               events[5] |= 0x08; /* Synchronous Connection Complete */
-               events[5] |= 0x10; /* Synchronous Connection Changed */
-       }
+       /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
+        * any event mask for pre-1.2 devices */
+       if (hdev->lmp_ver <= 1)
+               return;
+
+       events[4] |= 0x01; /* Flow Specification Complete */
+       events[4] |= 0x02; /* Inquiry Result with RSSI */
+       events[4] |= 0x04; /* Read Remote Extended Features Complete */
+       events[5] |= 0x08; /* Synchronous Connection Complete */
+       events[5] |= 0x10; /* Synchronous Connection Changed */
 
        if (hdev->features[3] & LMP_RSSI_INQ)
                events[4] |= 0x04; /* Inquiry Result with RSSI */
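
For reference, the event mask manipulated above is an 8-byte bitfield handed to the controller; the patch simply returns early for pre-1.2 devices instead of wrapping the bit updates in a conditional. A standalone sketch that reproduces the resulting mask bytes (the lmp_ver value is hypothetical, and the conditional form here is logically equivalent to the early return in the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* default mask from the hunk above */
	uint8_t events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
	int lmp_ver = 2; /* hypothetical: a 1.2-or-newer controller */

	if (lmp_ver > 1) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}
	for (int i = 0; i < 8; i++)
		printf("%02x ", events[i]);
	printf("\n"); /* ff ff fb ff 07 18 00 00 */
	return 0;
}
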
index c405a954a603341b52b51a98592ecccd618398e9..43b4c2deb7cc05bdc875e3f7b23a999468f5c1a4 100644 (file)
@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg)
 {
        struct hidp_session *session = (struct hidp_session *) arg;
 
-       kthread_stop(session->task);
+       atomic_inc(&session->terminate);
+       wake_up_process(session->task);
 }
 
 static void hidp_set_timer(struct hidp_session *session)
@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
                skb_queue_purge(&session->ctrl_transmit);
                skb_queue_purge(&session->intr_transmit);
 
-               kthread_stop(session->task);
+               atomic_inc(&session->terminate);
+               wake_up_process(current);
        }
 }
 
@@ -706,9 +708,8 @@ static int hidp_session(void *arg)
        add_wait_queue(sk_sleep(intr_sk), &intr_wait);
        session->waiting_for_startup = 0;
        wake_up_interruptible(&session->startup_queue);
-       while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
-
+       set_current_state(TASK_INTERRUPTIBLE);
+       while (!atomic_read(&session->terminate)) {
                if (ctrl_sk->sk_state != BT_CONNECTED ||
                                intr_sk->sk_state != BT_CONNECTED)
                        break;
@@ -726,6 +727,7 @@ static int hidp_session(void *arg)
                hidp_process_transmit(session);
 
                schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 err_add_device:
        hid_destroy_device(session->hid);
        session->hid = NULL;
-       kthread_stop(session->task);
+       atomic_inc(&session->terminate);
+       wake_up_process(session->task);
 
 unlink:
        hidp_del_timer(session);
@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
                        skb_queue_purge(&session->ctrl_transmit);
                        skb_queue_purge(&session->intr_transmit);
 
-                       kthread_stop(session->task);
+                       atomic_inc(&session->terminate);
+                       wake_up_process(session->task);
                }
        } else
                err = -ENOENT;
index 19e95004b28654fb14bb009d40efeec81b56eca1..af1bcc823f26d8196587be3624b8f091f29f1d8f 100644 (file)
@@ -142,6 +142,7 @@ struct hidp_session {
        uint ctrl_mtu;
        uint intr_mtu;
 
+       atomic_t terminate;
        struct task_struct *task;
 
        unsigned char keys[8];
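
The HIDP hunks replace kthread_stop() with an atomic terminate flag plus wake_up_process(), and the session loop now sets TASK_INTERRUPTIBLE before testing the flag so a wakeup cannot slip in between the test and schedule(). A userspace analogue of that lost-wakeup-safe shutdown, using pthreads (all names here are hypothetical, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int terminate_flag;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Analogue of hidp_session(): the flag is re-checked with the lock held
 * before every sleep, like the kernel loop that sets TASK_INTERRUPTIBLE
 * before testing session->terminate. */
static void *session(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!atomic_load(&terminate_flag))
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("session thread exiting");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, session, NULL);
	sleep(1);
	/* Hold the lock while setting the flag and signalling, closing the
	 * same lost-wakeup window the patch closes in the kernel. */
	pthread_mutex_lock(&lock);
	atomic_store(&terminate_flag, 1); /* like atomic_inc(&session->terminate) */
	pthread_cond_signal(&cond);       /* like wake_up_process(session->task) */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
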
index e64a1c2df238d3fc0aac368075b8b0fd3bbb6917..7705e26e699f158f028c773a53d61ee5b08dba84 100644 (file)
@@ -620,7 +620,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                                        struct sock *parent = bt_sk(sk)->parent;
                                        rsp.result = cpu_to_le16(L2CAP_CR_PEND);
                                        rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
-                                       parent->sk_data_ready(parent, 0);
+                                       if (parent)
+                                               parent->sk_data_ready(parent, 0);
 
                                } else {
                                        sk->sk_state = BT_CONFIG;
@@ -2323,7 +2324,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
        sk = chan->sk;
 
-       if (sk->sk_state != BT_CONFIG) {
+       if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
                struct l2cap_cmd_rej rej;
 
                rej.reason = cpu_to_le16(0x0002);
@@ -2334,7 +2335,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
        /* Reject if config buffer is too small. */
        len = cmd_len - sizeof(*req);
-       if (chan->conf_len + len > sizeof(chan->conf_req)) {
+       if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
                l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
                                l2cap_build_conf_rsp(chan, rsp,
                                        L2CAP_CONF_REJECT, flags), rsp);
@@ -4002,21 +4003,31 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                        }
                } else if (sk->sk_state == BT_CONNECT2) {
                        struct l2cap_conn_rsp rsp;
-                       __u16 result;
+                       __u16 res, stat;
 
                        if (!status) {
-                               sk->sk_state = BT_CONFIG;
-                               result = L2CAP_CR_SUCCESS;
+                               if (bt_sk(sk)->defer_setup) {
+                                       struct sock *parent = bt_sk(sk)->parent;
+                                       res = L2CAP_CR_PEND;
+                                       stat = L2CAP_CS_AUTHOR_PEND;
+                                       if (parent)
+                                               parent->sk_data_ready(parent, 0);
+                               } else {
+                                       sk->sk_state = BT_CONFIG;
+                                       res = L2CAP_CR_SUCCESS;
+                                       stat = L2CAP_CS_NO_INFO;
+                               }
                        } else {
                                sk->sk_state = BT_DISCONN;
                                l2cap_sock_set_timer(sk, HZ / 10);
-                               result = L2CAP_CR_SEC_BLOCK;
+                               res = L2CAP_CR_SEC_BLOCK;
+                               stat = L2CAP_CS_NO_INFO;
                        }
 
                        rsp.scid   = cpu_to_le16(chan->dcid);
                        rsp.dcid   = cpu_to_le16(chan->scid);
-                       rsp.result = cpu_to_le16(result);
-                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+                       rsp.result = cpu_to_le16(res);
+                       rsp.status = cpu_to_le16(stat);
                        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
                                                        sizeof(rsp), &rsp);
                }
index 18dc9888d8c28064f9a32bd129c7a23d6711af04..8248303f44e892f66ba5af8841f094696e88f8cd 100644 (file)
@@ -413,6 +413,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
                        break;
                }
 
+               memset(&cinfo, 0, sizeof(cinfo));
                cinfo.hci_handle = chan->conn->hcon->handle;
                memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
 
index 386cfaffd4b7569e919c933f89192eeb30cf14e2..1b10727ce523e077c1f571910275e085b6ee32f6 100644 (file)
@@ -788,6 +788,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
 
                l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
 
+               memset(&cinfo, 0, sizeof(cinfo));
                cinfo.hci_handle = conn->hcon->handle;
                memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
 
index 42fdffd1d76c0c17c5ab3382b94ea5bf4fcc62c9..cb4fb7837e5c381736b257cf77917ac6992df3b7 100644 (file)
@@ -369,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
 
        case BT_CONNECTED:
        case BT_CONFIG:
+               if (sco_pi(sk)->conn) {
+                       sk->sk_state = BT_DISCONN;
+                       sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
+                       hci_conn_put(sco_pi(sk)->conn->hcon);
+                       sco_pi(sk)->conn->hcon = NULL;
+               } else
+                       sco_chan_del(sk, ECONNRESET);
+               break;
+
        case BT_CONNECT:
        case BT_DISCONN:
                sco_chan_del(sk, ECONNRESET);
@@ -819,7 +828,9 @@ static void sco_chan_del(struct sock *sk, int err)
                conn->sk = NULL;
                sco_pi(sk)->conn = NULL;
                sco_conn_unlock(conn);
-               hci_conn_put(conn->hcon);
+
+               if (conn->hcon)
+                       hci_conn_put(conn->hcon);
        }
 
        sk->sk_state = BT_CLOSED;
index a6b2f86378c75437bba1300d23acc3b9ca188a58..32b8f9f7f79e4a5ce555e4c803f7ecd79878006e 100644 (file)
@@ -49,7 +49,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_pull(skb, ETH_HLEN);
 
        rcu_read_lock();
-       if (is_multicast_ether_addr(dest)) {
+       if (is_broadcast_ether_addr(dest))
+               br_flood_deliver(br, skb);
+       else if (is_multicast_ether_addr(dest)) {
                if (unlikely(netpoll_tx_running(dev))) {
                        br_flood_deliver(br, skb);
                        goto out;
@@ -243,6 +245,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
                goto out;
 
        np->dev = p->dev;
+       strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
 
        err = __netpoll_setup(np);
        if (err) {
index f3ac1e858ee1c321484fc59a67451efab47e0f0d..f06ee39c73fd64dfe958e6f94e735e60e3ba0b78 100644 (file)
@@ -60,7 +60,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
        br = p->br;
        br_fdb_update(br, p, eth_hdr(skb)->h_source);
 
-       if (is_multicast_ether_addr(dest) &&
+       if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
            br_multicast_rcv(br, p, skb))
                goto drop;
 
@@ -77,7 +77,9 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
        dst = NULL;
 
-       if (is_multicast_ether_addr(dest)) {
+       if (is_broadcast_ether_addr(dest))
+               skb2 = skb;
+       else if (is_multicast_ether_addr(dest)) {
                mdst = br_mdb_get(br, skb);
                if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
                        if ((mdst && mdst->mglist) ||
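
Broadcast addresses also have the Ethernet group bit set, so is_multicast_ether_addr() is true for them; the bridge hunks above therefore test for broadcast first to keep such frames out of the IGMP snooping path. A sketch of the classification order (helper names here are stand-ins for the kernel's etherdevice helpers):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_broadcast(const unsigned char *a)
{
	static const unsigned char bc[6] = { 0xff,0xff,0xff,0xff,0xff,0xff };
	return memcmp(a, bc, 6) == 0;
}

static bool is_multicast(const unsigned char *a)
{
	return a[0] & 0x01; /* group bit: also set for broadcast */
}

int main(void)
{
	unsigned char dest[6] = { 0xff,0xff,0xff,0xff,0xff,0xff };

	/* order matters, as in br_dev_xmit()/br_handle_frame_finish() above */
	if (is_broadcast(dest))
		puts("flood to all ports");
	else if (is_multicast(dest))
		puts("consult multicast forwarding database");
	else
		puts("unicast forward");
	return 0;
}
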
index 2f14eafdeeabe80d5dd5a5643e55ecf3a2e74f52..2d85ca7111d3994f1c60941a4992da749d195b57 100644 (file)
@@ -1379,8 +1379,11 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
        if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
                return -EINVAL;
 
-       if (iph->protocol != IPPROTO_IGMP)
+       if (iph->protocol != IPPROTO_IGMP) {
+               if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP)
+                       BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                return 0;
+       }
 
        len = ntohs(iph->tot_len);
        if (skb->len < len || len < ip_hdrlen(skb))
@@ -1424,7 +1427,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
        switch (ih->type) {
        case IGMP_HOST_MEMBERSHIP_REPORT:
        case IGMPV2_HOST_MEMBERSHIP_REPORT:
-               BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+               BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                err = br_ip4_multicast_add_group(br, port, ih->group);
                break;
        case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -1543,7 +1546,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                        goto out;
                }
                mld = (struct mld_msg *)skb_transport_header(skb2);
-               BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+               BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
                break;
            }
index 3a66b8c10e09ab1876e7b678efc65073699342a0..c23979e79dfadce502c5d70927412849b79b868e 100644 (file)
@@ -255,7 +255,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 
                if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
 
-                       if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND ||
+                       if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
                                ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
                                        layer->id != 0) {
 
index a3a3a31d3c37b0006d0e9bfe26fd06a7ebe3b47d..41466ccb972a6698416d007acf375a81de00b71f 100644 (file)
@@ -36,16 +36,19 @@ int ceph_flags_to_mode(int flags)
        if ((flags & O_DIRECTORY) == O_DIRECTORY)
                return CEPH_FILE_MODE_PIN;
 #endif
-       if ((flags & O_APPEND) == O_APPEND)
-               flags |= O_WRONLY;
 
-       if ((flags & O_ACCMODE) == O_RDWR)
-               mode = CEPH_FILE_MODE_RDWR;
-       else if ((flags & O_ACCMODE) == O_WRONLY)
+       switch (flags & O_ACCMODE) {
+       case O_WRONLY:
                mode = CEPH_FILE_MODE_WR;
-       else
+               break;
+       case O_RDONLY:
                mode = CEPH_FILE_MODE_RD;
-
+               break;
+       case O_RDWR:
+       case O_ACCMODE: /* this is what the VFS does */
+               mode = CEPH_FILE_MODE_RDWR;
+               break;
+       }
 #ifdef O_LAZY
        if (flags & O_LAZY)
                mode |= CEPH_FILE_MODE_LAZY;
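
The rewritten ceph_flags_to_mode() maps the access-mode bits directly and, like the VFS, treats the reserved O_ACCMODE bit pattern as read/write; O_APPEND no longer forces write mode. A standalone check (the CEPH_FILE_MODE_* values are assumed for the sketch; the real ones live in the ceph headers):

#include <fcntl.h>
#include <stdio.h>

enum { MODE_RD = 1, MODE_WR = 2, MODE_RDWR = 3 }; /* assumed values */

static int flags_to_mode(int flags)
{
	switch (flags & O_ACCMODE) {
	case O_WRONLY: return MODE_WR;
	case O_RDONLY: return MODE_RD;
	case O_RDWR:
	case O_ACCMODE: /* this is what the VFS does */
		return MODE_RDWR;
	}
	return MODE_RD; /* unreachable: O_ACCMODE covers every case */
}

int main(void)
{
	printf("%d %d %d\n", flags_to_mode(O_RDONLY),
	       flags_to_mode(O_WRONLY | O_APPEND), flags_to_mode(O_RDWR));
	/* expected: 1 2 3 -- O_APPEND no longer implies write mode */
	return 0;
}
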
index 9cb627a4073aef3007afe4dd600ae96d85f0af0b..7330c2757c0c23c6cf0f11b1331d59de82983690 100644 (file)
@@ -477,8 +477,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
        calc_layout(osdc, vino, layout, off, plen, req, ops);
        req->r_file_layout = *layout;  /* keep a copy */
 
-       /* in case it differs from natural alignment that calc_layout
-          filled in for us */
+       /* in case it differs from natural (file) alignment that
+          calc_layout filled in for us */
+       req->r_num_pages = calc_pages_for(page_align, *plen);
        req->r_page_alignment = page_align;
 
        ceph_osdc_build_request(req, off, plen, ops,
@@ -2027,8 +2028,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
                int want = calc_pages_for(req->r_page_alignment, data_len);
 
                if (unlikely(req->r_num_pages < want)) {
-                       pr_warning("tid %lld reply %d > expected %d pages\n",
-                                  tid, want, m->nr_pages);
+                       pr_warning("tid %lld reply has %d bytes %d pages, we"
+                                  " had only %d pages ready\n", tid, data_len,
+                                  want, req->r_num_pages);
                        *skip = 1;
                        ceph_msg_put(m);
                        m = NULL;
index 9ccca038444f11fda683bbed8bf56ad0d5e1b65e..6135f3671692689b773c2289d940ecabb29c6a26 100644 (file)
@@ -190,7 +190,8 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
        dst->lastuse = jiffies;
        dst->flags = flags;
        dst->next = NULL;
-       dst_entries_add(ops, 1);
+       if (!(flags & DST_NOCOUNT))
+               dst_entries_add(ops, 1);
        return dst;
 }
 EXPORT_SYMBOL(dst_alloc);
@@ -243,7 +244,8 @@ again:
                neigh_release(neigh);
        }
 
-       dst_entries_add(dst->ops, -1);
+       if (!(dst->flags & DST_NOCOUNT))
+               dst_entries_add(dst->ops, -1);
 
        if (dst->ops->destroy)
                dst->ops->destroy(dst);
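
DST_NOCOUNT exempts an entry from the dst accounting that drives garbage-collection pressure, and the same flag must be honoured on both the alloc and destroy sides so the counter stays balanced; the ipv6 route hunks later in this diff pass it for ip6_route_add(). A sketch of that pairing (the flag's bit value is assumed here):

#include <stdio.h>

#define DST_NOCOUNT 0x01 /* assumed bit value for the sketch */

static int dst_entries; /* stands in for the ops->entries counter */

static void dst_alloc_count(int flags)
{
	if (!(flags & DST_NOCOUNT))
		dst_entries += 1;
}

static void dst_destroy_count(int flags)
{
	if (!(flags & DST_NOCOUNT))
		dst_entries -= 1;
}

int main(void)
{
	dst_alloc_count(0);
	dst_alloc_count(DST_NOCOUNT); /* e.g. a route added via ip6_route_add() */
	dst_destroy_count(DST_NOCOUNT);
	printf("%d\n", dst_entries); /* 1: the uncounted entry never skewed GC */
	return 0;
}
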
index ed0eab39f531f028e96deaa5f61755c46c4e5855..02548b292b53bf416f4e0ced8daeeb9e842666e3 100644 (file)
@@ -44,7 +44,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
        pr_debug("%s\n", __func__);
 
        if (!buf)
-               goto out;
+               return -EMSGSIZE;
 
        hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
                IEEE802154_LIST_PHY);
@@ -65,6 +65,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
                                pages * sizeof(uint32_t), buf);
 
        mutex_unlock(&phy->pib_lock);
+       kfree(buf);
        return genlmsg_end(msg, hdr);
 
 nla_put_failure:
index 9c1926027a268162a3c37374a5f3397661c6dba6..ef1528af7abf0f9dadb567f4156f4e10da360828 100644 (file)
@@ -465,8 +465,10 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (addr_len < sizeof(struct sockaddr_in))
                goto out;
 
-       if (addr->sin_family != AF_INET)
+       if (addr->sin_family != AF_INET) {
+               err = -EAFNOSUPPORT;
                goto out;
+       }
 
        chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
 
@@ -676,6 +678,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
        lock_sock(sk2);
 
+       sock_rps_record_flow(sk2);
        WARN_ON(!((1 << sk2->sk_state) &
                  (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
index 6ffe94ca5bc98bfea2488679ba416ef71295b71a..3267d389843794e0b481f05c22021e905fc6e6d1 100644 (file)
@@ -437,7 +437,7 @@ static int valid_cc(const void *bc, int len, int cc)
                        return 0;
                if (cc == len)
                        return 1;
-               if (op->yes < 4)
+               if (op->yes < 4 || op->yes & 3)
                        return 0;
                len -= op->yes;
                bc  += op->yes;
@@ -447,11 +447,11 @@ static int valid_cc(const void *bc, int len, int cc)
 
 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 {
-       const unsigned char *bc = bytecode;
+       const void *bc = bytecode;
        int  len = bytecode_len;
 
        while (len > 0) {
-               struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
+               const struct inet_diag_bc_op *op = bc;
 
 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
                switch (op->code) {
@@ -462,22 +462,20 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
                case INET_DIAG_BC_S_LE:
                case INET_DIAG_BC_D_GE:
                case INET_DIAG_BC_D_LE:
-                       if (op->yes < 4 || op->yes > len + 4)
-                               return -EINVAL;
                case INET_DIAG_BC_JMP:
-                       if (op->no < 4 || op->no > len + 4)
+                       if (op->no < 4 || op->no > len + 4 || op->no & 3)
                                return -EINVAL;
                        if (op->no < len &&
                            !valid_cc(bytecode, bytecode_len, len - op->no))
                                return -EINVAL;
                        break;
                case INET_DIAG_BC_NOP:
-                       if (op->yes < 4 || op->yes > len + 4)
-                               return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }
+               if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
+                       return -EINVAL;
                bc  += op->yes;
                len -= op->yes;
        }
index a8024eaa0e87b15ebdc86058a87c24f0813a610a..84f26e8e6c6075a53806f7392eff87782df42151 100644 (file)
@@ -802,8 +802,6 @@ static int __ip_append_data(struct sock *sk,
        skb = skb_peek_tail(queue);
 
        exthdrlen = !skb ? rt->dst.header_len : 0;
-       length += exthdrlen;
-       transhdrlen += exthdrlen;
        mtu = cork->fragsize;
 
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -830,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
        cork->length += length;
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
-           (rt->dst.dev->features & NETIF_F_UFO)) {
+           (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         mtu, flags);
@@ -883,17 +881,16 @@ alloc_new_skb:
                        else
                                alloclen = fraglen;
 
+                       alloclen += exthdrlen;
+
                        /* The last fragment gets additional space at tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea what fragment will be
                         * the last.
                         */
-                       if (datalen == length + fraggap) {
+                       if (datalen == length + fraggap)
                                alloclen += rt->dst.trailer_len;
-                               /* make sure mtu is not reached */
-                               if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
-                                       datalen -= ALIGN(rt->dst.trailer_len, 8);
-                       }
+
                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
@@ -926,11 +923,11 @@ alloc_new_skb:
                        /*
                         *      Find where to start putting bytes.
                         */
-                       data = skb_put(skb, fraglen);
+                       data = skb_put(skb, fraglen + exthdrlen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
-                       data += fragheaderlen;
+                       data += fragheaderlen + exthdrlen;
 
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
@@ -1064,7 +1061,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
         */
        *rtp = NULL;
        cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
-                        rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+                        rt->dst.dev->mtu : dst_mtu(&rt->dst);
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->tx_flags = ipc->tx_flags;
index 4614babdc45fec0a2c5dbf553576267ba78d79b8..2e97e3ec1eb72a25d5c6bd084f2c17f53e7e4d4d 100644 (file)
@@ -17,51 +17,35 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
        struct flowi4 fl4 = {};
-       unsigned long orefdst;
+       __be32 saddr = iph->saddr;
+       __u8 flags = 0;
        unsigned int hh_len;
-       unsigned int type;
 
-       type = inet_addr_type(net, iph->saddr);
-       if (skb->sk && inet_sk(skb->sk)->transparent)
-               type = RTN_LOCAL;
-       if (addr_type == RTN_UNSPEC)
-               addr_type = type;
+       if (!skb->sk && addr_type != RTN_LOCAL) {
+               if (addr_type == RTN_UNSPEC)
+                       addr_type = inet_addr_type(net, saddr);
+               if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
+                       flags |= FLOWI_FLAG_ANYSRC;
+               else
+                       saddr = 0;
+       }
 
        /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
         * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
         */
-       if (addr_type == RTN_LOCAL) {
-               fl4.daddr = iph->daddr;
-               if (type == RTN_LOCAL)
-                       fl4.saddr = iph->saddr;
-               fl4.flowi4_tos = RT_TOS(iph->tos);
-               fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
-               fl4.flowi4_mark = skb->mark;
-               fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
-               rt = ip_route_output_key(net, &fl4);
-               if (IS_ERR(rt))
-                       return -1;
-
-               /* Drop old route. */
-               skb_dst_drop(skb);
-               skb_dst_set(skb, &rt->dst);
-       } else {
-               /* non-local src, find valid iif to satisfy
-                * rp-filter when calling ip_route_input. */
-               fl4.daddr = iph->saddr;
-               rt = ip_route_output_key(net, &fl4);
-               if (IS_ERR(rt))
-                       return -1;
+       fl4.daddr = iph->daddr;
+       fl4.saddr = saddr;
+       fl4.flowi4_tos = RT_TOS(iph->tos);
+       fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+       fl4.flowi4_mark = skb->mark;
+       fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags;
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt))
+               return -1;
 
-               orefdst = skb->_skb_refdst;
-               if (ip_route_input(skb, iph->daddr, iph->saddr,
-                                  RT_TOS(iph->tos), rt->dst.dev) != 0) {
-                       dst_release(&rt->dst);
-                       return -1;
-               }
-               dst_release(&rt->dst);
-               refdst_drop(orefdst);
-       }
+       /* Drop old route. */
+       skb_dst_drop(skb);
+       skb_dst_set(skb, &rt->dst);
 
        if (skb_dst(skb)->error)
                return -1;
index f7f9bd7ba12d8b4f6d0f938835efac7e34dfbed5..5c9b9d963918aa538086239d2351afea574324cb 100644 (file)
@@ -203,7 +203,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
        else
                pmsg->outdev_name[0] = '\0';
 
-       if (entry->indev && entry->skb->dev) {
+       if (entry->indev && entry->skb->dev &&
+           entry->skb->mac_header != entry->skb->network_header) {
                pmsg->hw_type = entry->skb->dev->type;
                pmsg->hw_addrlen = dev_parse_header(entry->skb,
                                                    pmsg->hw_addr);
index 7647438435030a4fa29ba98539ec423921086b16..24e556e83a3ba97fe633525c10e63d4bcb767b34 100644 (file)
@@ -566,7 +566,7 @@ check_entry(const struct ipt_entry *e, const char *name)
        const struct xt_entry_target *t;
 
        if (!ip_checkentry(&e->ip)) {
-               duprintf("ip check failed %p %s.\n", e, par->match->name);
+               duprintf("ip check failed %p %s.\n", e, name);
                return -EINVAL;
        }
 
index 1ff79e557f96306b428874462f491ffb54c5403e..51f13f8ec724f8c5dad543073e8a0d82639b555a 100644 (file)
@@ -40,7 +40,6 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        struct iphdr *niph;
        const struct tcphdr *oth;
        struct tcphdr _otcph, *tcph;
-       unsigned int addr_type;
 
        /* IP header checks: fragment. */
        if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
@@ -55,6 +54,9 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        if (oth->rst)
                return;
 
+       if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+               return;
+
        /* Check checksum */
        if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
                return;
@@ -101,19 +103,11 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        nskb->csum_start = (unsigned char *)tcph - nskb->head;
        nskb->csum_offset = offsetof(struct tcphdr, check);
 
-       addr_type = RTN_UNSPEC;
-       if (hook != NF_INET_FORWARD
-#ifdef CONFIG_BRIDGE_NETFILTER
-           || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
-#endif
-          )
-               addr_type = RTN_LOCAL;
-
        /* ip_route_me_harder expects skb->dst to be set */
        skb_dst_set_noref(nskb, skb_dst(oldskb));
 
        nskb->protocol = htons(ETH_P_IP);
-       if (ip_route_me_harder(nskb, addr_type))
+       if (ip_route_me_harder(nskb, RTN_UNSPEC))
                goto free_nskb;
 
        niph->ttl       = ip4_dst_hoplimit(skb_dst(nskb));
index af6e9c778345ff802916cc7fa0078cbd79735c81..2b57e52c746c4518b914da30775044692a740396 100644 (file)
@@ -25,7 +25,8 @@ MODULE_LICENSE("GPL");
 static inline bool match_ip(const struct sk_buff *skb,
                            const struct ipt_ecn_info *einfo)
 {
-       return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect;
+       return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
+              !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
 }
 
 static inline bool match_tcp(const struct sk_buff *skb,
@@ -76,8 +77,6 @@ static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
                        return false;
 
        if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
-               if (ip_hdr(skb)->protocol != IPPROTO_TCP)
-                       return false;
                if (!match_tcp(skb, info, &par->hotdrop))
                        return false;
        }
@@ -97,7 +96,7 @@ static int ecn_mt_check(const struct xt_mtchk_param *par)
                return -EINVAL;
 
        if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
-           ip->proto != IPPROTO_TCP) {
+           (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
                pr_info("cannot match TCP bits in rule for non-tcp packets\n");
                return -EINVAL;
        }
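
match_ip() gains inversion support via the usual netfilter idiom of XOR-ing the raw match result with the normalized invert bit. A standalone sketch (the mask and flag values below are assumed for the demo, not the real IPT_ECN_* constants):

#include <stdbool.h>
#include <stdio.h>

#define ECN_IP_MASK 0x03 /* assumed: low two TOS bits carry ECT */
#define OP_MATCH_IP 0x01 /* assumed flag value for the sketch */

/* the `matched ^ !!(invert & flag)` idiom from match_ip() above */
static bool match_ip(unsigned char tos, unsigned char want_ect,
		     unsigned char invert)
{
	return ((tos & ECN_IP_MASK) == want_ect) ^ !!(invert & OP_MATCH_IP);
}

int main(void)
{
	printf("%d\n", match_ip(0x02, 0x02, 0));           /* 1: plain match */
	printf("%d\n", match_ip(0x02, 0x02, OP_MATCH_IP)); /* 0: inverted hit */
	printf("%d\n", match_ip(0x01, 0x02, OP_MATCH_IP)); /* 1: inverted miss */
	return 0;
}
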
index db10075dd88e4720e1ba97bf7365b424e2fb124b..de9da21113a11be6c9f57b15a97b3936a574e512 100644 (file)
@@ -121,7 +121,9 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
                return ret;
        }
 
-       if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
+       /* adjust seqs for loopback traffic only in outgoing direction */
+       if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+           !nf_is_loopback_packet(skb)) {
                typeof(nf_nat_seq_adjust_hook) seq_adjust;
 
                seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
index 9aaa67165f42453ed0af947cebb53cc49163ab4a..39b403f854c6debeb03437a2a9a0c9746083b399 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/proc_fs.h>
 #include <net/sock.h>
 #include <net/ping.h>
-#include <net/icmp.h>
 #include <net/udp.h>
 #include <net/route.h>
 #include <net/inet_common.h>
index 045f0ec6a4a02a257cd60760283cdd3fb8cd5fda..aa13ef1051108dac93a9151d0819d9fd7447ec64 100644 (file)
@@ -1902,9 +1902,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
        rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
-       err = 0;
-       if (IS_ERR(rth))
-               err = PTR_ERR(rth);
+       return IS_ERR(rth) ? PTR_ERR(rth) : 0;
 
 e_nobufs:
        return -ENOBUFS;
index 054a59d21eb0d4211bf4857f6a15b33d5e80abdc..46febcacb729f0657c9c325ee18f007c2f9fe417 100644 (file)
@@ -3220,7 +3220,7 @@ __setup("thash_entries=", set_thash_entries);
 void __init tcp_init(void)
 {
        struct sk_buff *skb = NULL;
-       unsigned long nr_pages, limit;
+       unsigned long limit;
        int i, max_share, cnt;
        unsigned long jiffy = jiffies;
 
@@ -3277,13 +3277,7 @@ void __init tcp_init(void)
        sysctl_tcp_max_orphans = cnt / 2;
        sysctl_max_syn_backlog = max(128, cnt / 256);
 
-       /* Set the pressure threshold to be a fraction of global memory that
-        * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-        * memory, with a floor of 128 pages.
-        */
-       nr_pages = totalram_pages - totalhigh_pages;
-       limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-       limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+       limit = nr_free_buffer_pages() / 8;
        limit = max(limit, 128UL);
        sysctl_tcp_mem[0] = limit / 4 * 3;
        sysctl_tcp_mem[1] = limit;
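
TCP here, and UDP and SCTP below, now derive their memory-pressure thresholds the same way: one eighth of the free buffer pages with a floor of 128 pages, and a low watermark at three quarters of that. A worked example on a hypothetical machine (256k free buffer pages, i.e. 1 GB with 4 KiB pages):

#include <stdio.h>

int main(void)
{
	/* hypothetical: nr_free_buffer_pages() == 1 << 18 */
	unsigned long limit = (1UL << 18) / 8; /* 32768 pages */

	if (limit < 128)
		limit = 128;
	printf("mem[0]=%lu mem[1]=%lu\n", limit / 4 * 3, limit);
	/* mem[0]=24576 mem[1]=32768 -- pressure starts around 96 MiB */
	return 0;
}
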
index a7d6671e33b8a6e46aba0b352ad083736d1b421b..708dc203b0348c3365b6ac3cd2e36559a069d0cc 100644 (file)
@@ -1589,6 +1589,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                        goto discard;
 
                if (nsk != sk) {
+                       sock_rps_save_rxhash(nsk, skb->rxhash);
                        if (tcp_child_process(sk, nsk, skb)) {
                                rsk = nsk;
                                goto reset;
index abca870d8ff69fe76a19bef7d9338d4f80fabaf0..198f75b7bdd3d0d6b2c43f1e4f71183667948d24 100644 (file)
@@ -1249,6 +1249,9 @@ csum_copy_err:
 
        if (noblock)
                return -EAGAIN;
+
+       /* starting over for a new packet */
+       msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
 
@@ -2206,16 +2209,10 @@ void __init udp_table_init(struct udp_table *table, const char *name)
 
 void __init udp_init(void)
 {
-       unsigned long nr_pages, limit;
+       unsigned long limit;
 
        udp_table_init(&udp_table, "UDP");
-       /* Set the pressure threshold up by the same strategy of TCP. It is a
-        * fraction of global memory that is up to 1/2 at 256 MB, decreasing
-        * toward zero with the amount of memory, with a floor of 128 pages.
-        */
-       nr_pages = totalram_pages - totalhigh_pages;
-       limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-       limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+       limit = nr_free_buffer_pages() / 8;
        limit = max(limit, 128UL);
        sysctl_udp_mem[0] = limit / 4 * 3;
        sysctl_udp_mem[1] = limit;
index 2d51840e53a114be661abd42200a632b9ebf791e..327a617d594cd04929ffa668082ed575b0f54d7f 100644 (file)
@@ -32,7 +32,12 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
        dst = skb_dst(skb);
        mtu = dst_mtu(dst);
        if (skb->len > mtu) {
-               icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+               if (skb->sk)
+                       ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
+                                      inet_sk(skb->sk)->inet_dport, mtu);
+               else
+                       icmp_send(skb, ICMP_DEST_UNREACH,
+                                 ICMP_FRAG_NEEDED, htonl(mtu));
                ret = -EMSGSIZE;
        }
 out:
index d450a2f9fc0645b7addef3fcbf2d586e89d86da7..3b5669a2582df03c1f1b896991fba664e137d62e 100644 (file)
@@ -274,7 +274,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                return -EINVAL;
 
        if (addr->sin6_family != AF_INET6)
-               return -EINVAL;
+               return -EAFNOSUPPORT;
 
        addr_type = ipv6_addr_type(&addr->sin6_addr);
        if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
index 065fe405fb58486430a1f81209d9d4b6ec31abdf..249394863284bcb2edcb3228183a953393df1a26 100644 (file)
@@ -204,7 +204,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
        else
                pmsg->outdev_name[0] = '\0';
 
-       if (entry->indev && entry->skb->dev) {
+       if (entry->indev && entry->skb->dev &&
+           entry->skb->mac_header != entry->skb->network_header) {
                pmsg->hw_type = entry->skb->dev->type;
                pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
        }
index de2b1decd78668d832937cf525115152411bb075..0ef1f086feb8ec294ed71fef231f0cab1642faa7 100644 (file)
@@ -228,9 +228,10 @@ static struct rt6_info ip6_blk_hole_entry_template = {
 
 /* allocate dst with ip6_dst_ops */
 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
-                                            struct net_device *dev)
+                                            struct net_device *dev,
+                                            int flags)
 {
-       struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, 0);
+       struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
 
        memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
 
@@ -1042,7 +1043,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        if (unlikely(idev == NULL))
                return NULL;
 
-       rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev);
+       rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
        if (unlikely(rt == NULL)) {
                in6_dev_put(idev);
                goto out;
@@ -1062,14 +1063,6 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
        rt->dst.output  = ip6_output;
 
-#if 0  /* there's no chance to use these for ndisc */
-       rt->dst.flags   = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
-                               ? DST_HOST
-                               : 0;
-       ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
-       rt->rt6i_dst.plen = 128;
-#endif
-
        spin_lock_bh(&icmp6_dst_lock);
        rt->dst.next = icmp6_dst_gc_list;
        icmp6_dst_gc_list = &rt->dst;
@@ -1214,7 +1207,7 @@ int ip6_route_add(struct fib6_config *cfg)
                goto out;
        }
 
-       rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL);
+       rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
 
        if (rt == NULL) {
                err = -ENOMEM;
@@ -1244,7 +1237,7 @@ int ip6_route_add(struct fib6_config *cfg)
        ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
        rt->rt6i_dst.plen = cfg->fc_dst_len;
        if (rt->rt6i_dst.plen == 128)
-              rt->dst.flags = DST_HOST;
+              rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
        ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1734,7 +1727,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
 {
        struct net *net = dev_net(ort->rt6i_dev);
        struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-                                           ort->dst.dev);
+                                           ort->dst.dev, 0);
 
        if (rt) {
                rt->dst.input = ort->dst.input;
@@ -2013,7 +2006,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-                                           net->loopback_dev);
+                                           net->loopback_dev, 0);
        struct neighbour *neigh;
 
        if (rt == NULL) {
@@ -2025,7 +2018,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 
        in6_dev_hold(idev);
 
-       rt->dst.flags = DST_HOST;
+       rt->dst.flags |= DST_HOST;
        rt->dst.input = ip6_input;
        rt->dst.output = ip6_output;
        rt->rt6i_idev = idev;
index d1fd28711ba50e1ad8ac90a0c32df360ec911d70..87551ca568cd80e35671153b80acb5c6f6986fe8 100644 (file)
@@ -1644,6 +1644,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                 * the new socket..
                 */
                if(nsk != sk) {
+                       sock_rps_save_rxhash(nsk, skb->rxhash);
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
index 41f8c9c08dbaf86051a16c5488b6c961f65278fc..328985c408838ad9e50fb1e209cc4f9c592a56a3 100644 (file)
@@ -453,8 +453,11 @@ csum_copy_err:
        }
        unlock_sock_fast(sk, slow);
 
-       if (flags & MSG_DONTWAIT)
+       if (noblock)
                return -EAGAIN;
+
+       /* starting over for a new packet */
+       msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
 
index 58ffa7d069c791c7d2c2c681861212d806260956..669d2e32efb61475384c2134840ce43bf15e2eed 100644 (file)
@@ -877,7 +877,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                local->sched_scan_ies.ie[i] = kzalloc(2 +
                                                      IEEE80211_MAX_SSID_LEN +
-                                                     local->scan_ies_len,
+                                                     local->scan_ies_len +
+                                                     req->ie_len,
                                                      GFP_KERNEL);
                if (!local->sched_scan_ies.ie[i]) {
                        ret = -ENOMEM;
index 9dc3b5f26e80c800a306aca5287b0e22fcda5378..8f6a302d2ac3b89d191708f6f7f8eea1409ced8b 100644 (file)
@@ -86,6 +86,11 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int queue = rx->queue;
+
+       /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+       if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+               queue = 0;
 
        /*
         * it makes no sense to check for MIC errors on anything other
@@ -148,13 +153,19 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 
 update_iv:
        /* update IV in key information to be able to detect replays */
-       rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
-       rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
+       rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
+       rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
 
        return RX_CONTINUE;
 
 mic_fail:
-       mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
+       /*
+        * In some cases the key can be unset - e.g. a multicast packet, in
+        * a driver that supports HW encryption. Send up the key idx only if
+        * the key is set.
+        */
+       mac80211_ev_michael_mic_failure(rx->sdata,
+                                       rx->key ? rx->key->conf.keyidx : -1,
                                        (void *) skb->data, NULL, GFP_ATOMIC);
        return RX_DROP_UNUSABLE;
 }
@@ -235,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
        struct ieee80211_key *key = rx->key;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       int queue = rx->queue;
+
+       /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+       if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+               queue = 0;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
@@ -255,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
        res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
                                          key, skb->data + hdrlen,
                                          skb->len - hdrlen, rx->sta->sta.addr,
-                                         hdr->addr1, hwaccel, rx->queue,
+                                         hdr->addr1, hwaccel, queue,
                                          &rx->tkip_iv32,
                                          &rx->tkip_iv16);
        if (res != TKIP_DECRYPT_OK)
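
Both TKIP hunks clamp the dedicated non-QoS RX queue onto queue 0, so non-QoS data frames share TID 0's IV replay state instead of keeping an independently replayable counter. A sketch of the clamp (the NUM_RX_DATA_QUEUES value is an assumption for the demo):

#include <stdio.h>

#define NUM_RX_DATA_QUEUES 17 /* assumed: 16 TIDs + 1 non-QoS queue */

static int tkip_rx_queue(int rx_queue)
{
	/* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
	if (rx_queue == NUM_RX_DATA_QUEUES - 1)
		return 0;
	return rx_queue;
}

int main(void)
{
	printf("%d %d\n", tkip_rx_queue(3), tkip_rx_queue(16)); /* 3 0 */
	return 0;
}
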
index bf28ac2fc99b7cf164d82417e6248049787ff923..782db275ac53591eeae96b750f5bbf5cc14b063f 100644 (file)
@@ -776,8 +776,16 @@ static void ip_vs_conn_expire(unsigned long data)
                if (cp->control)
                        ip_vs_control_del(cp);
 
-               if (cp->flags & IP_VS_CONN_F_NFCT)
+               if (cp->flags & IP_VS_CONN_F_NFCT) {
                        ip_vs_conn_drop_conntrack(cp);
+                       /* Do not access conntracks during subsys cleanup
+                        * because nf_conntrack_find_get can not be used after
+                        * conntrack cleanup for the net.
+                        */
+                       smp_rmb();
+                       if (ipvs->enable)
+                               ip_vs_conn_drop_conntrack(cp);
+               }
 
                ip_vs_pe_put(cp->pe);
                kfree(cp->pe_data);
index 55af2242bccd482b8cb762a8f3419cc922724791..24c28d238dcb62f7dc148648a35e8a05b7278fbc 100644 (file)
@@ -1945,6 +1945,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
        EnterFunction(2);
        net_ipvs(net)->enable = 0;      /* Disable packet reception */
+       smp_wmb();
        __ip_vs_sync_cleanup(net);
        LeaveFunction(2);
 }
index e0ee010935e7008a8730b2c87a2c9c689226fb8a..2e7ccbb43ddb563b7491eafe197ec0e91d8c4b4b 100644 (file)
@@ -456,7 +456,8 @@ __build_packet_message(struct nfulnl_instance *inst,
        if (skb->mark)
                NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
 
-       if (indev && skb->dev) {
+       if (indev && skb->dev &&
+           skb->mac_header != skb->network_header) {
                struct nfulnl_msg_packet_hw phw;
                int len = dev_parse_header(skb, phw.hw_addr);
                if (len > 0) {
index b83123f12b42e30612481796bc6c9a4d3bacab60..fdd2fafe0a14ed8810181aae57168f841d5b3f1e 100644 (file)
@@ -335,7 +335,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (entskb->mark)
                NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
 
-       if (indev && entskb->dev) {
+       if (indev && entskb->dev &&
+           entskb->mac_header != entskb->network_header) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
index b4f3cf06d8da7afd6f497d8e3dc1f3e3c7031afd..08b3cead6503c62f91dc8e97d9b817de7a79ffb9 100644 (file)
@@ -500,23 +500,20 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         * Note: Adler-32 is no longer applicable, as has been replaced
         * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
         */
-       if (!sctp_checksum_disable &&
-           !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) {
-               __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
+       if (!sctp_checksum_disable) {
+               if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+                       __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
 
-               /* 3) Put the resultant value into the checksum field in the
-                *    common header, and leave the rest of the bits unchanged.
-                */
-               sh->checksum = sctp_end_cksum(crc32);
-       } else {
-               if (dst->dev->features & NETIF_F_SCTP_CSUM) {
+                       /* 3) Put the resultant value into the checksum field in the
+                        *    common header, and leave the rest of the bits unchanged.
+                        */
+                       sh->checksum = sctp_end_cksum(crc32);
+               } else {
                        /* no need to seed pseudo checksum for SCTP */
                        nskb->ip_summed = CHECKSUM_PARTIAL;
                        nskb->csum_start = (skb_transport_header(nskb) -
                                            nskb->head);
                        nskb->csum_offset = offsetof(struct sctphdr, checksum);
-               } else {
-                       nskb->ip_summed = CHECKSUM_UNNECESSARY;
                }
        }
 
index 1c88c8911dc50095315bc02463f2b3e5e1535509..d03682109b7a0417ea6dc1007a6277a0468f0b1b 100644 (file)
@@ -1582,6 +1582,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 #endif /* SCTP_DEBUG */
        if (transport) {
                if (bytes_acked) {
+                       struct sctp_association *asoc = transport->asoc;
+
                        /* We may have counted DATA that was migrated
                         * to this transport due to DEL-IP operation.
                         * Subtract those bytes, since the were never
@@ -1600,6 +1602,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                        transport->error_count = 0;
                        transport->asoc->overall_error_count = 0;
 
+                       /*
+                        * While in SHUTDOWN PENDING, we may have started
+                        * the T5 shutdown guard timer after reaching the
+                        * retransmission limit. Stop that timer as soon
+                        * as the receiver acknowledged any data.
+                        */
+                       if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+                           del_timer(&asoc->timers
+                               [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+                                       sctp_association_put(asoc);
+
                        /* Mark the destination transport address as
                         * active if it is not so marked.
                         */
@@ -1629,10 +1642,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                         * A sender is doing zero window probing when the
                         * receiver's advertised window is zero, and there is
                         * only one data chunk in flight to the receiver.
+                        *
+                        * Allow the association to time out while in SHUTDOWN
+                        * PENDING or SHUTDOWN RECEIVED in case the receiver
+                        * stays in zero window mode forever.
                         */
                        if (!q->asoc->peer.rwnd &&
                            !list_empty(&tlist) &&
-                           (sack_ctsn+2 == q->asoc->next_tsn)) {
+                           (sack_ctsn+2 == q->asoc->next_tsn) &&
+                           q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
                                SCTP_DEBUG_PRINTK("%s: SACK received for zero "
                                                  "window probe: %u\n",
                                                  __func__, sack_ctsn);
index 67380a29e2e9a93c33a6dfd7d0b8f497c9789eba..207175b2f40a9cdaaaba8fcc6e8ac8e02cc9769f 100644 (file)
@@ -1058,7 +1058,6 @@ SCTP_STATIC __init int sctp_init(void)
        int status = -EINVAL;
        unsigned long goal;
        unsigned long limit;
-       unsigned long nr_pages;
        int max_share;
        int order;
 
@@ -1148,15 +1147,7 @@ SCTP_STATIC __init int sctp_init(void)
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
-       /* Set the pressure threshold to be a fraction of global memory that
-        * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-        * memory, with a floor of 128 pages.
-        * Note this initializes the data in sctpv6_prot too
-        * Unabashedly stolen from tcp_init
-        */
-       nr_pages = totalram_pages - totalhigh_pages;
-       limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-       limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+       limit = nr_free_buffer_pages() / 8;
        limit = max(limit, 128UL);
        sysctl_sctp_mem[0] = limit / 4 * 3;
        sysctl_sctp_mem[1] = limit;
index 534c2e5feb054c933cbd0dcf46a0cb66386646dd..6e0f88295aafdc7012bea1cef543285c5d991f52 100644 (file)
@@ -670,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
         * HEARTBEAT should clear the error counter of the destination
         * transport address to which the HEARTBEAT was sent.
-        * The association's overall error count is also cleared.
         */
        t->error_count = 0;
-       t->asoc->overall_error_count = 0;
+
+       /*
+        * Although RFC4960 specifies that the overall error count must
+        * be cleared when a HEARTBEAT ACK is received, we make an
+        * exception while in SHUTDOWN PENDING. If the peer keeps its
+        * window shut forever, we may never be able to transmit our
+        * outstanding data and rely on the retransmission limit be reached
+        * to shutdown the association.
+        */
+       if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+               t->asoc->overall_error_count = 0;
 
        /* Clear the hb_sent flag to signal that we had a good
         * acknowledgement.
@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                        sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
                        break;
 
+               case SCTP_CMD_TIMER_START_ONCE:
+                       timer = &asoc->timers[cmd->obj.to];
+
+                       if (timer_pending(timer))
+                               break;
+                       /* fall through */
+
                case SCTP_CMD_TIMER_START:
                        timer = &asoc->timers[cmd->obj.to];
                        timeout = asoc->timeouts[cmd->obj.to];
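
SCTP_CMD_TIMER_START_ONCE shares the arming code with SCTP_CMD_TIMER_START via the fall-through; the only difference is the early break when the timer is already pending, so repeated start-once commands cannot keep pushing an armed deadline into the future. The distinction in miniature, with a toy timer instead of the kernel's struct timer_list:

    /* Userspace sketch of start vs. start-once semantics. */
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_timer { bool pending; long expires; };

    static void timer_start(struct toy_timer *t, long now, long timeout)
    {
        t->pending = true;
        t->expires = now + timeout;   /* (re)arms unconditionally */
    }

    static void timer_start_once(struct toy_timer *t, long now, long timeout)
    {
        if (t->pending)
            return;                   /* already armed: keep the deadline */
        timer_start(t, now, timeout);
    }

    int main(void)
    {
        struct toy_timer t = { false, 0 };
        timer_start_once(&t, 0, 300);    /* arms: expires at 300 */
        timer_start_once(&t, 100, 300);  /* no-op: still expires at 300 */
        timer_start(&t, 100, 300);       /* re-arms: expires at 400 */
        printf("expires=%ld\n", t.expires);
        return 0;
    }
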
index a297283154d5035c6342e54916f10445aa610d83..246117142b5c9eae0b7d4b472b630382d91f2e00 100644 (file)
@@ -5154,7 +5154,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
         * The sender of the SHUTDOWN MAY also start an overall guard timer
         * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
         */
-       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
        if (asoc->autoclose)
@@ -5299,14 +5299,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
        SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
 
        if (asoc->overall_error_count >= asoc->max_retrans) {
-               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-                               SCTP_ERROR(ETIMEDOUT));
-               /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-               sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-                               SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-               return SCTP_DISPOSITION_DELETE_TCB;
+               if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+                       /*
+                        * We are likely here because the receiver had its rwnd
+                        * closed for a while and we have not been able to
+                        * transmit the locally queued data within the maximum
+                        * retransmission attempts limit.  Start the T5
+                        * shutdown guard timer to give the receiver one last
+                        * chance and some additional time to recover before
+                        * aborting.
+                        */
+                       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+                               SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+               } else {
+                       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                                       SCTP_ERROR(ETIMEDOUT));
+                       /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+                       sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+                                       SCTP_PERR(SCTP_ERROR_NO_ERROR));
+                       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                       return SCTP_DISPOSITION_DELETE_TCB;
+               }
        }
 
        /* E1) For the destination address for which the timer
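
Together with the start-once command above, the rewritten branch means that exhausting max_retrans while in SHUTDOWN PENDING arms the T5 guard exactly once instead of aborting, and any other state still tears the association down. A compact decision model (simplified: in the kernel the guard-timer case also continues with the retransmission steps below):

    /* Decision model for the max_retrans branch above; the enums are
     * simplified stand-ins for the kernel's. */
    #include <stdio.h>

    enum state  { ESTABLISHED, SHUTDOWN_PENDING };
    enum action { RETRANSMIT, ARM_T5_GUARD_ONCE, ABORT_ASSOC };

    static enum action on_t3_rtx_expired(enum state s, int errors, int max)
    {
        if (errors < max)
            return RETRANSMIT;         /* normal retransmission path */
        if (s == SHUTDOWN_PENDING)
            return ARM_T5_GUARD_ONCE;  /* last bounded grace period */
        return ABORT_ASSOC;            /* ETIMEDOUT, delete the TCB */
    }

    int main(void)
    {
        printf("%d\n", on_t3_rtx_expired(SHUTDOWN_PENDING, 5, 5)); /* 1 */
        printf("%d\n", on_t3_rtx_expired(ESTABLISHED, 5, 5));      /* 2 */
        return 0;
    }
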
index 0338dc6fdc9df8328b26a3e2bc2c18c0b82008ca..7c211a7f90f4d065eec82baa0cb751373e7eb0be 100644 (file)
@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
        /* SCTP_STATE_ESTABLISHED */ \
        TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
        /* SCTP_STATE_SHUTDOWN_PENDING */ \
-       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
        /* SCTP_STATE_SHUTDOWN_SENT */ \
        TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
        /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
index 6766913a53e626279a7a2f4eb543e1f9fa7b5263..d3ccf7973c597402ba6e0783ef583f40d87a39ca 100644 (file)
@@ -1384,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
        struct sctp_endpoint *ep;
        struct sctp_association *asoc;
        struct list_head *pos, *temp;
+       unsigned int data_was_unread;
 
        SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
 
@@ -1393,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
        ep = sctp_sk(sk)->ep;
 
+       /* Clean up any skbs sitting on the receive queue.  */
+       data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+       data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
        /* Walk all associations on an endpoint.  */
        list_for_each_safe(pos, temp, &ep->asocs) {
                asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1410,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
                        }
                }
 
-               if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+               if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+                   !skb_queue_empty(&asoc->ulpq.reasm) ||
+                   (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
                        struct sctp_chunk *chunk;
 
                        chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1420,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
                        sctp_primitive_SHUTDOWN(asoc, NULL);
        }
 
-       /* Clean up any skbs sitting on the receive queue.  */
-       sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
-       sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
        /* On a TCP-style socket, block for at most linger_time if set. */
        if (sctp_style(sk, TCP) && timeout)
                sctp_wait_for_close(sk, timeout);
@@ -2073,10 +2076,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
                                  unsigned int optlen)
 {
+       struct sctp_association *asoc;
+       struct sctp_ulpevent *event;
+
        if (optlen > sizeof(struct sctp_event_subscribe))
                return -EINVAL;
        if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
                return -EFAULT;
+
+       /*
+        * When a user app subscribes to SCTP_SENDER_DRY_EVENT and there
+        * is no data to be sent or retransmitted, the stack will
+        * immediately send up this notification.
+        */
+       if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
+                                      &sctp_sk(sk)->subscribe)) {
+               asoc = sctp_id2assoc(sk, 0);
+
+               if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
+                       event = sctp_ulpevent_make_sender_dry_event(asoc,
+                                       GFP_ATOMIC);
+                       if (!event)
+                               return -ENOMEM;
+
+                       sctp_ulpq_tail_event(&asoc->ulpq, event);
+               }
+       }
+
        return 0;
 }
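
Seen from userspace, the effect is that subscribing on an idle association now yields an immediate SCTP_SENDER_DRY_EVENT rather than one that may never fire. A sketch of the subscribing side, assuming the lksctp-tools headers and with error handling trimmed:

    #include <netinet/in.h>
    #include <netinet/sctp.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Enable the sender-dry notification on an SCTP socket; if nothing
     * is queued to send or retransmit, the kernel now queues the event
     * immediately instead of waiting for the next queue drain. */
    static int subscribe_sender_dry(int fd)
    {
        struct sctp_event_subscribe ev;

        memset(&ev, 0, sizeof(ev));
        ev.sctp_sender_dry_event = 1;
        return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
    }
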
 
index e70e5fc87890c92031ab4b8a10e8fe3b15c5567e..8a84017834c211a840e83c1e39bebdbb001ec5c4 100644 (file)
@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
 }
 
 /* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
 {
        struct sk_buff *skb;
-       while ((skb = skb_dequeue(list)) != NULL)
-               sctp_ulpevent_free(sctp_skb2event(skb));
+       unsigned int data_unread = 0;
+
+       while ((skb = skb_dequeue(list)) != NULL) {
+               struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+               if (!sctp_ulpevent_is_notification(event))
+                       data_unread += skb->len;
+
+               sctp_ulpevent_free(event);
+       }
+
+       return data_unread;
 }
index 339ba64cce1e2ee7134a2c9d09cd64f5fb9cb85e..5daf6cc4faea19f39081765433f4cf2333280605 100644 (file)
@@ -577,13 +577,13 @@ retry:
        }
        inode = &gss_msg->inode->vfs_inode;
        for (;;) {
-               prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
+               prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
                spin_lock(&inode->i_lock);
                if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
                        break;
                }
                spin_unlock(&inode->i_lock);
-               if (signalled()) {
+               if (fatal_signal_pending(current)) {
                        err = -ERESTARTSYS;
                        goto out_intr;
                }
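
TASK_INTERRUPTIBLE plus signalled() let any signal abort the wait for the gssd upcall; TASK_KILLABLE plus fatal_signal_pending() narrow that to fatal signals, so a harmless SIGALRM no longer fails the RPC with -ERESTARTSYS. The generic kernel idiom, sketched apart from the GSS specifics (a fragment, not a standalone program):

    /* Killable-wait idiom: only fatal signals break out of the loop. */
    for (;;) {
            prepare_to_wait(&wq, &wait, TASK_KILLABLE);
            if (done_condition)
                    break;
            if (fatal_signal_pending(current)) {
                    err = -ERESTARTSYS;  /* SIGKILL and friends only */
                    break;
            }
            schedule();
    }
    finish_wait(&wq, &wait);
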
index 0a9a2ec2e46983f0b73bf20d267721024c11646c..c3b75333b821d2b58da65d16dc099ee6cf2b9da3 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/crypto.h>
+#include <linux/sunrpc/gss_krb5_enctypes.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY       RPCDBG_AUTH
@@ -750,7 +751,7 @@ static struct gss_api_mech gss_kerberos_mech = {
        .gm_ops         = &gss_kerberos_ops,
        .gm_pf_num      = ARRAY_SIZE(gss_kerberos_pfs),
        .gm_pfs         = gss_kerberos_pfs,
-       .gm_upcall_enctypes = "18,17,16,23,3,1,2",
+       .gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES,
 };
 
 static int __init init_kerberos_module(void)
index b84d7395535e7aae2d523797134c27d8a9e57c11..8c9141583d6f135714eac27c100e4bc9c3426631 100644 (file)
@@ -1061,7 +1061,7 @@ call_allocate(struct rpc_task *task)
 
        dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
 
-       if (RPC_IS_ASYNC(task) || !signalled()) {
+       if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
                task->tk_action = call_allocate;
                rpc_delay(task, HZ>>4);
                return;
@@ -1175,6 +1175,9 @@ call_bind_status(struct rpc_task *task)
                        status = -EOPNOTSUPP;
                        break;
                }
+               if (task->tk_rebind_retry == 0)
+                       break;
+               task->tk_rebind_retry--;
                rpc_delay(task, 3*HZ);
                goto retry_timeout;
        case -ETIMEDOUT:
index 9a80a922c5270ee78d8a76feebac4360c551da82..e45d2fbbe5a8b3e82e6c36f3385065967663bff3 100644 (file)
@@ -597,7 +597,7 @@ void rpcb_getport_async(struct rpc_task *task)
        u32 bind_version;
        struct rpc_xprt *xprt;
        struct rpc_clnt *rpcb_clnt;
-       static struct rpcbind_args *map;
+       struct rpcbind_args *map;
        struct rpc_task *child;
        struct sockaddr_storage addr;
        struct sockaddr *sap = (struct sockaddr *)&addr;
index 6b43ee7221d5a830b8b0acea4375f9341aacfc90..4814e246a874ac1c19c51c5c36e7bbcd60b2d373 100644 (file)
@@ -616,30 +616,25 @@ static void __rpc_execute(struct rpc_task *task)
        BUG_ON(RPC_IS_QUEUED(task));
 
        for (;;) {
+               void (*do_action)(struct rpc_task *);
 
                /*
-                * Execute any pending callback.
+                * Execute any pending callback first.
                 */
-               if (task->tk_callback) {
-                       void (*save_callback)(struct rpc_task *);
-
-                       /*
-                        * We set tk_callback to NULL before calling it,
-                        * in case it sets the tk_callback field itself:
-                        */
-                       save_callback = task->tk_callback;
-                       task->tk_callback = NULL;
-                       save_callback(task);
-               } else {
+               do_action = task->tk_callback;
+               task->tk_callback = NULL;
+               if (do_action == NULL) {
                        /*
                         * Perform the next FSM step.
-                        * tk_action may be NULL when the task has been killed
-                        * by someone else.
+                        * tk_action may be NULL if the task has been killed.
+                        * In particular, note that rpc_killall_tasks may
+                        * do this at any time, so beware when dereferencing.
                         */
-                       if (task->tk_action == NULL)
+                       do_action = task->tk_action;
+                       if (do_action == NULL)
                                break;
-                       task->tk_action(task);
                }
+               do_action(task);
 
                /*
                 * Lockless check for whether task is sleeping or not.
@@ -792,6 +787,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        /* Initialize retry counters */
        task->tk_garb_retry = 2;
        task->tk_cred_retry = 2;
+       task->tk_rebind_retry = 2;
 
        task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
        task->tk_owner = current->tgid;
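
The __rpc_execute() rework above folds both dispatch paths into one local function pointer: tk_callback, if set, wins and is cleared before the call so a callback may re-register itself, and tk_action is read exactly once, which matters because rpc_killall_tasks() can NULL it concurrently. The single-read pattern in isolation:

    /* Kernel-style fragment showing the hazard the local avoids: with a
     * field that rpc_killall_tasks() can clear concurrently,
     *
     *     if (task->tk_action != NULL)
     *             task->tk_action(task);    // second read may be NULL
     *
     * is a time-of-check/time-of-use race. Sampling once closes it: */
    void (*do_action)(struct rpc_task *) = task->tk_action;
    if (do_action != NULL)
            do_action(task);
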
index c22ef3492ee6f0b8f58f1d663c995a977a7bcc6b..880dbe2e6f94979847df154fb0a35e4ed964df69 100644 (file)
@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 
        mutex_init(&rdev->mtx);
        mutex_init(&rdev->devlist_mtx);
+       mutex_init(&rdev->sched_scan_mtx);
        INIT_LIST_HEAD(&rdev->netdev_list);
        spin_lock_init(&rdev->bss_lock);
        INIT_LIST_HEAD(&rdev->bss_list);
@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
        rfkill_destroy(rdev->rfkill);
        mutex_destroy(&rdev->mtx);
        mutex_destroy(&rdev->devlist_mtx);
+       mutex_destroy(&rdev->sched_scan_mtx);
        list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
                cfg80211_put_bss(&scan->pub);
        cfg80211_rdev_free_wowlan(rdev);
@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work)
                ___cfg80211_scan_done(rdev, true);
        }
 
+       cfg80211_unlock_rdev(rdev);
+
+       mutex_lock(&rdev->sched_scan_mtx);
+
        if (WARN_ON(rdev->sched_scan_req &&
                    rdev->sched_scan_req->dev == wdev->netdev)) {
                __cfg80211_stop_sched_scan(rdev, false);
        }
 
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 
        mutex_lock(&rdev->devlist_mtx);
        rdev->opencount--;
@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
                        break;
                case NL80211_IFTYPE_P2P_CLIENT:
                case NL80211_IFTYPE_STATION:
-                       cfg80211_lock_rdev(rdev);
+                       mutex_lock(&rdev->sched_scan_mtx);
                        __cfg80211_stop_sched_scan(rdev, false);
-                       cfg80211_unlock_rdev(rdev);
+                       mutex_unlock(&rdev->sched_scan_mtx);
 
                        wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT
index 3dce1f167eba338a6a3c92d0c584636936600420..a570ff9214ec3e60d2a2c88fef441a88ef70967f 100644 (file)
@@ -65,6 +65,8 @@ struct cfg80211_registered_device {
        struct work_struct scan_done_wk;
        struct work_struct sched_scan_results_wk;
 
+       struct mutex sched_scan_mtx;
+
 #ifdef CONFIG_NL80211_TESTMODE
        struct genl_info *testmode_info;
 #endif
index 98fa8eb6cc4bec27e6adbf737c3653392da32adc..cea338150d0564ee0103790eb80fc6af270e861b 100644 (file)
@@ -3461,9 +3461,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                return -EINVAL;
 
-       if (rdev->sched_scan_req)
-               return -EINPROGRESS;
-
        if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
                return -EINVAL;
 
@@ -3502,12 +3499,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        if (ie_len > wiphy->max_scan_ie_len)
                return -EINVAL;
 
+       mutex_lock(&rdev->sched_scan_mtx);
+
+       if (rdev->sched_scan_req) {
+               err = -EINPROGRESS;
+               goto out;
+       }
+
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
                        + sizeof(*request->channels) * n_channels
                        + ie_len, GFP_KERNEL);
-       if (!request)
-               return -ENOMEM;
+       if (!request) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        if (n_ssids)
                request->ssids = (void *)&request->channels[n_channels];
@@ -3605,6 +3611,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 out_free:
        kfree(request);
 out:
+       mutex_unlock(&rdev->sched_scan_mtx);
        return err;
 }
 
@@ -3612,12 +3619,17 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
                                   struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       int err;
 
        if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
            !rdev->ops->sched_scan_stop)
                return -EOPNOTSUPP;
 
-       return __cfg80211_stop_sched_scan(rdev, false);
+       mutex_lock(&rdev->sched_scan_mtx);
+       err = __cfg80211_stop_sched_scan(rdev, false);
+       mutex_unlock(&rdev->sched_scan_mtx);
+
+       return err;
 }
 
 static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
@@ -6463,7 +6475,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
        if (addr)
                NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
        NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
-       NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
+       if (key_id != -1)
+               NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
        if (tsc)
                NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
 
index 7a6c67667d708e97400a6974175323f7cfd712e6..ae0c2256ba3beef62c9bc3492d788fdd90882da2 100644 (file)
@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
        rdev = container_of(wk, struct cfg80211_registered_device,
                            sched_scan_results_wk);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
 
        /* we don't have sched_scan_req anymore if the scan is stopping */
        if (rdev->sched_scan_req)
                nl80211_send_sched_scan_results(rdev,
                                                rdev->sched_scan_req->dev);
 
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy)
@@ -123,9 +123,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
        __cfg80211_stop_sched_scan(rdev, true);
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 
@@ -135,7 +135,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
        int err;
        struct net_device *dev;
 
-       ASSERT_RDEV_LOCK(rdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (!rdev->sched_scan_req)
                return 0;
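
Across the cfg80211 hunks above, scheduled-scan state moves from the coarse rdev lock to a dedicated sched_scan_mtx, shrinking the critical sections and letting wdev_cleanup_work() drop the rdev lock before touching scan state. The resulting pattern, pared down (the struct and names are illustrative, modeled on the cfg80211 code):

    /* One mutex guards one piece of state; the __-prefixed helper
     * documents "caller holds the lock" and enforces it under lockdep. */
    struct rdev_like {
            struct mutex sched_scan_mtx;
            void *sched_scan_req;          /* guarded by sched_scan_mtx */
    };

    static int __stop_sched_scan(struct rdev_like *rdev)
    {
            lockdep_assert_held(&rdev->sched_scan_mtx);
            if (!rdev->sched_scan_req)
                    return 0;
            /* ... notify the driver, free the request ... */
            rdev->sched_scan_req = NULL;
            return 0;
    }

    static int stop_sched_scan(struct rdev_like *rdev)
    {
            int err;

            mutex_lock(&rdev->sched_scan_mtx);
            err = __stop_sched_scan(rdev);
            mutex_unlock(&rdev->sched_scan_mtx);
            return err;
    }
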
index 9bec2e8a838c384e420e8eea4f60244f357e545a..5ce74a385525c9a5ef036428670663d1d924e063 100644 (file)
@@ -50,7 +50,7 @@ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
-static int xfrm_bundle_ok(struct xfrm_dst *xdst, int family);
+static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
@@ -2241,7 +2241,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 
 static int stale_bundle(struct dst_entry *dst)
 {
-       return !xfrm_bundle_ok((struct xfrm_dst *)dst, AF_UNSPEC);
+       return !xfrm_bundle_ok((struct xfrm_dst *)dst);
 }
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -2313,7 +2313,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
  * still valid.
  */
 
-static int xfrm_bundle_ok(struct xfrm_dst *first, int family)
+static int xfrm_bundle_ok(struct xfrm_dst *first)
 {
        struct dst_entry *dst = &first->u.dst;
        struct xfrm_dst *last;
index d70f85eb7864f4e35326f41d63c49cc163296b05..9414b9c5b1e4284b9ed90e9fe3ff39cdfc07e2ce 100644 (file)
@@ -1345,6 +1345,8 @@ out:
                        xfrm_state_check_expire(x1);
 
                err = 0;
+               x->km.state = XFRM_STATE_DEAD;
+               __xfrm_state_put(x);
        }
        spin_unlock_bh(&x1->lock);
 
index 3b029cba2bafb8c5c8d9d77d8e5c978d257f7f49..a272356859497fec41796a37e750c4dfc891827d 100755 (executable)
@@ -21,13 +21,15 @@ fi
 # older versions of depmod require the version string to start with three
 # numbers, so we cheat with a symlink here
 depmod_hack_needed=true
-mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE
-if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then
-       if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \
-               -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then
+tmp_dir=$(mktemp -d ${TMPDIR:-/tmp}/depmod.XXXXXX)
+mkdir -p "$tmp_dir/lib/modules/$KERNELRELEASE"
+if "$DEPMOD" -b "$tmp_dir" $KERNELRELEASE 2>/dev/null; then
+       if test -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep" -o \
+               -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep.bin"; then
                depmod_hack_needed=false
        fi
 fi
+rm -rf "$tmp_dir"
 if $depmod_hack_needed; then
        symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
        ln -s "$KERNELRELEASE" "$symlink"
index cd1f779fa51d38eafc4f75bac0649e592703a7fb..1be68269e1c2a2c55e2db2cc46d206539c5f79f3 100644 (file)
@@ -474,17 +474,11 @@ struct cgroup_subsys devices_subsys = {
        .subsys_id = devices_subsys_id,
 };
 
-int devcgroup_inode_permission(struct inode *inode, int mask)
+int __devcgroup_inode_permission(struct inode *inode, int mask)
 {
        struct dev_cgroup *dev_cgroup;
        struct dev_whitelist_item *wh;
 
-       dev_t device = inode->i_rdev;
-       if (!device)
-               return 0;
-       if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
-               return 0;
-
        rcu_read_lock();
 
        dev_cgroup = task_devcgroup(current);
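
The double-underscore rename goes with hoisting the cheap early-out checks into an inline wrapper, so most inodes never reach the out-of-line RCU walk. A plausible reconstruction of that wrapper (the real one lives in the matching header, which this hunk does not show):

    /* Hypothetical header-side wrapper: reject non-device inodes before
     * calling into the slow path above. */
    static inline int devcgroup_inode_permission(struct inode *inode, int mask)
    {
            if (!inode->i_rdev)
                    return 0;
            if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
                    return 0;
            return __devcgroup_inode_permission(inode, mask);
    }
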
index d31862e0aa1c00f415d447e12e780482e2bdbca5..82465328c39b94b5264fb65ba27399f6cdf4d68f 100644 (file)
@@ -71,9 +71,8 @@ EXPORT_SYMBOL(complete_request_key);
  * This is called in context of freshly forked kthread before kernel_execve(),
  * so we can simply install the desired session_keyring at this point.
  */
-static int umh_keys_init(struct subprocess_info *info)
+static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
 {
-       struct cred *cred = (struct cred*)current_cred();
        struct key *keyring = info->data;
 
        return install_session_keyring_to_cred(cred, keyring);
@@ -470,7 +469,7 @@ static struct key *construct_key_and_link(struct key_type *type,
        } else if (ret == -EINPROGRESS) {
                ret = 0;
        } else {
-               key = ERR_PTR(ret);
+               goto couldnt_alloc_key;
        }
 
        key_put(dest_keyring);
@@ -480,6 +479,7 @@ static struct key *construct_key_and_link(struct key_type *type,
 construction_failed:
        key_negate_and_link(key, key_negative_timeout, NULL, NULL);
        key_put(key);
+couldnt_alloc_key:
        key_put(dest_keyring);
        kleave(" = %d", ret);
        return ERR_PTR(ret);
index 6e24091818950edc9c298241be2d28a751b39f83..bfee60c4d4c0e21386a39982f020350b446c4835 100644 (file)
@@ -599,4 +599,4 @@ module_exit(atmel_abdac_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Driver for Atmel Audio Bitstream DAC (ABDAC)");
-MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
index b310702c646e40ef59afedeed630dc320b5570be..ac35222ad0dd21993f0bd9d89a070ed3fa5fa329 100644 (file)
@@ -1199,4 +1199,4 @@ module_exit(atmel_ac97c_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Driver for Atmel AC97 controller");
-MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
index 86ee16ca365e6fc612a4ad25baf6ecbf9fbc6893..440030818db70c88c582d0047790baed074297c9 100644 (file)
@@ -209,6 +209,7 @@ static void isight_packet(struct fw_iso_context *context, u32 cycle,
                isight->packet_index = -1;
                return;
        }
+       fw_iso_context_queue_flush(isight->context);
 
        if (++index >= QUEUE_LENGTH)
                index = 0;
index 2ca6f4f85b412a18502ac0f6370d19cc604d7d56..e3569bdd3b64e704f82c9e621110abe8e0853f8d 100644 (file)
@@ -27,7 +27,6 @@
 #include "hpioctl.h"
 
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/slab.h>
index f16bc8aad6ed205a7f7b983e9486e4437090c875..e083122ca55af550f6d4b72e6d5cea5c8c2e1a05 100644 (file)
@@ -149,7 +149,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
                        &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
                desc->addr = cpu_to_le32(addr);
                desc->size = cpu_to_le32(period_bytes);
-               desc->ctlreserved = cpu_to_le32(PRD_EOP);
+               desc->ctlreserved = cpu_to_le16(PRD_EOP);
                desc_addr += sizeof(struct cs5535audio_dma_desc);
                addr += period_bytes;
        }
@@ -157,7 +157,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
        lastdesc = &((struct cs5535audio_dma_desc *) dma->desc_buf.area)[periods];
        lastdesc->addr = cpu_to_le32((u32) dma->desc_buf.addr);
        lastdesc->size = 0;
-       lastdesc->ctlreserved = cpu_to_le32(PRD_JMP);
+       lastdesc->ctlreserved = cpu_to_le16(PRD_JMP);
        jmpprd_addr = cpu_to_le32(lastdesc->addr +
                                  (sizeof(struct cs5535audio_dma_desc)*periods));
 
index 5e619a84da061295fa4758e313935e89aea8009d..15f0161ce4a2342f9eb9b5eca86392a9481eacc2 100644 (file)
@@ -1440,6 +1440,14 @@ static struct snd_emu_chip_details emu_chip_details[] = {
         .ca0102_chip = 1,
         .spk71 = 1,
         .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */
+       /* EMU0404 PCIe */
+       {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40051102,
+        .driver = "Audigy2", .name = "E-mu 0404 PCIe [MAEM8984]",
+        .id = "EMU0404",
+        .emu10k2_chip = 1,
+        .ca0108_chip = 1,
+        .spk71 = 1,
+        .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 PCIe ver_03 */
        /* Note that all E-mu cards require kernel 2.6 or newer. */
        {.vendor = 0x1102, .device = 0x0008,
         .driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]",
index f1de1bac042c260e5612a2f55f9ff13e79a30406..55f0647458c70aebb79028d3131e6f520d63eb19 100644 (file)
@@ -50,7 +50,12 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable);
 int snd_hda_attach_beep_device(struct hda_codec *codec, int nid);
 void snd_hda_detach_beep_device(struct hda_codec *codec);
 #else
-#define snd_hda_attach_beep_device(...)                0
-#define snd_hda_detach_beep_device(...)
+static inline int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
+{
+       return 0;
+}
+static inline void snd_hda_detach_beep_device(struct hda_codec *codec)
+{
+}
 #endif
 #endif
index b05f7be9dc1b154017838b4fda19486014ad6f47..e3e853153d14f51eb361fa5cd385504c13f2bbe9 100644 (file)
@@ -294,7 +294,7 @@ static int hdmi_update_eld(struct hdmi_eld *e,
                snd_printd(KERN_INFO "HDMI: out of range MNL %d\n", mnl);
                goto out_fail;
        } else
-               strlcpy(e->monitor_name, buf + ELD_FIXED_BYTES, mnl);
+               strlcpy(e->monitor_name, buf + ELD_FIXED_BYTES, mnl + 1);
 
        for (i = 0; i < e->sad_count; i++) {
                if (ELD_FIXED_BYTES + mnl + 3 * (i + 1) > size) {
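
strlcpy()'s size argument counts the whole destination buffer including the terminating NUL, so it copies at most size - 1 characters; passing mnl silently dropped the last character of the monitor name, and mnl + 1 copies all of it. The contract in miniature, with a userspace stand-in since glibc lacks strlcpy():

    #include <stdio.h>
    #include <string.h>

    /* Same contract as the kernel's strlcpy(). */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);
        if (size) {
            size_t n = len < size - 1 ? len : size - 1; /* room for NUL */
            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char name[16];
        size_t mnl = 4;                    /* monitor name length, e.g. "DELL" */

        my_strlcpy(name, "DELL", mnl);     /* old call: "DEL", truncated */
        printf("%s\n", name);
        my_strlcpy(name, "DELL", mnl + 1); /* fixed call: "DELL" */
        printf("%s\n", name);
        return 0;
    }
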
index 694b9daf691f74208b88a069623586af1ef92c0b..7bbc5f237a5e7d8f413ee5f1b00275a476947720 100644 (file)
@@ -3074,6 +3074,7 @@ static const char * const cxt5066_models[CXT5066_MODELS] = {
 };
 
 static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+       SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO),
        SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
        SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
@@ -4389,6 +4390,8 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
          .patch = patch_cxt5066 },
        { .id = 0x14f15069, .name = "CX20585",
          .patch = patch_cxt5066 },
+       { .id = 0x14f1506c, .name = "CX20588",
+         .patch = patch_cxt5066 },
        { .id = 0x14f1506e, .name = "CX20590",
          .patch = patch_cxt5066 },
        { .id = 0x14f15097, .name = "CX20631",
@@ -4417,6 +4420,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
 MODULE_ALIAS("snd-hda-codec-id:14f15067");
 MODULE_ALIAS("snd-hda-codec-id:14f15068");
 MODULE_ALIAS("snd-hda-codec-id:14f15069");
+MODULE_ALIAS("snd-hda-codec-id:14f1506c");
 MODULE_ALIAS("snd-hda-codec-id:14f1506e");
 MODULE_ALIAS("snd-hda-codec-id:14f15097");
 MODULE_ALIAS("snd-hda-codec-id:14f15098");
index 43fcfbd32847019931bc774bf00ecd20b2887a9b..b48fb43b5448bd7d400c75de0acd6355d7156f28 100644 (file)
@@ -2715,17 +2715,30 @@ typedef int (*getput_call_t)(struct snd_kcontrol *kcontrol,
 
 static int alc_cap_getput_caller(struct snd_kcontrol *kcontrol,
                                 struct snd_ctl_elem_value *ucontrol,
-                                getput_call_t func)
+                                getput_call_t func, bool check_adc_switch)
 {
        struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
        struct alc_spec *spec = codec->spec;
-       unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
-       int err;
+       int i, err = 0;
 
        mutex_lock(&codec->control_mutex);
-       kcontrol->private_value = HDA_COMPOSE_AMP_VAL(spec->adc_nids[adc_idx],
-                                                     3, 0, HDA_INPUT);
-       err = func(kcontrol, ucontrol);
+       if (check_adc_switch && spec->dual_adc_switch) {
+               for (i = 0; i < spec->num_adc_nids; i++) {
+                       kcontrol->private_value =
+                               HDA_COMPOSE_AMP_VAL(spec->adc_nids[i],
+                                                   3, 0, HDA_INPUT);
+                       err = func(kcontrol, ucontrol);
+                       if (err < 0)
+                               goto error;
+               }
+       } else {
+               i = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+               kcontrol->private_value =
+                       HDA_COMPOSE_AMP_VAL(spec->adc_nids[i],
+                                           3, 0, HDA_INPUT);
+               err = func(kcontrol, ucontrol);
+       }
+ error:
        mutex_unlock(&codec->control_mutex);
        return err;
 }
@@ -2734,14 +2747,14 @@ static int alc_cap_vol_get(struct snd_kcontrol *kcontrol,
                           struct snd_ctl_elem_value *ucontrol)
 {
        return alc_cap_getput_caller(kcontrol, ucontrol,
-                                    snd_hda_mixer_amp_volume_get);
+                                    snd_hda_mixer_amp_volume_get, false);
 }
 
 static int alc_cap_vol_put(struct snd_kcontrol *kcontrol,
                           struct snd_ctl_elem_value *ucontrol)
 {
        return alc_cap_getput_caller(kcontrol, ucontrol,
-                                    snd_hda_mixer_amp_volume_put);
+                                    snd_hda_mixer_amp_volume_put, true);
 }
 
 /* capture mixer elements */
@@ -2751,14 +2764,14 @@ static int alc_cap_sw_get(struct snd_kcontrol *kcontrol,
                          struct snd_ctl_elem_value *ucontrol)
 {
        return alc_cap_getput_caller(kcontrol, ucontrol,
-                                    snd_hda_mixer_amp_switch_get);
+                                    snd_hda_mixer_amp_switch_get, false);
 }
 
 static int alc_cap_sw_put(struct snd_kcontrol *kcontrol,
                          struct snd_ctl_elem_value *ucontrol)
 {
        return alc_cap_getput_caller(kcontrol, ucontrol,
-                                    snd_hda_mixer_amp_switch_put);
+                                    snd_hda_mixer_amp_switch_put, true);
 }
 
 #define _DEFINE_CAPMIX(num) \
@@ -4883,7 +4896,6 @@ static const struct snd_pci_quirk alc880_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0xe309, "ULI", ALC880_3ST_DIG),
        SND_PCI_QUIRK(0x1025, 0xe310, "ULI", ALC880_3ST),
        SND_PCI_QUIRK(0x1039, 0x1234, NULL, ALC880_6ST_DIG),
-       SND_PCI_QUIRK(0x103c, 0x2a09, "HP", ALC880_5ST),
        SND_PCI_QUIRK(0x1043, 0x10b3, "ASUS W1V", ALC880_ASUS_W1V),
        SND_PCI_QUIRK(0x1043, 0x10c2, "ASUS W6A", ALC880_ASUS_DIG),
        SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS Wxx", ALC880_ASUS_DIG),
@@ -12600,6 +12612,7 @@ static const struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = {
  */
 enum {
        PINFIX_FSC_H270,
+       PINFIX_HP_Z200,
 };
 
 static const struct alc_fixup alc262_fixups[] = {
@@ -12612,9 +12625,17 @@ static const struct alc_fixup alc262_fixups[] = {
                        { }
                }
        },
+       [PINFIX_HP_Z200] = {
+               .type = ALC_FIXUP_PINS,
+               .v.pins = (const struct alc_pincfg[]) {
+                       { 0x16, 0x99130120 }, /* internal speaker */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc262_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200", PINFIX_HP_Z200),
        SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", PINFIX_FSC_H270),
        {}
 };
@@ -12731,6 +12752,8 @@ static const struct snd_pci_quirk alc262_cfg_tbl[] = {
                           ALC262_HP_BPC),
        SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series",
                           ALC262_HP_BPC),
+       SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200",
+                          ALC262_AUTO),
        SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series",
                           ALC262_HP_BPC),
        SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL),
@@ -13316,9 +13339,8 @@ static void alc268_acer_lc_setup(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
        spec->autocfg.hp_pins[0] = 0x15;
        spec->autocfg.speaker_pins[0] = 0x14;
-       spec->automute_mixer_nid[0] = 0x0f;
        spec->automute = 1;
-       spec->automute_mode = ALC_AUTOMUTE_MIXER;
+       spec->automute_mode = ALC_AUTOMUTE_AMP;
        spec->ext_mic.pin = 0x18;
        spec->ext_mic.mux_idx = 0;
        spec->int_mic.pin = 0x12;
@@ -13873,7 +13895,6 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
        SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO),
        SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA),
-       SND_PCI_QUIRK(0x152d, 0x0763, "Diverse (CPR2000)", ALC268_ACER),
        SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1),
        {}
 };
index 605c99e1e520de5d205a76046764d7e5f0f5a852..f43bb0eaed8b8e3c6f75ca08c7179267937c3be1 100644 (file)
@@ -745,12 +745,23 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
        struct via_spec *spec = codec->spec;
        hda_nid_t nid = kcontrol->private_value;
        unsigned int pinsel = ucontrol->value.enumerated.item[0];
+       unsigned int parm0, parm1;
        /* Get Independent Mode index of headphone pin widget */
        spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel
                ? 1 : 0;
-       if (spec->codec_type == VT1718S)
+       if (spec->codec_type == VT1718S) {
                snd_hda_codec_write(codec, nid, 0,
                                    AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0);
+               /* Set correct mute switch for MW3 */
+               parm0 = spec->hp_independent_mode ?
+                              AMP_IN_UNMUTE(0) : AMP_IN_MUTE(0);
+               parm1 = spec->hp_independent_mode ?
+                              AMP_IN_MUTE(1) : AMP_IN_UNMUTE(1);
+               snd_hda_codec_write(codec, 0x1b, 0,
+                                   AC_VERB_SET_AMP_GAIN_MUTE, parm0);
+               snd_hda_codec_write(codec, 0x1b, 0,
+                                   AC_VERB_SET_AMP_GAIN_MUTE, parm1);
+       }
        else
                snd_hda_codec_write(codec, nid, 0,
                                    AC_VERB_SET_CONNECT_SEL, pinsel);
@@ -832,10 +843,13 @@ static int via_hp_build(struct hda_codec *codec)
        knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
        knew->private_value = nid;
 
-       knew = via_clone_control(spec, &via_hp_mixer[1]);
-       if (knew == NULL)
-               return -ENOMEM;
-       knew->subdevice = side_mute_channel(spec);
+       nid = side_mute_channel(spec);
+       if (nid) {
+               knew = via_clone_control(spec, &via_hp_mixer[1]);
+               if (knew == NULL)
+                       return -ENOMEM;
+               knew->subdevice = nid;
+       }
 
        return 0;
 }
@@ -4280,9 +4294,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
        {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
        {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
        {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5)},
-
-       /* Setup default input of Front HP to MW9 */
-       {0x28, AC_VERB_SET_CONNECT_SEL, 0x1},
        /* PW9 PW10 Output enable */
        {0x2d, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
        {0x2e, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
@@ -4291,10 +4302,10 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
        /* Enable Boost Volume backdoor */
        {0x1, 0xf88, 0x8},
        /* MW0/1/2/3/4: un-mute index 0 (AOWx), mute index 1 (MW9) */
-       {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+       {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
        {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
        {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
        {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
        {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
        {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
@@ -4304,8 +4315,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
        /* set MUX1 = 2 (AOW4), MUX2 = 1 (AOW3) */
        {0x34, AC_VERB_SET_CONNECT_SEL, 0x2},
        {0x35, AC_VERB_SET_CONNECT_SEL, 0x1},
-       /* Unmute MW4's index 0 */
-       {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
        { }
 };
 
@@ -4453,6 +4462,19 @@ static int vt1718S_auto_create_multi_out_ctls(struct via_spec *spec,
                        if (err < 0)
                                return err;
                } else if (i == AUTO_SEQ_FRONT) {
+                       /* add control to mixer index 0 */
+                       err = via_add_control(spec, VIA_CTL_WIDGET_VOL,
+                                             "Master Front Playback Volume",
+                                             HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+                                                                 HDA_INPUT));
+                       if (err < 0)
+                               return err;
+                       err = via_add_control(spec, VIA_CTL_WIDGET_MUTE,
+                                             "Master Front Playback Switch",
+                                             HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+                                                                 HDA_INPUT));
+                       if (err < 0)
+                               return err;
                        /* Front */
                        sprintf(name, "%s Playback Volume", chname[i]);
                        err = via_add_control(
index 34b24286d279d542e3d24e80a3531e8cf95bc8f5..2692e5ae5f2daa53822242c864a4f1b5ddf6a8ff 100644 (file)
@@ -445,7 +445,7 @@ static void lola_reset_setups(struct lola *chip)
        lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */
 }
 
-static int lola_parse_tree(struct lola *chip)
+static int __devinit lola_parse_tree(struct lola *chip)
 {
        unsigned int val;
        int nid, err;
index 949691a876d3635798c6000da1b651d13684b1a6..c8e402fc378247ac3cf5dcc1cd2838fb40172e85 100644 (file)
@@ -521,6 +521,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 #define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024)
 
 /* revisions >= 230 indicate AES32 card */
+#define HDSPM_MADI_OLD_REV     207
 #define HDSPM_MADI_REV         210
 #define HDSPM_RAYDAT_REV       211
 #define HDSPM_AIO_REV          212
@@ -895,11 +896,11 @@ struct hdspm {
        unsigned char max_channels_in;
        unsigned char max_channels_out;
 
-       char *channel_map_in;
-       char *channel_map_out;
+       signed char *channel_map_in;
+       signed char *channel_map_out;
 
-       char *channel_map_in_ss, *channel_map_in_ds, *channel_map_in_qs;
-       char *channel_map_out_ss, *channel_map_out_ds, *channel_map_out_qs;
+       signed char *channel_map_in_ss, *channel_map_in_ds, *channel_map_in_qs;
+       signed char *channel_map_out_ss, *channel_map_out_ds, *channel_map_out_qs;
 
        char **port_names_in;
        char **port_names_out;
@@ -1143,7 +1144,7 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
 
                /* if wordclock has synced freq and wordclock is valid */
                if ((status2 & HDSPM_wcLock) != 0 &&
-                               (status & HDSPM_SelSyncRef0) == 0) {
+                               (status2 & HDSPM_SelSyncRef0) == 0) {
 
                        rate_bits = status2 & HDSPM_wcFreqMask;
 
@@ -1639,12 +1640,14 @@ static int snd_hdspm_midi_input_read (struct hdspm_midi *hmidi)
                }
        }
        hmidi->pending = 0;
+       spin_unlock_irqrestore(&hmidi->lock, flags);
 
+       spin_lock_irqsave(&hmidi->hdspm->lock, flags);
        hmidi->hdspm->control_register |= hmidi->ie;
        hdspm_write(hmidi->hdspm, HDSPM_controlRegister,
                    hmidi->hdspm->control_register);
+       spin_unlock_irqrestore(&hmidi->hdspm->lock, flags);
 
-       spin_unlock_irqrestore (&hmidi->lock, flags);
        return snd_hdspm_midi_output_write (hmidi);
 }
 
@@ -6377,6 +6380,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
 
        switch (hdspm->firmware_rev) {
        case HDSPM_MADI_REV:
+       case HDSPM_MADI_OLD_REV:
                hdspm->io_type = MADI;
                hdspm->card_name = "RME MADI";
                hdspm->midiPorts = 3;
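
Besides recognizing the old MADI firmware revision, this file's MIDI hunk above fixes lock scoping: the per-port hmidi->lock is now released before the card-wide hdspm->lock is taken, so the control register is updated under the lock that actually guards it and the two locks never nest. The shape of that fix, as a kernel-style fragment with hypothetical port/card names:

    /* Finish with the fine-grained lock before taking the coarse one,
     * rather than nesting them. */
    spin_lock_irqsave(&port->lock, flags);
    /* ... consume the port's FIFO state ... */
    port->pending = 0;
    spin_unlock_irqrestore(&port->lock, flags);

    spin_lock_irqsave(&card->lock, flags);
    card->control_register |= port->irq_enable_bit;
    writel(card->control_register, card->iobase + CONTROL_REG);
    spin_unlock_irqrestore(&card->lock, flags);
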
index b5101efd1c8733bd002cf5ae700f5807b665ebed..f1fd95bb6416ce0b44e750e356d4ddc95346f31c 100644 (file)
@@ -138,11 +138,20 @@ static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream)
        pr_debug("%s enter\n", __func__);
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                diff = sport_curr_offset_tx(sport);
-               frames = bytes_to_frames(substream->runtime, diff);
        } else {
                diff = sport_curr_offset_rx(sport);
-               frames = bytes_to_frames(substream->runtime, diff);
        }
+
+       /*
+        * TX at least can report one frame beyond the end of the
+        * buffer if we hit the wraparound case - clamp to within the
+        * buffer as the ALSA APIs require.
+        */
+       if (diff == snd_pcm_lib_buffer_bytes(substream))
+               diff = 0;
+
+       frames = bytes_to_frames(substream->runtime, diff);
+
        return frames;
 }
 
index 4be0570e3f1fc167fad8682e27f33dd7c810dd30..65f46047b1cbd21d0d9441966ae6ceb6c5c6120e 100644 (file)
@@ -357,7 +357,7 @@ static int ak4642_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        default:
                return -EINVAL;
        }
-       snd_soc_update_bits(codec, PW_MGMT2, MS, data);
+       snd_soc_update_bits(codec, PW_MGMT2, MS | MCKO | PMPLL, data);
        snd_soc_update_bits(codec, MD_CTL1, BCKO_MASK, bcko);
 
        /* format type */
index e2a7608d39449cb53151685f2dafaf21d13f8d21..7859bdcc93db4064cad1474a8fe2533e19026f1e 100644 (file)
@@ -161,10 +161,18 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
                dev_dbg(&aic26->spi->dev, "bad format\n"); return -EINVAL;
        }
 
-       /* Configure PLL */
+       /*
+        * Configure PLL:
+        * fsref = (mclk * PLLM) / 2048
+        * where PLLM = J.DDDD (the DDDD register ranges 0-9999, decimal)
+        */
        pval = 1;
-       jval = (fsref == 44100) ? 7 : 8;
-       dval = (fsref == 44100) ? 5264 : 1920;
+       /* compute J portion of multiplier */
+       jval = fsref / (aic26->mclk / 2048);
+       /* compute fractional DDDD component of multiplier */
+       dval = fsref - (jval * (aic26->mclk / 2048));
+       dval = (10000 * dval) / (aic26->mclk / 2048);
+       dev_dbg(&aic26->spi->dev, "Setting PLLM to %d.%04d\n", jval, dval);
        qval = 0;
        reg = 0x8000 | qval << 11 | pval << 8 | jval << 2;
        aic26_reg_write(codec, AIC26_REG_PLL_PROG1, reg);
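
A worked example of the arithmetic, assuming a hypothetical 12 MHz master clock (the value the old hard-coded 7.5264 and 8.1920 constants implied); the integer truncation of mclk / 2048 makes the computed fractions differ from the old constants by a few counts:

    /* Reproduces the driver's integer PLL math in userspace. */
    #include <stdio.h>

    int main(void)
    {
        int mclk = 12000000;               /* hypothetical master clock */
        int fsrefs[] = { 44100, 48000 };

        for (int i = 0; i < 2; i++) {
            int fsref = fsrefs[i];
            int base = mclk / 2048;        /* 5859 */
            int jval = fsref / base;       /* integer part J */
            int dval = fsref - jval * base;
            dval = (10000 * dval) / base;  /* fractional DDDD */
            printf("fsref=%d -> PLLM=%d.%04d\n", fsref, jval, dval);
            /* prints 7.5268 and 8.1925 */
        }
        return 0;
    }
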
index c3d96fc8c26734855de19a32bd1747531e914df6..789453d44ec5d212d2974d8c1db28dc95b81b52f 100644 (file)
@@ -1114,12 +1114,19 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
 
                /* Sync reg_cache with the hardware */
                codec->cache_only = 0;
-               for (i = 0; i < ARRAY_SIZE(aic3x_reg); i++)
+               for (i = AIC3X_SAMPLE_RATE_SEL_REG; i < ARRAY_SIZE(aic3x_reg); i++)
                        snd_soc_write(codec, i, cache[i]);
                if (aic3x->model == AIC3X_MODEL_3007)
                        aic3x_init_3007(codec);
                codec->cache_sync = 0;
        } else {
+               /*
+                * Do a soft reset of this codec instance to clear possible
+                * VDD leakage currents in case the supply regulators
+                * remain on.
+                */
+               snd_soc_write(codec, AIC3X_RESET, SOFT_RESET);
+               codec->cache_sync = 1;
                aic3x->power = 0;
                /* HW writes are needless when bias is off */
                codec->cache_only = 1;
index 2dc964b55e4fa15ce23fec72fbc7c7925f8fbc1e..76b4361e9b8042c112a32b14b290304e17761604 100644 (file)
@@ -175,6 +175,7 @@ static const struct snd_kcontrol_new wm8731_input_mux_controls =
 SOC_DAPM_ENUM("Input Select", wm8731_insel_enum);
 
 static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = {
+SND_SOC_DAPM_SUPPLY("ACTIVE", WM8731_ACTIVE, 0, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("OSC", WM8731_PWR, 5, 1, NULL, 0),
 SND_SOC_DAPM_MIXER("Output Mixer", WM8731_PWR, 4, 1,
        &wm8731_output_mixer_controls[0],
@@ -204,6 +205,8 @@ static int wm8731_check_osc(struct snd_soc_dapm_widget *source,
 static const struct snd_soc_dapm_route wm8731_intercon[] = {
        {"DAC", NULL, "OSC", wm8731_check_osc},
        {"ADC", NULL, "OSC", wm8731_check_osc},
+       {"DAC", NULL, "ACTIVE"},
+       {"ADC", NULL, "ACTIVE"},
 
        /* output mixer */
        {"Output Mixer", "Line Bypass Switch", "Line Input"},
@@ -315,29 +318,6 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static int wm8731_pcm_prepare(struct snd_pcm_substream *substream,
-                             struct snd_soc_dai *dai)
-{
-       struct snd_soc_codec *codec = dai->codec;
-
-       /* set active */
-       snd_soc_write(codec, WM8731_ACTIVE, 0x0001);
-
-       return 0;
-}
-
-static void wm8731_shutdown(struct snd_pcm_substream *substream,
-                           struct snd_soc_dai *dai)
-{
-       struct snd_soc_codec *codec = dai->codec;
-
-       /* deactivate */
-       if (!codec->active) {
-               udelay(50);
-               snd_soc_write(codec, WM8731_ACTIVE, 0x0);
-       }
-}
-
 static int wm8731_mute(struct snd_soc_dai *dai, int mute)
 {
        struct snd_soc_codec *codec = dai->codec;
@@ -480,7 +460,6 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
                snd_soc_write(codec, WM8731_PWR, reg | 0x0040);
                break;
        case SND_SOC_BIAS_OFF:
-               snd_soc_write(codec, WM8731_ACTIVE, 0x0);
                snd_soc_write(codec, WM8731_PWR, 0xffff);
                regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
                                       wm8731->supplies);
@@ -496,9 +475,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
        SNDRV_PCM_FMTBIT_S24_LE)
 
 static struct snd_soc_dai_ops wm8731_dai_ops = {
-       .prepare        = wm8731_pcm_prepare,
        .hw_params      = wm8731_hw_params,
-       .shutdown       = wm8731_shutdown,
        .digital_mute   = wm8731_mute,
        .set_sysclk     = wm8731_set_dai_sysclk,
        .set_fmt        = wm8731_set_dai_fmt,
index 3c2ee1bb73cdb8c85d21372b41d3598cf7b4bcad..6af23d06870f8a03d1a8a1910577d6a13fef67a3 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
index 970a95c5360bfc271fd9a43c0a259bedd2d0d7a9..83014a7c2e142ccf0a63cafa3c38519b4f2fd5c8 100644 (file)
@@ -1190,7 +1190,6 @@ SND_SOC_DAPM_INPUT("DMIC1DAT"),
 SND_SOC_DAPM_INPUT("DMIC2DAT"),
 SND_SOC_DAPM_INPUT("Clock"),
 
-SND_SOC_DAPM_MICBIAS("MICBIAS", WM8994_MICBIAS, 2, 0),
 SND_SOC_DAPM_SUPPLY_S("MICBIAS Supply", 1, SND_SOC_NOPM, 0, 0, micbias_ev,
                      SND_SOC_DAPM_PRE_PMU),
 
@@ -1509,8 +1508,10 @@ static const struct snd_soc_dapm_route wm8994_revd_intercon[] = {
        { "AIF2DACDAT", NULL, "AIF1DACDAT" },
        { "AIF1ADCDAT", NULL, "AIF2ADCDAT" },
        { "AIF2ADCDAT", NULL, "AIF1ADCDAT" },
-       { "MICBIAS", NULL, "CLK_SYS" },
-       { "MICBIAS", NULL, "MICBIAS Supply" },
+       { "MICBIAS1", NULL, "CLK_SYS" },
+       { "MICBIAS1", NULL, "MICBIAS Supply" },
+       { "MICBIAS2", NULL, "CLK_SYS" },
+       { "MICBIAS2", NULL, "MICBIAS Supply" },
 };
 
 static const struct snd_soc_dapm_route wm8994_intercon[] = {
@@ -1713,6 +1714,8 @@ static int _wm8994_set_fll(struct snd_soc_codec *codec, int id, int src,
                snd_soc_update_bits(codec, WM8994_FLL1_CONTROL_1 + reg_offset,
                                    WM8994_FLL1_ENA | WM8994_FLL1_FRAC,
                                    reg);
+
+               msleep(5);
        }
 
        wm8994->fll[id].in = freq_in;
@@ -2761,7 +2764,7 @@ static void wm8958_default_micdet(u16 status, void *data)
        report = SND_JACK_MICROPHONE;
 
        /* Everything else is buttons; just assign slots */
-       if (status & 0x1c0)
+       if (status & 0x1c)
                report |= SND_JACK_BTN_0;
 
 done:
index d8f130d39dd904c341d539fbe5457fe976b285b1..bb699bb55a502c041b9d47fff2f5c396c8c431c8 100644 (file)
@@ -11,9 +11,6 @@ menuconfig SND_IMX_SOC
 
 if SND_IMX_SOC
 
-config SND_MXC_SOC_SSI
-       tristate
-
 config SND_MXC_SOC_FIQ
        tristate
 
@@ -24,7 +21,6 @@ config SND_MXC_SOC_WM1133_EV1
        tristate "Audio on the i.MX31ADS with WM1133-EV1 fitted"
        depends on MACH_MX31ADS_WM1133_EV1 && EXPERIMENTAL
        select SND_SOC_WM8350
-       select SND_MXC_SOC_SSI
        select SND_MXC_SOC_FIQ
        help
          Enable support for audio on the i.MX31ADS with the WM1133-EV1
@@ -34,7 +30,6 @@ config SND_SOC_MX27VIS_AIC32X4
        tristate "SoC audio support for Visstrim M10 boards"
        depends on MACH_IMX27_VISSTRIM_M10
        select SND_SOC_TVL320AIC32X4
-       select SND_MXC_SOC_SSI
        select SND_MXC_SOC_MX2
        help
          Say Y if you want to add support for SoC audio on Visstrim M10
@@ -44,7 +39,6 @@ config SND_SOC_PHYCORE_AC97
        tristate "SoC Audio support for Phytec phyCORE (and phyCARD) boards"
        depends on MACH_PCM043 || MACH_PCA100
        select SND_SOC_WM9712
-       select SND_MXC_SOC_SSI
        select SND_MXC_SOC_FIQ
        help
          Say Y if you want to add support for SoC audio on Phytec phyCORE
@@ -57,7 +51,6 @@ config SND_SOC_EUKREA_TLV320
                || MACH_EUKREA_MBIMXSD35_BASEBOARD \
                || MACH_EUKREA_MBIMXSD51_BASEBOARD
        select SND_SOC_TLV320AIC23
-       select SND_MXC_SOC_SSI
        select SND_MXC_SOC_FIQ
        help
          Enable I2S based access to the TLV320AIC23B codec attached
index aab7765f401a0904397c9c22a801fc10670b73f4..4173b3d87f979d2757f1498bd503f15b07195542 100644 (file)
@@ -337,3 +337,5 @@ static void __exit snd_imx_pcm_exit(void)
        platform_driver_unregister(&imx_pcm_driver);
 }
 module_exit(snd_imx_pcm_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-pcm-audio");
index 5b13feca753732a6664969308e0794b161936aa8..61fceb09cdb5bbbf426270696bbf48b058f4d658 100644 (file)
@@ -774,4 +774,4 @@ module_exit(imx_ssi_exit);
 MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>");
 MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface");
 MODULE_LICENSE("GPL");
-
+MODULE_ALIAS("platform:imx-ssi");
index 2ce0b2d891d5081df50ed6c677ef835eab845b94..fab20a54e86340f1b5d2536cc9e3002613ef1c7d 100644 (file)
@@ -95,14 +95,14 @@ static int pxa2xx_soc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
        if (!card->dev->coherent_dma_mask)
                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
-       if (dai->driver->playback.channels_min) {
+       if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
                        SNDRV_PCM_STREAM_PLAYBACK);
                if (ret)
                        goto out;
        }
 
-       if (dai->driver->capture.channels_min) {
+       if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
                ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
                        SNDRV_PCM_STREAM_CAPTURE);
                if (ret)
index d6f4703b3c0796fd5869f656904d232818e86298..770a71a15366252066a773357028e862abe9101d 100644 (file)
@@ -97,7 +97,7 @@ static int fsi_ak4642_remove(struct platform_device *pdev)
 
 static struct fsi_ak4642_data fsi_a_ak4642 = {
        .name           = "AK4642",
-       .card           = "FSIA (AK4642)",
+       .card           = "FSIA-AK4642",
        .cpu_dai        = "fsia-dai",
        .codec          = "ak4642-codec.0-0012",
        .platform       = "sh_fsi.0",
@@ -106,7 +106,7 @@ static struct fsi_ak4642_data fsi_a_ak4642 = {
 
 static struct fsi_ak4642_data fsi_b_ak4642 = {
        .name           = "AK4642",
-       .card           = "FSIB (AK4642)",
+       .card           = "FSIB-AK4642",
        .cpu_dai        = "fsib-dai",
        .codec          = "ak4642-codec.0-0012",
        .platform       = "sh_fsi.0",
@@ -115,7 +115,7 @@ static struct fsi_ak4642_data fsi_b_ak4642 = {
 
 static struct fsi_ak4642_data fsi_a_ak4643 = {
        .name           = "AK4643",
-       .card           = "FSIA (AK4643)",
+       .card           = "FSIA-AK4643",
        .cpu_dai        = "fsia-dai",
        .codec          = "ak4642-codec.0-0013",
        .platform       = "sh_fsi.0",
@@ -124,7 +124,7 @@ static struct fsi_ak4642_data fsi_a_ak4643 = {
 
 static struct fsi_ak4642_data fsi_b_ak4643 = {
        .name           = "AK4643",
-       .card           = "FSIB (AK4643)",
+       .card           = "FSIB-AK4643",
        .cpu_dai        = "fsib-dai",
        .codec          = "ak4642-codec.0-0013",
        .platform       = "sh_fsi.0",
@@ -133,7 +133,7 @@ static struct fsi_ak4642_data fsi_b_ak4643 = {
 
 static struct fsi_ak4642_data fsi2_a_ak4642 = {
        .name           = "AK4642",
-       .card           = "FSI2A (AK4642)",
+       .card           = "FSI2A-AK4642",
        .cpu_dai        = "fsia-dai",
        .codec          = "ak4642-codec.0-0012",
        .platform       = "sh_fsi2",
@@ -142,7 +142,7 @@ static struct fsi_ak4642_data fsi2_a_ak4642 = {
 
 static struct fsi_ak4642_data fsi2_b_ak4642 = {
        .name           = "AK4642",
-       .card           = "FSI2B (AK4642)",
+       .card           = "FSI2B-AK4642",
        .cpu_dai        = "fsib-dai",
        .codec          = "ak4642-codec.0-0012",
        .platform       = "sh_fsi2",
@@ -151,7 +151,7 @@ static struct fsi_ak4642_data fsi2_b_ak4642 = {
 
 static struct fsi_ak4642_data fsi2_a_ak4643 = {
        .name           = "AK4643",
-       .card           = "FSI2A (AK4643)",
+       .card           = "FSI2A-AK4643",
        .cpu_dai        = "fsia-dai",
        .codec          = "ak4642-codec.0-0013",
        .platform       = "sh_fsi2",
@@ -160,7 +160,7 @@ static struct fsi_ak4642_data fsi2_a_ak4643 = {
 
 static struct fsi_ak4642_data fsi2_b_ak4643 = {
        .name           = "AK4643",
-       .card           = "FSI2B (AK4643)",
+       .card           = "FSI2B-AK4643",
        .cpu_dai        = "fsib-dai",
        .codec          = "ak4642-codec.0-0013",
        .platform       = "sh_fsi2",
index dbafd7ac559066c6b8db9f82fe55ea23f002f3b4..59553fd8c2fb9173a72b7da8b9fa5f70942fe8c2 100644 (file)
@@ -42,7 +42,7 @@ static struct snd_soc_dai_link fsi_da7210_dai = {
 };
 
 static struct snd_soc_card fsi_soc_card = {
-       .name           = "FSI (DA7210)",
+       .name           = "FSI-DA7210",
        .dai_link       = &fsi_da7210_dai,
        .num_links      = 1,
 };
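These FSI machine drivers (and the FSI-HDMI one below) switch from spaced, parenthesised card names to short hyphenated ones. The card name ends up in fixed-size fields of the underlying struct snd_card and, as the soc-core hunk further down shows, could historically also be copied into the 16-byte driver field, so compact names without special characters are the safer choice. A hypothetical card following the new convention:

        #include <sound/soc.h>

        /* hypothetical DAI link; a real one names the actual DAIs */
        static struct snd_soc_dai_link demo_dai_link = {
                .name           = "DEMO",
                .stream_name    = "DEMO",
                .cpu_dai_name   = "fsia-dai",
                .codec_dai_name = "demo-hifi",
                .codec_name     = "demo-codec.0-001a",
                .platform_name  = "sh_fsi2",
        };

        static struct snd_soc_card demo_card = {
                .name           = "FSI2A-DEMO", /* short, hyphenated, no spaces */
                .dai_link       = &demo_dai_link,
                .num_links      = 1,
        };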
index 9719985eb82d2b4c3169be670cd7138abf41047c..d3d9fd880680e346e9558e4936a8bc4e84b018e6 100644 (file)
@@ -83,13 +83,13 @@ static int fsi_hdmi_remove(struct platform_device *pdev)
 
 static struct fsi_hdmi_data fsi2_a_hdmi = {
        .cpu_dai        = "fsia-dai",
-       .card           = "FSI2A (SH MOBILE HDMI)",
+       .card           = "FSI2A-HDMI",
        .id             = FSI_PORT_A,
 };
 
 static struct fsi_hdmi_data fsi2_b_hdmi = {
        .cpu_dai        = "fsib-dai",
-       .card           = "FSI2B (SH MOBILE HDMI)",
+       .card           = "FSI2B-HDMI",
        .id             = FSI_PORT_B,
 };
 
index c005ceb70c9d1dfc624ffa11a13eec427e2215b9..039b9532b270cb8c1bb300efd0d1a7f385d95aa4 100644 (file)
@@ -409,9 +409,6 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
        codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;
 
        switch (control) {
-       case SND_SOC_CUSTOM:
-               break;
-
        case SND_SOC_I2C:
 #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
                codec->hw_write = (hw_write_t)i2c_master_send;
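With the SND_SOC_CUSTOM case gone, snd_soc_codec_set_cache_io() only wires up the bus backends it knows about; codecs that roll their own register I/O simply skip the call. Typical use from a codec probe in this API generation, with hypothetical names:

        static int demo_codec_probe(struct snd_soc_codec *codec)
        {
                /* 8-bit register addresses, 16-bit register values, over I2C */
                int ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);

                if (ret < 0)
                        dev_err(codec->dev, "failed to set cache I/O: %d\n", ret);
                return ret;
        }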
index d75043ed7fc0551f66881675df110dede0d5ccf0..b194be09e74d623220e06e3addb5bb4bfcf9fb70 100644 (file)
@@ -1929,8 +1929,9 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
                 "%s", card->name);
        snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
                 "%s", card->long_name ? card->long_name : card->name);
-       snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
-                "%s", card->driver_name ? card->driver_name : card->name);
+       if (card->driver_name)
+               strlcpy(card->snd_card->driver, card->driver_name,
+                       sizeof(card->snd_card->driver));
 
        if (card->late_probe) {
                ret = card->late_probe(card);
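Two changes in one hunk: the driver string is now written only when the card supplies an explicit driver_name (otherwise whatever is already in the field survives), and the copy uses strlcpy(), which truncates over-long names while guaranteeing NUL termination of the 16-byte field. A minimal sketch of the new behaviour with hypothetical values:

        #include <linux/string.h>

        static void demo_fill_driver(char driver[16], const char *driver_name)
        {
                if (driver_name)
                        strlcpy(driver, driver_name, 16);
                /* a NULL driver_name now leaves the preset field untouched,
                 * where the old code overwrote it with the (possibly
                 * over-long, space-containing) card name */
        }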
index 6b817e20548ca07fa94938caf8202bc37b9b0386..95f03c10b4f7401da2686d2b6ff0d904e749ea87 100644 (file)
@@ -222,12 +222,18 @@ static int tegra_i2s_hw_params(struct snd_pcm_substream *substream,
        if (i2sclock % (2 * srate))
                reg |= TEGRA_I2S_TIMING_NON_SYM_ENABLE;
 
+       if (!i2s->clk_refs)
+               clk_enable(i2s->clk_i2s);
+
        tegra_i2s_write(i2s, TEGRA_I2S_TIMING, reg);
 
        tegra_i2s_write(i2s, TEGRA_I2S_FIFO_SCR,
                TEGRA_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS |
                TEGRA_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS);
 
+       if (!i2s->clk_refs)
+               clk_disable(i2s->clk_i2s);
+
        return 0;
 }
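hw_params can run while the controller's clock is gated (i2s->clk_refs counts active users), so the fix ungates the clock just for the register writes and then restores the previous state. The general shape of the pattern, with a hypothetical device structure:

        #include <linux/clk.h>
        #include <linux/io.h>

        struct demo_dev {
                struct clk *clk;
                int clk_refs;           /* users currently holding the clock on */
                void __iomem *regs;
        };

        static void demo_write_regs(struct demo_dev *dev, u32 val)
        {
                bool ungate = (dev->clk_refs == 0);

                if (ungate)
                        clk_enable(dev->clk);   /* clock was off: turn it on */

                writel(val, dev->regs);         /* safe: clock is running */

                if (ungate)
                        clk_disable(dev->clk);  /* restore the gated state */
        }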
 
index 337a00241a1f3a406506c4baad93e0f10a8ac7d8..4dd051bdf4fd1365498cc0740173d95b3b4bdc60 100644 (file)
@@ -1124,6 +1124,6 @@ static void __exit at73c213_exit(void)
 }
 module_exit(at73c213_exit);
 
-MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
 MODULE_DESCRIPTION("Sound driver for AT73C213 with Atmel SSC");
 MODULE_LICENSE("GPL");
index a91719d5918b69c6e76e577364c59f892f5768bd..1e3ae3327dd3a65431b4a517ab5b340dac2ee6f7 100644 (file)
@@ -270,7 +270,6 @@ static int usb6fire_fw_ezusb_upload(
        data = 0x00; /* resume ezusb cpu */
        ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
        if (ret < 0) {
-               release_firmware(fw);
                snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
                                "firmware %s: end message.\n", fwname);
                return ret;
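The deleted release_firmware() was redundant: a firmware object must be released exactly once, after its last use, on every path; releasing it again in a late error path is a double free. The single-release discipline, sketched with hypothetical helpers:

        #include <linux/firmware.h>

        static int demo_upload(struct device *dev, const char *fwname)
        {
                const struct firmware *fw;
                int ret;

                ret = request_firmware(&fw, fwname, dev);
                if (ret < 0)
                        return ret;

                ret = demo_write_blob(dev, fw->data, fw->size); /* hypothetical */
                release_firmware(fw);   /* last use was above: release once */
                if (ret < 0)
                        return ret;

                /* later steps must not touch fw and must not release it again */
                return demo_resume_cpu(dev);                    /* hypothetical */
        }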
index b137b25865cc986cc8f6bc8d21ca482e34b987cd..d144cdb2f15909acefec2f0c45ea0cd1ff523030 100644 (file)
@@ -395,12 +395,12 @@ static int usb6fire_pcm_open(struct snd_pcm_substream *alsa_sub)
        alsa_rt->hw = pcm_hw;
 
        if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               if (rt->rate >= 0)
+               if (rt->rate < ARRAY_SIZE(rates))
                        alsa_rt->hw.rates = rates_alsaid[rt->rate];
                alsa_rt->hw.channels_max = OUT_N_CHANNELS;
                sub = &rt->playback;
        } else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE) {
-               if (rt->rate >= 0)
+               if (rt->rate < ARRAY_SIZE(rates))
                        alsa_rt->hw.rates = rates_alsaid[rt->rate];
                alsa_rt->hw.channels_max = IN_N_CHANNELS;
                sub = &rt->capture;
index 032ba6398a5c696cc46b0957d9452b68aad2d660..940257b5774ec209d09e3b2d4fbee647ff86dd71 100644 (file)
@@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
 
-LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
+LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
 
 ALL_CFLAGS += $(BASIC_CFLAGS)
 ALL_CFLAGS += $(ARCH_CFLAGS)
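ld resolves static archives in a single left-to-right pass, so when the libraries in $(EXTLIBS) depend on each other in both directions, some orderings leave symbols unresolved; --start-group/--end-group makes the linker rescan the group until no further symbols resolve. The failure mode, illustrated with two hypothetical archives:

        /* a.c, archived into liba.a */
        extern int b_fn(int x);
        int a_fn(int x) { return x ? b_fn(x - 1) : 0; }

        /* b.c, archived into libb.a */
        extern int a_fn(int x);
        int b_fn(int x) { return x ? a_fn(x - 1) : 1; }

        /* main.c: linking "main.o libb.a liba.a" fails -- nothing in
         * libb.a is needed when it is first scanned, then liba.a pulls
         * in a.o, whose b_fn reference can no longer be satisfied.
         * Wrapping both archives in -Wl,--start-group ... -Wl,--end-group
         * makes ld rescan the group and resolve b_fn from libb.a. */
        extern int a_fn(int x);
        int main(void) { return a_fn(3); }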
index 1e88485c16a04b755e68bb14510c3fd5cf368769..0a7ed5b5e281c88b321de87ced66a3d29ebb003d 100644 (file)
@@ -2187,6 +2187,7 @@ static const struct flag flags[] = {
        { "TASKLET_SOFTIRQ", 6 },
        { "SCHED_SOFTIRQ", 7 },
        { "HRTIMER_SOFTIRQ", 8 },
+       { "RCU_SOFTIRQ", 9 },
 
        { "HRTIMER_NORESTART", 0 },
        { "HRTIMER_RESTART", 1 },