Merge remote-tracking branch 'origin/develop-3.0' into develop-3.0-jb
author 黄涛 <huangtao@rock-chips.com>
Wed, 18 Jul 2012 02:14:33 +0000 (10:14 +0800)
committer 黄涛 <huangtao@rock-chips.com>
Wed, 18 Jul 2012 02:14:33 +0000 (10:14 +0800)
1205 files changed:
Documentation/HOWTO
Documentation/cpu-freq/governors.txt
Documentation/development-process/5.Posting
Documentation/hwmon/jc42
Documentation/hwspinlock.txt
Documentation/networking/ip-sysctl.txt
Documentation/power/runtime_pm.txt
Documentation/stable_kernel_rules.txt
Documentation/usb/usbmon.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/futex.h
arch/arm/Kconfig
arch/arm/configs/ezx_defconfig
arch/arm/configs/imote2_defconfig
arch/arm/configs/magician_defconfig
arch/arm/configs/rk30_sdk_defconfig
arch/arm/configs/zeus_defconfig
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/mach-at91/at91sam9260.c
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-dove/common.c
arch/arm/mach-imx/mach-mx21ads.c
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/mpp.h
arch/arm/mach-lpc32xx/include/mach/irqs.h
arch/arm/mach-lpc32xx/irq.c
arch/arm/mach-lpc32xx/serial.c
arch/arm/mach-mv78xx0/common.c
arch/arm/mach-mv78xx0/mpp.h
arch/arm/mach-mxs/clock-mx28.c
arch/arm/mach-mxs/include/mach/mxs.h
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/board-4430sdp.c
arch/arm/mach-omap2/board-omap4panda.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/smartreflex.c
arch/arm/mach-orion5x/common.c
arch/arm/mach-orion5x/mpp.h
arch/arm/mach-pxa/balloon3.c
arch/arm/mach-pxa/colibri-pxa320.c
arch/arm/mach-pxa/gumstix.c
arch/arm/mach-pxa/include/mach/palm27x.h
arch/arm/mach-pxa/palm27x.c
arch/arm/mach-pxa/palmtc.c
arch/arm/mach-pxa/vpac270.c
arch/arm/mach-ux500/Kconfig
arch/arm/mach-ux500/cpu.c
arch/arm/mm/proc-v7.S
arch/arm/oprofile/common.c
arch/arm/plat-mxc/include/mach/iomux-v3.h
arch/arm/plat-mxc/pwm.c
arch/arm/plat-orion/common.c
arch/arm/plat-orion/include/plat/common.h
arch/arm/plat-orion/mpp.c
arch/arm/plat-s3c24xx/dma.c
arch/avr32/Kconfig
arch/ia64/include/asm/futex.h
arch/ia64/include/asm/unistd.h
arch/ia64/kernel/acpi.c
arch/ia64/kernel/entry.S
arch/m68k/mac/config.c
arch/parisc/include/asm/prefetch.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/pacache.S
arch/parisc/kernel/vmlinux.lds.S
arch/powerpc/include/asm/sections.h
arch/powerpc/include/asm/sparsemem.h
arch/powerpc/include/asm/synch.h
arch/powerpc/include/asm/time.h
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/kvm.c
arch/powerpc/kernel/module_32.c
arch/powerpc/kernel/perf_event.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/time.c
arch/powerpc/lib/feature-fixups.c
arch/powerpc/mm/gup.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/mmu_context_hash64.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/powermac/smp.c
arch/powerpc/platforms/ps3/interrupt.c
arch/powerpc/platforms/ps3/platform.h
arch/powerpc/platforms/ps3/smp.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/hvCall_inst.c
arch/powerpc/platforms/pseries/lpar.c
arch/s390/Kconfig
arch/s390/include/asm/compat.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/tlb.h
arch/s390/kernel/process.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/fault.c
arch/s390/mm/gup.c
arch/s390/mm/mmap.c
arch/s390/mm/pgtable.c
arch/s390/oprofile/init.c
arch/score/kernel/entry.S
arch/sh/include/asm/page.h
arch/sh/oprofile/common.c
arch/sparc/Kconfig
arch/sparc/Makefile
arch/sparc/include/asm/pgtable_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/kernel/central.c
arch/sparc/kernel/ds.c
arch/sparc/kernel/entry.h
arch/sparc/kernel/module.c
arch/sparc/kernel/pci_sun4v.c
arch/sparc/kernel/rtrap_64.S
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/systbls_64.S
arch/sparc/kernel/visemul.c
arch/sparc/lib/memcpy.S
arch/sparc/mm/Makefile
arch/sparc/mm/btfixup.c
arch/sparc/mm/generic_32.c [deleted file]
arch/sparc/mm/generic_64.c [deleted file]
arch/sparc/mm/ultra.S
arch/tile/Kconfig
arch/tile/include/asm/bitops.h
arch/tile/kernel/compat_signal.c
arch/um/drivers/ubd_kern.c
arch/um/include/asm/pgtable.h
arch/x86/crypto/aesni-intel_asm.S
arch/x86/include/asm/amd_nb.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/i387.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/timer.h
arch/x86/include/asm/uv/uv_bau.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/probe_32.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce-severity.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/hpet.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/kprobes.c
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/tls.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/vm86_32.c
arch/x86/kernel/xsave.c
arch/x86/kvm/emulate.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/delay.c
arch/x86/mm/gup.c
arch/x86/mm/highmem_32.c
arch/x86/mm/mmap.c
arch/x86/mm/srat.c
arch/x86/net/bpf_jit_comp.c
arch/x86/oprofile/init.c
arch/x86/pci/Makefile
arch/x86/pci/acpi.c
arch/x86/pci/amd_bus.c
arch/x86/pci/xen.c
arch/x86/platform/mrst/mrst.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/xen-asm.S
block/blk-core.c
block/blk-map.c
block/bsg.c
block/cfq-iosched.c
block/genhd.c
block/scsi_ioctl.c
crypto/cryptd.c
crypto/sha512_generic.c
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/dsargs.c
drivers/acpi/acpica/excreate.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/atomicio.c
drivers/acpi/battery.c
drivers/acpi/numa.c
drivers/acpi/pci_root.c
drivers/acpi/processor_core.c
drivers/acpi/processor_thermal.c
drivers/acpi/sleep.c
drivers/acpi/video.c
drivers/ata/ahci.c
drivers/ata/ata_piix.c
drivers/ata/libata-eh.c
drivers/ata/pata_legacy.c
drivers/atm/solos-pci.c
drivers/base/Kconfig
drivers/base/Makefile
drivers/base/core.c
drivers/base/firmware_class.c
drivers/base/node.c
drivers/base/power/runtime.c
drivers/base/sw_sync.c [new file with mode: 0644]
drivers/base/sync.c [new file with mode: 0644]
drivers/block/cciss.c
drivers/block/cciss_scsi.c
drivers/block/sx8.c
drivers/block/ub.c
drivers/block/virtio_blk.c
drivers/block/xen-blkback/blkback.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_ldisc.c
drivers/cdrom/cdrom.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/cpufreq/cpufreq_interactive.c
drivers/cpufreq/powernow-k8.c
drivers/crypto/Kconfig
drivers/crypto/mv_cesa.c
drivers/dma/Kconfig
drivers/dma/at_hdmac.c
drivers/dma/at_hdmac_regs.h
drivers/dma/pch_dma.c
drivers/firewire/core-cdev.c
drivers/firewire/core-device.c
drivers/firewire/ohci.c
drivers/firmware/efivars.c
drivers/firmware/iscsi_ibft.c
drivers/firmware/iscsi_ibft_find.c
drivers/firmware/sigma.c
drivers/gpio/Kconfig
drivers/gpio/pca953x.c
drivers/gpio/pch_gpio.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sdvo_regs.h
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atom.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_blit_shaders.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/hid/Kconfig
drivers/hid/hid-apple.c
drivers/hid/hid-chicony.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/Kconfig
drivers/hwmon/ads1015.c
drivers/hwmon/coretemp.c
drivers/hwmon/f71805f.c
drivers/hwmon/f75375s.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/jc42.c
drivers/hwmon/jz4740-hwmon.c
drivers/hwmon/max6639.c
drivers/hwmon/pmbus_core.c
drivers/hwmon/sht15.c
drivers/hwmon/w83627ehf.c
drivers/hwspinlock/hwspinlock_core.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/i2c-ali1535.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-nforce2.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/busses/i2c-sis5595.c
drivers/i2c/busses/i2c-sis630.c
drivers/i2c/busses/i2c-viapro.c
drivers/ide/ide-floppy_ioctl.c
drivers/idle/intel_idle.c
drivers/infiniband/core/addr.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/qib/qib_iba6120.c
drivers/infiniband/hw/qib/qib_iba7220.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/input/evdev.c
drivers/input/mouse/alps.c
drivers/input/mouse/synaptics.c
drivers/isdn/gigaset/capi.c
drivers/leds/led-class.c
drivers/md/bitmap.c
drivers/md/dm-crypt.c
drivers/md/dm-exception-store.c
drivers/md/dm-flakey.c
drivers/md/dm-io.c
drivers/md/dm-linear.c
drivers/md/dm-mpath.c
drivers/md/dm-raid.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb/dvb-usb/dib0700_core.c
drivers/media/dvb/frontends/dib0070.c
drivers/media/dvb/frontends/dib0090.c
drivers/media/dvb/frontends/dib7000m.c
drivers/media/dvb/frontends/dib7000p.c
drivers/media/dvb/frontends/dib8000.c
drivers/media/dvb/frontends/dib9000.c
drivers/media/dvb/frontends/dibx000_common.c
drivers/media/dvb/frontends/dibx000_common.h
drivers/media/dvb/frontends/lgdt330x.c
drivers/media/dvb/siano/smsusb.c
drivers/media/rc/ene_ir.c
drivers/media/rc/fintek-cir.c
drivers/media/rc/ite-cir.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/winbond-cir.c
drivers/media/video/cx23885/cx23885-dvb.c
drivers/media/video/hdpvr/hdpvr-video.c
drivers/media/video/pvrusb2/pvrusb2-devattr.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/saa7164/saa7164-cards.c
drivers/media/video/saa7164/saa7164-dvb.c
drivers/media/video/saa7164/saa7164.h
drivers/media/video/uvc/uvc_driver.c
drivers/media/video/uvc/uvc_v4l2.c
drivers/media/video/uvc/uvc_video.c
drivers/media/video/uvc/uvcvideo.h
drivers/media/video/v4l2-ioctl.c
drivers/mfd/cs5535-mfd.c
drivers/mfd/mfd-core.c
drivers/mfd/twl4030-madc.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/cb710/core.c
drivers/misc/cs5535-mfgpt.c
drivers/misc/kgdbts.c
drivers/misc/pch_phub.c
drivers/misc/pmem.c [deleted file]
drivers/misc/spear13xx_pcie_gadget.c
drivers/mmc/card/block.c
drivers/mmc/core/core.c
drivers/mmc/core/core.h
drivers/mmc/core/mmc.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_irq.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/vub300.c
drivers/mtd/devices/block2mtd.c
drivers/mtd/devices/lart.c
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/sst25l.c
drivers/mtd/mtd_blkdevs.c
drivers/mtd/mtdchar.c
drivers/mtd/mtdoops.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/redboot.c
drivers/mtd/sm_ftl.c
drivers/mtd/tests/mtd_stresstest.c
drivers/mtd/ubi/cdev.c
drivers/mtd/ubi/debug.h
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/scan.c
drivers/mtd/ubi/ubi.h
drivers/mtd/ubi/wl.c
drivers/net/3c59x.c
drivers/net/8139cp.c
drivers/net/Kconfig
drivers/net/atl1c/atl1c_main.c
drivers/net/atlx/atl1.c
drivers/net/atlx/atl1.h
drivers/net/atlx/atlx.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/cxgb3/cxgb3_offload.c
drivers/net/davinci_emac.c
drivers/net/davinci_mdio.c
drivers/net/dummy.c
drivers/net/e1000/e1000.h
drivers/net/e1000/e1000_main.c
drivers/net/e1000e/e1000.h
drivers/net/e1000e/netdev.c
drivers/net/enic/enic_main.c
drivers/net/jme.c
drivers/net/jme.h
drivers/net/ks8851_mll.c
drivers/net/ksz884x.c
drivers/net/macvlan.c
drivers/net/netconsole.c
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/pch_gbe/pch_gbe_param.c
drivers/net/phy/dp83640.c
drivers/net/phy/mdio-gpio.c
drivers/net/ppp_generic.c
drivers/net/pptp.c
drivers/net/rionet.c
drivers/net/sfc/rx.c
drivers/net/sky2.c
drivers/net/sky2.h
drivers/net/smsc911x.c
drivers/net/sungem.c
drivers/net/tg3.c
drivers/net/usb/cdc_eem.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/ipheth.c
drivers/net/usb/rtl8150.c
drivers/net/usb/sierra_net.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/usbnet.c
drivers/net/usb/zaurus.c
drivers/net/veth.c
drivers/net/via-velocity.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wimax/i2400m/netdev.c
drivers/net/xen-netback/interface.c
drivers/oprofile/oprof.c
drivers/oprofile/oprofile_files.c
drivers/oprofile/oprofilefs.c
drivers/oprofile/timer_int.c
drivers/pci/hotplug/shpchp_core.c
drivers/pci/hotplug/shpchp_hpc.c
drivers/pci/intel-iommu.c
drivers/pci/msi.c
drivers/pci/pci-acpi.c
drivers/pci/pci.c
drivers/pci/pcie/aspm.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/xen-pcifront.c
drivers/pcmcia/ds.c
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/wmi.c
drivers/pnp/pnpacpi/core.c
drivers/pnp/quirks.c
drivers/power/ds2780_battery.c
drivers/ptp/ptp_clock.c
drivers/regulator/88pm8607.c
drivers/regulator/max8997.c
drivers/regulator/tps6524x-regulator.c
drivers/rtc/interface.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-pl031.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_ioctl.c
drivers/s390/char/fs3270.c
drivers/s390/char/vmcp.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/chsc_sch.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_cfdc.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/aacraid/linit.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/device_handler/scsi_dh.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/hosts.c
drivers/scsi/hpsa.c
drivers/scsi/ipr.c
drivers/scsi/isci/init.c
drivers/scsi/isci/isci.h
drivers/scsi/isci/port_config.c
drivers/scsi/isci/request.c
drivers/scsi/isci/request.h
drivers/scsi/isci/sas.h
drivers/scsi/libsas/sas_expander.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/osd/osd_uld.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_pm.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_wait_scan.c
drivers/scsi/sd.c
drivers/scsi/st.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/spi/spi.c
drivers/ssb/driver_pcicore.c
drivers/staging/android/logger.c
drivers/staging/android/logger.h
drivers/staging/android/ram_console.c
drivers/staging/asus_oled/asus_oled.c
drivers/staging/brcm80211/brcmsmac/wlc_bmac.c
drivers/staging/brcm80211/brcmsmac/wlc_bmac.h
drivers/staging/brcm80211/brcmsmac/wlc_main.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/hv/hyperv_storage.h
drivers/staging/hv/storvsc_drv.c
drivers/staging/iio/magnetometer/hmc5843.c
drivers/staging/lirc/lirc_serial.c
drivers/staging/quatech_usb2/quatech_usb2.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/serqt_usb2/serqt_usb2.c
drivers/staging/usbip/vhci_rx.c
drivers/switch/switch_class.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_alua.c
drivers/target/target_core_cdb.c
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/tty/amiserial.c
drivers/tty/hvc/hvc_dcc.c
drivers/tty/moxa.c
drivers/tty/pty.c
drivers/tty/serial/8250_pci.c
drivers/tty/serial/Kconfig
drivers/tty/serial/altera_uart.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/clps711x.c
drivers/tty/serial/crisv10.c
drivers/tty/serial/jsm/jsm.h
drivers/tty/serial/jsm/jsm_driver.c
drivers/tty/serial/jsm/jsm_neo.c
drivers/tty/serial/jsm/jsm_tty.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/pxa.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sh-sci.c
drivers/tty/tty_io.c
drivers/tty/tty_ldisc.c
drivers/tty/tty_port.c
drivers/tty/vt/consolemap.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/devio.c
drivers/usb/core/driver.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/core/quirks.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/android.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/f_accessory.c
drivers/usb/gadget/f_adb.c
drivers/usb/gadget/f_audio_source.c [new file with mode: 0644]
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/f_loopback.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/file_storage.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/hid.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/pch_udc.c
drivers/usb/gadget/printer.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/uvc.h
drivers/usb/gadget/uvc_v4l2.c
drivers/usb/host/ehci-dbg.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-fsl.h
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci.h
drivers/usb/host/fhci-sched.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-hub.c
drivers/usb/host/ohci-pci.c
drivers/usb/host/ohci.h
drivers/usb/host/pci-quirks.c
drivers/usb/host/uhci-q.c
drivers/usb/host/whci/qset.c
drivers/usb/host/xhci-ext-caps.h
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/isight_firmware.c
drivers/usb/misc/usbsevseg.c
drivers/usb/misc/usbtest.c
drivers/usb/misc/yurex.c
drivers/usb/mon/mon_bin.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/omap2430.c
drivers/usb/serial/ark3116.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio.h
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/generic.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/mct_u232.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/omninet.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/qcaux.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/sierra.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/ti_usb_3410_5052.h
drivers/usb/serial/usb-serial.c
drivers/usb/storage/protocol.c
drivers/usb/storage/transport.c
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/usb.c
drivers/usb/storage/usb.h
drivers/uwb/hwa-rc.c
drivers/uwb/neh.c
drivers/video/atmel_lcdfb.c
drivers/video/backlight/tosa_lcd.c
drivers/video/carminefb.c
drivers/video/fbmem.c
drivers/video/fbsysfs.c
drivers/video/offb.c
drivers/video/omap2/dss/hdmi.c
drivers/video/sh_mobile_hdmi.c
drivers/video/udlfb.c
drivers/video/uvesafb.c
drivers/video/via/share.h
drivers/video/via/via_modesetting.h
drivers/video/via/viafbdev.c
drivers/virtio/virtio_pci.c
drivers/w1/slaves/w1_ds2780.c
drivers/w1/slaves/w1_ds2780.h
drivers/watchdog/hpwdt.c
drivers/xen/events.c
drivers/xen/gntalloc.c
drivers/xen/gntdev.c
drivers/xen/swiotlb-xen.c
drivers/xen/xenbus/xenbus_probe_frontend.c
drivers/xen/xenbus/xenbus_xs.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/aio.c
fs/autofs4/autofs_i.h
fs/autofs4/dev-ioctl.c
fs/autofs4/inode.c
fs/autofs4/waitq.c
fs/binfmt_elf.c
fs/block_dev.c
fs/btrfs/ctree.h
fs/buffer.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/sess.c
fs/dcache.c
fs/ecryptfs/crypto.c
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/miscdev.c
fs/ecryptfs/read_write.c
fs/eventpoll.c
fs/ext3/ialloc.c
fs/ext3/inode.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/hfs/btree.c
fs/hfs/trans.c
fs/hfsplus/catalog.c
fs/hfsplus/dir.c
fs/hfsplus/wrapper.c
fs/hppfs/hppfs.c
fs/hugetlbfs/inode.c
fs/jbd/journal.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/jffs2/gc.c
fs/lockd/clnt4xdr.c
fs/lockd/clntxdr.c
fs/lockd/svc.c
fs/namei.c
fs/namespace.c
fs/nfs/callback_proc.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/objlayout/objlayout.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/proc.c
fs/nfs/super.c
fs/nfs/write.c
fs/nfsd/export.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/state.h
fs/nfsd/vfs.c
fs/nfsd/vfs.h
fs/nilfs2/ioctl.c
fs/nilfs2/the_nilfs.c
fs/notify/mark.c
fs/ocfs2/alloc.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/partitions/check.c
fs/pipe.c
fs/proc/base.c
fs/proc/meminfo.c
fs/proc/namespaces.c
fs/proc/task_mmu.c
fs/proc/uptime.c
fs/quota/quota.c
fs/reiserfs/super.c
fs/seq_file.c
fs/signalfd.c
fs/splice.c
fs/stat.c
fs/statfs.c
fs/super.c
fs/sysfs/inode.c
fs/ubifs/debug.h
fs/udf/file.c
fs/udf/inode.c
fs/udf/super.c
fs/xfs/linux-2.6/xfs_acl.c
fs/xfs/linux-2.6/xfs_buf.h
fs/xfs/linux-2.6/xfs_discard.c
fs/xfs/linux-2.6/xfs_export.c
fs/xfs/linux-2.6/xfs_file.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/linux-2.6/xfs_sync.h
fs/xfs/quota/xfs_qm.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_iget.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_vnodeops.c
include/acpi/acpi_numa.h
include/asm-generic/pgtable.h
include/asm-generic/poll.h
include/asm-generic/statfs.h
include/asm-generic/unistd.h
include/drm/drmP.h
include/drm/drm_dp_helper.h
include/drm/drm_mode.h
include/drm/drm_pciids.h
include/linux/bitops.h
include/linux/blkdev.h
include/linux/compat.h
include/linux/dcache.h
include/linux/efi.h
include/linux/eventpoll.h
include/linux/ext2_fs.h
include/linux/ext3_fs.h
include/linux/fb.h
include/linux/fs.h
include/linux/genhd.h
include/linux/i2c/twl4030-madc.h
include/linux/input.h
include/linux/interrupt.h
include/linux/io-mapping.h
include/linux/jiffies.h
include/linux/kernel.h
include/linux/kgdb.h
include/linux/kvm_host.h
include/linux/lglock.h
include/linux/log2.h
include/linux/math64.h
include/linux/memcontrol.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmc/host.h
include/linux/namei.h
include/linux/netdevice.h
include/linux/netfilter/xt_IDLETIMER.h
include/linux/netlink.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/nl80211.h
include/linux/pci-aspm.h
include/linux/pci.h
include/linux/pci_regs.h
include/linux/phy.h
include/linux/pipe_fs_i.h
include/linux/proportions.h
include/linux/regset.h
include/linux/seqlock.h
include/linux/sigma.h
include/linux/signalfd.h
include/linux/skbuff.h
include/linux/socket.h
include/linux/sunrpc/svcsock.h
include/linux/sw_sync.h [new file with mode: 0644]
include/linux/sync.h [new file with mode: 0644]
include/linux/tty.h
include/linux/usb.h
include/linux/usb/ch11.h
include/linux/usb/ch9.h
include/linux/usb/f_accessory.h
include/linux/usb/hcd.h
include/linux/usb/usbnet.h
include/linux/videodev2.h
include/linux/vmalloc.h
include/linux/workqueue.h
include/net/arp.h
include/net/bluetooth/hci.h
include/net/cfg80211.h
include/net/dst.h
include/net/flow.h
include/net/inet_sock.h
include/net/netns/generic.h
include/net/route.h
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/target/target_core_base.h
include/trace/events/cpufreq_interactive.h [new file with mode: 0644]
include/trace/events/writeback.h
include/video/omapdss.h
include/xen/interface/io/xs_wire.h
init/do_mounts.c
init/main.c
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/compat.c
kernel/cpu.c
kernel/cred.c
kernel/debug/debug_core.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/futex_compat.c
kernel/hrtimer.c
kernel/hung_task.c
kernel/irq/autoprobe.c
kernel/irq/chip.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/migration.c
kernel/irq/pm.c
kernel/irq/spurious.c
kernel/jump_label.c
kernel/kmod.c
kernel/kprobes.c
kernel/module.c
kernel/panic.c
kernel/power/hibernate.c
kernel/power/suspend.c
kernel/printk.c
kernel/relay.c
kernel/sched.c
kernel/sched_rt.c
kernel/signal.c
kernel/sysctl.c
kernel/sysctl_binary.c
kernel/taskstats.c
kernel/time.c
kernel/time/alarmtimer.c
kernel/time/clocksource.c
kernel/time/ntp.c
kernel/time/tick-broadcast.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_entries.h
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_export.c
kernel/workqueue.c
lib/btree.c
lib/kobject_uevent.c
lib/nlattr.c
mm/backing-dev.c
mm/bootmem.c
mm/compaction.c
mm/filemap.c
mm/filemap_xip.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/memcontrol.c
mm/memory.c
mm/mempolicy.c
mm/mincore.c
mm/nobootmem.c
mm/nommu.c
mm/oom_kill.c
mm/page_alloc.c
mm/pagewalk.c
mm/percpu-vm.c
mm/percpu.c
mm/slub.c
mm/sparse.c
mm/swap.c
mm/swap_state.c
mm/swapfile.c
mm/vmalloc.c
mm/vmscan.c
net/8021q/vlan_dev.c
net/atm/clip.c
net/ax25/af_ax25.c
net/bluetooth/hci_core.c
net/bridge/br_device.c
net/bridge/br_if.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/caif/caif_dev.c
net/caif/cfcnfg.c
net/can/bcm.c
net/core/dev.c
net/core/dst.c
net/core/flow.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/netpoll.c
net/core/pktgen.c
net/core/skbuff.c
net/core/sock.c
net/core/timestamping.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/ipv4/ah4.c
net/ipv4/arp.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/ip_forward.c
net/ipv4/ip_gre.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/xfrm4_mode_beet.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_mode_beet.c
net/ipv6/xfrm6_mode_tunnel.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rate.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/work.c
net/mac80211/wpa.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_qtaguid.c
net/netfilter/xt_qtaguid_internal.h
net/netfilter/xt_qtaguid_print.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/phonet/pep.c
net/rds/af_rds.c
net/rds/send.c
net/rose/rose_dev.c
net/sched/sch_choke.c
net/sched/sch_gred.c
net/sched/sch_mqprio.c
net/sched/sch_netem.c
net/sched/sch_sfb.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/socket.c
net/sunrpc/auth_unix.c
net/sunrpc/cache.c
net/sunrpc/sched.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprtsock.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/util.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c
scripts/kconfig/menu.c
scripts/kconfig/streamline_config.pl
scripts/mod/file2alias.c
scripts/mod/modpost.c
scripts/mod/modpost.h
scripts/package/builddeb
scripts/recordmcount.h
security/apparmor/path.c
security/commoncap.c
security/integrity/ima/ima_api.c
security/integrity/ima/ima_queue.c
security/keys/user_defined.c
security/selinux/netport.c
security/selinux/selinuxfs.c
security/tomoyo/mount.c
security/tomoyo/realpath.c
sound/pci/echoaudio/echoaudio_dsp.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_local.h
sound/pci/hda/hda_proc.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/ice1712/amp.c
sound/pci/intel8x0.c
sound/pci/lx6464es/lx_core.c
sound/pci/oxygen/xonar_wm87x6.c
sound/pci/sis7019.c
sound/soc/codecs/ak4535.c
sound/soc/codecs/ak4642.c
sound/soc/codecs/wm8711.c
sound/soc/codecs/wm8731.c
sound/soc/codecs/wm8741.c
sound/soc/codecs/wm8753.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8940.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm_hubs.c
sound/soc/fsl/fsl_ssi.c
sound/soc/imx/imx-ssi.c
sound/soc/pxa/pxa-ssp.c
sound/soc/samsung/neo1973_wm8753.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-utils.c
sound/usb/misc/ua101.c
sound/usb/mixer.c
sound/usb/pcm.c
sound/usb/usx2y/usb_stream.c
tools/perf/bench/mem-memcpy-x86-64-asm.S
tools/perf/util/evsel.c
tools/perf/util/hist.c
tools/perf/util/probe-event.c
tools/perf/util/sort.c
tools/perf/util/trace-event-parse.c
tools/usb/ffs-test.c
virt/kvm/iommu.c
virt/kvm/kvm_main.c

index 81bc1a9ab9d8fa1357fcbb56268156786f741287..59c080f084ef3dcfb52a21e850334b66672fe6af 100644 (file)
@@ -218,16 +218,16 @@ The development process
 Linux kernel development process currently consists of a few different
 main kernel "branches" and lots of different subsystem-specific kernel
 branches.  These different branches are:
-  - main 2.6.x kernel tree
-  - 2.6.x.y -stable kernel tree
-  - 2.6.x -git kernel patches
+  - main 3.x kernel tree
+  - 3.x.y -stable kernel tree
+  - 3.x -git kernel patches
   - subsystem specific kernel trees and patches
-  - the 2.6.x -next kernel tree for integration tests
+  - the 3.x -next kernel tree for integration tests
 
-2.6.x kernel tree
+3.x kernel tree
 -----------------
-2.6.x kernels are maintained by Linus Torvalds, and can be found on
-kernel.org in the pub/linux/kernel/v2.6/ directory.  Its development
+3.x kernels are maintained by Linus Torvalds, and can be found on
+kernel.org in the pub/linux/kernel/v3.x/ directory.  Its development
 process is as follows:
   - As soon as a new kernel is released a two weeks window is open,
     during this period of time maintainers can submit big diffs to
@@ -262,21 +262,21 @@ mailing list about kernel releases:
        released according to perceived bug status, not according to a
        preconceived timeline."
 
-2.6.x.y -stable kernel tree
+3.x.y -stable kernel tree
 ---------------------------
-Kernels with 4-part versions are -stable kernels. They contain
+Kernels with 3-part versions are -stable kernels. They contain
 relatively small and critical fixes for security problems or significant
-regressions discovered in a given 2.6.x kernel.
+regressions discovered in a given 3.x kernel.
 
 This is the recommended branch for users who want the most recent stable
 kernel and are not interested in helping test development/experimental
 versions.
 
-If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
+If no 3.x.y kernel is available, then the highest numbered 3.x
 kernel is the current stable kernel.
 
-2.6.x.y are maintained by the "stable" team <stable@kernel.org>, and are
-released as needs dictate.  The normal release period is approximately 
+3.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+are released as needs dictate.  The normal release period is approximately
 two weeks, but it can be longer if there are no pressing problems.  A
 security-related problem, instead, can cause a release to happen almost
 instantly.
@@ -285,7 +285,7 @@ The file Documentation/stable_kernel_rules.txt in the kernel tree
 documents what kinds of changes are acceptable for the -stable tree, and
 how the release process works.
 
-2.6.x -git patches
+3.x -git patches
 ------------------
 These are daily snapshots of Linus' kernel tree which are managed in a
 git repository (hence the name.) These patches are usually released
@@ -317,13 +317,13 @@ revisions to it, and maintainers can mark patches as under review,
 accepted, or rejected.  Most of these patchwork sites are listed at
 http://patchwork.kernel.org/.
 
-2.6.x -next kernel tree for integration tests
+3.x -next kernel tree for integration tests
 ---------------------------------------------
-Before updates from subsystem trees are merged into the mainline 2.6.x
+Before updates from subsystem trees are merged into the mainline 3.x
 tree, they need to be integration-tested.  For this purpose, a special
 testing repository exists into which virtually all subsystem trees are
 pulled on an almost daily basis:
-       http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
+       http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
        http://linux.f-seidel.de/linux-next/pmwiki/
 
 This way, the -next kernel gives a summary outlook onto what will be
index 46abbcb28b7c3fcf2dc03f997089a3103c724ded..da3cfb34825d254ca552a8f2fe24ff780a8a3eee 100644 (file)
@@ -225,11 +225,32 @@ frequency before ramping down. This is to ensure that the governor has
 seen enough historic cpu load data to determine the appropriate
 workload.  Default is 80000 uS.
 
-go_maxspeed_load: The CPU load at which to ramp to max speed.  Default
-is 85.
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load.  If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher.  Default is maximum speed.
+
+go_hispeed_load: The CPU load at which to ramp to the intermediate "hi
+speed".  Default is 85%.
+
+above_hispeed_delay: Once speed is set to hispeed_freq, wait for this
+long before bumping speed higher in response to continued high load.
+Default is 20000 uS.
 
 timer_rate: Sample rate for reevaluating cpu load when the system is
-not idle.  Default is 30000 uS.
+not idle.  Default is 20000 uS.
+
+input_boost: If non-zero, boost speed of all CPUs to hispeed_freq on
+touchscreen activity.  Default is 0.
+
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute.  If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+
+boostpulse: Immediately boost speed of all CPUs to hispeed_freq for
+min_sample_time, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
 
 2.7 Hotplug
 -----------
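
The governor tunables documented above are exposed as plain sysfs attribute files. As a minimal user-space sketch (the attribute path below is an assumption and may differ between platforms and kernel versions), pulsing the boost amounts to a single write to boostpulse:

/*
 * Minimal sketch: pulse the interactive governor's boost from user space.
 * The sysfs path is an assumption and may vary by platform.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpufreq/interactive/boostpulse";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("boostpulse");
		return 1;
	}
	/* any write boosts all CPUs to hispeed_freq for min_sample_time */
	fputs("1", f);
	fclose(f);
	return 0;
}
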
index 903a2546f13890b17d309554f3cbda69c2ee9ade..8a48c9b62864ed464f18fb0d93c16102f18ebc2f 100644 (file)
@@ -271,10 +271,10 @@ copies should go to:
    the linux-kernel list.
 
  - If you are fixing a bug, think about whether the fix should go into the
-   next stable update.  If so, stable@kernel.org should get a copy of the
-   patch.  Also add a "Cc: stable@kernel.org" to the tags within the patch
-   itself; that will cause the stable team to get a notification when your
-   fix goes into the mainline.
+   next stable update.  If so, stable@vger.kernel.org should get a copy of
+   the patch.  Also add a "Cc: stable@vger.kernel.org" to the tags within
+   the patch itself; that will cause the stable team to get a notification
+   when your fix goes into the mainline.
 
 When selecting recipients for a patch, it is good to have an idea of who
 you think will eventually accept the patch and get it merged.  While it
index a22ecf48f255c24220eec21cb1769e29c5db11c7..52729a756c1b55374ef7521898e6ded4753b61b9 100644 (file)
@@ -7,21 +7,29 @@ Supported chips:
     Addresses scanned: I2C 0x18 - 0x1f
     Datasheets:
        http://www.analog.com/static/imported-files/data_sheets/ADT7408.pdf
-  * IDT TSE2002B3, TS3000B3
-    Prefix: 'tse2002b3', 'ts3000b3'
+  * Atmel AT30TS00
+    Prefix: 'at30ts00'
     Addresses scanned: I2C 0x18 - 0x1f
     Datasheets:
-       http://www.idt.com/products/getdoc.cfm?docid=18715691
-       http://www.idt.com/products/getdoc.cfm?docid=18715692
+       http://www.atmel.com/Images/doc8585.pdf
+  * IDT TSE2002B3, TSE2002GB2, TS3000B3, TS3000GB2
+    Prefix: 'tse2002', 'ts3000'
+    Addresses scanned: I2C 0x18 - 0x1f
+    Datasheets:
+       http://www.idt.com/sites/default/files/documents/IDT_TSE2002B3C_DST_20100512_120303152056.pdf
+       http://www.idt.com/sites/default/files/documents/IDT_TSE2002GB2A1_DST_20111107_120303145914.pdf
+       http://www.idt.com/sites/default/files/documents/IDT_TS3000B3A_DST_20101129_120303152013.pdf
+       http://www.idt.com/sites/default/files/documents/IDT_TS3000GB2A1_DST_20111104_120303151012.pdf
   * Maxim MAX6604
     Prefix: 'max6604'
     Addresses scanned: I2C 0x18 - 0x1f
     Datasheets:
        http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf
-  * Microchip MCP9805, MCP98242, MCP98243, MCP9843
-    Prefixes: 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843'
+  * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP9843
+    Prefixes: 'mcp9804', 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843'
     Addresses scanned: I2C 0x18 - 0x1f
     Datasheets:
+       http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf
@@ -48,6 +56,12 @@ Supported chips:
     Datasheets:
        http://www.st.com/stonline/products/literature/ds/13447/stts424.pdf
        http://www.st.com/stonline/products/literature/ds/13448/stts424e02.pdf
+  * ST Microelectronics STTS2002, STTS3000
+    Prefix: 'stts2002', 'stts3000'
+    Addresses scanned: I2C 0x18 - 0x1f
+    Datasheets:
+       http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATASHEET/CD00225278.pdf
+       http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATA_BRIEF/CD00270920.pdf
   * JEDEC JC 42.4 compliant temperature sensor chips
     Prefix: 'jc42'
     Addresses scanned: I2C 0x18 - 0x1f
index 7dcd1a4e726c40ceea1283dfcc328c494f12d26d..69966813d5944a5fc562420b473692f369788edb 100644 (file)
@@ -39,23 +39,20 @@ independent, drivers.
      in case an unused hwspinlock isn't available. Users of this
      API will usually want to communicate the lock's id to the remote core
      before it can be used to achieve synchronization.
-     Can be called from an atomic context (this function will not sleep) but
-     not from within interrupt context.
+     Should be called from a process context (might sleep).
 
   struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
    - assign a specific hwspinlock id and return its address, or NULL
      if that hwspinlock is already in use. Usually board code will
      be calling this function in order to reserve specific hwspinlock
      ids for predefined purposes.
-     Can be called from an atomic context (this function will not sleep) but
-     not from within interrupt context.
+     Should be called from a process context (might sleep).
 
   int hwspin_lock_free(struct hwspinlock *hwlock);
    - free a previously-assigned hwspinlock; returns 0 on success, or an
      appropriate error code on failure (e.g. -EINVAL if the hwspinlock
      is already free).
-     Can be called from an atomic context (this function will not sleep) but
-     not from within interrupt context.
+     Should be called from a process context (might sleep).
 
   int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
    - lock a previously-assigned hwspinlock with a timeout limit (specified in
@@ -232,15 +229,14 @@ int hwspinlock_example2(void)
 
   int hwspin_lock_register(struct hwspinlock *hwlock);
    - to be called from the underlying platform-specific implementation, in
-     order to register a new hwspinlock instance. Can be called from an atomic
-     context (this function will not sleep) but not from within interrupt
-     context. Returns 0 on success, or appropriate error code on failure.
+     order to register a new hwspinlock instance. Should be called from
+     a process context (this function might sleep).
+     Returns 0 on success, or appropriate error code on failure.
 
   struct hwspinlock *hwspin_lock_unregister(unsigned int id);
    - to be called from the underlying vendor-specific implementation, in order
      to unregister an existing (and unused) hwspinlock instance.
-     Can be called from an atomic context (will not sleep) but not from
-     within interrupt context.
+     Should be called from a process context (this function might sleep).
      Returns the address of hwspinlock on success, or NULL on error (e.g.
      if the hwspinlock is still in use).
 
index bfe924217f246a8c0a10846e3a034201a9b9095a..7d4ecaa57cfaca5de2efa789088c3075038e2d94 100644 (file)
@@ -147,7 +147,7 @@ tcp_adv_win_scale - INTEGER
        (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
        if it is <= 0.
        Possible values are [-31, 31], inclusive.
-       Default: 2
+       Default: 1
 
 tcp_allowed_congestion_control - STRING
        Show/set the congestion control choices available to non-privileged
@@ -407,7 +407,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
        net.core.rmem_max.  Calling setsockopt() with SO_RCVBUF disables
        automatic tuning of that socket's receive buffer size, in which
        case this value is ignored.
-       Default: between 87380B and 4MB, depending on RAM size.
+       Default: between 87380B and 6MB, depending on RAM size.
 
 tcp_sack - BOOLEAN
        Enable select acknowledgments (SACKS).
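
Both tunables are ordinary sysctls; a minimal user-space sketch that reads and (as root) overrides net.ipv4.tcp_rmem through procfs — the values written are illustrative, not recommendations:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_rmem", "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("tcp_rmem (min default max): %s", buf);
		fclose(f);
	}

	/* overriding requires root; the triple is in bytes */
	f = fopen("/proc/sys/net/ipv4/tcp_rmem", "w");
	if (f) {
		fputs("4096 87380 6291456", f);
		fclose(f);
	}
	return 0;
}
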
index 22852b3cf49f7a6c9038b997e8eef06756a57834..a6b34307dec89553012ad65345f8237543d498c0 100644 (file)
@@ -709,6 +709,16 @@ will behave normally, not taking the autosuspend delay into account.
 Similarly, if the power.use_autosuspend field isn't set then the autosuspend
 helper functions will behave just like the non-autosuspend counterparts.
 
+Under some circumstances a driver or subsystem may want to prevent a device
+from autosuspending immediately, even though the usage counter is zero and the
+autosuspend delay time has expired.  If the ->runtime_suspend() callback
+returns -EAGAIN or -EBUSY, and if the next autosuspend delay expiration time is
+in the future (as it normally would be if the callback invoked
+pm_runtime_mark_last_busy()), the PM core will automatically reschedule the
+autosuspend.  The ->runtime_suspend() callback can't do this rescheduling
+itself because no suspend requests of any kind are accepted while the device is
+suspending (i.e., while the callback is running).
+
 The implementation is well suited for asynchronous use in interrupt contexts.
 However such use inevitably involves races, because the PM core can't
 synchronize ->runtime_suspend() callbacks with the arrival of I/O requests.
index e213f45cf9d7505c9c9e1fbd088eb91cd83292f4..21fd05c28e738e146b313081bbd4294687ddabdf 100644 (file)
@@ -24,10 +24,10 @@ Rules on what kind of patches are accepted, and which ones are not, into the
 Procedure for submitting patches to the -stable tree:
 
  - Send the patch, after verifying that it follows the above rules, to
-   stable@kernel.org.  You must note the upstream commit ID in the changelog
-   of your submission.
+   stable@vger.kernel.org.  You must note the upstream commit ID in the
+   changelog of your submission.
  - To have the patch automatically included in the stable tree, add the tag
-     Cc: stable@kernel.org
+     Cc: stable@vger.kernel.org
    in the sign-off area. Once the patch is merged it will be applied to
    the stable tree without anything else needing to be done by the author
    or subsystem maintainer.
@@ -35,10 +35,10 @@ Procedure for submitting patches to the -stable tree:
    cherry-picked than this can be specified in the following format in
    the sign-off area:
 
-     Cc: <stable@kernel.org> # .32.x: a1f84a3: sched: Check for idle
-     Cc: <stable@kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
-     Cc: <stable@kernel.org> # .32.x: fd21073: sched: Fix affinity logic
-     Cc: <stable@kernel.org> # .32.x
+     Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
+     Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
+     Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
+     Cc: <stable@vger.kernel.org> # .32.x
     Signed-off-by: Ingo Molnar <mingo@elte.hu>
 
    The tag sequence has the meaning of:
index a4efa0462f05c4b6e5ac8dc948a4248a4d48ab3c..5335fa8b06eb4f2b632126bd9fd95878b6efdce0 100644 (file)
@@ -47,10 +47,11 @@ This allows to filter away annoying devices that talk continuously.
 
 2. Find which bus connects to the desired device
 
-Run "cat /proc/bus/usb/devices", and find the T-line which corresponds to
-the device. Usually you do it by looking for the vendor string. If you have
-many similar devices, unplug one and compare two /proc/bus/usb/devices outputs.
-The T-line will have a bus number. Example:
+Run "cat /sys/kernel/debug/usb/devices", and find the T-line which corresponds
+to the device. Usually you do it by looking for the vendor string. If you have
+many similar devices, unplug one and compare the two
+/sys/kernel/debug/usb/devices outputs. The T-line will have a bus number.
+Example:
 
 T:  Bus=03 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#=  2 Spd=12  MxCh= 0
 D:  Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs=  1
@@ -58,7 +59,10 @@ P:  Vendor=0557 ProdID=2004 Rev= 1.00
 S:  Manufacturer=ATEN
 S:  Product=UC100KM V2.00
 
-Bus=03 means it's bus 3.
+"Bus=03" means it's bus 3. Alternatively, you can look at the output from
+"lsusb" and get the bus number from the appropriate line. Example:
+
+Bus 003 Device 002: ID 0557:2004 ATEN UC100KM V2.00
 
 3. Start 'cat'
 
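
The same lookup can be done programmatically; a minimal user-space sketch that prints the topology (T:) and product (P:) lines from the debugfs devices file, assuming debugfs is mounted at /sys/kernel/debug:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/usb/devices", "r");

	if (!f) {
		perror("usb/devices");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* T: lines carry Bus=; P: lines carry Vendor/ProdID */
		if (line[0] == 'T' || line[0] == 'P')
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
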
index 187282da92137a2ea147bcb0f664e0e99d90e681..de85391c021ae81df893bd0ab5ce51ace9132cd1 100644 (file)
@@ -1221,7 +1221,7 @@ F:        Documentation/aoe/
 F:     drivers/block/aoe/
 
 ATHEROS ATH GENERIC UTILITIES
-M:     "Luis R. Rodriguez" <lrodriguez@atheros.com>
+M:     "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/net/wireless/ath/*
@@ -1229,7 +1229,7 @@ F:        drivers/net/wireless/ath/*
 ATHEROS ATH5K WIRELESS DRIVER
 M:     Jiri Slaby <jirislaby@gmail.com>
 M:     Nick Kossifidis <mickflemm@gmail.com>
-M:     "Luis R. Rodriguez" <lrodriguez@atheros.com>
+M:     "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
 M:     Bob Copeland <me@bobcopeland.com>
 L:     linux-wireless@vger.kernel.org
 L:     ath5k-devel@lists.ath5k.org
@@ -1238,10 +1238,10 @@ S:      Maintained
 F:     drivers/net/wireless/ath/ath5k/
 
 ATHEROS ATH9K WIRELESS DRIVER
-M:     "Luis R. Rodriguez" <lrodriguez@atheros.com>
-M:     Jouni Malinen <jmalinen@atheros.com>
-M:     Vasanthakumar Thiagarajan <vasanth@atheros.com>
-M:     Senthil Balasubramanian <senthilkumar@atheros.com>
+M:     "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
+M:     Jouni Malinen <jouni@qca.qualcomm.com>
+M:     Vasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com>
+M:     Senthil Balasubramanian <senthilb@qca.qualcomm.com>
 L:     linux-wireless@vger.kernel.org
 L:     ath9k-devel@lists.ath9k.org
 W:     http://wireless.kernel.org/en/users/Drivers/ath9k
@@ -1269,7 +1269,7 @@ F:        drivers/input/misc/ati_remote2.c
 ATLX ETHERNET DRIVERS
 M:     Jay Cliburn <jcliburn@gmail.com>
 M:     Chris Snook <chris.snook@gmail.com>
-M:     Jie Yang <jie.yang@atheros.com>
+M:     Jie Yang <yangjie@qca.qualcomm.com>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/atl1
 W:     http://atl1.sourceforge.net
@@ -6039,7 +6039,7 @@ F:        arch/alpha/kernel/srm_env.c
 
 STABLE BRANCH
 M:     Greg Kroah-Hartman <greg@kroah.com>
-L:     stable@kernel.org
+L:     stable@vger.kernel.org
 S:     Maintained
 
 STAGING SUBSYSTEM
index 15702f930dd7f8c81ca7a6816ed0ef7c40490f0b..a8a3085a8ae35e902eede70202551bee2eb5165f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 0
-SUBLEVEL = 8
+SUBLEVEL = 36
 EXTRAVERSION =
 NAME = Sneaky Weasel
 
@@ -194,11 +194,8 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 export KBUILD_BUILDHOST := $(SUBARCH)
 #ARCH          ?= $(SUBARCH)
 ARCH           ?= arm
-ifneq ($(wildcard ../toolchain/arm-eabi-4.4.0),)
-CROSS_COMPILE  ?= ../toolchain/arm-eabi-4.4.0/bin/arm-eabi-
-endif
-ifneq ($(wildcard ../prebuilt/linux-x86/toolchain/arm-eabi-4.4.0),)
-CROSS_COMPILE  ?= ../prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/bin/arm-eabi-
+ifneq ($(wildcard ../toolchain/arm-eabi-4.4.3),)
+CROSS_COMPILE  ?= ../toolchain/arm-eabi-4.4.3/bin/arm-eabi-
 endif
 ifneq ($(wildcard ../prebuilt/linux-x86/toolchain/arm-eabi-4.4.3),)
 CROSS_COMPILE  ?= ../prebuilt/linux-x86/toolchain/arm-eabi-4.4.3/bin/arm-eabi-
index e8a761aee088a9bc5a8d2ce62d6bf1fde2183a92..f939794363ac6f94ad82a192ede6b9410f7bc002 100644 (file)
@@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        "       lda     $31,3b-2b(%0)\n"
        "       .previous\n"
        :       "+r"(ret), "=&r"(prev), "=&r"(cmp)
-       :       "r"(uaddr), "r"((long)oldval), "r"(newval)
+       :       "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
        :       "memory");
 
        *uval = prev;
index e9d0b690830107bcb7d32cd02d6754589dc2f3a9..1e56c180cb1535edb4a6137484f4ba261d34c958 100644 (file)
@@ -1217,7 +1217,7 @@ config ARM_ERRATA_743622
        depends on CPU_V7
        help
          This option enables the workaround for the 743622 Cortex-A9
-         (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+         (r2p*) erratum. Under very rare conditions, a faulty
          optimisation in the Cortex-A9 Store Buffer may lead to data
          corruption. This workaround sets a specific bit in the diagnostic
          register of the Cortex-A9 which disables the Store Buffer
@@ -1350,6 +1350,18 @@ config ARM_ERRATA_764369
          relevant cache maintenance functions and sets a specific bit
          in the diagnostic control register of the SCU.
 
+config PL310_ERRATA_769419
+       bool "PL310 errata: no automatic Store Buffer drain"
+       depends on CACHE_L2X0
+       help
+         On revisions of the PL310 prior to r3p2, the Store Buffer does
+         not automatically drain. This can cause normal, non-cacheable
+         writes to be retained when the memory system is idle, leading
+         to suboptimal I/O performance for drivers using coherent DMA.
+         This option adds a write barrier to the cpu_idle loop so that,
+         on systems with an outer cache, the store buffer is drained
+         explicitly.
+
 endmenu
 
 menu "Kernel Features"
index 227a477346edb44a2a83ba49a0f9b7cd2b165ec0..d95763d5f0d83df6543364e3ea648c2ad88994bf 100644 (file)
@@ -287,7 +287,7 @@ CONFIG_USB=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_MMC=y
index 176ec22af0342f215b59a2778144d69ef0f4c806..fd996bb13022879dee93c308c8e8ad154918ec20 100644 (file)
@@ -263,7 +263,7 @@ CONFIG_USB=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_MMC=y
index a88e64d4e9a5862c28160c9d1b7d3cd01edf0550..443675d317e6de326c576caf47ae9ff179a0814c 100644 (file)
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_USB_GADGETFS=m
index 8f168b32c02dc6b128111d2310e955b6f2fba528..6bd2bfc3d098283c5ca729e47b589ae39270491e 100644 (file)
@@ -10,7 +10,6 @@ CONFIG_RESOURCE_COUNTERS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_PANIC_TIMEOUT=1
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
@@ -23,7 +22,6 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_ARCH_RK30=y
 CONFIG_WIFI_CONTROL_FUNC=y
-# CONFIG_SWP_EMULATE is not set
 CONFIG_FIQ_DEBUGGER=y
 CONFIG_FIQ_DEBUGGER_NO_SLEEP=y
 CONFIG_FIQ_DEBUGGER_CONSOLE=y
@@ -55,10 +53,12 @@ CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_WAKELOCK=y
 CONFIG_PM_RUNTIME=y
+CONFIG_PM_DEBUG=y
 CONFIG_SUSPEND_TIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -78,6 +78,7 @@ CONFIG_IPV6_MIP6=y
 CONFIG_IPV6_TUNNEL=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
 CONFIG_NF_CONNTRACK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CT_PROTO_DCCP=y
@@ -96,6 +97,7 @@ CONFIG_NF_CT_NETLINK=y
 CONFIG_NETFILTER_TPROXY=y
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
@@ -221,7 +223,16 @@ CONFIG_INPUT_KEYRESET=y
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
 CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_TABLET_USB_WACOM=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_GT8XX=y
 CONFIG_INPUT_MISC=y
@@ -252,6 +263,7 @@ CONFIG_I2C1_CONTROLLER_RK30=y
 CONFIG_I2C2_CONTROLLER_RK30=y
 CONFIG_I2C3_CONTROLLER_RK30=y
 CONFIG_I2C4_CONTROLLER_RK30=y
+CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_WM831X=y
 CONFIG_EXPANDED_GPIO_NUM=0
 CONFIG_EXPANDED_GPIO_IRQ_NUM=0
@@ -353,8 +365,8 @@ CONFIG_HID_WACOM=y
 CONFIG_HID_ZEROPLUS=y
 CONFIG_ZEROPLUS_FF=y
 CONFIG_HID_ZYDACRON=y
-CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
 CONFIG_USB_OTG_BLACKLIST_HUB=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_SERIAL=y
@@ -399,13 +411,12 @@ CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_FTRACE is not set
+# CONFIG_EVENT_POWER_TRACING_DEPRECATED is not set
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_TWOFISH=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
index 59577ad3f4efdfda65e65753054673a32614e80f..547a3c1e59dbcd88ea9da77505691110508f9daf 100644
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_MCT_U232=m
 CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
 CONFIG_USB_FILE_STORAGE=m
index 7d66a6e66cb6c4d09dd4666c3522b7f4adb69a5c..e5cfa6ac44a0f1b72b5e44082737607a40abba0e 100644
@@ -232,6 +232,9 @@ void cpu_idle(void)
 #endif
 
                        local_irq_disable();
+#ifdef CONFIG_PL310_ERRATA_769419
+                       wmb();
+#endif
                        if (hlt_counter) {
                                local_irq_enable();
                                cpu_relax();
index a45d38dcac09903ff11f0bbdf6850347addb8a6c..ff956e7d95017fb9ef537cb4f40726636b01c24c 100644
@@ -280,8 +280,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();
 
-       printk("CPU%u: Booted secondary processor\n", cpu);
-
        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
@@ -293,6 +291,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
 
+       printk("CPU%u: Booted secondary processor\n", cpu);
+
        cpu_init();
        preempt_disable();
        trace_hardirqs_off();
index 7d606b04d313c5800359849ddff022a827655d87..eeb9478588557bb2f29c8b38334cd300edcbd2f7 100644
@@ -237,9 +237,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
-       CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
-       CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
-       CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+       CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+       CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+       CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
        CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
 };
 
index e83cc860c87a8d5c0fa79cf3881bfc13699e0c2b..f8936174ce812438923b617ed5c935eac00ab523 100644
@@ -748,7 +748,7 @@ static struct snd_platform_data da850_evm_snd_data = {
        .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
        .tdm_slots      = 2,
        .serial_dir     = da850_iis_serializer_direction,
-       .asp_chan_q     = EVENTQ_1,
+       .asp_chan_q     = EVENTQ_0,
        .version        = MCASP_VERSION_2,
        .txnumevt       = 1,
        .rxnumevt       = 1,
index f6ac9ba74878066afdfcde05e3630108ff86f6c1..3cdd23787bd00d605665350fcd6793045e0c3010 100644
@@ -563,7 +563,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
        int val;
        u32 value;
 
-       if (!vpif_vsclkdis_reg || !cpld_client)
+       if (!vpif_vidclkctl_reg || !cpld_client)
                return -ENXIO;
 
        val = i2c_smbus_read_byte(cpld_client);
@@ -571,7 +571,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                return val;
 
        spin_lock_irqsave(&vpif_reg_lock, flags);
-       value = __raw_readl(vpif_vsclkdis_reg);
+       value = __raw_readl(vpif_vidclkctl_reg);
        if (mux_mode) {
                val &= VPIF_INPUT_TWO_CHANNEL;
                value |= VIDCH1CLK;
@@ -579,7 +579,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                val |= VPIF_INPUT_ONE_CHANNEL;
                value &= ~VIDCH1CLK;
        }
-       __raw_writel(value, vpif_vsclkdis_reg);
+       __raw_writel(value, vpif_vidclkctl_reg);
        spin_unlock_irqrestore(&vpif_reg_lock, flags);
 
        err = i2c_smbus_write_byte(cpld_client, val);
index cf7e5985eebff2bdaf56af9070674d2e81437927..46c04498629455e8d8959c7c8305624de8f194dc 100644
@@ -31,6 +31,7 @@
 #include <asm/mach/arch.h>
 #include <linux/irq.h>
 #include <plat/time.h>
+#include <plat/ehci-orion.h>
 #include <plat/common.h>
 #include "common.h"
 
@@ -74,7 +75,7 @@ void __init dove_map_io(void)
 void __init dove_ehci0_init(void)
 {
        orion_ehci_init(&dove_mbus_dram_info,
-                       DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0);
+                       DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0, EHCI_PHY_NA);
 }
 
 /*****************************************************************************
index 74ac88978ddd6ec22cf25d86f78b8d4f176e01ec..a37fe021d69f9cdc0408816b0c7650b6650b6163 100644
@@ -32,7 +32,7 @@
  * Memory-mapped I/O on MX21ADS base board
  */
 #define MX21ADS_MMIO_BASE_ADDR   0xf5000000
-#define MX21ADS_MMIO_SIZE        SZ_16M
+#define MX21ADS_MMIO_SIZE        0xc00000
 
 #define MX21ADS_REG_ADDR(offset)    (void __force __iomem *) \
                (MX21ADS_MMIO_BASE_ADDR + (offset))
index f3248cfbe51d058f79812d5247125fc98c2c2af6..c5dbbb35e0b18331ba187bdf48be5e4ebe3c453d 100644
@@ -28,6 +28,7 @@
 #include <plat/cache-feroceon-l2.h>
 #include <plat/mvsdio.h>
 #include <plat/orion_nand.h>
+#include <plat/ehci-orion.h>
 #include <plat/common.h>
 #include <plat/time.h>
 #include "common.h"
@@ -74,7 +75,7 @@ void __init kirkwood_ehci_init(void)
 {
        kirkwood_clk_ctrl |= CGC_USB0;
        orion_ehci_init(&kirkwood_mbus_dram_info,
-                       USB_PHYS_BASE, IRQ_KIRKWOOD_USB);
+                       USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA);
 }
 
 
index ac787957e2d9f94e5a57afbce87c799b697d1cc3..7afccf472205e7eff953bcd8b05ce08d2f6b997f 100644
 #define MPP_F6282_MASK         MPP(  0, 0x0, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP0_GPIO              MPP(  0, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP0_NF_IO2            MPP(  0, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP0_SPI_SCn           MPP(  0, 0x2, 0, 1, 1,   1,   1,   1,   1 )
+#define MPP0_NF_IO2            MPP(  0, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP0_SPI_SCn           MPP(  0, 0x2, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP1_GPO               MPP(  1, 0x0, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP1_NF_IO3            MPP(  1, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP1_SPI_MOSI          MPP(  1, 0x2, 0, 1, 1,   1,   1,   1,   1 )
+#define MPP1_NF_IO3            MPP(  1, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP1_SPI_MOSI          MPP(  1, 0x2, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP2_GPO               MPP(  2, 0x0, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP2_NF_IO4            MPP(  2, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP2_SPI_SCK           MPP(  2, 0x2, 0, 1, 1,   1,   1,   1,   1 )
+#define MPP2_NF_IO4            MPP(  2, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP2_SPI_SCK           MPP(  2, 0x2, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP3_GPO               MPP(  3, 0x0, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP3_NF_IO5            MPP(  3, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP3_SPI_MISO          MPP(  3, 0x2, 1, 0, 1,   1,   1,   1,   1 )
+#define MPP3_NF_IO5            MPP(  3, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP3_SPI_MISO          MPP(  3, 0x2, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP4_GPIO              MPP(  4, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP4_NF_IO6            MPP(  4, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP4_UART0_RXD         MPP(  4, 0x2, 1, 0, 1,   1,   1,   1,   1 )
-#define MPP4_SATA1_ACTn                MPP(  4, 0x5, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP4_NF_IO6            MPP(  4, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP4_UART0_RXD         MPP(  4, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP4_SATA1_ACTn                MPP(  4, 0x5, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP4_LCD_VGA_HSYNC     MPP(  4, 0xb, 0, 0, 0,   0,   0,   0,   1 )
-#define MPP4_PTP_CLK           MPP(  4, 0xd, 1, 0, 1,   1,   1,   1,   0 )
+#define MPP4_PTP_CLK           MPP(  4, 0xd, 0, 0, 1,   1,   1,   1,   0 )
 
 #define MPP5_GPO               MPP(  5, 0x0, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP5_NF_IO7            MPP(  5, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP5_UART0_TXD         MPP(  5, 0x2, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP5_PTP_TRIG_GEN      MPP(  5, 0x4, 0, 1, 1,   1,   1,   1,   0 )
-#define MPP5_SATA0_ACTn                MPP(  5, 0x5, 0, 1, 0,   1,   1,   1,   1 )
+#define MPP5_NF_IO7            MPP(  5, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP5_UART0_TXD         MPP(  5, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP5_PTP_TRIG_GEN      MPP(  5, 0x4, 0, 0, 1,   1,   1,   1,   0 )
+#define MPP5_SATA0_ACTn                MPP(  5, 0x5, 0, 0, 0,   1,   1,   1,   1 )
 #define MPP5_LCD_VGA_VSYNC     MPP(  5, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
-#define MPP6_SYSRST_OUTn       MPP(  6, 0x1, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP6_SPI_MOSI          MPP(  6, 0x2, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP6_PTP_TRIG_GEN      MPP(  6, 0x3, 0, 1, 1,   1,   1,   1,   0 )
+#define MPP6_SYSRST_OUTn       MPP(  6, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP6_SPI_MOSI          MPP(  6, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP6_PTP_TRIG_GEN      MPP(  6, 0x3, 0, 0, 1,   1,   1,   1,   0 )
 
 #define MPP7_GPO               MPP(  7, 0x0, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP7_PEX_RST_OUTn      MPP(  7, 0x1, 0, 1, 1,   1,   1,   1,   0 )
-#define MPP7_SPI_SCn           MPP(  7, 0x2, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP7_PTP_TRIG_GEN      MPP(  7, 0x3, 0, 1, 1,   1,   1,   1,   0 )
-#define MPP7_LCD_PWM           MPP(  7, 0xb, 0, 1, 0,   0,   0,   0,   1 )
+#define MPP7_PEX_RST_OUTn      MPP(  7, 0x1, 0, 0, 1,   1,   1,   1,   0 )
+#define MPP7_SPI_SCn           MPP(  7, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP7_PTP_TRIG_GEN      MPP(  7, 0x3, 0, 0, 1,   1,   1,   1,   0 )
+#define MPP7_LCD_PWM           MPP(  7, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP8_GPIO              MPP(  8, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP8_TW0_SDA           MPP(  8, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP8_UART0_RTS         MPP(  8, 0x2, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP8_UART1_RTS         MPP(  8, 0x3, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP8_MII0_RXERR                MPP(  8, 0x4, 1, 0, 0,   1,   1,   1,   1 )
-#define MPP8_SATA1_PRESENTn    MPP(  8, 0x5, 0, 1, 0,   0,   1,   1,   1 )
-#define MPP8_PTP_CLK           MPP(  8, 0xc, 1, 0, 1,   1,   1,   1,   0 )
-#define MPP8_MII0_COL          MPP(  8, 0xd, 1, 0, 1,   1,   1,   1,   1 )
+#define MPP8_TW0_SDA           MPP(  8, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP8_UART0_RTS         MPP(  8, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP8_UART1_RTS         MPP(  8, 0x3, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP8_MII0_RXERR                MPP(  8, 0x4, 0, 0, 0,   1,   1,   1,   1 )
+#define MPP8_SATA1_PRESENTn    MPP(  8, 0x5, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP8_PTP_CLK           MPP(  8, 0xc, 0, 0, 1,   1,   1,   1,   0 )
+#define MPP8_MII0_COL          MPP(  8, 0xd, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP9_GPIO              MPP(  9, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP9_TW0_SCK           MPP(  9, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP9_UART0_CTS         MPP(  9, 0x2, 1, 0, 1,   1,   1,   1,   1 )
-#define MPP9_UART1_CTS         MPP(  9, 0x3, 1, 0, 1,   1,   1,   1,   1 )
-#define MPP9_SATA0_PRESENTn    MPP(  9, 0x5, 0, 1, 0,   1,   1,   1,   1 )
-#define MPP9_PTP_EVENT_REQ     MPP(  9, 0xc, 1, 0, 1,   1,   1,   1,   0 )
-#define MPP9_MII0_CRS          MPP(  9, 0xd, 1, 0, 1,   1,   1,   1,   1 )
+#define MPP9_TW0_SCK           MPP(  9, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP9_UART0_CTS         MPP(  9, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP9_UART1_CTS         MPP(  9, 0x3, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP9_SATA0_PRESENTn    MPP(  9, 0x5, 0, 0, 0,   1,   1,   1,   1 )
+#define MPP9_PTP_EVENT_REQ     MPP(  9, 0xc, 0, 0, 1,   1,   1,   1,   0 )
+#define MPP9_MII0_CRS          MPP(  9, 0xd, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP10_GPO              MPP( 10, 0x0, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP10_SPI_SCK          MPP( 10, 0x2, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP10_UART0_TXD                MPP( 10, 0X3, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP10_SATA1_ACTn       MPP( 10, 0x5, 0, 1, 0,   0,   1,   1,   1 )
-#define MPP10_PTP_TRIG_GEN     MPP( 10, 0xc, 0, 1, 1,   1,   1,   1,   0 )
+#define MPP10_SPI_SCK          MPP( 10, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP10_UART0_TXD                MPP( 10, 0X3, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP10_SATA1_ACTn       MPP( 10, 0x5, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP10_PTP_TRIG_GEN     MPP( 10, 0xc, 0, 0, 1,   1,   1,   1,   0 )
 
 #define MPP11_GPIO             MPP( 11, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP11_SPI_MISO         MPP( 11, 0x2, 1, 0, 1,   1,   1,   1,   1 )
-#define MPP11_UART0_RXD                MPP( 11, 0x3, 1, 0, 1,   1,   1,   1,   1 )
-#define MPP11_PTP_EVENT_REQ    MPP( 11, 0x4, 1, 0, 1,   1,   1,   1,   0 )
-#define MPP11_PTP_TRIG_GEN     MPP( 11, 0xc, 0, 1, 1,   1,   1,   1,   0 )
-#define MPP11_PTP_CLK          MPP( 11, 0xd, 1, 0, 1,   1,   1,   1,   0 )
-#define MPP11_SATA0_ACTn       MPP( 11, 0x5, 0, 1, 0,   1,   1,   1,   1 )
+#define MPP11_SPI_MISO         MPP( 11, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP11_UART0_RXD                MPP( 11, 0x3, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP11_PTP_EVENT_REQ    MPP( 11, 0x4, 0, 0, 1,   1,   1,   1,   0 )
+#define MPP11_PTP_TRIG_GEN     MPP( 11, 0xc, 0, 0, 1,   1,   1,   1,   0 )
+#define MPP11_PTP_CLK          MPP( 11, 0xd, 0, 0, 1,   1,   1,   1,   0 )
+#define MPP11_SATA0_ACTn       MPP( 11, 0x5, 0, 0, 0,   1,   1,   1,   1 )
 
 #define MPP12_GPO              MPP( 12, 0x0, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP12_SD_CLK           MPP( 12, 0x1, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP12_AU_SPDIF0                MPP( 12, 0xa, 0, 1, 0,   0,   0,   0,   1 )
-#define MPP12_SPI_MOSI         MPP( 12, 0xb, 0, 1, 0,   0,   0,   0,   1 )
-#define MPP12_TW1_SDA          MPP( 12, 0xd, 1, 0, 0,   0,   0,   0,   1 )
+#define MPP12_SD_CLK           MPP( 12, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP12_AU_SPDIF0                MPP( 12, 0xa, 0, 0, 0,   0,   0,   0,   1 )
+#define MPP12_SPI_MOSI         MPP( 12, 0xb, 0, 0, 0,   0,   0,   0,   1 )
+#define MPP12_TW1_SDA          MPP( 12, 0xd, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP13_GPIO             MPP( 13, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP13_SD_CMD           MPP( 13, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP13_UART1_TXD                MPP( 13, 0x3, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP13_AU_SPDIFRMCLK    MPP( 13, 0xa, 0, 1, 0,   0,   0,   0,   1 )
-#define MPP13_LCDPWM           MPP( 13, 0xb, 0, 1, 0,   0,   0,   0,   1 )
+#define MPP13_SD_CMD           MPP( 13, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP13_UART1_TXD                MPP( 13, 0x3, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP13_AU_SPDIFRMCLK    MPP( 13, 0xa, 0, 0, 0,   0,   0,   0,   1 )
+#define MPP13_LCDPWM           MPP( 13, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP14_GPIO             MPP( 14, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP14_SD_D0            MPP( 14, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP14_UART1_RXD                MPP( 14, 0x3, 1, 0, 1,   1,   1,   1,   1 )
-#define MPP14_SATA1_PRESENTn   MPP( 14, 0x4, 0, 1, 0,   0,   1,   1,   1 )
-#define MPP14_AU_SPDIFI                MPP( 14, 0xa, 1, 0, 0,   0,   0,   0,   1 )
-#define MPP14_AU_I2SDI         MPP( 14, 0xb, 1, 0, 0,   0,   0,   0,   1 )
-#define MPP14_MII0_COL         MPP( 14, 0xd, 1, 0, 1,   1,   1,   1,   1 )
+#define MPP14_SD_D0            MPP( 14, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP14_UART1_RXD                MPP( 14, 0x3, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP14_SATA1_PRESENTn   MPP( 14, 0x4, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP14_AU_SPDIFI                MPP( 14, 0xa, 0, 0, 0,   0,   0,   0,   1 )
+#define MPP14_AU_I2SDI         MPP( 14, 0xb, 0, 0, 0,   0,   0,   0,   1 )
+#define MPP14_MII0_COL         MPP( 14, 0xd, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP15_GPIO             MPP( 15, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP15_SD_D1            MPP( 15, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP15_UART0_RTS                MPP( 15, 0x2, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP15_UART1_TXD                MPP( 15, 0x3, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP15_SATA0_ACTn       MPP( 15, 0x4, 0, 1, 0,   1,   1,   1,   1 )
-#define MPP15_SPI_CSn          MPP( 15, 0xb, 0, 1, 0,   0,   0,   0,   1 )
+#define MPP15_SD_D1            MPP( 15, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP15_UART0_RTS                MPP( 15, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP15_UART1_TXD                MPP( 15, 0x3, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP15_SATA0_ACTn       MPP( 15, 0x4, 0, 0, 0,   1,   1,   1,   1 )
+#define MPP15_SPI_CSn          MPP( 15, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP16_GPIO             MPP( 16, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP16_SD_D2            MPP( 16, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP16_UART0_CTS                MPP( 16, 0x2, 1, 0, 1,   1,   1,   1,   1 )
-#define MPP16_UART1_RXD                MPP( 16, 0x3, 1, 0, 1,   1,   1,   1,   1 )
-#define MPP16_SATA1_ACTn       MPP( 16, 0x4, 0, 1, 0,   0,   1,   1,   1 )
-#define MPP16_LCD_EXT_REF_CLK  MPP( 16, 0xb, 1, 0, 0,   0,   0,   0,   1 )
-#define MPP16_MII0_CRS         MPP( 16, 0xd, 1, 0, 1,   1,   1,   1,   1 )
+#define MPP16_SD_D2            MPP( 16, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP16_UART0_CTS                MPP( 16, 0x2, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP16_UART1_RXD                MPP( 16, 0x3, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP16_SATA1_ACTn       MPP( 16, 0x4, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP16_LCD_EXT_REF_CLK  MPP( 16, 0xb, 0, 0, 0,   0,   0,   0,   1 )
+#define MPP16_MII0_CRS         MPP( 16, 0xd, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP17_GPIO             MPP( 17, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP17_SD_D3            MPP( 17, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP17_SATA0_PRESENTn   MPP( 17, 0x4, 0, 1, 0,   1,   1,   1,   1 )
-#define MPP17_SATA1_ACTn       MPP( 17, 0xa, 0, 1, 0,   0,   0,   0,   1 )
-#define MPP17_TW1_SCK          MPP( 17, 0xd, 1, 1, 0,   0,   0,   0,   1 )
+#define MPP17_SD_D3            MPP( 17, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP17_SATA0_PRESENTn   MPP( 17, 0x4, 0, 0, 0,   1,   1,   1,   1 )
+#define MPP17_SATA1_ACTn       MPP( 17, 0xa, 0, 0, 0,   0,   0,   0,   1 )
+#define MPP17_TW1_SCK          MPP( 17, 0xd, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP18_GPO              MPP( 18, 0x0, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP18_NF_IO0           MPP( 18, 0x1, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP18_PEX0_CLKREQ      MPP( 18, 0x2, 0, 1, 0,   0,   0,   0,   1 )
+#define MPP18_NF_IO0           MPP( 18, 0x1, 0, 0, 1,   1,   1,   1,   1 )
+#define MPP18_PEX0_CLKREQ      MPP( 18, 0x2, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP19_GPO              MPP( 19, 0x0, 0, 1, 1,   1,   1,   1,   1 )
-#define MPP19_NF_IO1           MPP( 19, 0x1, 1, 1, 1,   1,   1,   1,   1 )
+#define MPP19_NF_IO1           MPP( 19, 0x1, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP20_GPIO             MPP( 20, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP20_TSMP0            MPP( 20, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP20_TDM_CH0_TX_QL    MPP( 20, 0x2, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP20_TSMP0            MPP( 20, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP20_TDM_CH0_TX_QL    MPP( 20, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP20_GE1_TXD0         MPP( 20, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP20_AU_SPDIFI                MPP( 20, 0x4, 1, 0, 0,   0,   1,   1,   1 )
-#define MPP20_SATA1_ACTn       MPP( 20, 0x5, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP20_AU_SPDIFI                MPP( 20, 0x4, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP20_SATA1_ACTn       MPP( 20, 0x5, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP20_LCD_D0           MPP( 20, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP21_GPIO             MPP( 21, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP21_TSMP1            MPP( 21, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP21_TDM_CH0_RX_QL    MPP( 21, 0x2, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP21_TSMP1            MPP( 21, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP21_TDM_CH0_RX_QL    MPP( 21, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP21_GE1_TXD1         MPP( 21, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP21_AU_SPDIFO                MPP( 21, 0x4, 0, 1, 0,   0,   1,   1,   1 )
-#define MPP21_SATA0_ACTn       MPP( 21, 0x5, 0, 1, 0,   1,   1,   1,   1 )
+#define MPP21_AU_SPDIFO                MPP( 21, 0x4, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP21_SATA0_ACTn       MPP( 21, 0x5, 0, 0, 0,   1,   1,   1,   1 )
 #define MPP21_LCD_D1           MPP( 21, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP22_GPIO             MPP( 22, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP22_TSMP2            MPP( 22, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP22_TDM_CH2_TX_QL    MPP( 22, 0x2, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP22_TSMP2            MPP( 22, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP22_TDM_CH2_TX_QL    MPP( 22, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP22_GE1_TXD2         MPP( 22, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP22_AU_SPDIFRMKCLK   MPP( 22, 0x4, 0, 1, 0,   0,   1,   1,   1 )
-#define MPP22_SATA1_PRESENTn   MPP( 22, 0x5, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP22_AU_SPDIFRMKCLK   MPP( 22, 0x4, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP22_SATA1_PRESENTn   MPP( 22, 0x5, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP22_LCD_D2           MPP( 22, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP23_GPIO             MPP( 23, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP23_TSMP3            MPP( 23, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP23_TDM_CH2_RX_QL    MPP( 23, 0x2, 1, 0, 0,   0,   1,   1,   1 )
+#define MPP23_TSMP3            MPP( 23, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP23_TDM_CH2_RX_QL    MPP( 23, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP23_GE1_TXD3         MPP( 23, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP23_AU_I2SBCLK       MPP( 23, 0x4, 0, 1, 0,   0,   1,   1,   1 )
-#define MPP23_SATA0_PRESENTn   MPP( 23, 0x5, 0, 1, 0,   1,   1,   1,   1 )
+#define MPP23_AU_I2SBCLK       MPP( 23, 0x4, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP23_SATA0_PRESENTn   MPP( 23, 0x5, 0, 0, 0,   1,   1,   1,   1 )
 #define MPP23_LCD_D3           MPP( 23, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP24_GPIO             MPP( 24, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP24_TSMP4            MPP( 24, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP24_TDM_SPI_CS0      MPP( 24, 0x2, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP24_TSMP4            MPP( 24, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP24_TDM_SPI_CS0      MPP( 24, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP24_GE1_RXD0         MPP( 24, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP24_AU_I2SDO         MPP( 24, 0x4, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP24_AU_I2SDO         MPP( 24, 0x4, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP24_LCD_D4           MPP( 24, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP25_GPIO             MPP( 25, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP25_TSMP5            MPP( 25, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP25_TDM_SPI_SCK      MPP( 25, 0x2, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP25_TSMP5            MPP( 25, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP25_TDM_SPI_SCK      MPP( 25, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP25_GE1_RXD1         MPP( 25, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP25_AU_I2SLRCLK      MPP( 25, 0x4, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP25_AU_I2SLRCLK      MPP( 25, 0x4, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP25_LCD_D5           MPP( 25, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP26_GPIO             MPP( 26, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP26_TSMP6            MPP( 26, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP26_TDM_SPI_MISO     MPP( 26, 0x2, 1, 0, 0,   0,   1,   1,   1 )
+#define MPP26_TSMP6            MPP( 26, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP26_TDM_SPI_MISO     MPP( 26, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP26_GE1_RXD2         MPP( 26, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP26_AU_I2SMCLK       MPP( 26, 0x4, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP26_AU_I2SMCLK       MPP( 26, 0x4, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP26_LCD_D6           MPP( 26, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP27_GPIO             MPP( 27, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP27_TSMP7            MPP( 27, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP27_TDM_SPI_MOSI     MPP( 27, 0x2, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP27_TSMP7            MPP( 27, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP27_TDM_SPI_MOSI     MPP( 27, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP27_GE1_RXD3         MPP( 27, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP27_AU_I2SDI         MPP( 27, 0x4, 1, 0, 0,   0,   1,   1,   1 )
+#define MPP27_AU_I2SDI         MPP( 27, 0x4, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP27_LCD_D7           MPP( 27, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP28_GPIO             MPP( 28, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP28_TSMP8            MPP( 28, 0x1, 1, 1, 0,   0,   1,   1,   1 )
+#define MPP28_TSMP8            MPP( 28, 0x1, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP28_TDM_CODEC_INTn   MPP( 28, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP28_GE1_COL          MPP( 28, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP28_AU_EXTCLK                MPP( 28, 0x4, 1, 0, 0,   0,   1,   1,   1 )
+#define MPP28_AU_EXTCLK                MPP( 28, 0x4, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP28_LCD_D8           MPP( 28, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP29_GPIO             MPP( 29, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP29_TSMP9            MPP( 29, 0x1, 1, 1, 0,   0,   1,   1,   1 )
+#define MPP29_TSMP9            MPP( 29, 0x1, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP29_TDM_CODEC_RSTn   MPP( 29, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP29_GE1_TCLK         MPP( 29, 0x3, 0, 0, 0,   1,   1,   1,   1 )
 #define MPP29_LCD_D9           MPP( 29, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP30_GPIO             MPP( 30, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP30_TSMP10           MPP( 30, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP30_TDM_PCLK         MPP( 30, 0x2, 1, 1, 0,   0,   1,   1,   1 )
+#define MPP30_TSMP10           MPP( 30, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP30_TDM_PCLK         MPP( 30, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP30_GE1_RXCTL                MPP( 30, 0x3, 0, 0, 0,   1,   1,   1,   1 )
 #define MPP30_LCD_D10          MPP( 30, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP31_GPIO             MPP( 31, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP31_TSMP11           MPP( 31, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP31_TDM_FS           MPP( 31, 0x2, 1, 1, 0,   0,   1,   1,   1 )
+#define MPP31_TSMP11           MPP( 31, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP31_TDM_FS           MPP( 31, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP31_GE1_RXCLK                MPP( 31, 0x3, 0, 0, 0,   1,   1,   1,   1 )
 #define MPP31_LCD_D11          MPP( 31, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP32_GPIO             MPP( 32, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP32_TSMP12           MPP( 32, 0x1, 1, 1, 0,   0,   1,   1,   1 )
-#define MPP32_TDM_DRX          MPP( 32, 0x2, 1, 0, 0,   0,   1,   1,   1 )
+#define MPP32_TSMP12           MPP( 32, 0x1, 0, 0, 0,   0,   1,   1,   1 )
+#define MPP32_TDM_DRX          MPP( 32, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP32_GE1_TCLKOUT      MPP( 32, 0x3, 0, 0, 0,   1,   1,   1,   1 )
 #define MPP32_LCD_D12          MPP( 32, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP33_GPO              MPP( 33, 0x0, 0, 1, 0,   1,   1,   1,   1 )
-#define MPP33_TDM_DTX          MPP( 33, 0x2, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP33_TDM_DTX          MPP( 33, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP33_GE1_TXCTL                MPP( 33, 0x3, 0, 0, 0,   1,   1,   1,   1 )
 #define MPP33_LCD_D13          MPP( 33, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP34_GPIO             MPP( 34, 0x0, 1, 1, 0,   1,   1,   1,   1 )
-#define MPP34_TDM_SPI_CS1      MPP( 34, 0x2, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP34_TDM_SPI_CS1      MPP( 34, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP34_GE1_TXEN         MPP( 34, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP34_SATA1_ACTn       MPP( 34, 0x5, 0, 1, 0,   0,   0,   1,   1 )
+#define MPP34_SATA1_ACTn       MPP( 34, 0x5, 0, 0, 0,   0,   0,   1,   1 )
 #define MPP34_LCD_D14          MPP( 34, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP35_GPIO             MPP( 35, 0x0, 1, 1, 1,   1,   1,   1,   1 )
-#define MPP35_TDM_CH0_TX_QL    MPP( 35, 0x2, 0, 1, 0,   0,   1,   1,   1 )
+#define MPP35_TDM_CH0_TX_QL    MPP( 35, 0x2, 0, 0, 0,   0,   1,   1,   1 )
 #define MPP35_GE1_RXERR                MPP( 35, 0x3, 0, 0, 0,   1,   1,   1,   1 )
-#define MPP35_SATA0_ACTn       MPP( 35, 0x5, 0, 1, 0,   1,   1,   1,   1 )
+#define MPP35_SATA0_ACTn       MPP( 35, 0x5, 0, 0, 0,   1,   1,   1,   1 )
 #define MPP35_LCD_D15          MPP( 22, 0xb, 0, 0, 0,   0,   0,   0,   1 )
-#define MPP35_MII0_RXERR       MPP( 35, 0xc, 1, 0, 1,   1,   1,   1,   1 )
+#define MPP35_MII0_RXERR       MPP( 35, 0xc, 0, 0, 1,   1,   1,   1,   1 )
 
 #define MPP36_GPIO             MPP( 36, 0x0, 1, 1, 1,   0,   0,   1,   1 )
-#define MPP36_TSMP0            MPP( 36, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP36_TDM_SPI_CS1      MPP( 36, 0x2, 0, 1, 0,   0,   0,   1,   1 )
-#define MPP36_AU_SPDIFI                MPP( 36, 0x4, 1, 0, 1,   0,   0,   1,   1 )
-#define MPP36_TW1_SDA          MPP( 36, 0xb, 1, 1, 0,   0,   0,   0,   1 )
+#define MPP36_TSMP0            MPP( 36, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP36_TDM_SPI_CS1      MPP( 36, 0x2, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP36_AU_SPDIFI                MPP( 36, 0x4, 0, 0, 1,   0,   0,   1,   1 )
+#define MPP36_TW1_SDA          MPP( 36, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP37_GPIO             MPP( 37, 0x0, 1, 1, 1,   0,   0,   1,   1 )
-#define MPP37_TSMP1            MPP( 37, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP37_TDM_CH2_TX_QL    MPP( 37, 0x2, 0, 1, 0,   0,   0,   1,   1 )
-#define MPP37_AU_SPDIFO                MPP( 37, 0x4, 0, 1, 1,   0,   0,   1,   1 )
-#define MPP37_TW1_SCK          MPP( 37, 0xb, 1, 1, 0,   0,   0,   0,   1 )
+#define MPP37_TSMP1            MPP( 37, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP37_TDM_CH2_TX_QL    MPP( 37, 0x2, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP37_AU_SPDIFO                MPP( 37, 0x4, 0, 0, 1,   0,   0,   1,   1 )
+#define MPP37_TW1_SCK          MPP( 37, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP38_GPIO             MPP( 38, 0x0, 1, 1, 1,   0,   0,   1,   1 )
-#define MPP38_TSMP2            MPP( 38, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP38_TDM_CH2_RX_QL    MPP( 38, 0x2, 0, 1, 0,   0,   0,   1,   1 )
-#define MPP38_AU_SPDIFRMLCLK   MPP( 38, 0x4, 0, 1, 1,   0,   0,   1,   1 )
+#define MPP38_TSMP2            MPP( 38, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP38_TDM_CH2_RX_QL    MPP( 38, 0x2, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP38_AU_SPDIFRMLCLK   MPP( 38, 0x4, 0, 0, 1,   0,   0,   1,   1 )
 #define MPP38_LCD_D18          MPP( 38, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP39_GPIO             MPP( 39, 0x0, 1, 1, 1,   0,   0,   1,   1 )
-#define MPP39_TSMP3            MPP( 39, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP39_TDM_SPI_CS0      MPP( 39, 0x2, 0, 1, 0,   0,   0,   1,   1 )
-#define MPP39_AU_I2SBCLK       MPP( 39, 0x4, 0, 1, 1,   0,   0,   1,   1 )
+#define MPP39_TSMP3            MPP( 39, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP39_TDM_SPI_CS0      MPP( 39, 0x2, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP39_AU_I2SBCLK       MPP( 39, 0x4, 0, 0, 1,   0,   0,   1,   1 )
 #define MPP39_LCD_D19          MPP( 39, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP40_GPIO             MPP( 40, 0x0, 1, 1, 1,   0,   0,   1,   1 )
-#define MPP40_TSMP4            MPP( 40, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP40_TDM_SPI_SCK      MPP( 40, 0x2, 0, 1, 0,   0,   0,   1,   1 )
-#define MPP40_AU_I2SDO         MPP( 40, 0x4, 0, 1, 1,   0,   0,   1,   1 )
+#define MPP40_TSMP4            MPP( 40, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP40_TDM_SPI_SCK      MPP( 40, 0x2, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP40_AU_I2SDO         MPP( 40, 0x4, 0, 0, 1,   0,   0,   1,   1 )
 #define MPP40_LCD_D20          MPP( 40, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP41_GPIO             MPP( 41, 0x0, 1, 1, 1,   0,   0,   1,   1 )
-#define MPP41_TSMP5            MPP( 41, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP41_TDM_SPI_MISO     MPP( 41, 0x2, 1, 0, 0,   0,   0,   1,   1 )
-#define MPP41_AU_I2SLRCLK      MPP( 41, 0x4, 0, 1, 1,   0,   0,   1,   1 )
+#define MPP41_TSMP5            MPP( 41, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP41_TDM_SPI_MISO     MPP( 41, 0x2, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP41_AU_I2SLRCLK      MPP( 41, 0x4, 0, 0, 1,   0,   0,   1,   1 )
 #define MPP41_LCD_D21          MPP( 41, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP42_GPIO             MPP( 42, 0x0, 1, 1, 1,   0,   0,   1,   1 )
-#define MPP42_TSMP6            MPP( 42, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP42_TDM_SPI_MOSI     MPP( 42, 0x2, 0, 1, 0,   0,   0,   1,   1 )
-#define MPP42_AU_I2SMCLK       MPP( 42, 0x4, 0, 1, 1,   0,   0,   1,   1 )
+#define MPP42_TSMP6            MPP( 42, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP42_TDM_SPI_MOSI     MPP( 42, 0x2, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP42_AU_I2SMCLK       MPP( 42, 0x4, 0, 0, 1,   0,   0,   1,   1 )
 #define MPP42_LCD_D22          MPP( 42, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP43_GPIO             MPP( 43, 0x0, 1, 1, 1,   0,   0,   1,   1 )
-#define MPP43_TSMP7            MPP( 43, 0x1, 1, 1, 0,   0,   0,   1,   1 )
+#define MPP43_TSMP7            MPP( 43, 0x1, 0, 0, 0,   0,   0,   1,   1 )
 #define MPP43_TDM_CODEC_INTn   MPP( 43, 0x2, 0, 0, 0,   0,   0,   1,   1 )
-#define MPP43_AU_I2SDI         MPP( 43, 0x4, 1, 0, 1,   0,   0,   1,   1 )
+#define MPP43_AU_I2SDI         MPP( 43, 0x4, 0, 0, 1,   0,   0,   1,   1 )
 #define MPP43_LCD_D23          MPP( 22, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP44_GPIO             MPP( 44, 0x0, 1, 1, 1,   0,   0,   1,   1 )
-#define MPP44_TSMP8            MPP( 44, 0x1, 1, 1, 0,   0,   0,   1,   1 )
+#define MPP44_TSMP8            MPP( 44, 0x1, 0, 0, 0,   0,   0,   1,   1 )
 #define MPP44_TDM_CODEC_RSTn   MPP( 44, 0x2, 0, 0, 0,   0,   0,   1,   1 )
-#define MPP44_AU_EXTCLK                MPP( 44, 0x4, 1, 0, 1,   0,   0,   1,   1 )
+#define MPP44_AU_EXTCLK                MPP( 44, 0x4, 0, 0, 1,   0,   0,   1,   1 )
 #define MPP44_LCD_CLK          MPP( 44, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP45_GPIO             MPP( 45, 0x0, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP45_TSMP9            MPP( 45, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP45_TDM_PCLK         MPP( 45, 0x2, 1, 1, 0,   0,   0,   1,   1 )
+#define MPP45_TSMP9            MPP( 45, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP45_TDM_PCLK         MPP( 45, 0x2, 0, 0, 0,   0,   0,   1,   1 )
 #define MPP245_LCD_E           MPP( 45, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP46_GPIO             MPP( 46, 0x0, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP46_TSMP10           MPP( 46, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP46_TDM_FS           MPP( 46, 0x2, 1, 1, 0,   0,   0,   1,   1 )
+#define MPP46_TSMP10           MPP( 46, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP46_TDM_FS           MPP( 46, 0x2, 0, 0, 0,   0,   0,   1,   1 )
 #define MPP46_LCD_HSYNC                MPP( 46, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP47_GPIO             MPP( 47, 0x0, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP47_TSMP11           MPP( 47, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP47_TDM_DRX          MPP( 47, 0x2, 1, 0, 0,   0,   0,   1,   1 )
+#define MPP47_TSMP11           MPP( 47, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP47_TDM_DRX          MPP( 47, 0x2, 0, 0, 0,   0,   0,   1,   1 )
 #define MPP47_LCD_VSYNC                MPP( 47, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP48_GPIO             MPP( 48, 0x0, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP48_TSMP12           MPP( 48, 0x1, 1, 1, 0,   0,   0,   1,   1 )
-#define MPP48_TDM_DTX          MPP( 48, 0x2, 0, 1, 0,   0,   0,   1,   1 )
+#define MPP48_TSMP12           MPP( 48, 0x1, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP48_TDM_DTX          MPP( 48, 0x2, 0, 0, 0,   0,   0,   1,   1 )
 #define MPP48_LCD_D16          MPP( 22, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP49_GPIO             MPP( 49, 0x0, 1, 1, 0,   0,   0,   1,   0 )
 #define MPP49_GPO              MPP( 49, 0x0, 0, 1, 0,   0,   0,   0,   1 )
-#define MPP49_TSMP9            MPP( 49, 0x1, 1, 1, 0,   0,   0,   1,   0 )
-#define MPP49_TDM_CH0_RX_QL    MPP( 49, 0x2, 0, 1, 0,   0,   0,   1,   1 )
-#define MPP49_PTP_CLK          MPP( 49, 0x5, 1, 0, 0,   0,   0,   1,   0 )
-#define MPP49_PEX0_CLKREQ      MPP( 49, 0xa, 0, 1, 0,   0,   0,   0,   1 )
+#define MPP49_TSMP9            MPP( 49, 0x1, 0, 0, 0,   0,   0,   1,   0 )
+#define MPP49_TDM_CH0_RX_QL    MPP( 49, 0x2, 0, 0, 0,   0,   0,   1,   1 )
+#define MPP49_PTP_CLK          MPP( 49, 0x5, 0, 0, 0,   0,   0,   1,   0 )
+#define MPP49_PEX0_CLKREQ      MPP( 49, 0xa, 0, 0, 0,   0,   0,   0,   1 )
 #define MPP49_LCD_D17          MPP( 49, 0xb, 0, 0, 0,   0,   0,   0,   1 )
 
 #define MPP_MAX                        49
index 2667f52e3b04da128e9ab7feef4c2fdb13b45fb1..9e3b90df32e1626c3858367910a2b446369d279d 100644
@@ -61,7 +61,7 @@
  */
 #define IRQ_LPC32XX_JTAG_COMM_TX       LPC32XX_SIC1_IRQ(1)
 #define IRQ_LPC32XX_JTAG_COMM_RX       LPC32XX_SIC1_IRQ(2)
-#define IRQ_LPC32XX_GPI_11             LPC32XX_SIC1_IRQ(4)
+#define IRQ_LPC32XX_GPI_28             LPC32XX_SIC1_IRQ(4)
 #define IRQ_LPC32XX_TS_P               LPC32XX_SIC1_IRQ(6)
 #define IRQ_LPC32XX_TS_IRQ             LPC32XX_SIC1_IRQ(7)
 #define IRQ_LPC32XX_TS_AUX             LPC32XX_SIC1_IRQ(8)
index 4eae566dfdc710934e7e834cabd7b2e83c1184cd..c74de01ab5b61bf2cd95dbdb6559545ab85e8f96 100644
@@ -118,6 +118,10 @@ static const struct lpc32xx_event_info lpc32xx_events[NR_IRQS] = {
                .event_group = &lpc32xx_event_pin_regs,
                .mask = LPC32XX_CLKPWR_EXTSRC_GPI_06_BIT,
        },
+       [IRQ_LPC32XX_GPI_28] = {
+               .event_group = &lpc32xx_event_pin_regs,
+               .mask = LPC32XX_CLKPWR_EXTSRC_GPI_28_BIT,
+       },
        [IRQ_LPC32XX_GPIO_00] = {
                .event_group = &lpc32xx_event_int_regs,
                .mask = LPC32XX_CLKPWR_INTSRC_GPIO_00_BIT,
@@ -305,9 +309,18 @@ static int lpc32xx_irq_wake(struct irq_data *d, unsigned int state)
 
                if (state)
                        eventreg |= lpc32xx_events[d->irq].mask;
-               else
+               else {
                        eventreg &= ~lpc32xx_events[d->irq].mask;
 
+                       /*
+                        * When disabling the wakeup, clear the latched
+                        * event
+                        */
+                       __raw_writel(lpc32xx_events[d->irq].mask,
+                               lpc32xx_events[d->irq].
+                               event_group->rawstat_reg);
+               }
+
                __raw_writel(eventreg,
                        lpc32xx_events[d->irq].event_group->enab_reg);
 
@@ -380,13 +393,15 @@ void __init lpc32xx_init_irq(void)
 
        /* Setup SIC1 */
        __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC1_BASE));
-       __raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE));
-       __raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE));
+       __raw_writel(SIC1_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE));
+       __raw_writel(SIC1_ATR_DEFAULT,
+                               LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE));
 
        /* Setup SIC2 */
        __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE));
-       __raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE));
-       __raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE));
+       __raw_writel(SIC2_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE));
+       __raw_writel(SIC2_ATR_DEFAULT,
+                               LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE));
 
        /* Configure supported IRQ's */
        for (i = 0; i < NR_IRQS; i++) {
index 429cfdbb2b3d60c29f4c3fc12d4a5001aa2dd219..f2735281616a1d8a9e008c09d7483fa637a314cf 100644
@@ -88,6 +88,7 @@ struct uartinit {
        char *uart_ck_name;
        u32 ck_mode_mask;
        void __iomem *pdiv_clk_reg;
+       resource_size_t mapbase;
 };
 
 static struct uartinit uartinit_data[] __initdata = {
@@ -97,6 +98,7 @@ static struct uartinit uartinit_data[] __initdata = {
                .ck_mode_mask =
                        LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 5),
                .pdiv_clk_reg = LPC32XX_CLKPWR_UART5_CLK_CTRL,
+               .mapbase = LPC32XX_UART5_BASE,
        },
 #endif
 #ifdef CONFIG_ARCH_LPC32XX_UART3_SELECT
@@ -105,6 +107,7 @@ static struct uartinit uartinit_data[] __initdata = {
                .ck_mode_mask =
                        LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 3),
                .pdiv_clk_reg = LPC32XX_CLKPWR_UART3_CLK_CTRL,
+               .mapbase = LPC32XX_UART3_BASE,
        },
 #endif
 #ifdef CONFIG_ARCH_LPC32XX_UART4_SELECT
@@ -113,6 +116,7 @@ static struct uartinit uartinit_data[] __initdata = {
                .ck_mode_mask =
                        LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 4),
                .pdiv_clk_reg = LPC32XX_CLKPWR_UART4_CLK_CTRL,
+               .mapbase = LPC32XX_UART4_BASE,
        },
 #endif
 #ifdef CONFIG_ARCH_LPC32XX_UART6_SELECT
@@ -121,6 +125,7 @@ static struct uartinit uartinit_data[] __initdata = {
                .ck_mode_mask =
                        LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 6),
                .pdiv_clk_reg = LPC32XX_CLKPWR_UART6_CLK_CTRL,
+               .mapbase = LPC32XX_UART6_BASE,
        },
 #endif
 };
@@ -165,11 +170,24 @@ void __init lpc32xx_serial_init(void)
 
                /* pre-UART clock divider set to 1 */
                __raw_writel(0x0101, uartinit_data[i].pdiv_clk_reg);
+
+               /*
+                * Force a flush of the RX FIFOs to work around a
+                * HW bug
+                */
+               puart = uartinit_data[i].mapbase;
+               __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart));
+               __raw_writel(0x00, LPC32XX_UART_DLL_FIFO(puart));
+               j = LPC32XX_SUART_FIFO_SIZE;
+               while (j--)
+                       tmp = __raw_readl(
+                               LPC32XX_UART_DLL_FIFO(puart));
+               __raw_writel(0, LPC32XX_UART_IIR_FCR(puart));
        }
 
        /* This needs to be done after all UART clocks are setup */
        __raw_writel(clkmodes, LPC32XX_UARTCTL_CLKMODE);
-       for (i = 0; i < ARRAY_SIZE(uartinit_data) - 1; i++) {
+       for (i = 0; i < ARRAY_SIZE(uartinit_data); i++) {
                /* Force a flush of the RX FIFOs to work around a HW bug */
                puart = serial_std_platform_data[i].mapbase;
                __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart));
index 23d3980ef59d561e8a4c9055170266dcb3b45faa..d90e244e05e70a22aceaf11d9dba6797f7c34bc5 100644
@@ -20,6 +20,7 @@
 #include <mach/mv78xx0.h>
 #include <mach/bridge-regs.h>
 #include <plat/cache-feroceon-l2.h>
+#include <plat/ehci-orion.h>
 #include <plat/orion_nand.h>
 #include <plat/time.h>
 #include <plat/common.h>
@@ -170,7 +171,7 @@ void __init mv78xx0_map_io(void)
 void __init mv78xx0_ehci0_init(void)
 {
        orion_ehci_init(&mv78xx0_mbus_dram_info,
-                       USB0_PHYS_BASE, IRQ_MV78XX0_USB_0);
+                       USB0_PHYS_BASE, IRQ_MV78XX0_USB_0, EHCI_PHY_NA);
 }
 
 
index b61b50927123f3178320f084de9fe19910eb1862..3752302ae2ee3e3dc049cfe7cab40d5af7cb8af9 100644
 #define MPP_78100_A0_MASK    MPP(0, 0x0, 0, 0, 1)
 
 #define MPP0_GPIO        MPP(0, 0x0, 1, 1, 1)
-#define MPP0_GE0_COL        MPP(0, 0x1, 1, 0, 1)
-#define MPP0_GE1_TXCLK        MPP(0, 0x2, 0, 1, 1)
+#define MPP0_GE0_COL        MPP(0, 0x1, 0, 0, 1)
+#define MPP0_GE1_TXCLK        MPP(0, 0x2, 0, 0, 1)
 #define MPP0_UNUSED        MPP(0, 0x3, 0, 0, 1)
 
 #define MPP1_GPIO        MPP(1, 0x0, 1, 1, 1)
-#define MPP1_GE0_RXERR        MPP(1, 0x1, 1, 0, 1)
-#define MPP1_GE1_TXCTL        MPP(1, 0x2, 0, 1, 1)
+#define MPP1_GE0_RXERR        MPP(1, 0x1, 0, 0, 1)
+#define MPP1_GE1_TXCTL        MPP(1, 0x2, 0, 0, 1)
 #define MPP1_UNUSED        MPP(1, 0x3, 0, 0, 1)
 
 #define MPP2_GPIO        MPP(2, 0x0, 1, 1, 1)
-#define MPP2_GE0_CRS        MPP(2, 0x1, 1, 0, 1)
-#define MPP2_GE1_RXCTL        MPP(2, 0x2, 1, 0, 1)
+#define MPP2_GE0_CRS        MPP(2, 0x1, 0, 0, 1)
+#define MPP2_GE1_RXCTL        MPP(2, 0x2, 0, 0, 1)
 #define MPP2_UNUSED        MPP(2, 0x3, 0, 0, 1)
 
 #define MPP3_GPIO        MPP(3, 0x0, 1, 1, 1)
-#define MPP3_GE0_TXERR        MPP(3, 0x1, 0, 1, 1)
-#define MPP3_GE1_RXCLK        MPP(3, 0x2, 1, 0, 1)
+#define MPP3_GE0_TXERR        MPP(3, 0x1, 0, 0, 1)
+#define MPP3_GE1_RXCLK        MPP(3, 0x2, 0, 0, 1)
 #define MPP3_UNUSED        MPP(3, 0x3, 0, 0, 1)
 
 #define MPP4_GPIO        MPP(4, 0x0, 1, 1, 1)
-#define MPP4_GE0_TXD4        MPP(4, 0x1, 0, 1, 1)
-#define MPP4_GE1_TXD0        MPP(4, 0x2, 0, 1, 1)
+#define MPP4_GE0_TXD4        MPP(4, 0x1, 0, 0, 1)
+#define MPP4_GE1_TXD0        MPP(4, 0x2, 0, 0, 1)
 #define MPP4_UNUSED        MPP(4, 0x3, 0, 0, 1)
 
 #define MPP5_GPIO        MPP(5, 0x0, 1, 1, 1)
-#define MPP5_GE0_TXD5        MPP(5, 0x1, 0, 1, 1)
-#define MPP5_GE1_TXD1        MPP(5, 0x2, 0, 1, 1)
+#define MPP5_GE0_TXD5        MPP(5, 0x1, 0, 0, 1)
+#define MPP5_GE1_TXD1        MPP(5, 0x2, 0, 0, 1)
 #define MPP5_UNUSED        MPP(5, 0x3, 0, 0, 1)
 
 #define MPP6_GPIO        MPP(6, 0x0, 1, 1, 1)
-#define MPP6_GE0_TXD6        MPP(6, 0x1, 0, 1, 1)
-#define MPP6_GE1_TXD2        MPP(6, 0x2, 0, 1, 1)
+#define MPP6_GE0_TXD6        MPP(6, 0x1, 0, 0, 1)
+#define MPP6_GE1_TXD2        MPP(6, 0x2, 0, 0, 1)
 #define MPP6_UNUSED        MPP(6, 0x3, 0, 0, 1)
 
 #define MPP7_GPIO        MPP(7, 0x0, 1, 1, 1)
-#define MPP7_GE0_TXD7        MPP(7, 0x1, 0, 1, 1)
-#define MPP7_GE1_TXD3        MPP(7, 0x2, 0, 1, 1)
+#define MPP7_GE0_TXD7        MPP(7, 0x1, 0, 0, 1)
+#define MPP7_GE1_TXD3        MPP(7, 0x2, 0, 0, 1)
 #define MPP7_UNUSED        MPP(7, 0x3, 0, 0, 1)
 
 #define MPP8_GPIO        MPP(8, 0x0, 1, 1, 1)
-#define MPP8_GE0_RXD4        MPP(8, 0x1, 1, 0, 1)
-#define MPP8_GE1_RXD0        MPP(8, 0x2, 1, 0, 1)
+#define MPP8_GE0_RXD4        MPP(8, 0x1, 0, 0, 1)
+#define MPP8_GE1_RXD0        MPP(8, 0x2, 0, 0, 1)
 #define MPP8_UNUSED        MPP(8, 0x3, 0, 0, 1)
 
 #define MPP9_GPIO        MPP(9, 0x0, 1, 1, 1)
-#define MPP9_GE0_RXD5        MPP(9, 0x1, 1, 0, 1)
-#define MPP9_GE1_RXD1        MPP(9, 0x2, 1, 0, 1)
+#define MPP9_GE0_RXD5        MPP(9, 0x1, 0, 0, 1)
+#define MPP9_GE1_RXD1        MPP(9, 0x2, 0, 0, 1)
 #define MPP9_UNUSED        MPP(9, 0x3, 0, 0, 1)
 
 #define MPP10_GPIO        MPP(10, 0x0, 1, 1, 1)
-#define MPP10_GE0_RXD6        MPP(10, 0x1, 1, 0, 1)
-#define MPP10_GE1_RXD2        MPP(10, 0x2, 1, 0, 1)
+#define MPP10_GE0_RXD6        MPP(10, 0x1, 0, 0, 1)
+#define MPP10_GE1_RXD2        MPP(10, 0x2, 0, 0, 1)
 #define MPP10_UNUSED        MPP(10, 0x3, 0, 0, 1)
 
 #define MPP11_GPIO        MPP(11, 0x0, 1, 1, 1)
-#define MPP11_GE0_RXD7        MPP(11, 0x1, 1, 0, 1)
-#define MPP11_GE1_RXD3        MPP(11, 0x2, 1, 0, 1)
+#define MPP11_GE0_RXD7        MPP(11, 0x1, 0, 0, 1)
+#define MPP11_GE1_RXD3        MPP(11, 0x2, 0, 0, 1)
 #define MPP11_UNUSED        MPP(11, 0x3, 0, 0, 1)
 
 #define MPP12_GPIO        MPP(12, 0x0, 1, 1, 1)
-#define MPP12_M_BB        MPP(12, 0x3, 1, 0, 1)
-#define MPP12_UA0_CTSn        MPP(12, 0x4, 1, 0, 1)
-#define MPP12_NAND_FLASH_REn0    MPP(12, 0x5, 0, 1, 1)
-#define MPP12_TDM0_SCSn        MPP(12, 0X6, 0, 1, 1)
+#define MPP12_M_BB        MPP(12, 0x3, 0, 0, 1)
+#define MPP12_UA0_CTSn        MPP(12, 0x4, 0, 0, 1)
+#define MPP12_NAND_FLASH_REn0    MPP(12, 0x5, 0, 0, 1)
+#define MPP12_TDM0_SCSn        MPP(12, 0X6, 0, 0, 1)
 #define MPP12_UNUSED        MPP(12, 0x1, 0, 0, 1)
 
 #define MPP13_GPIO        MPP(13, 0x0, 1, 1, 1)
-#define MPP13_SYSRST_OUTn    MPP(13, 0x3, 0, 1, 1)
-#define MPP13_UA0_RTSn        MPP(13, 0x4, 0, 1, 1)
-#define MPP13_NAN_FLASH_WEn0    MPP(13, 0x5, 0, 1, 1)
-#define MPP13_TDM_SCLK        MPP(13, 0x6, 0, 1, 1)
+#define MPP13_SYSRST_OUTn    MPP(13, 0x3, 0, 0, 1)
+#define MPP13_UA0_RTSn        MPP(13, 0x4, 0, 0, 1)
+#define MPP13_NAN_FLASH_WEn0    MPP(13, 0x5, 0, 0, 1)
+#define MPP13_TDM_SCLK        MPP(13, 0x6, 0, 0, 1)
 #define MPP13_UNUSED        MPP(13, 0x1, 0, 0, 1)
 
 #define MPP14_GPIO        MPP(14, 0x0, 1, 1, 1)
-#define MPP14_SATA1_ACTn    MPP(14, 0x3, 0, 1, 1)
-#define MPP14_UA1_CTSn        MPP(14, 0x4, 1, 0, 1)
-#define MPP14_NAND_FLASH_REn1    MPP(14, 0x5, 0, 1, 1)
-#define MPP14_TDM_SMOSI        MPP(14, 0x6, 0, 1, 1)
+#define MPP14_SATA1_ACTn    MPP(14, 0x3, 0, 0, 1)
+#define MPP14_UA1_CTSn        MPP(14, 0x4, 0, 0, 1)
+#define MPP14_NAND_FLASH_REn1    MPP(14, 0x5, 0, 0, 1)
+#define MPP14_TDM_SMOSI        MPP(14, 0x6, 0, 0, 1)
 #define MPP14_UNUSED        MPP(14, 0x1, 0, 0, 1)
 
 #define MPP15_GPIO        MPP(15, 0x0, 1, 1, 1)
-#define MPP15_SATA0_ACTn    MPP(15, 0x3, 0, 1, 1)
-#define MPP15_UA1_RTSn        MPP(15, 0x4, 0, 1, 1)
-#define MPP15_NAND_FLASH_WEn1    MPP(15, 0x5, 0, 1, 1)
-#define MPP15_TDM_SMISO        MPP(15, 0x6, 1, 0, 1)
+#define MPP15_SATA0_ACTn    MPP(15, 0x3, 0, 0, 1)
+#define MPP15_UA1_RTSn        MPP(15, 0x4, 0, 0, 1)
+#define MPP15_NAND_FLASH_WEn1    MPP(15, 0x5, 0, 0, 1)
+#define MPP15_TDM_SMISO        MPP(15, 0x6, 0, 0, 1)
 #define MPP15_UNUSED        MPP(15, 0x1, 0, 0, 1)
 
 #define MPP16_GPIO        MPP(16, 0x0, 1, 1, 1)
-#define MPP16_SATA1_PRESENTn    MPP(16, 0x3, 0, 1, 1)
-#define MPP16_UA2_TXD        MPP(16, 0x4, 0, 1, 1)
-#define MPP16_NAND_FLASH_REn3    MPP(16, 0x5, 0, 1, 1)
-#define MPP16_TDM_INTn        MPP(16, 0x6, 1, 0, 1)
+#define MPP16_SATA1_PRESENTn    MPP(16, 0x3, 0, 0, 1)
+#define MPP16_UA2_TXD        MPP(16, 0x4, 0, 0, 1)
+#define MPP16_NAND_FLASH_REn3    MPP(16, 0x5, 0, 0, 1)
+#define MPP16_TDM_INTn        MPP(16, 0x6, 0, 0, 1)
 #define MPP16_UNUSED        MPP(16, 0x1, 0, 0, 1)
 
 
 #define MPP17_GPIO        MPP(17, 0x0, 1, 1, 1)
-#define MPP17_SATA0_PRESENTn    MPP(17, 0x3, 0, 1, 1)
-#define MPP17_UA2_RXD        MPP(17, 0x4, 1, 0, 1)
-#define MPP17_NAND_FLASH_WEn3    MPP(17, 0x5, 0, 1, 1)
-#define MPP17_TDM_RSTn        MPP(17, 0x6, 0, 1, 1)
+#define MPP17_SATA0_PRESENTn    MPP(17, 0x3, 0, 0, 1)
+#define MPP17_UA2_RXD        MPP(17, 0x4, 0, 0, 1)
+#define MPP17_NAND_FLASH_WEn3    MPP(17, 0x5, 0, 0, 1)
+#define MPP17_TDM_RSTn        MPP(17, 0x6, 0, 0, 1)
 #define MPP17_UNUSED        MPP(17, 0x1, 0, 0, 1)
 
 
 #define MPP18_GPIO        MPP(18, 0x0, 1, 1, 1)
-#define MPP18_UA0_CTSn        MPP(18, 0x4, 1, 0, 1)
-#define MPP18_BOOT_FLASH_REn    MPP(18, 0x5, 0, 1, 1)
+#define MPP18_UA0_CTSn        MPP(18, 0x4, 0, 0, 1)
+#define MPP18_BOOT_FLASH_REn    MPP(18, 0x5, 0, 0, 1)
 #define MPP18_UNUSED        MPP(18, 0x1, 0, 0, 1)
 
 
 
 #define MPP19_GPIO        MPP(19, 0x0, 1, 1, 1)
-#define MPP19_UA0_CTSn        MPP(19, 0x4, 0, 1, 1)
-#define MPP19_BOOT_FLASH_WEn    MPP(19, 0x5, 0, 1, 1)
+#define MPP19_UA0_CTSn        MPP(19, 0x4, 0, 0, 1)
+#define MPP19_BOOT_FLASH_WEn    MPP(19, 0x5, 0, 0, 1)
 #define MPP19_UNUSED        MPP(19, 0x1, 0, 0, 1)
 
 
 #define MPP20_GPIO        MPP(20, 0x0, 1, 1, 1)
-#define MPP20_UA1_CTSs        MPP(20, 0x4, 1, 0, 1)
-#define MPP20_TDM_PCLK        MPP(20, 0x6, 1, 1, 0)
+#define MPP20_UA1_CTSs        MPP(20, 0x4, 0, 0, 1)
+#define MPP20_TDM_PCLK        MPP(20, 0x6, 0, 0, 0)
 #define MPP20_UNUSED        MPP(20, 0x1, 0, 0, 1)
 
 
 
 #define MPP21_GPIO        MPP(21, 0x0, 1, 1, 1)
-#define MPP21_UA1_CTSs        MPP(21, 0x4, 0, 1, 1)
-#define MPP21_TDM_FSYNC        MPP(21, 0x6, 1, 1, 0)
+#define MPP21_UA1_CTSs        MPP(21, 0x4, 0, 0, 1)
+#define MPP21_TDM_FSYNC        MPP(21, 0x6, 0, 0, 0)
 #define MPP21_UNUSED        MPP(21, 0x1, 0, 0, 1)
 
 
 
 #define MPP22_GPIO        MPP(22, 0x0, 1, 1, 1)
-#define MPP22_UA3_TDX        MPP(22, 0x4, 0, 1, 1)
-#define MPP22_NAND_FLASH_REn2    MPP(22, 0x5, 0, 1, 1)
-#define MPP22_TDM_DRX        MPP(22, 0x6, 1, 0, 1)
+#define MPP22_UA3_TDX        MPP(22, 0x4, 0, 0, 1)
+#define MPP22_NAND_FLASH_REn2    MPP(22, 0x5, 0, 0, 1)
+#define MPP22_TDM_DRX        MPP(22, 0x6, 0, 0, 1)
 #define MPP22_UNUSED        MPP(22, 0x1, 0, 0, 1)
 
 
 
 #define MPP23_GPIO        MPP(23, 0x0, 1, 1, 1)
-#define MPP23_UA3_RDX        MPP(23, 0x4, 1, 0, 1)
-#define MPP23_NAND_FLASH_WEn2    MPP(23, 0x5, 0, 1, 1)
-#define MPP23_TDM_DTX        MPP(23, 0x6, 0, 1, 1)
+#define MPP23_UA3_RDX        MPP(23, 0x4, 0, 0, 1)
+#define MPP23_NAND_FLASH_WEn2    MPP(23, 0x5, 0, 0, 1)
+#define MPP23_TDM_DTX        MPP(23, 0x6, 0, 0, 1)
 #define MPP23_UNUSED        MPP(23, 0x1, 0, 0, 1)
 
 
 #define MPP24_GPIO        MPP(24, 0x0, 1, 1, 1)
-#define MPP24_UA2_TXD        MPP(24, 0x4, 0, 1, 1)
-#define MPP24_TDM_INTn        MPP(24, 0x6, 1, 0, 1)
+#define MPP24_UA2_TXD        MPP(24, 0x4, 0, 0, 1)
+#define MPP24_TDM_INTn        MPP(24, 0x6, 0, 0, 1)
 #define MPP24_UNUSED        MPP(24, 0x1, 0, 0, 1)
 
 
 #define MPP25_GPIO        MPP(25, 0x0, 1, 1, 1)
-#define MPP25_UA2_RXD        MPP(25, 0x4, 1, 0, 1)
-#define MPP25_TDM_RSTn        MPP(25, 0x6, 0, 1, 1)
+#define MPP25_UA2_RXD        MPP(25, 0x4, 0, 0, 1)
+#define MPP25_TDM_RSTn        MPP(25, 0x6, 0, 0, 1)
 #define MPP25_UNUSED        MPP(25, 0x1, 0, 0, 1)
 
 
 #define MPP26_GPIO        MPP(26, 0x0, 1, 1, 1)
-#define MPP26_UA2_CTSn        MPP(26, 0x4, 1, 0, 1)
-#define MPP26_TDM_PCLK        MPP(26, 0x6, 1, 1, 1)
+#define MPP26_UA2_CTSn        MPP(26, 0x4, 0, 0, 1)
+#define MPP26_TDM_PCLK        MPP(26, 0x6, 0, 0, 1)
 #define MPP26_UNUSED        MPP(26, 0x1, 0, 0, 1)
 
 
 #define MPP27_GPIO        MPP(27, 0x0, 1, 1, 1)
-#define MPP27_UA2_RTSn        MPP(27, 0x4, 0, 1, 1)
-#define MPP27_TDM_FSYNC        MPP(27, 0x6, 1, 1, 1)
+#define MPP27_UA2_RTSn        MPP(27, 0x4, 0, 0, 1)
+#define MPP27_TDM_FSYNC        MPP(27, 0x6, 0, 0, 1)
 #define MPP27_UNUSED        MPP(27, 0x1, 0, 0, 1)
 
 
 #define MPP28_GPIO        MPP(28, 0x0, 1, 1, 1)
-#define MPP28_UA3_TXD        MPP(28, 0x4, 0, 1, 1)
-#define MPP28_TDM_DRX        MPP(28, 0x6, 1, 0, 1)
+#define MPP28_UA3_TXD        MPP(28, 0x4, 0, 0, 1)
+#define MPP28_TDM_DRX        MPP(28, 0x6, 0, 0, 1)
 #define MPP28_UNUSED        MPP(28, 0x1, 0, 0, 1)
 
 #define MPP29_GPIO        MPP(29, 0x0, 1, 1, 1)
-#define MPP29_UA3_RXD        MPP(29, 0x4, 1, 0, 1)
-#define MPP29_SYSRST_OUTn    MPP(29, 0x5, 0, 1, 1)
-#define MPP29_TDM_DTX        MPP(29, 0x6, 0, 1, 1)
+#define MPP29_UA3_RXD        MPP(29, 0x4, 0, 0, 1)
+#define MPP29_SYSRST_OUTn    MPP(29, 0x5, 0, 0, 1)
+#define MPP29_TDM_DTX        MPP(29, 0x6, 0, 0, 1)
 #define MPP29_UNUSED        MPP(29, 0x1, 0, 0, 1)
 
 #define MPP30_GPIO        MPP(30, 0x0, 1, 1, 1)
-#define MPP30_UA3_CTSn        MPP(30, 0x4, 1, 0, 1)
+#define MPP30_UA3_CTSn        MPP(30, 0x4, 0, 0, 1)
 #define MPP30_UNUSED        MPP(30, 0x1, 0, 0, 1)
 
 #define MPP31_GPIO        MPP(31, 0x0, 1, 1, 1)
-#define MPP31_UA3_RTSn        MPP(31, 0x4, 0, 1, 1)
-#define MPP31_TDM1_SCSn        MPP(31, 0x6, 0, 1, 1)
+#define MPP31_UA3_RTSn        MPP(31, 0x4, 0, 0, 1)
+#define MPP31_TDM1_SCSn        MPP(31, 0x6, 0, 0, 1)
 #define MPP31_UNUSED        MPP(31, 0x1, 0, 0, 1)
 
 
 #define MPP32_GPIO        MPP(32, 0x1, 1, 1, 1)
-#define MPP32_UA3_TDX        MPP(32, 0x4, 0, 1, 1)
-#define MPP32_SYSRST_OUTn    MPP(32, 0x5, 0, 1, 1)
-#define MPP32_TDM0_RXQ        MPP(32, 0x6, 0, 1, 1)
+#define MPP32_UA3_TDX        MPP(32, 0x4, 0, 0, 1)
+#define MPP32_SYSRST_OUTn    MPP(32, 0x5, 0, 0, 1)
+#define MPP32_TDM0_RXQ        MPP(32, 0x6, 0, 0, 1)
 #define MPP32_UNUSED        MPP(32, 0x3, 0, 0, 1)
 
 
 #define MPP33_GPIO        MPP(33, 0x1, 1, 1, 1)
-#define MPP33_UA3_RDX        MPP(33, 0x4, 1, 0, 1)
-#define MPP33_TDM0_TXQ        MPP(33, 0x6, 0, 1, 1)
+#define MPP33_UA3_RDX        MPP(33, 0x4, 0, 0, 1)
+#define MPP33_TDM0_TXQ        MPP(33, 0x6, 0, 0, 1)
 #define MPP33_UNUSED        MPP(33, 0x3, 0, 0, 1)
 
 
 
 #define MPP34_GPIO        MPP(34, 0x1, 1, 1, 1)
-#define MPP34_UA2_TDX        MPP(34, 0x4, 0, 1, 1)
-#define MPP34_TDM1_RXQ        MPP(34, 0x6, 0, 1, 1)
+#define MPP34_UA2_TDX        MPP(34, 0x4, 0, 0, 1)
+#define MPP34_TDM1_RXQ        MPP(34, 0x6, 0, 0, 1)
 #define MPP34_UNUSED        MPP(34, 0x3, 0, 0, 1)
 
 
 
 #define MPP35_GPIO        MPP(35, 0x1, 1, 1, 1)
-#define MPP35_UA2_RDX        MPP(35, 0x4, 1, 0, 1)
-#define MPP35_TDM1_TXQ        MPP(35, 0x6, 0, 1, 1)
+#define MPP35_UA2_RDX        MPP(35, 0x4, 0, 0, 1)
+#define MPP35_TDM1_TXQ        MPP(35, 0x6, 0, 0, 1)
 #define MPP35_UNUSED        MPP(35, 0x3, 0, 0, 1)
 
 #define MPP36_GPIO        MPP(36, 0x1, 1, 1, 1)
-#define MPP36_UA0_CTSn        MPP(36, 0x2, 1, 0, 1)
-#define MPP36_UA2_TDX        MPP(36, 0x4, 0, 1, 1)
-#define MPP36_TDM0_SCSn        MPP(36, 0x6, 0, 1, 1)
+#define MPP36_UA0_CTSn        MPP(36, 0x2, 0, 0, 1)
+#define MPP36_UA2_TDX        MPP(36, 0x4, 0, 0, 1)
+#define MPP36_TDM0_SCSn        MPP(36, 0x6, 0, 0, 1)
 #define MPP36_UNUSED        MPP(36, 0x3, 0, 0, 1)
 
 
 #define MPP37_GPIO        MPP(37, 0x1, 1, 1, 1)
-#define MPP37_UA0_RTSn        MPP(37, 0x2, 0, 1, 1)
-#define MPP37_UA2_RXD        MPP(37, 0x4, 1, 0, 1)
-#define MPP37_SYSRST_OUTn    MPP(37, 0x5, 0, 1, 1)
-#define MPP37_TDM_SCLK        MPP(37, 0x6, 0, 1, 1)
+#define MPP37_UA0_RTSn        MPP(37, 0x2, 0, 0, 1)
+#define MPP37_UA2_RXD        MPP(37, 0x4, 0, 0, 1)
+#define MPP37_SYSRST_OUTn    MPP(37, 0x5, 0, 0, 1)
+#define MPP37_TDM_SCLK        MPP(37, 0x6, 0, 0, 1)
 #define MPP37_UNUSED        MPP(37, 0x3, 0, 0, 1)
 
 
 
 
 #define MPP38_GPIO        MPP(38, 0x1, 1, 1, 1)
-#define MPP38_UA1_CTSn        MPP(38, 0x2, 1, 0, 1)
-#define MPP38_UA3_TXD        MPP(38, 0x4, 0, 1, 1)
-#define MPP38_SYSRST_OUTn    MPP(38, 0x5, 0, 1, 1)
-#define MPP38_TDM_SMOSI        MPP(38, 0x6, 0, 1, 1)
+#define MPP38_UA1_CTSn        MPP(38, 0x2, 0, 0, 1)
+#define MPP38_UA3_TXD        MPP(38, 0x4, 0, 0, 1)
+#define MPP38_SYSRST_OUTn    MPP(38, 0x5, 0, 0, 1)
+#define MPP38_TDM_SMOSI        MPP(38, 0x6, 0, 0, 1)
 #define MPP38_UNUSED        MPP(38, 0x3, 0, 0, 1)
 
 
 
 
 #define MPP39_GPIO        MPP(39, 0x1, 1, 1, 1)
-#define MPP39_UA1_RTSn        MPP(39, 0x2, 0, 1, 1)
-#define MPP39_UA3_RXD        MPP(39, 0x4, 1, 0, 1)
-#define MPP39_SYSRST_OUTn    MPP(39, 0x5, 0, 1, 1)
-#define MPP39_TDM_SMISO        MPP(39, 0x6, 1, 0, 1)
+#define MPP39_UA1_RTSn        MPP(39, 0x2, 0, 0, 1)
+#define MPP39_UA3_RXD        MPP(39, 0x4, 0, 0, 1)
+#define MPP39_SYSRST_OUTn    MPP(39, 0x5, 0, 0, 1)
+#define MPP39_TDM_SMISO        MPP(39, 0x6, 0, 0, 1)
 #define MPP39_UNUSED        MPP(39, 0x3, 0, 0, 1)
 
 
 
 #define MPP40_GPIO        MPP(40, 0x1, 1, 1, 1)
-#define MPP40_TDM_INTn        MPP(40, 0x6, 1, 0, 1)
+#define MPP40_TDM_INTn        MPP(40, 0x6, 0, 0, 1)
 #define MPP40_UNUSED        MPP(40, 0x0, 0, 0, 1)
 
 
 
 #define MPP41_GPIO        MPP(41, 0x1, 1, 1, 1)
-#define MPP41_TDM_RSTn        MPP(41, 0x6, 0, 1, 1)
+#define MPP41_TDM_RSTn        MPP(41, 0x6, 0, 0, 1)
 #define MPP41_UNUSED        MPP(41, 0x0, 0, 0, 1)
 
 
 
 #define MPP42_GPIO        MPP(42, 0x1, 1, 1, 1)
-#define MPP42_TDM_PCLK        MPP(42, 0x6, 1, 1, 1)
+#define MPP42_TDM_PCLK        MPP(42, 0x6, 0, 0, 1)
 #define MPP42_UNUSED        MPP(42, 0x0, 0, 0, 1)
 
 
 
 #define MPP43_GPIO        MPP(43, 0x1, 1, 1, 1)
-#define MPP43_TDM_FSYNC        MPP(43, 0x6, 1, 1, 1)
+#define MPP43_TDM_FSYNC        MPP(43, 0x6, 0, 0, 1)
 #define MPP43_UNUSED        MPP(43, 0x0, 0, 0, 1)
 
 
 
 #define MPP44_GPIO        MPP(44, 0x1, 1, 1, 1)
-#define MPP44_TDM_DRX        MPP(44, 0x6, 1, 0, 1)
+#define MPP44_TDM_DRX        MPP(44, 0x6, 0, 0, 1)
 #define MPP44_UNUSED        MPP(44, 0x0, 0, 0, 1)
 
 
 
 #define MPP45_GPIO        MPP(45, 0x1, 1, 1, 1)
-#define MPP45_SATA0_ACTn    MPP(45, 0x3, 0, 1, 1)
-#define MPP45_TDM_DRX        MPP(45, 0x6, 0, 1, 1)
+#define MPP45_SATA0_ACTn    MPP(45, 0x3, 0, 0, 1)
+#define MPP45_TDM_DRX        MPP(45, 0x6, 0, 0, 1)
 #define MPP45_UNUSED        MPP(45, 0x0, 0, 0, 1)
 
 
 #define MPP46_GPIO        MPP(46, 0x1, 1, 1, 1)
-#define MPP46_TDM_SCSn        MPP(46, 0x6, 0, 1, 1)
+#define MPP46_TDM_SCSn        MPP(46, 0x6, 0, 0, 1)
 #define MPP46_UNUSED        MPP(46, 0x0, 0, 0, 1)
 
 
 
 
 #define MPP48_GPIO        MPP(48, 0x1, 1, 1, 1)
-#define MPP48_SATA1_ACTn    MPP(48, 0x3, 0, 1, 1)
+#define MPP48_SATA1_ACTn    MPP(48, 0x3, 0, 0, 1)
 #define MPP48_UNUSED        MPP(48, 0x2, 0, 0, 1)
 
 
 
 #define MPP49_GPIO        MPP(49, 0x1, 1, 1, 1)
-#define MPP49_SATA0_ACTn    MPP(49, 0x3, 0, 1, 1)
-#define MPP49_M_BB        MPP(49, 0x4, 1, 0, 1)
+#define MPP49_SATA0_ACTn    MPP(49, 0x3, 0, 0, 1)
+#define MPP49_M_BB        MPP(49, 0x4, 0, 0, 1)
 #define MPP49_UNUSED        MPP(49, 0x2, 0, 0, 1)
 
 
index 5dcc59d5b9ec9feab991fc34302273702388b21e..b3a71245f385810e687129a90c0db84bca60ea09 100644 (file)
@@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate)             \
        reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);         \
        reg &= ~BM_CLKCTRL_##dr##_DIV;                                  \
        reg |= div << BP_CLKCTRL_##dr##_DIV;                            \
-       if (reg | (1 << clk->enable_shift)) {                           \
+       if (reg & (1 << clk->enable_shift)) {                           \
                pr_err("%s: clock is gated\n", __func__);               \
                return -EINVAL;                                         \
        }                                                               \
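
The hunk above replaces a bitwise OR with a bitwise AND in the clock-gating check. As a minimal standalone sketch (invented names, not the kernel's clock code): "reg | mask" is non-zero whenever the mask is non-zero, so the old test reported a gated clock unconditionally, while "reg & mask" actually inspects the gate bit.

static int clk_is_gated(unsigned int reg, unsigned int gate_shift)
{
	/* wrong: (reg | (1u << gate_shift)) is true for any non-zero mask */
	/* right: test the single gate bit */
	return (reg & (1u << gate_shift)) != 0;
}
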
index 35a89dd27242d762d57e3f56b4bce85022fedb53..1332f73c9ad0407bc5e4f1d3f6a833b4b3086744 100644 (file)
@@ -30,6 +30,7 @@
  */
 #define cpu_is_mx23()          (                                       \
                machine_is_mx23evk() ||                                 \
+               machine_is_stmp378x() ||                                \
                0)
 #define cpu_is_mx28()          (                                       \
                machine_is_mx28evk() ||                                 \
index 19d5891c48e325f6afe8e1a101dfdfbca3ad0a68..841ae21f520b5df6da7f7247fa0a8e6581c79407 100644 (file)
@@ -326,6 +326,7 @@ config MACH_OMAP4_PANDA
 config OMAP3_EMU
        bool "OMAP3 debugging peripherals"
        depends on ARCH_OMAP3
+       select ARM_AMBA
        select OC_ETM
        help
          Say Y here to enable debugging hardware of omap3
index 63de2d396e2dddf84eaec2b6035aad64aba49385..14a5971d0d48792ed75793e23b6deed54137560d 100644 (file)
@@ -49,8 +49,9 @@
 #define ETH_KS8851_QUART               138
 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO       184
 #define OMAP4_SFH7741_ENABLE_GPIO              188
-#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */
+#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
 #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
+#define HDMI_GPIO_HPD  63 /* Hotplug detect */
 
 static const int sdp4430_keymap[] = {
        KEY(0, 0, KEY_E),
@@ -578,12 +579,8 @@ static void __init omap_sfh7741prox_init(void)
 
 static void sdp4430_hdmi_mux_init(void)
 {
-       /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
-       omap_mux_init_signal("hdmi_hpd",
-                       OMAP_PIN_INPUT_PULLUP);
        omap_mux_init_signal("hdmi_cec",
                        OMAP_PIN_INPUT_PULLUP);
-       /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
        omap_mux_init_signal("hdmi_ddc_scl",
                        OMAP_PIN_INPUT_PULLUP);
        omap_mux_init_signal("hdmi_ddc_sda",
@@ -591,8 +588,9 @@ static void sdp4430_hdmi_mux_init(void)
 }
 
 static struct gpio sdp4430_hdmi_gpios[] = {
-       { HDMI_GPIO_HPD,        GPIOF_OUT_INIT_HIGH,    "hdmi_gpio_hpd"   },
+       { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
        { HDMI_GPIO_LS_OE,      GPIOF_OUT_INIT_HIGH,    "hdmi_gpio_ls_oe" },
+       { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
 };
 
 static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
@@ -609,26 +607,21 @@ static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
 
 static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev)
 {
-       gpio_free(HDMI_GPIO_LS_OE);
-       gpio_free(HDMI_GPIO_HPD);
+       gpio_free_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios));
 }
 
+static struct omap_dss_hdmi_data sdp4430_hdmi_data = {
+       .hpd_gpio = HDMI_GPIO_HPD,
+};
+
 static struct omap_dss_device sdp4430_hdmi_device = {
        .name = "hdmi",
        .driver_name = "hdmi_panel",
        .type = OMAP_DISPLAY_TYPE_HDMI,
-       .clocks = {
-               .dispc  = {
-                       .dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK,
-               },
-               .hdmi   = {
-                       .regn   = 15,
-                       .regm2  = 1,
-               },
-       },
        .platform_enable = sdp4430_panel_enable_hdmi,
        .platform_disable = sdp4430_panel_disable_hdmi,
        .channel = OMAP_DSS_CHANNEL_DIGIT,
+       .data = &sdp4430_hdmi_data,
 };
 
 static struct omap_dss_device *sdp4430_dss_devices[] = {
@@ -645,6 +638,10 @@ void omap_4430sdp_display_init(void)
 {
        sdp4430_hdmi_mux_init();
        omap_display_init(&sdp4430_dss_data);
+
+       omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
 }
 
 #ifdef CONFIG_OMAP_MUX
index 0cfe2005cb506a32c79d96f9864bb7e24bd35a0f..107dfc377a8ae1653959a36b14cddfac356a5298 100644 (file)
@@ -52,8 +52,9 @@
 #define GPIO_HUB_NRESET                62
 #define GPIO_WIFI_PMENA                43
 #define GPIO_WIFI_IRQ          53
-#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */
+#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
 #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
+#define HDMI_GPIO_HPD  63 /* Hotplug detect */
 
 /* wl127x BT, FM, GPS connectivity chip */
 static int wl1271_gpios[] = {46, -1, -1};
@@ -614,12 +615,8 @@ int __init omap4_panda_dvi_init(void)
 
 static void omap4_panda_hdmi_mux_init(void)
 {
-       /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
-       omap_mux_init_signal("hdmi_hpd",
-                       OMAP_PIN_INPUT_PULLUP);
        omap_mux_init_signal("hdmi_cec",
                        OMAP_PIN_INPUT_PULLUP);
-       /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
        omap_mux_init_signal("hdmi_ddc_scl",
                        OMAP_PIN_INPUT_PULLUP);
        omap_mux_init_signal("hdmi_ddc_sda",
@@ -627,8 +624,9 @@ static void omap4_panda_hdmi_mux_init(void)
 }
 
 static struct gpio panda_hdmi_gpios[] = {
-       { HDMI_GPIO_HPD,        GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd"   },
+       { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
        { HDMI_GPIO_LS_OE,      GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
+       { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
 };
 
 static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
@@ -645,10 +643,13 @@ static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
 
 static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev)
 {
-       gpio_free(HDMI_GPIO_LS_OE);
-       gpio_free(HDMI_GPIO_HPD);
+       gpio_free_array(panda_hdmi_gpios, ARRAY_SIZE(panda_hdmi_gpios));
 }
 
+static struct omap_dss_hdmi_data omap4_panda_hdmi_data = {
+       .hpd_gpio = HDMI_GPIO_HPD,
+};
+
 static struct omap_dss_device  omap4_panda_hdmi_device = {
        .name = "hdmi",
        .driver_name = "hdmi_panel",
@@ -656,6 +657,7 @@ static struct omap_dss_device  omap4_panda_hdmi_device = {
        .platform_enable = omap4_panda_panel_enable_hdmi,
        .platform_disable = omap4_panda_panel_disable_hdmi,
        .channel = OMAP_DSS_CHANNEL_DIGIT,
+       .data = &omap4_panda_hdmi_data,
 };
 
 static struct omap_dss_device *omap4_panda_dss_devices[] = {
@@ -679,6 +681,10 @@ void omap4_panda_display_init(void)
 
        omap4_panda_hdmi_mux_init();
        omap_display_init(&omap4_panda_dss_data);
+
+       omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
 }
 
 static void __init omap4_panda_init(void)
index 88bd6f7705f0317808575cd8ecda856e4e36ec87..c56597172bfcc185ca780fb4632f3d4e3a828ffc 100644 (file)
@@ -133,7 +133,7 @@ static struct platform_device rx51_charger_device = {
 static void __init rx51_charger_init(void)
 {
        WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
-               GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+               GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
 
        platform_device_register(&rx51_charger_device);
 }
index 130034bf01d5f880541976cf3210de7e74293ce1..dfffbbf4c009624c87375b6322a2a7285cd8a2e7 100644 (file)
@@ -528,7 +528,13 @@ int gpmc_cs_configure(int cs, int cmd, int wval)
 
        case GPMC_CONFIG_DEV_SIZE:
                regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+
+               /* clear 2 target bits */
+               regval &= ~GPMC_CONFIG1_DEVICESIZE(3);
+
+               /* set the proper value */
                regval |= GPMC_CONFIG1_DEVICESIZE(wval);
+
                gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
                break;
 
index fb7dc52394a8fd8a872fb0e1f22d0cc867a3df9b..f5a6bc1250ce7c85ff8c97782fb47ded10f93392 100644 (file)
@@ -137,7 +137,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
                sr_write_reg(sr_info, ERRCONFIG_V1, status);
        } else if (sr_info->ip_type == SR_TYPE_V2) {
                /* Read the status bits */
-               sr_read_reg(sr_info, IRQSTATUS);
+               status = sr_read_reg(sr_info, IRQSTATUS);
 
                /* Clear them by writing back */
                sr_write_reg(sr_info, IRQSTATUS, status);
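
In the SmartReflex hunk above, the result of the status read was being discarded, so the value written back to acknowledge the interrupt was not the value just read. A minimal sketch of the capture-then-write-back pattern, with invented register accessors:

static unsigned int read_reg(volatile unsigned int *reg)
{
	return *reg;
}

static void write_reg(volatile unsigned int *reg, unsigned int val)
{
	*reg = val;
}

static void ack_pending(volatile unsigned int *irqstatus)
{
	unsigned int status;

	status = read_reg(irqstatus);	/* capture the pending bits */
	write_reg(irqstatus, status);	/* write them back to clear them */
}
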
index 0ab531d047fc7a32f56f49d2ce75cd1e479272c5..8a98da0b3f8eb4f734cb72f0605c3d7c5a7aab1a 100644 (file)
@@ -29,6 +29,7 @@
 #include <mach/hardware.h>
 #include <mach/orion5x.h>
 #include <plat/orion_nand.h>
+#include <plat/ehci-orion.h>
 #include <plat/time.h>
 #include <plat/common.h>
 #include "common.h"
@@ -72,7 +73,8 @@ void __init orion5x_map_io(void)
 void __init orion5x_ehci0_init(void)
 {
        orion_ehci_init(&orion5x_mbus_dram_info,
-                       ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL);
+                       ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL,
+                       EHCI_PHY_ORION);
 }
 
 
index eac68978a2c2a3103ce05f34cbbdf0628f473717..db70e79a11985f7705d2ce8ad90a7c7a9be97e49 100644 (file)
@@ -65,8 +65,8 @@
 #define MPP8_GIGE               MPP(8,  0x1, 0, 0, 1,   1,   1)
 
 #define MPP9_UNUSED            MPP(9,  0x0, 0, 0, 1,   1,   1)
-#define MPP9_GPIO              MPP(9,  0x0, 0, 0, 1,   1,   1)
-#define MPP9_GIGE               MPP(9,  0x1, 1, 1, 1,   1,   1)
+#define MPP9_GPIO              MPP(9,  0x0, 1, 1, 1,   1,   1)
+#define MPP9_GIGE               MPP(9,  0x1, 0, 0, 1,   1,   1)
 
 #define MPP10_UNUSED           MPP(10, 0x0, 0, 0, 1,   1,   1)
 #define MPP10_GPIO             MPP(10, 0x0, 1, 1, 1,   1,   1)
index 810a982a66f8246291ffd8f776c5debb08714bc7..6ca327d956e92da62a81833b6bc5903f0de84611 100644 (file)
@@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {}
 /******************************************************************************
  * USB Gadget
  ******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
 static void balloon3_udc_command(int cmd)
 {
        if (cmd == PXA2XX_UDC_CMD_CONNECT)
index ff9ff5f4fc47e599f409c10935151e65621023e1..fdf611cdacc2cedfbfb3d4aebe35db9a57d10991 100644 (file)
@@ -147,7 +147,7 @@ static void __init colibri_pxa320_init_eth(void)
 static inline void __init colibri_pxa320_init_eth(void) {}
 #endif /* CONFIG_AX88796 */
 
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
 static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
        .gpio_vbus              = mfp_to_gpio(MFP_PIN_GPIO96),
        .gpio_pullup            = -1,
index d65e4bde9b917401d8a109642a2faf2ddd737ead..b9e8233ac48d9b57749d247d6885b2ce20d0ebfa 100644 (file)
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
 }
 #endif
 
-#ifdef CONFIG_USB_GADGET_PXA25X
+#ifdef CONFIG_USB_PXA25X
 static struct gpio_vbus_mach_info gumstix_udc_info = {
        .gpio_vbus              = GPIO_GUMSTIX_USB_GPIOn,
        .gpio_pullup            = GPIO_GUMSTIX_USB_GPIOx,
index 0a5e5eadebf5a91f5bae443137536dc984b9784a..8d560437e6e587e8464324bafecc5dfa55f86a32 100644 (file)
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
 static inline void palm27x_lcd_init(int power, struct pxafb_mode_info *mode) {}
 #endif
 
-#if    defined(CONFIG_USB_GADGET_PXA27X) || \
-       defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if    defined(CONFIG_USB_PXA27X) || \
+       defined(CONFIG_USB_PXA27X_MODULE)
 extern void __init palm27x_udc_init(int vbus, int pullup,
                                        int vbus_inverted);
 #else
index 325c245c0a0dd3916129b2875caaec931758137e..fbc10d7b95d1e8ef7ceec5dd03d512c7c2d314bf 100644 (file)
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
 /******************************************************************************
  * USB Gadget
  ******************************************************************************/
-#if    defined(CONFIG_USB_GADGET_PXA27X) || \
-       defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if    defined(CONFIG_USB_PXA27X) || \
+       defined(CONFIG_USB_PXA27X_MODULE)
 static struct gpio_vbus_mach_info palm27x_udc_info = {
        .gpio_vbus_inverted     = 1,
 };
index fb06bd04727257345f60ed1047b354c65c89e69c..5193ce27b9297df7ff6fa97cee085670103e2aee 100644 (file)
@@ -339,7 +339,7 @@ static inline void palmtc_mkp_init(void) {}
 /******************************************************************************
  * UDC
  ******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE)
+#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
 static struct gpio_vbus_mach_info palmtc_udc_info = {
        .gpio_vbus              = GPIO_NR_PALMTC_USB_DETECT_N,
        .gpio_vbus_inverted     = 1,
index 67bd41488bf81f05f0d853e708c243a747e8cd03..10b80d473930220244dcb07ab4e4ad14fbf0dedf 100644 (file)
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
 /******************************************************************************
  * USB Gadget
  ******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
 static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
        .gpio_vbus              = GPIO41_VPAC270_UDC_DETECT,
        .gpio_pullup            = -1,
index 9a9706cf149656b3ef5c977cdde8eee599020164..6ebdb0d0382822b8fb40e0fd76f2b2f2fc800aed 100644 (file)
@@ -7,6 +7,7 @@ config UX500_SOC_COMMON
        select HAS_MTU
        select ARM_ERRATA_753970
        select ARM_ERRATA_754322
+       select ARM_ERRATA_764369
 
 menu "Ux500 SoC"
 
index 1da23bb87c16bcec2047d9ac8e08e474d2f505ce..8aa104a4711a0617c3f9d895fca106846438d789 100644 (file)
@@ -99,7 +99,27 @@ static void ux500_l2x0_inv_all(void)
        ux500_cache_sync();
 }
 
-static int ux500_l2x0_init(void)
+static int __init ux500_l2x0_unlock(void)
+{
+       int i;
+
+       /*
+        * Unlock Data and Instruction Lock if locked. Ux500 U-Boot versions
+        * apparently locks both caches before jumping to the kernel. The
+        * l2x0 core will not touch the unlock registers if the l2x0 is
+        * already enabled, so we do it right here instead. The PL310 has
+        * 8 sets of registers, one per possible CPU.
+        */
+       for (i = 0; i < 8; i++) {
+               writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+                              i * L2X0_LOCKDOWN_STRIDE);
+               writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+                              i * L2X0_LOCKDOWN_STRIDE);
+       }
+       return 0;
+}
+
+static int __init ux500_l2x0_init(void)
 {
        if (cpu_is_u5500())
                l2x0_base = __io_address(U5500_L2CC_BASE);
@@ -108,6 +128,9 @@ static int ux500_l2x0_init(void)
        else
                ux500_unknown_soc();
 
+       /* Unlock before init */
+       ux500_l2x0_unlock();
+
        /* 64KB way size, 8 way associativity, force WA */
        l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
 
index 6edea01c291e1606a4f65b5441061594cc6f36a1..e5cc05aab5e3dfae434bc7a43edafe805d417eac 100644 (file)
@@ -344,9 +344,7 @@ __v7_setup:
        mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
 #endif
 #ifdef CONFIG_ARM_ERRATA_743622
-       teq     r6, #0x20                       @ present in r2p0
-       teqne   r6, #0x21                       @ present in r2p1
-       teqne   r6, #0x22                       @ present in r2p2
+       teq     r5, #0x00200000                 @ only present in r2p*
        mrceq   p15, 0, r10, c15, c0, 1         @ read diagnostic register
        orreq   r10, r10, #1 << 6               @ set bit #6
        mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
index c074e66ad224e83d18d1f278afc78aa00e18b494..4e0a371630b38fb3a950b9063f30a53d3ed0b5a7 100644 (file)
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
        oprofile_perf_exit();
 }
index ebbce33097a76f81cae8696758ac713091279f88..45099566fecc64e68588126b0c794e3346f15193 100644 (file)
@@ -89,11 +89,11 @@ typedef u64 iomux_v3_cfg_t;
 #define PAD_CTL_HYS                    (1 << 8)
 
 #define PAD_CTL_PKE                    (1 << 7)
-#define PAD_CTL_PUE                    (1 << 6)
-#define PAD_CTL_PUS_100K_DOWN          (0 << 4)
-#define PAD_CTL_PUS_47K_UP             (1 << 4)
-#define PAD_CTL_PUS_100K_UP            (2 << 4)
-#define PAD_CTL_PUS_22K_UP             (3 << 4)
+#define PAD_CTL_PUE                    (1 << 6 | PAD_CTL_PKE)
+#define PAD_CTL_PUS_100K_DOWN          (0 << 4 | PAD_CTL_PUE)
+#define PAD_CTL_PUS_47K_UP             (1 << 4 | PAD_CTL_PUE)
+#define PAD_CTL_PUS_100K_UP            (2 << 4 | PAD_CTL_PUE)
+#define PAD_CTL_PUS_22K_UP             (3 << 4 | PAD_CTL_PUE)
 
 #define PAD_CTL_ODE                    (1 << 3)
 
index 7a61ef8f471a49fd3ac0b67540f949cf13eaaa43..f4b68beddbb3df49a6608589357a9962799426d4 100644 (file)
@@ -32,6 +32,9 @@
 #define MX3_PWMSAR                0x0C    /* PWM Sample Register */
 #define MX3_PWMPR                 0x10    /* PWM Period Register */
 #define MX3_PWMCR_PRESCALER(x)    (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN                (1 << 24)
+#define MX3_PWMCR_WAITEN                (1 << 23)
+#define MX3_PWMCR_DBGEN                        (1 << 22)
 #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
 #define MX3_PWMCR_CLKSRC_IPG      (1 << 16)
 #define MX3_PWMCR_EN              (1 << 0)
@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
                do_div(c, period_ns);
                duty_cycles = c;
 
+               /*
+                * according to imx pwm RM, the real period value should be
+                * PERIOD value in PWMPR plus 2.
+                */
+               if (period_cycles > 2)
+                       period_cycles -= 2;
+               else
+                       period_cycles = 0;
+
                writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
                writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 
-               cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+               cr = MX3_PWMCR_PRESCALER(prescale) |
+                       MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+                       MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
 
                if (cpu_is_mx25())
                        cr |= MX3_PWMCR_CLKSRC_IPG;
index 9e5451b3c8e3be92c441dc977c906b63715544a5..11dce87c24871bc5d64d82aee91561f38e72b1ff 100644 (file)
@@ -806,10 +806,7 @@ void __init orion_xor1_init(unsigned long mapbase_low,
 /*****************************************************************************
  * EHCI
  ****************************************************************************/
-static struct orion_ehci_data orion_ehci_data = {
-       .phy_version    = EHCI_PHY_NA,
-};
-
+static struct orion_ehci_data orion_ehci_data;
 static u64 ehci_dmamask = DMA_BIT_MASK(32);
 
 
@@ -830,9 +827,11 @@ static struct platform_device orion_ehci = {
 
 void __init orion_ehci_init(struct mbus_dram_target_info *mbus_dram_info,
                            unsigned long mapbase,
-                           unsigned long irq)
+                           unsigned long irq,
+                           enum orion_ehci_phy_ver phy_version)
 {
        orion_ehci_data.dram = mbus_dram_info;
+       orion_ehci_data.phy_version = phy_version;
        fill_resources(&orion_ehci, orion_ehci_resources, mapbase, SZ_4K - 1,
                       irq);
 
index a63c357e2ab1a89320d0fe0e55880319fb43fea6..a2c0e31ce0dc3e0b6bfedcf4cb92dd5de8eed0f0 100644 (file)
@@ -95,7 +95,8 @@ void __init orion_xor1_init(unsigned long mapbase_low,
 
 void __init orion_ehci_init(struct mbus_dram_target_info *mbus_dram_info,
                            unsigned long mapbase,
-                           unsigned long irq);
+                           unsigned long irq,
+                           enum orion_ehci_phy_ver phy_version);
 
 void __init orion_ehci_1_init(struct mbus_dram_target_info *mbus_dram_info,
                              unsigned long mapbase,
index 91553432711d0eb079110512f9246feafa7a2a4b..3b1e17bd3d17ddbffaf3be9e7b11631b20dfbd29 100644 (file)
@@ -64,8 +64,7 @@ void __init orion_mpp_conf(unsigned int *mpp_list, unsigned int variant_mask,
                        gpio_mode |= GPIO_INPUT_OK;
                if (*mpp_list & MPP_OUTPUT_MASK)
                        gpio_mode |= GPIO_OUTPUT_OK;
-               if (sel != 0)
-                       gpio_mode = 0;
+
                orion_gpio_set_valid(num, gpio_mode);
        }
 
index 539bd0e3defdc2ab9be00294a997a80fd060f6d6..0719f49defb2e0df0819369fbdcbd9a593323dbd 100644 (file)
@@ -1249,7 +1249,7 @@ static void s3c2410_dma_resume(void)
        struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1;
        int channel;
 
-       for (channel = dma_channels - 1; channel >= 0; cp++, channel--)
+       for (channel = dma_channels - 1; channel >= 0; cp--, channel--)
                s3c2410_dma_resume_chan(cp);
 }
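
The s3c24xx DMA hunk above fixes a loop that starts at the last channel but advanced its cursor pointer forward instead of backward. A standalone sketch of the corrected walk, with made-up types:

struct chan {
	int id;
};

static void resume_chan(struct chan *cp)
{
	(void)cp;	/* placeholder for the real per-channel resume */
}

static void resume_all(struct chan *chans, int nr_chans)
{
	struct chan *cp = chans + nr_chans - 1;
	int channel;

	/* cp-- keeps the pointer in step with the descending index;
	 * the old cp++ walked off the end of the array. */
	for (channel = nr_chans - 1; channel >= 0; cp--, channel--)
		resume_chan(cp);
}
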
 
index e9d689b7c833d2c5218ce3d97876c3023785f4e8..c614484f0fca6f9355e17dce949d58b78799ace9 100644 (file)
@@ -8,6 +8,7 @@ config AVR32
        select HAVE_KPROBES
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_PROBE
+       select GENERIC_ATOMIC64
        select HARDIRQS_SW_RESEND
        select GENERIC_IRQ_SHOW
        help
index 8428525ddb225de4cf1ea49eb783b3f64d34e4a0..21ab376465d5410ef78d86c83199b3bbe5a7aed4 100644 (file)
@@ -107,15 +107,16 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
        {
-               register unsigned long r8 __asm ("r8") = 0;
+               register unsigned long r8 __asm ("r8");
                unsigned long prev;
                __asm__ __volatile__(
                        "       mf;;                                    \n"
-                       "       mov ar.ccv=%3;;                         \n"
-                       "[1:]   cmpxchg4.acq %0=[%1],%2,ar.ccv          \n"
+                       "       mov %0=r0                               \n"
+                       "       mov ar.ccv=%4;;                         \n"
+                       "[1:]   cmpxchg4.acq %1=[%2],%3,ar.ccv          \n"
                        "       .xdata4 \"__ex_table\", 1b-., 2f-.      \n"
                        "[2:]"
-                       : "=r" (prev)
+                       : "=r" (r8), "=r" (prev)
                        : "r" (uaddr), "r" (newval),
                          "rO" ((long) (unsigned) oldval)
                        : "memory");
index 7c928da35b173732cc01d9a8d6f749ef4310d691..d8de1825b736f2a6c5f27568b9a1e788edce47c0 100644 (file)
 #define __NR_syncfs                    1329
 #define __NR_setns                     1330
 #define __NR_sendmmsg                  1331
+#define __NR_accept4                   1334
 
 #ifdef __KERNEL__
 
 
-#define NR_syscalls                    308 /* length of syscall table */
+#define NR_syscalls                    311 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 3be485a300b1426af87db0d545a4ca176e8d67be..f19de9f7f5f53ab1f0161809b1e521e65dfc0964 100644 (file)
@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 static struct acpi_table_slit __initdata *slit_table;
 cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
 
-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+static int __init
+get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
        int pxm;
 
        pxm = pa->proximity_domain_lo;
-       if (ia64_platform_is("sn2"))
+       if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
                pxm += pa->proximity_domain_hi[0] << 8;
        return pxm;
 }
 
-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+static int __init
+get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
 {
        int pxm;
 
        pxm = ma->proximity_domain;
-       if (!ia64_platform_is("sn2"))
+       if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
                pxm &= 0xff;
 
        return pxm;
index 97dd2abdeb1a3b2cd66a409d1dc4bf8ec0345b55..df477f8c9d82e7e4a3b22cf79671f26fe3b05816 100644 (file)
@@ -1777,6 +1777,9 @@ sys_call_table:
        data8 sys_syncfs
        data8 sys_setns                         // 1330
        data8 sys_sendmmsg
+       data8 sys_ni_syscall    /* process_vm_readv */
+       data8 sys_ni_syscall    /* process_vm_writev */
+       data8 sys_accept4
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
index c247de02bc7e2fe0c49cc09522f75b9f4d0b3a67..1918d76aa06bc690f2e6993cde61fd97c67fc921 100644 (file)
@@ -950,6 +950,9 @@ int __init mac_platform_init(void)
 {
        u8 *swim_base;
 
+       if (!MACH_IS_MAC)
+               return -ENODEV;
+
        /*
         * Serial devices
         */
index c5edc60c059f08d0d7b610bdf9793a56db7af0f8..1ee7c82672c1c8791f995e5a61c6f0f30bbcf4c7 100644 (file)
 #define ARCH_HAS_PREFETCH
 static inline void prefetch(const void *addr)
 {
-       __asm__("ldw 0(%0), %%r0" : : "r" (addr));
+       __asm__(
+#ifndef CONFIG_PA20
+               /* Need to avoid prefetch of NULL on PA7300LC */
+               "       extrw,u,= %0,31,32,%%r0\n"
+#endif
+               "       ldw 0(%0), %%r0" : : "r" (addr));
 }
 
 /* LDD is a PA2.0 addition. */
index 6f05944391439bcf88ac7761616bd94b24669fcd..07ef351edd57da0b4fe576a6eed02026b148e890 100644 (file)
         * entry (identifying the physical page) and %r23 up with
         * the from tlb entry (or nothing if only a to entry---for
         * clear_user_page_asm) */
-       .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault
+       .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault,patype
        cmpib,COND(<>),n 0,\spc,\fault
        ldil            L%(TMPALIAS_MAP_START),\tmp
 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
         */
        cmpiclr,=       0x01,\tmp,%r0
        ldi             (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
+.ifc \patype,20
        depd,z          \prot,8,7,\prot
+.else
+.ifc \patype,11
+       depw,z          \prot,8,7,\prot
+.else
+       .error "undefined PA type to do_alias"
+.endif
+.endif
        /*
         * OK, it is in the temp alias region, check whether "from" or "to".
         * Check "subtle" note in pacache.S re: r23/r26.
@@ -1185,7 +1193,7 @@ dtlb_miss_20w:
        nop
 
 dtlb_check_alias_20w:
-       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20
 
        idtlbt          pte,prot
 
@@ -1209,7 +1217,7 @@ nadtlb_miss_20w:
        nop
 
 nadtlb_check_alias_20w:
-       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate
+       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20
 
        idtlbt          pte,prot
 
@@ -1241,7 +1249,7 @@ dtlb_miss_11:
        nop
 
 dtlb_check_alias_11:
-       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,11
 
        idtlba          pte,(va)
        idtlbp          prot,(va)
@@ -1273,7 +1281,7 @@ nadtlb_miss_11:
        nop
 
 nadtlb_check_alias_11:
-       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate
+       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,11
 
        idtlba          pte,(va)
        idtlbp          prot,(va)
@@ -1300,7 +1308,7 @@ dtlb_miss_20:
        nop
 
 dtlb_check_alias_20:
-       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20
        
        idtlbt          pte,prot
 
@@ -1326,7 +1334,7 @@ nadtlb_miss_20:
        nop
 
 nadtlb_check_alias_20:
-       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate
+       do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20
 
        idtlbt          pte,prot
 
@@ -1453,7 +1461,7 @@ naitlb_miss_20w:
        nop
 
 naitlb_check_alias_20w:
-       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20
 
        iitlbt          pte,prot
 
@@ -1507,7 +1515,7 @@ naitlb_miss_11:
        nop
 
 naitlb_check_alias_11:
-       do_alias        spc,t0,t1,va,pte,prot,itlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,itlb_fault,11
 
        iitlba          pte,(%sr0, va)
        iitlbp          prot,(%sr0, va)
@@ -1553,7 +1561,7 @@ naitlb_miss_20:
        nop
 
 naitlb_check_alias_20:
-       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault
+       do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20
 
        iitlbt          pte,prot
 
index 93ff3d90edd1edc8a0d5d41ecc4adae27efe45f3..5d7218ad885c3841e22c987af9419e3790d50840 100644 (file)
@@ -692,7 +692,7 @@ ENTRY(flush_icache_page_asm)
 
        /* Purge any old translation */
 
-       pitlb           (%sr0,%r28)
+       pitlb           (%sr4,%r28)
 
        ldil            L%icache_stride, %r1
        ldw             R%icache_stride(%r1), %r1
@@ -706,27 +706,29 @@ ENTRY(flush_icache_page_asm)
        sub             %r25, %r1, %r25
 
 
-1:      fic,m          %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
-       fic,m           %r1(%r28)
+       /* fic only has the type 26 form on PA1.1, requiring an
+        * explicit space specification, so use %sr4 */
+1:      fic,m          %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
+       fic,m           %r1(%sr4,%r28)
        cmpb,COND(<<)           %r28, %r25,1b
-       fic,m           %r1(%r28)
+       fic,m           %r1(%sr4,%r28)
 
        sync
        bv              %r0(%r2)
-       pitlb           (%sr0,%r25)
+       pitlb           (%sr4,%r25)
        .exit
 
        .procend
index fa6f2b8163e03cc1bdc953bbb1e2b8282b826c7a..64a999882e4fb8d0d584da223c7b1f43842e8d2c 100644 (file)
@@ -50,8 +50,10 @@ SECTIONS
        . = KERNEL_BINARY_TEXT_START;
 
        _text = .;              /* Text and read-only data */
-       .text ALIGN(16) : {
+       .head ALIGN(16) : {
                HEAD_TEXT
+       } = 0
+       .text ALIGN(16) : {
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
@@ -65,7 +67,7 @@ SECTIONS
                *(.fixup)
                *(.lock.text)           /* out-of-line lock text */
                *(.gnu.warning)
-       } = 0
+       }
        /* End of text section */
        _etext = .;
 
index 6fbce725c710f97453079205e190c02c7365cd0b..a0f358d4a00cd57ba3b22fbbf1024c7daba65fa4 100644 (file)
@@ -8,7 +8,7 @@
 
 #ifdef __powerpc64__
 
-extern char _end[];
+extern char __end_interrupts[];
 
 static inline int in_kernel_text(unsigned long addr)
 {
index 54a47ea2c3aa23385dcd5855614f962529bdda5f..0c5fa3145615c28074208ca64e79fdac12b06170 100644 (file)
@@ -16,7 +16,7 @@
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-extern void create_section_mapping(unsigned long start, unsigned long end);
+extern int create_section_mapping(unsigned long start, unsigned long end);
 extern int remove_section_mapping(unsigned long start, unsigned long end);
 #ifdef CONFIG_NUMA
 extern int hot_add_scn_to_nid(unsigned long scn_addr);
index d7cab44643c51d90f1f79509939e3c734b735eba..87878c68d1c2048bbac490c65a81030bfae317c7 100644 (file)
@@ -13,6 +13,7 @@
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
                             void *fixup_end);
+extern void do_final_fixups(void);
 
 static inline void eieio(void)
 {
index fe6f7c2c9c6889600bd0ec00fab55ed6b7daf1f6..bc3c745cb906a4d88fceffcdbac506bb1631eb5d 100644 (file)
@@ -219,5 +219,7 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
 extern void secondary_cpu_time_init(void);
 extern void iSeries_time_init_early(void);
 
+extern void decrementer_check_overflow(void);
+
 #endif /* __KERNEL__ */
 #endif /* __POWERPC_TIME_H */
index 5b428e3086662bcc802f9ce5049ada0c92aefa63..ca2987d939f5b153546a22b95cc9c6cb598cb62f 100644 (file)
@@ -170,16 +170,13 @@ notrace void arch_local_irq_restore(unsigned long en)
         */
        local_paca->hard_enabled = en;
 
-#ifndef CONFIG_BOOKE
-       /* On server, re-trigger the decrementer if it went negative since
-        * some processors only trigger on edge transitions of the sign bit.
-        *
-        * BookE has a level sensitive decrementer (latches in TSR) so we
-        * don't need that
+       /*
+        * Trigger the decrementer if we have a pending event. Some processors
+        * only trigger on edge transitions of the sign bit. We might also
+        * have disabled interrupts long enough that the decrementer wrapped
+        * to positive.
         */
-       if ((int)mfspr(SPRN_DEC) < 0)
-               mtspr(SPRN_DEC, 1);
-#endif /* CONFIG_BOOKE */
+       decrementer_check_overflow();
 
        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
index b06bdae04064f59d2c4c4c98e76304dee6ca98c8..ad892f7a7574217747fc2b53d37b6663dc21b6da 100644 (file)
@@ -131,7 +131,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
        /* On relocatable kernels interrupts handlers and our code
           can be in different regions, so we don't patch them */
 
-       extern u32 __end_interrupts;
        if ((ulong)inst < (ulong)&__end_interrupts)
                return;
 #endif
index f832773fc28e940f82ed0dcecc6104d5e6e71939..449a7e053e6751f492967293f6e43a6825922088 100644 (file)
@@ -187,8 +187,8 @@ int apply_relocate(Elf32_Shdr *sechdrs,
 
 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
 {
-       if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
-           && entry->jump[1] == 0x396b0000 + (val & 0xffff))
+       if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
+           && entry->jump[1] == 0x398c0000 + (val & 0xffff))
                return 1;
        return 0;
 }
@@ -215,10 +215,9 @@ static uint32_t do_plt_call(void *location,
                entry++;
        }
 
-       /* Stolen from Paul Mackerras as well... */
-       entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
-       entry->jump[1] = 0x396b0000 + (val&0xffff);     /* addi r11,r11,sym@l*/
-       entry->jump[2] = 0x7d6903a6;                    /* mtctr r11 */
+       entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
+       entry->jump[1] = 0x398c0000 + (val&0xffff);     /* addi r12,r12,sym@l*/
+       entry->jump[2] = 0x7d8903a6;                    /* mtctr r12 */
        entry->jump[3] = 0x4e800420;                    /* bctr */
 
        DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
index 822f63008ae11642b570986c7faf8ca61478ae10..5793c4ba5a0352578c42f32ef95fd0d0c03b0b36 100644 (file)
@@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
 {
        unsigned long flags;
        s64 left;
+       unsigned long val;
 
        if (!event->hw.idx || !event->hw.sample_period)
                return;
@@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
 
        event->hw.state = 0;
        left = local64_read(&event->hw.period_left);
-       write_pmc(event->hw.idx, left);
+
+       val = 0;
+       if (left < 0x80000000L)
+               val = 0x80000000L - left;
+
+       write_pmc(event->hw.idx, val);
 
        perf_event_update_userpage(event);
        perf_pmu_enable(event->pmu);
index 620d792b52e471cbd6658dba347f10e95660bf36..c7e7b8c718d415c597bef47c75805858428b0022 100644 (file)
@@ -107,6 +107,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
                         PTRRELOC(&__start___lwsync_fixup),
                         PTRRELOC(&__stop___lwsync_fixup));
 
+       do_final_fixups();
+
        return KERNELBASE + offset;
 }
 
index a88bf2713d4175a5845ff33b37fe340a4b20518a..7867fd17a0de5fcfed073d14bcc908d07eb62684 100644 (file)
@@ -352,6 +352,7 @@ void __init setup_system(void)
                          &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
        do_lwsync_fixups(cur_cpu_spec->cpu_features,
                         &__start___lwsync_fixup, &__stop___lwsync_fixup);
+       do_final_fixups();
 
        /*
         * Unflatten the device-tree passed by prom_init or kexec
index 03b29a6759ab55b087528c9e1be70d095c2a8d2b..2de304af07abf9f715d59a31975d5e8a20ceaf7f 100644 (file)
@@ -889,6 +889,15 @@ static void __init clocksource_init(void)
               clock->name, clock->mult, clock->shift);
 }
 
+void decrementer_check_overflow(void)
+{
+       u64 now = get_tb_or_rtc();
+       struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
+
+       if (now >= decrementer->next_tb)
+               set_dec(1);
+}
+
 static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
 {
index 0d08d0171392a4e7ec72e21226737cc458e3a98f..7a8a7487cee8dde9d06aa86fff3bd32bdd54433e 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/init.h>
 #include <asm/cputable.h>
 #include <asm/code-patching.h>
+#include <asm/page.h>
+#include <asm/sections.h>
 
 
 struct fixup_entry {
@@ -128,6 +130,27 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
        }
 }
 
+void do_final_fixups(void)
+{
+#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
+       int *src, *dest;
+       unsigned long length;
+
+       if (PHYSICAL_START == 0)
+               return;
+
+       src = (int *)(KERNELBASE + PHYSICAL_START);
+       dest = (int *)KERNELBASE;
+       length = (__end_interrupts - _stext) / sizeof(int);
+
+       while (length--) {
+               patch_instruction(dest, *src);
+               src++;
+               dest++;
+       }
+#endif
+}
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x)       \
index fec13200868f8e6ab40fd834abe7ed4bf6dc89f8..d7efdbf640c7d5e39cd8c3c55f79aa0b8ca64f10 100644 (file)
 
 #ifdef __HAVE_ARCH_PTE_SPECIAL
 
-static inline void get_huge_page_tail(struct page *page)
-{
-       /*
-        * __split_huge_page_refcount() cannot run
-        * from under us.
-        */
-       VM_BUG_ON(atomic_read(&page->_count) < 0);
-       atomic_inc(&page->_count);
-}
-
 /*
  * The performance critical leaf functions are made noinline otherwise gcc
  * inlines everything into a single function which results in too much
@@ -57,8 +47,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                        put_page(page);
                        return 0;
                }
-               if (PageTail(page))
-                       get_huge_page_tail(page);
                pages[*nr] = page;
                (*nr)++;
 
index 26b2872b3d002e431fb3f423f0d7d6878ac049db..07f9e9f0d8713ace0ce08f494c02057cc3726252 100644 (file)
@@ -534,11 +534,11 @@ static unsigned long __init htab_get_table_size(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-void create_section_mapping(unsigned long start, unsigned long end)
+int create_section_mapping(unsigned long start, unsigned long end)
 {
-       BUG_ON(htab_bolt_mapping(start, end, __pa(start),
+       return htab_bolt_mapping(start, end, __pa(start),
                                 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
-                                mmu_kernel_ssize));
+                                mmu_kernel_ssize);
 }
 
 int remove_section_mapping(unsigned long start, unsigned long end)
index 0b9a5c1901b9e52a89880f684e9507acb9cc9e09..da5eb388570210ca9aa70ba0de41444659c49d02 100644 (file)
@@ -390,7 +390,7 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
 {
        unsigned long mask;
        unsigned long pte_end;
-       struct page *head, *page;
+       struct page *head, *page, *tail;
        pte_t pte;
        int refs;
 
@@ -413,6 +413,7 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
        head = pte_page(pte);
 
        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+       tail = page;
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
@@ -428,10 +429,20 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
 
        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */
-               while (*nr) {
-                       put_page(page);
-                       (*nr)--;
-               }
+               *nr -= refs;
+               while (refs--)
+                       put_page(head);
+               return 0;
+       }
+
+       /*
+        * Any tail page need their mapcount reference taken before we
+        * return.
+        */
+       while (refs--) {
+               if (PageTail(tail))
+                       get_huge_page_tail(tail);
+               tail++;
        }
 
        return 1;
index 29d4dde65c45f9b6f075d2d575fac1d05e2bbdc7..278ec8ef4f62400fbbb6a36dc520fd1d2ff015d3 100644 (file)
@@ -123,7 +123,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
        pgdata = NODE_DATA(nid);
 
        start = (unsigned long)__va(start);
-       create_section_mapping(start, start + size);
+       if (create_section_mapping(start, start + size))
+               return -EINVAL;
 
        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;
index 3bafc3deca6d66478161cca9db8bc2fb9107173a..4ff587e38250d287f56748aff3f8d61675ddda43 100644 (file)
@@ -136,8 +136,8 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
        if (!mm || !acop)
                return -EINVAL;
 
-       /* We need to make sure mm_users doesn't change */
-       down_read(&mm->mmap_sem);
+       /* The page_table_lock ensures mm_users won't change under us */
+       spin_lock(&mm->page_table_lock);
        spin_lock(mm->context.cop_lockp);
 
        if (mm->context.cop_pid == COP_PID_NONE) {
@@ -164,7 +164,7 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
 
 out:
        spin_unlock(mm->context.cop_lockp);
-       up_read(&mm->mmap_sem);
+       spin_unlock(&mm->page_table_lock);
 
        return ret;
 }
@@ -185,8 +185,8 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
        if (WARN_ON_ONCE(!mm))
                return;
 
-       /* We need to make sure mm_users doesn't change */
-       down_read(&mm->mmap_sem);
+       /* The page_table_lock ensures mm_users won't change under us */
+       spin_lock(&mm->page_table_lock);
        spin_lock(mm->context.cop_lockp);
 
        mm->context.acop &= ~acop;
@@ -213,7 +213,7 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
        }
 
        spin_unlock(mm->context.cop_lockp);
-       up_read(&mm->mmap_sem);
+       spin_unlock(&mm->page_table_lock);
 }
 EXPORT_SYMBOL_GPL(drop_cop);
 
index 2164006fe170f8e0e8a7adddd67e78803872c9e2..2c1ae7a5fb53c79ee09eff79f6a94e07f2cf7f53 100644 (file)
@@ -1214,11 +1214,12 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
                        break;
                }
 
-               of_node_put(memory);
                if (nid >= 0)
                        break;
        }
 
+       of_node_put(memory);
+
        return nid;
 }
 
index db092d7c4c5b3ee41062af909af2e3a355bc06b7..53a6be7ebe3cd05e5e353137dd3b8ee6423a2254 100644 (file)
@@ -414,7 +414,7 @@ static struct irqaction psurge_irqaction = {
 
 static void __init smp_psurge_setup_cpu(int cpu_nr)
 {
-       if (cpu_nr != 0)
+       if (cpu_nr != 0 || !psurge_start)
                return;
 
        /* reset the entry point so if we get another intr we won't
index 600ed2c0ed5937ce31980eb5e3d8d9db2c1196ed..1aa478b97aace2921d375459da7cfc8373b63de8 100644 (file)
@@ -88,6 +88,7 @@ struct ps3_private {
        struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
        u64 ppe_id;
        u64 thread_id;
+       unsigned long ipi_mask;
 };
 
 static DEFINE_PER_CPU(struct ps3_private, ps3_private);
@@ -144,7 +145,11 @@ static void ps3_chip_unmask(struct irq_data *d)
 static void ps3_chip_eoi(struct irq_data *d)
 {
        const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
-       lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
+
+       /* non-IPIs are EOIed here. */
+
+       if (!test_bit(63 - d->irq, &pd->ipi_mask))
+               lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
 }
 
 /**
@@ -691,6 +696,16 @@ void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
                cpu, virq, pd->bmp.ipi_debug_brk_mask);
 }
 
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
+{
+       struct ps3_private *pd = &per_cpu(ps3_private, cpu);
+
+       set_bit(63 - virq, &pd->ipi_mask);
+
+       DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
+               cpu, virq, pd->ipi_mask);
+}
+
 static unsigned int ps3_get_irq(void)
 {
        struct ps3_private *pd = &__get_cpu_var(ps3_private);
@@ -720,6 +735,12 @@ static unsigned int ps3_get_irq(void)
                BUG();
        }
 #endif
+
+       /* IPIs are EOIed here. */
+
+       if (test_bit(63 - plug, &pd->ipi_mask))
+               lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
+
        return plug;
 }
 
index 9a196a88eda794d6d07f50abefaebc7a21902427..1a633ed0fe98744d8994d7e6367180a11ab59a96 100644 (file)
@@ -43,6 +43,7 @@ void ps3_mm_shutdown(void);
 void ps3_init_IRQ(void);
 void ps3_shutdown_IRQ(int cpu);
 void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
 
 /* smp */
 
index 4c44794faac0b344ac50663ed12122519c37b53d..f609345b6c3aa270be9e1630e838964760c9f741 100644 (file)
@@ -94,6 +94,8 @@ static void __init ps3_smp_setup_cpu(int cpu)
 
                if (result)
                        virqs[i] = NO_IRQ;
+               else
+                       ps3_register_ipi_irq(cpu, virqs[i]);
        }
 
        ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
index 57ceb92b2288a36e2e35f719b959ac0294974b2a..82766e5a79e56f8c56fe8d435f92e02b0f78c71b 100644 (file)
@@ -112,6 +112,7 @@ void dlpar_free_cc_nodes(struct device_node *dn)
        dlpar_free_one_cc_node(dn);
 }
 
+#define COMPLETE       0
 #define NEXT_SIBLING    1
 #define NEXT_CHILD      2
 #define NEXT_PROPERTY   3
@@ -158,6 +159,9 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
                spin_unlock(&rtas_data_buf_lock);
 
                switch (rc) {
+               case COMPLETE:
+                       break;
+
                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa);
                        if (!dn)
index 46b55cf563e3a482943e3c2c6c03925f985e8b8c..3608704554d36b270f2cfc25045c3dab80609079 100644 (file)
@@ -1338,7 +1338,7 @@ static const struct file_operations proc_eeh_operations = {
 static int __init eeh_init_proc(void)
 {
        if (machine_is(pseries))
-               proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations);
+               proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
        return 0;
 }
 __initcall(eeh_init_proc);
index f106662f4381e23384082ffe65534e6f7cafaa91..c9311cfdfcaced8b283e53aab5e6e96e152ad39b 100644 (file)
@@ -109,7 +109,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
        if (opcode > MAX_HCALL_OPCODE)
                return;
 
-       h = &get_cpu_var(hcall_stats)[opcode / 4];
+       h = &__get_cpu_var(hcall_stats)[opcode / 4];
        h->tb_start = mftb();
        h->purr_start = mfspr(SPRN_PURR);
 }
@@ -126,8 +126,6 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
        h->num_calls++;
        h->tb_total += mftb() - h->tb_start;
        h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
-
-       put_cpu_var(hcall_stats);
 }
 
 static int __init hcall_inst_init(void)
index ed96b37653777e968dcc709e2199b7fff6c6fd82..81e30d96f83fa6aadec9934a2828bf992dba2f13 100644 (file)
@@ -745,6 +745,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
                goto out;
 
        (*depth)++;
+       preempt_disable();
        trace_hcall_entry(opcode, args);
        (*depth)--;
 
@@ -767,6 +768,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
 
        (*depth)++;
        trace_hcall_exit(opcode, retval, retbuf);
+       preempt_enable();
        (*depth)--;
 
 out:
index c03fef7a9c2220c45ca1584ad8f65191bb5c9e76..c395f713ce31305e1876d19ad9281453897f09bc 100644 (file)
@@ -89,7 +89,6 @@ config S390
        select HAVE_GET_USER_PAGES_FAST
        select HAVE_ARCH_MUTEX_CPU_RELAX
        select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-       select HAVE_RCU_TABLE_FREE if SMP
        select ARCH_INLINE_SPIN_TRYLOCK
        select ARCH_INLINE_SPIN_TRYLOCK_BH
        select ARCH_INLINE_SPIN_LOCK
@@ -228,6 +227,9 @@ config COMPAT
 config SYSVIPC_COMPAT
        def_bool y if COMPAT && SYSVIPC
 
+config KEYS_COMPAT
+       def_bool y if COMPAT && KEYS
+
 config AUDIT_ARCH
        def_bool y
 
index da359ca6fe55efdc9e9aff257622550bf55f008c..f7b74bcce10cf1b9e4c0e9f776752091cafb7e19 100644 (file)
@@ -172,13 +172,6 @@ static inline int is_compat_task(void)
        return is_32bit_task();
 }
 
-#else
-
-static inline int is_compat_task(void)
-{
-       return 0;
-}
-
 #endif
 
 static inline void __user *arch_compat_alloc_user_space(long len)
index 38e71ebcd3c276c2483d5e378f4cc63f189c20a2..e4b6609fe92a29efd2a35a83b9042c353905ed02 100644 (file)
@@ -22,10 +22,7 @@ void crst_table_free(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
-void __tlb_remove_table(void *_table);
-#endif
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
index c687a2c834626adb1f01cc24ae4f62713d83adaa..775a5eea8f9eb9896e9d38e809dc74d51823d99f 100644 (file)
 
 struct mmu_gather {
        struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        struct mmu_table_batch *batch;
-#endif
        unsigned int fullmm;
-       unsigned int need_flush;
 };
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 struct mmu_table_batch {
        struct rcu_head         rcu;
        unsigned int            nr;
@@ -49,7 +45,6 @@ struct mmu_table_batch {
 
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-#endif
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
                                  struct mm_struct *mm,
@@ -57,29 +52,20 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 {
        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;
-       tlb->need_flush = 0;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
-#endif
        if (tlb->fullmm)
                __tlb_flush_mm(mm);
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-       if (!tlb->need_flush)
-               return;
-       tlb->need_flush = 0;
-       __tlb_flush_mm(tlb->mm);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
-#endif
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
                                  unsigned long start, unsigned long end)
 {
-       tlb_flush_mmu(tlb);
+       tlb_table_flush(tlb);
 }
 
 /*
@@ -105,10 +91,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
                                unsigned long address)
 {
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        if (!tlb->fullmm)
                return page_table_free_rcu(tlb, (unsigned long *) pte);
-#endif
        page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
@@ -125,10 +109,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef __s390x__
        if (tlb->mm->context.asce_limit <= (1UL << 31))
                return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        if (!tlb->fullmm)
                return tlb_remove_table(tlb, pmd);
-#endif
        crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
@@ -146,10 +128,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef __s390x__
        if (tlb->mm->context.asce_limit <= (1UL << 42))
                return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        if (!tlb->fullmm)
                return tlb_remove_table(tlb, pud);
-#endif
        crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }
index 541a7509faebd47f7b9d5a72a1f779c3efa2e1f4..abdc2b1063ed080f98b8fa50e7ba40ed00cde85a 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/irq.h>
 #include <asm/timer.h>
 #include <asm/nmi.h>
-#include <asm/compat.h>
 #include <asm/smp.h>
 #include "entry.h"
 
index ef86ad2439868ffd4889b56f378f19e5c1f11968..5c55466e78e6ac7c1b91c5bfea8b581c53d9dc58 100644 (file)
@@ -20,8 +20,8 @@
 #include <linux/regset.h>
 #include <linux/tracehook.h>
 #include <linux/seccomp.h>
+#include <linux/compat.h>
 #include <trace/syscall.h>
-#include <asm/compat.h>
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -47,29 +47,31 @@ enum s390_regset {
 
 void update_per_regs(struct task_struct *task)
 {
-       static const struct per_regs per_single_step = {
-               .control = PER_EVENT_IFETCH,
-               .start = 0,
-               .end = PSW_ADDR_INSN,
-       };
        struct pt_regs *regs = task_pt_regs(task);
        struct thread_struct *thread = &task->thread;
-       const struct per_regs *new;
-       struct per_regs old;
-
-       /* TIF_SINGLE_STEP overrides the user specified PER registers. */
-       new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
-               &per_single_step : &thread->per_user;
+       struct per_regs old, new;
+
+       /* Copy user specified PER registers */
+       new.control = thread->per_user.control;
+       new.start = thread->per_user.start;
+       new.end = thread->per_user.end;
+
+       /* merge TIF_SINGLE_STEP into user specified PER registers. */
+       if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
+               new.control |= PER_EVENT_IFETCH;
+               new.start = 0;
+               new.end = PSW_ADDR_INSN;
+       }
 
        /* Take care of the PER enablement bit in the PSW. */
-       if (!(new->control & PER_EVENT_MASK)) {
+       if (!(new.control & PER_EVENT_MASK)) {
                regs->psw.mask &= ~PSW_MASK_PER;
                return;
        }
        regs->psw.mask |= PSW_MASK_PER;
        __ctl_store(old, 9, 11);
-       if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
-               __ctl_load(*new, 9, 11);
+       if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
+               __ctl_load(new, 9, 11);
 }
 
 void user_enable_single_step(struct task_struct *task)
@@ -895,6 +897,14 @@ static int s390_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_last_break_set(struct task_struct *target,
+                              const struct user_regset *regset,
+                              unsigned int pos, unsigned int count,
+                              const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 #endif
 
 static const struct user_regset s390_regsets[] = {
@@ -921,6 +931,7 @@ static const struct user_regset s390_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_last_break_get,
+               .set = s390_last_break_set,
        },
 #endif
 };
@@ -1078,6 +1089,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_compat_last_break_set(struct task_struct *target,
+                                     const struct user_regset *regset,
+                                     unsigned int pos, unsigned int count,
+                                     const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 static const struct user_regset s390_compat_regsets[] = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
@@ -1101,6 +1120,7 @@ static const struct user_regset s390_compat_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_compat_last_break_get,
+               .set = s390_compat_last_break_set,
        },
        [REGSET_GENERAL_EXTENDED] = {
                .core_note_type = NT_S390_HIGH_GPRS,
index 0c35dee10b00e93b005114c7b0d031531b7fc253..0260051c08f22ce246396832d9d4b63676f74d00 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/reboot.h>
 #include <linux/topology.h>
 #include <linux/ftrace.h>
+#include <linux/compat.h>
 
 #include <asm/ipl.h>
 #include <asm/uaccess.h>
index 67345ae7ce8d967a6e952070e2969d10ad7dcd5e..2ada634fc7c8dbc049ac4cc419c885c1b9357d8f 100644 (file)
@@ -301,11 +301,17 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
 {
-       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
-       int rc = -ENOMEM;
+       struct kvm_vcpu *vcpu;
+       int rc = -EINVAL;
+
+       if (id >= KVM_MAX_VCPUS)
+               goto out;
+
+       rc = -ENOMEM;
 
+       vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
-               goto out_nomem;
+               goto out;
 
        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);
@@ -341,7 +347,7 @@ out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
 out_free_cpu:
        kfree(vcpu);
-out_nomem:
+out:
        return ERR_PTR(rc);
 }
 
index fe103e891e7a0eb32dc7c67fc0cea3217bd4b6e3..6903d441068eaad2e012d94cb6eeffd32549cda6 100644 (file)
@@ -36,7 +36,6 @@
 #include <asm/pgtable.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
-#include <asm/compat.h>
 #include "../kernel/entry.h"
 
 #ifndef CONFIG_64BIT
@@ -568,6 +567,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
                        tsk->thread.pfault_wait = 0;
                        list_del(&tsk->thread.list);
                        wake_up_process(tsk);
+                       put_task_struct(tsk);
                } else {
                        /* Completion interrupt was faster than initial
                         * interrupt. Set pfault_wait to -1 so the initial
@@ -577,14 +577,22 @@ static void pfault_interrupt(unsigned int ext_int_code,
                put_task_struct(tsk);
        } else {
                /* signal bit not set -> a real page is missing. */
-               if (tsk->thread.pfault_wait == -1) {
+               if (tsk->thread.pfault_wait == 1) {
+                       /* Already on the list with a reference: put to sleep */
+                       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+                       set_tsk_need_resched(tsk);
+               } else if (tsk->thread.pfault_wait == -1) {
                        /* Completion interrupt was faster than the initial
                         * interrupt (pfault_wait == -1). Set pfault_wait
                         * back to zero and exit. */
                        tsk->thread.pfault_wait = 0;
                } else {
                        /* Initial interrupt arrived before completion
-                        * interrupt. Let the task sleep. */
+                        * interrupt. Let the task sleep.
+                        * An extra task reference is needed since a different
+                        * cpu may set the task state to TASK_RUNNING again
+                        * before the scheduler is reached. */
+                       get_task_struct(tsk);
                        tsk->thread.pfault_wait = 1;
                        list_add(&tsk->thread.list, &pfault_list);
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
@@ -609,6 +617,7 @@ static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
                        list_del(&thread->list);
                        tsk = container_of(thread, struct task_struct, thread);
                        wake_up_process(tsk);
+                       put_task_struct(tsk);
                }
                spin_unlock_irq(&pfault_lock);
                break;
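
The pfault hunks above pair get_task_struct()/put_task_struct() around the wait list and handle the completion interrupt overtaking the initial one via the pfault_wait values 0, 1 and -1. A minimal user-space sketch of that state machine follows; the struct and helpers are hypothetical stand-ins for task_struct, list_add()/list_del() and wake_up_process(), not the kernel code, and the signal-bit and pid-lookup reference details are left out.

#include <stdio.h>

/* Hypothetical stand-in for the bits of task_struct the pfault code touches. */
struct task {
	int refcount;     /* get_task_struct()/put_task_struct() analogue */
	int pfault_wait;  /* 0 idle, 1 queued+referenced, -1 completion arrived first */
	int queued;       /* stands in for list_add()/list_del() */
};

static void get_task(struct task *t) { t->refcount++; }
static void put_task(struct task *t) { t->refcount--; }

/* "A real page is missing": the initial interrupt. */
static void pfault_initial(struct task *t)
{
	if (t->pfault_wait == -1) {
		t->pfault_wait = 0;   /* completion was already seen */
	} else if (t->pfault_wait == 0) {
		get_task(t);          /* keep the task alive while it is queued */
		t->pfault_wait = 1;
		t->queued = 1;        /* list_add + sleep in the kernel */
	}
}

/* Completion interrupt for the same task. */
static void pfault_completion(struct task *t)
{
	if (t->pfault_wait == 1) {
		t->pfault_wait = 0;
		t->queued = 0;        /* list_del + wake_up_process */
		put_task(t);          /* drop the queue reference */
	} else {
		t->pfault_wait = -1;  /* completion beat the initial interrupt */
	}
}

int main(void)
{
	struct task a = { .refcount = 1 }, b = { .refcount = 1 };

	pfault_initial(&a);    pfault_completion(&a);  /* normal order */
	pfault_completion(&b); pfault_initial(&b);     /* completion first */

	printf("a: wait=%d ref=%d  b: wait=%d ref=%d\n",
	       a.pfault_wait, a.refcount, b.pfault_wait, b.refcount);
	return 0;
}

In both orderings the reference count ends balanced, which is the property the added get_task_struct()/put_task_struct() calls are there to guarantee.
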
index 45b405ca25673a416ca33bfdd5b20a81c4876264..65cb06e2af4eaa0a7b18b2e3b0f5b4b1af8be9d8 100644 (file)
@@ -52,7 +52,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
 {
        unsigned long mask, result;
-       struct page *head, *page;
+       struct page *head, *page, *tail;
        int refs;
 
        result = write ? 0 : _SEGMENT_ENTRY_RO;
@@ -64,6 +64,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
        refs = 0;
        head = pmd_page(pmd);
        page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+       tail = page;
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
@@ -81,6 +82,17 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
                *nr -= refs;
                while (refs--)
                        put_page(head);
+               return 0;
+       }
+
+       /*
+        * Any tail page needs its mapcount reference taken before we
+        * return.
+        */
+       while (refs--) {
+               if (PageTail(tail))
+                       get_huge_page_tail(tail);
+               tail++;
        }
 
        return 1;
index c9a9f7f1818818cddc4eac89b57fa7e8d6cb65fb..c0cf9ceb38331295a96e63fd78d77fdd79722b5d 100644 (file)
@@ -28,8 +28,8 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/random.h>
+#include <linux/compat.h>
 #include <asm/pgalloc.h>
-#include <asm/compat.h>
 
 static unsigned long stack_maxrandom_size(void)
 {
index 37a23c22370576415441e7542728b79a233f2f58..51b80b9d1f6a19ebac6da2e686b87114cf20abb4 100644 (file)
@@ -243,8 +243,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
        }
 }
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
 static void __page_table_free_rcu(void *table, unsigned bit)
 {
        struct page *page;
@@ -291,8 +289,9 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 
 void __tlb_remove_table(void *_table)
 {
-       void *table = (void *)((unsigned long) _table & PAGE_MASK);
-       unsigned type = (unsigned long) _table & ~PAGE_MASK;
+       const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
+       void *table = (void *)((unsigned long) _table & ~mask);
+       unsigned type = (unsigned long) _table & mask;
 
        if (type)
                __page_table_free_rcu(table, type);
@@ -300,7 +299,66 @@ void __tlb_remove_table(void *_table)
                free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-#endif
+static void tlb_remove_table_smp_sync(void *arg)
+{
+       /* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+       /*
+        * This isn't an RCU grace period and hence the page-tables cannot be
+        * assumed to be actually RCU-freed.
+        *
+        * It is however sufficient for software page-table walkers that rely
+        * on IRQ disabling. See the comment near struct mmu_table_batch.
+        */
+       smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+       __tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+       struct mmu_table_batch *batch;
+       int i;
+
+       batch = container_of(head, struct mmu_table_batch, rcu);
+
+       for (i = 0; i < batch->nr; i++)
+               __tlb_remove_table(batch->tables[i]);
+
+       free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+       struct mmu_table_batch **batch = &tlb->batch;
+
+       if (*batch) {
+               __tlb_flush_mm(tlb->mm);
+               call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+               *batch = NULL;
+       }
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+       struct mmu_table_batch **batch = &tlb->batch;
+
+       if (*batch == NULL) {
+               *batch = (struct mmu_table_batch *)
+                       __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+               if (*batch == NULL) {
+                       __tlb_flush_mm(tlb->mm);
+                       tlb_remove_table_one(table);
+                       return;
+               }
+               (*batch)->nr = 0;
+       }
+       (*batch)->tables[(*batch)->nr++] = table;
+       if ((*batch)->nr == MAX_TABLE_BATCH)
+               tlb_table_flush(tlb);
+}
 
 /*
  * switch on pgstes for its userspace process (for kvm)
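
The pgtable.c hunk above carries its own copies of tlb_remove_table() and tlb_table_flush(): freed page tables are queued in a page-sized batch, released after the TLB flush (via call_rcu_sched() in the kernel), and released immediately, after an IPI, when the batch page cannot be allocated. A rough user-space sketch of the same queue-with-fallback shape, assuming plain malloc()/free() in place of page allocation, RCU and the IPI:

#include <stdio.h>
#include <stdlib.h>

#define MAX_TABLE_BATCH 8   /* small, illustrative batch size */

struct table_batch {
	unsigned int nr;
	void *tables[MAX_TABLE_BATCH];
};

struct gather {
	struct table_batch *batch;
};

/* Stand-in for __tlb_remove_table(): actually release one table. */
static void release_table(void *table)
{
	free(table);
}

/* Stand-in for tlb_table_flush(): release everything queued so far.
 * In the kernel this work is deferred through call_rcu_sched(). */
static void table_flush(struct gather *g)
{
	if (!g->batch)
		return;
	for (unsigned int i = 0; i < g->batch->nr; i++)
		release_table(g->batch->tables[i]);
	free(g->batch);
	g->batch = NULL;
}

/* Stand-in for tlb_remove_table(): queue, or free at once if no batch
 * can be allocated (the kernel additionally syncs other CPUs first). */
static void remove_table(struct gather *g, void *table)
{
	if (!g->batch) {
		g->batch = calloc(1, sizeof(*g->batch));
		if (!g->batch) {
			release_table(table);  /* fallback path */
			return;
		}
	}
	g->batch->tables[g->batch->nr++] = table;
	if (g->batch->nr == MAX_TABLE_BATCH)
		table_flush(g);
}

int main(void)
{
	struct gather g = { 0 };

	for (int i = 0; i < 20; i++)
		remove_table(&g, malloc(64));
	table_flush(&g);   /* final flush, as tlb_finish_mmu() now does */
	puts("all tables released");
	return 0;
}
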
index 0e358c2cffeb5e6bf1955ba6ca2c3de7092e03b3..422110a4385beb81dd0bfb3377ef1b4ca2fa3ac6 100644 (file)
@@ -90,7 +90,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        if (oprofile_started)
index 577abba3fac64a092e0bac9e47b9a51cee7ed415..83bb96079c43c513a832c6dd41a8de32a9cdffe6 100644 (file)
@@ -408,7 +408,7 @@ ENTRY(handle_sys)
        sw      r9, [r0, PT_EPC]
 
        cmpi.c  r27, __NR_syscalls      # check syscall number
-       bgtu    illegal_syscall
+       bgeu    illegal_syscall
 
        slli    r8, r27, 2              # get syscall routine
        la      r11, sys_call_table
index 822d6084195b7397ff7eeff08fe33ce618630303..abcc4dcc2d94180cd511f8a4dff0e506afb47798 100644 (file)
@@ -141,8 +141,13 @@ typedef struct page *pgtable_t;
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_UNCACHED_MAPPING
+#if defined(CONFIG_29BIT)
+#define UNCAC_ADDR(addr)       P2SEGADDR(addr)
+#define CAC_ADDR(addr)         P1SEGADDR(addr)
+#else
 #define UNCAC_ADDR(addr)       ((addr) - PAGE_OFFSET + uncached_start)
 #define CAC_ADDR(addr)         ((addr) - uncached_start + PAGE_OFFSET)
+#endif
 #else
 #define UNCAC_ADDR(addr)       ((addr))
 #define CAC_ADDR(addr)         ((addr))
index b4c2d2b946ddc084bfe48b2d58128d47e2f724aa..e4dd5d5a111506889b5a69284355721fc23c80f7 100644 (file)
@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
        oprofile_perf_exit();
        kfree(sh_pmu_op_name);
@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        ops->backtrace = sh_backtrace;
        return -ENODEV;
 }
-void __exit oprofile_arch_exit(void) {}
+void oprofile_arch_exit(void) {}
 #endif /* CONFIG_HW_PERF_EVENTS */
index 2e794193cd9b98b8c15df04d4283eeccd0eeea58..9e702570001d7a53f1bbc5f5db78cb09b212516f 100644 (file)
@@ -590,6 +590,9 @@ config SYSVIPC_COMPAT
        depends on COMPAT && SYSVIPC
        default y
 
+config KEYS_COMPAT
+       def_bool y if COMPAT && KEYS
+
 endmenu
 
 source "net/Kconfig"
index ad1fb5d969f314823db89f4ea470edef1ac252ea..eddcfb36aafb7da6b7addc5fbb3b9e187022c512 100644 (file)
@@ -31,7 +31,7 @@ UTS_MACHINE    := sparc
 
 #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
 KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
-KBUILD_AFLAGS += -m32
+KBUILD_AFLAGS += -m32 -Wa,-Av8
 
 #LDFLAGS_vmlinux = -N -Ttext 0xf0004000
 #  Since 2.5.40, the first stage is left not btfix-ed.
index 5b31a8e89823699fbe5f99ebf6eb8d34bac8cb46..a790cc657476320831f9753ad0fd2aff9264ca0a 100644 (file)
@@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
 #define kern_addr_valid(addr) \
        (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
 
-extern int io_remap_pfn_range(struct vm_area_struct *vma,
-                             unsigned long from, unsigned long pfn,
-                             unsigned long size, pgprot_t prot);
-
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
  * its high 4 bits.  These macros/functions put it there or get it from there.
@@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
 #define GET_IOSPACE(pfn)               (pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)                   (pfn & 0x0fffffffUL)
 
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+                          unsigned long, pgprot_t);
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+                                    unsigned long from, unsigned long pfn,
+                                    unsigned long size, pgprot_t prot)
+{
+       unsigned long long offset, space, phys_base;
+
+       offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
+       space = GET_IOSPACE(pfn);
+       phys_base = offset | (space << 32ULL);
+
+       return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+}
+
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
 ({                                                                       \
index 1e03c5a6b4f73b30775f763ec014a61f409cf400..98226280423797569806a22ae31fcb00c44e3983 100644 (file)
@@ -750,10 +750,6 @@ static inline bool kern_addr_valid(unsigned long addr)
 
 extern int page_in_phys_avail(unsigned long paddr);
 
-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-                              unsigned long pfn,
-                              unsigned long size, pgprot_t prot);
-
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
  * its high 4 bits.  These macros/functions put it there or get it from there.
@@ -762,6 +758,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 #define GET_IOSPACE(pfn)               (pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)                   (pfn & 0x0fffffffffffffffUL)
 
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+                          unsigned long, pgprot_t);
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+                                    unsigned long from, unsigned long pfn,
+                                    unsigned long size, pgprot_t prot)
+{
+       unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+       int space = GET_IOSPACE(pfn);
+       unsigned long phys_base;
+
+       phys_base = offset | (((unsigned long) space) << 32UL);
+
+       return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+}
+
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
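
Both sparc pgtable headers now open-code io_remap_pfn_range() as an inline that rebuilds the physical base from the encoded pfn before calling remap_pfn_range(). A small stand-alone illustration of that address arithmetic; the page size and the example values are assumptions made purely for the demo:

#include <inttypes.h>
#include <stdio.h>

/* Assumptions for the example: 8 KB pages (PAGE_SHIFT 13) and 64-bit
 * longs, as on sparc64; the kernel uses its own definitions. */
#define PAGE_SHIFT	13
#define BITS_PER_LONG	64

/* Same encoding as the sparc64 header above: iospace in the top 4 bits. */
#define GET_IOSPACE(pfn)	((pfn) >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)		((pfn) & 0x0fffffffffffffffULL)

int main(void)
{
	uint64_t iospace = 0x8;                    /* example I/O space id */
	uint64_t raw_pfn = 0x12345;                /* example page frame   */
	uint64_t encoded = (iospace << (BITS_PER_LONG - 4)) | raw_pfn;

	/* What the new inline io_remap_pfn_range() computes before
	 * handing the rebuilt pfn on to remap_pfn_range(). */
	uint64_t offset    = GET_PFN(encoded) << PAGE_SHIFT;
	uint64_t space     = GET_IOSPACE(encoded);
	uint64_t phys_base = offset | (space << 32);

	printf("encoded pfn = %#" PRIx64 "\n", encoded);
	printf("phys_base   = %#" PRIx64 " (pfn handed on: %#" PRIx64 ")\n",
	       phys_base, phys_base >> PAGE_SHIFT);
	return 0;
}
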
index 7eef3f7419637f84a544ae04ce420a9e5c1e2e88..f5ddc0bae38d7f45a71421dccd74783b4d3baa59 100644 (file)
@@ -268,4 +268,4 @@ static int __init sunfire_init(void)
        return 0;
 }
 
-subsys_initcall(sunfire_init);
+fs_initcall(sunfire_init);
index 7429b47c3acad8adb97ff177c4459fafcb1ae1e3..dcae702fc1f3117b433ea832aae6bc3d9a8cdefe 100644 (file)
@@ -1269,4 +1269,4 @@ static int __init ds_init(void)
        return vio_register_driver(&ds_driver);
 }
 
-subsys_initcall(ds_init);
+fs_initcall(ds_init);
index e27f8ea8656e3e4b9b1c799a6d170cc9739e2cb2..0c218e4c0881fba70c1748e6a4c295e0db280652 100644 (file)
@@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
 
 #else /* CONFIG_SPARC32 */
+
+#include <asm/trap_block.h>
+
 struct popc_3insn_patch_entry {
        unsigned int    addr;
        unsigned int    insns[3];
@@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
        __popc_6insn_patch_end;
 
 extern void __init per_cpu_patch(void);
+extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+                                   struct sun4v_1insn_patch_entry *);
+extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+                                   struct sun4v_2insn_patch_entry *);
 extern void __init sun4v_patch(void);
 extern void __init boot_cpu_id_too_large(int cpu);
 extern unsigned int dcache_parity_tl1_occurred;
index 99ba5baa9497da77a773a2fc9da7600b2ee16816..8172c18d844fadcc56e364b331cefe37f6c08701 100644 (file)
@@ -17,6 +17,8 @@
 #include <asm/processor.h>
 #include <asm/spitfire.h>
 
+#include "entry.h"
+
 #ifdef CONFIG_SPARC64
 
 #include <linux/jump_label.h>
@@ -220,6 +222,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 }
 
 #ifdef CONFIG_SPARC64
+static void do_patch_sections(const Elf_Ehdr *hdr,
+                             const Elf_Shdr *sechdrs)
+{
+       const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
+       char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+       for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+               if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
+                       sun4v_1insn = s;
+               if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
+                       sun4v_2insn = s;
+       }
+
+       if (sun4v_1insn && tlb_type == hypervisor) {
+               void *p = (void *) sun4v_1insn->sh_addr;
+               sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
+       }
+       if (sun4v_2insn && tlb_type == hypervisor) {
+               void *p = (void *) sun4v_2insn->sh_addr;
+               sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
+       }
+}
+
 int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
@@ -227,6 +252,8 @@ int module_finalize(const Elf_Ehdr *hdr,
        /* make jump label nops */
        jump_label_apply_nops(me);
 
+       do_patch_sections(hdr, sechdrs);
+
        /* Cheetah's I-cache is fully coherent.  */
        if (tlb_type == spitfire) {
                unsigned long va;
index b01a06e9ae4efc71a54384a2f3500643cd6ea65f..9e73c4a37ae99a0657fda76cf074da20f692f876 100644 (file)
@@ -848,10 +848,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
        if (!irq)
                return -ENOMEM;
 
-       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
-               return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;
+       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+               return -EINVAL;
 
        return irq;
 }
index 77f1b95e0806bb92dc19fd421acd6f159e7dfab0..9171fc238def230e6e14e92852f2ee2d9b33a8f2 100644 (file)
 
                .text
                .align                  32
-__handle_softirq:
-               call                    do_softirq
-                nop
-               ba,a,pt                 %xcc, __handle_softirq_continue
-                nop
 __handle_preemption:
                call                    schedule
                 wrpr                   %g0, RTRAP_PSTATE, %pstate
@@ -89,9 +84,7 @@ rtrap:
                cmp                     %l1, 0
 
                /* mm/ultra.S:xcall_report_regs KNOWS about this load. */
-               bne,pn                  %icc, __handle_softirq
                 ldx                    [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
-__handle_softirq_continue:
 rtrap_xcall:
                sethi                   %hi(0xf << 20), %l4
                and                     %l1, %l4, %l4
index 3c5bb784214f5944e5fd838cca5836f2d968a699..4e7d3ff0ccb4e8921121515cb77ececa28feb499 100644 (file)
@@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
        }
 }
 
-void __init sun4v_patch(void)
+void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
+                            struct sun4v_1insn_patch_entry *end)
 {
-       extern void sun4v_hvapi_init(void);
-       struct sun4v_1insn_patch_entry *p1;
-       struct sun4v_2insn_patch_entry *p2;
-
-       if (tlb_type != hypervisor)
-               return;
+       while (start < end) {
+               unsigned long addr = start->addr;
 
-       p1 = &__sun4v_1insn_patch;
-       while (p1 < &__sun4v_1insn_patch_end) {
-               unsigned long addr = p1->addr;
-
-               *(unsigned int *) (addr +  0) = p1->insn;
+               *(unsigned int *) (addr +  0) = start->insn;
                wmb();
                __asm__ __volatile__("flush     %0" : : "r" (addr +  0));
 
-               p1++;
+               start++;
        }
+}
 
-       p2 = &__sun4v_2insn_patch;
-       while (p2 < &__sun4v_2insn_patch_end) {
-               unsigned long addr = p2->addr;
+void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+                            struct sun4v_2insn_patch_entry *end)
+{
+       while (start < end) {
+               unsigned long addr = start->addr;
 
-               *(unsigned int *) (addr +  0) = p2->insns[0];
+               *(unsigned int *) (addr +  0) = start->insns[0];
                wmb();
                __asm__ __volatile__("flush     %0" : : "r" (addr +  0));
 
-               *(unsigned int *) (addr +  4) = p2->insns[1];
+               *(unsigned int *) (addr +  4) = start->insns[1];
                wmb();
                __asm__ __volatile__("flush     %0" : : "r" (addr +  4));
 
-               p2++;
+               start++;
        }
+}
+
+void __init sun4v_patch(void)
+{
+       extern void sun4v_hvapi_init(void);
+
+       if (tlb_type != hypervisor)
+               return;
+
+       sun4v_patch_1insn_range(&__sun4v_1insn_patch,
+                               &__sun4v_1insn_patch_end);
+
+       sun4v_patch_2insn_range(&__sun4v_2insn_patch,
+                               &__sun4v_2insn_patch_end);
 
        sun4v_hvapi_init();
 }
index 5d92488fc167f45663a24bb6cf9ee05c954e2953..2e58328c30e0a0532e633a71054fe37ffeef071f 100644 (file)
@@ -829,21 +829,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-void do_signal32(sigset_t *oldset, struct pt_regs * regs,
-                int restart_syscall, unsigned long orig_i0)
+void do_signal32(sigset_t *oldset, struct pt_regs * regs)
 {
        struct k_sigaction ka;
+       unsigned long orig_i0;
+       int restart_syscall;
        siginfo_t info;
        int signr;
        
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
-       /* If the debugger messes with the program counter, it clears
-        * the "in syscall" bit, directing us to not perform a syscall
-        * restart.
-        */
-       if (restart_syscall && !pt_regs_is_syscall(regs))
-               restart_syscall = 0;
+       restart_syscall = 0;
+       orig_i0 = 0;
+       if (pt_regs_is_syscall(regs) &&
+           (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+               restart_syscall = 1;
+               orig_i0 = regs->u_regs[UREG_G6];
+       }
 
        if (signr > 0) {
                if (restart_syscall)
index 04ede8f04add3f397ca46c8ebee249d85c92213c..2302567578ba56223dd5c4bdcaa9958b02d74d89 100644 (file)
@@ -525,10 +525,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        siginfo_t info;
        int signr;
 
+       /* It's a lot of work and synchronization to add a new ptrace
+        * register for GDB to save and restore in order to get
+        * orig_i0 correct for syscall restarts when debugging.
+        *
+        * Although it should be the case that most of the global
+        * registers are volatile across a system call, glibc already
+        * depends upon the fact that we preserve them.  So we can't
+        * just use any global register to save away the orig_i0 value.
+        *
+        * In particular %g2, %g3, %g4, and %g5 are all assumed to be
+        * preserved across a system call trap by various pieces of
+        * code in glibc.
+        *
+        * %g7 is used as the "thread register".   %g6 is not used in
+        * any fixed manner.  %g6 is used as a scratch register and
+        * a compiler temporary, but its value is never used across
+        * a system call.  Therefore %g6 is usable for orig_i0 storage.
+        */
        if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
-               restart_syscall = 1;
-       else
-               restart_syscall = 0;
+               regs->u_regs[UREG_G6] = orig_i0;
 
        if (test_thread_flag(TIF_RESTORE_SIGMASK))
                oldset = &current->saved_sigmask;
@@ -541,8 +557,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
         * the software "in syscall" bit, directing us to not perform
         * a syscall restart.
         */
-       if (restart_syscall && !pt_regs_is_syscall(regs))
-               restart_syscall = 0;
+       restart_syscall = 0;
+       if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
+               restart_syscall = 1;
+               orig_i0 = regs->u_regs[UREG_G6];
+       }
+
 
        if (signr > 0) {
                if (restart_syscall)
index 47509df3b893acfb365ec503cbe00f2dabeea2ac..d58260bff2dfcffeb2fd148be723a9f62f3de03b 100644 (file)
@@ -535,11 +535,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        siginfo_t info;
        int signr;
        
+       /* It's a lot of work and synchronization to add a new ptrace
+        * register for GDB to save and restore in order to get
+        * orig_i0 correct for syscall restarts when debugging.
+        *
+        * Although it should be the case that most of the global
+        * registers are volatile across a system call, glibc already
+        * depends upon the fact that we preserve them.  So we can't
+        * just use any global register to save away the orig_i0 value.
+        *
+        * In particular %g2, %g3, %g4, and %g5 are all assumed to be
+        * preserved across a system call trap by various pieces of
+        * code in glibc.
+        *
+        * %g7 is used as the "thread register".   %g6 is not used in
+        * any fixed manner.  %g6 is used as a scratch register and
+        * a compiler temporary, but its value is never used across
+        * a system call.  Therefore %g6 is usable for orig_i0 storage.
+        */
        if (pt_regs_is_syscall(regs) &&
-           (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
-               restart_syscall = 1;
-       } else
-               restart_syscall = 0;
+           (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
+               regs->u_regs[UREG_G6] = orig_i0;
 
        if (current_thread_info()->status & TS_RESTORE_SIGMASK)
                oldset = &current->saved_sigmask;
@@ -548,22 +564,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 
 #ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
-               extern void do_signal32(sigset_t *, struct pt_regs *,
-                                       int restart_syscall,
-                                       unsigned long orig_i0);
-               do_signal32(oldset, regs, restart_syscall, orig_i0);
+               extern void do_signal32(sigset_t *, struct pt_regs *);
+               do_signal32(oldset, regs);
                return;
        }
 #endif 
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
-       /* If the debugger messes with the program counter, it clears
-        * the software "in syscall" bit, directing us to not perform
-        * a syscall restart.
-        */
-       if (restart_syscall && !pt_regs_is_syscall(regs))
-               restart_syscall = 0;
+       restart_syscall = 0;
+       if (pt_regs_is_syscall(regs) &&
+           (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+               restart_syscall = 1;
+               orig_i0 = regs->u_regs[UREG_G6];
+       }
 
        if (signr > 0) {
                if (restart_syscall)
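
The signal hunks above stop passing restart_syscall/orig_i0 into do_signal32() and instead stash orig_i0 in %g6 on entry, then re-test the carry bit and re-read %g6 after get_signal_to_deliver(), since the tracer may modify the registers in between. A simplified user-space illustration of that flow; the register-file struct and the pc adjustment are hypothetical, not the sparc trap frame:

#include <stdio.h>

/* Hypothetical mini register file; only the fields this flow touches. */
struct regs {
	unsigned long g6;     /* scratch global, per the comment above */
	unsigned long i0;     /* syscall argument 0 / return value slot */
	unsigned long pc;
	int in_syscall;       /* pt_regs_is_syscall() analogue */
	int carry;            /* PSR_C / TSTATE_ICARRY analogue: error return */
};

/* What a debugger might do while get_signal_to_deliver() runs. */
static void debugger_pokes(struct regs *r)
{
	r->i0 = 0xdead;       /* clobbers the visible i0 ... */
	/* ... but leaves g6 alone, which is the whole point. */
}

static void do_signal(struct regs *r, unsigned long orig_i0, int sig_restart)
{
	/* Entry: stash orig_i0 where ptrace does not need to know about it. */
	if (r->in_syscall && r->carry)
		r->g6 = orig_i0;

	debugger_pokes(r);

	/* Re-test and re-read, exactly as the patched code does. */
	int restart = 0;
	if (r->in_syscall && r->carry) {
		restart = 1;
		orig_i0 = r->g6;
	}

	if (restart && sig_restart) {
		r->i0 = orig_i0;  /* restore the original argument ... */
		r->pc -= 4;       /* ... and back up to re-issue the trap
				   * (schematic; the real code adjusts pc/npc) */
	}
}

int main(void)
{
	struct regs r = { .i0 = 42, .pc = 0x1008, .in_syscall = 1, .carry = 1 };

	do_signal(&r, 42, 1);
	printf("i0=%lu pc=%#lx\n", r.i0, r.pc);   /* i0=42 pc=0x1004 */
	return 0;
}
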
index f566518483b5bcda9998d635adabc18a18354373..248fb676336208ec21b060162426dda9b029d314 100644 (file)
@@ -74,7 +74,7 @@ sys_call_table32:
        .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
 /*270*/        .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
        .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
-/*280*/        .word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
+/*280*/        .word sys32_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat
        .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64
 /*290*/        .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
        .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
index 36357717d691019ff4772c7a4aa2993c68f13c35..9384a0cbeba4721632a75f8193efafccd0990c40 100644 (file)
@@ -713,17 +713,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
                        s16 b = (rs2 >> (i * 16)) & 0xffff;
 
                        if (a > b)
-                               rd_val |= 1 << i;
+                               rd_val |= 8 >> i;
                }
                break;
 
        case FCMPGT32_OPF:
                for (i = 0; i < 2; i++) {
-                       s32 a = (rs1 >> (i * 32)) & 0xffff;
-                       s32 b = (rs2 >> (i * 32)) & 0xffff;
+                       s32 a = (rs1 >> (i * 32)) & 0xffffffff;
+                       s32 b = (rs2 >> (i * 32)) & 0xffffffff;
 
                        if (a > b)
-                               rd_val |= 1 << i;
+                               rd_val |= 2 >> i;
                }
                break;
 
@@ -733,17 +733,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
                        s16 b = (rs2 >> (i * 16)) & 0xffff;
 
                        if (a <= b)
-                               rd_val |= 1 << i;
+                               rd_val |= 8 >> i;
                }
                break;
 
        case FCMPLE32_OPF:
                for (i = 0; i < 2; i++) {
-                       s32 a = (rs1 >> (i * 32)) & 0xffff;
-                       s32 b = (rs2 >> (i * 32)) & 0xffff;
+                       s32 a = (rs1 >> (i * 32)) & 0xffffffff;
+                       s32 b = (rs2 >> (i * 32)) & 0xffffffff;
 
                        if (a <= b)
-                               rd_val |= 1 << i;
+                               rd_val |= 2 >> i;
                }
                break;
 
@@ -753,17 +753,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
                        s16 b = (rs2 >> (i * 16)) & 0xffff;
 
                        if (a != b)
-                               rd_val |= 1 << i;
+                               rd_val |= 8 >> i;
                }
                break;
 
        case FCMPNE32_OPF:
                for (i = 0; i < 2; i++) {
-                       s32 a = (rs1 >> (i * 32)) & 0xffff;
-                       s32 b = (rs2 >> (i * 32)) & 0xffff;
+                       s32 a = (rs1 >> (i * 32)) & 0xffffffff;
+                       s32 b = (rs2 >> (i * 32)) & 0xffffffff;
 
                        if (a != b)
-                               rd_val |= 1 << i;
+                               rd_val |= 2 >> i;
                }
                break;
 
@@ -773,17 +773,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
                        s16 b = (rs2 >> (i * 16)) & 0xffff;
 
                        if (a == b)
-                               rd_val |= 1 << i;
+                               rd_val |= 8 >> i;
                }
                break;
 
        case FCMPEQ32_OPF:
                for (i = 0; i < 2; i++) {
-                       s32 a = (rs1 >> (i * 32)) & 0xffff;
-                       s32 b = (rs2 >> (i * 32)) & 0xffff;
+                       s32 a = (rs1 >> (i * 32)) & 0xffffffff;
+                       s32 b = (rs2 >> (i * 32)) & 0xffffffff;
 
                        if (a == b)
-                               rd_val |= 1 << i;
+                               rd_val |= 2 >> i;
                }
                break;
        }
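
The visemul.c hunk gives the emulated partitioned compares two fixes: the 32-bit variants mask each lane with 0xffffffff instead of 0xffff, and the result bit for loop index i (least significant lane first) becomes 8 >> i for 16-bit lanes and 2 >> i for 32-bit lanes, so the lowest lane sets the highest result bit. A small stand-alone check of the corrected FCMPGT32-style bit placement; this is only the bit arithmetic, not the emulator:

#include <inttypes.h>
#include <stdio.h>

/* Two-lane, 32-bit greater-than compare over a 64-bit register pair,
 * mirroring the corrected FCMPGT32 loop above: loop index 0 is the low
 * word and its result goes into result bit 1 (2 >> 0); the high word's
 * result goes into bit 0 (2 >> 1). */
static unsigned long fcmpgt32(uint64_t rs1, uint64_t rs2)
{
	unsigned long rd_val = 0;

	for (int i = 0; i < 2; i++) {
		int32_t a = (rs1 >> (i * 32)) & 0xffffffff;
		int32_t b = (rs2 >> (i * 32)) & 0xffffffff;

		if (a > b)
			rd_val |= 2 >> i;
	}
	return rd_val;
}

int main(void)
{
	/* high lane: 5 > 1 (true), low lane: 3 > 7 (false) */
	uint64_t rs1 = ((uint64_t)5 << 32) | 3;
	uint64_t rs2 = ((uint64_t)1 << 32) | 7;

	printf("rd = %#lx\n", fcmpgt32(rs1, rs2));  /* 0x1: high lane -> bit 0 */
	return 0;
}
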
index 34fe65751737dba67b9df52f55fe165e6d2202e4..4d8c497517bd6f3f17110f2a3b4e6b6fe204e39e 100644 (file)
@@ -7,40 +7,12 @@
  * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
-#ifdef __KERNEL__
-
-#define FUNC(x)                                                                                        \
+#define FUNC(x)                \
        .globl  x;              \
        .type   x,@function;    \
-       .align  4;                                                                                      \
+       .align  4;              \
 x:
 
-#undef FASTER_REVERSE
-#undef FASTER_NONALIGNED
-#define FASTER_ALIGNED
-
-/* In kernel these functions don't return a value.
- * One should use macros in asm/string.h for that purpose.
- * We return 0, so that bugs are more apparent.
- */
-#define SETUP_RETL
-#define RETL_INSN      clr     %o0
-
-#else
-
-/* libc */
-
-#include "DEFS.h"
-
-#define FASTER_REVERSE
-#define FASTER_NONALIGNED
-#define FASTER_ALIGNED
-
-#define SETUP_RETL     mov     %o0, %g6
-#define RETL_INSN      mov     %g6, %o0
-
-#endif
-
 /* Both these macros have to start with exactly the same insn */
 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
        ldd     [%src + (offset) + 0x00], %t0; \
@@ -164,30 +136,6 @@ x:
        .text
        .align  4
 
-#ifdef FASTER_REVERSE
-
-70:    /* rdword_align */
-
-       andcc           %o1, 1, %g0
-       be              4f
-        andcc          %o1, 2, %g0
-
-       ldub            [%o1 - 1], %g2
-       sub             %o1, 1, %o1
-       stb             %g2, [%o0 - 1]
-       sub             %o2, 1, %o2
-       be              3f
-        sub            %o0, 1, %o0
-4:
-       lduh            [%o1 - 2], %g2
-       sub             %o1, 2, %o1
-       sth             %g2, [%o0 - 2]
-       sub             %o2, 2, %o2
-       b               3f
-        sub            %o0, 2, %o0
-
-#endif /* FASTER_REVERSE */
-
 0:
        retl
         nop            ! Only bcopy returns here and it returns void...
@@ -198,7 +146,7 @@ FUNC(__memmove)
 #endif
 FUNC(memmove)
        cmp             %o0, %o1
-       SETUP_RETL
+       mov             %o0, %g7
        bleu            9f
         sub            %o0, %o1, %o4
 
@@ -207,8 +155,6 @@ FUNC(memmove)
        bleu            0f
         andcc          %o4, 3, %o5
 
-#ifndef FASTER_REVERSE
-
        add             %o1, %o2, %o1
        add             %o0, %o2, %o0
        sub             %o1, 1, %o1
@@ -224,295 +170,7 @@ FUNC(memmove)
         sub            %o0, 1, %o0
 
        retl
-        RETL_INSN
-
-#else /* FASTER_REVERSE */
-
-       add             %o1, %o2, %o1
-       add             %o0, %o2, %o0
-       bne             77f
-        cmp            %o2, 15
-       bleu            91f
-        andcc          %o1, 3, %g0
-       bne             70b
-3:
-        andcc          %o1, 4, %g0
-
-       be              2f
-        mov            %o2, %g1
-
-       ld              [%o1 - 4], %o4
-       sub             %g1, 4, %g1
-       st              %o4, [%o0 - 4]
-       sub             %o1, 4, %o1
-       sub             %o0, 4, %o0
-2:
-       andcc           %g1, 0xffffff80, %g7
-       be              3f
-        andcc          %o0, 4, %g0
-
-       be              74f + 4
-5:
-       RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
-       RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
-       RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
-       RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-       subcc           %g7, 128, %g7
-       sub             %o1, 128, %o1
-       bne             5b
-        sub            %o0, 128, %o0
-3:
-       andcc           %g1, 0x70, %g7
-       be              72f
-        andcc          %g1, 8, %g0
-
-       sethi           %hi(72f), %o5
-       srl             %g7, 1, %o4
-       add             %g7, %o4, %o4
-       sub             %o1, %g7, %o1
-       sub             %o5, %o4, %o5
-       jmpl            %o5 + %lo(72f), %g0
-        sub            %o0, %g7, %o0
-
-71:    /* rmemcpy_table */
-       RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
-       RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
-       RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
-       RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
-       RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
-       RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
-       RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
-
-72:    /* rmemcpy_table_end */
-
-       be              73f
-        andcc          %g1, 4, %g0
-
-       ldd             [%o1 - 0x08], %g2
-       sub             %o0, 8, %o0
-       sub             %o1, 8, %o1
-       st              %g2, [%o0]
-       st              %g3, [%o0 + 0x04]
-
-73:    /* rmemcpy_last7 */
-
-       be              1f
-        andcc          %g1, 2, %g0
-
-       ld              [%o1 - 4], %g2
-       sub             %o1, 4, %o1
-       st              %g2, [%o0 - 4]
-       sub             %o0, 4, %o0
-1:
-       be              1f
-        andcc          %g1, 1, %g0
-
-       lduh            [%o1 - 2], %g2
-       sub             %o1, 2, %o1
-       sth             %g2, [%o0 - 2]
-       sub             %o0, 2, %o0
-1:
-       be              1f
-        nop
-
-       ldub            [%o1 - 1], %g2
-       stb             %g2, [%o0 - 1]
-1:
-       retl
-        RETL_INSN
-
-74:    /* rldd_std */
-       RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
-       RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
-       RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
-       RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-       subcc           %g7, 128, %g7
-       sub             %o1, 128, %o1
-       bne             74b
-        sub            %o0, 128, %o0
-
-       andcc           %g1, 0x70, %g7
-       be              72b
-        andcc          %g1, 8, %g0
-
-       sethi           %hi(72b), %o5
-       srl             %g7, 1, %o4
-       add             %g7, %o4, %o4
-       sub             %o1, %g7, %o1
-       sub             %o5, %o4, %o5
-       jmpl            %o5 + %lo(72b), %g0
-        sub            %o0, %g7, %o0
-
-75:    /* rshort_end */
-
-       and             %o2, 0xe, %o3
-2:
-       sethi           %hi(76f), %o5
-       sll             %o3, 3, %o4
-       sub             %o0, %o3, %o0
-       sub             %o5, %o4, %o5
-       sub             %o1, %o3, %o1
-       jmpl            %o5 + %lo(76f), %g0
-        andcc          %o2, 1, %g0
-
-       RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
-       RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
-       RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
-       RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
-       RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
-       RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
-       RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
-
-76:    /* rshort_table_end */
-
-       be              1f
-        nop
-       ldub            [%o1 - 1], %g2
-       stb             %g2, [%o0 - 1]
-1:
-       retl
-        RETL_INSN
-
-91:    /* rshort_aligned_end */
-
-       bne             75b
-        andcc          %o2, 8, %g0
-
-       be              1f
-        andcc          %o2, 4, %g0
-
-       ld              [%o1 - 0x08], %g2
-       ld              [%o1 - 0x04], %g3
-       sub             %o1, 8, %o1
-       st              %g2, [%o0 - 0x08]
-       st              %g3, [%o0 - 0x04]
-       sub             %o0, 8, %o0
-1:
-       b               73b
-        mov            %o2, %g1
-
-77:    /* rnon_aligned */
-       cmp             %o2, 15
-       bleu            75b
-        andcc          %o0, 3, %g0
-       be              64f
-        andcc          %o0, 1, %g0
-       be              63f
-        andcc          %o0, 2, %g0
-       ldub            [%o1 - 1], %g5
-       sub             %o1, 1, %o1
-       stb             %g5, [%o0 - 1]
-       sub             %o0, 1, %o0
-       be              64f
-        sub            %o2, 1, %o2
-63:
-       ldub            [%o1 - 1], %g5
-       sub             %o1, 2, %o1
-       stb             %g5, [%o0 - 1]
-       sub             %o0, 2, %o0
-       ldub            [%o1], %g5
-       sub             %o2, 2, %o2
-       stb             %g5, [%o0]
-64:    
-       and             %o1, 3, %g2
-       and             %o1, -4, %o1
-       and             %o2, 0xc, %g3
-       add             %o1, 4, %o1
-       cmp             %g3, 4
-       sll             %g2, 3, %g4
-       mov             32, %g2
-       be              4f
-        sub            %g2, %g4, %g7
-
-       blu             3f
-        cmp            %g3, 8
-
-       be              2f
-        srl            %o2, 2, %g3
-
-       ld              [%o1 - 4], %o3
-       add             %o0, -8, %o0
-       ld              [%o1 - 8], %o4
-       add             %o1, -16, %o1
-       b               7f
-        add            %g3, 1, %g3
-2:
-       ld              [%o1 - 4], %o4
-       add             %o0, -4, %o0
-       ld              [%o1 - 8], %g1
-       add             %o1, -12, %o1
-       b               8f
-        add            %g3, 2, %g3
-3:
-       ld              [%o1 - 4], %o5
-       add             %o0, -12, %o0
-       ld              [%o1 - 8], %o3
-       add             %o1, -20, %o1
-       b               6f
-        srl            %o2, 2, %g3
-4:
-       ld              [%o1 - 4], %g1
-       srl             %o2, 2, %g3
-       ld              [%o1 - 8], %o5
-       add             %o1, -24, %o1
-       add             %o0, -16, %o0
-       add             %g3, -1, %g3
-
-       ld              [%o1 + 12], %o3
-5:
-       sll             %o5, %g4, %g2
-       srl             %g1, %g7, %g5
-       or              %g2, %g5, %g2
-       st              %g2, [%o0 + 12]
-6:
-       ld              [%o1 + 8], %o4
-       sll             %o3, %g4, %g2
-       srl             %o5, %g7, %g5
-       or              %g2, %g5, %g2
-       st              %g2, [%o0 + 8]
-7:
-       ld              [%o1 + 4], %g1
-       sll             %o4, %g4, %g2
-       srl             %o3, %g7, %g5
-       or              %g2, %g5, %g2
-       st              %g2, [%o0 + 4]
-8:
-       ld              [%o1], %o5
-       sll             %g1, %g4, %g2
-       srl             %o4, %g7, %g5
-       addcc           %g3, -4, %g3
-       or              %g2, %g5, %g2
-       add             %o1, -16, %o1
-       st              %g2, [%o0]
-       add             %o0, -16, %o0
-       bne,a           5b      
-        ld             [%o1 + 12], %o3
-       sll             %o5, %g4, %g2
-       srl             %g1, %g7, %g5
-       srl             %g4, 3, %g3
-       or              %g2, %g5, %g2
-       add             %o1, %g3, %o1
-       andcc           %o2, 2, %g0
-       st              %g2, [%o0 + 12]
-       be              1f
-        andcc          %o2, 1, %g0
-       
-       ldub            [%o1 + 15], %g5
-       add             %o1, -2, %o1
-       stb             %g5, [%o0 + 11]
-       add             %o0, -2, %o0
-       ldub            [%o1 + 16], %g5
-       stb             %g5, [%o0 + 12]
-1:
-       be              1f
-        nop
-       ldub            [%o1 + 15], %g5
-       stb             %g5, [%o0 + 11]
-1:
-       retl
-        RETL_INSN
-
-#endif /* FASTER_REVERSE */
+        mov            %g7, %o0
 
 /* NOTE: This code is executed just for the cases,
          where %src (=%o1) & 3 is != 0.
@@ -546,7 +204,7 @@ FUNC(memmove)
 FUNC(memcpy)   /* %o0=dst %o1=src %o2=len */
 
        sub             %o0, %o1, %o4
-       SETUP_RETL
+       mov             %o0, %g7
 9:
        andcc           %o4, 3, %o5
 0:
@@ -569,7 +227,7 @@ FUNC(memcpy)        /* %o0=dst %o1=src %o2=len */
        add             %o1, 4, %o1
        add             %o0, 4, %o0
 2:
-       andcc           %g1, 0xffffff80, %g7
+       andcc           %g1, 0xffffff80, %g0
        be              3f
         andcc          %o0, 4, %g0
 
@@ -579,22 +237,23 @@ FUNC(memcpy)      /* %o0=dst %o1=src %o2=len */
        MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-       subcc           %g7, 128, %g7
+       sub             %g1, 128, %g1
        add             %o1, 128, %o1
-       bne             5b
+       cmp             %g1, 128
+       bge             5b
         add            %o0, 128, %o0
 3:
-       andcc           %g1, 0x70, %g7
+       andcc           %g1, 0x70, %g4
        be              80f
         andcc          %g1, 8, %g0
 
        sethi           %hi(80f), %o5
-       srl             %g7, 1, %o4
-       add             %g7, %o4, %o4
-       add             %o1, %g7, %o1
+       srl             %g4, 1, %o4
+       add             %g4, %o4, %o4
+       add             %o1, %g4, %o1
        sub             %o5, %o4, %o5
        jmpl            %o5 + %lo(80f), %g0
-        add            %o0, %g7, %o0
+        add            %o0, %g4, %o0
 
 79:    /* memcpy_table */
 
@@ -641,43 +300,28 @@ FUNC(memcpy)      /* %o0=dst %o1=src %o2=len */
        stb             %g2, [%o0]
 1:
        retl
-        RETL_INSN
+        mov            %g7, %o0
 
 82:    /* ldd_std */
        MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-       subcc           %g7, 128, %g7
+       subcc           %g1, 128, %g1
        add             %o1, 128, %o1
-       bne             82b
+       cmp             %g1, 128
+       bge             82b
         add            %o0, 128, %o0
 
-#ifndef FASTER_ALIGNED
-
-       andcc           %g1, 0x70, %g7
-       be              80b
-        andcc          %g1, 8, %g0
-
-       sethi           %hi(80b), %o5
-       srl             %g7, 1, %o4
-       add             %g7, %o4, %o4
-       add             %o1, %g7, %o1
-       sub             %o5, %o4, %o5
-       jmpl            %o5 + %lo(80b), %g0
-        add            %o0, %g7, %o0
-
-#else /* FASTER_ALIGNED */
-
-       andcc           %g1, 0x70, %g7
+       andcc           %g1, 0x70, %g4
        be              84f
         andcc          %g1, 8, %g0
 
        sethi           %hi(84f), %o5
-       add             %o1, %g7, %o1
-       sub             %o5, %g7, %o5
+       add             %o1, %g4, %o1
+       sub             %o5, %g4, %o5
        jmpl            %o5 + %lo(84f), %g0
-        add            %o0, %g7, %o0
+        add            %o0, %g4, %o0
 
 83:    /* amemcpy_table */
 
@@ -721,382 +365,132 @@ FUNC(memcpy)    /* %o0=dst %o1=src %o2=len */
        stb             %g2, [%o0]
 1:
        retl
-        RETL_INSN
-
-#endif /* FASTER_ALIGNED */
+        mov            %g7, %o0
 
 86:    /* non_aligned */
        cmp             %o2, 6
        bleu            88f
+        nop
 
-#ifdef FASTER_NONALIGNED
-
-        cmp            %o2, 256
-       bcc             87f
-
-#endif /* FASTER_NONALIGNED */
-
-        andcc          %o0, 3, %g0
+       save            %sp, -96, %sp
+       andcc           %i0, 3, %g0
        be              61f
-        andcc          %o0, 1, %g0
+        andcc          %i0, 1, %g0
        be              60f
-        andcc          %o0, 2, %g0
+        andcc          %i0, 2, %g0
 
-       ldub            [%o1], %g5
-       add             %o1, 1, %o1
-       stb             %g5, [%o0]
-       sub             %o2, 1, %o2
+       ldub            [%i1], %g5
+       add             %i1, 1, %i1
+       stb             %g5, [%i0]
+       sub             %i2, 1, %i2
        bne             61f
-        add            %o0, 1, %o0
+        add            %i0, 1, %i0
 60:
-       ldub            [%o1], %g3
-       add             %o1, 2, %o1
-       stb             %g3, [%o0]
-       sub             %o2, 2, %o2
-       ldub            [%o1 - 1], %g3
-       add             %o0, 2, %o0
-       stb             %g3, [%o0 - 1]
+       ldub            [%i1], %g3
+       add             %i1, 2, %i1
+       stb             %g3, [%i0]
+       sub             %i2, 2, %i2
+       ldub            [%i1 - 1], %g3
+       add             %i0, 2, %i0
+       stb             %g3, [%i0 - 1]
 61:
-       and             %o1, 3, %g2
-       and             %o2, 0xc, %g3
-       and             %o1, -4, %o1
+       and             %i1, 3, %g2
+       and             %i2, 0xc, %g3
+       and             %i1, -4, %i1
        cmp             %g3, 4
        sll             %g2, 3, %g4
        mov             32, %g2
        be              4f
-        sub            %g2, %g4, %g7
+        sub            %g2, %g4, %l0
        
        blu             3f
         cmp            %g3, 0x8
 
        be              2f
-        srl            %o2, 2, %g3
+        srl            %i2, 2, %g3
 
-       ld              [%o1], %o3
-       add             %o0, -8, %o0
-       ld              [%o1 + 4], %o4
+       ld              [%i1], %i3
+       add             %i0, -8, %i0
+       ld              [%i1 + 4], %i4
        b               8f
         add            %g3, 1, %g3
 2:
-       ld              [%o1], %o4
-       add             %o0, -12, %o0
-       ld              [%o1 + 4], %o5
+       ld              [%i1], %i4
+       add             %i0, -12, %i0
+       ld              [%i1 + 4], %i5
        add             %g3, 2, %g3
        b               9f
-        add            %o1, -4, %o1
+        add            %i1, -4, %i1
 3:
-       ld              [%o1], %g1
-       add             %o0, -4, %o0
-       ld              [%o1 + 4], %o3
-       srl             %o2, 2, %g3
+       ld              [%i1], %g1
+       add             %i0, -4, %i0
+       ld              [%i1 + 4], %i3
+       srl             %i2, 2, %g3
        b               7f
-        add            %o1, 4, %o1
+        add            %i1, 4, %i1
 4:
-       ld              [%o1], %o5
-       cmp             %o2, 7
-       ld              [%o1 + 4], %g1
-       srl             %o2, 2, %g3
+       ld              [%i1], %i5
+       cmp             %i2, 7
+       ld              [%i1 + 4], %g1
+       srl             %i2, 2, %g3
        bleu            10f
-        add            %o1, 8, %o1
+        add            %i1, 8, %i1
 
-       ld              [%o1], %o3
+       ld              [%i1], %i3
        add             %g3, -1, %g3
 5:
-       sll             %o5, %g4, %g2
-       srl             %g1, %g7, %g5
+       sll             %i5, %g4, %g2
+       srl             %g1, %l0, %g5
        or              %g2, %g5, %g2
-       st              %g2, [%o0]
+       st              %g2, [%i0]
 7:
-       ld              [%o1 + 4], %o4
+       ld              [%i1 + 4], %i4
        sll             %g1, %g4, %g2
-       srl             %o3, %g7, %g5
+       srl             %i3, %l0, %g5
        or              %g2, %g5, %g2
-       st              %g2, [%o0 + 4]
+       st              %g2, [%i0 + 4]
 8:
-       ld              [%o1 + 8], %o5
-       sll             %o3, %g4, %g2
-       srl             %o4, %g7, %g5
+       ld              [%i1 + 8], %i5
+       sll             %i3, %g4, %g2
+       srl             %i4, %l0, %g5
        or              %g2, %g5, %g2
-       st              %g2, [%o0 + 8]
+       st              %g2, [%i0 + 8]
 9:
-       ld              [%o1 + 12], %g1
-       sll             %o4, %g4, %g2
-       srl             %o5, %g7, %g5
+       ld              [%i1 + 12], %g1
+       sll             %i4, %g4, %g2
+       srl             %i5, %l0, %g5
        addcc           %g3, -4, %g3
        or              %g2, %g5, %g2
-       add             %o1, 16, %o1
-       st              %g2, [%o0 + 12]
-       add             %o0, 16, %o0
+       add             %i1, 16, %i1
+       st              %g2, [%i0 + 12]
+       add             %i0, 16, %i0
        bne,a           5b
-        ld             [%o1], %o3
+        ld             [%i1], %i3
 10:
-       sll             %o5, %g4, %g2
-       srl             %g1, %g7, %g5
-       srl             %g7, 3, %g3
+       sll             %i5, %g4, %g2
+       srl             %g1, %l0, %g5
+       srl             %l0, 3, %g3
        or              %g2, %g5, %g2
-       sub             %o1, %g3, %o1
-       andcc           %o2, 2, %g0
-       st              %g2, [%o0]
+       sub             %i1, %g3, %i1
+       andcc           %i2, 2, %g0
+       st              %g2, [%i0]
        be              1f
-        andcc          %o2, 1, %g0
-
-       ldub            [%o1], %g2
-       add             %o1, 2, %o1
-       stb             %g2, [%o0 + 4]
-       add             %o0, 2, %o0
-       ldub            [%o1 - 1], %g2
-       stb             %g2, [%o0 + 3]
+        andcc          %i2, 1, %g0
+
+       ldub            [%i1], %g2
+       add             %i1, 2, %i1
+       stb             %g2, [%i0 + 4]
+       add             %i0, 2, %i0
+       ldub            [%i1 - 1], %g2
+       stb             %g2, [%i0 + 3]
 1:
        be              1f
         nop
-       ldub            [%o1], %g2
-       stb             %g2, [%o0 + 4]
-1:
-       retl
-        RETL_INSN
-
-#ifdef FASTER_NONALIGNED
-
-87:    /* faster_nonaligned */
-
-       andcc           %o1, 3, %g0
-       be              3f
-        andcc          %o1, 1, %g0
-
-       be              4f
-        andcc          %o1, 2, %g0
-
-       ldub            [%o1], %g2
-       add             %o1, 1, %o1
-       stb             %g2, [%o0]
-       sub             %o2, 1, %o2
-       bne             3f
-        add            %o0, 1, %o0
-4:
-       lduh            [%o1], %g2
-       add             %o1, 2, %o1
-       srl             %g2, 8, %g3
-       sub             %o2, 2, %o2
-       stb             %g3, [%o0]
-       add             %o0, 2, %o0
-       stb             %g2, [%o0 - 1]
-3:
-        andcc          %o1, 4, %g0
-
-       bne             2f
-        cmp            %o5, 1
-
-       ld              [%o1], %o4
-       srl             %o4, 24, %g2
-       stb             %g2, [%o0]
-       srl             %o4, 16, %g3
-       stb             %g3, [%o0 + 1]
-       srl             %o4, 8, %g2
-       stb             %g2, [%o0 + 2]
-       sub             %o2, 4, %o2
-       stb             %o4, [%o0 + 3]
-       add             %o1, 4, %o1
-       add             %o0, 4, %o0
-2:
-       be              33f
-        cmp            %o5, 2
-       be              32f
-        sub            %o2, 4, %o2
-31:
-       ld              [%o1], %g2
-       add             %o1, 4, %o1
-       srl             %g2, 24, %g3
-       and             %o0, 7, %g5
-       stb             %g3, [%o0]
-       cmp             %g5, 7
-       sll             %g2, 8, %g1
-       add             %o0, 4, %o0
-       be              41f
-        and            %o2, 0xffffffc0, %o3
-       ld              [%o0 - 7], %o4
-4:
-       SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       subcc           %o3, 64, %o3
-       add             %o1, 64, %o1
-       bne             4b
-        add            %o0, 64, %o0
-
-       andcc           %o2, 0x30, %o3
-       be,a            1f
-        srl            %g1, 16, %g2
-4:
-       SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       subcc           %o3, 16, %o3
-       add             %o1, 16, %o1
-       bne             4b
-        add            %o0, 16, %o0
-
-       srl             %g1, 16, %g2
-1:
-       st              %o4, [%o0 - 7]
-       sth             %g2, [%o0 - 3]
-       srl             %g1, 8, %g4
-       b               88f
-        stb            %g4, [%o0 - 1]
-32:
-       ld              [%o1], %g2
-       add             %o1, 4, %o1
-       srl             %g2, 16, %g3
-       and             %o0, 7, %g5
-       sth             %g3, [%o0]
-       cmp             %g5, 6
-       sll             %g2, 16, %g1
-       add             %o0, 4, %o0
-       be              42f
-        and            %o2, 0xffffffc0, %o3
-       ld              [%o0 - 6], %o4
-4:
-       SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       subcc           %o3, 64, %o3
-       add             %o1, 64, %o1
-       bne             4b
-        add            %o0, 64, %o0
-
-       andcc           %o2, 0x30, %o3
-       be,a            1f
-        srl            %g1, 16, %g2
-4:
-       SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       subcc           %o3, 16, %o3
-       add             %o1, 16, %o1
-       bne             4b
-        add            %o0, 16, %o0
-
-       srl             %g1, 16, %g2
-1:
-       st              %o4, [%o0 - 6]
-       b               88f
-        sth            %g2, [%o0 - 2]
-33:
-       ld              [%o1], %g2
-       sub             %o2, 4, %o2
-       srl             %g2, 24, %g3
-       and             %o0, 7, %g5
-       stb             %g3, [%o0]
-       cmp             %g5, 5
-       srl             %g2, 8, %g4
-       sll             %g2, 24, %g1
-       sth             %g4, [%o0 + 1]
-       add             %o1, 4, %o1
-       be              43f
-        and            %o2, 0xffffffc0, %o3
-
-       ld              [%o0 - 1], %o4
-       add             %o0, 4, %o0
-4:
-       SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
-       SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
-       SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
-       SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
-       subcc           %o3, 64, %o3
-       add             %o1, 64, %o1
-       bne             4b
-        add            %o0, 64, %o0
-
-       andcc           %o2, 0x30, %o3
-       be,a            1f
-        srl            %g1, 24, %g2
-4:
-       SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
-       subcc           %o3, 16, %o3
-       add             %o1, 16, %o1
-       bne             4b
-        add            %o0, 16, %o0
-
-       srl             %g1, 24, %g2
-1:
-       st              %o4, [%o0 - 5]
-       b               88f
-        stb            %g2, [%o0 - 1]
-41:
-       SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       subcc           %o3, 64, %o3
-       add             %o1, 64, %o1
-       bne             41b
-        add            %o0, 64, %o0
-        
-       andcc           %o2, 0x30, %o3
-       be,a            1f
-        srl            %g1, 16, %g2
-4:
-       SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
-       subcc           %o3, 16, %o3
-       add             %o1, 16, %o1
-       bne             4b
-        add            %o0, 16, %o0
-
-       srl             %g1, 16, %g2
+       ldub            [%i1], %g2
+       stb             %g2, [%i0 + 4]
 1:
-       sth             %g2, [%o0 - 3]
-       srl             %g1, 8, %g4
-       b               88f
-        stb            %g4, [%o0 - 1]
-43:
-       SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
-       SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
-       SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
-       SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
-       subcc           %o3, 64, %o3
-       add             %o1, 64, %o1
-       bne             43b
-        add            %o0, 64, %o0
-
-       andcc           %o2, 0x30, %o3
-       be,a            1f
-        srl            %g1, 24, %g2
-4:
-       SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
-       subcc           %o3, 16, %o3
-       add             %o1, 16, %o1
-       bne             4b
-        add            %o0, 16, %o0
-
-       srl             %g1, 24, %g2
-1:
-       stb             %g2, [%o0 + 3]
-       b               88f
-        add            %o0, 4, %o0
-42:
-       SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       subcc           %o3, 64, %o3
-       add             %o1, 64, %o1
-       bne             42b
-        add            %o0, 64, %o0
-        
-       andcc           %o2, 0x30, %o3
-       be,a            1f
-        srl            %g1, 16, %g2
-4:
-       SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
-       subcc           %o3, 16, %o3
-       add             %o1, 16, %o1
-       bne             4b
-        add            %o0, 16, %o0
-
-       srl             %g1, 16, %g2
-1:
-       sth             %g2, [%o0 - 2]
-
-       /* Fall through */
-        
-#endif /* FASTER_NONALIGNED */
+       ret
+        restore        %g7, %g0, %o0
 
 88:    /* short_end */
 
@@ -1127,7 +521,7 @@ FUNC(memcpy)       /* %o0=dst %o1=src %o2=len */
        stb             %g2, [%o0]
 1:
        retl
-        RETL_INSN
+        mov            %g7, %o0
 
 90:    /* short_aligned_end */
        bne             88b
index 79836a7dd00c3ba78c446fd43a73edc9d9bd60ed..3b6e248650d4fd9e0e41729d552f236895564a93 100644 (file)
@@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
 obj-$(CONFIG_SPARC32)   += loadmmu.o
-obj-y                   += generic_$(BITS).o
 obj-$(CONFIG_SPARC32)   += extable.o btfixup.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
index 5175ac2f4820c603d55e5fba0c06b735080edf96..8a7f81743c126b1b8648584e20504dce6111dd48 100644 (file)
@@ -302,8 +302,7 @@ void __init btfixup(void)
                                case 'i':       /* INT */
                                        if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-                                       else if ((insn & 0x80002000) == 0x80002000 &&
-                                                (insn & 0x01800000) != 0x01800000) /* %LO */
+                                       else if ((insn & 0x80002000) == 0x80002000) /* %LO */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
                                        else {
                                                prom_printf(insn_i, p, addr, insn);
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
deleted file mode 100644 (file)
index e6067b7..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * generic.c: Generic Sparc mm routines that are not dependent upon
- *            MMU type but are Sparc specific.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-/* Remap IO memory, the same way as remap_pfn_range(), but use
- * the obio memory space.
- *
- * They use a pgprot that sets PAGE_IO and does not check the
- * mem_map table as this is independent of normal memory.
- */
-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
-       unsigned long offset, pgprot_t prot, int space)
-{
-       unsigned long end;
-
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       do {
-               set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
-               address += PAGE_SIZE;
-               offset += PAGE_SIZE;
-               pte++;
-       } while (address < end);
-}
-
-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-       unsigned long offset, pgprot_t prot, int space)
-{
-       unsigned long end;
-
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       offset -= address;
-       do {
-               pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
-               if (!pte)
-                       return -ENOMEM;
-               io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-       return 0;
-}
-
-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-                      unsigned long pfn, unsigned long size, pgprot_t prot)
-{
-       int error = 0;
-       pgd_t * dir;
-       unsigned long beg = from;
-       unsigned long end = from + size;
-       struct mm_struct *mm = vma->vm_mm;
-       int space = GET_IOSPACE(pfn);
-       unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
-
-       /* See comment in mm/memory.c remap_pfn_range */
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
-       vma->vm_pgoff = (offset >> PAGE_SHIFT) |
-               ((unsigned long)space << 28UL);
-
-       offset -= from;
-       dir = pgd_offset(mm, from);
-       flush_cache_range(vma, beg, end);
-
-       while (from < end) {
-               pmd_t *pmd = pmd_alloc(mm, dir, from);
-               error = -ENOMEM;
-               if (!pmd)
-                       break;
-               error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
-               if (error)
-                       break;
-               from = (from + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       }
-
-       flush_tlb_range(vma, beg, end);
-       return error;
-}
-EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
deleted file mode 100644 (file)
index 3cb00df..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * generic.c: Generic Sparc mm routines that are not dependent upon
- *            MMU type but are Sparc specific.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-
-/* Remap IO memory, the same way as remap_pfn_range(), but use
- * the obio memory space.
- *
- * They use a pgprot that sets PAGE_IO and does not check the
- * mem_map table as this is independent of normal memory.
- */
-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
-                                     unsigned long address,
-                                     unsigned long size,
-                                     unsigned long offset, pgprot_t prot,
-                                     int space)
-{
-       unsigned long end;
-
-       /* clear hack bit that was used as a write_combine side-effect flag */
-       offset &= ~0x1UL;
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       do {
-               pte_t entry;
-               unsigned long curend = address + PAGE_SIZE;
-               
-               entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
-               if (!(address & 0xffff)) {
-                       if (PAGE_SIZE < (4 * 1024 * 1024) &&
-                           !(address & 0x3fffff) &&
-                           !(offset & 0x3ffffe) &&
-                           end >= address + 0x400000) {
-                               entry = mk_pte_io(offset, prot, space,
-                                                 4 * 1024 * 1024);
-                               curend = address + 0x400000;
-                               offset += 0x400000;
-                       } else if (PAGE_SIZE < (512 * 1024) &&
-                                  !(address & 0x7ffff) &&
-                                  !(offset & 0x7fffe) &&
-                                  end >= address + 0x80000) {
-                               entry = mk_pte_io(offset, prot, space,
-                                                 512 * 1024 * 1024);
-                               curend = address + 0x80000;
-                               offset += 0x80000;
-                       } else if (PAGE_SIZE < (64 * 1024) &&
-                                  !(offset & 0xfffe) &&
-                                  end >= address + 0x10000) {
-                               entry = mk_pte_io(offset, prot, space,
-                                                 64 * 1024);
-                               curend = address + 0x10000;
-                               offset += 0x10000;
-                       } else
-                               offset += PAGE_SIZE;
-               } else
-                       offset += PAGE_SIZE;
-
-               if (pte_write(entry))
-                       entry = pte_mkdirty(entry);
-               do {
-                       BUG_ON(!pte_none(*pte));
-                       set_pte_at(mm, address, pte, entry);
-                       address += PAGE_SIZE;
-                       pte_val(entry) += PAGE_SIZE;
-                       pte++;
-               } while (address < curend);
-       } while (address < end);
-}
-
-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-       unsigned long offset, pgprot_t prot, int space)
-{
-       unsigned long end;
-
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       offset -= address;
-       do {
-               pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
-               if (!pte)
-                       return -ENOMEM;
-               io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
-               pte_unmap(pte);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-       return 0;
-}
-
-static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
-       unsigned long offset, pgprot_t prot, int space)
-{
-       unsigned long end;
-
-       address &= ~PUD_MASK;
-       end = address + size;
-       if (end > PUD_SIZE)
-               end = PUD_SIZE;
-       offset -= address;
-       do {
-               pmd_t *pmd = pmd_alloc(mm, pud, address);
-               if (!pud)
-                       return -ENOMEM;
-               io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
-               address = (address + PUD_SIZE) & PUD_MASK;
-               pud++;
-       } while (address < end);
-       return 0;
-}
-
-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-               unsigned long pfn, unsigned long size, pgprot_t prot)
-{
-       int error = 0;
-       pgd_t * dir;
-       unsigned long beg = from;
-       unsigned long end = from + size;
-       struct mm_struct *mm = vma->vm_mm;
-       int space = GET_IOSPACE(pfn);
-       unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
-       unsigned long phys_base;
-
-       phys_base = offset | (((unsigned long) space) << 32UL);
-
-       /* See comment in mm/memory.c remap_pfn_range */
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
-       vma->vm_pgoff = phys_base >> PAGE_SHIFT;
-
-       offset -= from;
-       dir = pgd_offset(mm, from);
-       flush_cache_range(vma, beg, end);
-
-       while (from < end) {
-               pud_t *pud = pud_alloc(mm, dir, from);
-               error = -ENOMEM;
-               if (!pud)
-                       break;
-               error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
-               if (error)
-                       break;
-               from = (from + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       }
-
-       flush_tlb_range(vma, beg, end);
-       return error;
-}
-EXPORT_SYMBOL(io_remap_pfn_range);
index b57a5942ba64f13f079898219f5e7479782b4b84..874162a11ceb824eb9e1af311c81be052a934927 100644 (file)
@@ -495,11 +495,11 @@ xcall_fetch_glob_regs:
        stx             %o7, [%g1 + GR_SNAP_O7]
        stx             %i7, [%g1 + GR_SNAP_I7]
        /* Don't try this at home kids... */
-       rdpr            %cwp, %g2
-       sub             %g2, 1, %g7
+       rdpr            %cwp, %g3
+       sub             %g3, 1, %g7
        wrpr            %g7, %cwp
        mov             %i7, %g7
-       wrpr            %g2, %cwp
+       wrpr            %g3, %cwp
        stx             %g7, [%g1 + GR_SNAP_RPC]
        sethi           %hi(trap_block), %g7
        or              %g7, %lo(trap_block), %g7
index 0249b8b4db545bdb26334d05d3e105a387b6f1d0..532a2a42ab7eb8a731e275e3437f9a03700f814e 100644 (file)
@@ -11,6 +11,7 @@ config TILE
        select GENERIC_IRQ_PROBE
        select GENERIC_PENDING_IRQ if SMP
        select GENERIC_IRQ_SHOW
+       select HAVE_SYSCALL_WRAPPERS if TILEGX
        select SYS_HYPERVISOR
 
 # FIXME: investigate whether we need/want these options.
index 16f1fa51fea13de4139c5a18c2d10088b8e395c1..bd186c4eaa505947299f0a1d98de1dbe9b9102f6 100644 (file)
@@ -77,6 +77,11 @@ static inline int ffs(int x)
        return __builtin_ffs(x);
 }
 
+static inline int fls64(__u64 w)
+{
+       return (sizeof(__u64) * 8) - __builtin_clzll(w);
+}
+
 /**
  * fls - find last set bit in word
  * @x: the word to search
@@ -90,12 +95,7 @@ static inline int ffs(int x)
  */
 static inline int fls(int x)
 {
-       return (sizeof(int) * 8) - __builtin_clz(x);
-}
-
-static inline int fls64(__u64 w)
-{
-       return (sizeof(__u64) * 8) - __builtin_clzll(w);
+       return fls64((unsigned int) x);
 }
 
 static inline unsigned int __arch_hweight32(unsigned int w)
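
The tile bitops hunk above reorders fls()/fls64() so that the 32-bit fls() is defined in terms of fls64(), both built on compiler count-leading-zeros intrinsics. A minimal, hedged userspace sketch of the same identities follows; my_fls/my_fls64 are local names, not the kernel functions, and zero inputs (which the intrinsics leave undefined) are deliberately not exercised.

#include <assert.h>
#include <stdio.h>

typedef unsigned long long u64;

/* Mirror of the header hunk above: 1-based position of the most
 * significant set bit.  Only call with non-zero arguments here. */
static int my_fls64(u64 w)
{
	return (int)(sizeof(u64) * 8) - __builtin_clzll(w);
}

static int my_fls(int x)
{
	/* The patch routes 32-bit fls() through fls64() after zero-extending. */
	return my_fls64((unsigned int)x);
}

int main(void)
{
	assert(my_fls(1) == 1);            /* bit 0 set -> 1 */
	assert(my_fls(0x40000000) == 31);
	assert(my_fls64(1ULL << 40) == 41);
	printf("fls(0x8000) = %d\n", my_fls(0x8000));   /* prints 16 */
	return 0;
}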
index a7869ad627760e1315ca1fd3096233910ab56df3..41459d80b6bf1d88634a310dc73e76ec105f8023 100644 (file)
@@ -406,19 +406,17 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
         * Set up registers for signal handler.
         * Registers that we don't modify keep the value they had from
         * user-space at the time we took the signal.
+        * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+        * since some things rely on this (e.g. glibc's debug/segfault.c).
         */
        regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
        regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
        regs->sp = ptr_to_compat_reg(frame);
        regs->lr = restorer;
        regs->regs[0] = (unsigned long) usig;
-
-       if (ka->sa.sa_flags & SA_SIGINFO) {
-               /* Need extra arguments, so mark to restore caller-saves. */
-               regs->regs[1] = ptr_to_compat_reg(&frame->info);
-               regs->regs[2] = ptr_to_compat_reg(&frame->uc);
-               regs->flags |= PT_FLAGS_CALLER_SAVES;
-       }
+       regs->regs[1] = ptr_to_compat_reg(&frame->info);
+       regs->regs[2] = ptr_to_compat_reg(&frame->uc);
+       regs->flags |= PT_FLAGS_CALLER_SAVES;
 
        /*
         * Notify any tracer that was single-stepping it.
index 620f5b70957d55c4001a31fc9b9682a2e4feb687..0491e40d6968d9916f4dfaef93bd28d7c694145d 100644 (file)
@@ -513,8 +513,37 @@ __uml_exitcall(kill_io_thread);
 static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
 {
        char *file;
+       int fd;
+       int err;
+
+       __u32 version;
+       __u32 align;
+       char *backing_file;
+       time_t mtime;
+       unsigned long long size;
+       int sector_size;
+       int bitmap_offset;
+
+       if (ubd_dev->file && ubd_dev->cow.file) {
+               file = ubd_dev->cow.file;
+
+               goto out;
+       }
 
-       file = ubd_dev->cow.file ? ubd_dev->cow.file : ubd_dev->file;
+       fd = os_open_file(ubd_dev->file, global_openflags, 0);
+       if (fd < 0)
+               return fd;
+
+       err = read_cow_header(file_reader, &fd, &version, &backing_file, \
+               &mtime, &size, &sector_size, &align, &bitmap_offset);
+       os_close_file(fd);
+
+       if(err == -EINVAL)
+               file = ubd_dev->file;
+       else
+               file = backing_file;
+
+out:
        return os_file_size(file, size_out);
 }
 
index 41474fb5eee70d3f89474ae13c59cbaef10b0f37..aa365c55ecf94b685bff6d33f96e33244247cf9f 100644 (file)
@@ -271,6 +271,12 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+       return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
+}
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -346,11 +352,11 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 #define update_mmu_cache(vma,address,ptep) do ; while (0)
 
 /* Encode and de-code a swap entry */
-#define __swp_type(x)                  (((x).val >> 4) & 0x3f)
+#define __swp_type(x)                  (((x).val >> 5) & 0x1f)
 #define __swp_offset(x)                        ((x).val >> 11)
 
 #define __swp_entry(type, offset) \
-       ((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
+       ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
 #define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
 #define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
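
The swap-entry hunk above narrows the swap type to 5 bits and moves it from bit 4 to bit 5, while the offset stays at bit 11. A small hedged sketch of just that encode/decode layout, as standalone C mirroring the macros; the surrounding UML pte flag layout (for example _PAGE_NEWPAGE, which appears in the pte_same() hunk above) is not reproduced here.

#include <assert.h>
#include <stdio.h>

/* Swap-entry layout from the new encoding above:
 *   type   -> bits 5..9  (5 bits, mask 0x1f)
 *   offset -> bits 11 and up */
typedef struct { unsigned long val; } swp_entry_t;

#define my_swp_type(x)		(((x).val >> 5) & 0x1f)
#define my_swp_offset(x)	((x).val >> 11)
#define my_swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 5) | ((offset) << 11) })

int main(void)
{
	swp_entry_t e = my_swp_entry(7UL, 12345UL);

	assert(my_swp_type(e) == 7);
	assert(my_swp_offset(e) == 12345);
	/* Bits 0..4 and bit 10 remain clear for pte flag bits. */
	printf("entry=%#lx type=%lu offset=%lu\n",
	       e.val, my_swp_type(e), my_swp_offset(e));
	return 0;
}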
index be6d9e365a800a8569fb4915fccbfc50b0793a09..3470624d7835fa646b7c9f8dafa6acd3d9ed5f2f 100644 (file)
@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
        pxor IN3, STATE4
        movaps IN4, IV
 #else
-       pxor (INP), STATE2
-       pxor 0x10(INP), STATE3
        pxor IN1, STATE4
        movaps IN2, IV
+       movups (INP), IN1
+       pxor IN1, STATE2
+       movups 0x10(INP), IN2
+       pxor IN2, STATE3
 #endif
        movups STATE1, (OUTP)
        movups STATE2, 0x10(OUTP)
index 67f87f25761169a54e99795cecab98fed034aca7..78a1eff74223a6fe232978ce5caf30d899ae98e5 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_AMD_NB_H
 #define _ASM_X86_AMD_NB_H
 
+#include <linux/ioport.h>
 #include <linux/pci.h>
 
 struct amd_nb_bus_dev_range {
@@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
 extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 
 extern bool early_is_amd_nb(u32 value);
+extern struct resource *amd_get_mmconfig_range(struct resource *res);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
 extern int amd_numa_init(void);
index 4a0b7c7e2cce5c430a95f6f972c7e810c379170f..244ac77eee8dd502576f65b2ebdd3401634ec820 100644 (file)
@@ -495,7 +495,7 @@ static inline void default_wait_for_init_deassert(atomic_t *deassert)
        return;
 }
 
-extern struct apic *generic_bigsmp_probe(void);
+extern void generic_bigsmp_probe(void);
 
 
 #ifdef CONFIG_X86_LOCAL_APIC
index c9e09ea05644fdea45227388d87e04be3db0390f..a850b4d8d14d9a913a5de034882a9d5c8542163c 100644 (file)
@@ -29,8 +29,8 @@ extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
-extern asmlinkage void math_state_restore(void);
-extern void __math_state_restore(void);
+extern void __math_state_restore(struct task_struct *);
+extern void math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
 extern user_regset_active_fn fpregs_active, xfpregs_active;
@@ -212,19 +212,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
 
 #endif /* CONFIG_X86_64 */
 
-/* We need a safe address that is cheap to find and that is already
-   in L1 during context switch. The best choices are unfortunately
-   different for UP and SMP */
-#ifdef CONFIG_SMP
-#define safe_address (__per_cpu_offset[0])
-#else
-#define safe_address (kstat_cpu(0).cpustat.user)
-#endif
-
 /*
- * These must be called with preempt disabled
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
  */
-static inline void fpu_save_init(struct fpu *fpu)
+static inline int fpu_save_init(struct fpu *fpu)
 {
        if (use_xsave()) {
                fpu_xsave(fpu);
@@ -233,33 +225,33 @@ static inline void fpu_save_init(struct fpu *fpu)
                 * xsave header may indicate the init state of the FP.
                 */
                if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
-                       return;
+                       return 1;
        } else if (use_fxsr()) {
                fpu_fxsave(fpu);
        } else {
                asm volatile("fnsave %[fx]; fwait"
                             : [fx] "=m" (fpu->state->fsave));
-               return;
+               return 0;
        }
 
-       if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+       /*
+        * If exceptions are pending, we need to clear them so
+        * that we don't randomly get exceptions later.
+        *
+        * FIXME! Is this perhaps only true for the old-style
+        * irq13 case? Maybe we could leave the x87 state
+        * intact otherwise?
+        */
+       if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
                asm volatile("fnclex");
-
-       /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-          is pending.  Clear the x87 state here by setting it to fixed
-          values. safe_address is a random variable that should be in L1 */
-       alternative_input(
-               ASM_NOP8 ASM_NOP2,
-               "emms\n\t"              /* clear stack tags */
-               "fildl %P[addr]",       /* set F?P to defined value */
-               X86_FEATURE_FXSAVE_LEAK,
-               [addr] "m" (safe_address));
+               return 0;
+       }
+       return 1;
 }
 
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline int __save_init_fpu(struct task_struct *tsk)
 {
-       fpu_save_init(&tsk->thread.fpu);
-       task_thread_info(tsk)->status &= ~TS_USEDFPU;
+       return fpu_save_init(&tsk->thread.fpu);
 }
 
 static inline int fpu_fxrstor_checking(struct fpu *fpu)
@@ -281,39 +273,185 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 }
 
 /*
- * Signal frame handlers...
+ * Software FPU state helpers. Careful: these need to
+ * be preemption protection *and* they need to be
+ * properly paired with the CR0.TS changes!
  */
-extern int save_i387_xstate(void __user *buf);
-extern int restore_i387_xstate(void __user *buf);
+static inline int __thread_has_fpu(struct task_struct *tsk)
+{
+       return tsk->thread.has_fpu;
+}
 
-static inline void __unlazy_fpu(struct task_struct *tsk)
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
-       if (task_thread_info(tsk)->status & TS_USEDFPU) {
-               __save_init_fpu(tsk);
-               stts();
-       } else
-               tsk->fpu_counter = 0;
+       tsk->thread.has_fpu = 0;
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
+{
+       tsk->thread.has_fpu = 1;
 }
 
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * so try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct task_struct *tsk)
+{
+       __thread_clear_has_fpu(tsk);
+       stts();
+}
+
+static inline void __thread_fpu_begin(struct task_struct *tsk)
+{
+       clts();
+       __thread_set_has_fpu(tsk);
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ *  - switch_fpu_prepare() saves the old state and
+ *    sets the new state of the CR0.TS bit. This is
+ *    done within the context of the old process.
+ *
+ *  - switch_fpu_finish() restores the new state as
+ *    necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+/*
+ * FIXME! We could do a totally lazy restore, but we need to
+ * add a per-cpu "this was the task that last touched the FPU
+ * on this CPU" variable, and the task needs to have a "I last
+ * touched the FPU on this CPU" and check them.
+ *
+ * We don't do that yet, so "fpu_lazy_restore()" always returns
+ * false, but some day..
+ */
+#define fpu_lazy_restore(tsk) (0)
+#define fpu_lazy_state_intact(tsk) do { } while (0)
+
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
+{
+       fpu_switch_t fpu;
+
+       fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+       if (__thread_has_fpu(old)) {
+               if (__save_init_fpu(old))
+                       fpu_lazy_state_intact(old);
+               __thread_clear_has_fpu(old);
+               old->fpu_counter++;
+
+               /* Don't change CR0.TS if we just switch! */
+               if (fpu.preload) {
+                       __thread_set_has_fpu(new);
+                       prefetch(new->thread.fpu.state);
+               } else
+                       stts();
+       } else {
+               old->fpu_counter = 0;
+               if (fpu.preload) {
+                       if (fpu_lazy_restore(new))
+                               fpu.preload = 0;
+                       else
+                               prefetch(new->thread.fpu.state);
+                       __thread_fpu_begin(new);
+               }
+       }
+       return fpu;
+}
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+{
+       if (fpu.preload)
+               __math_state_restore(new);
+}
+
+/*
+ * Signal frame handlers...
+ */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);
+
 static inline void __clear_fpu(struct task_struct *tsk)
 {
-       if (task_thread_info(tsk)->status & TS_USEDFPU) {
+       if (__thread_has_fpu(tsk)) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
                             _ASM_EXTABLE(1b, 2b));
-               task_thread_info(tsk)->status &= ~TS_USEDFPU;
-               stts();
+               __thread_fpu_end(tsk);
        }
 }
 
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: the thread must not have fpu (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+       return !__thread_has_fpu(current) &&
+               (read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+       struct pt_regs *regs = get_irq_regs();
+       return regs && user_mode_vm(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+static inline bool irq_fpu_usable(void)
+{
+       return !in_interrupt() ||
+               interrupted_user_mode() ||
+               interrupted_kernel_fpu_idle();
+}
+
 static inline void kernel_fpu_begin(void)
 {
-       struct thread_info *me = current_thread_info();
+       struct task_struct *me = current;
+
+       WARN_ON_ONCE(!irq_fpu_usable());
        preempt_disable();
-       if (me->status & TS_USEDFPU)
-               __save_init_fpu(me->task);
-       else
+       if (__thread_has_fpu(me)) {
+               __save_init_fpu(me);
+               __thread_clear_has_fpu(me);
+               /* We do 'stts()' in kernel_fpu_end() */
+       } else
                clts();
 }
 
@@ -323,14 +461,6 @@ static inline void kernel_fpu_end(void)
        preempt_enable();
 }
 
-static inline bool irq_fpu_usable(void)
-{
-       struct pt_regs *regs;
-
-       return !in_interrupt() || !(regs = get_irq_regs()) || \
-               user_mode(regs) || (read_cr0() & X86_CR0_TS);
-}
-
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions
@@ -362,21 +492,65 @@ static inline void irq_ts_restore(int TS_state)
                stts();
 }
 
+/*
+ * The question "does this thread have fpu access?"
+ * is slightly racy, since preemption could come in
+ * and revoke it immediately after the test.
+ *
+ * However, even in that very unlikely scenario,
+ * we can just assume we have FPU access - typically
+ * to save the FP state - we'll just take a #NM
+ * fault and get the FPU access back.
+ *
+ * The actual user_fpu_begin/end() functions
+ * need to be preemption-safe, though.
+ *
+ * NOTE! user_fpu_end() must be used only after you
+ * have saved the FP state, and user_fpu_begin() must
+ * be used only immediately before restoring it.
+ * These functions do not do any save/restore on
+ * their own.
+ */
+static inline int user_has_fpu(void)
+{
+       return __thread_has_fpu(current);
+}
+
+static inline void user_fpu_end(void)
+{
+       preempt_disable();
+       __thread_fpu_end(current);
+       preempt_enable();
+}
+
+static inline void user_fpu_begin(void)
+{
+       preempt_disable();
+       if (!user_has_fpu())
+               __thread_fpu_begin(current);
+       preempt_enable();
+}
+
 /*
  * These disable preemption on their own and are safe
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
+       WARN_ON_ONCE(!__thread_has_fpu(tsk));
        preempt_disable();
        __save_init_fpu(tsk);
-       stts();
+       __thread_fpu_end(tsk);
        preempt_enable();
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk)
 {
        preempt_disable();
-       __unlazy_fpu(tsk);
+       if (__thread_has_fpu(tsk)) {
+               __save_init_fpu(tsk);
+               __thread_fpu_end(tsk);
+       } else
+               tsk->fpu_counter = 0;
        preempt_enable();
 }
 
index 0049211959c08a2c1d12844d9a64181f6b3a3cc7..0ab6a4dcb911b808eabfb74be75a7434491b09bd 100644 (file)
@@ -189,6 +189,9 @@ struct x86_emulate_ops {
        int (*intercept)(struct x86_emulate_ctxt *ctxt,
                         struct x86_instruction_info *info,
                         enum x86_intercept_stage stage);
+
+       bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
+                        u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -298,6 +301,19 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_PROT     (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
                               X86EMUL_MODE_PROT64)
 
+/* CPUID vendors */
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
+
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
+
 enum x86_intercept_stage {
        X86_ICTP_NONE = 0,   /* Allow zero-init to not match anything */
        X86_ICPT_PRE_EXCEPT,
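
The X86EMUL_CPUID_VENDOR_* constants above are simply the vendor ID strings packed into the EBX/EDX/ECX byte order that CPUID leaf 0 reports. A short hedged check of that correspondence; pack4 is a local helper and the program assumes a little-endian host, as on x86.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack four ASCII characters the way CPUID leaf 0 reports the vendor
 * string: first character in the least significant byte of the register. */
static uint32_t pack4(const char *s)
{
	uint32_t v;

	memcpy(&v, s, 4);	/* little-endian host assumed */
	return v;
}

int main(void)
{
	const char *intel = "GenuineIntel";	/* ebx:"Genu" edx:"ineI" ecx:"ntel" */
	const char *amd   = "AuthenticAMD";	/* ebx:"Auth" edx:"enti" ecx:"cAMD" */

	assert(pack4(intel + 0) == 0x756e6547);	/* ..._GenuineIntel_ebx */
	assert(pack4(intel + 4) == 0x49656e69);	/* ..._GenuineIntel_edx */
	assert(pack4(intel + 8) == 0x6c65746e);	/* ..._GenuineIntel_ecx */

	assert(pack4(amd + 0) == 0x68747541);	/* ..._AuthenticAMD_ebx */
	assert(pack4(amd + 4) == 0x69746e65);	/* ..._AuthenticAMD_edx */
	assert(pack4(amd + 8) == 0x444d4163);	/* ..._AuthenticAMD_ecx */

	printf("all vendor constants match their strings\n");
	return 0;
}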
index effff47a3c8280fe4d0b5979c433a8400d570129..43876f16caf1ca8981288089d57b362e017bafbf 100644 (file)
@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
        ptep->pte_low = pte.pte_low;
 }
 
+#define pmd_read_atomic pmd_read_atomic
+/*
+ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
+ * a "*pmdp" dereference done by gcc. Problem is, in certain places
+ * where pte_offset_map_lock is called, concurrent page faults are
+ * allowed, if the mmap_sem is held for reading. An example is mincore
+ * vs page faults vs MADV_DONTNEED. On the page fault side
+ * pmd_populate rightfully does a set_64bit, but if we're reading the
+ * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
+ * because gcc will not read the 64bit of the pmd atomically. To fix
+ * this, all places running pmd_offset_map_lock() while holding the
+ * mmap_sem in read mode shall read the pmdp pointer using this
+ * function to know if the pmd is null or not, and in turn to know if
+ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
+ * operations.
+ *
+ * Without THP, if the mmap_sem is held for reading, the
+ * pmd can only transition from null to not null while pmd_read_atomic runs.
+ * So there's no need to literally read it atomically.
+ *
+ * With THP, if the mmap_sem is held for reading, the pmd can become
+ * THP or null or point to a pte (and in turn become "stable") at any
+ * time under pmd_read_atomic, so it's mandatory to read it atomically
+ * with cmpxchg8b.
+ */
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+       pmdval_t ret;
+       u32 *tmp = (u32 *)pmdp;
+
+       ret = (pmdval_t) (*tmp);
+       if (ret) {
+               /*
+                * If the low part is null, we must not read the high part
+                * or we can end up with a partial pmd.
+                */
+               smp_rmb();
+               ret |= ((pmdval_t)*(tmp + 1)) << 32;
+       }
+
+       return (pmd_t) { ret };
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+       return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
        set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
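
For the non-THP variant of pmd_read_atomic() above, the trick is to read the low word first and only read the high word when the low word is non-zero, exactly as the in-code comment says. A hedged userspace sketch of that split-read pattern over a 64-bit entry stored as two 32-bit halves; entry_t and read_entry are local names, and the acquire fence merely stands in for smp_rmb().

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t lo, hi; } entry_t;

static uint64_t read_entry(const entry_t *e)
{
	uint64_t ret = e->lo;

	if (ret) {
		/* If the low part is null, we must not read the high part
		 * or we could end up with a partial value; otherwise order
		 * the two loads (models smp_rmb() above). */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		ret |= (uint64_t)e->hi << 32;
	}
	return ret;
}

int main(void)
{
	entry_t none      = { 0, 0 };
	entry_t populated = { 0x00000067u, 0x00000123u };

	printf("none      -> %#llx\n", (unsigned long long)read_entry(&none));
	printf("populated -> %#llx\n", (unsigned long long)read_entry(&populated));
	return 0;
}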
index 219371546afd343ddeb39a1f3018ce0e5c6cfc31..5d9c61d0b2709da8b81c3ff3e3e721df7ceff55e 100644 (file)
@@ -454,6 +454,7 @@ struct thread_struct {
        unsigned long           trap_no;
        unsigned long           error_code;
        /* floating point and extended processor state */
+       unsigned long           has_fpu;
        struct fpu              fpu;
 #ifdef CONFIG_X86_32
        /* Virtual 86 mode info */
index 1f2e61e28981b7a32d8f9f6f409475460a2ca73d..278d3d5f9062b52487c8e8f6baf1f68ea137981f 100644 (file)
@@ -242,8 +242,6 @@ static inline struct thread_info *current_thread_info(void)
  * ever touches our thread-synchronous status, so we don't
  * have to worry about atomic accesses.
  */
-#define TS_USEDFPU             0x0001  /* FPU was used by this task
-                                          this quantum (SMP) */
 #define TS_COMPAT              0x0002  /* 32bit syscall active (64BIT)*/
 #define TS_POLLING             0x0004  /* idle task polling need_resched,
                                           skip sending interrupt */
index fa7b9176b76cb33820034403fd8f4a50dc49709c..34baa0eb5d0c972109fba546e4402508f7cda6fa 100644 (file)
@@ -32,6 +32,22 @@ extern int no_timer_check;
  *  (mathieu.desnoyers@polymtl.ca)
  *
  *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ *
+ * In:
+ *
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * Although we may still have enough bits to store the value of ns,
+ * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
+ * leading to an incorrect result.
+ *
+ * To avoid this, we can decompose 'cycles' into quotient and remainder
+ * of division by SC.  Then,
+ *
+ * ns = (quot * SC + rem) * cyc2ns_scale / SC
+ *    = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
+ *
+ *                     - sqazi@google.com
  */
 
 DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -43,7 +59,8 @@ static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 {
        int cpu = smp_processor_id();
        unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
-       ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+       ns += mult_frac(cyc, per_cpu(cyc2ns, cpu),
+                       (1UL << CYC2NS_SCALE_FACTOR));
        return ns;
 }
 
index a291c40efd4360ec47150d7b9874d5965b97ad7f..5d62d651a6286da80cc5b5ec3d64f7f709ee56df 100644 (file)
@@ -55,6 +55,7 @@
 #define UV_BAU_TUNABLES_DIR            "sgi_uv"
 #define UV_BAU_TUNABLES_FILE           "bau_tunables"
 #define WHITESPACE                     " \t\n"
+#define uv_mmask                       ((1UL << uv_hub_info->m_val) - 1)
 #define uv_physnodeaddr(x)             ((__pa((unsigned long)(x)) & uv_mmask))
 #define cpubit_isset(cpu, bau_local_cpumask) \
        test_bit((cpu), (bau_local_cpumask).bits)
index f26544a15214e41f418b0554c52581ce23f9aacb..21f7385badb8f9eb4249aef142487e27b264b32d 100644 (file)
  *     PNODE   - the low N bits of the GNODE. The PNODE is the most useful variant
  *               of the nasid for socket usage.
  *
+ *     GPA     - (global physical address) a socket physical address converted
+ *               so that it can be used by the GRU as a global address. Socket
+ *               physical addresses 1) need additional NASID (node) bits added
+ *               to the high end of the address, and 2) unaliased if the
+ *               partition does not have a physical address 0. In addition, on
+ *               UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
+ *
  *
  *  NumaLink Global Physical Address Format:
  *  +--------------------------------+---------------------+
@@ -141,6 +148,8 @@ struct uv_hub_info_s {
        unsigned int            gnode_extra;
        unsigned char           hub_revision;
        unsigned char           apic_pnode_shift;
+       unsigned char           m_shift;
+       unsigned char           n_lshift;
        unsigned long           gnode_upper;
        unsigned long           lowmem_remap_top;
        unsigned long           lowmem_remap_base;
@@ -177,6 +186,16 @@ static inline int is_uv2_hub(void)
        return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
 }
 
+static inline int is_uv2_1_hub(void)
+{
+       return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE;
+}
+
+static inline int is_uv2_2_hub(void)
+{
+       return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE + 1;
+}
+
 union uvh_apicid {
     unsigned long       v;
     struct uvh_apicid_s {
@@ -276,7 +295,10 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
 {
        if (paddr < uv_hub_info->lowmem_remap_top)
                paddr |= uv_hub_info->lowmem_remap_base;
-       return paddr | uv_hub_info->gnode_upper;
+       paddr |= uv_hub_info->gnode_upper;
+       paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
+               ((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift);
+       return paddr;
 }
 
 
@@ -296,20 +318,23 @@ uv_gpa_in_mmr_space(unsigned long gpa)
 /* UV global physical address --> socket phys RAM */
 static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
 {
-       unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+       unsigned long paddr;
        unsigned long remap_base = uv_hub_info->lowmem_remap_base;
        unsigned long remap_top =  uv_hub_info->lowmem_remap_top;
 
+       gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
+               ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
+       paddr = gpa & uv_hub_info->gpa_mask;
        if (paddr >= remap_base && paddr < remap_base + remap_top)
                paddr -= remap_base;
        return paddr;
 }
 
 
-/* gnode -> pnode */
+/* gpa -> pnode */
 static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
 {
-       return gpa >> uv_hub_info->m_val;
+       return gpa >> uv_hub_info->n_lshift;
 }
 
 /* gpa -> pnode */
@@ -320,6 +345,12 @@ static inline int uv_gpa_to_pnode(unsigned long gpa)
        return uv_gpa_to_gnode(gpa) & n_mask;
 }
 
+/* gpa -> node offset */
+static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
+{
+       return (gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift;
+}
+
 /* pnode, offset --> socket virtual */
 static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
 {
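
The gpa helpers above all reduce to two shift identities: (x << m_shift) >> m_shift keeps the low m_val offset bits (since m_shift = 64 - m_val), and (x >> m_val) << n_lshift relocates the node bits. A hedged standalone check with made-up field widths, using m_val = 39 and n_lshift = m_val, the default case; the uv_system_init hunk further below shows where m_shift and the UV2.1 special cases for n_lshift are set.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative widths only: real m_val/n_lshift come from the hub. */
	unsigned m_val = 39, m_shift = 64 - m_val, n_lshift = 39;

	uint64_t paddr = (7ULL << m_val) | 0x12345ULL;	/* node 7, offset 0x12345 */

	/* (x << m_shift) >> m_shift keeps only the low m_val offset bits */
	uint64_t offset = (paddr << m_shift) >> m_shift;
	/* (x >> m_val) << n_lshift moves the node bits to the gpa position */
	uint64_t gpa = offset | ((paddr >> m_val) << n_lshift);

	assert(offset == 0x12345ULL);
	assert((gpa >> n_lshift) == 7);			/* like uv_gpa_to_gnode() */
	assert(((gpa << m_shift) >> m_shift) == 0x12345);	/* like uv_gpa_to_offset() */
	printf("paddr=%#llx gpa=%#llx\n",
	       (unsigned long long)paddr, (unsigned long long)gpa);
	return 0;
}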
index d3d9d50d93a549e7a2fe0981c3dcaf0a03d185da..bfd75ff5d5bda6083651eaa23cfc86e8f3a843de 100644 (file)
@@ -1203,7 +1203,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
                if (!pte || !IOMMU_PTE_PRESENT(*pte))
                        continue;
 
-               dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
+               dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
        }
 
        update_domain(&dma_dom->domain);
index bfc8453bd98dfae4bd62620e1ebd20a9151b9ea3..33df6e82f653d74db2496d783c9fd78b067c9380 100644 (file)
@@ -1031,8 +1031,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 {
        int r;
 
-       if (pci_enable_msi(iommu->dev))
-               return 1;
+       r = pci_enable_msi(iommu->dev);
+       if (r)
+               return r;
 
        r = request_threaded_irq(iommu->dev->irq,
                                 amd_iommu_int_handler,
@@ -1042,24 +1043,33 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 
        if (r) {
                pci_disable_msi(iommu->dev);
-               return 1;
+               return r;
        }
 
        iommu->int_enabled = true;
-       iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
        return 0;
 }
 
 static int iommu_init_msi(struct amd_iommu *iommu)
 {
+       int ret;
+
        if (iommu->int_enabled)
-               return 0;
+               goto enable_faults;
 
        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
-               return iommu_setup_msi(iommu);
+               ret = iommu_setup_msi(iommu);
+       else
+               ret = -ENODEV;
 
-       return 1;
+       if (ret)
+               return ret;
+
+enable_faults:
+       iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
+       return 0;
 }
 
 /****************************************************************************
index 4c39baa8facc25529c7018e53b38de43eab823d9..bae1efe6d515e2c7fc79f1ab10c60a8489afc11f 100644 (file)
@@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
        return false;
 }
 
+struct resource *amd_get_mmconfig_range(struct resource *res)
+{
+       u32 address;
+       u64 base, msr;
+       unsigned segn_busn_bits;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return NULL;
+
+       /* assume all cpus from fam10h have mmconfig */
+        if (boot_cpu_data.x86 < 0x10)
+               return NULL;
+
+       address = MSR_FAM10H_MMIO_CONF_BASE;
+       rdmsrl(address, msr);
+
+       /* mmconfig is not enabled */
+       if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+               return NULL;
+
+       base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+
+       segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+                        FAM10H_MMIO_CONF_BUSRANGE_MASK;
+
+       res->flags = IORESOURCE_MEM;
+       res->start = base;
+       res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
+       return res;
+}
+
 int amd_get_subcaches(int cpu)
 {
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
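
In amd_get_mmconfig_range() above, the window size comes from segn_busn_bits: mmconfig reserves 1 MiB (2^20 bytes) of config space per bus (32 devices x 8 functions x 4 KiB), so n bus-range bits cover 2^(n+20) bytes. A small hedged check of just that arithmetic; the base address and bit-field value below are made up, since the real ones are decoded from MSR_FAM10H_MMIO_CONF_BASE as shown in the hunk.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0xe0000000ULL;		/* illustrative mmconfig base */
	unsigned segn_busn_bits = 8;		/* 2^8 = 256 buses */

	uint64_t end = base + (1ULL << (segn_busn_bits + 20)) - 1;

	/* 256 buses x 1 MiB of config space per bus = a 256 MiB window */
	printf("mmconfig window: [%#llx - %#llx], %llu MiB\n",
	       (unsigned long long)base, (unsigned long long)end,
	       (unsigned long long)((end - base + 1) >> 20));
	return 0;
}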
index b9338b8cf420ca94e37c7da64dc1fa1910f19f91..147169569274b702df88b998e3f1059ee759384b 100644 (file)
@@ -1558,9 +1558,11 @@ static int __init apic_verify(void)
        mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
        /* The BIOS may have set up the APIC at some other address */
-       rdmsr(MSR_IA32_APICBASE, l, h);
-       if (l & MSR_IA32_APICBASE_ENABLE)
-               mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+       if (boot_cpu_data.x86 >= 6) {
+               rdmsr(MSR_IA32_APICBASE, l, h);
+               if (l & MSR_IA32_APICBASE_ENABLE)
+                       mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+       }
 
        pr_info("Found and enabled local APIC!\n");
        return 0;
@@ -1578,13 +1580,15 @@ int __init apic_force_enable(unsigned long addr)
         * MSR. This can only be done in software for Intel P6 or later
         * and AMD K7 (Model > 1) or later.
         */
-       rdmsr(MSR_IA32_APICBASE, l, h);
-       if (!(l & MSR_IA32_APICBASE_ENABLE)) {
-               pr_info("Local APIC disabled by BIOS -- reenabling.\n");
-               l &= ~MSR_IA32_APICBASE_BASE;
-               l |= MSR_IA32_APICBASE_ENABLE | addr;
-               wrmsr(MSR_IA32_APICBASE, l, h);
-               enabled_via_apicbase = 1;
+       if (boot_cpu_data.x86 >= 6) {
+               rdmsr(MSR_IA32_APICBASE, l, h);
+               if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+                       pr_info("Local APIC disabled by BIOS -- reenabling.\n");
+                       l &= ~MSR_IA32_APICBASE_BASE;
+                       l |= MSR_IA32_APICBASE_ENABLE | addr;
+                       wrmsr(MSR_IA32_APICBASE, l, h);
+                       enabled_via_apicbase = 1;
+               }
        }
        return apic_verify();
 }
@@ -2112,10 +2116,12 @@ static void lapic_resume(void)
                 * FIXME! This will be wrong if we ever support suspend on
                 * SMP! We'll need to do this as part of the CPU restore!
                 */
-               rdmsr(MSR_IA32_APICBASE, l, h);
-               l &= ~MSR_IA32_APICBASE_BASE;
-               l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-               wrmsr(MSR_IA32_APICBASE, l, h);
+               if (boot_cpu_data.x86 >= 6) {
+                       rdmsr(MSR_IA32_APICBASE, l, h);
+                       l &= ~MSR_IA32_APICBASE_BASE;
+                       l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+                       wrmsr(MSR_IA32_APICBASE, l, h);
+               }
        }
 
        maxlvt = lapic_get_maxlvt();
index efd737e827f45b72c86c34643700e89caab6e7b6..521bead01137f2921fc53473e7bdcebcd31ce61e 100644 (file)
@@ -255,12 +255,24 @@ static struct apic apic_bigsmp = {
        .x86_32_early_logical_apicid    = bigsmp_early_logical_apicid,
 };
 
-struct apic * __init generic_bigsmp_probe(void)
+void __init generic_bigsmp_probe(void)
 {
-       if (probe_bigsmp())
-               return &apic_bigsmp;
+       unsigned int cpu;
 
-       return NULL;
+       if (!probe_bigsmp())
+               return;
+
+       apic = &apic_bigsmp;
+
+       for_each_possible_cpu(cpu) {
+               if (early_per_cpu(x86_cpu_to_logical_apicid,
+                                 cpu) == BAD_APICID)
+                       continue;
+               early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
+                       bigsmp_early_logical_apicid(cpu);
+       }
+
+       pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
 }
 
 apic_driver(apic_bigsmp);
index b5254ad044abdacd5d96a5d97eb2f87a0b8f0981..0787bb3412f4fe98d55479c813c792705043dbe2 100644 (file)
@@ -200,14 +200,8 @@ void __init default_setup_apic_routing(void)
         * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
         */
 
-       if (!cmdline_apic && apic == &apic_default) {
-               struct apic *bigsmp = generic_bigsmp_probe();
-               if (bigsmp) {
-                       apic = bigsmp;
-                       printk(KERN_INFO "Overriding APIC driver with %s\n",
-                              apic->name);
-               }
-       }
+       if (!cmdline_apic && apic == &apic_default)
+               generic_bigsmp_probe();
 #endif
 
        if (apic->setup_apic_routing)
index 34b18594e72467212f0e430c329e9fa523f3aa09..874c20877140dbe081170762c366c805f74b5fb4 100644 (file)
@@ -779,7 +779,12 @@ void __init uv_system_init(void)
        for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
                uv_possible_blades +=
                  hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
-       printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
+
+       /* uv_num_possible_blades() is really the hub count */
+       printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
+                       is_uv1_hub() ? uv_num_possible_blades() :
+                       (uv_num_possible_blades() + 1) / 2,
+                       uv_num_possible_blades());
 
        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
        uv_blade_info = kzalloc(bytes, GFP_KERNEL);
@@ -832,6 +837,10 @@ void __init uv_system_init(void)
                uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
                uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;
 
+               uv_cpu_hub_info(cpu)->m_shift = 64 - m_val;
+               uv_cpu_hub_info(cpu)->n_lshift = is_uv2_1_hub() ?
+                               (m_val == 40 ? 40 : 39) : m_val;
+
                pnode = uv_apicid_to_pnode(apicid);
                blade = boot_pnode_to_blade(pnode);
                lcpu = uv_blade_info[blade].nr_possible_cpus;
@@ -862,8 +871,7 @@ void __init uv_system_init(void)
                if (uv_node_to_blade[nid] >= 0)
                        continue;
                paddr = node_start_pfn(nid) << PAGE_SHIFT;
-               paddr = uv_soc_phys_ram_to_gpa(paddr);
-               pnode = (paddr >> m_val) & pnode_mask;
+               pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
                blade = boot_pnode_to_blade(pnode);
                uv_node_to_blade[nid] = blade;
        }
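
A quick sanity check (illustrative only) of the blade/hub arithmetic in the new printk: on UV1 one hub sits on each blade so the counts match, while on UV2 two hubs share a blade and the count rounds up:

/* Illustrative: blade count reported for a given hub count, per the printk above. */
#include <stdio.h>

static int blades_for(int hubs, int is_uv1)
{
        return is_uv1 ? hubs : (hubs + 1) / 2;  /* UV2 packs two hubs per blade */
}

int main(void)
{
        printf("UV1, 15 hubs -> %d blades\n", blades_for(15, 1));       /* 15 */
        printf("UV2, 15 hubs -> %d blades\n", blades_for(15, 0));       /* 8  */
        return 0;
}
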
index c105c533ed94575e54b047ab131ff4a9170282b8..fde44284cf21447230d3863fbb838ddea3d102a0 100644 (file)
@@ -330,8 +330,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
        l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
-                                       int index)
+static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
 {
        static struct amd_l3_cache *__cpuinitdata l3_caches;
        int node;
@@ -748,14 +747,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)  (&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+
+static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
-       struct _cpuid4_info     *this_leaf, *sibling_leaf;
-       unsigned long num_threads_sharing;
-       int index_msb, i, sibling;
+       struct _cpuid4_info *this_leaf;
+       int ret, i, sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-       if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
+       ret = 0;
+       if (index == 3) {
+               ret = 1;
                for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
@@ -766,8 +767,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
                                set_bit(sibling, this_leaf->shared_cpu_map);
                        }
                }
-               return;
+       } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
+               ret = 1;
+               for_each_cpu(i, cpu_sibling_mask(cpu)) {
+                       if (!per_cpu(ici_cpuid4_info, i))
+                               continue;
+                       this_leaf = CPUID4_INFO_IDX(i, index);
+                       for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+                               if (!cpu_online(sibling))
+                                       continue;
+                               set_bit(sibling, this_leaf->shared_cpu_map);
+                       }
+               }
        }
+
+       return ret;
+}
+
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+       struct _cpuid4_info *this_leaf, *sibling_leaf;
+       unsigned long num_threads_sharing;
+       int index_msb, i;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+       if (c->x86_vendor == X86_VENDOR_AMD) {
+               if (cache_shared_amd_cpu_map_setup(cpu, index))
+                       return;
+       }
+
        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
index 1e8d66c1336a177a717a3fa1a1772f0cc5bd31aa..362190bd9e1eb3d9cdf543776ed88be0154424e5 100644 (file)
@@ -101,15 +101,19 @@ static struct severity {
 };
 
 /*
- * If the EIPV bit is set, it means the saved IP is the
- * instruction which caused the MCE.
+ * If mcgstatus indicated that ip/cs on the stack were
+ * no good, then "m->cs" will be zero and we will have
+ * to assume the worst case (IN_KERNEL) as we actually
+ * have no idea what we were executing when the machine
+ * check hit.
+ * If we do have a good "m->cs" (or a faked one in the
+ * case we were executing in VM86 mode) we can use it to
+ * distinguish an exception taken in user mode from one
+ * taken in the kernel.
  */
 static int error_context(struct mce *m)
 {
-       if (m->mcgstatus & MCG_STATUS_EIPV)
-               return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
-       /* Unknown, assume kernel */
-       return IN_KERNEL;
+       return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
 }
 
 int mce_severity(struct mce *a, int tolerant, char **msg)
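
For illustration (not part of the patch): the new rule keys entirely off the requested privilege level in the low two bits of the saved CS selector, and a zeroed m->cs falls through to IN_KERNEL, the conservative choice. A small sketch with typical x86-64 selector values:

/* Illustrative CS-based context check; the selector values are typical examples. */
#include <stdio.h>

enum context { IN_KERNEL, IN_USER };

static enum context error_context_like(unsigned int cs)
{
        /* The RPL lives in bits 1:0 of the selector; 3 means user mode. */
        return ((cs & 3) == 3) ? IN_USER : IN_KERNEL;
}

int main(void)
{
        printf("cs=0x10 -> %s\n", error_context_like(0x10) == IN_USER ? "user" : "kernel");
        printf("cs=0x33 -> %s\n", error_context_like(0x33) == IN_USER ? "user" : "kernel");
        printf("cs=0x00 -> %s\n", error_context_like(0x00) == IN_USER ? "user" : "kernel");
        return 0;
}
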
index bb0adad3514339d5a2f25d471af5f568f9b4d03e..dc4fb779a724c40c89a909ab06c7d9e8d5ef0204 100644 (file)
@@ -52,6 +52,7 @@ struct threshold_block {
        unsigned int            cpu;
        u32                     address;
        u16                     interrupt_enable;
+       bool                    interrupt_capable;
        u16                     threshold_limit;
        struct kobject          kobj;
        struct list_head        miscj;
@@ -86,6 +87,21 @@ struct thresh_restart {
        u16                     old_limit;
 };
 
+static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
+{
+       /*
+        * bank 4 supports APIC LVT interrupts implicitly since forever.
+        */
+       if (bank == 4)
+               return true;
+
+       /*
+        * IntP: interrupt present; if this bit is set, the thresholding
+        * bank can generate APIC LVT interrupts
+        */
+       return msr_high_bits & BIT(28);
+}
+
 static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
 {
        int msr = (hi & MASK_LVTOFF_HI) >> 20;
@@ -107,8 +123,10 @@ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
        return 1;
 };
 
-/* must be called with correct cpu affinity */
-/* Called via smp_call_function_single() */
+/*
+ * Called via smp_call_function_single(), must be called with correct
+ * cpu affinity.
+ */
 static void threshold_restart_bank(void *_tr)
 {
        struct thresh_restart *tr = _tr;
@@ -131,6 +149,12 @@ static void threshold_restart_bank(void *_tr)
                    (new_count & THRESHOLD_MAX);
        }
 
+       /* clear IntType */
+       hi &= ~MASK_INT_TYPE_HI;
+
+       if (!tr->b->interrupt_capable)
+               goto done;
+
        if (tr->set_lvt_off) {
                if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
                        /* set new lvt offset */
@@ -139,9 +163,10 @@ static void threshold_restart_bank(void *_tr)
                }
        }
 
-       tr->b->interrupt_enable ?
-           (hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
-           (hi &= ~MASK_INT_TYPE_HI);
+       if (tr->b->interrupt_enable)
+               hi |= INT_TYPE_APIC;
+
+ done:
 
        hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, lo, hi);
@@ -206,14 +231,18 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
                        if (shared_bank[bank] && c->cpu_core_id)
                                break;
 #endif
-                       offset = setup_APIC_mce(offset,
-                                               (high & MASK_LVTOFF_HI) >> 20);
 
                        memset(&b, 0, sizeof(b));
-                       b.cpu           = cpu;
-                       b.bank          = bank;
-                       b.block         = block;
-                       b.address       = address;
+                       b.cpu                   = cpu;
+                       b.bank                  = bank;
+                       b.block                 = block;
+                       b.address               = address;
+                       b.interrupt_capable     = lvt_interrupt_supported(bank, high);
+
+                       if (b.interrupt_capable) {
+                               int new = (high & MASK_LVTOFF_HI) >> 20;
+                               offset  = setup_APIC_mce(offset, new);
+                       }
 
                        mce_threshold_block_init(&b, offset);
                        mce_threshold_vector = amd_threshold_interrupt;
@@ -313,6 +342,9 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
        struct thresh_restart tr;
        unsigned long new;
 
+       if (!b->interrupt_capable)
+               return -EINVAL;
+
        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;
 
@@ -471,6 +503,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
        b->cpu                  = cpu;
        b->address              = address;
        b->interrupt_enable     = 0;
+       b->interrupt_capable    = lvt_interrupt_supported(bank, high);
        b->threshold_limit      = THRESHOLD_MAX;
 
        INIT_LIST_HEAD(&b->miscj);
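
A standalone sketch (illustrative, mirroring the helper added above) of the interrupt-capability test, given the high 32 bits of a bank's MC misc MSR: bank 4 is always treated as capable, otherwise bit 28 (IntP) decides:

/* Illustrative re-statement of lvt_interrupt_supported() outside the kernel. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)  (1U << (n))

static bool lvt_interrupt_supported_like(unsigned int bank, uint32_t msr_high)
{
        if (bank == 4)                  /* bank 4 has always had an LVT interrupt */
                return true;
        return msr_high & BIT(28);      /* IntP: thresholding interrupt present */
}

int main(void)
{
        printf("bank 4, high=0:     %d\n", lvt_interrupt_supported_like(4, 0));
        printf("bank 2, high=bit28: %d\n", lvt_interrupt_supported_like(2, BIT(28)));
        printf("bank 2, high=0:     %d\n", lvt_interrupt_supported_like(2, 0));
        return 0;
}
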
index fe29c1d2219ecfa80d325d856a5db56241213150..4b50c965f0e6a44ca19418cdc0ac37deef6afd24 100644 (file)
@@ -437,6 +437,7 @@ static __initconst const struct x86_pmu amd_pmu = {
  * 0x023       DE      PERF_CTL[2:0]
  * 0x02D       LS      PERF_CTL[3]
  * 0x02E       LS      PERF_CTL[3,0]
+ * 0x031       LS      PERF_CTL[2:0] (**)
  * 0x043       CU      PERF_CTL[2:0]
  * 0x045       CU      PERF_CTL[2:0]
  * 0x046       CU      PERF_CTL[2:0]
@@ -450,10 +451,12 @@ static __initconst const struct x86_pmu amd_pmu = {
  * 0x0DD       LS      PERF_CTL[5:0]
  * 0x0DE       LS      PERF_CTL[5:0]
  * 0x0DF       LS      PERF_CTL[5:0]
+ * 0x1C0       EX      PERF_CTL[5:3]
  * 0x1D6       EX      PERF_CTL[5:0]
  * 0x1D8       EX      PERF_CTL[5:0]
  *
- * (*) depending on the umask all FPU counters may be used
+ * (*)  depending on the umask all FPU counters may be used
+ * (**) only one unitmask enabled at a time
  */
 
 static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
@@ -503,6 +506,12 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
+               case 0x031:
+                       if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
+                               return &amd_f15_PMC20;
+                       return &emptyconstraint;
+               case 0x1C0:
+                       return &amd_f15_PMC53;
                default:
                        return &amd_f15_PMC50;
                }
index bab491b8ee25e37b81d33e8051699f099326c717..d812fe2d02be98bcaddf5a4472cd6ee27435f9a7 100644 (file)
@@ -508,6 +508,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
        unsigned long from = cpuc->lbr_entries[0].from;
        unsigned long old_to, to = cpuc->lbr_entries[0].to;
        unsigned long ip = regs->ip;
+       int is_64bit = 0;
 
        /*
         * We don't need to fixup if the PEBS assist is fault like
@@ -559,7 +560,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
                } else
                        kaddr = (void *)to;
 
-               kernel_insn_init(&insn, kaddr);
+#ifdef CONFIG_X86_64
+               is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+#endif
+               insn_init(&insn, kaddr, is_64bit);
                insn_get_length(&insn);
                to += insn.length;
        } while (to < ip);
index 5c1a91974918d1b6104c9068ed4eef41ff6e30ab..edb3d46c16db3372a1ab3bc53fec17a8c31c2fea 100644 (file)
 #endif
 .endm
 
-#ifdef CONFIG_VM86
-#define resume_userspace_sig   check_userspace
-#else
-#define resume_userspace_sig   resume_userspace
-#endif
-
 /*
  * User gs save/restore
  *
@@ -327,10 +321,19 @@ ret_from_exception:
        preempt_stop(CLBR_ANY)
 ret_from_intr:
        GET_THREAD_INFO(%ebp)
-check_userspace:
+resume_userspace_sig:
+#ifdef CONFIG_VM86
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+#else
+       /*
+        * We can be coming here from a syscall done in the kernel space,
+        * e.g. a failed kernel_execve().
+        */
+       movl PT_CS(%esp), %eax
+       andl $SEGMENT_RPL_MASK, %eax
+#endif
        cmpl $USER_RPL, %eax
        jb resume_kernel                # not returning to v8086 or userspace
 
index 6781765b3a0df0317fc67a0051fb459aa3c15d01..aa083d3507469d3060a89eb555b11e0d435be08a 100644 (file)
@@ -1054,6 +1054,14 @@ int hpet_rtc_timer_init(void)
 }
 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
 
+static void hpet_disable_rtc_channel(void)
+{
+       unsigned long cfg;
+       cfg = hpet_readl(HPET_T1_CFG);
+       cfg &= ~HPET_TN_ENABLE;
+       hpet_writel(cfg, HPET_T1_CFG);
+}
+
 /*
  * The functions below are called from rtc driver.
  * Return 0 if HPET is not being used.
@@ -1065,6 +1073,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
                return 0;
 
        hpet_rtc_flags &= ~bit_mask;
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
+
        return 1;
 }
 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1130,15 +1141,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
 
 static void hpet_rtc_timer_reinit(void)
 {
-       unsigned int cfg, delta;
+       unsigned int delta;
        int lost_ints = -1;
 
-       if (unlikely(!hpet_rtc_flags)) {
-               cfg = hpet_readl(HPET_T1_CFG);
-               cfg &= ~HPET_TN_ENABLE;
-               hpet_writel(cfg, HPET_T1_CFG);
-               return;
-       }
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
 
        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
                delta = hpet_default_delta;
index 5f9ecff328b5f9604eb4d053e21b19c7d2618c88..fc1f48dc9989fdfbacb14deb2a58c03640699a5a 100644 (file)
@@ -43,6 +43,8 @@
 #include <linux/smp.h>
 #include <linux/nmi.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/uaccess.h>
+#include <linux/memory.h>
 
 #include <asm/debugreg.h>
 #include <asm/apicdef.h>
@@ -710,6 +712,64 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
        regs->ip = ip;
 }
 
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+       int err;
+       char opc[BREAK_INSTR_SIZE];
+
+       bpt->type = BP_BREAKPOINT;
+       err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+                               BREAK_INSTR_SIZE);
+       if (err)
+               return err;
+       err = probe_kernel_write((char *)bpt->bpt_addr,
+                                arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+#ifdef CONFIG_DEBUG_RODATA
+       if (!err)
+               return err;
+       /*
+        * It is safe to call text_poke() because normal kernel execution
+        * is stopped on all cores, so long as the text_mutex is not locked.
+        */
+       if (mutex_is_locked(&text_mutex))
+               return -EBUSY;
+       text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+                 BREAK_INSTR_SIZE);
+       err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+       if (err)
+               return err;
+       if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
+               return -EINVAL;
+       bpt->type = BP_POKE_BREAKPOINT;
+#endif /* CONFIG_DEBUG_RODATA */
+       return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+#ifdef CONFIG_DEBUG_RODATA
+       int err;
+       char opc[BREAK_INSTR_SIZE];
+
+       if (bpt->type != BP_POKE_BREAKPOINT)
+               goto knl_write;
+       /*
+        * It is safe to call text_poke() because normal kernel execution
+        * is stopped on all cores, so long as the text_mutex is not locked.
+        */
+       if (mutex_is_locked(&text_mutex))
+               goto knl_write;
+       text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
+       err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+       if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
+               goto knl_write;
+       return err;
+knl_write:
+#endif /* CONFIG_DEBUG_RODATA */
+       return probe_kernel_write((char *)bpt->bpt_addr,
+                                 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+}
+
 struct kgdb_arch arch_kgdb_ops = {
        /* Breakpoint instruction: */
        .gdb_bpt_instr          = { 0xcc },
index f1a6244d7d93769c35d219b79de6358d8a4096e3..794bc95134cd5f45f85345f6a006a9d45442fcb3 100644 (file)
@@ -75,8 +75,10 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
        /*
         * Undefined/reserved opcodes, conditional jump, Opcode Extension
         * Groups, and some special opcodes can not boost.
+        * This is non-const to keep gcc from statically optimizing it out, as
+        * variable_test_bit makes gcc think only *(unsigned long*) is used.
         */
-static const u32 twobyte_is_boostable[256 / 32] = {
+static u32 twobyte_is_boostable[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
        /*      ----------------------------------------------          */
        W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
index c5610384ab167c162f33abbb6da8401b5cbba45d..b727450f5d78fcdca65083704c4e38abc6b307fb 100644 (file)
@@ -298,13 +298,33 @@ free_table:
        return state;
 }
 
+/*
+ * AMD microcode firmware naming convention, up to family 15h they are in
+ * the legacy file:
+ *
+ *    amd-ucode/microcode_amd.bin
+ *
+ * This legacy file is always smaller than 2K in size.
+ *
+ * Starting with family 15h they are in family-specific firmware files:
+ *
+ *    amd-ucode/microcode_amd_fam15h.bin
+ *    amd-ucode/microcode_amd_fam16h.bin
+ *    ...
+ *
+ * These might be larger than 2K.
+ */
 static enum ucode_state request_microcode_amd(int cpu, struct device *device)
 {
-       const char *fw_name = "amd-ucode/microcode_amd.bin";
+       char fw_name[36] = "amd-ucode/microcode_amd.bin";
        const struct firmware *fw;
        enum ucode_state ret = UCODE_NFOUND;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+       if (c->x86 >= 0x15)
+               snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
 
-       if (request_firmware(&fw, fw_name, device)) {
+       if (request_firmware(&fw, (const char *)fw_name, device)) {
                pr_err("failed to load file %s\n", fw_name);
                goto out;
        }
index 9103b89c145a534215824a9b2a7d80aa9e112527..0741b062a3048a6e2b1b5bd0eb4edbbf3d5bb9cf 100644 (file)
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
        }
 #endif
 
+       set_bit(m->busid, mp_bus_not_pci);
        if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
-               set_bit(m->busid, mp_bus_not_pci);
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
                mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
 #endif
index a3d0dc59067be542d7423d2a8abbc99801c0d3f6..fcdb1b34aa1c3a806b089ff77f2b5de91585ddf9 100644 (file)
@@ -293,22 +293,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                                 *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
-       bool preload_fpu;
+       fpu_switch_t fpu;
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-       /*
-        * If the task has used fpu the last 5 timeslices, just do a full
-        * restore of the math state immediately to avoid the trap; the
-        * chances of needing FPU soon are obviously high now
-        */
-       preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-       __unlazy_fpu(prev_p);
-
-       /* we're going to use this soon, after a few expensive things */
-       if (preload_fpu)
-               prefetch(next->fpu.state);
+       fpu = switch_fpu_prepare(prev_p, next_p);
 
        /*
         * Reload esp0.
@@ -348,11 +337,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                __switch_to_xtra(prev_p, next_p, tss);
 
-       /* If we're going to preload the fpu context, make sure clts
-          is run while we're batching the cpu state updates. */
-       if (preload_fpu)
-               clts();
-
        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
@@ -362,15 +346,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         */
        arch_end_context_switch(next_p);
 
-       if (preload_fpu)
-               __math_state_restore();
-
        /*
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
                lazy_load_gs(next->gs);
 
+       switch_fpu_finish(next_p, fpu);
+
        percpu_write(current_task, next_p);
 
        return prev_p;
index 63c8aedbe5b306bbe7b51655ad479c28d6a22b78..eeb50045bfd3eafd141a795a7b20a74b470775a3 100644 (file)
@@ -363,18 +363,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        unsigned fsindex, gsindex;
-       bool preload_fpu;
+       fpu_switch_t fpu;
 
-       /*
-        * If the task has used fpu the last 5 timeslices, just do a full
-        * restore of the math state immediately to avoid the trap; the
-        * chances of needing FPU soon are obviously high now
-        */
-       preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-       /* we're going to use this soon, after a few expensive things */
-       if (preload_fpu)
-               prefetch(next->fpu.state);
+       fpu = switch_fpu_prepare(prev_p, next_p);
 
        /*
         * Reload esp0, LDT and the page table pointer:
@@ -404,13 +395,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        load_TLS(next, cpu);
 
-       /* Must be after DS reload */
-       __unlazy_fpu(prev_p);
-
-       /* Make sure cpu is ready for new context */
-       if (preload_fpu)
-               clts();
-
        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
@@ -451,6 +435,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
        prev->gsindex = gsindex;
 
+       switch_fpu_finish(next_p, fpu);
+
        /*
         * Switch the PDA and FPU contexts.
         */
@@ -469,13 +455,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);
 
-       /*
-        * Preload the FPU context, now that we've determined that the
-        * task is likely to be using it. 
-        */
-       if (preload_fpu)
-               __math_state_restore();
-
        return prev_p;
 }
 
index 9242436e9937e5a4ef91c6baa04eaf2e90243125..d4a705f228315fd0469eaa25be0deb3e77152374 100644 (file)
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
  */
 
 /*
- * Some machines require the "reboot=b"  commandline option,
+ * Some machines require the "reboot=b" or "reboot=k" commandline options,
  * this quirk makes that automatic.
  */
 static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init set_kbd_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_KBD) {
+               reboot_type = BOOT_KBD;
+               printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+       }
+       return 0;
+}
+
 static struct dmi_system_id __initdata reboot_dmi_table[] = {
        {       /* Handle problems with rebooting on Dell E520's */
                .callback = set_bios_reboot,
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
        { /* Handle reboot issue on Acer Aspire one */
-               .callback = set_bios_reboot,
+               .callback = set_kbd_reboot,
                .ident = "Acer Aspire One A110",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
index 71f4727da3735ffc93c2346a2b7712208df9a020..5a98aa27218417737c631cbf4c43e3e50cf69540 100644 (file)
@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
 #endif
        rc = -EINVAL;
        if (pcpu_chosen_fc != PCPU_FC_PAGE) {
-               const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
                const size_t dyn_size = PERCPU_MODULE_RESERVE +
                        PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+               size_t atom_size;
 
+               /*
+                * On 64bit, use PMD_SIZE for atom_size so that embedded
+                * percpu areas are aligned to PMD.  This, in the future,
+                * can also allow using PMD mappings in vmalloc area.  Use
+                * PAGE_SIZE on 32bit as vmalloc space is highly contended
+                * and large vmalloc area allocs can easily fail.
+                */
+#ifdef CONFIG_X86_64
+               atom_size = PMD_SIZE;
+#else
+               atom_size = PAGE_SIZE;
+#endif
                rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                            dyn_size, atom_size,
                                            pcpu_cpu_distance,
index 6bb7b8579e70e0a73a67ddcb9f69c01696bb6413..bcfec2d23769338f55a8f3c4f6c4be26d80230fc 100644 (file)
@@ -163,7 +163,7 @@ int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
 {
        const struct desc_struct *tls;
 
-       if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+       if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
            (count % sizeof(struct user_desc)) != 0)
                return -EINVAL;
@@ -198,7 +198,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
        struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
        const struct user_desc *info;
 
-       if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+       if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
            (count % sizeof(struct user_desc)) != 0)
                return -EINVAL;
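
For context on why > became >=: with GDT_ENTRY_TLS_ENTRIES == 3 and a 16-byte struct user_desc (sizes assumed here, check the x86 headers), valid byte offsets are 0..47, so pos == 48 points past the last descriptor and must be rejected. A small sketch:

/* Illustrative bounds check; the sizes are assumptions, not taken from kernel headers. */
#include <stdio.h>

#define GDT_ENTRY_TLS_ENTRIES   3
#define USER_DESC_SIZE          16      /* assumed sizeof(struct user_desc) on x86 */

static int pos_ok(unsigned int pos)
{
        if (pos >= GDT_ENTRY_TLS_ENTRIES * USER_DESC_SIZE || pos % USER_DESC_SIZE)
                return 0;
        return 1;
}

int main(void)
{
        printf("pos 32: %s\n", pos_ok(32) ? "ok" : "rejected");    /* third entry  */
        printf("pos 48: %s\n", pos_ok(48) ? "ok" : "rejected");    /* past the end */
        return 0;
}
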
index b9b67166f9ded16045308aaab63b1f814b59dcfc..1b26e01047b506eb9d9c196437327bb75fcd1ccc 100644 (file)
@@ -717,25 +717,34 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
 }
 
 /*
- * __math_state_restore assumes that cr0.TS is already clear and the
- * fpu state is all ready for use.  Used during context switch.
+ * This gets called with the process already owning the
+ * FPU state, and with CR0.TS cleared. It just needs to
+ * restore the FPU register state.
  */
-void __math_state_restore(void)
+void __math_state_restore(struct task_struct *tsk)
 {
-       struct thread_info *thread = current_thread_info();
-       struct task_struct *tsk = thread->task;
+       /* We need a safe address that is cheap to find and that is already
+          in L1. We've just brought in "tsk->thread.has_fpu", so use that */
+#define safe_address (tsk->thread.has_fpu)
+
+       /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+          is pending.  Clear the x87 state here by setting it to fixed
+          values. safe_address is a random variable that should be in L1 */
+       alternative_input(
+               ASM_NOP8 ASM_NOP2,
+               "emms\n\t"              /* clear stack tags */
+               "fildl %P[addr]",       /* set F?P to defined value */
+               X86_FEATURE_FXSAVE_LEAK,
+               [addr] "m" (safe_address));
 
        /*
         * Paranoid restore. send a SIGSEGV if we fail to restore the state.
         */
        if (unlikely(restore_fpu_checking(tsk))) {
-               stts();
+               __thread_fpu_end(tsk);
                force_sig(SIGSEGV, tsk);
                return;
        }
-
-       thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
-       tsk->fpu_counter++;
 }
 
 /*
@@ -745,13 +754,12 @@ void __math_state_restore(void)
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
  * Don't touch unless you *really* know how it works.
  *
- * Must be called with kernel preemption disabled (in this case,
- * local interrupts are disabled at the call-site in entry.S).
+ * Must be called with kernel preemption disabled (e.g. with local
+ * interrupts disabled, as in the case of do_device_not_available).
  */
-asmlinkage void math_state_restore(void)
+void math_state_restore(void)
 {
-       struct thread_info *thread = current_thread_info();
-       struct task_struct *tsk = thread->task;
+       struct task_struct *tsk = current;
 
        if (!tsk_used_math(tsk)) {
                local_irq_enable();
@@ -768,9 +776,10 @@ asmlinkage void math_state_restore(void)
                local_irq_disable();
        }
 
-       clts();                         /* Allow maths ops (or we recurse) */
+       __thread_fpu_begin(tsk);
+       __math_state_restore(tsk);
 
-       __math_state_restore();
+       tsk->fpu_counter++;
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
 
index 6cc6922262af7ca285c814eaf13fdf65fd30e033..4406c038a0a8de32835603eb23ac2e9cfd31ca11 100644 (file)
@@ -623,7 +623,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 
        if (cpu_khz) {
                *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
-               *offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR);
+               *offset = ns_now - mult_frac(tsc_now, *scale,
+                                            (1UL << CYC2NS_SCALE_FACTOR));
        }
 
        sched_clock_idle_wakeup_event(0);
@@ -956,6 +957,16 @@ static int __init init_tsc_clocksource(void)
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
        }
+
+       /*
+        * Trust the results of the earlier calibration on systems
+        * exporting a reliable TSC.
+        */
+       if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+               clocksource_register_khz(&clocksource_tsc, tsc_khz);
+               return 0;
+       }
+
        schedule_delayed_work(&tsc_irqwork, 0);
        return 0;
 }
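
The mult_frac() change in set_cyc2ns_scale() avoids the 64-bit overflow that tsc_now * *scale >> CYC2NS_SCALE_FACTOR can hit once the TSC grows large. A minimal sketch of the identity it relies on, x*n/d == (x/d)*n + ((x%d)*n)/d, using a simplified re-definition of the macro and made-up numbers rather than real calibration data:

/* Illustrative: why mult_frac(x, n, d) survives where (x * n) >> shift wraps. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified re-statement of the identity used by the kernel's mult_frac(). */
#define mult_frac(x, n, d)      ((x) / (d) * (n) + (x) % (d) * (n) / (d))

int main(void)
{
        uint64_t tsc   = 0x0123456789abcdefULL; /* a large TSC reading        */
        uint64_t scale = 3 << 10;               /* made-up cyc2ns scale       */
        uint64_t d     = 1ULL << 10;            /* a shift of 10 is assumed   */

        uint64_t naive = (tsc * scale) >> 10;   /* tsc * scale wraps at 2^64  */
        uint64_t exact = mult_frac(tsc, scale, d);

        printf("naive = %" PRIu64 "\nexact = %" PRIu64 "\n", naive, exact);
        return 0;
}
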
index 863f8753ab0ae696f8981ef30d9ee031dd0e310b..04b87269edfbcfeb84c63402bf4534457a12de5e 100644 (file)
@@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
        spinlock_t *ptl;
        int i;
 
+       down_write(&mm->mmap_sem);
        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
        }
        pte_unmap_unlock(pte, ptl);
 out:
+       up_write(&mm->mmap_sem);
        flush_tlb();
 }
 
index a3911343976b8ae50040228b7993d5d90773002f..711091114119532a1c9d2dff819da9c41e8a39b9 100644 (file)
@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
        if (!fx)
                return;
 
-       BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU);
+       BUG_ON(__thread_has_fpu(tsk));
 
        xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
 
@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
        if (!used_math())
                return 0;
 
-       if (task_thread_info(tsk)->status & TS_USEDFPU) {
+       if (user_has_fpu()) {
                if (use_xsave())
                        err = xsave_user(buf);
                else
@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
 
                if (err)
                        return err;
-               task_thread_info(tsk)->status &= ~TS_USEDFPU;
-               stts();
+               user_fpu_end();
        } else {
                sanitize_i387_state(tsk);
                if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
                        return err;
        }
 
-       if (!(task_thread_info(current)->status & TS_USEDFPU)) {
-               clts();
-               task_thread_info(current)->status |= TS_USEDFPU;
-       }
+       user_fpu_begin();
        if (use_xsave())
                err = restore_user_xstate(buf);
        else
index adc98675cda03505cff59be011df491cd271072a..3e7d9138dd2ef75923987a6c5ae7a0bb133b60c9 100644 (file)
@@ -1901,6 +1901,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
        ss->p = 1;
 }
 
+static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
+{
+       struct x86_emulate_ops *ops = ctxt->ops;
+       u32 eax, ebx, ecx, edx;
+
+       /*
+        * syscall should always be enabled in longmode - so only become
+        * syscall should always be enabled in longmode - so we only need
+        * the vendor-specific (cpuid) check if other modes are active...
+       if (ctxt->mode == X86EMUL_MODE_PROT64)
+               return true;
+
+       eax = 0x00000000;
+       ecx = 0x00000000;
+       if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
+               /*
+                * Intel ("GenuineIntel")
+                * remark: Intel CPUs only support "syscall" in 64bit
+                * longmode. Also, a 64bit guest with a 32bit
+                * compat-app running will #UD! While this behaviour
+                * can be fixed (by emulating) to match the AMD
+                * response, AMD CPUs can't behave like Intel ones.
+                */
+               if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
+                   ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
+                   edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
+                       return false;
+
+               /* AMD ("AuthenticAMD") */
+               if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
+                   ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
+                   edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+                       return true;
+
+               /* AMD ("AMDisbetter!") */
+               if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
+                   ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
+                   edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
+                       return true;
+       }
+
+       /* default: (not Intel, not AMD), apply Intel's stricter rules... */
+       return false;
+}
+
 static int
 emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
@@ -1915,9 +1960,15 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
            ctxt->mode == X86EMUL_MODE_VM86)
                return emulate_ud(ctxt);
 
+       if (!(em_syscall_is_enabled(ctxt)))
+               return emulate_ud(ctxt);
+
        ops->get_msr(ctxt, MSR_EFER, &efer);
        setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
+       if (!(efer & EFER_SCE))
+               return emulate_ud(ctxt);
+
        ops->get_msr(ctxt, MSR_STAR, &msr_data);
        msr_data >>= 32;
        cs_sel = (u16)(msr_data & 0xfffc);
index d48ec60ea421a8211271195db0193bc42a9a7532..2ad060acc445cba4080afeaed95e0c7b4e7c9ecf 100644 (file)
@@ -948,7 +948,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
        wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-       if (current_thread_info()->status & TS_USEDFPU)
+       if (__thread_has_fpu(current))
                clts();
        load_gdt(&__get_cpu_var(host_gdt));
 }
index 77c9d8673dc40430a2d5cd609ad0d30dee105621..fbb093601b5a7c215d828b97ee3cd7e3b5456bb4 100644 (file)
@@ -4407,6 +4407,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
        return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
 }
 
+static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
+                              u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+{
+       struct kvm_cpuid_entry2 *cpuid = NULL;
+
+       if (eax && ecx)
+               cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
+                                           *eax, *ecx);
+
+       if (cpuid) {
+               *eax = cpuid->eax;
+               *ecx = cpuid->ecx;
+               if (ebx)
+                       *ebx = cpuid->ebx;
+               if (edx)
+                       *edx = cpuid->edx;
+               return true;
+       }
+
+       return false;
+}
+
 static struct x86_emulate_ops emulate_ops = {
        .read_std            = kvm_read_guest_virt_system,
        .write_std           = kvm_write_guest_virt_system,
@@ -4437,6 +4459,7 @@ static struct x86_emulate_ops emulate_ops = {
        .get_fpu             = emulator_get_fpu,
        .put_fpu             = emulator_put_fpu,
        .intercept           = emulator_intercept,
+       .get_cpuid           = emulator_get_cpuid,
 };
 
 static void cache_all_regs(struct kvm_vcpu *vcpu)
index fc45ba887d051e504dd592be40ec2e78d70eea33..e395693abdb16068cd2b268f258d9c79d3d0f3f3 100644 (file)
@@ -48,9 +48,9 @@ static void delay_loop(unsigned long loops)
 }
 
 /* TSC based delay: */
-static void delay_tsc(unsigned long loops)
+static void delay_tsc(unsigned long __loops)
 {
-       unsigned long bclock, now;
+       u32 bclock, now, loops = __loops;
        int cpu;
 
        preempt_disable();
index dbe34b9313743f1cae72ac28aee5c99e5d2c9369..dd74e46828c0fc243740b61a18c2dea654fafb5e 100644 (file)
@@ -108,16 +108,6 @@ static inline void get_head_page_multiple(struct page *page, int nr)
        SetPageReferenced(page);
 }
 
-static inline void get_huge_page_tail(struct page *page)
-{
-       /*
-        * __split_huge_page_refcount() cannot run
-        * from under us.
-        */
-       VM_BUG_ON(atomic_read(&page->_count) < 0);
-       atomic_inc(&page->_count);
-}
-
 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
 {
@@ -211,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
+               if (PageTail(page))
+                       get_huge_page_tail(page);
                (*nr)++;
                page++;
                refs++;
index b49962662101a0cf7361f0035e1b017333efc22a..f4f29b19fac5f2cc7c46023ef86c02a66b137e8e 100644 (file)
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
+       arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
 }
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
                kmap_atomic_idx_pop();
+               arch_flush_lazy_mmu_mode();
        }
 #ifdef CONFIG_DEBUG_HIGHMEM
        else {
index 1dab5194fd9df9f6bead4b0e09d154bd2b26e178..f927429d07ca57e0eb9a43579c27453d2050705b 100644 (file)
@@ -87,9 +87,9 @@ static unsigned long mmap_rnd(void)
        */
        if (current->flags & PF_RANDOMIZE) {
                if (mmap_is_ia32())
-                       rnd = (long)get_random_int() % (1<<8);
+                       rnd = get_random_int() % (1<<8);
                else
-                       rnd = (long)(get_random_int() % (1<<28));
+                       rnd = get_random_int() % (1<<28);
        }
        return rnd << PAGE_SHIFT;
 }
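
Dropping the signed cast keeps the modulo from ever going negative; for scale (page size assumed 4 KiB), the intended randomization windows are 1 MiB for 32-bit tasks and 1 TiB for 64-bit tasks:

/* Illustrative: size of the mmap base randomization window, 4 KiB pages assumed. */
#include <stdio.h>

int main(void)
{
        unsigned int page_shift = 12;                           /* PAGE_SHIFT on x86 */
        unsigned long long span32 = (1ULL << 8)  << page_shift;
        unsigned long long span64 = (1ULL << 28) << page_shift;

        printf("32-bit window: %llu MiB\n", span32 >> 20);      /* 1 MiB            */
        printf("64-bit window: %llu GiB\n", span64 >> 30);      /* 1024 GiB (1 TiB) */
        return 0;
}
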
index 81dbfdeb080db2e42ff61cd16a3fb63de23085db..7efd0c615d58d9189c723fdde0e52838a320a6a3 100644 (file)
@@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain_lo;
+       if (acpi_srat_revision >= 2)
+               pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
        start = ma->base_address;
        end = start + ma->length;
        pxm = ma->proximity_domain;
+       if (acpi_srat_revision <= 1)
+               pxm &= 0xff;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
index bfab3fa10edc63e50b4184d00e75566678242a0d..5a5b6e4dd7386586b5a9e8d5559b6b104f48d447 100644 (file)
@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
        cleanup_addr = proglen; /* epilogue address */
 
        for (pass = 0; pass < 10; pass++) {
+               u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
                /* no prologue/epilogue for trivial filters (RET something) */
                proglen = 0;
                prog = temp;
 
-               if (seen) {
+               if (seen_or_pass0) {
                        EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
                        EMIT4(0x48, 0x83, 0xec, 96);    /* subq  $96,%rsp       */
                        /* note : must save %rbx in case bpf_error is hit */
-                       if (seen & (SEEN_XREG | SEEN_DATAREF))
+                       if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
                                EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
-                       if (seen & SEEN_XREG)
+                       if (seen_or_pass0 & SEEN_XREG)
                                CLEAR_X(); /* make sure we dont leek kernel memory */
 
                        /*
@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                         *  r9 = skb->len - skb->data_len
                         *  r8 = skb->data
                         */
-                       if (seen & SEEN_DATAREF) {
+                       if (seen_or_pass0 & SEEN_DATAREF) {
                                if (offsetof(struct sk_buff, len) <= 127)
                                        /* mov    off8(%rdi),%r9d */
                                        EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
                        case BPF_S_ALU_DIV_X: /* A /= X; */
                                seen |= SEEN_XREG;
                                EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
-                               if (pc_ret0 != -1)
-                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
-                               else {
+                               if (pc_ret0 > 0) {
+                                       /* addrs[pc_ret0 - 1] is start address of target
+                                        * (addrs[i] - 4) is the address following this jmp
+                                        * ("xor %edx,%edx; div %ebx" being 4 bytes long)
+                                        */
+                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+                                                               (addrs[i] - 4));
+                               } else {
                                        EMIT_COND_JMP(X86_JNE, 2 + 5);
                                        CLEAR_A();
                                        EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
@@ -283,7 +289,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                                        EMIT2(0x24, K & 0xFF); /* and imm8,%al */
                                } else if (K >= 0xFFFF0000) {
                                        EMIT2(0x66, 0x25);      /* and imm16,%ax */
-                                       EMIT2(K, 2);
+                                       EMIT(K, 2);
                                } else {
                                        EMIT1_off32(0x25, K);   /* and imm32,%eax */
                                }
@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
                                }
                                /* fallinto */
                        case BPF_S_RET_A:
-                               if (seen) {
+                               if (seen_or_pass0) {
                                        if (i != flen - 1) {
                                                EMIT_JMP(cleanup_addr - addrs[i]);
                                                break;
                                        }
-                                       if (seen & SEEN_XREG)
+                                       if (seen_or_pass0 & SEEN_XREG)
                                                EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
                                        EMIT1(0xc9);            /* leaveq */
                                }
@@ -469,8 +475,10 @@ void bpf_jit_compile(struct sk_filter *fp)
                        case BPF_S_LD_W_ABS:
                                func = sk_load_word;
 common_load:                   seen |= SEEN_DATAREF;
-                               if ((int)K < 0)
+                               if ((int)K < 0) {
+                                       /* Abort the JIT because __load_pointer() is needed. */
                                        goto out;
+                               }
                                t_offset = func - (image + addrs[i]);
                                EMIT1_off32(0xbe, K); /* mov imm32,%esi */
                                EMIT1_off32(0xe8, t_offset); /* call */
@@ -483,13 +491,8 @@ common_load:                       seen |= SEEN_DATAREF;
                                goto common_load;
                        case BPF_S_LDX_B_MSH:
                                if ((int)K < 0) {
-                                       if (pc_ret0 != -1) {
-                                               EMIT_JMP(addrs[pc_ret0] - addrs[i]);
-                                               break;
-                                       }
-                                       CLEAR_A();
-                                       EMIT_JMP(cleanup_addr - addrs[i]);
-                                       break;
+                                       /* Abort the JIT because __load_pointer() is needed. */
+                                       goto out;
                                }
                                seen |= SEEN_DATAREF | SEEN_XREG;
                                t_offset = sk_load_byte_msh - (image + addrs[i]);
@@ -568,8 +571,8 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                                        break;
                                }
                                if (filter[i].jt != 0) {
-                                       if (filter[i].jf)
-                                               t_offset += is_near(f_offset) ? 2 : 6;
+                                       if (filter[i].jf && f_offset)
+                                               t_offset += is_near(f_offset) ? 2 : 5;
                                        EMIT_COND_JMP(t_op, t_offset);
                                        if (filter[i].jf)
                                                EMIT_JMP(f_offset);
@@ -599,13 +602,14 @@ cond_branch:                      f_offset = addrs[i + filter[i].jf] - addrs[i];
                 * use it to give the cleanup instruction(s) addr
                 */
                cleanup_addr = proglen - 1; /* ret */
-               if (seen)
+               if (seen_or_pass0)
                        cleanup_addr -= 1; /* leaveq */
-               if (seen & SEEN_XREG)
+               if (seen_or_pass0 & SEEN_XREG)
                        cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */
 
                if (image) {
-                       WARN_ON(proglen != oldproglen);
+                       if (proglen != oldproglen)
+                               pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
                        break;
                }
                if (proglen == oldproglen) {
index cdfe4c54decac05e4943a00e27803f78898b6419..f148cf65267836d66e1fa666d612dca5669950c3 100644 (file)
@@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops);
 extern void op_nmi_exit(void);
 extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
 
+static int nmi_timer;
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
@@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 #ifdef CONFIG_X86_LOCAL_APIC
        ret = op_nmi_init(ops);
 #endif
+       nmi_timer = (ret != 0);
 #ifdef CONFIG_X86_IO_APIC
-       if (ret < 0)
+       if (nmi_timer)
                ret = op_nmi_timer_init(ops);
 #endif
        ops->backtrace = x86_backtrace;
@@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 void oprofile_arch_exit(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
-       op_nmi_exit();
+       if (!nmi_timer)
+               op_nmi_exit();
 #endif
 }
index 6b8759f7634e661de3983dbc7e6accb26e939a8c..d24d3da72926c19102b1723f9241c7ee92627efe 100644 (file)
@@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ)               += numaq_32.o
 obj-$(CONFIG_X86_MRST)         += mrst.o
 
 obj-y                          += common.o early.o
-obj-y                          += amd_bus.o bus_numa.o
+obj-y                          += bus_numa.o
 
+obj-$(CONFIG_AMD_NB)           += amd_bus.o
 obj-$(CONFIG_PCI_CNB20LE_QUIRK)        += broadcom_bus.o
 
 ifeq ($(CONFIG_PCI_DEBUG),y)
index 50b3f14c59a16a3c77e4194702a7265705113b1a..0473a8f935012901c65426ed03b91b48b1b59ea2 100644 (file)
@@ -54,6 +54,16 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
                },
        },
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */
+       {
+               .callback = set_use_crs,
+               .ident = "MSI MS-7253",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+                       DMI_MATCH(DMI_BOARD_NAME, "MS-7253"),
+                       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+               },
+       },
        {}
 };
 
@@ -149,7 +159,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
        struct acpi_resource_address64 addr;
        acpi_status status;
        unsigned long flags;
-       u64 start, end;
+       u64 start, orig_end, end;
 
        status = resource_to_addr(acpi_res, &addr);
        if (!ACPI_SUCCESS(status))
@@ -165,7 +175,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
                return AE_OK;
 
        start = addr.minimum + addr.translation_offset;
-       end = addr.maximum + addr.translation_offset;
+       orig_end = end = addr.maximum + addr.translation_offset;
+
+       /* Exclude non-addressable range or non-addressable portion of range */
+       end = min(end, (u64)iomem_resource.end);
+       if (end <= start) {
+               dev_info(&info->bridge->dev,
+                       "host bridge window [%#llx-%#llx] "
+                       "(ignored, not CPU addressable)\n", start, orig_end);
+               return AE_OK;
+       } else if (orig_end != end) {
+               dev_info(&info->bridge->dev,
+                       "host bridge window [%#llx-%#llx] "
+                       "([%#llx-%#llx] ignored, not CPU addressable)\n",
+                       start, orig_end, end + 1, orig_end);
+       }
 
        res = &info->res[info->res_num];
        res->name = info->name;
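
The clamping added above is easy to reason about in isolation; a hedged sketch with made-up addresses, where the real limit would come from iomem_resource.end:

/* Illustrative: clamp a host bridge window to the CPU-addressable limit. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t limit = (1ULL << 36) - 1;      /* pretend 36 physical address bits */
        uint64_t start = 0xfd00000000ULL;       /* window reported by ACPI _CRS     */
        uint64_t end   = 0xffffffffffULL;
        uint64_t clamped = end < limit ? end : limit;

        if (clamped <= start)
                printf("window [%#llx-%#llx] ignored, not CPU addressable\n",
                       (unsigned long long)start, (unsigned long long)end);
        else if (clamped != end)
                printf("window truncated to [%#llx-%#llx]\n",
                       (unsigned long long)start, (unsigned long long)clamped);
        else
                printf("window [%#llx-%#llx] usable\n",
                       (unsigned long long)start, (unsigned long long)end);
        return 0;
}
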
index 026e4931d16259fe624eef5b5bb269436ad3e81e..385a940b542231cb1cf846fa17310aacf1bbc382 100644 (file)
@@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
        { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
 };
 
-static u64 __initdata fam10h_mmconf_start;
-static u64 __initdata fam10h_mmconf_end;
-static void __init get_pci_mmcfg_amd_fam10h_range(void)
-{
-       u32 address;
-       u64 base, msr;
-       unsigned segn_busn_bits;
-
-       /* assume all cpus from fam10h have mmconf */
-        if (boot_cpu_data.x86 < 0x10)
-               return;
-
-       address = MSR_FAM10H_MMIO_CONF_BASE;
-       rdmsrl(address, msr);
-
-       /* mmconfig is not enable */
-       if (!(msr & FAM10H_MMIO_CONF_ENABLE))
-               return;
-
-       base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
-
-       segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
-                        FAM10H_MMIO_CONF_BUSRANGE_MASK;
-
-       fam10h_mmconf_start = base;
-       fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
-}
-
 #define RANGE_NUM 16
 
 /**
@@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
        u64 val;
        u32 address;
        bool found;
+       struct resource fam10h_mmconf_res, *fam10h_mmconf;
+       u64 fam10h_mmconf_start;
+       u64 fam10h_mmconf_end;
 
        if (!early_pci_allowed())
                return -1;
@@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
                subtract_range(range, RANGE_NUM, 0, end);
 
        /* get mmconfig */
-       get_pci_mmcfg_amd_fam10h_range();
+       fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
        /* need to take out mmconf range */
-       if (fam10h_mmconf_end) {
-               printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
+       if (fam10h_mmconf) {
+               printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
+               fam10h_mmconf_start = fam10h_mmconf->start;
+               fam10h_mmconf_end = fam10h_mmconf->end;
                subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
                                 fam10h_mmconf_end + 1);
+       } else {
+               fam10h_mmconf_start = 0;
+               fam10h_mmconf_end = 0;
        }
 
        /* mmio resource */
index f567965c06201cf0315605cde43c6eaa9bb228a5..6e96e65e7caaa4d1605a04020d918dcd6d8f4f62 100644 (file)
@@ -308,7 +308,7 @@ int __init pci_xen_init(void)
 
 int __init pci_xen_hvm_init(void)
 {
-       if (!xen_feature(XENFEAT_hvm_pirqs))
+       if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
                return 0;
 
 #ifdef CONFIG_ACPI
index 7000e74b30877bea018f46946f8d99b1275e7624..fe73276e026bf263f494a917c84c6a3fcaeaaeda 100644 (file)
@@ -678,36 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
        pentry = (struct sfi_device_table_entry *)sb->pentry;
 
        for (i = 0; i < num; i++, pentry++) {
-               if (pentry->irq != (u8)0xff) { /* native RTE case */
+               int irq = pentry->irq;
+
+               if (irq != (u8)0xff) { /* native RTE case */
                        /* these SPI2 devices are not exposed to system as PCI
                         * devices, but they have separate RTE entry in IOAPIC
                         * so we have to enable them one by one here
                         */
-                       ioapic = mp_find_ioapic(pentry->irq);
+                       ioapic = mp_find_ioapic(irq);
                        irq_attr.ioapic = ioapic;
-                       irq_attr.ioapic_pin = pentry->irq;
+                       irq_attr.ioapic_pin = irq;
                        irq_attr.trigger = 1;
                        irq_attr.polarity = 1;
-                       io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
-               }
+                       io_apic_set_pci_routing(NULL, irq, &irq_attr);
+               } else
+                       irq = 0; /* No irq */
+
                switch (pentry->type) {
                case SFI_DEV_TYPE_IPC:
                        /* ID as IRQ is a hack that will go away */
-                       pdev = platform_device_alloc(pentry->name, pentry->irq);
+                       pdev = platform_device_alloc(pentry->name, irq);
                        if (pdev == NULL) {
                                pr_err("out of memory for SFI platform device '%s'.\n",
                                                        pentry->name);
                                continue;
                        }
-                       install_irq_resource(pdev, pentry->irq);
+                       install_irq_resource(pdev, irq);
                        pr_debug("info[%2d]: IPC bus, name = %16.16s, "
-                               "irq = 0x%2x\n", i, pentry->name, pentry->irq);
+                               "irq = 0x%2x\n", i, pentry->name, irq);
                        sfi_handle_ipc_dev(pdev);
                        break;
                case SFI_DEV_TYPE_SPI:
                        memset(&spi_info, 0, sizeof(spi_info));
                        strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
-                       spi_info.irq = pentry->irq;
+                       spi_info.irq = irq;
                        spi_info.bus_num = pentry->host_num;
                        spi_info.chip_select = pentry->addr;
                        spi_info.max_speed_hz = pentry->max_freq;
@@ -724,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
                        memset(&i2c_info, 0, sizeof(i2c_info));
                        bus = pentry->host_num;
                        strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
-                       i2c_info.irq = pentry->irq;
+                       i2c_info.irq = irq;
                        i2c_info.addr = pentry->addr;
                        pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
                                "irq = 0x%2x, addr = 0x%x\n", i, bus,
index 68e467f69fec8b022446de87ff60d4327b38f893..edf435b74e85be1d57b6b5e43abaec8a4a20f614 100644 (file)
@@ -115,9 +115,6 @@ early_param("nobau", setup_nobau);
 
 /* base pnode in this partition */
 static int uv_base_pnode __read_mostly;
-/* position of pnode (which is nasid>>1): */
-static int uv_nshift __read_mostly;
-static unsigned long uv_mmask __read_mostly;
 
 static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
@@ -1426,7 +1423,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 {
        int i;
        int cpu;
-       unsigned long pa;
+       unsigned long gpa;
        unsigned long m;
        unsigned long n;
        size_t dsize;
@@ -1442,9 +1439,9 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
        bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
        BUG_ON(!bau_desc);
 
-       pa = uv_gpa(bau_desc); /* need the real nasid*/
-       n = pa >> uv_nshift;
-       m = pa & uv_mmask;
+       gpa = uv_gpa(bau_desc);
+       n = uv_gpa_to_gnode(gpa);
+       m = uv_gpa_to_offset(gpa);
 
        /* the 14-bit pnode */
        write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
@@ -1516,9 +1513,9 @@ static void pq_init(int node, int pnode)
                bcp->queue_last         = pqp + (DEST_Q_SIZE - 1);
        }
        /*
-        * need the pnode of where the memory was really allocated
+        * need the gnode of where the memory was really allocated
         */
-       pn = uv_gpa(pqp) >> uv_nshift;
+       pn = uv_gpa_to_gnode(uv_gpa(pqp));
        first = uv_physnodeaddr(pqp);
        pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
        last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
@@ -1578,14 +1575,14 @@ static int calculate_destination_timeout(void)
                ts_ns = base * mult1 * mult2;
                ret = ts_ns / 1000;
        } else {
-               /* 4 bits  0/1 for 10/80us, 3 bits of multiplier */
-               mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+               /* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
+               mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
                mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
                if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
-                       mult1 = 80;
+                       base = 80;
                else
-                       mult1 = 10;
-               base = mmr_image & UV2_ACK_MASK;
+                       base = 10;
+               mult1 = mmr_image & UV2_ACK_MASK;
                ret = mult1 * base;
        }
        return ret;
@@ -1812,8 +1809,6 @@ static int __init uv_bau_init(void)
                zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
        }
 
-       uv_nshift = uv_hub_info->m_val;
-       uv_mmask = (1UL << uv_hub_info->m_val) - 1;
        nuvhubs = uv_num_possible_blades();
        spin_lock_init(&disable_lock);
        congested_cycles = usec_2_cycles(congested_respns_us);
@@ -1825,6 +1820,8 @@ static int __init uv_bau_init(void)
                        uv_base_pnode = uv_blade_to_pnode(uvhub);
        }
 
+       enable_timeouts();
+
        if (init_per_cpu(nuvhubs, uv_base_pnode)) {
                nobau = 1;
                return 0;
@@ -1835,7 +1832,6 @@ static int __init uv_bau_init(void)
                if (uv_blade_nr_possible_cpus(uvhub))
                        init_uvhub(uvhub, vector, uv_base_pnode);
 
-       enable_timeouts();
        alloc_intr_gate(vector, uv_bau_message_intr1);
 
        for_each_possible_blade(uvhub) {
index 67d69f1e2b734697c53f6ccc99b3066697d3a46a..8385d1d33f3160ab527db1aceb2a94332fe2740c 100644 (file)
@@ -62,6 +62,7 @@
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
+#include <asm/pci_x86.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -197,6 +198,9 @@ static void __init xen_banner(void)
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
 }
 
+#define CPUID_THERM_POWER_LEAF 6
+#define APERFMPERF_PRESENT 0
+
 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
 
@@ -217,6 +221,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
                maskedx = cpuid_leaf1_edx_mask;
                break;
 
+       case CPUID_THERM_POWER_LEAF:
+               /* Disabling APERFMPERF for kernel usage */
+               maskecx = ~(1 << APERFMPERF_PRESENT);
+               break;
+
        case 0xb:
                /* Suppress extended topology stuff */
                maskebx = 0;
@@ -1259,8 +1268,10 @@ asmlinkage void __init xen_start_kernel(void)
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }
-               
-
+#ifdef CONFIG_PCI
+       /* PCI BIOS service won't work from a PV guest. */
+       pci_probe &= ~PCI_PROBE_BIOS;
+#endif
        xen_raw_console_write("about to get started...\n");
 
        xen_setup_runstate_info(0);
@@ -1337,7 +1348,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
        int cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
-               per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+               xen_vcpu_setup(cpu);
                if (xen_have_vector_callback)
                        xen_init_lock_cpu(cpu);
                break;
@@ -1367,7 +1378,6 @@ static void __init xen_hvm_guest_init(void)
        xen_hvm_smp_init();
        register_cpu_notifier(&xen_hvm_cpu_notifier);
        xen_unplug_emulated_devices();
-       have_vcpu_info_placement = 0;
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
        xen_hvm_init_mmu_ops();
index 5f76c0acb2c7116e6884f27743884e6b9497c52c..d957dce61ede0e7c8c6a6acf4ebfdfa783d474fa 100644 (file)
@@ -320,8 +320,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
        if (val & _PAGE_PRESENT) {
                unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+               unsigned long pfn = mfn_to_pfn(mfn);
+
                pteval_t flags = val & PTE_FLAGS_MASK;
-               val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+               if (unlikely(pfn == ~0))
+                       val = flags & ~_PAGE_PRESENT;
+               else
+                       val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
        }
 
        return val;
index acea42ee4eed1bbc65b48c92e2cb0b569dd68c1b..f8dcda4931461ae5c633567b4e1fb63b836cab7b 100644 (file)
@@ -192,9 +192,21 @@ static unsigned long __init xen_get_max_pages(void)
        domid_t domid = DOMID_SELF;
        int ret;
 
-       ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
-       if (ret > 0)
-               max_pages = ret;
+       /*
+        * For the initial domain we use the maximum reservation as
+        * the maximum page count.
+        *
+        * For guest domains the current maximum reservation reflects
+        * the current maximum rather than the static maximum. In this
+        * case the e820 map provided to us will cover the static
+        * maximum region.
+        */
+       if (xen_initial_domain()) {
+               ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+               if (ret > 0)
+                       max_pages = ret;
+       }
+
        return min(max_pages, MAX_DOMAIN_PAGES);
 }
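
Put differently, only the initial domain trusts XENMEM_maximum_reservation here; ordinary guests fall back to the e820-derived limit because their current reservation can be smaller than the static maximum. A simplified standalone model (the 128 GB cap is an assumption standing in for MAX_DOMAIN_PAGES, and 'reservation' stands in for the hypercall result):

#include <stdint.h>

#define ASSUMED_MAX_DOMAIN_PAGES ((128ULL << 30) >> 12)  /* 128 GB of 4 KiB pages */

/* Simplified model of the decision above; reservation <= 0 means "no answer". */
static uint64_t model_max_pages(int initial_domain, long long reservation)
{
        uint64_t max_pages = ASSUMED_MAX_DOMAIN_PAGES;

        if (initial_domain && reservation > 0)
                max_pages = (uint64_t)reservation;

        return max_pages < ASSUMED_MAX_DOMAIN_PAGES ?
               max_pages : ASSUMED_MAX_DOMAIN_PAGES;
}
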
 
index d4fc6d454f8d0dced29f592dd0b33407c5088b92..2843b5e7cf078cf2e2d90717ec281cd875e554dd 100644 (file)
@@ -172,6 +172,7 @@ static void __init xen_fill_possible_map(void)
 static void __init xen_filter_cpu_maps(void)
 {
        int i, rc;
+       unsigned int subtract = 0;
 
        if (!xen_initial_domain())
                return;
@@ -186,8 +187,22 @@ static void __init xen_filter_cpu_maps(void)
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
+                       subtract++;
                }
        }
+#ifdef CONFIG_HOTPLUG_CPU
+       /* This is akin to using 'nr_cpus' on the Linux command line.
+        * Which is OK as when we use 'dom0_max_vcpus=X' we can only
+        * have up to X, while nr_cpu_ids is greater than X. This
+        * normally is not a problem, except when CPU hotplugging
+        * is involved and then there might be more than X CPUs
+        * in the guest - which will not work as there is no
+        * hypercall to expand the max number of VCPUs an already
+        * running guest has. So cap it up to X. */
+       if (subtract)
+               nr_cpu_ids = nr_cpu_ids - subtract;
+#endif
+
 }
 
 static void __init xen_smp_prepare_boot_cpu(void)
index 79d7362ad6d1f0e0a6fab6aad37700e2a5dd038a..3e45aa000718aa2cd63d78f5c849e42666905d0e 100644 (file)
@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct)
 
        /* check for unmasked and pending */
        cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-       jz 1f
+       jnz 1f
 2:     call check_events
 1:
 ENDPATCH(xen_restore_fl_direct)
index 249d1a06e3ad7e9c20f6b5613ea6030fb195d457..44f67b52c399c7b60bdea6f043137829c70e7a71 100755 (executable)
@@ -418,6 +418,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        q->backing_dev_info.name = "block";
+       q->node = node_id;
 
        err = bdi_init(&q->backing_dev_info);
        if (err) {
@@ -502,7 +503,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        if (!uninit_q)
                return NULL;
 
-       q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+       q = blk_init_allocated_queue(uninit_q, rfn, lock);
        if (!q)
                blk_cleanup_queue(uninit_q);
 
@@ -513,19 +514,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
                         spinlock_t *lock)
-{
-       return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-                             spinlock_t *lock, int node_id)
 {
        if (!q)
                return NULL;
 
-       q->node = node_id;
        if (blk_init_free_list(q))
                return NULL;
 
@@ -555,7 +547,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
        return NULL;
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
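
The net effect of the blk-core hunks above is an API consolidation: the NUMA node is recorded once by blk_alloc_queue_node(), so the *_node variant of blk_init_allocated_queue() goes away and blk_init_allocated_queue() itself becomes the single exported entry point. A hypothetical caller keeps using the unchanged blk_init_queue_node() wrapper, for example:

#include <linux/blkdev.h>

/* Hypothetical driver fragment: the node id passed here is now stored by
 * blk_alloc_queue_node() inside blk_init_queue_node(), not by a separate
 * blk_init_allocated_queue_node() step. */
static struct request_queue *example_create_queue(request_fn_proc *fn,
                                                  spinlock_t *lock, int node)
{
        return blk_init_queue_node(fn, lock, node);
}
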
index e663ac2d8e68f70ff17ce274f3cebec16c1dd18c..164cd0059706214e53c01b780b353fca98d9b829 100644 (file)
@@ -204,10 +204,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                if (!iov[i].iov_len)
                        return -EINVAL;
 
-               if (uaddr & queue_dma_alignment(q)) {
+               /*
+                * Keep going so we check length of all segments
+                */
+               if (uaddr & queue_dma_alignment(q))
                        unaligned = 1;
-                       break;
-               }
        }
 
        if (unaligned || (q->dma_pad_mask & len) || map_data)
index 0c8b64a16484721308587c7a6b635344a9144a1e..792ead66675750785c1d63db11dc21303c6dbcf5 100644 (file)
@@ -985,7 +985,8 @@ void bsg_unregister_queue(struct request_queue *q)
 
        mutex_lock(&bsg_mutex);
        idr_remove(&bsg_minor_idr, bcd->minor);
-       sysfs_remove_link(&q->kobj, "bsg");
+       if (q->kobj.sd)
+               sysfs_remove_link(&q->kobj, "bsg");
        device_unregister(bcd->class_dev);
        bcd->class_dev = NULL;
        kref_put(&bcd->ref, bsg_kref_release_function);
index ae21919f15e1edf2ea6880efc6d5d43deae7fd9f..23500ac7f0f33b4a016b1743f32d01e47f53c8e5 100644 (file)
@@ -3169,7 +3169,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
                }
        }
 
-       if (ret)
+       if (ret && ret != -EEXIST)
                printk(KERN_ERR "cfq: cic link failed!\n");
 
        return ret;
@@ -3185,6 +3185,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
        struct io_context *ioc = NULL;
        struct cfq_io_context *cic;
+       int ret;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -3192,6 +3193,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (!ioc)
                return NULL;
 
+retry:
        cic = cfq_cic_lookup(cfqd, ioc);
        if (cic)
                goto out;
@@ -3200,7 +3202,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (cic == NULL)
                goto err;
 
-       if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+       ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+       if (ret == -EEXIST) {
+               /* someone has linked cic to ioc already */
+               cfq_cic_free(cic);
+               goto retry;
+       } else if (ret)
                goto err_free;
 
 out:
@@ -4015,6 +4022,11 @@ static void *cfq_init_queue(struct request_queue *q)
 
        if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
                kfree(cfqg);
+
+               spin_lock(&cic_index_lock);
+               ida_remove(&cic_index_ida, cfqd->cic_index);
+               spin_unlock(&cic_index_lock);
+
                kfree(cfqd);
                return NULL;
        }
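
The cfq change above is the classic race-safe "lookup or create" pattern: allocate outside the lock, try to link, and if another thread won the race (-EEXIST) free the local copy and retry the lookup. A self-contained toy version of the same pattern (userspace C, single slot; the helper names are not the cfq ones):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *slot;                       /* toy stand-in for the cic lookup table */

static int *lookup_slot(void)
{
        int *it;

        pthread_mutex_lock(&lock);
        it = slot;
        pthread_mutex_unlock(&lock);
        return it;
}

static int link_slot(int *item)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (slot)
                ret = -EEXIST;          /* somebody linked one while we allocated */
        else
                slot = item;
        pthread_mutex_unlock(&lock);
        return ret;
}

static int *get_or_create(int value)
{
        int *item;

retry:
        item = lookup_slot();
        if (item)
                return item;

        item = malloc(sizeof(*item));
        if (!item)
                return NULL;
        *item = value;

        if (link_slot(item) == -EEXIST) {
                free(item);             /* lost the race: drop ours, reuse theirs */
                goto retry;
        }
        return item;
}
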
index cbf7b880e9d4701cf4745fad3fc1ec4b4d5e44b6..a1b0b9012cd457acbc78dc67b7f9a1602102096c 100644 (file)
@@ -36,6 +36,7 @@ static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
 
+static void disk_alloc_events(struct gendisk *disk);
 static void disk_add_events(struct gendisk *disk);
 static void disk_del_events(struct gendisk *disk);
 static void disk_release_events(struct gendisk *disk);
@@ -602,6 +603,8 @@ void add_disk(struct gendisk *disk)
        disk->major = MAJOR(devt);
        disk->first_minor = MINOR(devt);
 
+       disk_alloc_events(disk);
+
        /* Register BDI before referencing it from bdev */ 
        bdi = &disk->queue->backing_dev_info;
        bdi_register_dev(bdi, disk_devt(disk));
@@ -611,6 +614,12 @@ void add_disk(struct gendisk *disk)
        register_disk(disk);
        blk_register_queue(disk);
 
+       /*
+        * Take an extra ref on queue which will be put on disk_release()
+        * so that it sticks around as long as @disk is there.
+        */
+       WARN_ON_ONCE(blk_get_queue(disk->queue));
+
        retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
                                   "bdi");
        WARN_ON(retval);
@@ -735,7 +744,7 @@ void __init printk_all_partitions(void)
                struct hd_struct *part;
                char name_buf[BDEVNAME_SIZE];
                char devt_buf[BDEVT_SIZE];
-               u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
+               char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5];
 
                /*
                 * Don't show empty devices or things that have been
@@ -754,14 +763,16 @@ void __init printk_all_partitions(void)
                while ((part = disk_part_iter_next(&piter))) {
                        bool is_part0 = part == &disk->part0;
 
-                       uuid[0] = 0;
+                       uuid_buf[0] = '\0';
                        if (part->info)
-                               part_unpack_uuid(part->info->uuid, uuid);
+                               snprintf(uuid_buf, sizeof(uuid_buf), "%pU",
+                                        part->info->uuid);
 
                        printk("%s%s %10llu %s %s", is_part0 ? "" : "  ",
                               bdevt_str(part_devt(part), devt_buf),
                               (unsigned long long)part->nr_sects >> 1,
-                              disk_name(disk, part->partno, name_buf), uuid);
+                              disk_name(disk, part->partno, name_buf),
+                              uuid_buf);
                        if (is_part0) {
                                if (disk->driverfs_dev != NULL &&
                                    disk->driverfs_dev->driver != NULL)
@@ -1103,6 +1114,8 @@ static void disk_release(struct device *dev)
        disk_replace_part_tbl(disk, NULL);
        free_part_stats(&disk->part0);
        free_part_info(&disk->part0);
+       if (disk->queue)
+               blk_put_queue(disk->queue);
        kfree(disk);
 }
 
@@ -1493,9 +1506,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
        intv = disk_events_poll_jiffies(disk);
        set_timer_slack(&ev->dwork.timer, intv / 4);
        if (check_now)
-               queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
        else if (intv)
-               queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 out_unlock:
        spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1536,7 +1549,7 @@ void disk_check_events(struct gendisk *disk)
        spin_lock_irqsave(&ev->lock, flags);
        if (!ev->block) {
                cancel_delayed_work(&ev->dwork);
-               queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
        }
        spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1574,7 +1587,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
        /* unconditionally schedule event check and wait for it to finish */
        disk_block_events(disk);
-       queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+       queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
        flush_delayed_work(&ev->dwork);
        __disk_unblock_events(disk, false);
 
@@ -1611,7 +1624,7 @@ static void disk_events_workfn(struct work_struct *work)
 
        intv = disk_events_poll_jiffies(disk);
        if (!ev->block && intv)
-               queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 
        spin_unlock_irq(&ev->lock);
 
@@ -1749,9 +1762,9 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
                &disk_events_dfl_poll_msecs, 0644);
 
 /*
- * disk_{add|del|release}_events - initialize and destroy disk_events.
+ * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
  */
-static void disk_add_events(struct gendisk *disk)
+static void disk_alloc_events(struct gendisk *disk)
 {
        struct disk_events *ev;
 
@@ -1764,16 +1777,6 @@ static void disk_add_events(struct gendisk *disk)
                return;
        }
 
-       if (sysfs_create_files(&disk_to_dev(disk)->kobj,
-                              disk_events_attrs) < 0) {
-               pr_warn("%s: failed to create sysfs files for events\n",
-                       disk->disk_name);
-               kfree(ev);
-               return;
-       }
-
-       disk->ev = ev;
-
        INIT_LIST_HEAD(&ev->node);
        ev->disk = disk;
        spin_lock_init(&ev->lock);
@@ -1782,8 +1785,21 @@ static void disk_add_events(struct gendisk *disk)
        ev->poll_msecs = -1;
        INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
 
+       disk->ev = ev;
+}
+
+static void disk_add_events(struct gendisk *disk)
+{
+       if (!disk->ev)
+               return;
+
+       /* FIXME: error handling */
+       if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
+               pr_warn("%s: failed to create sysfs files for events\n",
+                       disk->disk_name);
+
        mutex_lock(&disk_events_mutex);
-       list_add_tail(&ev->node, &disk_events);
+       list_add_tail(&disk->ev->node, &disk_events);
        mutex_unlock(&disk_events_mutex);
 
        /*
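
Two invariants fall out of the genhd.c hunks above: add_disk() now pins the request queue for the lifetime of the gendisk (the reference is dropped in disk_release()), and disk_alloc_events() runs before registration so disk->ev already exists by the time the disk becomes visible. A hypothetical driver-side view:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Hypothetical driver fragment: after this change add_disk() takes its own
 * reference on disk->queue, so the queue stays valid for as long as the
 * gendisk exists, and disk->ev is allocated before the disk is registered. */
static void example_register(struct gendisk *disk, struct request_queue *q)
{
        disk->queue = q;
        add_disk(disk);
}
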
index 4f4230b79bb6ee3c8164e462e40d4bdea3dc1fcd..5ef1f4c17e698457186b61736a0fa02f6594eb7f 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/capability.h>
 #include <linux/completion.h>
 #include <linux/cdrom.h>
+#include <linux/ratelimit.h>
 #include <linux/slab.h>
 #include <linux/times.h>
 #include <asm/uaccess.h>
@@ -691,6 +692,57 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
 }
 EXPORT_SYMBOL(scsi_cmd_ioctl);
 
+int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
+{
+       if (bd && bd == bd->bd_contains)
+               return 0;
+
+       /* Actually none of these is particularly useful on a partition,
+        * but they are safe.
+        */
+       switch (cmd) {
+       case SCSI_IOCTL_GET_IDLUN:
+       case SCSI_IOCTL_GET_BUS_NUMBER:
+       case SCSI_IOCTL_GET_PCI:
+       case SCSI_IOCTL_PROBE_HOST:
+       case SG_GET_VERSION_NUM:
+       case SG_SET_TIMEOUT:
+       case SG_GET_TIMEOUT:
+       case SG_GET_RESERVED_SIZE:
+       case SG_SET_RESERVED_SIZE:
+       case SG_EMULATED_HOST:
+               return 0;
+       case CDROM_GET_CAPABILITY:
+               /* Keep this until we remove the printk below.  udev sends it
+                * and we do not want to spam dmesg about it.   CD-ROMs do
+                * not have partitions, so we get here only for disks.
+                */
+               return -ENOTTY;
+       default:
+               break;
+       }
+
+       /* In particular, rule out all resets and host-specific ioctls.  */
+       printk_ratelimited(KERN_WARNING
+                          "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
+
+       return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
+}
+EXPORT_SYMBOL(scsi_verify_blk_ioctl);
+
+int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
+                      unsigned int cmd, void __user *arg)
+{
+       int ret;
+
+       ret = scsi_verify_blk_ioctl(bd, cmd);
+       if (ret < 0)
+               return ret;
+
+       return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
+}
+EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
+
 static int __init blk_scsi_ioctl_init(void)
 {
        blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
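
Callers are expected to funnel their block ioctls through the new helpers so that partition file descriptors get filtered; a hypothetical driver ioctl method using the wrapper added above:

#include <linux/blkdev.h>

/* Hypothetical block driver ioctl method: scsi_verify_blk_ioctl() rejects
 * dangerous SCSI ioctls issued against a partition unless the caller has
 * CAP_SYS_RAWIO; the safe ones fall through to scsi_cmd_ioctl(). */
static int example_blk_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long arg)
{
        return scsi_cmd_blk_ioctl(bdev, mode, cmd, (void __user *)arg);
}
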
index e46d21ae26bc0538003cf72b42fe1a8e90eb51cd..671d4d6d14df106b3b0278364340ef3290d7a185 100644 (file)
@@ -945,7 +945,7 @@ static void __exit cryptd_exit(void)
        crypto_unregister_template(&cryptd_tmpl);
 }
 
-module_init(cryptd_init);
+subsys_initcall(cryptd_init);
 module_exit(cryptd_exit);
 
 MODULE_LICENSE("GPL");
index 9ed9f60316e545b6bc0ad68f9ef8c50413450685..dd30f40af9f505152bbc620211fa37d109eae1d7 100644 (file)
@@ -21,8 +21,6 @@
 #include <linux/percpu.h>
 #include <asm/byteorder.h>
 
-static DEFINE_PER_CPU(u64[80], msg_schedule);
-
 static inline u64 Ch(u64 x, u64 y, u64 z)
 {
         return z ^ (x & (y ^ z));
@@ -33,11 +31,6 @@ static inline u64 Maj(u64 x, u64 y, u64 z)
         return (x & y) | (z & (x | y));
 }
 
-static inline u64 RORu64(u64 x, u64 y)
-{
-        return (x >> y) | (x << (64 - y));
-}
-
 static const u64 sha512_K[80] = {
         0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
         0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
@@ -68,10 +61,10 @@ static const u64 sha512_K[80] = {
         0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
 };
 
-#define e0(x)       (RORu64(x,28) ^ RORu64(x,34) ^ RORu64(x,39))
-#define e1(x)       (RORu64(x,14) ^ RORu64(x,18) ^ RORu64(x,41))
-#define s0(x)       (RORu64(x, 1) ^ RORu64(x, 8) ^ (x >> 7))
-#define s1(x)       (RORu64(x,19) ^ RORu64(x,61) ^ (x >> 6))
+#define e0(x)       (ror64(x,28) ^ ror64(x,34) ^ ror64(x,39))
+#define e1(x)       (ror64(x,14) ^ ror64(x,18) ^ ror64(x,41))
+#define s0(x)       (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7))
+#define s1(x)       (ror64(x,19) ^ ror64(x,61) ^ (x >> 6))
 
 static inline void LOAD_OP(int I, u64 *W, const u8 *input)
 {
@@ -80,7 +73,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
 
 static inline void BLEND_OP(int I, u64 *W)
 {
-       W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+       W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);
 }
 
 static void
@@ -89,15 +82,7 @@ sha512_transform(u64 *state, const u8 *input)
        u64 a, b, c, d, e, f, g, h, t1, t2;
 
        int i;
-       u64 *W = get_cpu_var(msg_schedule);
-
-       /* load the input */
-        for (i = 0; i < 16; i++)
-                LOAD_OP(i, W, input);
-
-        for (i = 16; i < 80; i++) {
-                BLEND_OP(i, W);
-        }
+       u64 W[16];
 
        /* load the state into our registers */
        a=state[0];   b=state[1];   c=state[2];   d=state[3];
@@ -105,21 +90,35 @@ sha512_transform(u64 *state, const u8 *input)
 
        /* now iterate */
        for (i=0; i<80; i+=8) {
-               t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i  ] + W[i  ];
+               if (!(i & 8)) {
+                       int j;
+
+                       if (i < 16) {
+                               /* load the input */
+                               for (j = 0; j < 16; j++)
+                                       LOAD_OP(i + j, W, input);
+                       } else {
+                               for (j = 0; j < 16; j++) {
+                                       BLEND_OP(i + j, W);
+                               }
+                       }
+               }
+
+               t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i  ] + W[(i & 15)];
                t2 = e0(a) + Maj(a,b,c);    d+=t1;    h=t1+t2;
-               t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
+               t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];
                t2 = e0(h) + Maj(h,a,b);    c+=t1;    g=t1+t2;
-               t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
+               t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];
                t2 = e0(g) + Maj(g,h,a);    b+=t1;    f=t1+t2;
-               t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
+               t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];
                t2 = e0(f) + Maj(f,g,h);    a+=t1;    e=t1+t2;
-               t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
+               t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4];
                t2 = e0(e) + Maj(e,f,g);    h+=t1;    d=t1+t2;
-               t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
+               t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5];
                t2 = e0(d) + Maj(d,e,f);    g+=t1;    c=t1+t2;
-               t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
+               t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6];
                t2 = e0(c) + Maj(c,d,e);    f+=t1;    b=t1+t2;
-               t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
+               t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7];
                t2 = e0(b) + Maj(b,c,d);    e+=t1;    a=t1+t2;
        }
 
@@ -128,8 +127,6 @@ sha512_transform(u64 *state, const u8 *input)
 
        /* erase our data */
        a = b = c = d = e = f = g = h = t1 = t2 = 0;
-       memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
-       put_cpu_var(msg_schedule);
 }
 
 static int
@@ -177,7 +174,7 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
        index = sctx->count[0] & 0x7f;
 
        /* Update number of bytes */
-       if (!(sctx->count[0] += len))
+       if ((sctx->count[0] += len) < len)
                sctx->count[1]++;
 
         part_len = 128 - index;
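
The sha512 rework above drops the 640-byte per-CPU message schedule by exploiting the fact that W[t] depends only on W[t-2], W[t-7], W[t-15] and W[t-16], i.e. on the previous 16 words; a standalone restatement of that rolling window (same s0/s1 as the macros above):

#include <stdint.h>

#define ror64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))

static inline uint64_t s0(uint64_t x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
static inline uint64_t s1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

/* W[t & 15] is updated in place, so a 16-word buffer on the stack replaces
 * the full 80-word per-CPU schedule that was removed above. */
static void blend_in_place(uint64_t W[16], int t)       /* valid for t >= 16 */
{
        W[t & 15] += s1(W[(t - 2) & 15]) + W[(t - 7) & 15] + s0(W[(t - 15) & 15]);
}
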
index 1055769f2f01ae97f4f69780fd93b1015512d870..6d276c20b57b764e9580bb84f10f617a69c1bd07 100644 (file)
@@ -358,6 +358,7 @@ typedef enum {
  */
 struct acpi_object_extra {
        ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG;       /* _REG method for this region (if any) */
+       struct acpi_namespace_node *scope_node;
        void *region_context;   /* Region-specific data */
        u8 *aml_start;
        u32 aml_length;
index 8c7b99728aa23163e77178a6748ddd5e699c8e45..d69e4a53175b6b3a50d3a5e189bd0aa8082679ab 100644 (file)
@@ -384,8 +384,32 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
 
        /* Execute the argument AML */
 
-       status = acpi_ds_execute_arguments(node, node->parent,
+       status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
                                           extra_desc->extra.aml_length,
                                           extra_desc->extra.aml_start);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Validate the region address/length via the host OS */
+
+       status = acpi_os_validate_address(obj_desc->region.space_id,
+                                         obj_desc->region.address,
+                                         (acpi_size) obj_desc->region.length,
+                                         acpi_ut_get_node_name(node));
+
+       if (ACPI_FAILURE(status)) {
+               /*
+                * Invalid address/length. We will emit an error message and mark
+                * the region as invalid, so that it will cause an additional error if
+                * it is ever used. Then return AE_OK.
+                */
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "During address validation of OpRegion [%4.4s]",
+                               node->name.ascii));
+               obj_desc->common.flags |= AOPOBJ_INVALID;
+               status = AE_OK;
+       }
+
        return_ACPI_STATUS(status);
 }
index 110711afada8be5c0b769c693c6fd3b624251ef4..8a06dc523af79725c458e7374eded5aabd6efa9f 100644 (file)
@@ -330,6 +330,12 @@ acpi_ex_create_region(u8 * aml_start,
        region_obj2 = obj_desc->common.next_object;
        region_obj2->extra.aml_start = aml_start;
        region_obj2->extra.aml_length = aml_length;
+       if (walk_state->scope_info) {
+               region_obj2->extra.scope_node =
+                   walk_state->scope_info->scope.node;
+       } else {
+               region_obj2->extra.scope_node = node;
+       }
 
        /* Init the region from the operands */
 
index 6f5588e62c0ac24d396288f661fa3a959b52f734..4c531b4a9625bd5a894b3e49d6d6bdcef9652d51 100644 (file)
@@ -350,10 +350,6 @@ static void acpi_tb_convert_fadt(void)
        u32 address32;
        u32 i;
 
-       /* Update the local FADT table header length */
-
-       acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
-
        /*
         * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
         * Later code will always use the X 64-bit field. Also, check for an
@@ -395,6 +391,10 @@ static void acpi_tb_convert_fadt(void)
                acpi_gbl_FADT.boot_flags = 0;
        }
 
+       /* Update the local FADT table header length */
+
+       acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
+
        /*
         * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
         * generic address structures as necessary. Later code will always use
index 7489b89c300fc3b0ab2f0a55c2bc09ead5e2c738..f151afe61aabca96f17bb92dfdbc7e855ad08620 100644 (file)
@@ -76,7 +76,7 @@ static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
 {
        struct acpi_iomap *map;
 
-       map = __acpi_find_iomap(paddr, size);
+       map = __acpi_find_iomap(paddr, size/8);
        if (map)
                return map->vaddr + (paddr - map->paddr);
        else
index fcc13ac0aa1870009ad0fd9dc41712795e582aab..d77c97de9e7382315755a4fc9befb9d84b314a81 100644 (file)
@@ -635,11 +635,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
 
 static void acpi_battery_refresh(struct acpi_battery *battery)
 {
+       int power_unit;
+
        if (!battery->bat.dev)
                return;
 
+       power_unit = battery->power_unit;
+
        acpi_battery_get_info(battery);
-       /* The battery may have changed its reporting units. */
+
+       if (power_unit == battery->power_unit)
+               return;
+
+       /* The battery has changed its reporting units. */
        sysfs_remove_battery(battery);
        sysfs_add_battery(battery);
 }
index 3b5c3189fd995e4cc200c532b2d05ba6b274fac7..e56f3be7b07d36fceb32dce25ede0fa35308f0e7 100644 (file)
@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
 static int node_to_pxm_map[MAX_NUMNODES]
                        = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
 
+unsigned char acpi_srat_revision __initdata;
+
 int pxm_to_node(int pxm)
 {
        if (pxm < 0)
@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
 
 static int __init acpi_parse_srat(struct acpi_table_header *table)
 {
+       struct acpi_table_srat *srat;
        if (!table)
                return -EINVAL;
 
+       srat = (struct acpi_table_srat *)table;
+       acpi_srat_revision = srat->header.revision;
+
        /* Real work done in acpi_table_parse_srat below. */
 
        return 0;
index d06078d660adecaeb1113bd074cf7b8945d2008a..dfafecbddb53e8b818c79028ba8e3b98a4bb318f 100644 (file)
@@ -595,6 +595,13 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
                if (ACPI_SUCCESS(status)) {
                        dev_info(root->bus->bridge,
                                "ACPI _OSC control (0x%02x) granted\n", flags);
+                       if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+                               /*
+                                * We have ASPM control, but the FADT indicates
+                                * that it's unsupported. Clear it.
+                                */
+                               pcie_clear_aspm(root->bus);
+                       }
                } else {
                        dev_info(root->bus->bridge,
                                "ACPI _OSC request failed (%s), "
index 02d2a4c9084df510c7f6929d08a7082f1ecba5d8..0c0669fb1cc45fbc3a18e83f36e39d50b9fa3059 100644 (file)
@@ -172,8 +172,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
        apic_id = map_mat_entry(handle, type, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(type, acpi_id);
-       if (apic_id == -1)
-               return apic_id;
+       if (apic_id == -1) {
+               /*
+                * On UP processor, there is no _MAT or MADT table.
+                * So above apic_id is always set to -1.
+                *
+                * BIOS may define multiple CPU handles even for UP processor.
+                * For example,
+                *
+                * Scope (_PR)
+                * {
+                *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
+                *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
+                *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
+                *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+                * }
+                *
+                * Ignore apic_id and always return 0 for CPU0's handle.
+                * Return -1 for the other CPUs' handles.
+                */
+               if (acpi_id == 0)
+                       return acpi_id;
+               else
+                       return apic_id;
+       }
 
 #ifdef CONFIG_SMP
        for_each_possible_cpu(i) {
index 79cb6533289456c4f43b92dda5d7059a27345afa..3854df25ac93e8a0e0e6f7c4be796f01e0222758 100644 (file)
@@ -58,6 +58,27 @@ ACPI_MODULE_NAME("processor_thermal");
 static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
 static unsigned int acpi_thermal_cpufreq_is_init = 0;
 
+#define reduction_pctg(cpu) \
+       per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
+
+/*
+ * Emulate "per package data" using per cpu data (which should really be
+ * provided elsewhere)
+ *
+ * Note we can lose a CPU on CPU hot-unplug; in that case we forget the state
+ * temporarily. Fortunately that's not a big issue here (I hope).
+ */
+static int phys_package_first_cpu(int cpu)
+{
+       int i;
+       int id = topology_physical_package_id(cpu);
+
+       for_each_online_cpu(i)
+               if (topology_physical_package_id(i) == id)
+                       return i;
+       return 0;
+}
+
 static int cpu_has_cpufreq(unsigned int cpu)
 {
        struct cpufreq_policy policy;
@@ -77,7 +98,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
 
        max_freq = (
            policy->cpuinfo.max_freq *
-           (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+           (100 - reduction_pctg(policy->cpu) * 20)
        ) / 100;
 
        cpufreq_verify_within_limits(policy, 0, max_freq);
@@ -103,16 +124,28 @@ static int cpufreq_get_cur_state(unsigned int cpu)
        if (!cpu_has_cpufreq(cpu))
                return 0;
 
-       return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
+       return reduction_pctg(cpu);
 }
 
 static int cpufreq_set_cur_state(unsigned int cpu, int state)
 {
+       int i;
+
        if (!cpu_has_cpufreq(cpu))
                return 0;
 
-       per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
-       cpufreq_update_policy(cpu);
+       reduction_pctg(cpu) = state;
+
+       /*
+        * Update all the CPUs in the same package because they all
+        * contribute to the temperature and often share the same
+        * frequency.
+        */
+       for_each_online_cpu(i) {
+               if (topology_physical_package_id(i) ==
+                   topology_physical_package_id(cpu))
+                       cpufreq_update_policy(i);
+       }
        return 0;
 }
 
@@ -120,10 +153,6 @@ void acpi_thermal_cpufreq_init(void)
 {
        int i;
 
-       for (i = 0; i < nr_cpu_ids; i++)
-               if (cpu_present(i))
-                       per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
-
        i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
                                      CPUFREQ_POLICY_NOTIFIER);
        if (!i)
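
The clamp applied by the cpufreq notifier above is linear in the per-package state: each step removes 20% of cpuinfo.max_freq. A worked example (standalone C; the 2 GHz figure and the 0-3 state range are assumptions for illustration):

#include <stdio.h>

/* max_freq = cpuinfo.max_freq * (100 - state * 20) / 100,
 * where 'state' is the per-package reduction_pctg value. */
int main(void)
{
        unsigned int cpuinfo_max_khz = 2000000;         /* assumed 2 GHz part */
        int state;

        for (state = 0; state <= 3; state++)
                printf("state %d -> max %u kHz\n", state,
                       cpuinfo_max_khz * (100 - state * 20) / 100);
        return 0;
}
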
index 6c949602cbd111d18753217ea0ee4784054ec008..79ddcdee83adf866e0dc8b14dccfe55728c77d41 100644 (file)
@@ -422,12 +422,36 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        },
        {
        .callback = init_nvs_nosave,
+       .ident = "Sony Vaio VPCCW29FX",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
+               },
+       },
+       {
+       .callback = init_nvs_nosave,
        .ident = "Averatec AV1020-ED2",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
                DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
                },
        },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Asus K54C",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+               },
+       },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Asus K54HR",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+               },
+       },
        {},
 };
 #endif /* CONFIG_SUSPEND */
index db39e9e607d85542e100967a5783b1bf791e792d..623a3357ce9be6ccdc3f97402da59081ab6ddf2c 100644 (file)
@@ -1732,6 +1732,7 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
 
 static int __init intel_opregion_present(void)
 {
+       int i915 = 0;
 #if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE)
        struct pci_dev *dev = NULL;
        u32 address;
@@ -1744,10 +1745,10 @@ static int __init intel_opregion_present(void)
                pci_read_config_dword(dev, 0xfc, &address);
                if (!address)
                        continue;
-               return 1;
+               i915 = 1;
        }
 #endif
-       return 0;
+       return i915;
 }
 
 int acpi_video_register(void)
index 1e9ab9bf85494042f727fbbf7a3027c561f3b9fd..abf2f4e10fd67a6510fd9368f1a62dc94cf97d75 100644 (file)
@@ -392,6 +392,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
          .driver_data = board_ahci_yes_fbs },                  /* 88se9128 */
        { PCI_DEVICE(0x1b4b, 0x9125),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9125 */
+       { PCI_DEVICE(0x1b4b, 0x917a),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(0x1b4b, 0x91a3),
          .driver_data = board_ahci_yes_fbs },
 
index 6f6e7718b05c7ff905ab3ad068aaa15160992581..6da6debee35bc2f2f00bddbf106d92cdd8763904 100644 (file)
@@ -113,6 +113,8 @@ enum {
        PIIX_PATA_FLAGS         = ATA_FLAG_SLAVE_POSS,
        PIIX_SATA_FLAGS         = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
 
+       PIIX_FLAG_PIO16         = (1 << 30), /* support 16bit PIO only */
+
        PIIX_80C_PRI            = (1 << 5) | (1 << 4),
        PIIX_80C_SEC            = (1 << 7) | (1 << 6),
 
@@ -147,6 +149,7 @@ enum piix_controller_ids {
        ich8m_apple_sata,       /* locks up on second port enable */
        tolapai_sata,
        piix_pata_vmw,                  /* PIIX4 for VMware, spurious DMA_ERR */
+       ich8_sata_snb,
 };
 
 struct piix_map_db {
@@ -177,6 +180,7 @@ static int piix_sidpr_scr_write(struct ata_link *link,
 static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
                              unsigned hints);
 static bool piix_irq_check(struct ata_port *ap);
+static int piix_port_start(struct ata_port *ap);
 #ifdef CONFIG_PM
 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int piix_pci_device_resume(struct pci_dev *pdev);
@@ -298,21 +302,21 @@ static const struct pci_device_id piix_pci_tbl[] = {
        /* SATA Controller IDE (PCH) */
        { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
        /* SATA Controller IDE (CPT) */
-       { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
        /* SATA Controller IDE (CPT) */
-       { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
        /* SATA Controller IDE (CPT) */
        { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (CPT) */
        { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (PBG) */
-       { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
        /* SATA Controller IDE (PBG) */
        { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (Panther Point) */
-       { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
        /* SATA Controller IDE (Panther Point) */
-       { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
        /* SATA Controller IDE (Panther Point) */
        { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (Panther Point) */
@@ -338,6 +342,7 @@ static struct scsi_host_template piix_sht = {
 static struct ata_port_operations piix_sata_ops = {
        .inherits               = &ata_bmdma32_port_ops,
        .sff_irq_check          = piix_irq_check,
+       .port_start             = piix_port_start,
 };
 
 static struct ata_port_operations piix_pata_ops = {
@@ -478,6 +483,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
        [ich8_2port_sata]       = &ich8_2port_map_db,
        [ich8m_apple_sata]      = &ich8m_apple_map_db,
        [tolapai_sata]          = &tolapai_map_db,
+       [ich8_sata_snb]         = &ich8_map_db,
 };
 
 static struct ata_port_info piix_port_info[] = {
@@ -606,6 +612,19 @@ static struct ata_port_info piix_port_info[] = {
                .port_ops       = &piix_vmw_ops,
        },
 
+       /*
+        * Some Sandy Bridge chipsets have broken 32-bit PIO mode;
+        * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
+        */
+       [ich8_sata_snb] =
+       {
+               .flags          = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+               .pio_mask       = ATA_PIO4,
+               .mwdma_mask     = ATA_MWDMA2,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &piix_sata_ops,
+       },
+
 };
 
 static struct pci_bits piix_enable_bits[] = {
@@ -649,6 +668,14 @@ static const struct ich_laptop ich_laptop[] = {
        { 0, }
 };
 
+static int piix_port_start(struct ata_port *ap)
+{
+       if (!(ap->flags & PIIX_FLAG_PIO16))
+               ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+
+       return ata_bmdma_port_start(ap);
+}
+
 /**
  *     ich_pata_cable_detect - Probe host controller cable detect info
  *     @ap: Port for which cable detect info is desired
index 7f099d6e4e0bca601f26f83597e01ec185f03d54..311c92d1db251e08a44a2587e90bda9a49bcd770 100644 (file)
@@ -3487,7 +3487,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg
        u64 now = get_jiffies_64();
        int *trials = void_arg;
 
-       if (ent->timestamp < now - min(now, interval))
+       if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
+           (ent->timestamp < now - min(now, interval)))
                return -1;
 
        (*trials)++;
index 6bd9425ba5ab593ab11372fb6e2e966bf43ecd81..d750962916b1135bf2258232be2f6093515763fc 100644 (file)
@@ -396,8 +396,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
        ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
 
        active = clamp_val(t.active, 2, 15);
-       recover = clamp_val(t.recover, 2, 16);
-       recover &= 0x15;
+       recover = clamp_val(t.recover, 2, 16) & 0x0F;
 
        inb(0x3E6);
        inb(0x3E6);
index 5d1d07645132bdce10dfdbe2590a7cb2da8179d3..d4525928fb7fe8f0ccd12811ef9f4ce4d3ad2329 100644 (file)
@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
                        } else if (skb && card->using_dma) {
                                SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
                                                                       skb->len, PCI_DMA_TODEVICE);
+                               card->tx_skb[port] = skb;
                                iowrite32(SKB_CB(skb)->dma_addr,
                                          card->config_regs + TX_DMA_ADDR(port));
                        }
@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
                db_fpga_upgrade = db_firmware_upgrade = 0;
        }
 
-       if (card->fpga_version >= DMA_SUPPORTED){
+       if (card->fpga_version >= DMA_SUPPORTED) {
+               pci_set_master(dev);
                card->using_dma = 1;
        } else {
                card->using_dma = 0;
index d57e8d0fb8235085b60180230d04dc8cd61f3033..13305c8c3e21014ea5b8e29b81946ee2472738fe 100644 (file)
@@ -168,4 +168,30 @@ config SYS_HYPERVISOR
        bool
        default n
 
+       /* This is akin to using 'nr_cpus' on the Linux command line:
+        * with 'dom0_max_vcpus=X' we can only have up to X VCPUs,
+        * while nr_cpu_ids may be greater than X. That is normally
+        * not a problem, except when CPU hotplug is involved, where
+        * the guest could end up with more than X CPUs - which cannot
+        * work, as there is no hypercall to expand the maximum number
+        * of VCPUs an already running guest has. So cap it at X. */
+
+config SW_SYNC
+       bool "Software synchronization objects"
+       default n
+       depends on SYNC
+       help
+         A sync object driver that uses a 32-bit counter to coordinate
+         synchronization.  Useful when there is no hardware primitive backing
+         the synchronization.
+
+config SW_SYNC_USER
+       bool "Userspace API for SW_SYNC"
+       default n
+       depends on SW_SYNC
+       help
+         Provides a user space API to the sw sync object.
+         *WARNING* improper use of this can result in deadlocking kernel
+         drivers from userspace.
 endmenu
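
A rough kernel-side usage sketch of the framework these options enable: a driver creates a timeline, hands out sync points at future counter values, and advances the counter as work completes. Only sw_sync_pt_create() appears in the sw_sync driver added by this merge; the timeline helpers named here are assumptions about the rest of the API.

#include <linux/errno.h>
#include <linux/sw_sync.h>

static struct sw_sync_timeline *example_tl;
static u32 example_next;

static int example_init(void)
{
        example_tl = sw_sync_timeline_create("example");  /* assumed helper */
        return example_tl ? 0 : -ENOMEM;
}

static struct sync_pt *example_queue_work(void)
{
        example_next++;
        /* signals once the timeline counter reaches example_next */
        return sw_sync_pt_create(example_tl, example_next);
}

static void example_work_done(void)
{
        sw_sync_timeline_inc(example_tl, 1);               /* assumed helper */
}
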
index 4c5701c15f53a1d4ed4c5d55ada286d57d3b85b7..b61688245584ac8cafb920e2c37cb5a1d5b9c4c3 100644 (file)
@@ -19,5 +19,8 @@ obj-$(CONFIG_MODULES) += module.o
 endif
 obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
 
+obj-$(CONFIG_SYNC)     += sync.o
+obj-$(CONFIG_SW_SYNC)  += sw_sync.o
+
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
index bc8729d603a74834b34e75d843cbe3b1045cad50..d13851c5c6847bf50a6ca7925b6d39f059545d45 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/mutex.h>
 #include <linux/async.h>
+#include <linux/pm_runtime.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -1743,6 +1744,10 @@ void device_shutdown(void)
                list_del_init(&dev->kobj.entry);
                spin_unlock(&devices_kset->list_lock);
 
+               /* Don't allow any more runtime suspends */
+               pm_runtime_get_noresume(dev);
+               pm_runtime_barrier(dev);
+
                if (dev->bus && dev->bus->shutdown) {
                        dev_dbg(dev, "shutdown\n");
                        dev->bus->shutdown(dev);
index 06ed6b4e7df5ecc0d236cd73ee2690933f8e8ee7..3719c94be19c1016812150ba8127bb04b96806a4 100644 (file)
@@ -226,13 +226,13 @@ static ssize_t firmware_loading_store(struct device *dev,
        int loading = simple_strtol(buf, NULL, 10);
        int i;
 
+       mutex_lock(&fw_lock);
+
+       if (!fw_priv->fw)
+               goto out;
+
        switch (loading) {
        case 1:
-               mutex_lock(&fw_lock);
-               if (!fw_priv->fw) {
-                       mutex_unlock(&fw_lock);
-                       break;
-               }
                firmware_free_data(fw_priv->fw);
                memset(fw_priv->fw, 0, sizeof(struct firmware));
                /* If the pages are not owned by 'struct firmware' */
@@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev,
                fw_priv->page_array_size = 0;
                fw_priv->nr_pages = 0;
                set_bit(FW_STATUS_LOADING, &fw_priv->status);
-               mutex_unlock(&fw_lock);
                break;
        case 0:
                if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
@@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev,
                fw_load_abort(fw_priv);
                break;
        }
-
+out:
+       mutex_unlock(&fw_lock);
        return count;
 }
 
index 793f796c4da3e1cd20143e1266d41246f2289b73..5693ecee9a4052a339b9eca105ff3704fffe29b4 100644 (file)
@@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
                       nid, K(node_page_state(nid, NR_WRITEBACK)),
                       nid, K(node_page_state(nid, NR_FILE_PAGES)),
                       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-                      nid, K(node_page_state(nid, NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                      nid, K(node_page_state(nid, NR_ANON_PAGES)
                        + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-                       HPAGE_PMD_NR
+                       HPAGE_PMD_NR),
+#else
+                      nid, K(node_page_state(nid, NR_ANON_PAGES)),
 #endif
-                      ),
                       nid, K(node_page_state(nid, NR_SHMEM)),
                       nid, node_page_state(nid, NR_KERNEL_STACK) *
                                THREAD_SIZE / 1024,
@@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
                       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
                                node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
                       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
-                      nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                      nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
                        , nid,
                        K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-                       HPAGE_PMD_NR)
+                       HPAGE_PMD_NR));
+#else
+                      nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
 #endif
-                      );
        n += hugetlb_report_node_meminfo(nid, buf + n);
        return n;
 }
index 577f4fd1648037592de188ce9dab16704ce4c620..184cf54fa01cfbde621a31047866e8c0f1246ade 100644 (file)
@@ -278,6 +278,9 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
  * If a deferred resume was requested while the callback was running then carry
  * it out; otherwise send an idle notification for the device (if the suspend
  * failed) or for its parent (if the suspend succeeded).
+ * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
+ * flag is set and the next autosuspend-delay expiration time is in the
+ * future, schedule another autosuspend attempt.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -389,10 +392,21 @@ static int rpm_suspend(struct device *dev, int rpmflags)
        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = 0;
-               if (retval == -EAGAIN || retval == -EBUSY)
+               if (retval == -EAGAIN || retval == -EBUSY) {
                        dev->power.runtime_error = 0;
-               else
+
+                       /*
+                        * If the callback routine failed an autosuspend, and
+                        * if the last_busy time has been updated so that there
+                        * is a new autosuspend expiration time, automatically
+                        * reschedule another autosuspend.
+                        */
+                       if ((rpmflags & RPM_AUTO) &&
+                           pm_runtime_autosuspend_expiration(dev) != 0)
+                               goto repeat;
+               } else {
                        pm_runtime_cancel_pending(dev);
+               }
        } else {
  no_callback:
                __update_runtime_status(dev, RPM_SUSPENDED);
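
The retry path added above only fires when pm_runtime_autosuspend_expiration() still reports a pending expiration, i.e. when the driver pushed last_busy forward before failing the suspend. A minimal driver-side sketch of that pattern; foo_runtime_suspend and foo_device_busy are hypothetical names, not part of this patch:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical busy check standing in for real driver state. */
    static bool foo_device_busy(struct device *dev)
    {
            return false;
    }

    /* Hypothetical ->runtime_suspend callback illustrating the RPM_AUTO retry. */
    static int foo_runtime_suspend(struct device *dev)
    {
            if (foo_device_busy(dev)) {
                    /* Refresh last_busy so a new autosuspend expiration exists... */
                    pm_runtime_mark_last_busy(dev);
                    /* ...then -EBUSY makes rpm_suspend() reschedule the autosuspend. */
                    return -EBUSY;
            }

            return 0;       /* really idle: let the suspend complete */
    }
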
diff --git a/drivers/base/sw_sync.c b/drivers/base/sw_sync.c
new file mode 100644 (file)
index 0000000..21ddf4f
--- /dev/null
@@ -0,0 +1,256 @@
+/*
+ * drivers/base/sw_sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/sw_sync.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+static int sw_sync_cmp(u32 a, u32 b)
+{
+       if (a == b)
+               return 0;
+
+       return ((s32)a - (s32)b) < 0 ? -1 : 1;
+}
+
+struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+{
+       struct sw_sync_pt *pt;
+
+       pt = (struct sw_sync_pt *)
+               sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
+
+       pt->value = value;
+
+       return (struct sync_pt *)pt;
+}
+
+static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
+{
+       struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
+       struct sw_sync_timeline *obj =
+               (struct sw_sync_timeline *)sync_pt->parent;
+
+       return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
+}
+
+static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
+{
+       struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+       struct sw_sync_timeline *obj =
+               (struct sw_sync_timeline *)sync_pt->parent;
+
+       return sw_sync_cmp(obj->value, pt->value) >= 0;
+}
+
+static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
+{
+       struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
+       struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
+
+       return sw_sync_cmp(pt_a->value, pt_b->value);
+}
+
+static void sw_sync_print_obj(struct seq_file *s,
+                             struct sync_timeline *sync_timeline)
+{
+       struct sw_sync_timeline *obj = (struct sw_sync_timeline *)sync_timeline;
+
+       seq_printf(s, "%d", obj->value);
+}
+
+static void sw_sync_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+       struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+       struct sw_sync_timeline *obj =
+               (struct sw_sync_timeline *)sync_pt->parent;
+
+       seq_printf(s, "%d / %d", pt->value, obj->value);
+}
+
+static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
+                                   void *data, int size)
+{
+       struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+
+       if (size < sizeof(pt->value))
+               return -ENOMEM;
+
+       memcpy(data, &pt->value, sizeof(pt->value));
+
+       return sizeof(pt->value);
+}
+
+struct sync_timeline_ops sw_sync_timeline_ops = {
+       .driver_name = "sw_sync",
+       .dup = sw_sync_pt_dup,
+       .has_signaled = sw_sync_pt_has_signaled,
+       .compare = sw_sync_pt_compare,
+       .print_obj = sw_sync_print_obj,
+       .print_pt = sw_sync_print_pt,
+       .fill_driver_data = sw_sync_fill_driver_data,
+};
+
+
+struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+       struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
+               sync_timeline_create(&sw_sync_timeline_ops,
+                                    sizeof(struct sw_sync_timeline),
+                                    name);
+
+       return obj;
+}
+
+void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+       obj->value += inc;
+
+       sync_timeline_signal(&obj->obj);
+}
+
+
+#ifdef CONFIG_SW_SYNC_USER
+/* *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+int sw_sync_open(struct inode *inode, struct file *file)
+{
+       struct sw_sync_timeline *obj;
+       char task_comm[TASK_COMM_LEN];
+
+       get_task_comm(task_comm, current);
+
+       obj = sw_sync_timeline_create(task_comm);
+       if (obj == NULL)
+               return -ENOMEM;
+
+       file->private_data = obj;
+
+       return 0;
+}
+
+int sw_sync_release(struct inode *inode, struct file *file)
+{
+       struct sw_sync_timeline *obj = file->private_data;
+       sync_timeline_destroy(&obj->obj);
+       return 0;
+}
+
+long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg)
+{
+       int fd = get_unused_fd();
+       int err;
+       struct sync_pt *pt;
+       struct sync_fence *fence;
+       struct sw_sync_create_fence_data data;
+
+       if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+               return -EFAULT;
+
+       pt = sw_sync_pt_create(obj, data.value);
+       if (pt == NULL) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       data.name[sizeof(data.name) - 1] = '\0';
+       fence = sync_fence_create(data.name, pt);
+       if (fence == NULL) {
+               sync_pt_free(pt);
+               err = -ENOMEM;
+               goto err;
+       }
+
+       data.fence = fd;
+       if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+               sync_fence_put(fence);
+               err = -EFAULT;
+               goto err;
+       }
+
+       sync_fence_install(fence, fd);
+
+       return 0;
+
+err:
+       put_unused_fd(fd);
+       return err;
+}
+
+long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
+{
+       u32 value;
+
+       if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+               return -EFAULT;
+
+       sw_sync_timeline_inc(obj, value);
+
+       return 0;
+}
+
+long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct sw_sync_timeline *obj = file->private_data;
+
+       switch (cmd) {
+       case SW_SYNC_IOC_CREATE_FENCE:
+               return sw_sync_ioctl_create_fence(obj, arg);
+
+       case SW_SYNC_IOC_INC:
+               return sw_sync_ioctl_inc(obj, arg);
+
+       default:
+               return -ENOTTY;
+       }
+}
+
+static const struct file_operations sw_sync_fops = {
+       .owner = THIS_MODULE,
+       .open = sw_sync_open,
+       .release = sw_sync_release,
+       .unlocked_ioctl = sw_sync_ioctl,
+};
+
+static struct miscdevice sw_sync_dev = {
+       .minor  = MISC_DYNAMIC_MINOR,
+       .name   = "sw_sync",
+       .fops   = &sw_sync_fops,
+};
+
+int __init sw_sync_device_init(void)
+{
+       return misc_register(&sw_sync_dev);
+}
+
+void __exit sw_sync_device_remove(void)
+{
+       misc_deregister(&sw_sync_dev);
+}
+
+module_init(sw_sync_device_init);
+module_exit(sw_sync_device_remove);
+
+#endif /* CONFIG_SW_SYNC_USER */
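
For reference, a minimal userspace sketch of the CONFIG_SW_SYNC_USER interface added above. It assumes the kernel's <linux/sw_sync.h> definitions (struct sw_sync_create_fence_data, SW_SYNC_IOC_CREATE_FENCE, SW_SYNC_IOC_INC) are visible to userspace and that the misc device appears as /dev/sw_sync; error handling is trimmed:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/types.h>
    #include <linux/sw_sync.h>

    int main(void)
    {
            struct sw_sync_create_fence_data data;
            __u32 inc = 1;
            int timeline = open("/dev/sw_sync", O_RDWR);    /* each open() is its own timeline */

            if (timeline < 0)
                    return 1;

            memset(&data, 0, sizeof(data));
            data.value = 1;                                 /* fence signals once the timeline reaches 1 */
            strcpy(data.name, "example-fence");
            if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
                    return 1;

            ioctl(timeline, SW_SYNC_IOC_INC, &inc);         /* advance the timeline; the fence signals */

            close(data.fence);                              /* data.fence holds the new fence fd */
            close(timeline);                                /* releasing the timeline destroys it */
            return 0;
    }

Waiting on data.fence from another process would go through the SYNC_IOC_WAIT ioctl handled in sync.c below.
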
diff --git a/drivers/base/sync.c b/drivers/base/sync.c
new file mode 100644 (file)
index 0000000..d6913f8
--- /dev/null
@@ -0,0 +1,801 @@
+/*
+ * drivers/base/sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/sync.h>
+#include <linux/uaccess.h>
+
+#include <linux/anon_inodes.h>
+
+static void sync_fence_signal_pt(struct sync_pt *pt);
+static int _sync_pt_has_signaled(struct sync_pt *pt);
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+
+static LIST_HEAD(sync_fence_list_head);
+static DEFINE_SPINLOCK(sync_fence_list_lock);
+
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+                                          int size, const char *name)
+{
+       struct sync_timeline *obj;
+       unsigned long flags;
+
+       if (size < sizeof(struct sync_timeline))
+               return NULL;
+
+       obj = kzalloc(size, GFP_KERNEL);
+       if (obj == NULL)
+               return NULL;
+
+       obj->ops = ops;
+       strlcpy(obj->name, name, sizeof(obj->name));
+
+       INIT_LIST_HEAD(&obj->child_list_head);
+       spin_lock_init(&obj->child_list_lock);
+
+       INIT_LIST_HEAD(&obj->active_list_head);
+       spin_lock_init(&obj->active_list_lock);
+
+       spin_lock_irqsave(&sync_timeline_list_lock, flags);
+       list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+       spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+       return obj;
+}
+
+static void sync_timeline_free(struct sync_timeline *obj)
+{
+       unsigned long flags;
+
+       if (obj->ops->release_obj)
+               obj->ops->release_obj(obj);
+
+       spin_lock_irqsave(&sync_timeline_list_lock, flags);
+       list_del(&obj->sync_timeline_list);
+       spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+       kfree(obj);
+}
+
+void sync_timeline_destroy(struct sync_timeline *obj)
+{
+       unsigned long flags;
+       bool needs_freeing;
+
+       spin_lock_irqsave(&obj->child_list_lock, flags);
+       obj->destroyed = true;
+       needs_freeing = list_empty(&obj->child_list_head);
+       spin_unlock_irqrestore(&obj->child_list_lock, flags);
+
+       if (needs_freeing)
+               sync_timeline_free(obj);
+       else
+               sync_timeline_signal(obj);
+}
+
+static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
+{
+       unsigned long flags;
+
+       pt->parent = obj;
+
+       spin_lock_irqsave(&obj->child_list_lock, flags);
+       list_add_tail(&pt->child_list, &obj->child_list_head);
+       spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_timeline_remove_pt(struct sync_pt *pt)
+{
+       struct sync_timeline *obj = pt->parent;
+       unsigned long flags;
+       bool needs_freeing;
+
+       spin_lock_irqsave(&obj->active_list_lock, flags);
+       if (!list_empty(&pt->active_list))
+               list_del_init(&pt->active_list);
+       spin_unlock_irqrestore(&obj->active_list_lock, flags);
+
+       spin_lock_irqsave(&obj->child_list_lock, flags);
+       list_del(&pt->child_list);
+       needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
+       spin_unlock_irqrestore(&obj->child_list_lock, flags);
+
+       if (needs_freeing)
+               sync_timeline_free(obj);
+}
+
+void sync_timeline_signal(struct sync_timeline *obj)
+{
+       unsigned long flags;
+       LIST_HEAD(signaled_pts);
+       struct list_head *pos, *n;
+
+       spin_lock_irqsave(&obj->active_list_lock, flags);
+
+       list_for_each_safe(pos, n, &obj->active_list_head) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, active_list);
+
+               if (_sync_pt_has_signaled(pt))
+                       list_move(pos, &signaled_pts);
+       }
+
+       spin_unlock_irqrestore(&obj->active_list_lock, flags);
+
+       list_for_each_safe(pos, n, &signaled_pts) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, active_list);
+
+               list_del_init(pos);
+               sync_fence_signal_pt(pt);
+       }
+}
+
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
+{
+       struct sync_pt *pt;
+
+       if (size < sizeof(struct sync_pt))
+               return NULL;
+
+       pt = kzalloc(size, GFP_KERNEL);
+       if (pt == NULL)
+               return NULL;
+
+       INIT_LIST_HEAD(&pt->active_list);
+       sync_timeline_add_pt(parent, pt);
+
+       return pt;
+}
+
+void sync_pt_free(struct sync_pt *pt)
+{
+       if (pt->parent->ops->free_pt)
+               pt->parent->ops->free_pt(pt);
+
+       sync_timeline_remove_pt(pt);
+
+       kfree(pt);
+}
+
+/* call with pt->parent->active_list_lock held */
+static int _sync_pt_has_signaled(struct sync_pt *pt)
+{
+       int old_status = pt->status;
+
+       if (!pt->status)
+               pt->status = pt->parent->ops->has_signaled(pt);
+
+       if (!pt->status && pt->parent->destroyed)
+               pt->status = -ENOENT;
+
+       if (pt->status != old_status)
+               pt->timestamp = ktime_get();
+
+       return pt->status;
+}
+
+static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
+{
+       return pt->parent->ops->dup(pt);
+}
+
+/* Adds a sync pt to the active queue.  Called when added to a fence */
+static void sync_pt_activate(struct sync_pt *pt)
+{
+       struct sync_timeline *obj = pt->parent;
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&obj->active_list_lock, flags);
+
+       err = _sync_pt_has_signaled(pt);
+       if (err != 0)
+               goto out;
+
+       list_add_tail(&pt->active_list, &obj->active_list_head);
+
+out:
+       spin_unlock_irqrestore(&obj->active_list_lock, flags);
+}
+
+static int sync_fence_release(struct inode *inode, struct file *file);
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+                            unsigned long arg);
+
+
+static const struct file_operations sync_fence_fops = {
+       .release = sync_fence_release,
+       .poll = sync_fence_poll,
+       .unlocked_ioctl = sync_fence_ioctl,
+};
+
+static struct sync_fence *sync_fence_alloc(const char *name)
+{
+       struct sync_fence *fence;
+       unsigned long flags;
+
+       fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
+       if (fence == NULL)
+               return NULL;
+
+       fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
+                                        fence, 0);
+       if (fence->file == NULL)
+               goto err;
+
+       strlcpy(fence->name, name, sizeof(fence->name));
+
+       INIT_LIST_HEAD(&fence->pt_list_head);
+       INIT_LIST_HEAD(&fence->waiter_list_head);
+       spin_lock_init(&fence->waiter_list_lock);
+
+       init_waitqueue_head(&fence->wq);
+
+       spin_lock_irqsave(&sync_fence_list_lock, flags);
+       list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
+       spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
+       return fence;
+
+err:
+       kfree(fence);
+       return NULL;
+}
+
+/* TODO: implement a create which takes more than one sync_pt */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
+{
+       struct sync_fence *fence;
+
+       if (pt->fence)
+               return NULL;
+
+       fence = sync_fence_alloc(name);
+       if (fence == NULL)
+               return NULL;
+
+       pt->fence = fence;
+       list_add(&pt->pt_list, &fence->pt_list_head);
+       sync_pt_activate(pt);
+
+       return fence;
+}
+
+static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
+{
+       struct list_head *pos;
+
+       list_for_each(pos, &src->pt_list_head) {
+               struct sync_pt *orig_pt =
+                       container_of(pos, struct sync_pt, pt_list);
+               struct sync_pt *new_pt = sync_pt_dup(orig_pt);
+
+               if (new_pt == NULL)
+                       return -ENOMEM;
+
+               new_pt->fence = dst;
+               list_add(&new_pt->pt_list, &dst->pt_list_head);
+               sync_pt_activate(new_pt);
+       }
+
+       return 0;
+}
+
+static void sync_fence_free_pts(struct sync_fence *fence)
+{
+       struct list_head *pos, *n;
+
+       list_for_each_safe(pos, n, &fence->pt_list_head) {
+               struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+               sync_pt_free(pt);
+       }
+}
+
+struct sync_fence *sync_fence_fdget(int fd)
+{
+       struct file *file = fget(fd);
+
+       if (file == NULL)
+               return NULL;
+
+       if (file->f_op != &sync_fence_fops)
+               goto err;
+
+       return file->private_data;
+
+err:
+       fput(file);
+       return NULL;
+}
+
+void sync_fence_put(struct sync_fence *fence)
+{
+       fput(fence->file);
+}
+
+void sync_fence_install(struct sync_fence *fence, int fd)
+{
+       fd_install(fd, fence->file);
+}
+
+static int sync_fence_get_status(struct sync_fence *fence)
+{
+       struct list_head *pos;
+       int status = 1;
+
+       list_for_each(pos, &fence->pt_list_head) {
+               struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+               int pt_status = pt->status;
+
+               if (pt_status < 0) {
+                       status = pt_status;
+                       break;
+               } else if (status == 1) {
+                       status = pt_status;
+               }
+       }
+
+       return status;
+}
+
+struct sync_fence *sync_fence_merge(const char *name,
+                                   struct sync_fence *a, struct sync_fence *b)
+{
+       struct sync_fence *fence;
+       int err;
+
+       fence = sync_fence_alloc(name);
+       if (fence == NULL)
+               return NULL;
+
+       err = sync_fence_copy_pts(fence, a);
+       if (err < 0)
+               goto err;
+
+       err = sync_fence_copy_pts(fence, b);
+       if (err < 0)
+               goto err;
+
+       fence->status = sync_fence_get_status(fence);
+
+       return fence;
+err:
+       sync_fence_free_pts(fence);
+       kfree(fence);
+       return NULL;
+}
+
+static void sync_fence_signal_pt(struct sync_pt *pt)
+{
+       LIST_HEAD(signaled_waiters);
+       struct sync_fence *fence = pt->fence;
+       struct list_head *pos;
+       struct list_head *n;
+       unsigned long flags;
+       int status;
+
+       status = sync_fence_get_status(fence);
+
+       spin_lock_irqsave(&fence->waiter_list_lock, flags);
+       /*
+        * this should protect against two threads racing on the signaled
+        * false -> true transition
+        */
+       if (status && !fence->status) {
+               list_for_each_safe(pos, n, &fence->waiter_list_head)
+                       list_move(pos, &signaled_waiters);
+
+               fence->status = status;
+       } else {
+               status = 0;
+       }
+       spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+
+       if (status) {
+               list_for_each_safe(pos, n, &signaled_waiters) {
+                       struct sync_fence_waiter *waiter =
+                               container_of(pos, struct sync_fence_waiter,
+                                            waiter_list);
+
+                       waiter->callback(fence, waiter->callback_data);
+                       list_del(pos);
+                       kfree(waiter);
+               }
+               wake_up(&fence->wq);
+       }
+}
+
+int sync_fence_wait_async(struct sync_fence *fence,
+                         void (*callback)(struct sync_fence *, void *data),
+                         void *callback_data)
+{
+       struct sync_fence_waiter *waiter;
+       unsigned long flags;
+       int err = 0;
+
+       waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
+       if (waiter == NULL)
+               return -ENOMEM;
+
+       waiter->callback = callback;
+       waiter->callback_data = callback_data;
+
+       spin_lock_irqsave(&fence->waiter_list_lock, flags);
+
+       if (fence->status) {
+               kfree(waiter);
+               err = fence->status;
+               goto out;
+       }
+
+       list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
+out:
+       spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+
+       return err;
+}
+
+int sync_fence_wait(struct sync_fence *fence, long timeout)
+{
+       int err;
+
+       if (timeout) {
+               timeout = msecs_to_jiffies(timeout);
+               err = wait_event_interruptible_timeout(fence->wq,
+                                                      fence->status != 0,
+                                                      timeout);
+       } else {
+               err = wait_event_interruptible(fence->wq, fence->status != 0);
+       }
+
+       if (err < 0)
+               return err;
+
+       if (fence->status < 0)
+               return fence->status;
+
+       if (fence->status == 0)
+               return -ETIME;
+
+       return 0;
+}
+
+static int sync_fence_release(struct inode *inode, struct file *file)
+{
+       struct sync_fence *fence = file->private_data;
+       unsigned long flags;
+
+       sync_fence_free_pts(fence);
+
+       spin_lock_irqsave(&sync_fence_list_lock, flags);
+       list_del(&fence->sync_fence_list);
+       spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
+       kfree(fence);
+
+       return 0;
+}
+
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
+{
+       struct sync_fence *fence = file->private_data;
+
+       poll_wait(file, &fence->wq, wait);
+
+       if (fence->status == 1)
+               return POLLIN;
+       else if (fence->status < 0)
+               return POLLERR;
+       else
+               return 0;
+}
+
+static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
+{
+       __u32 value;
+
+       if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+               return -EFAULT;
+
+       return sync_fence_wait(fence, value);
+}
+
+static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
+{
+       int fd = get_unused_fd();
+       int err;
+       struct sync_fence *fence2, *fence3;
+       struct sync_merge_data data;
+
+       if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+               return -EFAULT;
+
+       fence2 = sync_fence_fdget(data.fd2);
+       if (fence2 == NULL) {
+               err = -ENOENT;
+               goto err_put_fd;
+       }
+
+       data.name[sizeof(data.name) - 1] = '\0';
+       fence3 = sync_fence_merge(data.name, fence, fence2);
+       if (fence3 == NULL) {
+               err = -ENOMEM;
+               goto err_put_fence2;
+       }
+
+       data.fence = fd;
+       if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+               err = -EFAULT;
+               goto err_put_fence3;
+       }
+
+       sync_fence_install(fence3, fd);
+       sync_fence_put(fence2);
+       return 0;
+
+err_put_fence3:
+       sync_fence_put(fence3);
+
+err_put_fence2:
+       sync_fence_put(fence2);
+
+err_put_fd:
+       put_unused_fd(fd);
+       return err;
+}
+
+int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
+{
+       struct sync_pt_info *info = data;
+       int ret;
+
+       if (size < sizeof(struct sync_pt_info))
+               return -ENOMEM;
+
+       info->len = sizeof(struct sync_pt_info);
+
+       if (pt->parent->ops->fill_driver_data) {
+               ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
+                                                       size - sizeof(*info));
+               if (ret < 0)
+                       return ret;
+
+               info->len += ret;
+       }
+
+       strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
+       strlcpy(info->driver_name, pt->parent->ops->driver_name,
+               sizeof(info->driver_name));
+       info->status = pt->status;
+       info->timestamp_ns = ktime_to_ns(pt->timestamp);
+
+       return info->len;
+}
+
+
+static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
+                                       unsigned long arg)
+{
+       struct sync_fence_info_data *data;
+       struct list_head *pos;
+       __u32 size;
+       __u32 len = 0;
+       int ret;
+
+       if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
+               return -EFAULT;
+
+       if (size < sizeof(struct sync_fence_info_data))
+               return -EINVAL;
+
+       if (size > 4096)
+               size = 4096;
+
+       data = kzalloc(size, GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       strlcpy(data->name, fence->name, sizeof(data->name));
+       data->status = fence->status;
+       len = sizeof(struct sync_fence_info_data);
+
+       list_for_each(pos, &fence->pt_list_head) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, pt_list);
+
+               ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
+
+               if (ret < 0)
+                       goto out;
+
+               len += ret;
+       }
+
+       data->len = len;
+
+       if (copy_to_user((void __user *)arg, data, len))
+               ret = -EFAULT;
+       else
+               ret = 0;
+
+out:
+       kfree(data);
+
+       return ret;
+}
+
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       struct sync_fence *fence = file->private_data;
+       switch (cmd) {
+       case SYNC_IOC_WAIT:
+               return sync_fence_ioctl_wait(fence, arg);
+
+       case SYNC_IOC_MERGE:
+               return sync_fence_ioctl_merge(fence, arg);
+
+       case SYNC_IOC_FENCE_INFO:
+               return sync_fence_ioctl_fence_info(fence, arg);
+
+       default:
+               return -ENOTTY;
+       }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const char *sync_status_str(int status)
+{
+       if (status > 0)
+               return "signaled";
+       else if (status == 0)
+               return "active";
+       else
+               return "error";
+}
+
+static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
+{
+       int status = pt->status;
+       seq_printf(s, "  %s%spt %s",
+                  fence ? pt->parent->name : "",
+                  fence ? "_" : "",
+                  sync_status_str(status));
+       if (pt->status) {
+               struct timeval tv = ktime_to_timeval(pt->timestamp);
+               seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
+       }
+
+       if (pt->parent->ops->print_pt) {
+               seq_printf(s, ": ");
+               pt->parent->ops->print_pt(s, pt);
+       }
+
+       seq_printf(s, "\n");
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+       struct list_head *pos;
+       unsigned long flags;
+
+       seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
+
+       if (obj->ops->print_obj) {
+               seq_printf(s, ": ");
+               obj->ops->print_obj(s, obj);
+       }
+
+       seq_printf(s, "\n");
+
+       spin_lock_irqsave(&obj->child_list_lock, flags);
+       list_for_each(pos, &obj->child_list_head) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, child_list);
+               sync_print_pt(s, pt, false);
+       }
+       spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+{
+       struct list_head *pos;
+       unsigned long flags;
+
+       seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));
+
+       list_for_each(pos, &fence->pt_list_head) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, pt_list);
+               sync_print_pt(s, pt, true);
+       }
+
+       spin_lock_irqsave(&fence->waiter_list_lock, flags);
+       list_for_each(pos, &fence->waiter_list_head) {
+               struct sync_fence_waiter *waiter =
+                       container_of(pos, struct sync_fence_waiter,
+                                    waiter_list);
+
+               seq_printf(s, "waiter %pF %p\n", waiter->callback,
+                          waiter->callback_data);
+       }
+       spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+       unsigned long flags;
+       struct list_head *pos;
+
+       seq_printf(s, "objs:\n--------------\n");
+
+       spin_lock_irqsave(&sync_timeline_list_lock, flags);
+       list_for_each(pos, &sync_timeline_list_head) {
+               struct sync_timeline *obj =
+                       container_of(pos, struct sync_timeline,
+                                    sync_timeline_list);
+
+               sync_print_obj(s, obj);
+               seq_printf(s, "\n");
+       }
+       spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+       seq_printf(s, "fences:\n--------------\n");
+
+       spin_lock_irqsave(&sync_fence_list_lock, flags);
+       list_for_each(pos, &sync_fence_list_head) {
+               struct sync_fence *fence =
+                       container_of(pos, struct sync_fence, sync_fence_list);
+
+               sync_print_fence(s, fence);
+               seq_printf(s, "\n");
+       }
+       spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+       return 0;
+}
+
+static int sync_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_debugfs_fops = {
+       .open           = sync_debugfs_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+       debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+       return 0;
+}
+
+late_initcall(sync_debugfs_init);
+
+#endif
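
And a kernel-side sketch tying the two new files together, using only functions defined above (sw_sync_timeline_create, sw_sync_pt_create, sync_fence_create, sw_sync_timeline_inc, sync_fence_wait, sync_fence_put, sync_timeline_destroy). Error handling is omitted and example_sync_usage is a hypothetical caller, not part of the patch:

    #include <linux/sw_sync.h>
    #include <linux/sync.h>

    /* Hypothetical illustration: create a timeline, a fence that signals at
     * timeline value 1, signal it, then wait on it. */
    static int example_sync_usage(void)
    {
            struct sw_sync_timeline *tl = sw_sync_timeline_create("example");
            struct sync_pt *pt = sw_sync_pt_create(tl, 1);
            struct sync_fence *fence = sync_fence_create("example-fence", pt);
            int ret;

            sw_sync_timeline_inc(tl, 1);            /* obj->value reaches pt->value: fence signals */
            ret = sync_fence_wait(fence, 1000);     /* wait up to 1000 ms; returns 0 once signaled */

            sync_fence_put(fence);                  /* releases the fence and frees its sync_pts */
            sync_timeline_destroy(&tl->obj);        /* timeline is freed once its last pt is gone */
            return ret;
    }
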
index 8f4ef656a1af4ca435ff3ecc5e0d6079e6a1e34d..1dab802d82b9d3a3fb28ea9309e58288df553214 100644 (file)
@@ -1716,7 +1716,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
        case CCISS_BIG_PASSTHRU:
                return cciss_bigpassthru(h, argp);
 
-       /* scsi_cmd_ioctl handles these, below, though some are not */
+       /* scsi_cmd_blk_ioctl handles these, below, though some are not */
        /* very meaningful for cciss.  SG_IO is the main one people want. */
 
        case SG_GET_VERSION_NUM:
@@ -1727,9 +1727,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
        case SG_EMULATED_HOST:
        case SG_IO:
        case SCSI_IOCTL_SEND_COMMAND:
-               return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
+               return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
 
-       /* scsi_cmd_ioctl would normally handle these, below, but */
+       /* scsi_cmd_blk_ioctl would normally handle these, below, but */
        /* they aren't a good fit for cciss, as CD-ROMs are */
        /* not supported, and we don't have any bus/target/lun */
        /* which we present to the kernel. */
@@ -4533,6 +4533,13 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= PCI_D0;
                pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+               /*
+                * The P600 requires a small delay when changing states.
+                * Otherwise we may think the board did not reset and we bail.
+                * This is for kdump only and is particular to the P600.
+                */
+               msleep(500);
        }
        return 0;
 }
index 696100241a6fd017c655c424004af621ae0a491b..a552cab85a21439cf9d7503b9b15cadff7f8a373 100644 (file)
@@ -866,6 +866,7 @@ cciss_scsi_detect(ctlr_info_t *h)
        sh->can_queue = cciss_tape_cmds;
        sh->sg_tablesize = h->maxsgentries;
        sh->max_cmd_len = MAX_COMMAND_SIZE;
+       sh->max_sectors = h->cciss_max_sectors;
 
        ((struct cciss_scsi_adapter_data_t *) 
                h->scsi_ctlr)->scsi_host = sh;
@@ -1410,7 +1411,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
        /* track how many SG entries we are using */
        if (request_nsgs > h->maxSG)
                h->maxSG = request_nsgs;
-       c->Header.SGTotal = (__u8) request_nsgs + chained;
+       c->Header.SGTotal = (u16) request_nsgs + chained;
        if (request_nsgs > h->max_cmd_sgentries)
                c->Header.SGList = h->max_cmd_sgentries;
        else
index b70f0fca9a42e724e3911d62cd64729a2f20ce4b..eec7b7a43cba5cb44a7eada3f099d3d29b463e30 100644 (file)
@@ -1116,7 +1116,7 @@ static inline void carm_handle_resp(struct carm_host *host,
                        break;
                case MISC_GET_FW_VER: {
                        struct carm_fw_ver *ver = (struct carm_fw_ver *)
-                               mem + sizeof(struct carm_msg_get_fw_ver);
+                               (mem + sizeof(struct carm_msg_get_fw_ver));
                        if (!error) {
                                host->fw_ver = le32_to_cpu(ver->version);
                                host->flags |= (ver->features & FL_FW_VER_MASK);
index 0e376d46bdd1d6f73a61583882eec6faa654a832..7333b9e444113c9e2945cdcd99997c4fd3592275 100644 (file)
@@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
 static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
     unsigned int cmd, unsigned long arg)
 {
-       struct gendisk *disk = bdev->bd_disk;
        void __user *usermem = (void __user *) arg;
        int ret;
 
        mutex_lock(&ub_mutex);
-       ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
+       ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
        mutex_unlock(&ub_mutex);
 
        return ret;
index 079c08808d8a41b7fe5d27b00d389579ffae0930..5d7a9340363243250a305a2ee3e63abec5914dde 100644 (file)
@@ -236,8 +236,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;
 
-       return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
-                             (void __user *)data);
+       return scsi_cmd_blk_ioctl(bdev, mode, cmd,
+                                 (void __user *)data);
 }
 
 /* We provide getgeo only to please some old bootloader/partitioning tools */
index 5cf2993a8338c9e325fb6126d5a6ec3ca371f6e3..54139d0f5fec2ea110155d98baa4db2b162c08c2 100644 (file)
@@ -667,7 +667,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
-       else if (operation == WRITE || operation == WRITE_FLUSH)
+       else if (operation & WRITE)
                blkif->st_wr_sect += preq.nr_sects;
 
        return 0;
index a5854735bb2e7882124ad1dc2c1b9c9296c93820..305e752678edc974422d3d09228dbd457b9f148e 100644 (file)
@@ -63,16 +63,24 @@ static struct usb_device_id ath3k_table[] = {
        /* Atheros AR3011 with sflash firmware*/
        { USB_DEVICE(0x0CF3, 0x3002) },
        { USB_DEVICE(0x13d3, 0x3304) },
+       { USB_DEVICE(0x0930, 0x0215) },
+       { USB_DEVICE(0x0489, 0xE03D) },
 
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03F0, 0x311D) },
 
        /* Atheros AR3012 with sflash firmware*/
        { USB_DEVICE(0x0CF3, 0x3004) },
+       { USB_DEVICE(0x0CF3, 0x311D) },
+       { USB_DEVICE(0x13d3, 0x3375) },
+       { USB_DEVICE(0x04CA, 0x3005) },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE02C) },
 
+       /* Atheros AR5BBU22 with sflash firmware */
+       { USB_DEVICE(0x0489, 0xE03C) },
+
        { }     /* Terminating entry */
 };
 
@@ -85,6 +93,12 @@ static struct usb_device_id ath3k_blist_tbl[] = {
 
        /* Atheros AR3012 with sflash firmware*/
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+
+       /* Atheros AR5BBU22 with sflash firmware */
+       { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
 
        { }     /* Terminating entry */
 };
index 91d13a9e8c657f9b838c56ecbae6608965342bc0..f4585b90841d8adb465fcd1ba92d9e0e53c6e4ef 100644 (file)
@@ -60,6 +60,9 @@ static struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
        { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
 
+       /* Broadcom SoftSailing reporting vendor specific */
+       { USB_DEVICE(0x0a5c, 0x21e1) },
+
        /* Apple MacBookPro 7,1 */
        { USB_DEVICE(0x05ac, 0x8213) },
 
@@ -72,9 +75,15 @@ static struct usb_device_id btusb_table[] = {
        /* Apple MacBookAir3,1, MacBookAir3,2 */
        { USB_DEVICE(0x05ac, 0x821b) },
 
+       /* Apple MacBookAir4,1 */
+       { USB_DEVICE(0x05ac, 0x821f) },
+
        /* Apple MacBookPro8,2 */
        { USB_DEVICE(0x05ac, 0x821a) },
 
+       /* Apple MacMini5,1 */
+       { USB_DEVICE(0x05ac, 0x8281) },
+
        /* AVM BlueFRITZ! USB v2.0 */
        { USB_DEVICE(0x057c, 0x3800) },
 
@@ -91,6 +100,17 @@ static struct usb_device_id btusb_table[] = {
        /* Canyon CN-BTU1 with HID interfaces */
        { USB_DEVICE(0x0c10, 0x0000) },
 
+       /* Broadcom BCM20702A0 */
+       { USB_DEVICE(0x0489, 0xe042) },
+       { USB_DEVICE(0x0a5c, 0x21e3) },
+       { USB_DEVICE(0x0a5c, 0x21e6) },
+       { USB_DEVICE(0x0a5c, 0x21e8) },
+       { USB_DEVICE(0x0a5c, 0x21f3) },
+       { USB_DEVICE(0x413c, 0x8197) },
+
+       /* Foxconn - Hon Hai */
+       { USB_DEVICE(0x0489, 0xe033) },
+
        { }     /* Terminating entry */
 };
 
@@ -106,16 +126,24 @@ static struct usb_device_id blacklist_table[] = {
        /* Atheros 3011 with sflash firmware */
        { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+       { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+       { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
 
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
 
        /* Atheros 3012 with sflash firmware */
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
 
+       /* Atheros AR5BBU22 with sflash firmware */
+       { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+
        /* Broadcom BCM2035 */
        { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
        { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -487,15 +515,10 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
 
        pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
 
-       urb->dev      = data->udev;
-       urb->pipe     = pipe;
-       urb->context  = hdev;
-       urb->complete = btusb_isoc_complete;
-       urb->interval = data->isoc_rx_ep->bInterval;
+       usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete,
+                               hdev, data->isoc_rx_ep->bInterval);
 
        urb->transfer_flags  = URB_FREE_BUFFER | URB_ISO_ASAP;
-       urb->transfer_buffer = buf;
-       urb->transfer_buffer_length = size;
 
        __fill_isoc_descriptor(urb, size,
                        le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
index 452132a5ebdee946474de19e1698769e5b13ded7..2c548724154c54b90aef23def0cea2e3c6eeca39 100755 (executable)
@@ -246,7 +246,6 @@ static void hci_uart_destruct(struct hci_dev *hdev)
                return;
 
        BT_DBG("%s", hdev->name);
-       kfree(hdev->driver_data);
 }
 
 /* ------ LDISC part ------ */
@@ -319,12 +318,13 @@ static void hci_uart_tty_close(struct tty_struct *tty)
                        hci_uart_close(hdev);
 
                if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
-                       hu->proto->close(hu);
                        if (hdev) {
                                hci_unregister_dev(hdev);
                                hci_free_dev(hdev);
                        }
+                       hu->proto->close(hu);
                }
+               kfree(hu);
        }
 }
 
index 75fb965b8f72b1e9fa2a8001f7fb16fb12388d5c..cc6471aa9f467d2c8b60dfd686dc3f511dd737c1 100644 (file)
@@ -2114,11 +2114,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
        if (!nr)
                return -ENOMEM;
 
-       if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
-               ret = -EFAULT;
-               goto out;
-       }
-
        cgc.data_direction = CGC_DATA_READ;
        while (nframes > 0) {
                if (nr > nframes)
@@ -2127,7 +2122,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
                if (ret)
                        break;
-               if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
+               if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
                        ret = -EFAULT;
                        break;
                }
@@ -2135,7 +2130,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                nframes -= nr;
                lba += nr;
        }
-out:
        kfree(cgc.buffer);
        return ret;
 }
@@ -2741,12 +2735,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
 {
        void __user *argp = (void __user *)arg;
        int ret;
-       struct gendisk *disk = bdev->bd_disk;
 
        /*
         * Try the generic SCSI command ioctl's first.
         */
-       ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
+       ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
        if (ret != -ENOTTY)
                return ret;
 
index b427711be4be385c2704d4c971dc5cd512ed85c4..58b49d1a283b5637c47511531f7a2292250cc9bc 100644 (file)
@@ -897,6 +897,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_B43_HB),
        ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+       ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
index 5da67f165afaf8df358d1c884083429948cf781e..6f246049d5b43491f7c59219605f040a01bca251 100644 (file)
 #define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
 #define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB          0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB         0x0069
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG          0x0042
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB          0x0044
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB         0x0062
index fd8b2793f54a909667223375b072deafb72ab291..c8bc6488afc2cf64a3bcc3d90a66e05054ffd825 100755 (executable)
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
-
+#include <linux/slab.h>
+#include <linux/input.h>
 #include <asm/cputime.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
 static atomic_t active_count = ATOMIC_INIT(0);
 
 struct cpufreq_interactive_cpuinfo {
@@ -39,11 +43,14 @@ struct cpufreq_interactive_cpuinfo {
        u64 idle_exit_time;
        u64 timer_run_time;
        int idling;
-       u64 freq_change_time;
-       u64 freq_change_time_in_idle;
+       u64 target_set_time;
+       u64 target_set_time_in_idle;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
+       unsigned int floor_freq;
+       u64 floor_validate_time;
+       u64 hispeed_validate_time;
        int governor_enabled;
 };
 
@@ -63,25 +70,47 @@ static struct mutex set_speed_lock;
 static u64 hispeed_freq;
 
 /* Go to hi speed when CPU load at or above this value. */
-#ifdef CONFIG_PLAT_RK
-#define DEFAULT_GO_HISPEED_LOAD 80
-#else
-#define DEFAULT_GO_HISPEED_LOAD 95
-#endif
+#define DEFAULT_GO_HISPEED_LOAD 85
 static unsigned long go_hispeed_load;
 
 /*
  * The minimum amount of time to spend at a frequency before we can ramp down.
  */
-#define DEFAULT_MIN_SAMPLE_TIME 20 * USEC_PER_MSEC
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
 static unsigned long min_sample_time;
 
 /*
  * The sample rate of the timer used to increase frequency
  */
-#define DEFAULT_TIMER_RATE 20 * USEC_PER_MSEC
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
 static unsigned long timer_rate;
 
+/*
+ * Wait this long before raising speed above hispeed, by default a single
+ * timer interval.
+ */
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned long above_hispeed_delay_val;
+
+/*
+ * Boost pulse to hispeed on touchscreen input.
+ */
+
+static int input_boost_val;
+
+struct cpufreq_interactive_inputopen {
+       struct input_handle *handle;
+       struct work_struct inputopen_work;
+};
+
+static struct cpufreq_interactive_inputopen inputopen;
+
+/*
+ * Non-zero means longer-term speed boost active.
+ */
+
+static int boost_val;
+
 static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event);
 
@@ -149,9 +178,9 @@ static void cpufreq_interactive_timer(unsigned long data)
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;
 
        delta_idle = (unsigned int) cputime64_sub(now_idle,
-                                               pcpu->freq_change_time_in_idle);
+                                               pcpu->target_set_time_in_idle);
        delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
-                                                 pcpu->freq_change_time);
+                                                 pcpu->target_set_time);
 
        if ((delta_time == 0) || (delta_idle > delta_time))
                load_since_change = 0;
@@ -167,15 +196,37 @@ static void cpufreq_interactive_timer(unsigned long data)
        if (load_since_change > cpu_load)
                cpu_load = load_since_change;
 
-       if (cpu_load >= go_hispeed_load) {
-               if (pcpu->policy->cur == pcpu->policy->min)
+#ifdef CONFIG_PLAT_RK
+       pcpu->target_freq = pcpu->policy->cur;
+#endif
+
+       if (cpu_load >= go_hispeed_load || boost_val) {
+               if (pcpu->target_freq <= pcpu->policy->min) {
                        new_freq = hispeed_freq;
-               else
+               } else {
                        new_freq = pcpu->policy->max * cpu_load / 100;
+
+                       if (new_freq < hispeed_freq)
+                               new_freq = hispeed_freq;
+
+                       if (pcpu->target_freq == hispeed_freq &&
+                           new_freq > hispeed_freq &&
+                           cputime64_sub(pcpu->timer_run_time,
+                                         pcpu->hispeed_validate_time)
+                           < above_hispeed_delay_val) {
+                               trace_cpufreq_interactive_notyet(data, cpu_load,
+                                                                pcpu->target_freq,
+                                                                new_freq);
+                               goto rearm;
+                       }
+               }
        } else {
-               new_freq = pcpu->policy->cur * cpu_load / 100;
+               new_freq = pcpu->policy->max * cpu_load / 100;
        }
 
+       if (new_freq <= hispeed_freq)
+               pcpu->hispeed_validate_time = pcpu->timer_run_time;
+
        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_H,
                                           &index)) {
@@ -186,22 +237,34 @@ static void cpufreq_interactive_timer(unsigned long data)
 
        new_freq = pcpu->freq_table[index].frequency;
 
-#ifdef CONFIG_PLAT_RK
-       pcpu->target_freq = pcpu->policy->cur;
-#endif
-       if (pcpu->target_freq == new_freq)
-               goto rearm_if_notmax;
-
        /*
-        * Do not scale down unless we have been at this frequency for the
-        * minimum sample time.
+        * Do not scale below floor_freq unless we have been at or above the
+        * floor frequency for the minimum sample time since last validated.
         */
-       if (new_freq < pcpu->target_freq) {
-               if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time)
-                   < min_sample_time)
+       if (new_freq < pcpu->floor_freq) {
+               if (cputime64_sub(pcpu->timer_run_time,
+                                 pcpu->floor_validate_time)
+                   < min_sample_time) {
+                       trace_cpufreq_interactive_notyet(data, cpu_load,
+                                        pcpu->target_freq, new_freq);
                        goto rearm;
+               }
+       }
+
+       pcpu->floor_freq = new_freq;
+       pcpu->floor_validate_time = pcpu->timer_run_time;
+
+       if (pcpu->target_freq == new_freq) {
+               trace_cpufreq_interactive_already(data, cpu_load,
+                                                 pcpu->target_freq, new_freq);
+               goto rearm_if_notmax;
        }
 
+       trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+                                        new_freq);
+       pcpu->target_set_time_in_idle = now_idle;
+       pcpu->target_set_time = pcpu->timer_run_time;
+
        if (new_freq < pcpu->target_freq) {
                pcpu->target_freq = new_freq;
                spin_lock_irqsave(&down_cpumask_lock, flags);
@@ -385,10 +448,8 @@ static int cpufreq_interactive_up_task(void *data)
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        mutex_unlock(&set_speed_lock);
-
-                       pcpu->freq_change_time_in_idle =
-                               get_cpu_idle_time_us(cpu,
-                                                    &pcpu->freq_change_time);
+                       trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
+                                                    pcpu->policy->cur);
                }
        }
 
@@ -432,12 +493,137 @@ static void cpufreq_interactive_freq_down(struct work_struct *work)
                                                CPUFREQ_RELATION_H);
 
                mutex_unlock(&set_speed_lock);
-               pcpu->freq_change_time_in_idle =
-                       get_cpu_idle_time_us(cpu,
-                                            &pcpu->freq_change_time);
+               trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
+                                              pcpu->policy->cur);
+       }
+}
+
+static void cpufreq_interactive_boost(void)
+{
+       int i;
+       int anyboost = 0;
+       unsigned long flags;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+
+       spin_lock_irqsave(&up_cpumask_lock, flags);
+
+       for_each_online_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+
+               if (pcpu->target_freq < hispeed_freq) {
+                       pcpu->target_freq = hispeed_freq;
+                       cpumask_set_cpu(i, &up_cpumask);
+                       pcpu->target_set_time_in_idle =
+                               get_cpu_idle_time_us(i, &pcpu->target_set_time);
+                       pcpu->hispeed_validate_time = pcpu->target_set_time;
+                       anyboost = 1;
+               }
+
+               /*
+                * Set the floor freq and refresh the time at which it
+                * was last validated.
+                */
+
+               pcpu->floor_freq = hispeed_freq;
+               pcpu->floor_validate_time = ktime_to_us(ktime_get());
+       }
+
+       spin_unlock_irqrestore(&up_cpumask_lock, flags);
+
+       if (anyboost)
+               wake_up_process(up_task);
+}
+
+/*
+ * A pulsed boost on an input event raises CPUs to hispeed_freq and lets
+ * the usual min_sample_time algorithm decide when to allow speed to drop.
+ */
+
+static void cpufreq_interactive_input_event(struct input_handle *handle,
+                                           unsigned int type,
+                                           unsigned int code, int value)
+{
+       if (input_boost_val && type == EV_SYN && code == SYN_REPORT) {
+               trace_cpufreq_interactive_boost("input");
+               cpufreq_interactive_boost();
        }
 }
 
+static void cpufreq_interactive_input_open(struct work_struct *w)
+{
+       struct cpufreq_interactive_inputopen *io =
+               container_of(w, struct cpufreq_interactive_inputopen,
+                            inputopen_work);
+       int error;
+
+       error = input_open_device(io->handle);
+       if (error)
+               input_unregister_handle(io->handle);
+}
+
+static int cpufreq_interactive_input_connect(struct input_handler *handler,
+                                            struct input_dev *dev,
+                                            const struct input_device_id *id)
+{
+       struct input_handle *handle;
+       int error;
+
+       pr_info("%s: connect to %s\n", __func__, dev->name);
+       handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->dev = dev;
+       handle->handler = handler;
+       handle->name = "cpufreq_interactive";
+
+       error = input_register_handle(handle);
+       if (error)
+               goto err;
+
+       inputopen.handle = handle;
+       queue_work(down_wq, &inputopen.inputopen_work);
+       return 0;
+err:
+       kfree(handle);
+       return error;
+}
+
+static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
+{
+       input_close_device(handle);
+       input_unregister_handle(handle);
+       kfree(handle);
+}
+
+static const struct input_device_id cpufreq_interactive_ids[] = {
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+                        INPUT_DEVICE_ID_MATCH_ABSBIT,
+               .evbit = { BIT_MASK(EV_ABS) },
+               .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+                           BIT_MASK(ABS_MT_POSITION_X) |
+                           BIT_MASK(ABS_MT_POSITION_Y) },
+       }, /* multi-touch touchscreen */
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+                        INPUT_DEVICE_ID_MATCH_ABSBIT,
+               .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+               .absbit = { [BIT_WORD(ABS_X)] =
+                           BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+       }, /* touchpad */
+       { },
+};
+
+static struct input_handler cpufreq_interactive_input_handler = {
+       .event          = cpufreq_interactive_input_event,
+       .connect        = cpufreq_interactive_input_connect,
+       .disconnect     = cpufreq_interactive_input_disconnect,
+       .name           = "cpufreq_interactive",
+       .id_table       = cpufreq_interactive_ids,
+};
+
 static ssize_t show_hispeed_freq(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
 {
@@ -506,6 +692,28 @@ static ssize_t store_min_sample_time(struct kobject *kobj,
 static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
                show_min_sample_time, store_min_sample_time);
 
+static ssize_t show_above_hispeed_delay(struct kobject *kobj,
+                                       struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%lu\n", above_hispeed_delay_val);
+}
+
+static ssize_t store_above_hispeed_delay(struct kobject *kobj,
+                                        struct attribute *attr,
+                                        const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       above_hispeed_delay_val = val;
+       return count;
+}
+
+define_one_global_rw(above_hispeed_delay);
+
 static ssize_t show_timer_rate(struct kobject *kobj,
                        struct attribute *attr, char *buf)
 {
@@ -528,11 +736,84 @@ static ssize_t store_timer_rate(struct kobject *kobj,
 static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
                show_timer_rate, store_timer_rate);
 
+static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
+                               char *buf)
+{
+       return sprintf(buf, "%u\n", input_boost_val);
+}
+
+static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
+                                const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       input_boost_val = val;
+       return count;
+}
+
+define_one_global_rw(input_boost);
+
+static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
+                         char *buf)
+{
+       return sprintf(buf, "%d\n", boost_val);
+}
+
+static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+                          const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       boost_val = val;
+
+       if (boost_val) {
+               trace_cpufreq_interactive_boost("on");
+               cpufreq_interactive_boost();
+       } else {
+               trace_cpufreq_interactive_unboost("off");
+       }
+
+       return count;
+}
+
+define_one_global_rw(boost);
+
+static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
+                               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       trace_cpufreq_interactive_boost("pulse");
+       cpufreq_interactive_boost();
+       return count;
+}
+
+static struct global_attr boostpulse =
+       __ATTR(boostpulse, 0200, NULL, store_boostpulse);
+
 static struct attribute *interactive_attributes[] = {
        &hispeed_freq_attr.attr,
        &go_hispeed_load_attr.attr,
+       &above_hispeed_delay.attr,
        &min_sample_time_attr.attr,
        &timer_rate_attr.attr,
+       &input_boost.attr,
+       &boost.attr,
+       &boostpulse.attr,
        NULL,
 };
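The boost controls exported above are plain cpufreq global sysfs attributes, so a
userspace policy daemon can pulse the governor without any further kernel changes.
A minimal userspace sketch, assuming the group sits at the usual location for
cpufreq_global_kobject attributes (/sys/devices/system/cpu/cpufreq/interactive/);
the path and helper name are illustrative only, not part of this patch:

    #include <fcntl.h>
    #include <unistd.h>

    /* Write "1" to the governor's boostpulse attribute (assumed path). */
    static int interactive_boostpulse(void)
    {
            int fd = open("/sys/devices/system/cpu/cpufreq/interactive/boostpulse",
                          O_WRONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = write(fd, "1", 1);
            close(fd);
            return n == 1 ? 0 : -1;
    }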
 
@@ -562,9 +843,14 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
-                       pcpu->freq_change_time_in_idle =
+                       pcpu->target_set_time_in_idle =
                                get_cpu_idle_time_us(j,
-                                            &pcpu->freq_change_time);
+                                            &pcpu->target_set_time);
+                       pcpu->floor_freq = pcpu->target_freq;
+                       pcpu->floor_validate_time =
+                               pcpu->target_set_time;
+                       pcpu->hispeed_validate_time =
+                               pcpu->target_set_time;
                        pcpu->governor_enabled = 1;
                        smp_wmb();
                }
@@ -595,6 +881,11 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                if (rc)
                        return rc;
 
+               rc = input_register_handler(&cpufreq_interactive_input_handler);
+               if (rc)
+                       pr_warn("%s: failed to register input handler\n",
+                               __func__);
+
                break;
 
        case CPUFREQ_GOV_STOP:
@@ -617,6 +908,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                if (atomic_dec_return(&active_count) > 0)
                        return 0;
 
+               input_unregister_handler(&cpufreq_interactive_input_handler);
                sysfs_remove_group(cpufreq_global_kobject,
                                &interactive_attr_group);
 
@@ -662,6 +954,7 @@ static int __init cpufreq_interactive_init(void)
 
        go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
        min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+       above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
        timer_rate = DEFAULT_TIMER_RATE;
 
        /* Initialize per-cpu timers */
@@ -695,7 +988,7 @@ static int __init cpufreq_interactive_init(void)
        mutex_init(&set_speed_lock);
 
        idle_notifier_register(&cpufreq_interactive_idle_nb);
-
+       INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
        return cpufreq_register_governor(&cpufreq_gov_interactive);
 
 err_freeuptask:
index bce576d7478ed41f9b69ac727cc5d143d850bb83..ad683ec2c57e45434b7458d30eb0b893aa5bdb94 100644 (file)
@@ -54,6 +54,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
 static int cpu_family = CPU_OPTERON;
 
+/* array to map SW pstate number to acpi state */
+static u32 ps_to_as[8];
+
 /* core performance boost */
 static bool cpb_capable, cpb_enabled;
 static struct msr __percpu *msrs;
@@ -80,9 +83,9 @@ static u32 find_khz_freq_from_fid(u32 fid)
 }
 
 static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
-               u32 pstate)
+                                    u32 pstate)
 {
-       return data[pstate].frequency;
+       return data[ps_to_as[pstate]].frequency;
 }
 
 /* Return the vco fid for an input fid
@@ -926,23 +929,27 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
                        invalidate_entry(powernow_table, i);
                        continue;
                }
-               rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
-               if (!(hi & HW_PSTATE_VALID_MASK)) {
-                       pr_debug("invalid pstate %d, ignoring\n", index);
-                       invalidate_entry(powernow_table, i);
-                       continue;
-               }
 
-               powernow_table[i].index = index;
+               ps_to_as[index] = i;
 
                /* Frequency may be rounded for these */
                if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
                                 || boot_cpu_data.x86 == 0x11) {
+
+                       rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
+                       if (!(hi & HW_PSTATE_VALID_MASK)) {
+                               pr_debug("invalid pstate %d, ignoring\n", index);
+                               invalidate_entry(powernow_table, i);
+                               continue;
+                       }
+
                        powernow_table[i].frequency =
                                freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
                } else
                        powernow_table[i].frequency =
                                data->acpi_data.states[i].core_frequency * 1000;
+
+               powernow_table[i].index = index;
        }
        return 0;
 }
@@ -1189,7 +1196,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
        powernow_k8_acpi_pst_values(data, newstate);
 
        if (cpu_family == CPU_HW_PSTATE)
-               ret = transition_frequency_pstate(data, newstate);
+               ret = transition_frequency_pstate(data,
+                       data->powernow_table[newstate].index);
        else
                ret = transition_frequency_fidvid(data, newstate);
        if (ret) {
@@ -1202,7 +1210,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 
        if (cpu_family == CPU_HW_PSTATE)
                pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-                               newstate);
+                               data->powernow_table[newstate].index);
        else
                pol->cur = find_khz_freq_from_fid(data->currfid);
        ret = 0;
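To restate the indirection introduced above: a row of the driver's frequency table
keeps the hardware pstate number in .index, and ps_to_as[] records the inverse
mapping from hardware pstate back to table row, which is why
find_khz_freq_from_pstate() now maps through ps_to_as[] instead of indexing the
table with the pstate number directly. A minimal sketch of that lookup,
illustrative only:

    /* ps_to_as[table[row].index] == row, so a pstate's frequency lives at
     * table[ps_to_as[pstate]], not at table[pstate]. */
    static u32 example_khz_for_pstate(struct cpufreq_frequency_table *table,
                                      const u32 *ps_to_as, u32 pstate)
    {
            return table[ps_to_as[pstate]].frequency;
    }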
index e0b25de1e339249773287c13d8f457d1f9e94352..98caccfdf217925e71fd2a9362e643ac79223045 100644 (file)
@@ -173,6 +173,7 @@ config CRYPTO_DEV_MV_CESA
        select CRYPTO_ALGAPI
        select CRYPTO_AES
        select CRYPTO_BLKCIPHER2
+       select CRYPTO_HASH
        help
          This driver allows you to utilize the Cryptographic Engines and
          Security Accelerator (CESA) which can be found on the Marvell Orion
index 3cf303ee3fe3449efd185a8567252a36759d5bca..f53dd83438bcab6af17001ec540b8174eec77a6f 100644 (file)
@@ -342,11 +342,13 @@ static void mv_process_hash_current(int first_block)
                else
                        op.config |= CFG_MID_FRAG;
 
-               writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
-               writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
-               writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
-               writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
-               writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+               if (first_block) {
+                       writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+                       writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+                       writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+                       writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+                       writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+               }
        }
 
        memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
@@ -711,6 +713,7 @@ static int mv_hash_final(struct ahash_request *req)
 {
        struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
 
+       ahash_request_set_crypt(req, NULL, req->result, 0);
        mv_update_hash_req_ctx(ctx, 1, 0);
        return mv_handle_req(&req->base);
 }
index 25cf327cd1cb4c298acc6cf5416c1c25e045da99..6a718b77a2b1c476a8fb01b0a3e753e496e90570 100644 (file)
@@ -200,18 +200,17 @@ config PL330_DMA
          platform_data for a dma-pl330 device.
 
 config PCH_DMA
-       tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
+       tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223/ML7831) DMA support"
        depends on PCI && X86
        select DMA_ENGINE
        help
          Enable support for Intel EG20T PCH DMA engine.
-
          This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7213 and ML7223.
-         ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-         for MP(Media Phone) use.
-         ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-         ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+         Output Hub), ML7213, ML7223 and ML7831.
+         The ML7213 IOH is for IVI (In-Vehicle Infotainment) use, the ML7223
+         IOH is for MP (Media Phone) use and the ML7831 IOH is for general
+         purpose use. The ML7213/ML7223/ML7831 are companion chips for the
+         Intel Atom E6xx series and are fully compatible with the Intel EG20T PCH.
 
 config IMX_SDMA
        tristate "i.MX SDMA support"
index 36144f88d718f383b232b25d168030077f6769d3..10c63495ea00308dd54bf130c8260c718fa4f52a 100644 (file)
@@ -237,10 +237,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 
        vdbg_dump_regs(atchan);
 
-       /* clear any pending interrupt */
-       while (dma_readl(atdma, EBCISR))
-               cpu_relax();
-
        channel_writel(atchan, SADDR, 0);
        channel_writel(atchan, DADDR, 0);
        channel_writel(atchan, CTRLA, 0);
@@ -1279,7 +1275,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
                tasklet_init(&atchan->tasklet, atc_tasklet,
                                (unsigned long)atchan);
-               atc_enable_irq(atchan);
+               atc_enable_chan_irq(atdma, i);
        }
 
        /* set base routines */
@@ -1348,7 +1344,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
                struct at_dma_chan      *atchan = to_at_dma_chan(chan);
 
                /* Disable interrupts */
-               atc_disable_irq(atchan);
+               atc_disable_chan_irq(atdma, chan->chan_id);
                tasklet_disable(&atchan->tasklet);
 
                tasklet_kill(&atchan->tasklet);
index 087dbf1dd39cc0d87416e24e093ca7841a714b3f..19ed47056da8dde8400087125daef60c2983966a 100644 (file)
@@ -319,28 +319,27 @@ static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
 }
 
 
-static void atc_setup_irq(struct at_dma_chan *atchan, int on)
+static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
 {
-       struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
-       u32             ebci;
+       u32 ebci;
 
        /* enable interrupts on buffer transfer completion & error */
-       ebci =    AT_DMA_BTC(atchan->chan_common.chan_id)
-               | AT_DMA_ERR(atchan->chan_common.chan_id);
+       ebci =    AT_DMA_BTC(chan_id)
+               | AT_DMA_ERR(chan_id);
        if (on)
                dma_writel(atdma, EBCIER, ebci);
        else
                dma_writel(atdma, EBCIDR, ebci);
 }
 
-static inline void atc_enable_irq(struct at_dma_chan *atchan)
+static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
 {
-       atc_setup_irq(atchan, 1);
+       atc_setup_irq(atdma, chan_id, 1);
 }
 
-static inline void atc_disable_irq(struct at_dma_chan *atchan)
+static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
 {
-       atc_setup_irq(atchan, 0);
+       atc_setup_irq(atdma, chan_id, 0);
 }
 
 
index ff5b38f9d45bc98249ddf2a648a5b338f63f3496..1ed89d0a1bda52cdf8f948d27704144758e4a3e2 100644 (file)
@@ -45,7 +45,8 @@
 #define DMA_STATUS_MASK_BITS           0x3
 #define DMA_STATUS_SHIFT_BITS          16
 #define DMA_STATUS_IRQ(x)              (0x1 << (x))
-#define DMA_STATUS_ERR(x)              (0x1 << ((x) + 8))
+#define DMA_STATUS0_ERR(x)             (0x1 << ((x) + 8))
+#define DMA_STATUS2_ERR(x)             (0x1 << (x))
 
 #define DMA_DESC_WIDTH_SHIFT_BITS      12
 #define DMA_DESC_WIDTH_1_BYTE          (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
 #define DMA_DESC_FOLLOW_WITHOUT_IRQ    0x2
 #define DMA_DESC_FOLLOW_WITH_IRQ       0x3
 
-#define MAX_CHAN_NR                    8
+#define MAX_CHAN_NR                    12
+
+#define DMA_MASK_CTL0_MODE     0x33333333
+#define DMA_MASK_CTL2_MODE     0x00003333
 
 static unsigned int init_nr_desc_per_channel = 64;
 module_param(init_nr_desc_per_channel, uint, 0644);
@@ -133,6 +137,7 @@ struct pch_dma {
 #define PCH_DMA_CTL3   0x0C
 #define PCH_DMA_STS0   0x10
 #define PCH_DMA_STS1   0x14
+#define PCH_DMA_STS2   0x18
 
 #define dma_readl(pd, name) \
        readl((pd)->membase + PCH_DMA_##name)
@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable)
 {
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;
+       int pos;
+
+       if (chan->chan_id < 8)
+               pos = chan->chan_id;
+       else
+               pos = chan->chan_id + 8;
 
        val = dma_readl(pd, CTL2);
 
        if (enable)
-               val |= 0x1 << chan->chan_id;
+               val |= 0x1 << pos;
        else
-               val &= ~(0x1 << chan->chan_id);
+               val &= ~(0x1 << pos);
 
        dma_writel(pd, CTL2, val);
 
@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan)
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;
+       u32 mask_mode;
+       u32 mask_ctl;
 
        if (chan->chan_id < 8) {
                val = dma_readl(pd, CTL0);
 
+               mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+                                       (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+               mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+                                      (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+               val &= mask_mode;
                if (pd_chan->dir == DMA_TO_DEVICE)
                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                                       DMA_CTL0_DIR_SHIFT_BITS);
@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan)
                        val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                                         DMA_CTL0_DIR_SHIFT_BITS));
 
+               val |= mask_ctl;
                dma_writel(pd, CTL0, val);
        } else {
                int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
                val = dma_readl(pd, CTL3);
 
+               mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+                                               (DMA_CTL0_BITS_PER_CH * ch);
+               mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+                                                (DMA_CTL0_BITS_PER_CH * ch));
+               val &= mask_mode;
                if (pd_chan->dir == DMA_TO_DEVICE)
                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
                                       DMA_CTL0_DIR_SHIFT_BITS);
                else
                        val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
                                         DMA_CTL0_DIR_SHIFT_BITS));
-
+               val |= mask_ctl;
                dma_writel(pd, CTL3, val);
        }
 
@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
 {
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;
+       u32 mask_ctl;
+       u32 mask_dir;
 
        if (chan->chan_id < 8) {
+               mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+                          (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+               mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
+                                DMA_CTL0_DIR_SHIFT_BITS);
                val = dma_readl(pd, CTL0);
-
-               val &= ~(DMA_CTL0_MODE_MASK_BITS <<
-                       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+               val &= mask_dir;
                val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
-
+               val |= mask_ctl;
                dma_writel(pd, CTL0, val);
        } else {
                int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
-
+               mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+                                                (DMA_CTL0_BITS_PER_CH * ch));
+               mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
+                                DMA_CTL0_DIR_SHIFT_BITS);
                val = dma_readl(pd, CTL3);
-
-               val &= ~(DMA_CTL0_MODE_MASK_BITS <<
-                       (DMA_CTL0_BITS_PER_CH * ch));
+               val &= mask_dir;
                val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
-
+               val |= mask_ctl;
                dma_writel(pd, CTL3, val);
-
        }
 
        dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
                chan->chan_id, val);
 }
 
-static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
+static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
 {
        struct pch_dma *pd = to_pd(pd_chan->chan.device);
        u32 val;
@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
                        DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
 }
 
+static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
+{
+       struct pch_dma *pd = to_pd(pd_chan->chan.device);
+       u32 val;
+
+       val = dma_readl(pd, STS2);
+       return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
+                       DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
+}
+
 static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
 {
-       if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
+       u32 sts;
+
+       if (pd_chan->chan.chan_id < 8)
+               sts = pdc_get_status0(pd_chan);
+       else
+               sts = pdc_get_status2(pd_chan);
+
+       if (sts == DMA_STATUS_IDLE)
                return true;
        else
                return false;
@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
                list_add_tail(&desc->desc_node, &tmp_list);
        }
 
-       spin_lock_bh(&pd_chan->lock);
+       spin_lock_irq(&pd_chan->lock);
        list_splice(&tmp_list, &pd_chan->free_list);
        pd_chan->descs_allocated = i;
        pd_chan->completed_cookie = chan->cookie = 1;
-       spin_unlock_bh(&pd_chan->lock);
+       spin_unlock_irq(&pd_chan->lock);
 
        pdc_enable_irq(chan, 1);
 
@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan)
        BUG_ON(!list_empty(&pd_chan->active_list));
        BUG_ON(!list_empty(&pd_chan->queue));
 
-       spin_lock_bh(&pd_chan->lock);
+       spin_lock_irq(&pd_chan->lock);
        list_splice_init(&pd_chan->free_list, &tmp_list);
        pd_chan->descs_allocated = 0;
-       spin_unlock_bh(&pd_chan->lock);
+       spin_unlock_irq(&pd_chan->lock);
 
        list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
                pci_pool_free(pd->pool, desc, desc->txd.phys);
@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        dma_cookie_t last_completed;
        int ret;
 
-       spin_lock_bh(&pd_chan->lock);
+       spin_lock_irq(&pd_chan->lock);
        last_completed = pd_chan->completed_cookie;
        last_used = chan->cookie;
-       spin_unlock_bh(&pd_chan->lock);
+       spin_unlock_irq(&pd_chan->lock);
 
        ret = dma_async_is_complete(cookie, last_completed, last_used);
 
@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;
 
-       spin_lock_bh(&pd_chan->lock);
+       spin_lock_irq(&pd_chan->lock);
 
        pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
 
@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        list_for_each_entry_safe(desc, _d, &list, desc_node)
                pdc_chain_complete(pd_chan, desc);
 
-       spin_unlock_bh(&pd_chan->lock);
+       spin_unlock_irq(&pd_chan->lock);
 
        return 0;
 }
@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid)
        struct pch_dma *pd = (struct pch_dma *)devid;
        struct pch_dma_chan *pd_chan;
        u32 sts0;
+       u32 sts2;
        int i;
-       int ret = IRQ_NONE;
+       int ret0 = IRQ_NONE;
+       int ret2 = IRQ_NONE;
 
        sts0 = dma_readl(pd, STS0);
+       sts2 = dma_readl(pd, STS2);
 
        dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
 
        for (i = 0; i < pd->dma.chancnt; i++) {
                pd_chan = &pd->channels[i];
 
-               if (sts0 & DMA_STATUS_IRQ(i)) {
-                       if (sts0 & DMA_STATUS_ERR(i))
-                               set_bit(0, &pd_chan->err_status);
+               if (i < 8) {
+                       if (sts0 & DMA_STATUS_IRQ(i)) {
+                               if (sts0 & DMA_STATUS0_ERR(i))
+                                       set_bit(0, &pd_chan->err_status);
 
-                       tasklet_schedule(&pd_chan->tasklet);
-                       ret = IRQ_HANDLED;
-               }
+                               tasklet_schedule(&pd_chan->tasklet);
+                               ret0 = IRQ_HANDLED;
+                       }
+               } else {
+                       if (sts2 & DMA_STATUS_IRQ(i - 8)) {
+                               if (sts2 & DMA_STATUS2_ERR(i))
+                                       set_bit(0, &pd_chan->err_status);
 
+                               tasklet_schedule(&pd_chan->tasklet);
+                               ret2 = IRQ_HANDLED;
+                       }
+               }
        }
 
        /* clear interrupt bits in status register */
-       dma_writel(pd, STS0, sts0);
+       if (ret0)
+               dma_writel(pd, STS0, sts0);
+       if (ret2)
+               dma_writel(pd, STS2, sts2);
 
-       return ret;
+       return ret0 | ret2;
 }
 
 #ifdef CONFIG_PM
@@ -960,6 +1021,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
 #define PCI_DEVICE_ID_ML7223_DMA2_4CH  0x800E
 #define PCI_DEVICE_ID_ML7223_DMA3_4CH  0x8017
 #define PCI_DEVICE_ID_ML7223_DMA4_4CH  0x803B
+#define PCI_DEVICE_ID_ML7831_DMA1_8CH  0x8810
+#define PCI_DEVICE_ID_ML7831_DMA2_4CH  0x8815
 
 DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
@@ -972,6 +1035,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
        { 0, },
 };
 
@@ -999,7 +1064,7 @@ static void __exit pch_dma_exit(void)
 module_init(pch_dma_init);
 module_exit(pch_dma_exit);
 
-MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
-                  "DMA controller driver");
+MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICON ML7213/ML7223/ML7831 IOH "
+                       "DMA controller driver");
 MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
 MODULE_LICENSE("GPL v2");
index e6ad3bb6c1a6b5efad00df79995f733be9a9c1f7..4799393247c8ada8a626722e0bea57c0ae99409e 100644 (file)
@@ -216,15 +216,33 @@ struct inbound_phy_packet_event {
        struct fw_cdev_event_phy_packet phy_packet;
 };
 
-static inline void __user *u64_to_uptr(__u64 value)
+#ifdef CONFIG_COMPAT
+static void __user *u64_to_uptr(u64 value)
+{
+       if (is_compat_task())
+               return compat_ptr(value);
+       else
+               return (void __user *)(unsigned long)value;
+}
+
+static u64 uptr_to_u64(void __user *ptr)
+{
+       if (is_compat_task())
+               return ptr_to_compat(ptr);
+       else
+               return (u64)(unsigned long)ptr;
+}
+#else
+static inline void __user *u64_to_uptr(u64 value)
 {
        return (void __user *)(unsigned long)value;
 }
 
-static inline __u64 uptr_to_u64(void __user *ptr)
+static inline u64 uptr_to_u64(void __user *ptr)
 {
-       return (__u64)(unsigned long)ptr;
+       return (u64)(unsigned long)ptr;
 }
+#endif /* CONFIG_COMPAT */
 
 static int fw_device_op_open(struct inode *inode, struct file *file)
 {
index 95a4714018922b52ebf484897fe7bcbd106eb318..9f661e06931846fd838cf9412711cd108d0ef146 100644 (file)
@@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = {
 static int read_rom(struct fw_device *device,
                    int generation, int index, u32 *data)
 {
-       int rcode;
+       u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
+       int i, rcode;
 
        /* device->node_id, accessed below, must not be older than generation */
        smp_rmb();
 
-       rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
-                       device->node_id, generation, device->max_speed,
-                       (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
-                       data, 4);
+       for (i = 10; i < 100; i += 10) {
+               rcode = fw_run_transaction(device->card,
+                               TCODE_READ_QUADLET_REQUEST, device->node_id,
+                               generation, device->max_speed, offset, data, 4);
+               if (rcode != RCODE_BUSY)
+                       break;
+               msleep(i);
+       }
        be32_to_cpus(data);
 
        return rcode;
index ee76c8ec72f6814aa4810974b6ee47f54c09c039..271fc518dec5329e769359e194b804305ecd2602 100644 (file)
@@ -262,6 +262,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
 #define PCI_DEVICE_ID_AGERE_FW643      0x5901
+#define PCI_DEVICE_ID_CREATIVE_SB1394  0x4001
 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW        0x2380
 #define PCI_DEVICE_ID_TI_TSB12LV22     0x8009
 #define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
@@ -285,6 +286,9 @@ static const struct {
        {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
                QUIRK_NO_MSI},
 
+       {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
+               QUIRK_RESET_PACKET},
+
        {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
                QUIRK_NO_MSI},
 
@@ -295,7 +299,7 @@ static const struct {
                QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
-               QUIRK_CYCLE_TIMER},
+               QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
@@ -2554,15 +2558,14 @@ static int handle_ir_buffer_fill(struct context *context,
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);
 
-       if (!last->transfer_status)
+       if (last->res_count != 0)
                /* Descriptor(s) not done yet, stop iteration */
                return 0;
 
        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
                ctx->base.callback.mc(&ctx->base,
                                      le32_to_cpu(last->data_address) +
-                                     le16_to_cpu(last->req_count) -
-                                     le16_to_cpu(last->res_count),
+                                     le16_to_cpu(last->req_count),
                                      ctx->base.callback_data);
 
        return 1;
index 5f29aafd44624e8ee1ccb9215735b9068013e056..e27d56c7cc19a43497dbe3193327f2815c7105a5 100644 (file)
@@ -141,23 +141,213 @@ efivar_create_sysfs_entry(struct efivars *efivars,
 
 /* Return the number of unicode characters in data */
 static unsigned long
-utf8_strlen(efi_char16_t *data, unsigned long maxlength)
+utf16_strnlen(efi_char16_t *s, size_t maxlength)
 {
        unsigned long length = 0;
 
-       while (*data++ != 0 && length < maxlength)
+       while (*s++ != 0 && length < maxlength)
                length++;
        return length;
 }
 
+static inline unsigned long
+utf16_strlen(efi_char16_t *s)
+{
+       return utf16_strnlen(s, ~0UL);
+}
+
 /*
  * Return the length of this string in bytes
  * Note: this is NOT the same as the number of unicode characters
  */
 static inline unsigned long
-utf8_strsize(efi_char16_t *data, unsigned long maxlength)
+utf16_strsize(efi_char16_t *data, unsigned long maxlength)
+{
+       return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
+}
+
+static bool
+validate_device_path(struct efi_variable *var, int match, u8 *buffer,
+                    unsigned long len)
+{
+       struct efi_generic_dev_path *node;
+       int offset = 0;
+
+       node = (struct efi_generic_dev_path *)buffer;
+
+       if (len < sizeof(*node))
+               return false;
+
+       while (offset <= len - sizeof(*node) &&
+              node->length >= sizeof(*node) &&
+               node->length <= len - offset) {
+               offset += node->length;
+
+               if ((node->type == EFI_DEV_END_PATH ||
+                    node->type == EFI_DEV_END_PATH2) &&
+                   node->sub_type == EFI_DEV_END_ENTIRE)
+                       return true;
+
+               node = (struct efi_generic_dev_path *)(buffer + offset);
+       }
+
+       /*
+        * If we're here then either node->length pointed past the end
+        * of the buffer or we reached the end of the buffer without
+        * finding a device path end node.
+        */
+       return false;
+}
+
+static bool
+validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
+                   unsigned long len)
+{
+       /* An array of 16-bit integers */
+       if ((len % 2) != 0)
+               return false;
+
+       return true;
+}
+
+static bool
+validate_load_option(struct efi_variable *var, int match, u8 *buffer,
+                    unsigned long len)
+{
+       u16 filepathlength;
+       int i, desclength = 0, namelen;
+
+       namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
+
+       /* Either "Boot" or "Driver" followed by four digits of hex */
+       for (i = match; i < match+4; i++) {
+               if (var->VariableName[i] > 127 ||
+                   hex_to_bin(var->VariableName[i] & 0xff) < 0)
+                       return true;
+       }
+
+       /* Reject it if there's 4 digits of hex and then further content */
+       if (namelen > match + 4)
+               return false;
+
+       /* A valid entry must be at least 8 bytes */
+       if (len < 8)
+               return false;
+
+       filepathlength = buffer[4] | buffer[5] << 8;
+
+       /*
+        * There's no stored length for the description, so it has to be
+        * found by hand
+        */
+       desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+
+       /* Each boot entry must have a descriptor */
+       if (!desclength)
+               return false;
+
+       /*
+        * If the sum of the length of the description, the claimed filepath
+        * length and the original header are greater than the length of the
+        * variable, it's malformed
+        */
+       if ((desclength + filepathlength + 6) > len)
+               return false;
+
+       /*
+        * And, finally, check the filepath
+        */
+       return validate_device_path(var, match, buffer + desclength + 6,
+                                   filepathlength);
+}
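As a concrete, purely hypothetical illustration of the layout being validated: a
Boot0001 value consisting of 4 attribute bytes, a FilePathListLength of 0x30 at
offsets 4-5, the UTF-16 description L"Fedora" (12 bytes plus a 2-byte terminator,
so desclength is 14) and 0x30 bytes of device path passes the size check because
14 + 0x30 + 6 equals the 68-byte variable, and validate_device_path() is then run
on the 0x30 bytes starting at offset 20.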
+
+static bool
+validate_uint16(struct efi_variable *var, int match, u8 *buffer,
+               unsigned long len)
 {
-       return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
+       /* A single 16-bit integer */
+       if (len != 2)
+               return false;
+
+       return true;
+}
+
+static bool
+validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
+                     unsigned long len)
+{
+       int i;
+
+       for (i = 0; i < len; i++) {
+               if (buffer[i] > 127)
+                       return false;
+
+               if (buffer[i] == 0)
+                       return true;
+       }
+
+       return false;
+}
+
+struct variable_validate {
+       char *name;
+       bool (*validate)(struct efi_variable *var, int match, u8 *data,
+                        unsigned long len);
+};
+
+static const struct variable_validate variable_validate[] = {
+       { "BootNext", validate_uint16 },
+       { "BootOrder", validate_boot_order },
+       { "DriverOrder", validate_boot_order },
+       { "Boot*", validate_load_option },
+       { "Driver*", validate_load_option },
+       { "ConIn", validate_device_path },
+       { "ConInDev", validate_device_path },
+       { "ConOut", validate_device_path },
+       { "ConOutDev", validate_device_path },
+       { "ErrOut", validate_device_path },
+       { "ErrOutDev", validate_device_path },
+       { "Timeout", validate_uint16 },
+       { "Lang", validate_ascii_string },
+       { "PlatformLang", validate_ascii_string },
+       { "", NULL },
+};
+
+static bool
+validate_var(struct efi_variable *var, u8 *data, unsigned long len)
+{
+       int i;
+       u16 *unicode_name = var->VariableName;
+
+       for (i = 0; variable_validate[i].validate != NULL; i++) {
+               const char *name = variable_validate[i].name;
+               int match;
+
+               for (match = 0; ; match++) {
+                       char c = name[match];
+                       u16 u = unicode_name[match];
+
+                       /* All special variables are plain ascii */
+                       if (u > 127)
+                               return true;
+
+                       /* Wildcard in the matching name means we've matched */
+                       if (c == '*')
+                               return variable_validate[i].validate(var,
+                                                            match, data, len);
+
+                       /* Case sensitive match */
+                       if (c != u)
+                               break;
+
+                       /* Reached the end of the string while matching */
+                       if (!c)
+                               return variable_validate[i].validate(var,
+                                                            match, data, len);
+               }
+       }
+
+       return true;
 }
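The matcher above walks each table name against the variable name one character at
a time: a '*' in the table entry means the prefix matched and the validator is
invoked with 'match' set to the wildcard position (so "Boot*" matched against
"Boot0001" calls validate_load_option() with match == 4), while reaching the
terminating NUL means an exact match. The same idea reduced to a standalone sketch
on plain C strings, illustrative only (the in-tree code compares against UTF-16
names and rejects non-ASCII characters first):

    #include <stdbool.h>

    /* Returns true and sets *match_pos when 'name' matches 'pattern'; a '*'
     * in the pattern matches any remainder of 'name'. */
    static bool wildcard_prefix_match(const char *pattern, const char *name,
                                      int *match_pos)
    {
            int i;

            for (i = 0; ; i++) {
                    if (pattern[i] == '*') {
                            *match_pos = i;
                            return true;
                    }
                    if (pattern[i] != name[i])
                            return false;
                    if (pattern[i] == '\0') {
                            *match_pos = i;
                            return true;
                    }
            }
    }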
 
 static efi_status_t
@@ -283,6 +473,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
                return -EINVAL;
        }
 
+       if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+           validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
+               printk(KERN_ERR "efivars: Malformed variable content\n");
+               return -EINVAL;
+       }
+
        spin_lock(&efivars->lock);
        status = efivars->ops->set_variable(new_var->VariableName,
                                            &new_var->VendorGuid,
@@ -408,14 +604,20 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
 
+       if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+           validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
+               printk(KERN_ERR "efivars: Malformed variable content\n");
+               return -EINVAL;
+       }
+
        spin_lock(&efivars->lock);
 
        /*
         * Does this variable already exist?
         */
        list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
-               strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024);
-               strsize2 = utf8_strsize(new_var->VariableName, 1024);
+               strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+               strsize2 = utf16_strsize(new_var->VariableName, 1024);
                if (strsize1 == strsize2 &&
                        !memcmp(&(search_efivar->var.VariableName),
                                new_var->VariableName, strsize1) &&
@@ -447,8 +649,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
 
        /* Create the entry in sysfs.  Locking is not required here */
        status = efivar_create_sysfs_entry(efivars,
-                                          utf8_strsize(new_var->VariableName,
-                                                       1024),
+                                          utf16_strsize(new_var->VariableName,
+                                                        1024),
                                           new_var->VariableName,
                                           &new_var->VendorGuid);
        if (status) {
@@ -477,8 +679,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
         * Does this variable already exist?
         */
        list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
-               strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024);
-               strsize2 = utf8_strsize(del_var->VariableName, 1024);
+               strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+               strsize2 = utf16_strsize(del_var->VariableName, 1024);
                if (strsize1 == strsize2 &&
                        !memcmp(&(search_efivar->var.VariableName),
                                del_var->VariableName, strsize1) &&
index ce33f462695771a06f6ed1df7c02d4e66502bff4..2763643089c2964ccc80ea63a7a97226fc0b95d0 100644 (file)
@@ -738,6 +738,37 @@ static void __exit ibft_exit(void)
        ibft_cleanup();
 }
 
+#ifdef CONFIG_ACPI
+static const struct {
+       char *sign;
+} ibft_signs[] = {
+       /*
+        * One spec says "IBFT", the other says "iBFT". We have to check
+        * for both.
+        */
+       { ACPI_SIG_IBFT },
+       { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+       int i;
+       struct acpi_table_header *table = NULL;
+
+       if (acpi_disabled)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+               acpi_get_table(ibft_signs[i].sign, 0, &table);
+               ibft_addr = (struct acpi_table_ibft *)table;
+       }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
 /*
  * ibft_init() - creates sysfs tree entries for the iBFT data.
  */
@@ -745,9 +776,16 @@ static int __init ibft_init(void)
 {
        int rc = 0;
 
+       /*
+        * On UEFI systems setup_arch()/find_ibft_region() is called before
+        * the ACPI tables are parsed and only does legacy scanning, so fall
+        * back to looking the table up via ACPI here.
+        */
+       if (!ibft_addr)
+               acpi_find_ibft_region();
+
        if (ibft_addr) {
-               printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-                      (u64)isa_virt_to_bus(ibft_addr));
+               pr_info("iBFT detected.\n");
 
                rc = ibft_check_device();
                if (rc)
index bfe723266fd89bb84726d88cf15c206c020ee707..4da4eb9ae92604c35349ebaeb39b4a612bac3ed6 100644 (file)
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
 static const struct {
        char *sign;
 } ibft_signs[] = {
-#ifdef CONFIG_ACPI
-       /*
-        * One spec says "IBFT", the other says "iBFT". We have to check
-        * for both.
-        */
-       { ACPI_SIG_IBFT },
-#endif
        { "iBFT" },
        { "BIFT" },     /* Broadcom iSCSI Offload */
 };
@@ -62,14 +55,6 @@ static const struct {
 #define VGA_MEM 0xA0000 /* VGA buffer */
 #define VGA_SIZE 0x20000 /* 128kB */
 
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
-       ibft_addr = (struct acpi_table_ibft *)header;
-       return 0;
-}
-#endif /* CONFIG_ACPI */
-
 static int __init find_ibft_in_mem(void)
 {
        unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
                                 * the table cannot be valid. */
                                if (pos + len <= (IBFT_END-1)) {
                                        ibft_addr = (struct acpi_table_ibft *)virt;
+                                       pr_info("iBFT found at 0x%lx.\n", pos);
                                        goto done;
                                }
                        }
@@ -108,20 +94,12 @@ done:
  */
 unsigned long __init find_ibft_region(unsigned long *sizep)
 {
-#ifdef CONFIG_ACPI
-       int i;
-#endif
        ibft_addr = NULL;
 
-#ifdef CONFIG_ACPI
-       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
-               acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
        /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
         * only use ACPI for this */
 
-       if (!ibft_addr && !efi_enabled)
+       if (!efi_enabled)
                find_ibft_in_mem();
 
        if (ibft_addr) {
index f10fc521951b17491348f0cd0fbb1f3013e31eae..1eedb6f7fdabe46efa082039818bef6f31fe1591 100644 (file)
 #include <linux/module.h>
 #include <linux/sigma.h>
 
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+       size_t payload = 0;
+
+       switch (sa->instr) {
+       case SIGMA_ACTION_WRITEXBYTES:
+       case SIGMA_ACTION_WRITESINGLE:
+       case SIGMA_ACTION_WRITESAFELOAD:
+               payload = sigma_action_len(sa);
+               break;
+       default:
+               break;
+       }
+
+       payload = ALIGN(payload, 2);
+
+       return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
 static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
 {
-       struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
        size_t len = sigma_action_len(sa);
-       int ret = 0;
+       int ret;
 
        pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
                sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
        case SIGMA_ACTION_WRITEXBYTES:
        case SIGMA_ACTION_WRITESINGLE:
        case SIGMA_ACTION_WRITESAFELOAD:
-               if (ssfw->fw->size < ssfw->pos + len)
-                       return -EINVAL;
                ret = i2c_master_send(client, (void *)&sa->addr, len);
                if (ret < 0)
                        return -EINVAL;
                break;
-
        case SIGMA_ACTION_DELAY:
-               ret = 0;
                udelay(len);
                len = 0;
                break;
-
        case SIGMA_ACTION_END:
-               return 1;
-
+               return 0;
        default:
                return -EINVAL;
        }
 
-       /* when arrive here ret=0 or sent data */
-       ssfw->pos += sigma_action_size(sa, len);
-       return ssfw->pos == ssfw->fw->size;
+       return 1;
 }
 
 static int
 process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
 {
-       pr_debug("%s: processing %p\n", __func__, ssfw);
+       struct sigma_action *sa;
+       size_t size;
+       int ret;
+
+       while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+               sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+               size = sigma_action_size(sa);
+               ssfw->pos += size;
+               if (ssfw->pos > ssfw->fw->size || size == 0)
+                       break;
+
+               ret = process_sigma_action(client, sa);
 
-       while (1) {
-               int ret = process_sigma_action(client, ssfw);
                pr_debug("%s: action returned %i\n", __func__, ret);
-               if (ret == 1)
-                       return 0;
-               else if (ret)
+
+               if (ret <= 0)
                        return ret;
        }
+
+       if (ssfw->pos != ssfw->fw->size)
+               return -EINVAL;
+
+       return 0;
 }
 
 int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
 
        /* then verify the header */
        ret = -EINVAL;
-       if (fw->size < sizeof(*ssfw_head))
+
+       /*
+        * Reject too small or unreasonably large files. The upper limit has
+        * been chosen a bit arbitrarily, but it should be enough for all
+        * practical purposes, and having the limit makes it easier to avoid
+        * integer overflows later in the loading process.
+        */
+       if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
                goto done;
 
        ssfw_head = (void *)fw->data;
        if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
                goto done;
 
-       crc = crc32(0, fw->data, fw->size);
+       crc = crc32(0, fw->data + sizeof(*ssfw_head),
+                       fw->size - sizeof(*ssfw_head));
        pr_debug("%s: crc=%x\n", __func__, crc);
-       if (crc != ssfw_head->crc)
+       if (crc != le32_to_cpu(ssfw_head->crc))
                goto done;
 
        ssfw.pos = sizeof(*ssfw_head);
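The reworked parser above treats the firmware as a stream of variable-length
sigma_action records and validates every size before trusting it: the fixed header
must fit in what remains, the computed record size must not run past the end of the
image, and the walk has to land exactly on the end of the file. The same defensive
pattern reduced to a standalone sketch with a hypothetical record format:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical record: a 2-byte payload length followed by the payload. */
    struct record {
            uint16_t len;
            uint8_t  payload[];
    };

    /* Returns 0 if every record in buf[0..size) is well formed, -1 otherwise. */
    static int walk_records(const uint8_t *buf, size_t size)
    {
            size_t pos = 0;

            while (pos + sizeof(struct record) <= size) {
                    const struct record *r = (const struct record *)(buf + pos);
                    size_t rec_size = sizeof(struct record) + r->len;

                    if (rec_size > size - pos)
                            return -1;      /* record runs past the image */
                    pos += rec_size;
            }

            return pos == size ? 0 : -1;
    }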
index 3b8f6043bf0c4436e1ce47da74bc4f33e73b92d8..1eb284d433bdb2fe1c3149f8859b48af3622d474 100755 (executable)
@@ -350,18 +350,19 @@ config GPIO_LANGWELL
          Say Y here to support Intel Langwell/Penwell GPIO.
 
 config GPIO_PCH
-       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
+       tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO"
        depends on PCI && X86
        help
          This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
          which is an IOH(Input/Output Hub) for x86 embedded processor.
          This driver can access PCH GPIO device.
 
-         This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7223.
+         This driver can also be used for the LAPIS Semiconductor IOH (Input/
+         Output Hub), ML7223 and ML7831.
          ML7223 IOH is for MP(Media Phone) use.
-         ML7223 is companion chip for Intel Atom E6xx series.
-         ML7223 is completely compatible for Intel EG20T PCH.
+         The ML7831 IOH is for general purpose use.
+         ML7223/ML7831 are companion chips for the Intel Atom E6xx series
+         and are fully compatible with the Intel EG20T PCH.
 
 config GPIO_ML_IOH
        tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
index 0451d7ac94ac1d51b062d0da4ce19e4e12b6bdf4..532f69006264497edf0b90417a8b365aa19b6a84 100644 (file)
@@ -437,7 +437,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
 
        do {
                level = __ffs(pending);
-               generic_handle_irq(level + chip->irq_base);
+               handle_nested_irq(level + chip->irq_base);
 
                pending &= ~(1 << level);
        } while (pending);
@@ -481,8 +481,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
                        int irq = lvl + chip->irq_base;
 
                        irq_set_chip_data(irq, chip);
-                       irq_set_chip_and_handler(irq, &pca953x_irq_chip,
-                                                handle_simple_irq);
+                       irq_set_chip(irq, &pca953x_irq_chip);
+                       irq_set_nested_thread(irq, true);
 #ifdef CONFIG_ARM
                        set_irq_flags(irq, IRQF_VALID);
 #else
index 36919e77c495cb97c3ae3a5896a6d7dabdce38df..de26978b420be1cb1e0df69c86abaaa2fa2d3619 100644 (file)
@@ -287,6 +287,7 @@ static int pch_gpio_resume(struct pci_dev *pdev)
 static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
        { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
+       { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803) },
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
index 3f46772f0cb212d5135ad686fbd20857e6b54a25..ba23790450e9d4877ec8721ed5280140a18272ea 100644 (file)
@@ -101,7 +101,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
  * Searches and unlinks the entry in drm_device::magiclist with the magic
  * number hash key, while holding the drm_device::struct_mutex lock.
  */
-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
 {
        struct drm_magic_entry *pt;
        struct drm_hash_item *hash;
@@ -136,6 +136,8 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
  * If there is a magic number in drm_file::magic then use it, otherwise
  * searches for a unique non-zero magic number and adds it, associating it
  * with \p file_priv.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
  */
 int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
@@ -173,6 +175,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
  * \return zero if authentication succeeded, or a negative number otherwise.
  *
  * Checks if \p file_priv is associated with the magic number passed in \arg.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
  */
 int drm_authmagic(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
index 82db1850666253dc021f8a43e635fe91ebce94df..1367ced8c26ddd8d74558a23fbeb8afec46cac22 100644 (file)
@@ -1866,6 +1866,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
        }
 
        if (num_clips && clips_ptr) {
+               if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+                       ret = -EINVAL;
+                       goto out_err1;
+               }
                clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
                if (!clips) {
                        ret = -ENOMEM;
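Both this hunk and the i915 execbuffer change further down bound a userspace-supplied
element count before it is multiplied by the element size for an allocation; without
the bound, num_clips * sizeof(*clips) can wrap around and a too-small buffer gets
allocated for a huge count. A generic sketch of the pattern, with an invented cap:

    #include <linux/slab.h>

    /* EXAMPLE_MAX_CLIPS is an invented limit; the hunk above uses
     * DRM_MODE_FB_DIRTY_MAX_CLIPS, the i915 change bounds the count by
     * UINT_MAX / sizeof(element). */
    #define EXAMPLE_MAX_CLIPS 256

    static void *alloc_clip_array(unsigned int num_clips, size_t clip_size)
    {
            if (num_clips == 0 || num_clips > EXAMPLE_MAX_CLIPS)
                    return NULL;
            /* kcalloc() additionally re-checks the multiplication itself. */
            return kcalloc(num_clips, clip_size, GFP_KERNEL);
    }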
index 802b61ac31390d92b2affaf14f787b75f8caab2f..a9dcdc7d3728e677716736f5a4c1d8117bd77d3e 100644 (file)
@@ -610,9 +610,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
                return -EINVAL;
 
        /* Need to resize the fb object !!! */
-       if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+       if (var->bits_per_pixel > fb->bits_per_pixel ||
+           var->xres > fb->width || var->yres > fb->height ||
+           var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
                DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
-                         "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+                         "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+                         var->xres, var->yres, var->bits_per_pixel,
+                         var->xres_virtual, var->yres_virtual,
                          fb->width, fb->height, fb->bits_per_pixel);
                return -EINVAL;
        }
index 2ec7d48fc4a8b18b58bf777b26159849b6ff65d0..c42e12cc2ddb4decf60319c5cb913bb0ece22a5f 100644 (file)
@@ -486,6 +486,11 @@ int drm_release(struct inode *inode, struct file *filp)
                  (long)old_encode_dev(file_priv->minor->device),
                  dev->open_count);
 
+       /* Release any auth tokens that might point to this file_priv
+          (do that under the drm_global_mutex). */
+       if (file_priv->magic)
+               (void) drm_remove_magic(file_priv->master, file_priv->magic);
+
        /* if the master has gone away we can't do anything with the lock */
        if (file_priv->minor->master)
                drm_master_release(dev, filp);
index 0a893f7400fa1fa58724ce1c621684c643cf3f9f..e36efdc67e1f1c368012475eb50326308964c6f0 100644 (file)
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-       } else if (IS_GEN6(dev)) {
+       } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
                u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
index 7eef6e11d9acd971cd25954e08a87930be75fd59..ef164432ba6d8ddec3613646fe6a290a4dd3f9cf 100644 (file)
@@ -1451,6 +1451,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 
        diff1 = now - dev_priv->last_time1;
 
+       /* Prevent division-by-zero if we are asking too fast.
+        * Also, we don't get interesting results if we are polling
+        * faster than once in 10ms, so just return the saved value
+        * in such cases.
+        */
+       if (diff1 <= 10)
+               return dev_priv->chipset_power;
+
        count1 = I915_READ(DMIEC);
        count2 = I915_READ(DDREC);
        count3 = I915_READ(CSIEC);
@@ -1481,6 +1489,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;
 
+       dev_priv->chipset_power = ret;
+
        return ret;
 }
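
The guard added above caches the computed value and returns it when the function is polled faster than once per 10 ms, so the counter delta is never divided by a near-zero elapsed time. A small stand-alone sketch of the pattern; the struct and the compute callback are invented for illustration.

struct power_sample {
        unsigned long last_time_ms;    /* time of the last real sample */
        unsigned long cached_value;    /* returned when polled too fast */
};

/* Return the cached value when polled within the minimum interval, so the
 * caller never divides by a near-zero time delta. */
static unsigned long sample_power(struct power_sample *s,
                                  unsigned long now_ms,
                                  unsigned long (*compute)(void))
{
        if (now_ms - s->last_time_ms <= 10)
                return s->cached_value;

        s->cached_value = compute();
        s->last_time_ms = now_ms;
        return s->cached_value;
}
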
 
index eb91e2dd791495ed5f40575a4f2190a0b8ad276c..111686ada271609bdf473e02b08c3c9e0c314a42 100644 (file)
@@ -379,6 +379,10 @@ static int i915_drm_freeze(struct drm_device *dev)
        /* Modeset on resume, not lid events */
        dev_priv->modeset_on_lid = 0;
 
+       console_lock();
+       intel_fbdev_set_suspend(dev, 1);
+       console_unlock();
+
        return 0;
 }
 
@@ -438,7 +442,9 @@ static int i915_drm_thaw(struct drm_device *dev)
                drm_irq_install(dev);
 
                /* Resume the modeset for every activated CRTC */
+               mutex_lock(&dev->mode_config.mutex);
                drm_helper_resume_force_mode(dev);
+               mutex_unlock(&dev->mode_config.mutex);
 
                if (IS_IRONLAKE_M(dev))
                        ironlake_enable_rc6(dev);
@@ -448,6 +454,9 @@ static int i915_drm_thaw(struct drm_device *dev)
 
        dev_priv->modeset_on_lid = 0;
 
+       console_lock();
+       intel_fbdev_set_suspend(dev, 0);
+       console_unlock();
        return error;
 }
 
index ce7914c4c044662153545e7c2d87e277432fbc34..b570415c3fd92702e87946d9d6918b5f60e7b8b3 100644 (file)
@@ -325,6 +325,8 @@ typedef struct drm_i915_private {
        struct timer_list hangcheck_timer;
        int hangcheck_count;
        uint32_t last_acthd;
+       uint32_t last_acthd_bsd;
+       uint32_t last_acthd_blt;
        uint32_t last_instdone;
        uint32_t last_instdone1;
 
@@ -541,6 +543,7 @@ typedef struct drm_i915_private {
        u32 savePIPEB_LINK_M1;
        u32 savePIPEB_LINK_N1;
        u32 saveMCHBAR_RENDER_STANDBY;
+       u32 savePCH_PORT_HOTPLUG;
 
        struct {
                /** Bridge to intel-gtt-ko */
@@ -701,6 +704,7 @@ typedef struct drm_i915_private {
 
        u64 last_count1;
        unsigned long last_time1;
+       unsigned long chipset_power;
        u64 last_count2;
        struct timespec last_time2;
        unsigned long gfx_power;
index a087e1bf0c2f4359af4374aab8ae7c50ae5040a2..5548593040bf894952a37593c23a76dae65ac911 100644 (file)
@@ -1475,7 +1475,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
 
        if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
                ret = -E2BIG;
-               goto unlock;
+               goto out;
        }
 
        if (obj->madv != I915_MADV_WILLNEED) {
index 4934cf84c320336aa84320da544cc1e0d091da1c..bc927ae3164f6e01c7e1d2adf782fdca1f796f9c 100644 (file)
@@ -1046,6 +1046,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        return -EINVAL;
                }
 
+               if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+                       DRM_DEBUG("execbuf with %u cliprects\n",
+                                 args->num_cliprects);
+                       return -EINVAL;
+               }
                cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
@@ -1296,7 +1301,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;
 
-       if (args->buffer_count < 1) {
+       if (args->buffer_count < 1 ||
+           args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
                DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }
index 9b1d669f7d4b72e0464e10337af8774664880ae8..d05f03c628471cac3e28c33f06dba2e4ba9eacd1 100644 (file)
@@ -422,14 +422,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
        mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-static void pch_irq_handler(struct drm_device *dev)
+static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 pch_iir;
        int pipe;
 
-       pch_iir = I915_READ(SDEIIR);
-
        if (pch_iir & SDE_AUDIO_POWER_MASK)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -527,7 +524,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
        if (de_iir & DE_PCH_EVENT_IVB) {
                if (pch_iir & SDE_HOTPLUG_MASK_CPT)
                        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-               pch_irq_handler(dev);
+               pch_irq_handler(dev, pch_iir);
        }
 
        if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
@@ -626,7 +623,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
        if (de_iir & DE_PCH_EVENT) {
                if (pch_iir & hotplug_mask)
                        queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-               pch_irq_handler(dev);
+               pch_irq_handler(dev, pch_iir);
        }
 
        if (de_iir & DE_PCU_EVENT) {
@@ -820,6 +817,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
+       case 7:
        case 6:
                for (i = 0; i < 16; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -1664,7 +1662,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 {
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t acthd, instdone, instdone1;
+       uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
        bool err = false;
 
        /* If all work is done then ACTHD clearly hasn't advanced. */
@@ -1678,16 +1676,21 @@ void i915_hangcheck_elapsed(unsigned long data)
        }
 
        if (INTEL_INFO(dev)->gen < 4) {
-               acthd = I915_READ(ACTHD);
                instdone = I915_READ(INSTDONE);
                instdone1 = 0;
        } else {
-               acthd = I915_READ(ACTHD_I965);
                instdone = I915_READ(INSTDONE_I965);
                instdone1 = I915_READ(INSTDONE1);
        }
+       acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
+       acthd_bsd = HAS_BSD(dev) ?
+               intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
+       acthd_blt = HAS_BLT(dev) ?
+               intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
 
        if (dev_priv->last_acthd == acthd &&
+           dev_priv->last_acthd_bsd == acthd_bsd &&
+           dev_priv->last_acthd_blt == acthd_blt &&
            dev_priv->last_instdone == instdone &&
            dev_priv->last_instdone1 == instdone1) {
                if (dev_priv->hangcheck_count++ > 1) {
@@ -1719,6 +1722,8 @@ void i915_hangcheck_elapsed(unsigned long data)
                dev_priv->hangcheck_count = 0;
 
                dev_priv->last_acthd = acthd;
+               dev_priv->last_acthd_bsd = acthd_bsd;
+               dev_priv->last_acthd_blt = acthd_blt;
                dev_priv->last_instdone = instdone;
                dev_priv->last_instdone1 = instdone1;
        }
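
The hangcheck changes above only declare a hang when the render, BSD and BLT active-head values are all unchanged across consecutive polls. A compact sketch of that multi-counter progress check, with invented names and a strike threshold of two polls.

#include <stdbool.h>
#include <stdint.h>

struct hang_state {
        uint32_t last[3];   /* last observed head pointer per ring */
        int strikes;        /* consecutive polls with no progress */
};

/* Declare a hang only when every tracked counter is unchanged across
 * consecutive polls and the strike count passes the threshold. */
static bool hang_poll(struct hang_state *hs, const uint32_t cur[3])
{
        if (cur[0] == hs->last[0] && cur[1] == hs->last[1] &&
            cur[2] == hs->last[2])
                return ++hs->strikes > 1;

        hs->strikes = 0;                 /* progress: reset and resample */
        for (int i = 0; i < 3; i++)
                hs->last[i] = cur[i];
        return false;
}
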
index 5d5def756c9e5beee9ae10def1cbe39d5f7cf02f..387b2b34e93fc021c9fd86ec793506562144af39 100644 (file)
 
 #define GEN6_BSD_RNCID                 0x12198
 
+#define GEN7_FF_THREAD_MODE            0x20a0
+#define   GEN7_FF_SCHED_MASK           0x0077070
+#define   GEN7_FF_TS_SCHED_HS1         (0x5<<16)
+#define   GEN7_FF_TS_SCHED_HS0         (0x3<<16)
+#define   GEN7_FF_TS_SCHED_LOAD_BALANCE        (0x1<<16)
+#define   GEN7_FF_TS_SCHED_HW          (0x0<<16) /* Default */
+#define   GEN7_FF_VS_SCHED_HS1         (0x5<<12)
+#define   GEN7_FF_VS_SCHED_HS0         (0x3<<12)
+#define   GEN7_FF_VS_SCHED_LOAD_BALANCE        (0x1<<12) /* Default */
+#define   GEN7_FF_VS_SCHED_HW          (0x0<<12)
+#define   GEN7_FF_DS_SCHED_HS1         (0x5<<4)
+#define   GEN7_FF_DS_SCHED_HS0         (0x3<<4)
+#define   GEN7_FF_DS_SCHED_LOAD_BALANCE        (0x1<<4)  /* Default */
+#define   GEN7_FF_DS_SCHED_HW          (0x0<<4)
+
 /*
  * Framebuffer compression (915+ only)
  */
 #define   PIPECONF_DISABLE     0
 #define   PIPECONF_DOUBLE_WIDE (1<<30)
 #define   I965_PIPECONF_ACTIVE (1<<30)
+#define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
 #define   PIPECONF_SINGLE_WIDE 0
 #define   PIPECONF_PIPE_UNLOCKED 0
 #define   PIPECONF_PIPE_LOCKED (1<<25)
 #define _CURBBASE              0x700c4
 #define _CURBPOS                       0x700c8
 
+#define _CURBCNTR_IVB          0x71080
+#define _CURBBASE_IVB          0x71084
+#define _CURBPOS_IVB           0x71088
+
 #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
 #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
 #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
 
+#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
+#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
+#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
+
 /* Display A control */
 #define _DSPACNTR                0x70180
 #define   DISPLAY_PLANE_ENABLE                 (1<<31)
 #define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
 #define  DISP_FBC_WM_DIS               (1<<15)
 
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1             0x7010
+# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC     ((1<<10) | (1<<26))
+
+#define GEN7_L3CNTLREG1                                0xB01C
+#define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C4FFF8C
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER          0xB030
+#define  GEN7_WA_L3_CHICKEN_MODE                               0x20000000
+
+/* WaCatErrorRejectionIssue */
+#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         0x9030
+#define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
+
 /* PCH */
 
 /* south display engine interrupt */
 #define  FDI_LINK_TRAIN_NONE_IVB            (3<<8)
 
 /* both Tx and Rx */
+#define  FDI_COMPOSITE_SYNC            (1<<11)
 #define  FDI_LINK_TRAIN_AUTO           (1<<10)
 #define  FDI_SCRAMBLING_ENABLE          (0<<7)
 #define  FDI_SCRAMBLING_DISABLE         (1<<7)
 
 #define  GT_FIFO_FREE_ENTRIES                  0x120008
 
+#define GEN6_UCGCTL2                           0x9404
+# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE               (1 << 13)
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE              (1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE               (1 << 11)
+
 #define GEN6_RPNSWREQ                          0xA008
 #define   GEN6_TURBO_DISABLE                   (1<<31)
 #define   GEN6_FREQUENCY(x)                    ((x)<<25)
index 5257cfc34c3570641929cbb7825d469093170a36..bc7dcaa1c689068598ee10ffbef5db7065800261 100644 (file)
@@ -34,6 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32     dpll_reg;
 
+       /* On IVB, 3rd pipe shares PLL with another one */
+       if (pipe > 1)
+               return false;
+
        if (HAS_PCH_SPLIT(dev))
                dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
        else
@@ -370,6 +374,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
+       case 7:
        case 6:
                for (i = 0; i < 16; i++)
                        dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -404,6 +409,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
+       case 7:
        case 6:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
@@ -814,6 +820,7 @@ int i915_save_state(struct drm_device *dev)
                dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
                dev_priv->saveMCHBAR_RENDER_STANDBY =
                        I915_READ(RSTDBYCTL);
+               dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
        } else {
                dev_priv->saveIER = I915_READ(IER);
                dev_priv->saveIMR = I915_READ(IMR);
@@ -865,6 +872,7 @@ int i915_restore_state(struct drm_device *dev)
                I915_WRITE(GTIMR, dev_priv->saveGTIMR);
                I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
                I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+               I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
        } else {
                I915_WRITE(IER, dev_priv->saveIER);
                I915_WRITE(IMR, dev_priv->saveIMR);
index 927442a11925f990fcb45b5cba763a3cb7d521bc..e5fa074b78305a999cf31d90289eb90ea55734f1 100644 (file)
@@ -24,6 +24,7 @@
  *    Eric Anholt <eric@anholt.net>
  *
  */
+#include <linux/dmi.h>
 #include <drm/drm_dp_helper.h>
 #include "drmP.h"
 #include "drm.h"
@@ -592,6 +593,26 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
        dev_priv->edp.bpp = 18;
 }
 
+static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+       DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+                     "VBIOS ROM for %s\n",
+                     id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+       {
+               .callback = intel_no_opregion_vbt_callback,
+               .ident = "ThinkCentre A57",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+               },
+       },
+       { }
+};
+
 /**
  * intel_parse_bios - find VBT and initialize settings from the BIOS
  * @dev: DRM device
@@ -612,7 +633,7 @@ intel_parse_bios(struct drm_device *dev)
        init_vbt_defaults(dev_priv);
 
        /* XXX Should this validation be moved to intel_opregion.c? */
-       if (dev_priv->opregion.vbt) {
+       if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
                struct vbt_header *vbt = dev_priv->opregion.vbt;
                if (memcmp(vbt->signature, "$VBT", 4) == 0) {
                        DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
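
The two hunks above add a DMI quirk table and gate the OpRegion VBT path on dmi_check_system(). Below is a generic user-space sketch of the table-plus-callback pattern; the struct layout and check_quirks() are illustrative stand-ins, not the kernel's DMI API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct quirk {
        const char *ident;
        const char *vendor;
        const char *product;
        int (*callback)(const struct quirk *q);
};

static int no_opregion_vbt(const struct quirk *q)
{
        printf("Falling back to VBIOS ROM VBT for %s\n", q->ident);
        return 1;
}

static const struct quirk quirks[] = {
        { "ThinkCentre A57", "LENOVO", "97027RG", no_opregion_vbt },
        { NULL, NULL, NULL, NULL }            /* terminating entry */
};

/* Walk the table and fire the first matching entry's callback, mirroring
 * how the quirk gates the OpRegion VBT path above. */
static bool check_quirks(const char *vendor, const char *product)
{
        for (const struct quirk *q = quirks; q->ident; q++)
                if (strcmp(q->vendor, vendor) == 0 &&
                    strcmp(q->product, product) == 0)
                        return q->callback(q) != 0;
        return false;
}
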
index 5609c065aaf4a92206a34277b2fe2a78f23096c2..4b8e23555179885541edfb3f0691861e8704a0ba 100644 (file)
@@ -2340,6 +2340,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
        temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+       temp |= FDI_COMPOSITE_SYNC;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
        reg = FDI_RX_CTL(pipe);
@@ -2347,6 +2348,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
        temp &= ~FDI_LINK_TRAIN_AUTO;
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+       temp |= FDI_COMPOSITE_SYNC;
        I915_WRITE(reg, temp | FDI_RX_ENABLE);
 
        POSTING_READ(reg);
@@ -4970,7 +4972,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        } else if (is_sdvo && is_tv)
                factor = 20;
 
-       if (clock.m1 < factor * clock.n)
+       if (clock.m < factor * clock.n)
                fp |= FP_CB_TUNE;
 
        dpll = 0;
@@ -5263,7 +5265,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
        int i;
 
        /* The clocks have to be on to load the palette. */
-       if (!crtc->enabled)
+       if (!crtc->enabled || !intel_crtc->active)
                return;
 
        /* use legacy palette for Ironlake */
@@ -5334,6 +5336,31 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
        I915_WRITE(CURBASE(pipe), base);
 }
 
+static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       bool visible = base != 0;
+
+       if (intel_crtc->cursor_visible != visible) {
+               uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
+               if (base) {
+                       cntl &= ~CURSOR_MODE;
+                       cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+               } else {
+                       cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+                       cntl |= CURSOR_MODE_DISABLE;
+               }
+               I915_WRITE(CURCNTR_IVB(pipe), cntl);
+
+               intel_crtc->cursor_visible = visible;
+       }
+       /* and commit changes on next vblank */
+       I915_WRITE(CURBASE_IVB(pipe), base);
+}
+
 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
                                     bool on)
@@ -5381,11 +5408,16 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
        if (!visible && !intel_crtc->cursor_visible)
                return;
 
-       I915_WRITE(CURPOS(pipe), pos);
-       if (IS_845G(dev) || IS_I865G(dev))
-               i845_update_cursor(crtc, base);
-       else
-               i9xx_update_cursor(crtc, base);
+       if (IS_IVYBRIDGE(dev)) {
+               I915_WRITE(CURPOS_IVB(pipe), pos);
+               ivb_update_cursor(crtc, base);
+       } else {
+               I915_WRITE(CURPOS(pipe), pos);
+               if (IS_845G(dev) || IS_I865G(dev))
+                       i845_update_cursor(crtc, base);
+               else
+                       i9xx_update_cursor(crtc, base);
+       }
 
        if (visible)
                intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
@@ -6547,6 +6579,13 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg, val;
+       int i;
+
+       /* Clear any frame start delays used for debugging left by the BIOS */
+       for_each_pipe(i) {
+               reg = PIPECONF(i);
+               I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+       }
 
        if (HAS_PCH_SPLIT(dev))
                return;
@@ -7373,6 +7412,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
 
+       /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+        * gating disable must be set.  Failure to set it results in
+        * flickering pixels due to Z write ordering failures after
+        * some amount of runtime in the Mesa "fire" demo, and Unigine
+        * Sanctuary and Tropics, and apparently anything else with
+        * alpha test or pixel discard.
+        *
+        * According to the spec, bit 11 (RCCUNIT) must also be set,
+        * but we didn't debug actual testcases to find it out.
+        */
+       I915_WRITE(GEN6_UCGCTL2,
+                  GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+                  GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
        /*
         * According to the spec the following bits should be
         * set in order to enable memory self-refresh and fbc:
@@ -7399,6 +7452,18 @@ static void gen6_init_clock_gating(struct drm_device *dev)
                           DISPPLANE_TRICKLE_FEED_DISABLE);
 }
 
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+{
+       uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+       reg &= ~GEN7_FF_SCHED_MASK;
+       reg |= GEN7_FF_TS_SCHED_HW;
+       reg |= GEN7_FF_VS_SCHED_HW;
+       reg |= GEN7_FF_DS_SCHED_HW;
+
+       I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
 static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7411,8 +7476,28 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
 
+       /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+        * This implements the WaDisableRCZUnitClockGating workaround.
+        */
+       I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
        I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
+       /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+       I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+                  GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+       /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+       I915_WRITE(GEN7_L3CNTLREG1,
+                       GEN7_WA_FOR_GEN7_L3_CONTROL);
+       I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+                       GEN7_WA_L3_CHICKEN_MODE);
+
+       /* This is required by WaCatErrorRejectionIssue */
+       I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+                       I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+                       GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
        for_each_pipe(pipe)
                I915_WRITE(DSPCNTR(pipe),
                           I915_READ(DSPCNTR(pipe)) |
@@ -7525,6 +7610,8 @@ static void ironlake_teardown_rc6(struct drm_device *dev)
                drm_gem_object_unreference(&dev_priv->pwrctx->base);
                dev_priv->pwrctx = NULL;
        }
+
+       gen7_setup_fixed_func_scheduler(dev_priv);
 }
 
 static void ironlake_disable_rc6(struct drm_device *dev)
@@ -7943,7 +8030,7 @@ void intel_modeset_init(struct drm_device *dev)
                intel_init_emon(dev);
        }
 
-       if (IS_GEN6(dev))
+       if (IS_GEN6(dev) || IS_GEN7(dev))
                gen6_enable_rps(dev_priv);
 
        INIT_WORK(&dev_priv->idle_work, intel_idle_update);
@@ -7985,7 +8072,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        if (IS_IRONLAKE_M(dev))
                ironlake_disable_drps(dev);
-       if (IS_GEN6(dev))
+       if (IS_GEN6(dev) || IS_GEN7(dev))
                gen6_disable_rps(dev);
 
        if (IS_IRONLAKE_M(dev))
index e2aced6eec4c78c9ee63a8802ac782ce05daa2a0..bf9fea941617b4cc90d08939f34844dfaa5db347 100644 (file)
@@ -1554,6 +1554,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                        intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
        }
 
+       DP &= ~DP_AUDIO_OUTPUT_ENABLE;
        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);
 }
@@ -1658,6 +1659,31 @@ g4x_dp_detect(struct intel_dp *intel_dp)
        return status;
 }
 
+static struct edid *
+intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct edid     *edid;
+
+       ironlake_edp_panel_vdd_on(intel_dp);
+       edid = drm_get_edid(connector, adapter);
+       ironlake_edp_panel_vdd_off(intel_dp);
+       return edid;
+}
+
+static int
+intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       int     ret;
+
+       ironlake_edp_panel_vdd_on(intel_dp);
+       ret = intel_ddc_get_modes(connector, adapter);
+       ironlake_edp_panel_vdd_off(intel_dp);
+       return ret;
+}
+
+
 /**
  * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
  *
@@ -1684,7 +1710,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        if (intel_dp->force_audio) {
                intel_dp->has_audio = intel_dp->force_audio > 0;
        } else {
-               edid = drm_get_edid(connector, &intel_dp->adapter);
+               edid = intel_dp_get_edid(connector, &intel_dp->adapter);
                if (edid) {
                        intel_dp->has_audio = drm_detect_monitor_audio(edid);
                        connector->display_info.raw_edid = NULL;
@@ -1705,7 +1731,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
        /* We should parse the EDID data and find out if it has an audio sink
         */
 
-       ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
+       ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
        if (ret) {
                if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
                        struct drm_display_mode *newmode;
@@ -1741,7 +1767,7 @@ intel_dp_detect_audio(struct drm_connector *connector)
        struct edid *edid;
        bool has_audio = false;
 
-       edid = drm_get_edid(connector, &intel_dp->adapter);
+       edid = intel_dp_get_edid(connector, &intel_dp->adapter);
        if (edid) {
                has_audio = drm_detect_monitor_audio(edid);
 
index 9ffa61eb4d7efab156819980452e22e5507c3c76..47bf5e15f03eca02992f94f8f472d2950023289a 100644 (file)
@@ -330,7 +330,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct drm_i915_gem_object *obj);
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
-
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
 extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
 extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
 extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
index ec49bae7338260d77cc6b78f9bc15d948303c1e8..d0ce34b78cc7705192f0b9b327791bab2a4d39ab 100644 (file)
@@ -257,6 +257,16 @@ void intel_fbdev_fini(struct drm_device *dev)
        kfree(dev_priv->fbdev);
        dev_priv->fbdev = NULL;
 }
+
+void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       if (!dev_priv->fbdev)
+               return;
+
+       fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+}
+
 MODULE_LICENSE("GPL and additional rights");
 
 void intel_fb_output_poll_changed(struct drm_device *dev)
index aa0a8e83142e1c8ee5562d0884ad92c8656b30ed..236bbe09abd5564480b1d1163296323c49b91ec7 100644 (file)
@@ -158,6 +158,10 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        u32 temp;
+       u32 enable_bits = SDVO_ENABLE;
+
+       if (intel_hdmi->has_audio)
+               enable_bits |= SDVO_AUDIO_ENABLE;
 
        temp = I915_READ(intel_hdmi->sdvox_reg);
 
@@ -170,9 +174,9 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
        }
 
        if (mode != DRM_MODE_DPMS_ON) {
-               temp &= ~SDVO_ENABLE;
+               temp &= ~enable_bits;
        } else {
-               temp |= SDVO_ENABLE;
+               temp |= enable_bits;
        }
 
        I915_WRITE(intel_hdmi->sdvox_reg, temp);
index b28f7bd9f88a1fca15fdb0ade73cb724658d1cf8..ff85a91eb014c0823702e6f9ce3eb06cc939857a 100644 (file)
@@ -712,6 +712,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+                .ident = "AOpen i45GMx-I",
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+                        DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+                },
+        },
        {
                .callback = intel_no_lvds_dmi_callback,
                .ident = "Aopen i945GTt-VFA",
@@ -735,6 +743,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "MSI Wind Box DC500",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+                       DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+               },
+       },
 
        { }     /* terminating entry */
 };
index 05f500cd9c246c0156412e028f5bef0c35acd22b..f8aa8211fed114e59568fefa8296f2361f974ce8 100644 (file)
@@ -226,7 +226,7 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
        I915_WRITE(BLC_PWM_CPU_CTL, val | level);
 }
 
-void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;
@@ -254,16 +254,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
        I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 
-void intel_panel_disable_backlight(struct drm_device *dev)
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->backlight_enabled) {
-               dev_priv->backlight_level = intel_panel_get_backlight(dev);
-               dev_priv->backlight_enabled = false;
-       }
+       dev_priv->backlight_level = level;
+       if (dev_priv->backlight_enabled)
+               intel_panel_actually_set_backlight(dev, level);
+}
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       intel_panel_set_backlight(dev, 0);
+       dev_priv->backlight_enabled = false;
+       intel_panel_actually_set_backlight(dev, 0);
 }
 
 void intel_panel_enable_backlight(struct drm_device *dev)
@@ -273,8 +278,8 @@ void intel_panel_enable_backlight(struct drm_device *dev)
        if (dev_priv->backlight_level == 0)
                dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
 
-       intel_panel_set_backlight(dev, dev_priv->backlight_level);
        dev_priv->backlight_enabled = true;
+       intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
 }
 
 void intel_panel_setup_backlight(struct drm_device *dev)
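
The refactor above splits "remember the requested level" from "program the hardware", so setting a level while the backlight is disabled only records it for the next enable. A minimal sketch of that split; write_pwm() is a hypothetical stand-in for the register write.

#include <stdbool.h>
#include <stdint.h>

struct backlight {
        uint32_t level;     /* last requested level, kept across disable */
        bool enabled;
};

static void write_pwm(uint32_t level) { (void)level; /* program the PWM register */ }

/* Record every request, but only touch the hardware while enabled. */
static void backlight_set(struct backlight *bl, uint32_t level)
{
        bl->level = level;
        if (bl->enabled)
                write_pwm(level);
}

static void backlight_disable(struct backlight *bl)
{
        bl->enabled = false;
        write_pwm(0);                   /* level survives for the next enable */
}

static void backlight_enable(struct backlight *bl)
{
        bl->enabled = true;
        write_pwm(bl->level);
}
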
index 1f61fc7b754c5ecd36b33416ff8d2357887b2510..2d6039b4571fbc80b6eab0373937b4ee4cdb4de3 100644 (file)
@@ -863,7 +863,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
         * of the buffer.
         */
        ring->effective_size = ring->size;
-       if (IS_I830(ring->dev))
+       if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ring->effective_size -= 128;
 
        return 0;
index 30fe554d8936a8cd965b083a0de9064f44865db6..c90106074743f42f146aa4c2c8ab970976590e2d 100644 (file)
@@ -724,6 +724,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
        uint16_t width, height;
        uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
        uint16_t h_sync_offset, v_sync_offset;
+       int mode_clock;
 
        width = mode->crtc_hdisplay;
        height = mode->crtc_vdisplay;
@@ -738,7 +739,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
        h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
        v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
 
-       dtd->part1.clock = mode->clock / 10;
+       mode_clock = mode->clock;
+       mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
+       mode_clock /= 10;
+       dtd->part1.clock = mode_clock;
+
        dtd->part1.h_active = width & 0xff;
        dtd->part1.h_blank = h_blank_len & 0xff;
        dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
@@ -757,10 +762,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
                ((v_sync_len & 0x30) >> 4);
 
        dtd->part2.dtd_flags = 0x18;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
        if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-               dtd->part2.dtd_flags |= 0x2;
+               dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               dtd->part2.dtd_flags |= 0x4;
+               dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
        dtd->part2.sdvo_flags = 0;
        dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -794,9 +801,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
        mode->clock = dtd->part1.clock * 10;
 
        mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-       if (dtd->part2.dtd_flags & 0x2)
+       if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PHSYNC;
-       if (dtd->part2.dtd_flags & 0x4)
+       if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
                mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
@@ -990,7 +999,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
        u32 sdvox;
        struct intel_sdvo_in_out_map in_out;
-       struct intel_sdvo_dtd input_dtd;
+       struct intel_sdvo_dtd input_dtd, output_dtd;
        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
        int rate;
 
@@ -1015,20 +1024,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                                          intel_sdvo->attached_output))
                return;
 
-       /* We have tried to get input timing in mode_fixup, and filled into
-        * adjusted_mode.
-        */
-       if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
-               input_dtd = intel_sdvo->input_dtd;
-       } else {
-               /* Set the output timing to the screen */
-               if (!intel_sdvo_set_target_output(intel_sdvo,
-                                                 intel_sdvo->attached_output))
-                       return;
-
-               intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
-               (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
-       }
+       /* lvds has a special fixed output timing. */
+       if (intel_sdvo->is_lvds)
+               intel_sdvo_get_dtd_from_mode(&output_dtd,
+                                            intel_sdvo->sdvo_lvds_fixed_mode);
+       else
+               intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+       (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
 
        /* Set the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1046,6 +1048,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
            !intel_sdvo_set_tv_format(intel_sdvo))
                return;
 
+       /* We have tried to get input timing in mode_fixup, and filled into
+        * adjusted_mode.
+        */
+       intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
        (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
 
        switch (pixel_multiplier) {
@@ -1059,15 +1065,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 
        /* Set the SDVO control regs. */
        if (INTEL_INFO(dev)->gen >= 4) {
-               sdvox = 0;
+               /* The real mode polarity is set by the SDVO commands, using
+                * struct intel_sdvo_dtd. */
+               sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
                if (intel_sdvo->is_hdmi)
                        sdvox |= intel_sdvo->color_range;
                if (INTEL_INFO(dev)->gen < 5)
                        sdvox |= SDVO_BORDER_ENABLE;
-               if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-                       sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
-               if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-                       sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
        } else {
                sdvox = I915_READ(intel_sdvo->sdvo_reg);
                switch (intel_sdvo->sdvo_reg) {
index 4f4e23bc2d165e92383c5ca7bd14e7b81f25cb56..c5c8ddf2c1adeca2106372226f7aa3f5e56b151d 100644 (file)
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
     u16 output_flags;
 } __attribute__((packed));
 
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE     (1 << 7)
+
 /** This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
     struct {
index 113e4e7264cdfdb4ba4c3b26af49c15911c2b679..2136e6bc89375cad563ba19e7b3f800b3132f8df 100644 (file)
@@ -417,7 +417,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name           = "NTSC-M",
                .clock          = 108000,
-               .refresh        = 29970,
+               .refresh        = 59940,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
                /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
@@ -460,7 +460,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name           = "NTSC-443",
                .clock          = 108000,
-               .refresh        = 29970,
+               .refresh        = 59940,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
                /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
@@ -502,7 +502,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name           = "NTSC-J",
                .clock          = 108000,
-               .refresh        = 29970,
+               .refresh        = 59940,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
 
@@ -545,7 +545,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name           = "PAL-M",
                .clock          = 108000,
-               .refresh        = 29970,
+               .refresh        = 59940,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
 
@@ -589,7 +589,7 @@ static const struct tv_mode tv_modes[] = {
                /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
                .name       = "PAL-N",
                .clock          = 108000,
-               .refresh        = 25000,
+               .refresh        = 50000,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
 
@@ -634,7 +634,7 @@ static const struct tv_mode tv_modes[] = {
                /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
                .name       = "PAL",
                .clock          = 108000,
-               .refresh        = 25000,
+               .refresh        = 50000,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
 
@@ -821,7 +821,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name       = "1080i@50Hz",
                .clock          = 148800,
-               .refresh        = 25000,
+               .refresh        = 50000,
                .oversample     = TV_OVERSAMPLE_2X,
                .component_only = 1,
 
@@ -847,7 +847,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name       = "1080i@60Hz",
                .clock          = 148800,
-               .refresh        = 30000,
+               .refresh        = 60000,
                .oversample     = TV_OVERSAMPLE_2X,
                .component_only = 1,
 
@@ -1301,6 +1301,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
 
        I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
        I915_WRITE(TV_CTL, save_tv_ctl);
+       POSTING_READ(TV_CTL);
+
+       /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+       intel_wait_for_vblank(intel_tv->base.base.dev,
+                             to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
        /* Restore interrupt config */
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
index 2ad49cbf7c8b272203976af98abb1479d6d6847c..5fb98de0c57d92f9c82f2b27e8d3f5bc835b0294 100644 (file)
@@ -1075,7 +1075,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 
        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
-       nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
+       nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, true, false);
 }
 
index a7583a8ddb01f13dba3f12a43d1c263630cb261a..d31d355f5ed0f7b4f60279b4c5fe1995f3b242d8 100644 (file)
@@ -159,6 +159,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
        INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
        INIT_LIST_HEAD(&chan->nvsw.flip);
        INIT_LIST_HEAD(&chan->fence.pending);
+       spin_lock_init(&chan->fence.lock);
 
        /* Allocate DMA push buffer */
        chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
index 7347075ca5b873a2192f57ad0647ee8fb15d8908..56f06b0cfdb6007fd71bc4870c20894103220d59 100644 (file)
@@ -542,8 +542,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
                        return ret;
        }
 
-       INIT_LIST_HEAD(&chan->fence.pending);
-       spin_lock_init(&chan->fence.lock);
        atomic_set(&chan->fence.last_sequence_irq, 0);
        return 0;
 }
index b52e46018245801776dc77ab490ce95942fbbbad..cee78b26f60f288b6f1b858cd0f9e3a5c6a3e25e 100644 (file)
@@ -314,6 +314,25 @@ retry:
        return 0;
 }
 
+static int
+validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
+{
+       struct nouveau_fence *fence = NULL;
+       int ret = 0;
+
+       spin_lock(&nvbo->bo.bdev->fence_lock);
+       if (nvbo->bo.sync_obj)
+               fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+       spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+       if (fence) {
+               ret = nouveau_fence_sync(fence, chan);
+               nouveau_fence_unref(&fence);
+       }
+
+       return ret;
+}
+
 static int
 validate_list(struct nouveau_channel *chan, struct list_head *list,
              struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
@@ -327,7 +346,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
-               ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
+               ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail pre-validate sync\n");
                        return ret;
@@ -350,7 +369,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
-               ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
+               ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail post-validate sync\n");
                        return ret;
index 9a0aee2f06504a7ea2de039238407e10976e2429..88661eaa2c747b34685d142ed44a48f44402d4c0 100644 (file)
@@ -1301,8 +1301,11 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
 
 int atom_asic_init(struct atom_context *ctx)
 {
+       struct radeon_device *rdev = ctx->card->dev->dev_private;
        int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
        uint32_t ps[16];
+       int ret;
+
        memset(ps, 0, 64);
 
        ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
@@ -1312,7 +1315,17 @@ int atom_asic_init(struct atom_context *ctx)
 
        if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
                return 1;
-       return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+       ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+       if (ret)
+               return ret;
+
+       memset(ps, 0, 64);
+
+       if (rdev->family < CHIP_R600) {
+               if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
+                       atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+       }
+       return ret;
 }
 
 void atom_destroy(struct atom_context *ctx)
index 93cfe2086ba023b82b4271b136730a2d3a4cd25d..25fea631dad2d2ae03bee413b7d9254045a5f9d3 100644 (file)
@@ -44,6 +44,7 @@
 #define ATOM_CMD_SETSCLK       0x0A
 #define ATOM_CMD_SETMCLK       0x0B
 #define ATOM_CMD_SETPCLK       0x0C
+#define ATOM_CMD_SPDFANCNTL    0x39
 
 #define ATOM_DATA_FWI_PTR      0xC
 #define ATOM_DATA_IIO_PTR      0x32
index 9541995e4b21df3d4ba5cb9b3ab496a3255b8034..071ded119eb9a5953610acfd28ac4886da4f6944 100644 (file)
@@ -1173,7 +1173,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
        WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
-              crtc->mode.vdisplay);
+              target_fb->height);
        x &= ~3;
        y &= ~1;
        WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
@@ -1342,7 +1342,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
        WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
-              crtc->mode.vdisplay);
+              target_fb->height);
        x &= ~3;
        y &= ~1;
        WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
index 79e8ebc0530723e9df8a8aa5f448b3b8068fab62..3b77ad60ed514fd2305fe84844a9bf089ed749d5 100644 (file)
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                }
        }
 
-       DRM_ERROR("aux i2c too many retries, giving up\n");
+       DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
        return -EREMOTEIO;
 }
 
@@ -553,6 +553,7 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
 
        if (!ASIC_IS_DCE4(rdev))
@@ -560,10 +561,20 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
 
        if (radeon_connector_encoder_is_dp_bridge(connector))
                panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+       else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+               u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+               if (tmp & 1)
+                       panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+       }
 
        atombios_dig_encoder_setup(encoder,
                                   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
                                   panel_mode);
+
+       if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+           (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+               radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+       }
 }
 
 void radeon_dp_set_link_config(struct drm_connector *connector,
index ea7a24ed5c035903198078f27fe4acc0dc1f49e9..8846bad45e66a9570cf159e9e10bab38c2459393 100644 (file)
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
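
The hunk above replaces an unbounded busy-wait on the flip-pending bit with a loop that gives up after rdev->usec_timeout iterations. A generic sketch of that bounded-wait helper; read_status() and delay_us() are hypothetical stand-ins for the register read and udelay() in the patch.

#include <stdbool.h>
#include <stdint.h>

/* Poll a status bit for at most timeout_us microseconds instead of
 * spinning forever. */
static bool wait_for_bit(uint32_t (*read_status)(void),
                         void (*delay_us)(unsigned int),
                         uint32_t mask, unsigned int timeout_us)
{
        for (unsigned int i = 0; i < timeout_us; i++) {
                if (read_status() & mask)
                        return true;       /* bit went high in time */
                delay_us(1);
        }
        return false;                      /* timed out; caller decides */
}
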
@@ -353,6 +358,7 @@ void evergreen_hpd_init(struct radeon_device *rdev)
                default:
                        break;
                }
+               radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
        }
        if (rdev->irq.installed)
                evergreen_irq_set(rdev);
@@ -920,6 +926,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
                WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
                WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
                WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+               if ((rdev->family == CHIP_JUNIPER) ||
+                   (rdev->family == CHIP_CYPRESS) ||
+                   (rdev->family == CHIP_HEMLOCK) ||
+                   (rdev->family == CHIP_BARTS))
+                       WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
        }
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -2058,9 +2069,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
                WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
                WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
-        }
+       }
 
-       grbm_gfx_index |= SE_BROADCAST_WRITES;
+       grbm_gfx_index = INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES;
        WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
        WREG32(RLC_GFX_INDEX, grbm_gfx_index);
 
@@ -3251,6 +3262,18 @@ int evergreen_init(struct radeon_device *rdev)
                        rdev->accel_working = false;
                }
        }
+
+       /* Don't start up if the MC ucode is missing on BTC parts.
+        * The default clocks and voltages before the MC ucode
+        * is loaded are not sufficient for advanced operations.
+        */
+       if (ASIC_IS_DCE5(rdev)) {
+               if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+                       DRM_ERROR("radeon: MC ucode required for NI+.\n");
+                       return -EINVAL;
+               }
+       }
+
        return 0;
 }
 
index b7b2714f0b327d379aa21d2a478b463e067f1bce..6078ae4cc16ed4eb313cc8a708174de99cd21bbe 100644 (file)
 #define        MC_VM_MD_L1_TLB0_CNTL                           0x2654
 #define        MC_VM_MD_L1_TLB1_CNTL                           0x2658
 #define        MC_VM_MD_L1_TLB2_CNTL                           0x265C
+#define        MC_VM_MD_L1_TLB3_CNTL                           0x2698
 
 #define        FUS_MC_VM_MD_L1_TLB0_CNTL                       0x265C
 #define        FUS_MC_VM_MD_L1_TLB1_CNTL                       0x2660
index 7fcdbbbf297965044e550e611d8eb8b32a086051..d94f440f13794634052dfbd48dddd266c8415595 100644 (file)
@@ -84,13 +84,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+       int i;
 
        /* Lock the graphics update lock */
        /* update the scanout addresses */
        WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
@@ -434,6 +439,7 @@ void r100_hpd_init(struct radeon_device *rdev)
                default:
                        break;
                }
+               radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
        }
        if (rdev->irq.installed)
                r100_irq_set(rdev);
@@ -675,9 +681,7 @@ int r100_irq_process(struct radeon_device *rdev)
                        WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
                        break;
                default:
-                       msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
-                       WREG32(RADEON_MSI_REARM_EN, msi_rearm);
-                       WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+                       WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
                        break;
                }
        }
@@ -2063,6 +2067,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
 void r100_bm_disable(struct radeon_device *rdev)
 {
        u32 tmp;
+       u16 tmp16;
 
        /* disable bus mastering */
        tmp = RREG32(R_000030_BUS_CNTL);
@@ -2073,8 +2078,8 @@ void r100_bm_disable(struct radeon_device *rdev)
        WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
        tmp = RREG32(RADEON_BUS_CNTL);
        mdelay(1);
-       pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
-       pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+       pci_read_config_word(rdev->pdev, 0x4, &tmp16);
+       pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
        mdelay(1);
 }
 
index 1dea9d65b045eb7a8a28de5bc0d0ada44571270f..1a4ed433eba3c2d591451ab272fb9bd59cde6f5a 100644 (file)
@@ -762,13 +762,14 @@ void r600_hpd_init(struct radeon_device *rdev)
        struct drm_device *dev = rdev->ddev;
        struct drm_connector *connector;
 
-       if (ASIC_IS_DCE3(rdev)) {
-               u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
-               if (ASIC_IS_DCE32(rdev))
-                       tmp |= DC_HPDx_EN;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+               if (ASIC_IS_DCE3(rdev)) {
+                       u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+                       if (ASIC_IS_DCE32(rdev))
+                               tmp |= DC_HPDx_EN;
 
-               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HPD1_CONTROL, tmp);
@@ -798,10 +799,7 @@ void r600_hpd_init(struct radeon_device *rdev)
                        default:
                                break;
                        }
-               }
-       } else {
-               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               } else {
                        switch (radeon_connector->hpd.hpd) {
                        case RADEON_HPD_1:
                                WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
@@ -819,6 +817,7 @@ void r600_hpd_init(struct radeon_device *rdev)
                                break;
                        }
                }
+               radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
        }
        if (rdev->irq.installed)
                r600_irq_set(rdev);
index 2d1f6c5ee2a77e0466f91ceb2e4b4f28cd0cf218..73e2c7c6edbc63c02e35a198a684e40708c65c8f 100644 (file)
@@ -313,6 +313,10 @@ const u32 r6xx_default_state[] =
        0x00000000, /* VGT_REUSE_OFF */
        0x00000000, /* VGT_VTX_CNT_EN */
 
+       0xc0016900,
+       0x000000d4,
+       0x00000000, /* SX_MISC */
+
        0xc0016900,
        0x000002c8,
        0x00000000, /* VGT_STRMOUT_BUFFER_EN */
@@ -625,6 +629,10 @@ const u32 r7xx_default_state[] =
        0x00000000, /* VGT_REUSE_OFF */
        0x00000000, /* VGT_VTX_CNT_EN */
 
+       0xc0016900,
+       0x000000d4,
+       0x00000000, /* SX_MISC */
+
        0xc0016900,
        0x000002c8,
        0x00000000, /* VGT_STRMOUT_BUFFER_EN */
index f5ac7e788d813b9b744c08cdbc0cee72c0d0ec09..c45d92191fd8c25dbc26df0eac159c36fc7b3153 100644 (file)
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
        frame[0xD] = (right_bar >> 8);
 
        r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+       /* Our header values (type, version, length) should be fine, since Intel
+        * uses the same ones. The checksum function also seems to be OK, as it
+        * works fine for the audio infoframe. However, the calculated value is
+        * always lower by 2 than what fglrx produces, which breaks the display
+        * on TVs that strictly check the checksum. Adjust it manually here to
+        * work around this issue. */
+       frame[0x0] += 2;
 
        WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
                frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
index 0bb4ddf792f650300fbd1173180444f4e5ad8370..59d72d0f5a884908e9b4b3f446a630229bb8d901 100644 (file)
@@ -93,6 +93,7 @@ extern int radeon_audio;
 extern int radeon_disp_priority;
 extern int radeon_hw_i2c;
 extern int radeon_pcie_gen2;
+extern int radeon_msi;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
index bf2b61584cdb5a70fc06fbc41dd2ca51733532fc..ef6b426b1ee76d1e015751be76c7fa8222e58851 100644 (file)
@@ -85,6 +85,18 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
                for (i = 0; i < num_indices; i++) {
                        gpio = &i2c_info->asGPIO_Info[i];
 
+                       /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+                       if ((rdev->family == CHIP_R420) ||
+                           (rdev->family == CHIP_R423) ||
+                           (rdev->family == CHIP_RV410)) {
+                               if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+                                       gpio->ucClkMaskShift = 0x19;
+                                       gpio->ucDataMaskShift = 0x18;
+                               }
+                       }
+
                        /* some evergreen boards have bad data for this entry */
                        if (ASIC_IS_DCE4(rdev)) {
                                if ((i == 7) &&
@@ -169,6 +181,18 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
                        gpio = &i2c_info->asGPIO_Info[i];
                        i2c.valid = false;
 
+                       /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+                       if ((rdev->family == CHIP_R420) ||
+                           (rdev->family == CHIP_R423) ||
+                           (rdev->family == CHIP_RV410)) {
+                               if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+                                   (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+                                       gpio->ucClkMaskShift = 0x19;
+                                       gpio->ucDataMaskShift = 0x18;
+                               }
+                       }
+
                        /* some evergreen boards have bad data for this entry */
                        if (ASIC_IS_DCE4(rdev)) {
                                if ((i == 7) &&
@@ -456,10 +480,26 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
         */
        if ((dev->pdev->device == 0x9498) &&
            (dev->pdev->subsystem_vendor == 0x1682) &&
-           (dev->pdev->subsystem_device == 0x2452)) {
+           (dev->pdev->subsystem_device == 0x2452) &&
+           (i2c_bus->valid == false) &&
+           !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
                struct radeon_device *rdev = dev->dev_private;
                *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
        }
+
+       /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+       if ((dev->pdev->device == 0x9802) &&
+           (dev->pdev->subsystem_vendor == 0x1734) &&
+           (dev->pdev->subsystem_device == 0x11bd)) {
+               if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+                       *connector_type = DRM_MODE_CONNECTOR_DVII;
+                       *line_mux = 0x3103;
+               } else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
+                       *connector_type = DRM_MODE_CONNECTOR_DVII;
+               }
+       }
+
+
        return true;
 }
 
@@ -2544,7 +2584,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 
        rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
        rdev->pm.current_clock_mode_index = 0;
-       rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+       if (rdev->pm.default_power_state_index >= 0)
+               rdev->pm.current_vddc =
+                       rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+       else
+               rdev->pm.current_vddc = 0;
 }
 
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
index cd3c86c845e46d63bacefdd1443077aab9fcaf73..859df6b5bcaa51083e978ddfe542bf701bad2d69 100644 (file)
@@ -620,8 +620,8 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
                i2c.y_data_mask = 0x80;
        } else {
                /* default masks for ddc pads */
-               i2c.mask_clk_mask = RADEON_GPIO_EN_1;
-               i2c.mask_data_mask = RADEON_GPIO_EN_0;
+               i2c.mask_clk_mask = RADEON_GPIO_MASK_1;
+               i2c.mask_data_mask = RADEON_GPIO_MASK_0;
                i2c.a_clk_mask = RADEON_GPIO_A_1;
                i2c.a_data_mask = RADEON_GPIO_A_0;
                i2c.en_clk_mask = RADEON_GPIO_EN_1;
index 05b8b2cbd4fedbf36fe3b83e20bfaf1d23e50ca9..1f6a0f55ad1941fa516c6dbd6f489cb97e13d2bb 100644 (file)
@@ -715,6 +715,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
                dret = radeon_ddc_probe(radeon_connector,
                                        radeon_connector->requires_extended_probe);
        if (dret) {
+               radeon_connector->detected_by_load = false;
                if (radeon_connector->edid) {
                        kfree(radeon_connector->edid);
                        radeon_connector->edid = NULL;
@@ -741,12 +742,21 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
        } else {
 
                /* if we aren't forcing don't do destructive polling */
-               if (!force)
-                       return connector->status;
+               if (!force) {
+                       /* only return the previous status if we last
+                        * detected a monitor via load.
+                        */
+                       if (radeon_connector->detected_by_load)
+                               return connector->status;
+                       else
+                               return ret;
+               }
 
                if (radeon_connector->dac_load_detect && encoder) {
                        encoder_funcs = encoder->helper_private;
                        ret = encoder_funcs->detect(encoder, connector);
+                       if (ret != connector_status_disconnected)
+                               radeon_connector->detected_by_load = true;
                }
        }
 
@@ -888,6 +898,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
                dret = radeon_ddc_probe(radeon_connector,
                                        radeon_connector->requires_extended_probe);
        if (dret) {
+               radeon_connector->detected_by_load = false;
                if (radeon_connector->edid) {
                        kfree(radeon_connector->edid);
                        radeon_connector->edid = NULL;
@@ -950,8 +961,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
        if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
                goto out;
 
+       /* DVI-D and HDMI-A are digital only */
+       if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
+           (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
+               goto out;
+
+       /* if we aren't forcing don't do destructive polling */
        if (!force) {
-               ret = connector->status;
+               /* only return the previous status if we last
+                * detected a monitor via load.
+                */
+               if (radeon_connector->detected_by_load)
+                       ret = connector->status;
                goto out;
        }
 
@@ -969,6 +990,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 
                        encoder = obj_to_encoder(obj);
 
+                       if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
+                           encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
+                               continue;
+
                        encoder_funcs = encoder->helper_private;
                        if (encoder_funcs->detect) {
                                if (ret != connector_status_connected) {
@@ -976,6 +1001,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
                                        if (ret == connector_status_connected) {
                                                radeon_connector->use_digital = false;
                                        }
+                                       if (ret != connector_status_disconnected)
+                                               radeon_connector->detected_by_load = true;
                                }
                                break;
                        }
@@ -993,6 +1020,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
         * cases the DVI port is actually a virtual KVM port connected to the service
         * processor.
         */
+out:
        if ((!rdev->is_atom_bios) &&
            (ret == connector_status_disconnected) &&
            rdev->mode_info.bios_hardcoded_edid_size) {
@@ -1000,7 +1028,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
                ret = connector_status_connected;
        }
 
-out:
        /* updated in get modes as well since we need to know if it's analog or digital */
        radeon_connector_update_scratch_regs(connector, ret);
        return ret;
index f59a6823301fdcc6a2cecaacb22c3034b775125d..3fb222615c6fcda57c551f0af055f16e3b125d2d 100644 (file)
@@ -151,7 +151,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
                           uint32_t height)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_device *rdev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
+       struct radeon_bo *robj;
        uint64_t gpu_addr;
        int ret;
 
@@ -173,7 +175,15 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
-       ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+       robj = gem_to_radeon_bo(obj);
+       ret = radeon_bo_reserve(robj, false);
+       if (unlikely(ret != 0))
+               goto fail;
+       /* Only 27 bit offset for legacy cursor */
+       ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+                                      &gpu_addr);
+       radeon_bo_unreserve(robj);
        if (ret)
                goto fail;
 
@@ -181,7 +191,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
        radeon_crtc->cursor_height = height;
 
        radeon_lock_cursor(crtc, true);
-       /* XXX only 27 bit offset for legacy cursor */
        radeon_set_cursor(crtc, obj, gpu_addr);
        radeon_show_cursor(crtc);
        radeon_lock_cursor(crtc, false);
index 440e6ecccc40054c5620e38db033da76ee3dd3aa..e87893c2c88a3ea54e9502afef3fbe65e8b86496 100644 (file)
@@ -223,8 +223,11 @@ int radeon_wb_init(struct radeon_device *rdev)
        if (radeon_no_wb == 1)
                rdev->wb.enabled = false;
        else {
-               /* often unreliable on AGP */
                if (rdev->flags & RADEON_IS_AGP) {
+                       /* often unreliable on AGP */
+                       rdev->wb.enabled = false;
+               } else if (rdev->family < CHIP_R300) {
+                       /* often unreliable on pre-r300 */
                        rdev->wb.enabled = false;
                } else {
                        rdev->wb.enabled = true;
@@ -854,6 +857,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
+       drm_kms_helper_poll_disable(dev);
+
        /* turn off display hw */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
@@ -940,6 +945,8 @@ int radeon_resume_kms(struct drm_device *dev)
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
        }
+
+       drm_kms_helper_poll_enable(dev);
        return 0;
 }
 
index 73dfbe8e5f9ed7d750a424c5fa9aa7a8ed6ab025..60e160578f73bd69d8ae7b2026c4a3eb388a7042 100644 (file)
@@ -117,6 +117,7 @@ int radeon_audio = 0;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = 0;
+int radeon_msi = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -163,6 +164,9 @@ module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
 MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
 module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
 
+MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(msi, radeon_msi, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
index 6c111c1fa3f9c467f27d0a220373cf1777d80b5e..c90425c439d820ce333f91476ed556c60af08936 100644 (file)
@@ -898,6 +898,10 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
        struct radeon_i2c_chan *i2c;
        int ret;
 
+       /* don't add the mm_i2c bus unless hw_i2c is enabled */
+       if (rec->mm_i2c && (radeon_hw_i2c == 0))
+               return NULL;
+
        i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
        if (i2c == NULL)
                return NULL;
index 9ec830c77af0be7afdba4c9b32f13f1b277bdbe8..eb6fe79c691f835ac104fbafad332a7253c50300 100644 (file)
@@ -108,6 +108,58 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
        radeon_irq_set(rdev);
 }
 
+static bool radeon_msi_ok(struct radeon_device *rdev)
+{
+       /* RV370/RV380 was first asic with MSI support */
+       if (rdev->family < CHIP_RV380)
+               return false;
+
+       /* MSIs don't work on AGP */
+       if (rdev->flags & RADEON_IS_AGP)
+               return false;
+
+       /* force MSI on */
+       if (radeon_msi == 1)
+               return true;
+       else if (radeon_msi == 0)
+               return false;
+
+       /* Quirks */
+       /* HP RS690 only seems to work with MSIs. */
+       if ((rdev->pdev->device == 0x791f) &&
+           (rdev->pdev->subsystem_vendor == 0x103c) &&
+           (rdev->pdev->subsystem_device == 0x30c2))
+               return true;
+
+       /* Dell RS690 only seems to work with MSIs. */
+       if ((rdev->pdev->device == 0x791f) &&
+           (rdev->pdev->subsystem_vendor == 0x1028) &&
+           (rdev->pdev->subsystem_device == 0x01fc))
+               return true;
+
+       /* Dell RS690 only seems to work with MSIs. */
+       if ((rdev->pdev->device == 0x791f) &&
+           (rdev->pdev->subsystem_vendor == 0x1028) &&
+           (rdev->pdev->subsystem_device == 0x01fd))
+               return true;
+
+       /* RV515 seems to have MSI issues where it occasionally
+        * loses MSI rearms. This leads to lockups and freezes.
+        * Disable MSI on it by default.
+        */
+       if (rdev->family == CHIP_RV515)
+               return false;
+       if (rdev->flags & RADEON_IS_IGP) {
+               /* APUs work fine with MSIs */
+               if (rdev->family >= CHIP_PALM)
+                       return true;
+               /* lots of IGPs have problems with MSIs */
+               return false;
+       }
+
+       return true;
+}
+
 int radeon_irq_kms_init(struct radeon_device *rdev)
 {
        int i;
@@ -124,12 +176,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
        }
        /* enable msi */
        rdev->msi_enabled = 0;
-       /* MSIs don't seem to work reliably on all IGP
-        * chips.  Disable MSI on them for now.
-        */
-       if ((rdev->family >= CHIP_RV380) &&
-           ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
-           (!(rdev->flags & RADEON_IS_AGP))) {
+
+       if (radeon_msi_ok(rdev)) {
                int ret = pci_enable_msi(rdev->pdev);
                if (!ret) {
                        rdev->msi_enabled = 1;
index 68820f5f630354a91826da1f4a8391e8b92b5e86..ed0178f0323504dfc5d5ab8be18f6c8026b09dd7 100644 (file)
@@ -447,6 +447,7 @@ struct radeon_connector {
        struct edid *edid;
        void *con_priv;
        bool dac_load_detect;
+       bool detected_by_load; /* if the connection status was determined by load */
        uint16_t connector_object_id;
        struct radeon_hpd hpd;
        struct radeon_router router;
index 976c3b1b1b6e5aacd053bc1307a8a1313be64a45..35da1b474197bb1c55ac29d22d286617cd172928 100644 (file)
@@ -204,7 +204,8 @@ void radeon_bo_unref(struct radeon_bo **bo)
                *bo = NULL;
 }
 
-int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
+                            u64 *gpu_addr)
 {
        int r, i;
 
@@ -212,6 +213,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
+               WARN_ON_ONCE(max_offset != 0);
                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
@@ -219,6 +221,15 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
                /* force to pin into visible video ram */
                bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
        }
+       if (max_offset) {
+               u64 lpfn = max_offset >> PAGE_SHIFT;
+
+               if (!bo->placement.lpfn)
+                       bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+
+               if (lpfn < bo->placement.lpfn)
+                       bo->placement.lpfn = lpfn;
+       }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
@@ -232,6 +243,11 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
        return r;
 }
 
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+{
+       return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
+}
+
 int radeon_bo_unpin(struct radeon_bo *bo)
 {
        int r, i;
index ede6c13628f27c06c0a106963c2c9b3a6a532b8b..7199c6ab027e6d70253056cf60d418463778e361 100644 (file)
@@ -144,6 +144,8 @@ extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
 extern void radeon_bo_kunmap(struct radeon_bo *bo);
 extern void radeon_bo_unref(struct radeon_bo **bo);
 extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
+                                   u64 max_offset, u64 *gpu_addr);
 extern int radeon_bo_unpin(struct radeon_bo *bo);
 extern int radeon_bo_evict_vram(struct radeon_device *rdev);
 extern void radeon_bo_force_delete(struct radeon_device *rdev);
index 1f5850e473cc35716f5c70d9a4640209caea2e41..2026c2d52c5754843a3518466a31521eb2d74c8e 100644 (file)
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
@@ -287,6 +292,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
                default:
                        break;
                }
+               radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
        }
        if (rdev->irq.installed)
                rs600_irq_set(rdev);
@@ -318,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
 
 void rs600_bm_disable(struct radeon_device *rdev)
 {
-       u32 tmp;
+       u16 tmp;
 
        /* disable bus mastering */
-       pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+       pci_read_config_word(rdev->pdev, 0x4, &tmp);
        pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
        mdelay(1);
 }
@@ -692,9 +698,7 @@ int rs600_irq_process(struct radeon_device *rdev)
                        WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
                        break;
                default:
-                       msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
-                       WREG32(RADEON_MSI_REARM_EN, msi_rearm);
-                       WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+                       WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
                        break;
                }
        }
index f2516e64805b4a5a9da1c0eb179aa3253fb5a2ad..51d20aa63d0398bdc4c0062c65594ab498f16c32 100644 (file)
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
@@ -146,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+       if (rdev->family == CHIP_RV740)
+               WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
index 79fa588e9ed56d76bbaa73683a765759d5a524f8..75380927e9c682a25461586578c5105c6b15e725 100644 (file)
 #define        MC_VM_MD_L1_TLB0_CNTL                           0x2654
 #define        MC_VM_MD_L1_TLB1_CNTL                           0x2658
 #define        MC_VM_MD_L1_TLB2_CNTL                           0x265C
+#define        MC_VM_MD_L1_TLB3_CNTL                           0x2698
 #define        MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR              0x203C
 #define        MC_VM_SYSTEM_APERTURE_HIGH_ADDR                 0x2038
 #define        MC_VM_SYSTEM_APERTURE_LOW_ADDR                  0x2034
index e2b2d786687750b602529ffc08a4a04aa8af18a6..7632edb2f46e13b545360ffba1697af901449758 100644 (file)
@@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (bo->ttm == NULL) {
-                       ret = ttm_bo_add_ttm(bo, false);
+                       bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+                       ret = ttm_bo_add_ttm(bo, zero);
                        if (ret)
                                goto out_err;
                }
@@ -1808,6 +1809,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                        spin_unlock(&glob->lru_lock);
                        (void) ttm_bo_cleanup_refs(bo, false, false, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
+                       spin_lock(&glob->lru_lock);
                        continue;
                }
 
index dfe32e62bd90cc083a6667a3090b02c694984e92..8a38c91f4c9c216d8f0fbe9b2ab4fdf01f5fd1ba 100644 (file)
@@ -313,7 +313,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
                                  unsigned int *handle)
 {
        if (handle)
-               handle = 0;
+               *handle = 0;
 
        return 0;
 }
index 36ca465c00cefac036a22fad61fc70b2568a1ccc..7e0acf46c356514cc7a5f91bb4b8988a7ea3a1d6 100644 (file)
@@ -69,7 +69,7 @@ config HID_ACRUX
        Say Y here if you want to enable support for ACRUX game controllers.
 
 config HID_ACRUX_FF
-       tristate "ACRUX force feedback support"
+       bool "ACRUX force feedback support"
        depends on HID_ACRUX
        select INPUT_FF_MEMLESS
        ---help---
@@ -314,6 +314,7 @@ config HID_MULTITOUCH
          - Hanvon dual touch panels
          - Ilitek dual touch panels
          - IrTouch Infrared USB panels
+         - LG Display panels (Dell ST2220Tc)
          - Lumio CrystalTouch panels
          - MosArt dual-touch panels
          - PenMount dual touch panels
index b85744fe846477221ad02221f78bb397c603f54d..299d23871122f29f8651917caf2ccb461a4c8c1d 100644 (file)
@@ -444,11 +444,20 @@ static const struct hid_device_id apple_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
                        APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS),
+               .driver_data = APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
                        APPLE_ISO_KEYBOARD },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+               .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+                       APPLE_ISO_KEYBOARD },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
@@ -487,6 +496,24 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
index 8965ad93d51003c47fd3ac570b897f6ee065de03..b99af346fdff310a985fc01fd7fecd7024427e40 100644 (file)
@@ -45,6 +45,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        case 0xff09: ch_map_key_clear(BTN_9);   break;
        case 0xff0a: ch_map_key_clear(BTN_A);   break;
        case 0xff0b: ch_map_key_clear(BTN_B);   break;
+       case 0x00f1: ch_map_key_clear(KEY_WLAN);        break;
+       case 0x00f2: ch_map_key_clear(KEY_BRIGHTNESSDOWN);      break;
+       case 0x00f3: ch_map_key_clear(KEY_BRIGHTNESSUP);        break;
+       case 0x00f4: ch_map_key_clear(KEY_DISPLAY_OFF); break;
+       case 0x00f7: ch_map_key_clear(KEY_CAMERA);      break;
+       case 0x00f8: ch_map_key_clear(KEY_PROG1);       break;
        default:
                return 0;
        }
@@ -53,6 +59,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 
 static const struct hid_device_id ch_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ch_devices);
index 6f3289a5788812ef573561482f581d04ca6524cc..53576e7c8bcf340ff21200fad14c9ae920bfd958 100644 (file)
@@ -361,7 +361,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 
        case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
                parser->global.report_size = item_udata(item);
-               if (parser->global.report_size > 32) {
+               if (parser->global.report_size > 96) {
                        dbg_hid("invalid report_size %d\n",
                                        parser->global.report_size);
                        return -1;
@@ -1340,9 +1340,22 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
@@ -1359,6 +1372,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
@@ -1369,11 +1383,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
        { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
@@ -1395,6 +1411,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MULTITOUCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1707,8 +1724,8 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
@@ -1883,6 +1900,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
        { }
index c946d90f0ae70fd5c33772c16355c7c2f83d55dc..08cc68ba9ebe53f04524f0f671c6048805f31f51 100644 (file)
@@ -21,6 +21,7 @@
 #define USB_VENDOR_ID_3M               0x0596
 #define USB_DEVICE_ID_3M1968           0x0500
 #define USB_DEVICE_ID_3M2256           0x0502
+#define USB_DEVICE_ID_3M3266           0x0506
 
 #define USB_VENDOR_ID_A4TECH           0x09da
 #define USB_DEVICE_ID_A4TECH_WCP32PU   0x0006
@@ -58,6 +59,9 @@
 #define USB_VENDOR_ID_AIRCABLE         0x16CA
 #define USB_DEVICE_ID_AIRCABLE1                0x1502
 
+#define USB_VENDOR_ID_AIREN            0x1a2c
+#define USB_DEVICE_ID_AIREN_SLIMPLUS   0x0002
+
 #define USB_VENDOR_ID_ALCOR            0x058f
 #define USB_DEVICE_ID_ALCOR_USBRS232   0x9720
 
 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI   0x0245
 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO    0x0246
 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS    0x0247
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI  0x0249
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO   0x024a
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS   0x024b
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI   0x024c
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO    0x024d
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS    0x024e
+#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI      0x024f
+#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO       0x0250
+#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS       0x0251
+#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI  0x0252
+#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO   0x0253
+#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS   0x0254
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY   0x030a
 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY    0x030b
 #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL      0x8241
 #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD     0x0418
 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH      0xb19d
 #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
+#define USB_DEVICE_ID_CHICONY_WIRELESS2        0x1123
 
 #define USB_VENDOR_ID_CHUNGHWAT                0x2247
 #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH     0x0001
 
 #define USB_VENDOR_ID_DWAV             0x0eef
 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER   0x0001
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH   0x480d
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1  0x720c
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2  0x72a1
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3  0x480e
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4  0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D      0x480d
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E      0x480e
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C      0x720c
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B      0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1      0x72a1
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA      0x72fa
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302      0x7302
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001      0xa001
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
 
 #define USB_VENDOR_ID_GENERAL_TOUCH    0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
 
 #define USB_VENDOR_ID_GLAB             0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30        0x0038
 #define USB_DEVICE_ID_PENPOWER         0x00f4
 
 #define USB_VENDOR_ID_GREENASIA                0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD        0x3013
 
 #define USB_VENDOR_ID_GRETAGMACBETH    0x0971
 #define USB_DEVICE_ID_GRETAGMACBETH_HUEY       0x2005
 #define USB_DEVICE_ID_LD_HYBRID                0x2090
 #define USB_DEVICE_ID_LD_HEATCONTROL   0x20A0
 
+#define USB_VENDOR_ID_LG               0x1fd2
+#define USB_DEVICE_ID_LG_MULTITOUCH    0x0064
+
 #define USB_VENDOR_ID_LOGITECH         0x046d
 #define USB_DEVICE_ID_LOGITECH_RECEIVER        0xc101
 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST  0xc110
index 5de25ff1cc3b8519d957aa35b9d609c88384fc22..91319f90a168cddcf17291c517012e68e2b1c072 100644 (file)
@@ -603,6 +603,9 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_3M,
                HID_USB_DEVICE(USB_VENDOR_ID_3M,
                        USB_DEVICE_ID_3M2256) },
+       { .driver_data = MT_CLS_3M,
+               HID_USB_DEVICE(USB_VENDOR_ID_3M,
+                       USB_DEVICE_ID_3M3266) },
 
        /* ActionStar panels */
        { .driver_data = MT_CLS_DEFAULT,
@@ -639,23 +642,32 @@ static const struct hid_device_id mt_devices[] = {
                        USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
 
        /* eGalax devices (resistive) */
-       {  .driver_data = MT_CLS_EGALAX,
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
-       {  .driver_data = MT_CLS_EGALAX,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
 
        /* eGalax devices (capacitive) */
-       {  .driver_data = MT_CLS_EGALAX,
+       { .driver_data = MT_CLS_EGALAX,
+               HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+       { .driver_data = MT_CLS_EGALAX,
+               HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
-       {  .driver_data = MT_CLS_EGALAX,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
-       {  .driver_data = MT_CLS_EGALAX,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
+       { .driver_data = MT_CLS_EGALAX,
                HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
-                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+       { .driver_data = MT_CLS_EGALAX,
+               HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
 
        /* Elo TouchSystems IntelliTouch Plus panel */
        { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
@@ -682,6 +694,11 @@ static const struct hid_device_id mt_devices[] = {
                HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
                        USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
 
+       /* LG Display panels */
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_LG,
+                       USB_DEVICE_ID_LG_MULTITOUCH) },
+
        /* Lumio panels */
        { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
                HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
index 4bdb5d46c52c2a21d4ead0e592b90391d6cf5ec8..85c845f7f61eef5b75693327cdc269f53a6e0052 100644 (file)
@@ -47,10 +47,12 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
 
        { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
        { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
 
+       { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
index 5f888f7e7dcb827586c47a2fb2bfdbb3936c13f2..6030f20854994706d1f40466f8d7edba4a7b4abf 100644 (file)
@@ -474,8 +474,9 @@ config SENSORS_JC42
          If you say yes here, you get support for JEDEC JC42.4 compliant
          temperature sensors, which are used on many DDR3 memory modules for
          mobile devices and servers.  Support will include, but not be limited
-         to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
-         MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
+         to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
+         MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002,
+         STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
 
          This driver can also be built as a module.  If so, the module
          will be called jc42.
index e9beeda4cbe58776ddf7042939df3a3c548f15e3..9a5af38bffd3e2ffa014a8369935981d98a328f1 100644 (file)
@@ -284,7 +284,7 @@ static int ads1015_probe(struct i2c_client *client,
                        continue;
                err = device_create_file(&client->dev, &ads1015_in[k].dev_attr);
                if (err)
-                       goto exit_free;
+                       goto exit_remove;
        }
 
        data->hwmon_dev = hwmon_device_register(&client->dev);
@@ -298,7 +298,6 @@ static int ads1015_probe(struct i2c_client *client,
 exit_remove:
        for (k = 0; k < ADS1015_CHANNELS; ++k)
                device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
-exit_free:
        kfree(data);
 exit:
        return err;
index 0070d5476dd0b5ee96bba29259c821d5419bf117..3cf235385f89e098fa80553d801e3b89a251cd77 100644 (file)
@@ -42,7 +42,7 @@
 #define DRVNAME        "coretemp"
 
 #define BASE_SYSFS_ATTR_NO     2       /* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES         16      /* Number of Real cores per cpu */
+#define NUM_REAL_CORES         32      /* Number of Real cores per cpu */
 #define CORETEMP_NAME_LENGTH   17      /* String Length of attrs */
 #define MAX_ATTRS              5       /* Maximum no of per-core attrs */
 #define MAX_CORE_DATA          (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 #ifdef CONFIG_SMP
 #define TO_PHYS_ID(cpu)                cpu_data(cpu).phys_proc_id
 #define TO_CORE_ID(cpu)                cpu_data(cpu).cpu_core_id
-#define TO_ATTR_NO(cpu)                (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
 #define for_each_sibling(i, cpu)       for_each_cpu(i, cpu_sibling_mask(cpu))
 #else
 #define TO_PHYS_ID(cpu)                (cpu)
 #define TO_CORE_ID(cpu)                (cpu)
-#define TO_ATTR_NO(cpu)                (cpu)
 #define for_each_sibling(i, cpu)       for (i = 0; false; )
 #endif
+#define TO_ATTR_NO(cpu)                (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
 
 /*
  * Per-Core Temperature Data
@@ -540,6 +539,8 @@ static void coretemp_add_core(unsigned int cpu, int pkg_flag)
                return;
 
        pdata = platform_get_drvdata(pdev);
+       if (!pdata)
+               return;
 
        err = create_core_data(pdata, pdev, cpu, pkg_flag);
        if (err)
@@ -746,9 +747,15 @@ static void __cpuinit put_core_offline(unsigned int cpu)
                return;
 
        pdata = platform_get_drvdata(pdev);
+       if (!pdata)
+               return;
 
        indx = TO_ATTR_NO(cpu);
 
+       /* The core id is too big, just return */
+       if (indx > MAX_CORE_DATA - 1)
+               return;
+
        if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
                coretemp_remove_core(pdata, &pdev->dev, indx);
 
index 92f949767ece5ccd6878896893177e34be63da2e..6dbfd3e516e4820ef182ce2fca2ad3e787ab1c59 100644 (file)
@@ -283,11 +283,11 @@ static inline long temp_from_reg(u8 reg)
 
 static inline u8 temp_to_reg(long val)
 {
-       if (val < 0)
-               val = 0;
-       else if (val > 1000 * 0xff)
-               val = 0xff;
-       return ((val + 500) / 1000);
+       if (val <= 0)
+               return 0;
+       if (val >= 1000 * 0xff)
+               return 0xff;
+       return (val + 500) / 1000;
 }
 
 /*
index 95cbfb3a7077c5901c65b1cdb9de3281a93a45c8..040a820acd154741a20a8e7680997289ac387c7d 100644 (file)
@@ -159,7 +159,7 @@ static inline void f75375_write8(struct i2c_client *client, u8 reg,
 static inline void f75375_write16(struct i2c_client *client, u8 reg,
                u16 value)
 {
-       int err = i2c_smbus_write_byte_data(client, reg, (value << 8));
+       int err = i2c_smbus_write_byte_data(client, reg, (value >> 8));
        if (err)
                return;
        i2c_smbus_write_byte_data(client, reg + 1, (value & 0xFF));
@@ -304,20 +304,21 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
        case 0: /* Full speed */
                fanmode  |= (3 << FAN_CTRL_MODE(nr));
                data->pwm[nr] = 255;
-               f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
-                               data->pwm[nr]);
                break;
        case 1: /* PWM */
                fanmode  |= (3 << FAN_CTRL_MODE(nr));
                break;
        case 2: /* AUTOMATIC*/
-               fanmode  |= (2 << FAN_CTRL_MODE(nr));
+               fanmode  |= (1 << FAN_CTRL_MODE(nr));
                break;
        case 3: /* fan speed */
                break;
        }
        f75375_write8(client, F75375_REG_FAN_TIMER, fanmode);
        data->pwm_enable[nr] = val;
+       if (val == 0)
+               f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
+                               data->pwm[nr]);
        return 0;
 }
 
index 523f8fb9e7d92402f1fa1b6ead69766fb428430a..e8e18cab1fb8c34d63c259a64afb461833df8be7 100644 (file)
@@ -60,15 +60,15 @@ static ssize_t show_power(struct device *dev,
        pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
                                  REG_TDP_RUNNING_AVERAGE, &val);
        running_avg_capture = (val >> 4) & 0x3fffff;
-       running_avg_capture = sign_extend32(running_avg_capture, 22);
-       running_avg_range = val & 0xf;
+       running_avg_capture = sign_extend32(running_avg_capture, 21);
+       running_avg_range = (val & 0xf) + 1;
 
        pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
                                  REG_TDP_LIMIT3, &val);
 
        tdp_limit = val >> 16;
-       curr_pwr_watts = tdp_limit + data->base_tdp -
-               (s32)(running_avg_capture >> (running_avg_range + 1));
+       curr_pwr_watts = (tdp_limit + data->base_tdp) << running_avg_range;
+       curr_pwr_watts -= running_avg_capture;
        curr_pwr_watts *= data->tdp_to_watts;
 
        /*
@@ -78,7 +78,7 @@ static ssize_t show_power(struct device *dev,
         * scaling factor 1/(2^16).  For conversion we use
         * (10^6)/(2^16) = 15625/(2^10)
         */
-       curr_pwr_watts = (curr_pwr_watts * 15625) >> 10;
+       curr_pwr_watts = (curr_pwr_watts * 15625) >> (10 + running_avg_range);
        return sprintf(buf, "%u\n", (unsigned int) curr_pwr_watts);
 }
 static DEVICE_ATTR(power1_input, S_IRUGO, show_power, NULL);
@@ -122,6 +122,41 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
        return true;
 }
 
+/*
+ * Newer BKDG versions have an updated recommendation on how to properly
+ * initialize the running average range (was: 0xE, now: 0x9). This avoids
+ * counter saturations resulting in bogus power readings.
+ * We correct this value ourselves to cope with older BIOSes.
+ */
+static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+       { 0 }
+};
+
+static void __devinit tweak_runavg_range(struct pci_dev *pdev)
+{
+       u32 val;
+
+       /*
+        * let this quirk apply only to the current version of the
+        * northbridge, since future versions may change the behavior
+        */
+       if (!pci_match_id(affected_device, pdev))
+               return;
+
+       pci_bus_read_config_dword(pdev->bus,
+               PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
+               REG_TDP_RUNNING_AVERAGE, &val);
+       if ((val & 0xf) != 0xe)
+               return;
+
+       val &= ~0xf;
+       val |=  0x9;
+       pci_bus_write_config_dword(pdev->bus,
+               PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
+               REG_TDP_RUNNING_AVERAGE, val);
+}
+
 static void __devinit fam15h_power_init_data(struct pci_dev *f4,
                                             struct fam15h_power_data *data)
 {
@@ -155,6 +190,13 @@ static int __devinit fam15h_power_probe(struct pci_dev *pdev,
        struct device *dev;
        int err;
 
+       /*
+        * Though we ignore every other northbridge, we still have to
+        * apply the tweak on _each_ node in MCM processors, as the
+        * counters work hand-in-hand.
+        */
+       tweak_runavg_range(pdev);
+
        if (!fam15h_power_is_internal_node0(pdev)) {
                err = -ENODEV;
                goto exit;
index 02cebb74e206743f08608ad2f101398ec9ab61d8..ed4392406e93f812eb7deae2c2f16914c25318e8 100644 (file)
@@ -64,6 +64,7 @@ static const unsigned short normal_i2c[] = {
 
 /* Manufacturer IDs */
 #define ADT_MANID              0x11d4  /* Analog Devices */
+#define ATMEL_MANID            0x001f  /* Atmel */
 #define MAX_MANID              0x004d  /* Maxim */
 #define IDT_MANID              0x00b3  /* IDT */
 #define MCP_MANID              0x0054  /* Microchip */
@@ -77,15 +78,25 @@ static const unsigned short normal_i2c[] = {
 #define ADT7408_DEVID          0x0801
 #define ADT7408_DEVID_MASK     0xffff
 
+/* Atmel */
+#define AT30TS00_DEVID         0x8201
+#define AT30TS00_DEVID_MASK    0xffff
+
 /* IDT */
 #define TS3000B3_DEVID         0x2903  /* Also matches TSE2002B3 */
 #define TS3000B3_DEVID_MASK    0xffff
 
+#define TS3000GB2_DEVID                0x2912  /* Also matches TSE2002GB2 */
+#define TS3000GB2_DEVID_MASK   0xffff
+
 /* Maxim */
 #define MAX6604_DEVID          0x3e00
 #define MAX6604_DEVID_MASK     0xffff
 
 /* Microchip */
+#define MCP9804_DEVID          0x0200
+#define MCP9804_DEVID_MASK     0xfffc
+
 #define MCP98242_DEVID         0x2000
 #define MCP98242_DEVID_MASK    0xfffc
 
@@ -113,6 +124,12 @@ static const unsigned short normal_i2c[] = {
 #define STTS424E_DEVID         0x0000
 #define STTS424E_DEVID_MASK    0xfffe
 
+#define STTS2002_DEVID         0x0300
+#define STTS2002_DEVID_MASK    0xffff
+
+#define STTS3000_DEVID         0x0200
+#define STTS3000_DEVID_MASK    0xffff
+
 static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 };
 
 struct jc42_chips {
@@ -123,8 +140,11 @@ struct jc42_chips {
 
 static struct jc42_chips jc42_chips[] = {
        { ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK },
+       { ATMEL_MANID, AT30TS00_DEVID, AT30TS00_DEVID_MASK },
        { IDT_MANID, TS3000B3_DEVID, TS3000B3_DEVID_MASK },
+       { IDT_MANID, TS3000GB2_DEVID, TS3000GB2_DEVID_MASK },
        { MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK },
+       { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
        { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
        { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
        { MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
@@ -133,6 +153,8 @@ static struct jc42_chips jc42_chips[] = {
        { NXP_MANID, SE98_DEVID, SE98_DEVID_MASK },
        { STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK },
        { STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK },
+       { STM_MANID, STTS2002_DEVID, STTS2002_DEVID_MASK },
+       { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK },
 };
 
 /* Each client has this additional data */
@@ -161,10 +183,12 @@ static struct jc42_data *jc42_update_device(struct device *dev);
 
 static const struct i2c_device_id jc42_id[] = {
        { "adt7408", 0 },
+       { "at30ts00", 0 },
        { "cat94ts02", 0 },
        { "cat6095", 0 },
        { "jc42", 0 },
        { "max6604", 0 },
+       { "mcp9804", 0 },
        { "mcp9805", 0 },
        { "mcp98242", 0 },
        { "mcp98243", 0 },
@@ -173,8 +197,10 @@ static const struct i2c_device_id jc42_id[] = {
        { "se97b", 0 },
        { "se98", 0 },
        { "stts424", 0 },
-       { "tse2002b3", 0 },
-       { "ts3000b3", 0 },
+       { "stts2002", 0 },
+       { "stts3000", 0 },
+       { "tse2002", 0 },
+       { "ts3000", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, jc42_id);
index fea292d43407cecba6e75b70021617f871d6b1b8..b65a4dae3f5e8ba7472c0743168853067f2ae597 100644 (file)
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
 {
        struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
        struct completion *completion = &hwmon->read_completion;
-       unsigned long t;
+       long t;
        unsigned long val;
        int ret;
 
index f20d9978ee782602e945c96f7b60db7c79862b21..8c3df047e5687d62b822c5f2a1d2746f1e227899 100644 (file)
@@ -72,8 +72,8 @@ static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
 
 static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
 
-#define FAN_FROM_REG(val, div, rpm_range)      ((val) == 0 ? -1 : \
-       (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val)))
+#define FAN_FROM_REG(val, rpm_range)   ((val) == 0 || (val) == 255 ? \
+                               0 : (rpm_ranges[rpm_range] * 30) / (val))
 #define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
 
 /*
@@ -333,7 +333,7 @@ static ssize_t show_fan_input(struct device *dev,
                return PTR_ERR(data);
 
        return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
-                      data->ppr, data->rpm_range));
+                      data->rpm_range));
 }
 
 static ssize_t show_alarm(struct device *dev,
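
For reference, the conversion show_fan_input() relies on: with rpm_ranges[] = { 2000, 4000, 8000, 16000 }, a tachometer count of 0 or 255 reads back as 0 RPM (stalled or out of range), otherwise RPM = rpm_ranges[rpm_range] * 30 / count. A small self-contained sketch with a hypothetical count:

    #include <stdio.h>

    /* Illustrative only: mirrors the FAN_FROM_REG() macro above. */
    static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };

    static int fan_from_reg(int val, int rpm_range)
    {
            if (val == 0 || val == 255)
                    return 0;       /* stalled fan or count out of range */
            return (rpm_ranges[rpm_range] * 30) / val;
    }

    int main(void)
    {
            /* hypothetical count of 120 in the default 4000 RPM range */
            printf("%d RPM\n", fan_from_reg(120, 1));  /* prints "1000 RPM" */
            return 0;
    }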
@@ -429,9 +429,9 @@ static int max6639_init_client(struct i2c_client *client)
        struct max6639_data *data = i2c_get_clientdata(client);
        struct max6639_platform_data *max6639_info =
                client->dev.platform_data;
-       int i = 0;
+       int i;
        int rpm_range = 1; /* default: 4000 RPM */
-       int err = 0;
+       int err;
 
        /* Reset chip to default values, see below for GCONFIG setup */
        err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
@@ -446,11 +446,6 @@ static int max6639_init_client(struct i2c_client *client)
        else
                data->ppr = 2;
        data->ppr -= 1;
-       err = i2c_smbus_write_byte_data(client,
-                       MAX6639_REG_FAN_PPR(i),
-                       data->ppr << 5);
-       if (err)
-               goto exit;
 
        if (max6639_info)
                rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
@@ -458,6 +453,13 @@ static int max6639_init_client(struct i2c_client *client)
 
        for (i = 0; i < 2; i++) {
 
+               /* Set Fan pulse per revolution */
+               err = i2c_smbus_write_byte_data(client,
+                               MAX6639_REG_FAN_PPR(i),
+                               data->ppr << 6);
+               if (err)
+                       goto exit;
+
                /* Fans config PWM, RPM */
                err = i2c_smbus_write_byte_data(client,
                        MAX6639_REG_FAN_CONFIG1(i),
index 8e31a8e2c746e8848c7268ab5c8091b966b648b4..ffa54dd7dbdabca339126286a203531a245ba9ea 100644 (file)
@@ -50,7 +50,8 @@
                                                   lcrit_alarm, crit_alarm */
 #define PMBUS_IOUT_BOOLEANS_PER_PAGE   3       /* alarm, lcrit_alarm,
                                                   crit_alarm */
-#define PMBUS_POUT_BOOLEANS_PER_PAGE   2       /* alarm, crit_alarm */
+#define PMBUS_POUT_BOOLEANS_PER_PAGE   3       /* cap_alarm, alarm, crit_alarm
+                                                */
 #define PMBUS_MAX_BOOLEANS_PER_FAN     2       /* alarm, fault */
 #define PMBUS_MAX_BOOLEANS_PER_TEMP    4       /* min_alarm, max_alarm,
                                                   lcrit_alarm, crit_alarm */
index cf4330b352ef2c9b439b1685c5d94ad29eba31f4..9594cdb1cd0fd1f638a2f9c1ea584552cedbbfa1 100644 (file)
@@ -883,7 +883,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
 
 static int __devinit sht15_probe(struct platform_device *pdev)
 {
-       int ret = 0;
+       int ret;
        struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
        u8 status = 0;
 
@@ -901,6 +901,7 @@ static int __devinit sht15_probe(struct platform_device *pdev)
        init_waitqueue_head(&data->wait_queue);
 
        if (pdev->dev.platform_data == NULL) {
+               ret = -EINVAL;
                dev_err(&pdev->dev, "no platform data supplied\n");
                goto err_free_data;
        }
index 36d7f270b14d4e7087bcee9f35235b344cc3618a..e51b582bbc584db5b4f64dd09b4908f333c7bcce 100644 (file)
@@ -1295,6 +1295,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+       struct w83627ehf_sio_data *sio_data = dev->platform_data;
        int nr = sensor_attr->index;
        unsigned long val;
        int err;
@@ -1306,6 +1307,11 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
 
        if (val > 1)
                return -EINVAL;
+
+       /* On NCT67766F, DC mode is only supported for pwm1 */
+       /* On NCT6776F, DC mode is only supported for pwm1 */
+               return -EINVAL;
+
        mutex_lock(&data->update_lock);
        reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
        data->pwm_mode[nr] = val;
@@ -1577,7 +1583,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
        val = step_time_to_reg(val, data->pwm_mode[nr]); \
        mutex_lock(&data->update_lock); \
        data->reg[nr] = val; \
-       w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
+       w83627ehf_write_value(data, data->REG_##REG[nr], val); \
        mutex_unlock(&data->update_lock); \
        return count; \
 } \
@@ -1756,7 +1762,17 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
                diode = 0x70;
        }
        for (i = 0; i < 3; i++) {
-               if ((tmp & (0x02 << i)))
+               const char *label = NULL;
+
+               if (data->temp_label)
+                       label = data->temp_label[data->temp_src[i]];
+
+               /* Digital source overrides analog type */
+               if (label && strncmp(label, "PECI", 4) == 0)
+                       data->temp_type[i] = 6;
+               else if (label && strncmp(label, "AMD", 3) == 0)
+                       data->temp_type[i] = 5;
+               else if ((tmp & (0x02 << i)))
                        data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
                else
                        data->temp_type[i] = 4; /* thermistor */
@@ -1807,7 +1823,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
                goto exit;
        }
 
-       data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL);
+       data = devm_kzalloc(&pdev->dev, sizeof(struct w83627ehf_data),
+                           GFP_KERNEL);
        if (!data) {
                err = -ENOMEM;
                goto exit_release;
@@ -2088,9 +2105,29 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
                fan4min = 0;
                fan5pin = 0;
        } else if (sio_data->kind == nct6776) {
-               fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
-               fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
-               fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
+               bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
+               u8 regval;
+
+               superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
+               regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
+
+               if (regval & 0x80)
+                       fan3pin = gpok;
+               else
+                       fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+
+               if (regval & 0x40)
+                       fan4pin = gpok;
+               else
+                       fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C)
+                                    & 0x01);
+
+               if (regval & 0x20)
+                       fan5pin = gpok;
+               else
+                       fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C)
+                                    & 0x02);
+
                fan4min = fan4pin;
        } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
                fan3pin = 1;
@@ -2283,9 +2320,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
 
 exit_remove:
        w83627ehf_device_remove_files(dev);
-       kfree(data);
-       platform_set_drvdata(pdev, NULL);
 exit_release:
+       platform_set_drvdata(pdev, NULL);
        release_region(res->start, IOREGION_LENGTH);
 exit:
        return err;
@@ -2299,7 +2335,6 @@ static int __devexit w83627ehf_remove(struct platform_device *pdev)
        w83627ehf_device_remove_files(&pdev->dev);
        release_region(data->addr, IOREGION_LENGTH);
        platform_set_drvdata(pdev, NULL);
-       kfree(data);
 
        return 0;
 }
index 43a62714b4fba1d4799df6166caf27031082cf00..12f7c8300c75870e2542e77b6d6ebbd1381caf9b 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/radix-tree.h>
 #include <linux/hwspinlock.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>
 
 #include "hwspinlock_internal.h"
 
 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
 
 /*
- * Synchronization of access to the tree is achieved using this spinlock,
+ * Synchronization of access to the tree is achieved using this mutex,
  * as the radix-tree API requires that users provide all synchronisation.
+ * A mutex is needed because we're using non-atomic radix tree allocations.
  */
-static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+static DEFINE_MUTEX(hwspinlock_tree_lock);
+
 
 /**
  * __hwspin_trylock() - attempt to lock a specific hwspinlock
@@ -261,8 +264,7 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock);
  * This function should be called from the underlying platform-specific
  * implementation, to register a new hwspinlock instance.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -279,7 +281,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
 
        spin_lock_init(&hwlock->lock);
 
-       spin_lock(&hwspinlock_tree_lock);
+       mutex_lock(&hwspinlock_tree_lock);
 
        ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
        if (ret)
@@ -293,7 +295,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
        WARN_ON(tmp != hwlock);
 
 out:
-       spin_unlock(&hwspinlock_tree_lock);
+       mutex_unlock(&hwspinlock_tree_lock);
        return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
@@ -305,8 +307,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register);
  * This function should be called from the underlying platform-specific
  * implementation, to unregister an existing (and unused) hwspinlock.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of hwspinlock @id on success, or NULL on failure
  */
@@ -315,7 +316,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
        struct hwspinlock *hwlock = NULL;
        int ret;
 
-       spin_lock(&hwspinlock_tree_lock);
+       mutex_lock(&hwspinlock_tree_lock);
 
        /* make sure the hwspinlock is not in use (tag is set) */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -331,7 +332,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
        }
 
 out:
-       spin_unlock(&hwspinlock_tree_lock);
+       mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
@@ -400,9 +401,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
  * to the remote core before it can be used for synchronization (to get the
  * id of a given hwlock, use hwspin_lock_get_id()).
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -411,7 +410,7 @@ struct hwspinlock *hwspin_lock_request(void)
        struct hwspinlock *hwlock;
        int ret;
 
-       spin_lock(&hwspinlock_tree_lock);
+       mutex_lock(&hwspinlock_tree_lock);
 
        /* look for an unused lock */
        ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
@@ -431,7 +430,7 @@ struct hwspinlock *hwspin_lock_request(void)
                hwlock = NULL;
 
 out:
-       spin_unlock(&hwspinlock_tree_lock);
+       mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request);
@@ -445,9 +444,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
  * Usually early board code will be calling this function in order to
  * reserve specific hwspinlock ids for predefined purposes.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -456,7 +453,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
        struct hwspinlock *hwlock;
        int ret;
 
-       spin_lock(&hwspinlock_tree_lock);
+       mutex_lock(&hwspinlock_tree_lock);
 
        /* make sure this hwspinlock exists */
        hwlock = radix_tree_lookup(&hwspinlock_tree, id);
@@ -482,7 +479,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
                hwlock = NULL;
 
 out:
-       spin_unlock(&hwspinlock_tree_lock);
+       mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
@@ -495,9 +492,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
  * Should only be called with an @hwlock that was retrieved from
  * an earlier call to omap_hwspin_lock_request{_specific}.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
@@ -511,7 +506,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
                return -EINVAL;
        }
 
-       spin_lock(&hwspinlock_tree_lock);
+       mutex_lock(&hwspinlock_tree_lock);
 
        /* make sure the hwspinlock is used */
        ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
@@ -538,7 +533,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
        module_put(hwlock->owner);
 
 out:
-       spin_unlock(&hwspinlock_tree_lock);
+       mutex_unlock(&hwspinlock_tree_lock);
        return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_free);
index a0e039db581c0215fd184fbfdd4b14ee3d7a7629..0f25f3541f339514b0e86aa20c4a069a82cb7595 100755 (executable)
@@ -103,8 +103,14 @@ static int sclhi(struct i2c_algo_bit_data *adap)
                 * chips may hold it low ("clock stretching") while they
                 * are processing data internally.
                 */
-               if (time_after(jiffies, start + adap->timeout))
+               if (time_after(jiffies, start + adap->timeout)) {
+                       /* Test one last time, as we may have been preempted
+                        * between last check and timeout test.
+                        */
+                       if (getscl(adap))
+                               break;
                        return -ETIMEDOUT;
+               }
                cond_resched();
        }
 #ifdef DEBUG
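
The extra getscl() test closes a small race: the loop can be preempted after a successful poll but before the time_after() check, so an expired deadline does not by itself prove that SCL never went high. The same pattern in isolation (a sketch only; ready() is a hypothetical predicate passed in by the caller):

    #include <stdbool.h>
    #include <time.h>

    /*
     * Illustrative "re-check before declaring timeout" pattern: after the
     * deadline expires, test the condition one last time in case we were
     * preempted between the last poll and the deadline check.
     */
    static int wait_for(bool (*ready)(void), time_t deadline)
    {
            while (!ready()) {
                    if (time(NULL) > deadline) {
                            if (ready())    /* last-chance re-check */
                                    break;
                            return -1;      /* genuine timeout */
                    }
                    /* a real caller would yield or sleep briefly here */
            }
            return 0;
    }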
@@ -486,7 +492,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
 
        if (flags & I2C_M_TEN) {
                /* a ten bit address */
-               addr = 0xf0 | ((msg->addr >> 7) & 0x03);
+               addr = 0xf0 | ((msg->addr >> 7) & 0x06);
                bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
                /* try extended address code...*/
                ret = try_address(i2c_adap, addr, retries);
@@ -496,7 +502,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
                        return -EREMOTEIO;
                }
                /* the remaining 8 bit address */
-               ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
+               ret = i2c_outb(i2c_adap, msg->addr & 0xff);
                if ((ret != 1) && !nak_ok) {
                        /* the chip did not ack / xmission error occurred */
                        dev_err(&i2c_adap->dev, "died at 2nd address code\n");
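
Both corrections follow the I2C 10-bit addressing format: the first byte on the wire is 1111 0 A9 A8 RW, so address bits 9:8 belong in bit positions 2:1 (hence the shift by 7 and the 0x06 mask), and the second byte carries the full low eight bits A7..A0, not just seven. A worked example for a hypothetical 10-bit address 0x2a5:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t addr = 0x2a5;  /* hypothetical 10-bit slave address */

            /* 1111 0 A9 A8 0: put address bits 9:8 into bit positions 2:1 */
            uint8_t first  = 0xf0 | ((addr >> 7) & 0x06);
            /* second byte carries A7..A0 */
            uint8_t second = addr & 0xff;

            printf("first 0x%02x, second 0x%02x\n",
                   (unsigned int)first, (unsigned int)second);
            /* 0x2a5 = 10 1010 0101b  ->  first 0xf4, second 0xa5 */
            return 0;
    }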
index dd364171f9c5deedd88094288bd5c4d4a6573530..cd7ac5c6783e0d3897b6f29f8b7ba1ee6eca69bc 100644 (file)
@@ -140,7 +140,7 @@ static unsigned short ali1535_smba;
    defined to make the transition easier. */
 static int __devinit ali1535_setup(struct pci_dev *dev)
 {
-       int retval = -ENODEV;
+       int retval;
        unsigned char temp;
 
        /* Check the following things:
@@ -155,6 +155,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
        if (ali1535_smba == 0) {
                dev_warn(&dev->dev,
                        "ALI1535_smb region uninitialized - upgrade BIOS?\n");
+               retval = -ENODEV;
                goto exit;
        }
 
@@ -167,6 +168,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
                            ali1535_driver.name)) {
                dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
                        ali1535_smba);
+               retval = -EBUSY;
                goto exit;
        }
 
@@ -174,6 +176,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
        pci_read_config_byte(dev, SMBCFG, &temp);
        if ((temp & ALI1535_SMBIO_EN) == 0) {
                dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
+               retval = -ENODEV;
                goto exit_free;
        }
 
@@ -181,6 +184,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
        pci_read_config_byte(dev, SMBHSTCFG, &temp);
        if ((temp & 1) == 0) {
                dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
+               retval = -ENODEV;
                goto exit_free;
        }
 
@@ -198,12 +202,11 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
        dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
        dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
 
-       retval = 0;
-exit:
-       return retval;
+       return 0;
 
 exit_free:
        release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
+exit:
        return retval;
 }
 
index a76d85fa3ad781d15a90f73875a1c857703d14a8..79b4bcb3b85cea6d79d1c447200748811d74d6fc 100644 (file)
@@ -755,7 +755,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
        dev->clk = NULL;
 
        davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
-       free_irq(IRQ_I2C, dev);
+       free_irq(dev->irq, dev);
        iounmap(dev->base);
        kfree(dev);
 
index 8abfa4a03ce1d7610f34417d7033581c39cb3a8f..656b028d9816ec689ec982715aeb5cf498e608a6 100644 (file)
@@ -242,7 +242,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
        if (pch_clk > PCH_MAX_CLK)
                pch_clk = 62500;
 
-       pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
+       pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
        /* Set transfer speed in I2CBC */
        iowrite32(pch_i2cbc, p + PCH_I2CBC);
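
The added parentheses matter because C evaluates a / b * 8 as (a / b) * 8, which made the computed divider roughly 64 times too large; the intent is pch_clk / (pch_i2c_speed * 8), rounded to nearest by the pch_i2c_speed * 4 term in the numerator. A quick check with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pch_clk = 50000;           /* kHz, hypothetical */
            unsigned int pch_i2c_speed = 100;       /* kHz, hypothetical */
            unsigned int num = pch_clk + pch_i2c_speed * 4;

            printf("old: %u, fixed: %u\n",
                   num / pch_i2c_speed * 8,         /* (a / b) * 8  -> 4032 */
                   num / (pch_i2c_speed * 8));      /* a / (b * 8)  ->   63 */
            return 0;
    }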
 
index 7e78f7c87857c7f0d7af5417064f19e822b4a5f4..3d471d56bf15d1faf295f606c31f19d90f384e79 100644 (file)
@@ -72,6 +72,7 @@
 
 #define MXS_I2C_QUEUESTAT      (0x70)
 #define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY        0x00002000
+#define MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK 0x0000001F
 
 #define MXS_I2C_QUEUECMD       (0x80)
 
@@ -219,14 +220,14 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
        int ret;
        int flags;
 
-       init_completion(&i2c->cmd_complete);
-
        dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
                msg->addr, msg->len, msg->flags, stop);
 
        if (msg->len == 0)
                return -EINVAL;
 
+       init_completion(&i2c->cmd_complete);
+
        flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
 
        if (msg->flags & I2C_M_RD)
@@ -286,6 +287,7 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
 {
        struct mxs_i2c_dev *i2c = dev_id;
        u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK;
+       bool is_last_cmd;
 
        if (!stat)
                return IRQ_NONE;
@@ -300,9 +302,14 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
        else
                i2c->cmd_err = 0;
 
-       complete(&i2c->cmd_complete);
+       is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) &
+               MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0;
+
+       if (is_last_cmd || i2c->cmd_err)
+               complete(&i2c->cmd_complete);
 
        writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR);
+
        return IRQ_HANDLED;
 }
 
index ff1e127dfea8e9b100c9fa1f6e2ebfd54c6992a6..4853b52a40a8905587ab1941e6e83fc06e3ff6f3 100644 (file)
@@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
        error = acpi_check_region(smbus->base, smbus->size,
                                  nforce2_driver.name);
        if (error)
-               return -1;
+               return error;
 
        if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
                dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
index 58a58c7eaa17d6eb7fac8b2ca6014110e0e3f225..137e1a3bfad15a3e6b2c303dd606722a96a9deaf 100644 (file)
@@ -235,7 +235,7 @@ const static u8 omap4_reg_map[] = {
        [OMAP_I2C_BUF_REG] = 0x94,
        [OMAP_I2C_CNT_REG] = 0x98,
        [OMAP_I2C_DATA_REG] = 0x9c,
-       [OMAP_I2C_SYSC_REG] = 0x20,
+       [OMAP_I2C_SYSC_REG] = 0x10,
        [OMAP_I2C_CON_REG] = 0xa4,
        [OMAP_I2C_OA_REG] = 0xa8,
        [OMAP_I2C_SA_REG] = 0xac,
index 04be9f82e14bda8518c4d9562c680731e42c23b3..eb8ad538c79ffb97616e43f135b5d9ff991cce96 100644 (file)
@@ -546,8 +546,7 @@ static int i2c_pnx_controller_suspend(struct platform_device *pdev,
 {
        struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
 
-       /* FIXME: shouldn't this be clk_disable? */
-       clk_enable(alg_data->clk);
+       clk_disable(alg_data->clk);
 
        return 0;
 }
index 437586611d4a18819f110698d7618f7dde9471c3..6d60284cc04b6b7025aaf9ad01e0b71fe1b273cb 100644 (file)
@@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
        u16 a;
        u8 val;
        int *i;
-       int retval = -ENODEV;
+       int retval;
 
        /* Look for imposters */
        for (i = blacklist; *i != 0; i++) {
@@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
 
 error:
        release_region(sis5595_base + SMB_INDEX, 2);
-       return retval;
+       return -ENODEV;
 }
 
 static int sis5595_transaction(struct i2c_adapter *adap)
index e6f539e26f65fb97918e132164d45b80ce4a0113..b617fd068ac7244fe81bfc0aa4547fe53562161e 100644 (file)
@@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
 {
        unsigned char b;
        struct pci_dev *dummy = NULL;
-       int retval = -ENODEV, i;
+       int retval, i;
 
        /* check for supported SiS devices */
        for (i=0; supported[i] > 0 ; i++) {
@@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
        */
        if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
                dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
+               retval = -ENODEV;
                goto exit;
        }
        /* if ACPI already enabled , do nothing */
        if (!(b & 0x80) &&
            pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
                dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
+               retval = -ENODEV;
                goto exit;
        }
 
        /* Determine the ACPI base address */
        if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
                dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
+               retval = -ENODEV;
                goto exit;
        }
 
@@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
                            sis630_driver.name)) {
                dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
                        "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
+               retval = -EBUSY;
                goto exit;
        }
 
index 0b012f1f8ac5791ba9a4aa7926f75fbe97c57567..58261d4725b69a109d44ca48b6b8a0a19f291dee 100644 (file)
@@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *id)
 {
        unsigned char temp;
-       int error = -ENODEV;
+       int error;
 
        /* Determine the address of the SMBus areas */
        if (force_addr) {
@@ -390,6 +390,7 @@ found:
                        dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
                                "controller not enabled! - upgrade BIOS or "
                                "use force=1\n");
+                       error = -ENODEV;
                        goto release_region;
                }
        }
@@ -422,9 +423,11 @@ found:
                 "SMBus Via Pro adapter at %04x", vt596_smba);
 
        vt596_pdev = pci_dev_get(pdev);
-       if (i2c_add_adapter(&vt596_adapter)) {
+       error = i2c_add_adapter(&vt596_adapter);
+       if (error) {
                pci_dev_put(vt596_pdev);
                vt596_pdev = NULL;
+               goto release_region;
        }
 
        /* Always return failure here.  This is to allow other drivers to bind
index d267b7affad63fab887a10585e647cec6eb98020..a22ca846701080d3721ef32fc72dc9b3038b02fd 100644 (file)
@@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
         * and CDROM_SEND_PACKET (legacy) ioctls
         */
        if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
-               err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
-                               mode, cmd, argp);
+               err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
 
        if (err == -ENOTTY)
                err = generic_ide_ioctl(drive, bdev, cmd, arg);
index a46dddf6107884deb0852883f7d012d23e05b75e..026f9aa789e3c8c1d93aef8347fcce197f603bd1 100644 (file)
@@ -321,7 +321,8 @@ static int intel_idle_probe(void)
        cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
 
        if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
-               !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+           !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+           !mwait_substates)
                        return -ENODEV;
 
        pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -367,7 +368,7 @@ static int intel_idle_probe(void)
        if (boot_cpu_has(X86_FEATURE_ARAT))     /* Always Reliable APIC Timer */
                lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
        else {
-               smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+               on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
                register_cpu_notifier(&setup_broadcast_notifier);
        }
 
@@ -459,7 +460,7 @@ static int intel_idle_cpuidle_devices_init(void)
                }
        }
        if (auto_demotion_disable_flags)
-               smp_call_function(auto_demotion_disable, NULL, 1);
+               on_each_cpu(auto_demotion_disable, NULL, 1);
 
        return 0;
 }
@@ -499,7 +500,7 @@ static void __exit intel_idle_exit(void)
        cpuidle_unregister_driver(&intel_idle_driver);
 
        if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
-               smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+               on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
                unregister_cpu_notifier(&setup_broadcast_notifier);
        }
 
index 8e21d457b899de74d2e60896a907f1e54eb75195..f2a84c6f854332710103e0b2570c21c8e2f769d9 100644 (file)
@@ -215,7 +215,9 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 
        neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
        if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-               neigh_event_send(rt->dst.neighbour, NULL);
+               rcu_read_lock();
+               neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
+               rcu_read_unlock();
                ret = -ENODATA;
                if (neigh)
                        goto release;
@@ -273,14 +275,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                goto put;
        }
 
-       neigh = dst->neighbour;
+       rcu_read_lock();
+       neigh = dst_get_neighbour(dst);
        if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-               neigh_event_send(dst->neighbour, NULL);
+               if (neigh)
+                       neigh_event_send(neigh, NULL);
                ret = -ENODATA;
-               goto put;
+       } else {
+               ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
        }
-
-       ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+       rcu_read_unlock();
 put:
        dst_release(dst);
        return ret;
index 2332dc22aa043cfe1f551359be945e5e2b5c1c56..e55ce7a428bea7559e02a8165a5d157971235bfd 100644 (file)
@@ -1328,6 +1328,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        struct iwch_ep *child_ep, *parent_ep = ctx;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int hwtid = GET_TID(req);
+       struct neighbour *neigh;
        struct dst_entry *dst;
        struct l2t_entry *l2t;
        struct rtable *rt;
@@ -1364,7 +1365,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                goto reject;
        }
        dst = &rt->dst;
-       l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
+       rcu_read_lock();
+       neigh = dst_get_neighbour(dst);
+       l2t = t3_l2t_get(tdev, neigh, neigh->dev);
+       rcu_read_unlock();
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
@@ -1874,10 +1878,11 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
 
 int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
-       int err = 0;
        struct iwch_dev *h = to_iwch_dev(cm_id->device);
+       struct neighbour *neigh;
        struct iwch_ep *ep;
        struct rtable *rt;
+       int err = 0;
 
        if (is_loopback_dst(cm_id)) {
                err = -ENOSYS;
@@ -1933,9 +1938,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        ep->dst = &rt->dst;
 
+       rcu_read_lock();
+       neigh = dst_get_neighbour(ep->dst);
+
        /* get a l2t entry */
-       ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
-                            ep->dst->neighbour->dev);
+       ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
+       rcu_read_unlock();
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
index 31fb44085c9b4dd7e3cdd4f97f59727f4a82d107..267005d0e66f4f16032d8ea57371f6d312be7eca 100644 (file)
@@ -1325,6 +1325,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
+       struct neighbour *neigh;
        struct dst_entry *dst;
        struct l2t_entry *l2t;
        struct rtable *rt;
@@ -1357,11 +1358,12 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
        dst = &rt->dst;
-       if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
+       rcu_read_lock();
+       neigh = dst_get_neighbour(dst);
+       if (neigh->dev->flags & IFF_LOOPBACK) {
                pdev = ip_dev_find(&init_net, peer_ip);
                BUG_ON(!pdev);
-               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
-                                   pdev, 0);
+               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
                mtu = pdev->mtu;
                tx_chan = cxgb4_port_chan(pdev);
                smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
@@ -1372,18 +1374,18 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
                dev_put(pdev);
        } else {
-               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
-                                       dst->neighbour->dev, 0);
+               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
                mtu = dst_mtu(dst);
-               tx_chan = cxgb4_port_chan(dst->neighbour->dev);
-               smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
+               tx_chan = cxgb4_port_chan(neigh->dev);
+               smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
                step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
-               txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
-               ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
+               txq_idx = cxgb4_port_idx(neigh->dev) * step;
+               ctrlq_idx = cxgb4_port_idx(neigh->dev);
                step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
                rss_qid = dev->rdev.lldi.rxq_ids[
-                         cxgb4_port_idx(dst->neighbour->dev) * step];
+                         cxgb4_port_idx(neigh->dev) * step];
        }
+       rcu_read_unlock();
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
@@ -1847,6 +1849,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct c4iw_ep *ep;
        struct rtable *rt;
        struct net_device *pdev;
+       struct neighbour *neigh;
        int step;
 
        if ((conn_param->ord > c4iw_max_read_depth) ||
@@ -1908,14 +1911,16 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        ep->dst = &rt->dst;
 
+       rcu_read_lock();
+       neigh = dst_get_neighbour(ep->dst);
+
        /* get a l2t entry */
-       if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
+       if (neigh->dev->flags & IFF_LOOPBACK) {
                PDBG("%s LOOPBACK\n", __func__);
                pdev = ip_dev_find(&init_net,
                                   cm_id->remote_addr.sin_addr.s_addr);
                ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       ep->dst->neighbour,
-                                       pdev, 0);
+                                       neigh, pdev, 0);
                ep->mtu = pdev->mtu;
                ep->tx_chan = cxgb4_port_chan(pdev);
                ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
@@ -1930,21 +1935,20 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                dev_put(pdev);
        } else {
                ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       ep->dst->neighbour,
-                                       ep->dst->neighbour->dev, 0);
+                                       neigh, neigh->dev, 0);
                ep->mtu = dst_mtu(ep->dst);
-               ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
-               ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
-                               0x7F) << 1;
+               ep->tx_chan = cxgb4_port_chan(neigh->dev);
+               ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
                step = ep->com.dev->rdev.lldi.ntxq /
                       ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
-               ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
+               ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
+               ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
                step = ep->com.dev->rdev.lldi.nrxq /
                       ep->com.dev->rdev.lldi.nchan;
                ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                             cxgb4_port_idx(ep->dst->neighbour->dev) * step];
+                             cxgb4_port_idx(neigh->dev) * step];
        }
+       rcu_read_unlock();
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
@@ -2312,6 +2316,12 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int tid = GET_TID(req);
 
        ep = lookup_tid(t, tid);
+       if (!ep) {
+               printk(KERN_WARNING MOD
+                      "Abort on non-existent endpoint, tid %d\n", tid);
+               kfree_skb(skb);
+               return 0;
+       }
        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
index 57ffa50f509e1f25812f056ca6227d369abb6e20..44fc3104e9185b3c99af868950e66353f8af95b3 100644 (file)
@@ -255,12 +255,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,    u8 port_num,
                        return IB_MAD_RESULT_SUCCESS;
 
                /*
-                * Don't process SMInfo queries or vendor-specific
-                * MADs -- the SMA can't handle them.
+                * Don't process SMInfo queries -- the SMA can't handle them.
                 */
-               if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
-                   ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
-                    IB_SMP_ATTR_VENDOR_MASK))
+               if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
                        return IB_MAD_RESULT_SUCCESS;
        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
index 2001f20a43618e74b927733d8d54b410af084979..23c04ff6519b11bdda8ae1b32ca1aa7fd0a15ef8 100644 (file)
@@ -1301,7 +1301,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
        int is_eth;
        int is_vlan = 0;
        int is_grh;
-       u16 vlan;
+       u16 vlan = 0;
 
        send_size = 0;
        for (i = 0; i < wr->num_sge; ++i)
index e74cdf9ef4716a4954f5608e2cc20121c05b67e7..a1f74f6381bae0d99126bdaba12fd6319e044b15 100644 (file)
@@ -1150,9 +1150,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                neigh_release(neigh);
        }
 
-       if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
-               neigh_event_send(rt->dst.neighbour, NULL);
-
+       if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
+               rcu_read_lock();
+               neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
+               rcu_read_unlock();
+       }
        ip_rt_put(rt);
        return rc;
 }
index d8ca0a0b970d39879e7d981acbf5cb1647dca424..65df26ce538a704da5989467b3f5012863133290 100644 (file)
@@ -2076,9 +2076,11 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
 static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
                                    u32 updegr, u32 egrhd, u32 npkts)
 {
-       qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
        if (updegr)
                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+       mmiowb();
+       qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+       mmiowb();
 }
 
 static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
index c765a2eb04cf27ba3b28ef4eb33e5e99ed6027ba..759bb63bb3b823a663ee9ecac1a02bb97ee64db6 100644 (file)
@@ -2704,9 +2704,11 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
                                    u32 updegr, u32 egrhd, u32 npkts)
 {
-       qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
        if (updegr)
                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+       mmiowb();
+       qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+       mmiowb();
 }
 
 static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
index 8ec5237031a08f3aa493ccf84715a2d5652f0e39..49e4a589479d6b23dbd1f78f1bab4b78f5b95e51 100644 (file)
@@ -4060,10 +4060,12 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
         */
        if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
                adjust_rcv_timeout(rcd, npkts);
-       qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
-       qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
        if (updegr)
                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+       mmiowb();
+       qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+       qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+       mmiowb();
 }
 
 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
index 7b6985a2e6520f98e38d4a38337a6809e0049d85..936804efb77681a463fe2f8b0c40c7a26e9de79c 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/mutex.h>
 
 #include <net/neighbour.h>
+#include <net/sch_generic.h>
 
 #include <asm/atomic.h>
 
@@ -117,8 +118,9 @@ struct ipoib_header {
        u16     reserved;
 };
 
-struct ipoib_pseudoheader {
-       u8  hwaddr[INFINIBAND_ALEN];
+struct ipoib_cb {
+       struct qdisc_skb_cb     qdisc_cb;
+       u8                      hwaddr[INFINIBAND_ALEN];
 };
 
 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
index 86addca9ddf6f9c035dd633ae7d2d88cafcb85e3..b811444dcdd4b2a9b9b048a549a2c6865aa3271e 100644 (file)
@@ -555,14 +555,17 @@ static int path_rec_start(struct net_device *dev,
        return 0;
 }
 
+/* called with rcu_read_lock */
 static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;
+       struct neighbour *n;
        unsigned long flags;
 
-       neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour, skb->dev);
+       n = dst_get_neighbour(skb_dst(skb));
+       neigh = ipoib_neigh_alloc(n, skb->dev);
        if (!neigh) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
@@ -571,9 +574,9 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       path = __path_find(dev, skb_dst(skb)->neighbour->ha + 4);
+       path = __path_find(dev, n->ha + 4);
        if (!path) {
-               path = path_rec_create(dev, skb_dst(skb)->neighbour->ha + 4);
+               path = path_rec_create(dev, n->ha + 4);
                if (!path)
                        goto err_path;
 
@@ -607,7 +610,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
                        }
                } else {
                        spin_unlock_irqrestore(&priv->lock, flags);
-                       ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
+                       ipoib_send(dev, skb, path->ah, IPOIB_QPN(n->ha));
                        return;
                }
        } else {
@@ -634,24 +637,28 @@ err_drop:
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+/* called with rcu_read_lock */
 static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
+       struct dst_entry *dst = skb_dst(skb);
+       struct neighbour *n;
 
        /* Look up path record for unicasts */
-       if (skb_dst(skb)->neighbour->ha[4] != 0xff) {
+       n = dst_get_neighbour(dst);
+       if (n->ha[4] != 0xff) {
                neigh_add_path(skb, dev);
                return;
        }
 
        /* Add in the P_Key for multicasts */
-       skb_dst(skb)->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
-       skb_dst(skb)->neighbour->ha[9] = priv->pkey & 0xff;
-       ipoib_mcast_send(dev, skb_dst(skb)->neighbour->ha + 4, skb);
+       n->ha[8] = (priv->pkey >> 8) & 0xff;
+       n->ha[9] = priv->pkey & 0xff;
+       ipoib_mcast_send(dev, n->ha + 4, skb);
 }
 
 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
-                            struct ipoib_pseudoheader *phdr)
+                            struct ipoib_cb *cb)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
@@ -659,17 +666,15 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 
        spin_lock_irqsave(&priv->lock, flags);
 
-       path = __path_find(dev, phdr->hwaddr + 4);
+       path = __path_find(dev, cb->hwaddr + 4);
        if (!path || !path->valid) {
                int new_path = 0;
 
                if (!path) {
-                       path = path_rec_create(dev, phdr->hwaddr + 4);
+                       path = path_rec_create(dev, cb->hwaddr + 4);
                        new_path = 1;
                }
                if (path) {
-                       /* put pseudoheader back on for next time */
-                       skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);
 
                        if (!path->query && path_rec_start(dev, path)) {
@@ -693,12 +698,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                          be16_to_cpu(path->pathrec.dlid));
 
                spin_unlock_irqrestore(&priv->lock, flags);
-               ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+               ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
                return;
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-               /* put pseudoheader back on for next time */
-               skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++dev->stats.tx_dropped;
@@ -712,18 +715,23 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
+       struct neighbour *n = NULL;
        unsigned long flags;
 
-       if (likely(skb_dst(skb) && skb_dst(skb)->neighbour)) {
-               if (unlikely(!*to_ipoib_neigh(skb_dst(skb)->neighbour))) {
+       rcu_read_lock();
+       if (likely(skb_dst(skb)))
+               n = dst_get_neighbour(skb_dst(skb));
+
+       if (likely(n)) {
+               if (unlikely(!*to_ipoib_neigh(n))) {
                        ipoib_path_lookup(skb, dev);
-                       return NETDEV_TX_OK;
+                       goto unlock;
                }
 
-               neigh = *to_ipoib_neigh(skb_dst(skb)->neighbour);
+               neigh = *to_ipoib_neigh(n);
 
                if (unlikely((memcmp(&neigh->dgid.raw,
-                                    skb_dst(skb)->neighbour->ha + 4,
+                                    n->ha + 4,
                                     sizeof(union ib_gid))) ||
                             (neigh->dev != dev))) {
                        spin_lock_irqsave(&priv->lock, flags);
@@ -740,17 +748,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        ipoib_neigh_free(dev, neigh);
                        spin_unlock_irqrestore(&priv->lock, flags);
                        ipoib_path_lookup(skb, dev);
-                       return NETDEV_TX_OK;
+                       goto unlock;
                }
 
                if (ipoib_cm_get(neigh)) {
                        if (ipoib_cm_up(neigh)) {
                                ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
-                               return NETDEV_TX_OK;
+                               goto unlock;
                        }
                } else if (neigh->ah) {
-                       ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
-                       return NETDEV_TX_OK;
+                       ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
+                       goto unlock;
                }
 
                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -762,16 +770,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        dev_kfree_skb_any(skb);
                }
        } else {
-               struct ipoib_pseudoheader *phdr =
-                       (struct ipoib_pseudoheader *) skb->data;
-               skb_pull(skb, sizeof *phdr);
+               struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
 
-               if (phdr->hwaddr[4] == 0xff) {
+               if (cb->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicast*/
-                       phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
-                       phdr->hwaddr[9] = priv->pkey & 0xff;
+                       cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+                       cb->hwaddr[9] = priv->pkey & 0xff;
 
-                       ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
+                       ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */
 
@@ -780,17 +786,18 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
                                           skb_dst(skb) ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
-                                          IPOIB_QPN(phdr->hwaddr),
-                                          phdr->hwaddr + 4);
+                                          IPOIB_QPN(cb->hwaddr),
+                                          cb->hwaddr + 4);
                                dev_kfree_skb_any(skb);
                                ++dev->stats.tx_dropped;
-                               return NETDEV_TX_OK;
+                               goto unlock;
                        }
 
-                       unicast_arp_send(skb, dev, phdr);
+                       unicast_arp_send(skb, dev, cb);
                }
        }
-
+unlock:
+       rcu_read_unlock();
        return NETDEV_TX_OK;
 }
 
@@ -819,14 +826,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
        header->reserved = 0;
 
        /*
-        * If we don't have a neighbour structure, stuff the
-        * destination address onto the front of the skb so we can
-        * figure out where to send the packet later.
+        * If we don't have a dst_entry structure, stuff the
+        * destination address into skb->cb so we can figure out where
+        * to send the packet later.
         */
-       if ((!skb_dst(skb) || !skb_dst(skb)->neighbour) && daddr) {
-               struct ipoib_pseudoheader *phdr =
-                       (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
-               memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+       if (!skb_dst(skb)) {
+               struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+               memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
        }
 
        return 0;
@@ -1002,11 +1008,7 @@ static void ipoib_setup(struct net_device *dev)
 
        dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;
 
-       /*
-        * We add in INFINIBAND_ALEN to allow for the destination
-        * address "pseudoheader" for skbs without neighbour struct.
-        */
-       dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
+       dev->hard_header_len     = IPOIB_ENCAP_LEN;
        dev->addr_len            = INFINIBAND_ALEN;
        dev->type                = ARPHRD_INFINIBAND;
        dev->tx_queue_len        = ipoib_sendq_size * 2;
index 3871ac663554339ac09766a5b3bdcc7c758a65e3..8b6350606d57395e3f7825f3edd0d6f22ee4ba4b 100644 (file)
@@ -258,17 +258,14 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
        netif_tx_lock_bh(dev);
        while (!skb_queue_empty(&mcast->pkt_queue)) {
                struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
+
                netif_tx_unlock_bh(dev);
 
                skb->dev = dev;
 
-               if (!skb_dst(skb) || !skb_dst(skb)->neighbour) {
-                       /* put pseudoheader back on for next time */
-                       skb_push(skb, sizeof (struct ipoib_pseudoheader));
-               }
-
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
+
                netif_tx_lock_bh(dev);
        }
        netif_tx_unlock_bh(dev);
@@ -715,11 +712,15 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 
 out:
        if (mcast && mcast->ah) {
-               if (skb_dst(skb)                &&
-                   skb_dst(skb)->neighbour &&
-                   !*to_ipoib_neigh(skb_dst(skb)->neighbour)) {
-                       struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour,
-                                                                       skb->dev);
+               struct dst_entry *dst = skb_dst(skb);
+               struct neighbour *n = NULL;
+
+               rcu_read_lock();
+               if (dst)
+                       n = dst_get_neighbour(dst);
+               if (n && !*to_ipoib_neigh(n)) {
+                       struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
+                                                                     skb->dev);
 
                        if (neigh) {
                                kref_get(&mcast->ah->ref);
@@ -727,7 +728,7 @@ out:
                                list_add_tail(&neigh->list, &mcast->neigh_list);
                        }
                }
-
+               rcu_read_unlock();
                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
                return;
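
The multicast path above moves to the RCU-based neighbour accessor; the rule it encodes is that the pointer returned by dst_get_neighbour() is only valid inside an RCU read-side section. A minimal sketch of that rule, with the driver-specific work elided:

#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/dst.h>

static void with_dst_neighbour_sketch(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *n = NULL;

	rcu_read_lock();
	if (dst)
		n = dst_get_neighbour(dst);
	if (n) {
		/* driver-specific work (e.g. ipoib_neigh_alloc()) goes here,
		 * while n is still protected by the read-side section */
	}
	rcu_read_unlock();
}
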
index 8db008de5392209c6da296d9a2aa7f3729e4ae72..f8f57583f5d07f55ad0260770623d6ef59050f5c 100644 (file)
@@ -354,6 +354,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        }
        ib_conn = ep->dd_data;
 
+       if (iser_alloc_rx_descriptors(ib_conn))
+               return -ENOMEM;
+
        /* binds the iSER connection retrieved from the previously
         * connected ep_handle to the iSCSI layer connection. exchanges
         * connection pointers */
@@ -388,19 +391,6 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
        iser_conn->ib_conn = NULL;
 }
 
-static int
-iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
-{
-       struct iscsi_conn *conn = cls_conn->dd_data;
-       int err;
-
-       err = iser_conn_set_full_featured_mode(conn);
-       if (err)
-               return err;
-
-       return iscsi_conn_start(cls_conn);
-}
-
 static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
 {
        struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
@@ -686,7 +676,7 @@ static struct iscsi_transport iscsi_iser_transport = {
        .get_conn_param         = iscsi_conn_get_param,
        .get_ep_param           = iscsi_iser_get_ep_param,
        .get_session_param      = iscsi_session_get_param,
-       .start_conn             = iscsi_iser_conn_start,
+       .start_conn             = iscsi_conn_start,
        .stop_conn              = iscsi_iser_conn_stop,
        /* iscsi host params */
        .get_host_param         = iscsi_host_get_param,
index 2f02ab0ccc1eb7e93c9a77aae44501646bc27ad8..634aef039fe25871cfd59ba5507705f06ff4b9bd 100644 (file)
@@ -365,4 +365,5 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
 int  iser_initialize_task_headers(struct iscsi_task *task,
                        struct iser_tx_desc *tx_desc);
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn);
 #endif
index 95a08a8ca8aab9fe3532cb877a38ba0026767bb0..eb1ee6f8d894899d80303df55b272d5abf291925 100644 (file)
@@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn  *ib_conn,
 }
 
 
-static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
 {
        int i, j;
        u64 dma_addr;
@@ -236,23 +236,24 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
        kfree(ib_conn->rx_descs);
 }
 
-/**
- *  iser_conn_set_full_featured_mode - (iSER API)
- */
-int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
+static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 {
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
-       iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
-
-       /* Check that there is no posted recv or send buffers left - */
-       /* they must be consumed during the login phase */
-       BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
-       BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
+       iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
+       /* check if this is the last login - going to full feature phase */
+       if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
+               return 0;
 
-       if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
-               return -ENOMEM;
+       /*
+        * Check that there is one posted recv buffer (for the last login
+        * response) and no posted send buffers left - they must have been
+        * consumed during previous login phases.
+        */
+       WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
+       WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
 
+       iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
        /* Initial post receive buffers */
        if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
                return -ENOMEM;
@@ -421,6 +422,9 @@ int iser_send_control(struct iscsi_conn *conn,
                err = iser_post_recvl(iser_conn->ib_conn);
                if (err)
                        goto send_control_error;
+               err = iser_post_rx_bufs(conn, task->hdr);
+               if (err)
+                       goto send_control_error;
        }
 
        err = iser_post_send(iser_conn->ib_conn, mdesc);
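
The iSER hunks above defer posting the receive ring until the final login request goes out, instead of using the removed iser_conn_set_full_featured_mode() hook. A minimal sketch of the phase test that gates it (header names as in the hunks; the helper itself is illustrative):

#include <linux/types.h>
#include <scsi/iscsi_proto.h>

/* True only when the login PDU transitions the session to full feature
 * phase; a partial match of the flag bits is not enough. */
static bool is_last_login_sketch(const struct iscsi_hdr *req)
{
	return (req->flags & ISCSI_FULL_FEATURE_PHASE) ==
		ISCSI_FULL_FEATURE_PHASE;
}
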
index 6e7dd687d5c9e3804902b9a059034375343f4a1a..6288d7d84fa7e061d9ad3967e37782fbc4a17c24 100644 (file)
@@ -45,6 +45,7 @@ struct evdev_client {
        unsigned int packet_head; /* [future] position of the first element of next packet */
        spinlock_t buffer_lock; /* protects access to buffer, head and tail */
        struct wake_lock wake_lock;
+       bool use_wake_lock;
        char name[28];
        struct fasync_struct *fasync;
        struct evdev *evdev;
@@ -62,7 +63,6 @@ static void evdev_pass_event(struct evdev_client *client,
        /* Interrupts are disabled, just acquire the lock. */
        spin_lock(&client->buffer_lock);
 
-       wake_lock_timeout(&client->wake_lock, 5 * HZ);
        client->buffer[client->head++] = *event;
        client->head &= client->bufsize - 1;
 
@@ -79,10 +79,14 @@ static void evdev_pass_event(struct evdev_client *client,
                client->buffer[client->tail].value = 0;
 
                client->packet_head = client->tail;
+               if (client->use_wake_lock)
+                       wake_unlock(&client->wake_lock);
        }
 
        if (event->type == EV_SYN && event->code == SYN_REPORT) {
                client->packet_head = client->head;
+               if (client->use_wake_lock)
+                       wake_lock(&client->wake_lock);
                kill_fasync(&client->fasync, SIGIO, POLL_IN);
        }
 
@@ -262,7 +266,8 @@ static int evdev_release(struct inode *inode, struct file *file)
        mutex_unlock(&evdev->mutex);
 
        evdev_detach_client(evdev, client);
-       wake_lock_destroy(&client->wake_lock);
+       if (client->use_wake_lock)
+               wake_lock_destroy(&client->wake_lock);
        kfree(client);
 
        evdev_close_device(evdev);
@@ -316,7 +321,6 @@ static int evdev_open(struct inode *inode, struct file *file)
        spin_lock_init(&client->buffer_lock);
        snprintf(client->name, sizeof(client->name), "%s-%d",
                        dev_name(&evdev->dev), task_tgid_vnr(current));
-       wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
        client->evdev = evdev;
        evdev_attach_client(evdev, client);
 
@@ -331,7 +335,6 @@ static int evdev_open(struct inode *inode, struct file *file)
 
  err_free_client:
        evdev_detach_client(evdev, client);
-       wake_lock_destroy(&client->wake_lock);
        kfree(client);
  err_put_evdev:
        put_device(&evdev->dev);
@@ -385,7 +388,8 @@ static int evdev_fetch_next_event(struct evdev_client *client,
        if (have_event) {
                *event = client->buffer[client->tail++];
                client->tail &= client->bufsize - 1;
-               if (client->head == client->tail)
+               if (client->use_wake_lock &&
+                   client->packet_head == client->tail)
                        wake_unlock(&client->wake_lock);
        }
 
@@ -635,6 +639,35 @@ static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
        return input_set_keycode(dev, &ke);
 }
 
+static int evdev_enable_suspend_block(struct evdev *evdev,
+                                     struct evdev_client *client)
+{
+       if (client->use_wake_lock)
+               return 0;
+
+       spin_lock_irq(&client->buffer_lock);
+       wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
+       client->use_wake_lock = true;
+       if (client->packet_head != client->tail)
+               wake_lock(&client->wake_lock);
+       spin_unlock_irq(&client->buffer_lock);
+       return 0;
+}
+
+static int evdev_disable_suspend_block(struct evdev *evdev,
+                                      struct evdev_client *client)
+{
+       if (!client->use_wake_lock)
+               return 0;
+
+       spin_lock_irq(&client->buffer_lock);
+       client->use_wake_lock = false;
+       wake_lock_destroy(&client->wake_lock);
+       spin_unlock_irq(&client->buffer_lock);
+
+       return 0;
+}
+
 static long evdev_do_ioctl(struct file *file, unsigned int cmd,
                           void __user *p, int compat_mode)
 {
@@ -708,6 +741,15 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 
        case EVIOCSKEYCODE_V2:
                return evdev_handle_set_keycode_v2(dev, p);
+
+       case EVIOCGSUSPENDBLOCK:
+               return put_user(client->use_wake_lock, ip);
+
+       case EVIOCSSUSPENDBLOCK:
+               if (p)
+                       return evdev_enable_suspend_block(evdev, client);
+               else
+                       return evdev_disable_suspend_block(evdev, client);
        }
 
        size = _IOC_SIZE(cmd);
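
From userspace, the new ioctls above are what opt an evdev reader in to the per-client wake lock. A hedged usage sketch (the device node is only an example; EVIOCSSUSPENDBLOCK/EVIOCGSUSPENDBLOCK are assumed to come from this tree's <linux/input.h>, and error handling is trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	int enabled = 0;
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* non-zero argument -> evdev_enable_suspend_block() */
	if (ioctl(fd, EVIOCSSUSPENDBLOCK, 1))
		return 1;
	/* read back the current setting; should now report 1 */
	ioctl(fd, EVIOCGSUSPENDBLOCK, &enabled);
	printf("suspend block: %d\n", enabled);
	return 0;
}
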
index 99d58764ef03573e94c624625ae64f710d06ee69..0b9944346ec3a067b01007371368fff4b9811052 100644 (file)
@@ -426,7 +426,9 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
 
        /*
         * First try "E6 report".
-        * ALPS should return 0,0,10 or 0,0,100
+        * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
+        * Bits 0-2 of the first byte will be set if any buttons are
+        * pressed.
         */
        param[0] = 0;
        if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
@@ -441,7 +443,8 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
 
        dbg("E6 report: %2.2x %2.2x %2.2x", param[0], param[1], param[2]);
 
-       if (param[0] != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100))
+       if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
+           (param[2] != 10 && param[2] != 100))
                return NULL;
 
        /*
index e06e045bf907a4a77ed33998b94ad29438e6c542..6ad728f0e287d44c3f8c0c9a0c838a334a57f804 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/input/mt.h>
 #include <linux/serio.h>
@@ -760,6 +761,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
 
        do {
                psmouse_reset(psmouse);
+               if (retry) {
+                       /*
+                        * On some boxes, right after resuming, the touchpad
+                        * needs some time to finish initializing (I assume
+                        * it needs time to calibrate) and start responding
+                        * to Synaptics-specific queries, so let's wait a
+                        * bit.
+                        */
+                       ssleep(1);
+               }
                error = synaptics_detect(psmouse, 0);
        } while (error && ++retry < 3);
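
The reconnect change above boils down to a retry-with-delay loop: sleep one second before every retry after the first attempt so the pad has time to settle. A minimal sketch of that shape (detect() stands in for synaptics_detect(), and the reset step is elided):

#include <linux/delay.h>

static int detect_with_retry_sketch(int (*detect)(void))
{
	int error, retry = 0;

	do {
		if (retry)
			ssleep(1);	/* give the touchpad time to settle */
		error = detect();
	} while (error && ++retry < 3);

	return error;
}
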
 
index 658e75f18d052b5a721ff2e6882109c945318d5b..d1dde6577fa935b60d26f2186076ee761be5a420 100644 (file)
@@ -14,6 +14,7 @@
 #include "gigaset.h"
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/ratelimit.h>
 #include <linux/isdn/capilli.h>
 #include <linux/isdn/capicmd.h>
 #include <linux/isdn/capiutil.h>
@@ -222,10 +223,14 @@ get_appl(struct gigaset_capi_ctr *iif, u16 appl)
 static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
 {
 #ifdef CONFIG_GIGASET_DEBUG
+       /* dump at most 20 messages in 20 secs */
+       static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
        _cdebbuf *cdb;
 
        if (!(gigaset_debuglevel & level))
                return;
+       if (!___ratelimit(&msg_dump_ratelimit, tag))
+               return;
 
        cdb = capi_cmsg2str(p);
        if (cdb) {
@@ -2057,12 +2062,6 @@ static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
                  CapiResetProcedureNotSupportedByCurrentProtocol);
 }
 
-/*
- * dump unsupported/ignored messages at most twice per minute,
- * some apps send those very frequently
- */
-static unsigned long ignored_msg_dump_time;
-
 /*
  * unsupported CAPI message handler
  */
@@ -2072,8 +2071,7 @@ static void do_unsupported(struct gigaset_capi_ctr *iif,
 {
        /* decode message */
        capi_message2cmsg(&iif->acmsg, skb->data);
-       if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000))
-               dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+       dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
        send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
 }
 
@@ -2084,11 +2082,9 @@ static void do_nothing(struct gigaset_capi_ctr *iif,
                       struct gigaset_capi_appl *ap,
                       struct sk_buff *skb)
 {
-       if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) {
-               /* decode message */
-               capi_message2cmsg(&iif->acmsg, skb->data);
-               dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-       }
+       /* decode message */
+       capi_message2cmsg(&iif->acmsg, skb->data);
+       dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
        dev_kfree_skb_any(skb);
 }
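
The gigaset hunks above replace the shared printk_timed_ratelimit() timestamp with a ratelimit state local to the dump helper. A minimal sketch of that pattern, using the same 20-per-20-seconds budget as the hunk:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

static void noisy_debug_sketch(const char *tag)
{
	/* at most 20 messages per 20-second window */
	static DEFINE_RATELIMIT_STATE(rs, 20 * HZ, 20);

	if (!___ratelimit(&rs, tag))
		return;		/* over budget: suppressed */

	printk(KERN_DEBUG "%s: dumping message\n", tag);
}
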
 
index dc3d3d83191a043de95e7e7b41a5aa47cf57100c..6d5628bb060115d6e3f032210f9bc862a0ef397b 100644 (file)
@@ -267,6 +267,8 @@ void led_blink_set(struct led_classdev *led_cdev,
                   unsigned long *delay_on,
                   unsigned long *delay_off)
 {
+       del_timer_sync(&led_cdev->blink_timer);
+
        if (led_cdev->blink_set &&
            !led_cdev->blink_set(led_cdev, delay_on, delay_off))
                return;
index 574b09afedd32ff1f8f30ddefab722fb01edd17d..2eba9a12a6abf8029b7a2ac477dda75e8ed3dd4a 100644 (file)
@@ -1897,7 +1897,9 @@ int bitmap_load(mddev_t *mddev)
                         * re-add of a missing device */
                        start = mddev->recovery_cp;
 
+               mutex_lock(&mddev->bitmap_info.mutex);
                err = bitmap_init_from_disk(bitmap, start);
+               mutex_unlock(&mddev->bitmap_info.mutex);
        }
        if (err)
                goto out;
@@ -1982,6 +1984,8 @@ location_store(mddev_t *mddev, const char *buf, size_t len)
                        if (mddev->pers) {
                                mddev->pers->quiesce(mddev, 1);
                                rv = bitmap_create(mddev);
+                               if (!rv)
+                                       rv = bitmap_load(mddev);
                                if (rv) {
                                        bitmap_destroy(mddev);
                                        mddev->bitmap_info.offset = 0;
index c8827ffd85bb961b7e9cad4d54647ea0251fd716..6f906bc9328bc35d9de37efb7cf53f00e0349f5d 100644 (file)
@@ -177,7 +177,6 @@ struct crypt_config {
 
 #define MIN_IOS        16
 #define MIN_POOL_PAGES 32
-#define MIN_BIO_PAGES  8
 
 static struct kmem_cache *_crypt_io_pool;
 
@@ -849,12 +848,11 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
                }
 
                /*
-                * if additional pages cannot be allocated without waiting,
-                * return a partially allocated bio, the caller will then try
-                * to allocate additional bios while submitting this partial bio
+                * If additional pages cannot be allocated without waiting,
+                * return a partially-allocated bio.  The caller will then try
+                * to allocate more bios while submitting this partial bio.
                 */
-               if (i == (MIN_BIO_PAGES - 1))
-                       gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+               gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
                len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
 
@@ -1047,16 +1045,14 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
        queue_work(cc->io_queue, &io->work);
 }
 
-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
-                                         int error, int async)
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
        struct bio *clone = io->ctx.bio_out;
        struct crypt_config *cc = io->target->private;
 
-       if (unlikely(error < 0)) {
+       if (unlikely(io->error < 0)) {
                crypt_free_buffer_pages(cc, clone);
                bio_put(clone);
-               io->error = -EIO;
                crypt_dec_pending(io);
                return;
        }
@@ -1107,12 +1103,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
                sector += bio_sectors(clone);
 
                crypt_inc_pending(io);
+
                r = crypt_convert(cc, &io->ctx);
+               if (r < 0)
+                       io->error = -EIO;
+
                crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
                /* Encryption was already finished, submit io now */
                if (crypt_finished) {
-                       kcryptd_crypt_write_io_submit(io, r, 0);
+                       kcryptd_crypt_write_io_submit(io, 0);
 
                        /*
                         * If there was an error, do not try next fragments.
@@ -1163,11 +1163,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        crypt_dec_pending(io);
 }
 
-static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
+static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 {
-       if (unlikely(error < 0))
-               io->error = -EIO;
-
        crypt_dec_pending(io);
 }
 
@@ -1182,9 +1179,11 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
                           io->sector);
 
        r = crypt_convert(cc, &io->ctx);
+       if (r < 0)
+               io->error = -EIO;
 
        if (atomic_dec_and_test(&io->ctx.pending))
-               kcryptd_crypt_read_done(io, r);
+               kcryptd_crypt_read_done(io);
 
        crypt_dec_pending(io);
 }
@@ -1205,15 +1204,18 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
        if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
                error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
 
+       if (error < 0)
+               io->error = -EIO;
+
        mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
        if (!atomic_dec_and_test(&ctx->pending))
                return;
 
        if (bio_data_dir(io->base_bio) == READ)
-               kcryptd_crypt_read_done(io, error);
+               kcryptd_crypt_read_done(io);
        else
-               kcryptd_crypt_write_io_submit(io, error, 1);
+               kcryptd_crypt_write_io_submit(io, 1);
 }
 
 static void kcryptd_crypt(struct work_struct *work)
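
The dm-crypt hunks above stop threading error codes through the completion callbacks and instead latch them into the per-io error field, which the final completion consults. A minimal sketch of that plumbing (the struct is illustrative; only the error/pending roles mirror the hunks):

#include <linux/atomic.h>
#include <linux/errno.h>

struct crypt_io_sketch {
	atomic_t pending;
	int error;
};

static void crypt_step_done_sketch(struct crypt_io_sketch *io, int r)
{
	if (r < 0)
		io->error = -EIO;	/* remember the failure */

	if (atomic_dec_and_test(&io->pending)) {
		/* both the read and write completion paths now only
		 * consult io->error */
	}
}
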
index 0bdb201c2c2af04ceea2905db5f56c10fe1091df..7344534294aa5e4e4f0f813023d088bfaf38123e 100644 (file)
@@ -282,7 +282,7 @@ int dm_exception_store_init(void)
        return 0;
 
 persistent_fail:
-       dm_persistent_snapshot_exit();
+       dm_transient_snapshot_exit();
 transient_fail:
        return r;
 }
index ea790623c30ba0d7522905d6f77f7db95c0cdebd..3e90b8014f984df7b47d20d2bec7f36985b3d2f8 100644 (file)
@@ -149,8 +149,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
 static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
 {
        struct flakey_c *fc = ti->private;
+       struct dm_dev *dev = fc->dev;
+       int r = 0;
 
-       return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
+       /*
+        * Only pass ioctls through if the device sizes match exactly.
+        */
+       if (fc->start ||
+           ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+               r = scsi_verify_blk_ioctl(NULL, cmd);
+
+       return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
 static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
index ad2eba40e3190e700eab3136b8af5dd150cbf208..ea5dd289fe2a591cf62246eb10b36ab445de201f 100644 (file)
@@ -296,6 +296,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
        unsigned offset;
        unsigned num_bvecs;
        sector_t remaining = where->count;
+       struct request_queue *q = bdev_get_queue(where->bdev);
+       sector_t discard_sectors;
 
        /*
         * where->count may be zero if rw holds a flush and we need to
@@ -305,9 +307,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                /*
                 * Allocate a suitably sized-bio.
                 */
-               num_bvecs = dm_sector_div_up(remaining,
-                                            (PAGE_SIZE >> SECTOR_SHIFT));
-               num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
+               if (rw & REQ_DISCARD)
+                       num_bvecs = 1;
+               else
+                       num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+                                         dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
                bio->bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
@@ -315,10 +320,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                bio->bi_destructor = dm_bio_destructor;
                store_io_and_region_in_bio(bio, io, region);
 
-               /*
-                * Try and add as many pages as possible.
-                */
-               while (remaining) {
+               if (rw & REQ_DISCARD) {
+                       discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+                       bio->bi_size = discard_sectors << SECTOR_SHIFT;
+                       remaining -= discard_sectors;
+               } else while (remaining) {
+                       /*
+                        * Try and add as many pages as possible.
+                        */
                        dp->get_page(dp, &page, &len, &offset);
                        len = min(len, to_bytes(remaining));
                        if (!bio_add_page(bio, page, len, offset))
index 3921e3bb43c15a107c90e4769fa682b1041d9a7a..9728839f844a8b71a34c04b7317a9dc1fe1497ba 100644 (file)
@@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
                        unsigned long arg)
 {
        struct linear_c *lc = (struct linear_c *) ti->private;
-       return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
+       struct dm_dev *dev = lc->dev;
+       int r = 0;
+
+       /*
+        * Only pass ioctls through if the device sizes match exactly.
+        */
+       if (lc->start ||
+           ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+               r = scsi_verify_blk_ioctl(NULL, cmd);
+
+       return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
 static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
index 209991bebd30d51f6b9d82476eabb96b5945c157..70373bfa20bc4f5da87049c94c7ecd87fa9e0f9b 100644 (file)
@@ -1584,6 +1584,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 
        spin_unlock_irqrestore(&m->lock, flags);
 
+       /*
+        * Only pass ioctls through if the device sizes match exactly.
+        */
+       if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
+               r = scsi_verify_blk_ioctl(NULL, cmd);
+
        return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 }
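
The flakey, linear and multipath ioctl hunks above share one rule: SCSI ioctls are only forwarded untouched when the target maps the whole underlying device from sector 0; otherwise scsi_verify_blk_ioctl() vets the command first. A consolidated sketch of that check (start/len stand in for the per-target fields each hunk uses):

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/device-mapper.h>

static int dm_ioctl_passthrough_sketch(struct block_device *bdev, fmode_t mode,
				       sector_t start, sector_t len,
				       unsigned int cmd, unsigned long arg)
{
	int r = 0;

	/* sizes must match exactly before forwarding blindly */
	if (start || len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);

	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
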
 
index e5d8904fc8f647162d4a7d79150491797cfda6d2..437ae1825f1362546180dcb2f56f71356a420ddf 100644 (file)
@@ -468,6 +468,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
        INIT_WORK(&rs->md.event_work, do_table_event);
        ti->split_io = rs->md.chunk_sectors;
        ti->private = rs;
+       ti->num_flush_requests = 1;
 
        mutex_lock(&rs->md.reconfig_mutex);
        ret = md_run(&rs->md);
index bc8342812d06e0993b70ad88c03264fece4ee129..8b04a02672b41c5f92b418b758163a23b3b9e1dd 100644 (file)
@@ -348,6 +348,8 @@ void mddev_suspend(mddev_t *mddev)
        synchronize_rcu();
        wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
        mddev->pers->quiesce(mddev, 1);
+
+       del_timer_sync(&mddev->safemode_timer);
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
 
@@ -407,7 +409,7 @@ static void submit_flushes(struct work_struct *ws)
                        atomic_inc(&rdev->nr_pending);
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
-                       bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
+                       bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
                        bi->bi_bdev = rdev->bdev;
index 3a9e59fe7ad359ced4453f4d5f33be6cc2a34a02..36f1ed313ae398af80950b86a86ed4e86700f418 100644 (file)
@@ -614,9 +614,22 @@ static void wait_barrier(conf_t *conf)
        spin_lock_irq(&conf->resync_lock);
        if (conf->barrier) {
                conf->nr_waiting++;
-               wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
+               /* Wait for the barrier to drop.
+                * However if there are already pending
+                * requests (preventing the barrier from
+                * rising completely), and the
+                * pre-process bio queue isn't empty,
+                * then don't wait, as we need to empty
+                * that queue to get the nr_pending
+                * count down.
+                */
+               wait_event_lock_irq(conf->wait_barrier,
+                                   !conf->barrier ||
+                                   (conf->nr_pending &&
+                                    current->bio_list &&
+                                    !bio_list_empty(current->bio_list)),
                                    conf->resync_lock,
-                                   );
+                       );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
index 17cb6ab62308f449c81419bc380db57cb821d7e3..0d6c42f70a355287e426136921bf3d35191a0b45 100644 (file)
@@ -667,9 +667,22 @@ static void wait_barrier(conf_t *conf)
        spin_lock_irq(&conf->resync_lock);
        if (conf->barrier) {
                conf->nr_waiting++;
-               wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
+               /* Wait for the barrier to drop.
+                * However if there are already pending
+                * requests (preventing the barrier from
+                * rising completely), and the
+                * pre-process bio queue isn't empty,
+                * then don't wait, as we need to empty
+                * that queue to get the nr_pending
+                * count down.
+                */
+               wait_event_lock_irq(conf->wait_barrier,
+                                   !conf->barrier ||
+                                   (conf->nr_pending &&
+                                    current->bio_list &&
+                                    !bio_list_empty(current->bio_list)),
                                    conf->resync_lock,
-                                   );
+                       );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
index 2581ba127354d8e8acf30d9ffbe273366b23c157..1f6c68df6f382de7f62fddb459c3a9d5bf2377e0 100644 (file)
@@ -3078,7 +3078,7 @@ static void handle_stripe5(struct stripe_head *sh)
                        /* Not in-sync */;
                else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else {
+               else if (!test_bit(Faulty, &rdev->flags)) {
                        /* could be in-sync depending on recovery/reshape status */
                        if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                                set_bit(R5_Insync, &dev->flags);
@@ -3120,12 +3120,16 @@ static void handle_stripe5(struct stripe_head *sh)
        /* check if the array has lost two devices and, if so, some requests might
         * need to be failed
         */
-       if (s.failed > 1 && s.to_read+s.to_write+s.written)
-               handle_failed_stripe(conf, sh, &s, disks, &return_bi);
-       if (s.failed > 1 && s.syncing) {
-               md_done_sync(conf->mddev, STRIPE_SECTORS,0);
-               clear_bit(STRIPE_SYNCING, &sh->state);
-               s.syncing = 0;
+       if (s.failed > 1) {
+               sh->check_state = 0;
+               sh->reconstruct_state = 0;
+               if (s.to_read+s.to_write+s.written)
+                       handle_failed_stripe(conf, sh, &s, disks, &return_bi);
+               if (s.syncing) {
+                       md_done_sync(conf->mddev, STRIPE_SECTORS,0);
+                       clear_bit(STRIPE_SYNCING, &sh->state);
+                       s.syncing = 0;
+               }
        }
 
        /* might be able to return some write requests if the parity block
@@ -3369,7 +3373,7 @@ static void handle_stripe6(struct stripe_head *sh)
                        /* Not in-sync */;
                else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else {
+               else if (!test_bit(Faulty, &rdev->flags)) {
                        /* in sync if before recovery_offset */
                        if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                                set_bit(R5_Insync, &dev->flags);
@@ -3412,12 +3416,16 @@ static void handle_stripe6(struct stripe_head *sh)
        /* check if the array has lost >2 devices and, if so, some requests
         * might need to be failed
         */
-       if (s.failed > 2 && s.to_read+s.to_write+s.written)
-               handle_failed_stripe(conf, sh, &s, disks, &return_bi);
-       if (s.failed > 2 && s.syncing) {
-               md_done_sync(conf->mddev, STRIPE_SECTORS,0);
-               clear_bit(STRIPE_SYNCING, &sh->state);
-               s.syncing = 0;
+       if (s.failed > 2) {
+               sh->check_state = 0;
+               sh->reconstruct_state = 0;
+               if (s.to_read+s.to_write+s.written)
+                       handle_failed_stripe(conf, sh, &s, disks, &return_bi);
+               if (s.syncing) {
+                       md_done_sync(conf->mddev, STRIPE_SECTORS,0);
+                       clear_bit(STRIPE_SYNCING, &sh->state);
+                       s.syncing = 0;
+               }
        }
 
        /*
index 5eb91b4f8fd08ff98c38dc9e4f90e77d086879a6..a224e94325b7160ea21f684fd5857d143f534049 100644 (file)
@@ -30,6 +30,11 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
        struct dib0700_state *st = d->priv;
        int ret;
 
+       if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+               deb_info("could not acquire lock");
+               return 0;
+       }
+
        ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
                                  REQUEST_GET_VERSION,
                                  USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
@@ -46,6 +51,7 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
        if (fwtype != NULL)
                *fwtype     = (st->buf[12] << 24) | (st->buf[13] << 16) |
                        (st->buf[14] << 8) | st->buf[15];
+       mutex_unlock(&d->usb_mutex);
        return ret;
 }
 
@@ -108,7 +114,12 @@ int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u8 rxlen
 int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val)
 {
        struct dib0700_state *st = d->priv;
-       s16 ret;
+       int ret;
+
+       if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+               deb_info("could not acquire lock");
+               return 0;
+       }
 
        st->buf[0] = REQUEST_SET_GPIO;
        st->buf[1] = gpio;
@@ -116,6 +127,7 @@ int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_
 
        ret = dib0700_ctrl_wr(d, st->buf, 3);
 
+       mutex_unlock(&d->usb_mutex);
        return ret;
 }
 
@@ -125,6 +137,11 @@ static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
        int ret;
 
        if (st->fw_version >= 0x10201) {
+               if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+                       deb_info("could not acquire lock");
+                       return 0;
+               }
+
                st->buf[0] = REQUEST_SET_USB_XFER_LEN;
                st->buf[1] = (nb_ts_packets >> 8) & 0xff;
                st->buf[2] = nb_ts_packets & 0xff;
@@ -132,6 +149,7 @@ static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
                deb_info("set the USB xfer len to %i Ts packet\n", nb_ts_packets);
 
                ret = dib0700_ctrl_wr(d, st->buf, 3);
+               mutex_unlock(&d->usb_mutex);
        } else {
                deb_info("this firmware does not allow to change the USB xfer len\n");
                ret = -EIO;
@@ -208,6 +226,10 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
 
                } else {
                        /* Write request */
+                       if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+                               deb_info("could not acquire lock");
+                               return 0;
+                       }
                        st->buf[0] = REQUEST_NEW_I2C_WRITE;
                        st->buf[1] = msg[i].addr << 1;
                        st->buf[2] = (en_start << 7) | (en_stop << 6) |
@@ -227,6 +249,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
                                                 USB_TYPE_VENDOR | USB_DIR_OUT,
                                                 0, 0, st->buf, msg[i].len + 4,
                                                 USB_CTRL_GET_TIMEOUT);
+                       mutex_unlock(&d->usb_mutex);
                        if (result < 0) {
                                deb_info("i2c write error (status = %d)\n", result);
                                break;
@@ -249,6 +272,10 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
 
        if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
                return -EAGAIN;
+       if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+               deb_info("could not acquire lock");
+               return 0;
+       }
 
        for (i = 0; i < num; i++) {
                /* fill in the address */
@@ -279,6 +306,7 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
                                break;
                }
        }
+       mutex_unlock(&d->usb_mutex);
        mutex_unlock(&d->i2c_mutex);
 
        return i;
@@ -337,7 +365,12 @@ static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
        u16 pll_loopdiv, u16 free_div, u16 dsuScaler)
 {
        struct dib0700_state *st = d->priv;
-       s16 ret;
+       int ret;
+
+       if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+               deb_info("could not acquire lock");
+               return 0;
+       }
 
        st->buf[0] = REQUEST_SET_CLOCK;
        st->buf[1] = (en_pll << 7) | (pll_src << 6) |
@@ -352,6 +385,7 @@ static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
        st->buf[9] =  dsuScaler         & 0xff; /* LSB */
 
        ret = dib0700_ctrl_wr(d, st->buf, 10);
+       mutex_unlock(&d->usb_mutex);
 
        return ret;
 }
@@ -360,10 +394,16 @@ int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz)
 {
        struct dib0700_state *st = d->priv;
        u16 divider;
+       int ret;
 
        if (scl_kHz == 0)
                return -EINVAL;
 
+       if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+               deb_info("could not acquire lock");
+               return 0;
+       }
+
        st->buf[0] = REQUEST_SET_I2C_PARAM;
        divider = (u16) (30000 / scl_kHz);
        st->buf[1] = 0;
@@ -379,7 +419,11 @@ int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz)
        deb_info("setting I2C speed: %04x %04x %04x (%d kHz).",
                (st->buf[2] << 8) | (st->buf[3]), (st->buf[4] << 8) |
                st->buf[5], (st->buf[6] << 8) | st->buf[7], scl_kHz);
-       return dib0700_ctrl_wr(d, st->buf, 8);
+
+       ret = dib0700_ctrl_wr(d, st->buf, 8);
+       mutex_unlock(&d->usb_mutex);
+
+       return ret;
 }
 
 
@@ -515,6 +559,11 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
                }
        }
 
+       if (mutex_lock_interruptible(&adap->dev->usb_mutex) < 0) {
+               deb_info("could not acquire lock");
+               return 0;
+       }
+
        st->buf[0] = REQUEST_ENABLE_VIDEO;
        /* this bit gives a kind of command,
         * rather than enabling something or not */
@@ -548,7 +597,10 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
 
        deb_info("data for streaming: %x %x\n", st->buf[1], st->buf[2]);
 
-       return dib0700_ctrl_wr(adap->dev, st->buf, 4);
+       ret = dib0700_ctrl_wr(adap->dev, st->buf, 4);
+       mutex_unlock(&adap->dev->usb_mutex);
+
+       return ret;
 }
 
 int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
@@ -557,6 +609,11 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
        struct dib0700_state *st = d->priv;
        int new_proto, ret;
 
+       if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
+               deb_info("could not acquire lock");
+               return 0;
+       }
+
        st->buf[0] = REQUEST_SET_RC;
        st->buf[1] = 0;
        st->buf[2] = 0;
@@ -567,23 +624,29 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
        else if (rc_type == RC_TYPE_NEC)
                new_proto = 0;
        else if (rc_type == RC_TYPE_RC6) {
-               if (st->fw_version < 0x10200)
-                       return -EINVAL;
+               if (st->fw_version < 0x10200) {
+                       ret = -EINVAL;
+                       goto out;
+               }
 
                new_proto = 2;
-       } else
-               return -EINVAL;
+       } else {
+               ret = -EINVAL;
+               goto out;
+       }
 
        st->buf[1] = new_proto;
 
        ret = dib0700_ctrl_wr(d, st->buf, 3);
        if (ret < 0) {
                err("ir protocol setup failed");
-               return ret;
+               goto out;
        }
 
        d->props.rc.core.protocol = rc_type;
 
+out:
+       mutex_unlock(&d->usb_mutex);
        return ret;
 }
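
Throughout the dib0700 hunks above, the new rule is that every user of the shared control-message scratch buffer takes usb_mutex first and drops it after the transfer; an interrupted lock attempt bails out early, as the driver does. A generic sketch of that shape (ctrl_write() stands in for dib0700_ctrl_wr()/usb_control_msg()):

#include <linux/types.h>
#include <linux/mutex.h>

static int guarded_ctrl_write_sketch(struct mutex *usb_mutex, u8 *buf,
				     int (*ctrl_write)(u8 *buf, int len))
{
	int ret;

	if (mutex_lock_interruptible(usb_mutex) < 0)
		return 0;	/* mirrors the driver's "could not acquire lock" path */

	buf[0] = 0x00;		/* request id (illustrative) */
	buf[1] = 0x01;		/* payload (illustrative) */

	ret = ctrl_write(buf, 2);
	mutex_unlock(usb_mutex);

	return ret;
}
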
 
index 1d47d4da7d4c4222373e8e89f55cd4fd1cd36e5e..dc1cb17a6ea716993fe5e31c187bcd1f41881838 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/mutex.h>
 
 #include "dvb_frontend.h"
 
@@ -78,10 +79,18 @@ struct dib0070_state {
        struct i2c_msg msg[2];
        u8 i2c_write_buffer[3];
        u8 i2c_read_buffer[2];
+       struct mutex i2c_buffer_lock;
 };
 
-static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
+static u16 dib0070_read_reg(struct dib0070_state *state, u8 reg)
 {
+       u16 ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return 0;
+       }
+
        state->i2c_write_buffer[0] = reg;
 
        memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
@@ -96,13 +105,23 @@ static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
 
        if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
                printk(KERN_WARNING "DiB0070 I2C read failed\n");
-               return 0;
-       }
-       return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+               ret = 0;
+       } else
+               ret = (state->i2c_read_buffer[0] << 8)
+                       | state->i2c_read_buffer[1];
+
+       mutex_unlock(&state->i2c_buffer_lock);
+       return ret;
 }
 
 static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
 {
+       int ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
        state->i2c_write_buffer[0] = reg;
        state->i2c_write_buffer[1] = val >> 8;
        state->i2c_write_buffer[2] = val & 0xff;
@@ -115,9 +134,12 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
 
        if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
                printk(KERN_WARNING "DiB0070 I2C write failed\n");
-               return -EREMOTEIO;
-       }
-       return 0;
+               ret = -EREMOTEIO;
+       } else
+               ret = 0;
+
+       mutex_unlock(&state->i2c_buffer_lock);
+       return ret;
 }
 
 #define HARD_RESET(state) do { \
@@ -734,6 +756,7 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
        state->cfg = cfg;
        state->i2c = i2c;
        state->fe  = fe;
+       mutex_init(&state->i2c_buffer_lock);
        fe->tuner_priv = state;
 
        if (dib0070_reset(fe) != 0)
index c9c935ae41e47bf9eaccb4f6e9df0ef0701b6e18..b174d1c78583ee1001b8892626c80b00450f25fa 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/mutex.h>
 
 #include "dvb_frontend.h"
 
@@ -196,6 +197,7 @@ struct dib0090_state {
        struct i2c_msg msg[2];
        u8 i2c_write_buffer[3];
        u8 i2c_read_buffer[2];
+       struct mutex i2c_buffer_lock;
 };
 
 struct dib0090_fw_state {
@@ -208,10 +210,18 @@ struct dib0090_fw_state {
        struct i2c_msg msg;
        u8 i2c_write_buffer[2];
        u8 i2c_read_buffer[2];
+       struct mutex i2c_buffer_lock;
 };
 
 static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
 {
+       u16 ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return 0;
+       }
+
        state->i2c_write_buffer[0] = reg;
 
        memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
@@ -226,14 +236,24 @@ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
 
        if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
                printk(KERN_WARNING "DiB0090 I2C read failed\n");
-               return 0;
-       }
+               ret = 0;
+       } else
+               ret = (state->i2c_read_buffer[0] << 8)
+                       | state->i2c_read_buffer[1];
 
-       return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+       mutex_unlock(&state->i2c_buffer_lock);
+       return ret;
 }
 
 static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
 {
+       int ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+
        state->i2c_write_buffer[0] = reg & 0xff;
        state->i2c_write_buffer[1] = val >> 8;
        state->i2c_write_buffer[2] = val & 0xff;
@@ -246,13 +266,23 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
 
        if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
                printk(KERN_WARNING "DiB0090 I2C write failed\n");
-               return -EREMOTEIO;
-       }
-       return 0;
+               ret = -EREMOTEIO;
+       } else
+               ret = 0;
+
+       mutex_unlock(&state->i2c_buffer_lock);
+       return ret;
 }
 
 static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
 {
+       u16 ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return 0;
+       }
+
        state->i2c_write_buffer[0] = reg;
 
        memset(&state->msg, 0, sizeof(struct i2c_msg));
@@ -262,13 +292,24 @@ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
        state->msg.len = 2;
        if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
                printk(KERN_WARNING "DiB0090 I2C read failed\n");
-               return 0;
-       }
-       return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+               ret = 0;
+       } else
+               ret = (state->i2c_read_buffer[0] << 8)
+                       | state->i2c_read_buffer[1];
+
+       mutex_unlock(&state->i2c_buffer_lock);
+       return ret;
 }
 
 static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
 {
+       int ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+
        state->i2c_write_buffer[0] = val >> 8;
        state->i2c_write_buffer[1] = val & 0xff;
 
@@ -279,9 +320,12 @@ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
        state->msg.len = 2;
        if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
                printk(KERN_WARNING "DiB0090 I2C write failed\n");
-               return -EREMOTEIO;
-       }
-       return 0;
+               ret = -EREMOTEIO;
+       } else
+               ret = 0;
+
+       mutex_unlock(&state->i2c_buffer_lock);
+       return ret;
 }
 
 #define HARD_RESET(state) do {  if (cfg->reset) {  if (cfg->sleep) cfg->sleep(fe, 0); msleep(10);  cfg->reset(fe, 1); msleep(10);  cfg->reset(fe, 0); msleep(10);  }  } while (0)
@@ -2440,6 +2484,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
        st->config = config;
        st->i2c = i2c;
        st->fe = fe;
+       mutex_init(&st->i2c_buffer_lock);
        fe->tuner_priv = st;
 
        if (config->wbd == NULL)
@@ -2471,6 +2516,7 @@ struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_ada
        st->config = config;
        st->i2c = i2c;
        st->fe = fe;
+       mutex_init(&st->i2c_buffer_lock);
        fe->tuner_priv = st;
 
        if (dib0090_fw_reset_digital(fe, st->config) != 0)
index 79cb1c20df24e7aece91b3f7e900efb7e13d1c57..dbb76d75c932fecb36784a62be7451a3d63fb4c8 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/mutex.h>
 
 #include "dvb_frontend.h"
 
@@ -55,6 +56,7 @@ struct dib7000m_state {
        struct i2c_msg msg[2];
        u8 i2c_write_buffer[4];
        u8 i2c_read_buffer[2];
+       struct mutex i2c_buffer_lock;
 };
 
 enum dib7000m_power_mode {
@@ -69,6 +71,13 @@ enum dib7000m_power_mode {
 
 static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
 {
+       u16 ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return 0;
+       }
+
        state->i2c_write_buffer[0] = (reg >> 8) | 0x80;
        state->i2c_write_buffer[1] = reg & 0xff;
 
@@ -85,11 +94,21 @@ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
        if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
                dprintk("i2c read error on %d",reg);
 
-       return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+       ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+       mutex_unlock(&state->i2c_buffer_lock);
+
+       return ret;
 }
 
 static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
 {
+       int ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+
        state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
        state->i2c_write_buffer[1] = reg & 0xff;
        state->i2c_write_buffer[2] = (val >> 8) & 0xff;
@@ -101,7 +120,10 @@ static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
        state->msg[0].buf = state->i2c_write_buffer;
        state->msg[0].len = 4;
 
-       return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+       ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ?
+                       -EREMOTEIO : 0);
+       mutex_unlock(&state->i2c_buffer_lock);
+       return ret;
 }
 static void dib7000m_write_tab(struct dib7000m_state *state, u16 *buf)
 {
@@ -1385,6 +1407,7 @@ struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
        demod                   = &st->demod;
        demod->demodulator_priv = st;
        memcpy(&st->demod.ops, &dib7000m_ops, sizeof(struct dvb_frontend_ops));
+       mutex_init(&st->i2c_buffer_lock);
 
        st->timf_default = cfg->bw->timf;
 
index 0c9f40c2a251fbaad72fe3520e6f6ec48d76cb13..292bc19746b917ed0d8ab915eae1ed9d177650c1 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/mutex.h>
 
 #include "dvb_math.h"
 #include "dvb_frontend.h"
@@ -68,6 +69,7 @@ struct dib7000p_state {
        struct i2c_msg msg[2];
        u8 i2c_write_buffer[4];
        u8 i2c_read_buffer[2];
+       struct mutex i2c_buffer_lock;
 };
 
 enum dib7000p_power_mode {
@@ -81,6 +83,13 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff);
 
 static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
 {
+       u16 ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return 0;
+       }
+
        state->i2c_write_buffer[0] = reg >> 8;
        state->i2c_write_buffer[1] = reg & 0xff;
 
@@ -97,11 +106,20 @@ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
        if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
                dprintk("i2c read error on %d", reg);
 
-       return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+       ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+       mutex_unlock(&state->i2c_buffer_lock);
+       return ret;
 }
 
 static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
 {
+       int ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+
        state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
        state->i2c_write_buffer[1] = reg & 0xff;
        state->i2c_write_buffer[2] = (val >> 8) & 0xff;
@@ -113,7 +131,10 @@ static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
        state->msg[0].buf = state->i2c_write_buffer;
        state->msg[0].len = 4;
 
-       return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+       ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ?
+                       -EREMOTEIO : 0);
+       mutex_unlock(&state->i2c_buffer_lock);
+       return ret;
 }
 
 static void dib7000p_write_tab(struct dib7000p_state *state, u16 * buf)
@@ -1646,6 +1667,7 @@ int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defau
                return -ENOMEM;
 
        dpst->i2c_adap = i2c;
+       mutex_init(&dpst->i2c_buffer_lock);
 
        for (k = no_of_demods - 1; k >= 0; k--) {
                dpst->cfg = cfg[k];
@@ -2324,6 +2346,7 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
        demod = &st->demod;
        demod->demodulator_priv = st;
        memcpy(&st->demod.ops, &dib7000p_ops, sizeof(struct dvb_frontend_ops));
+       mutex_init(&st->i2c_buffer_lock);
 
        dib7000p_write_word(st, 1287, 0x0003);  /* sram lead in, rdy */
 
@@ -2333,8 +2356,9 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
        st->version = dib7000p_read_word(st, 897);
 
        /* FIXME: make sure the dev.parent field is initialized, or else
-               request_firmware() will hit an OOPS (this should be moved somewhere
-               more common) */
+          request_firmware() will hit an OOPS (this should be moved somewhere
+          more common) */
+       st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
 
        dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
 
index 7d2ea112ae2bf9217363f3f0172a81d6340b3f0f..fe284d5292f5422f16a846e1077c7df34c576098 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/mutex.h>
+
 #include "dvb_math.h"
 
 #include "dvb_frontend.h"
@@ -37,6 +39,7 @@ struct i2c_device {
        u8 addr;
        u8 *i2c_write_buffer;
        u8 *i2c_read_buffer;
+       struct mutex *i2c_buffer_lock;
 };
 
 struct dib8000_state {
@@ -77,6 +80,7 @@ struct dib8000_state {
        struct i2c_msg msg[2];
        u8 i2c_write_buffer[4];
        u8 i2c_read_buffer[2];
+       struct mutex i2c_buffer_lock;
 };
 
 enum dib8000_power_mode {
@@ -86,24 +90,39 @@ enum dib8000_power_mode {
 
 static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
 {
+       u16 ret;
        struct i2c_msg msg[2] = {
-               {.addr = i2c->addr >> 1, .flags = 0,
-                       .buf = i2c->i2c_write_buffer, .len = 2},
-               {.addr = i2c->addr >> 1, .flags = I2C_M_RD,
-                       .buf = i2c->i2c_read_buffer, .len = 2},
+               {.addr = i2c->addr >> 1, .flags = 0, .len = 2},
+               {.addr = i2c->addr >> 1, .flags = I2C_M_RD, .len = 2},
        };
 
+       if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return 0;
+       }
+
+       msg[0].buf    = i2c->i2c_write_buffer;
        msg[0].buf[0] = reg >> 8;
        msg[0].buf[1] = reg & 0xff;
+       msg[1].buf    = i2c->i2c_read_buffer;
 
        if (i2c_transfer(i2c->adap, msg, 2) != 2)
                dprintk("i2c read error on %d", reg);
 
-       return (msg[1].buf[0] << 8) | msg[1].buf[1];
+       ret = (msg[1].buf[0] << 8) | msg[1].buf[1];
+       mutex_unlock(i2c->i2c_buffer_lock);
+       return ret;
 }
 
 static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
 {
+       u16 ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return 0;
+       }
+
        state->i2c_write_buffer[0] = reg >> 8;
        state->i2c_write_buffer[1] = reg & 0xff;
 
@@ -120,7 +139,10 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
        if (i2c_transfer(state->i2c.adap, state->msg, 2) != 2)
                dprintk("i2c read error on %d", reg);
 
-       return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+       ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+       mutex_unlock(&state->i2c_buffer_lock);
+
+       return ret;
 }
 
 static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
@@ -135,22 +157,35 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
 
 static int dib8000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
 {
-       struct i2c_msg msg = {.addr = i2c->addr >> 1, .flags = 0,
-               .buf = i2c->i2c_write_buffer, .len = 4};
+       struct i2c_msg msg = {.addr = i2c->addr >> 1, .flags = 0, .len = 4};
        int ret = 0;
 
+       if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+
+       msg.buf    = i2c->i2c_write_buffer;
        msg.buf[0] = (reg >> 8) & 0xff;
        msg.buf[1] = reg & 0xff;
        msg.buf[2] = (val >> 8) & 0xff;
        msg.buf[3] = val & 0xff;
 
        ret = i2c_transfer(i2c->adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+       mutex_unlock(i2c->i2c_buffer_lock);
 
        return ret;
 }
 
 static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
 {
+       int ret;
+
+       if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+
        state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
        state->i2c_write_buffer[1] = reg & 0xff;
        state->i2c_write_buffer[2] = (val >> 8) & 0xff;
@@ -162,7 +197,11 @@ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
        state->msg[0].buf = state->i2c_write_buffer;
        state->msg[0].len = 4;
 
-       return i2c_transfer(state->i2c.adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+       ret = (i2c_transfer(state->i2c.adap, state->msg, 1) != 1 ?
+                       -EREMOTEIO : 0);
+       mutex_unlock(&state->i2c_buffer_lock);
+
+       return ret;
 }
 
 static const s16 coeff_2k_sb_1seg_dqpsk[8] = {
@@ -2434,8 +2473,15 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
        if (!client.i2c_read_buffer) {
                dprintk("%s: not enough memory", __func__);
                ret = -ENOMEM;
-               goto error_memory;
+               goto error_memory_read;
+       }
+       client.i2c_buffer_lock = kzalloc(sizeof(struct mutex), GFP_KERNEL);
+       if (!client.i2c_buffer_lock) {
+               dprintk("%s: not enough memory", __func__);
+               ret = -ENOMEM;
+               goto error_memory_lock;
        }
+       mutex_init(client.i2c_buffer_lock);
 
        for (k = no_of_demods - 1; k >= 0; k--) {
                /* designated i2c address */
@@ -2476,8 +2522,10 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
        }
 
 error:
+       kfree(client.i2c_buffer_lock);
+error_memory_lock:
        kfree(client.i2c_read_buffer);
-error_memory:
+error_memory_read:
        kfree(client.i2c_write_buffer);
 
        return ret;
@@ -2581,6 +2629,8 @@ struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, s
        state->i2c.addr = i2c_addr;
        state->i2c.i2c_write_buffer = state->i2c_write_buffer;
        state->i2c.i2c_read_buffer = state->i2c_read_buffer;
+       mutex_init(&state->i2c_buffer_lock);
+       state->i2c.i2c_buffer_lock = &state->i2c_buffer_lock;
        state->gpio_val = cfg->gpio_val;
        state->gpio_dir = cfg->gpio_dir;
 
index a0855883b5ce7afa6a97d4561b06ee5a55332b6f..b931074a952172a0718aada770eb195aaf0d083b 100644 (file)
@@ -38,6 +38,15 @@ struct i2c_device {
 #define DibInitLock(lock) mutex_init(lock)
 #define DibFreeLock(lock)
 
+struct dib9000_pid_ctrl {
+#define DIB9000_PID_FILTER_CTRL 0
+#define DIB9000_PID_FILTER      1
+       u8 cmd;
+       u8 id;
+       u16 pid;
+       u8 onoff;
+};
+
 struct dib9000_state {
        struct i2c_device i2c;
 
@@ -99,6 +108,10 @@ struct dib9000_state {
        struct i2c_msg msg[2];
        u8 i2c_write_buffer[255];
        u8 i2c_read_buffer[255];
+       DIB_LOCK demod_lock;
+       u8 get_frontend_internal;
+       struct dib9000_pid_ctrl pid_ctrl[10];
+       s8 pid_ctrl_index; /* -1: empty list; -2: do not use the list */
 };
 
 static const u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1743,19 +1756,56 @@ EXPORT_SYMBOL(dib9000_set_gpio);
 int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
 {
        struct dib9000_state *state = fe->demodulator_priv;
-       u16 val = dib9000_read_word(state, 294 + 1) & 0xffef;
+       u16 val;
+       int ret;
+
+       if ((state->pid_ctrl_index != -2) && (state->pid_ctrl_index < 9)) {
+               /* postpone the pid filtering cmd */
+               dprintk("pid filter cmd postpone");
+               state->pid_ctrl_index++;
+               state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER_CTRL;
+               state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
+               return 0;
+       }
+
+       DibAcquireLock(&state->demod_lock);
+
+       val = dib9000_read_word(state, 294 + 1) & 0xffef;
        val |= (onoff & 0x1) << 4;
 
        dprintk("PID filter enabled %d", onoff);
-       return dib9000_write_word(state, 294 + 1, val);
+       ret = dib9000_write_word(state, 294 + 1, val);
+       DibReleaseLock(&state->demod_lock);
+       return ret;
+
 }
 EXPORT_SYMBOL(dib9000_fw_pid_filter_ctrl);
 
 int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
 {
        struct dib9000_state *state = fe->demodulator_priv;
+       int ret;
+
+       if (state->pid_ctrl_index != -2) {
+               /* postpone the pid filtering cmd */
+               dprintk("pid filter postpone");
+               if (state->pid_ctrl_index < 9) {
+                       state->pid_ctrl_index++;
+                       state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER;
+                       state->pid_ctrl[state->pid_ctrl_index].id = id;
+                       state->pid_ctrl[state->pid_ctrl_index].pid = pid;
+                       state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
+               } else
+                       dprintk("can not add any more pid ctrl cmd");
+               return 0;
+       }
+
+       DibAcquireLock(&state->demod_lock);
        dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
-       return dib9000_write_word(state, 300 + 1 + id, onoff ? (1 << 13) | pid : 0);
+       ret = dib9000_write_word(state, 300 + 1 + id,
+                       onoff ? (1 << 13) | pid : 0);
+       DibReleaseLock(&state->demod_lock);
+       return ret;
 }
 EXPORT_SYMBOL(dib9000_fw_pid_filter);
 
@@ -1778,6 +1828,7 @@ static void dib9000_release(struct dvb_frontend *demod)
        DibFreeLock(&state->platform.risc.mbx_lock);
        DibFreeLock(&state->platform.risc.mem_lock);
        DibFreeLock(&state->platform.risc.mem_mbx_lock);
+       DibFreeLock(&state->demod_lock);
        dibx000_exit_i2c_master(&st->i2c_master);
 
        i2c_del_adapter(&st->tuner_adap);
@@ -1795,14 +1846,19 @@ static int dib9000_sleep(struct dvb_frontend *fe)
 {
        struct dib9000_state *state = fe->demodulator_priv;
        u8 index_frontend;
-       int ret;
+       int ret = 0;
 
+       DibAcquireLock(&state->demod_lock);
        for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
                ret = state->fe[index_frontend]->ops.sleep(state->fe[index_frontend]);
                if (ret < 0)
-                       return ret;
+                       goto error;
        }
-       return dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
+       ret = dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
+
+error:
+       DibReleaseLock(&state->demod_lock);
+       return ret;
 }
 
 static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune)
@@ -1816,7 +1872,10 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
        struct dib9000_state *state = fe->demodulator_priv;
        u8 index_frontend, sub_index_frontend;
        fe_status_t stat;
-       int ret;
+       int ret = 0;
+
+       if (state->get_frontend_internal == 0)
+               DibAcquireLock(&state->demod_lock);
 
        for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
                state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
@@ -1846,14 +1905,15 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
                                            state->fe[index_frontend]->dtv_property_cache.rolloff;
                                }
                        }
-                       return 0;
+                       ret = 0;
+                       goto return_value;
                }
        }
 
        /* get the channel from master chip */
        ret = dib9000_fw_get_channel(fe, fep);
        if (ret != 0)
-               return ret;
+               goto return_value;
 
        /* synchronize the cache with the other frontends */
        for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
@@ -1866,8 +1926,12 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
                state->fe[index_frontend]->dtv_property_cache.code_rate_LP = fe->dtv_property_cache.code_rate_LP;
                state->fe[index_frontend]->dtv_property_cache.rolloff = fe->dtv_property_cache.rolloff;
        }
+       ret = 0;
 
-       return 0;
+return_value:
+       if (state->get_frontend_internal == 0)
+               DibReleaseLock(&state->demod_lock);
+       return ret;
 }
 
 static int dib9000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
@@ -1912,6 +1976,10 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
                dprintk("dib9000: must specify bandwidth ");
                return 0;
        }
+
+       state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */
+       DibAcquireLock(&state->demod_lock);
+
        fe->dtv_property_cache.delivery_system = SYS_DVBT;
 
        /* set the master status */
@@ -1974,13 +2042,18 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
        /* check the tune result */
        if (exit_condition == 1) {      /* tune failed */
                dprintk("tune failed");
+               DibReleaseLock(&state->demod_lock);
+               /* tune failed; put all the pid filtering cmd to junk */
+               state->pid_ctrl_index = -1;
                return 0;
        }
 
        dprintk("tune success on frontend%i", index_frontend_success);
 
        /* synchronize all the channel cache */
+       state->get_frontend_internal = 1;
        dib9000_get_frontend(state->fe[0], fep);
+       state->get_frontend_internal = 0;
 
        /* retune the other frontends with the found channel */
        channel_status.status = CHANNEL_STATUS_PARAMETERS_SET;
@@ -2025,6 +2098,28 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
        /* turn off the diversity for the last frontend */
        dib9000_fw_set_diversity_in(state->fe[index_frontend - 1], 0);
 
+       DibReleaseLock(&state->demod_lock);
+       if (state->pid_ctrl_index >= 0) {
+               u8 index_pid_filter_cmd;
+               u8 pid_ctrl_index = state->pid_ctrl_index;
+
+               state->pid_ctrl_index = -2;
+               for (index_pid_filter_cmd = 0;
+                               index_pid_filter_cmd <= pid_ctrl_index;
+                               index_pid_filter_cmd++) {
+                       if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER_CTRL)
+                               dib9000_fw_pid_filter_ctrl(state->fe[0],
+                                               state->pid_ctrl[index_pid_filter_cmd].onoff);
+                       else if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER)
+                               dib9000_fw_pid_filter(state->fe[0],
+                                               state->pid_ctrl[index_pid_filter_cmd].id,
+                                               state->pid_ctrl[index_pid_filter_cmd].pid,
+                                               state->pid_ctrl[index_pid_filter_cmd].onoff);
+               }
+       }
+       /* do not postpone any more the pid filtering */
+       state->pid_ctrl_index = -2;
+
        return 0;
 }
 
@@ -2041,6 +2136,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
        u8 index_frontend;
        u16 lock = 0, lock_slave = 0;
 
+       DibAcquireLock(&state->demod_lock);
        for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
                lock_slave |= dib9000_read_lock(state->fe[index_frontend]);
 
@@ -2059,6 +2155,8 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
        if ((lock & 0x0008) || (lock_slave & 0x0008))
                *stat |= FE_HAS_LOCK;
 
+       DibReleaseLock(&state->demod_lock);
+
        return 0;
 }
 
@@ -2066,10 +2164,14 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
 {
        struct dib9000_state *state = fe->demodulator_priv;
        u16 *c;
+       int ret = 0;
 
+       DibAcquireLock(&state->demod_lock);
        DibAcquireLock(&state->platform.risc.mem_mbx_lock);
-       if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
-               return -EIO;
+       if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
+               ret = -EIO;
+               goto error;
+       }
        dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR,
                        state->i2c_read_buffer, 16 * 2);
        DibReleaseLock(&state->platform.risc.mem_mbx_lock);
@@ -2077,7 +2179,10 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
        c = (u16 *)state->i2c_read_buffer;
 
        *ber = c[10] << 16 | c[11];
-       return 0;
+
+error:
+       DibReleaseLock(&state->demod_lock);
+       return ret;
 }
 
 static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
@@ -2086,7 +2191,9 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
        u8 index_frontend;
        u16 *c = (u16 *)state->i2c_read_buffer;
        u16 val;
+       int ret = 0;
 
+       DibAcquireLock(&state->demod_lock);
        *strength = 0;
        for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
                state->fe[index_frontend]->ops.read_signal_strength(state->fe[index_frontend], &val);
@@ -2097,8 +2204,10 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
        }
 
        DibAcquireLock(&state->platform.risc.mem_mbx_lock);
-       if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
-               return -EIO;
+       if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
+               ret = -EIO;
+               goto error;
+       }
        dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
        DibReleaseLock(&state->platform.risc.mem_mbx_lock);
 
@@ -2107,7 +2216,10 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
                *strength = 65535;
        else
                *strength += val;
-       return 0;
+
+error:
+       DibReleaseLock(&state->demod_lock);
+       return ret;
 }
 
 static u32 dib9000_get_snr(struct dvb_frontend *fe)
@@ -2151,6 +2263,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
        u8 index_frontend;
        u32 snr_master;
 
+       DibAcquireLock(&state->demod_lock);
        snr_master = dib9000_get_snr(fe);
        for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
                snr_master += dib9000_get_snr(state->fe[index_frontend]);
@@ -2161,6 +2274,8 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
        } else
                *snr = 0;
 
+       DibReleaseLock(&state->demod_lock);
+
        return 0;
 }
 
@@ -2168,15 +2283,22 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
 {
        struct dib9000_state *state = fe->demodulator_priv;
        u16 *c = (u16 *)state->i2c_read_buffer;
+       int ret = 0;
 
+       DibAcquireLock(&state->demod_lock);
        DibAcquireLock(&state->platform.risc.mem_mbx_lock);
-       if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
-               return -EIO;
+       if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
+               ret = -EIO;
+               goto error;
+       }
        dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
        DibReleaseLock(&state->platform.risc.mem_mbx_lock);
 
        *unc = c[12];
-       return 0;
+
+error:
+       DibReleaseLock(&state->demod_lock);
+       return ret;
 }
 
 int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, u8 first_addr)
@@ -2322,6 +2444,10 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
        DibInitLock(&st->platform.risc.mbx_lock);
        DibInitLock(&st->platform.risc.mem_lock);
        DibInitLock(&st->platform.risc.mem_mbx_lock);
+       DibInitLock(&st->demod_lock);
+       st->get_frontend_internal = 0;
+
+       st->pid_ctrl_index = -2;
 
        st->fe[0] = fe;
        fe->demodulator_priv = st;
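
The dib9000 changes above combine two ideas: a demod_lock that serializes every frontend operation (with the get_frontend_internal flag letting set_frontend() call get_frontend() without deadlocking on its own lock), and a small queue that postpones PID-filter commands arriving while a tune is in flight. Per the driver's comment, pid_ctrl_index of -2 means apply immediately, -1 means queuing is armed but empty, and 0..9 is the last queued slot; set_frontend() arms the queue before tuning and replays it afterwards. A rough, self-contained sketch of the queue half only (hypothetical names, plain C, not the driver's code):

#define MAX_PID_CMDS 10

struct pid_cmd { unsigned char cmd, id, onoff; unsigned short pid; };

struct demod {
	signed char pid_ctrl_index;	/* -2 apply now, -1 armed+empty, >=0 last slot */
	struct pid_cmd pid_ctrl[MAX_PID_CMDS];
};

static int queue_or_apply(struct demod *d, struct pid_cmd c,
			  int (*apply)(struct demod *, struct pid_cmd))
{
	if (d->pid_ctrl_index != -2) {		/* a tune is in progress */
		if (d->pid_ctrl_index < MAX_PID_CMDS - 1)
			d->pid_ctrl[++d->pid_ctrl_index] = c;
		return 0;			/* postponed (dropped if full) */
	}
	return apply(d, c);			/* demod idle: apply immediately */
}

static void replay_queued(struct demod *d,
			  int (*apply)(struct demod *, struct pid_cmd))
{
	signed char i, last = d->pid_ctrl_index;

	d->pid_ctrl_index = -2;			/* disable queuing before replay */
	for (i = 0; i <= last; i++)
		apply(d, d->pid_ctrl[i]);
}
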
index dc5d17a6757944e97670358c5f1ea46428190cd8..774d507b66cc7e841b412efe4c06ea4a6c8c1be5 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/i2c.h>
+#include <linux/mutex.h>
 
 #include "dibx000_common.h"
 
@@ -10,6 +11,13 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
 
 static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
 {
+       int ret;
+
+       if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+
        mst->i2c_write_buffer[0] = (reg >> 8) & 0xff;
        mst->i2c_write_buffer[1] = reg & 0xff;
        mst->i2c_write_buffer[2] = (val >> 8) & 0xff;
@@ -21,11 +29,21 @@ static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
        mst->msg[0].buf = mst->i2c_write_buffer;
        mst->msg[0].len = 4;
 
-       return i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
+       ret = i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
+       mutex_unlock(&mst->i2c_buffer_lock);
+
+       return ret;
 }
 
 static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
 {
+       u16 ret;
+
+       if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return 0;
+       }
+
        mst->i2c_write_buffer[0] = reg >> 8;
        mst->i2c_write_buffer[1] = reg & 0xff;
 
@@ -42,7 +60,10 @@ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
        if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
                dprintk("i2c read error on %d", reg);
 
-       return (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
+       ret = (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
+       mutex_unlock(&mst->i2c_buffer_lock);
+
+       return ret;
 }
 
 static int dibx000_is_i2c_done(struct dibx000_i2c_master *mst)
@@ -257,6 +278,7 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
                                        struct i2c_msg msg[], int num)
 {
        struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+       int ret;
 
        if (num > 32) {
                dprintk("%s: too much I2C message to be transmitted (%i).\
@@ -264,10 +286,15 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
                return -ENOMEM;
        }
 
-       memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
-
        dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
 
+       if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+
+       memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
+
        /* open the gate */
        dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
        mst->msg[0].addr = mst->i2c_addr;
@@ -282,7 +309,11 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
        mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
        mst->msg[num + 1].len = 4;
 
-       return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
+       ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ?
+                       num : -EIO);
+
+       mutex_unlock(&mst->i2c_buffer_lock);
+       return ret;
 }
 
 static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
@@ -294,6 +325,7 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
                                        struct i2c_msg msg[], int num)
 {
        struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+       int ret;
 
        if (num > 32) {
                dprintk("%s: too much I2C message to be transmitted (%i).\
@@ -301,10 +333,14 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
                return -ENOMEM;
        }
 
-       memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
-
        dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
 
+       if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+       memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
+
        /* open the gate */
        dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
        mst->msg[0].addr = mst->i2c_addr;
@@ -319,7 +355,10 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
        mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
        mst->msg[num + 1].len = 4;
 
-       return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
+       ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ?
+                       num : -EIO);
+       mutex_unlock(&mst->i2c_buffer_lock);
+       return ret;
 }
 
 static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
@@ -390,8 +429,18 @@ static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
 int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
                                struct i2c_adapter *i2c_adap, u8 i2c_addr)
 {
-       u8 tx[4];
-       struct i2c_msg m = {.addr = i2c_addr >> 1,.buf = tx,.len = 4 };
+       int ret;
+
+       mutex_init(&mst->i2c_buffer_lock);
+       if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
+               dprintk("could not acquire lock");
+               return -EINVAL;
+       }
+       memset(mst->msg, 0, sizeof(struct i2c_msg));
+       mst->msg[0].addr = i2c_addr >> 1;
+       mst->msg[0].flags = 0;
+       mst->msg[0].buf = mst->i2c_write_buffer;
+       mst->msg[0].len = 4;
 
        mst->device_rev = device_rev;
        mst->i2c_adap = i2c_adap;
@@ -431,9 +480,12 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
                                "DiBX000: could not initialize the master i2c_adapter\n");
 
        /* initialize the i2c-master by closing the gate */
-       dibx000_i2c_gate_ctrl(mst, tx, 0, 0);
+       dibx000_i2c_gate_ctrl(mst, mst->i2c_write_buffer, 0, 0);
+
+       ret = (i2c_transfer(i2c_adap, mst->msg, 1) == 1);
+       mutex_unlock(&mst->i2c_buffer_lock);
 
-       return i2c_transfer(i2c_adap, &m, 1) == 1;
+       return ret;
 }
 
 EXPORT_SYMBOL(dibx000_init_i2c_master);
index f031165c04596df1e3774db6f039c28ba543c9df..5e011474be430658b1b13d283bd2f7e11d5c2ca6 100644 (file)
@@ -33,6 +33,7 @@ struct dibx000_i2c_master {
        struct i2c_msg msg[34];
        u8 i2c_write_buffer[8];
        u8 i2c_read_buffer[2];
+       struct mutex i2c_buffer_lock;
 };
 
 extern int dibx000_init_i2c_master(struct dibx000_i2c_master *mst,
index 43971e63baa727c5a3c7de9cbc05dc4c8fc4207e..aa63d687d276f17a06359748a5650687badbe060 100644 (file)
@@ -104,8 +104,8 @@ static int i2c_write_demod_bytes (struct lgdt330x_state* state,
  * then reads the data returned for (len) bytes.
  */
 
-static u8 i2c_read_demod_bytes (struct lgdt330x_state* state,
-                              enum I2C_REG reg, u8* buf, int len)
+static int i2c_read_demod_bytes(struct lgdt330x_state *state,
+                               enum I2C_REG reg, u8 *buf, int len)
 {
        u8 wr [] = { reg };
        struct i2c_msg msg [] = {
@@ -118,6 +118,8 @@ static u8 i2c_read_demod_bytes (struct lgdt330x_state* state,
        ret = i2c_transfer(state->i2c, msg, 2);
        if (ret != 2) {
                printk(KERN_WARNING "lgdt330x: %s: addr 0x%02x select 0x%02x error (ret == %i)\n", __func__, state->config->demod_address, reg, ret);
+               if (ret >= 0)
+                       ret = -EIO;
        } else {
                ret = 0;
        }
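
The lgdt330x hunk above widens the helper's return type from u8 to int and forces a negative errno when i2c_transfer() returns short, presumably because a negative error stored in a u8 wraps to a small positive value and a caller testing ret < 0 can never see it. A standalone illustration of that truncation (plain C, -5 standing in for -EIO):

#include <stdio.h>

int main(void)
{
	unsigned char as_u8 = -5;	/* an errno squeezed into a u8 */
	int as_int = -5;

	printf("u8:  %d\n", as_u8);	/* prints 251: the error is lost */
	printf("int: %d\n", as_int);	/* prints -5:  the error survives */
	return 0;
}
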
index 0c8164a2cc36caf271c34848049290068b4ba855..d755407fb4f90873663860734a02474fcd545aef 100644 (file)
@@ -541,6 +541,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { USB_DEVICE(0x2040, 0xc090),
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+       { USB_DEVICE(0x2040, 0xc0a0),
+               .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { } /* Terminating entry */
        };
 
index a43ed6c41bfc475361fd9b0155826dfd69a3746e..12b91ae1b208f17e1e3f995d460dc48d36285765 100644 (file)
@@ -1017,22 +1017,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
 
        spin_lock_init(&dev->hw_lock);
 
-       /* claim the resources */
-       error = -EBUSY;
-       dev->hw_io = pnp_port_start(pnp_dev, 0);
-       if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
-               dev->hw_io = -1;
-               dev->irq = -1;
-               goto error;
-       }
-
-       dev->irq = pnp_irq(pnp_dev, 0);
-       if (request_irq(dev->irq, ene_isr,
-                       IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
-               dev->irq = -1;
-               goto error;
-       }
-
        pnp_set_drvdata(pnp_dev, dev);
        dev->pnp_dev = pnp_dev;
 
@@ -1085,6 +1069,22 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
        device_set_wakeup_capable(&pnp_dev->dev, true);
        device_set_wakeup_enable(&pnp_dev->dev, true);
 
+       /* claim the resources */
+       error = -EBUSY;
+       dev->hw_io = pnp_port_start(pnp_dev, 0);
+       if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
+               dev->hw_io = -1;
+               dev->irq = -1;
+               goto error;
+       }
+
+       dev->irq = pnp_irq(pnp_dev, 0);
+       if (request_irq(dev->irq, ene_isr,
+                       IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
+               dev->irq = -1;
+               goto error;
+       }
+
        error = rc_register_device(rdev);
        if (error < 0)
                goto error;
index 7f7079b12f2321ab2174a764dca2959e0c0a6952..4218f7369c52320d914ed3438dc1174bffa06abc 100644 (file)
@@ -504,16 +504,6 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
 
        spin_lock_init(&fintek->fintek_lock);
 
-       ret = -EBUSY;
-       /* now claim resources */
-       if (!request_region(fintek->cir_addr,
-                           fintek->cir_port_len, FINTEK_DRIVER_NAME))
-               goto failure;
-
-       if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
-                       FINTEK_DRIVER_NAME, (void *)fintek))
-               goto failure;
-
        pnp_set_drvdata(pdev, fintek);
        fintek->pdev = pdev;
 
@@ -548,6 +538,16 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
        /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
        rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
 
+       ret = -EBUSY;
+       /* now claim resources */
+       if (!request_region(fintek->cir_addr,
+                           fintek->cir_port_len, FINTEK_DRIVER_NAME))
+               goto failure;
+
+       if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
+                       FINTEK_DRIVER_NAME, (void *)fintek))
+               goto failure;
+
        ret = rc_register_device(rdev);
        if (ret)
                goto failure;
index ecd3d028076852b3ee8da36794bbac1c58a59157..c5ca0914087b3672e0ba3248c9e0ff2ab430743c 100644 (file)
@@ -1519,16 +1519,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
        /* initialize raw event */
        init_ir_raw_event(&itdev->rawir);
 
-       ret = -EBUSY;
-       /* now claim resources */
-       if (!request_region(itdev->cir_addr,
-                               dev_desc->io_region_size, ITE_DRIVER_NAME))
-               goto failure;
-
-       if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
-                       ITE_DRIVER_NAME, (void *)itdev))
-               goto failure;
-
        /* set driver data into the pnp device */
        pnp_set_drvdata(pdev, itdev);
        itdev->pdev = pdev;
@@ -1604,6 +1594,16 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
        rdev->driver_name = ITE_DRIVER_NAME;
        rdev->map_name = RC_MAP_RC6_MCE;
 
+       ret = -EBUSY;
+       /* now claim resources */
+       if (!request_region(itdev->cir_addr,
+                               dev_desc->io_region_size, ITE_DRIVER_NAME))
+               goto failure;
+
+       if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
+                       ITE_DRIVER_NAME, (void *)itdev))
+               goto failure;
+
        ret = rc_register_device(rdev);
        if (ret)
                goto failure;
index 9fd019e6b9b5125a99ee4b952c47a3dd37d83c71..c212276202f99717dbab547a91ac02737156c363 100644 (file)
@@ -1027,24 +1027,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        spin_lock_init(&nvt->nvt_lock);
        spin_lock_init(&nvt->tx.lock);
 
-       ret = -EBUSY;
-       /* now claim resources */
-       if (!request_region(nvt->cir_addr,
-                           CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
-               goto failure;
-
-       if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
-                       NVT_DRIVER_NAME, (void *)nvt))
-               goto failure;
-
-       if (!request_region(nvt->cir_wake_addr,
-                           CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
-               goto failure;
-
-       if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
-                       NVT_DRIVER_NAME, (void *)nvt))
-               goto failure;
-
        pnp_set_drvdata(pdev, nvt);
        nvt->pdev = pdev;
 
@@ -1091,6 +1073,24 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        rdev->tx_resolution = XYZ;
 #endif
 
+       ret = -EBUSY;
+       /* now claim resources */
+       if (!request_region(nvt->cir_addr,
+                           CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+               goto failure;
+
+       if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
+                       NVT_DRIVER_NAME, (void *)nvt))
+               goto failure;
+
+       if (!request_region(nvt->cir_wake_addr,
+                           CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+               goto failure;
+
+       if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
+                       NVT_DRIVER_NAME, (void *)nvt))
+               goto failure;
+
        ret = rc_register_device(rdev);
        if (ret)
                goto failure;
index 5d06b899e85959a97e75fea8478b74897f36a0ef..9e55a0c9ac5eed91a2f5ee632dbddc09506220b0 100644 (file)
@@ -1003,39 +1003,10 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
                "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
                data->wbase, data->ebase, data->sbase, data->irq);
 
-       if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
-               dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
-                       data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
-               err = -EBUSY;
-               goto exit_free_data;
-       }
-
-       if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
-               dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
-                       data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
-               err = -EBUSY;
-               goto exit_release_wbase;
-       }
-
-       if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
-               dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
-                       data->sbase, data->sbase + SP_IOMEM_LEN - 1);
-               err = -EBUSY;
-               goto exit_release_ebase;
-       }
-
-       err = request_irq(data->irq, wbcir_irq_handler,
-                         IRQF_DISABLED, DRVNAME, device);
-       if (err) {
-               dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
-               err = -EBUSY;
-               goto exit_release_sbase;
-       }
-
        led_trigger_register_simple("cir-tx", &data->txtrigger);
        if (!data->txtrigger) {
                err = -ENOMEM;
-               goto exit_free_irq;
+               goto exit_free_data;
        }
 
        led_trigger_register_simple("cir-rx", &data->rxtrigger);
@@ -1058,6 +1029,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
                goto exit_unregister_led;
        }
 
+       data->dev->driver_type = RC_DRIVER_IR_RAW;
        data->dev->driver_name = WBCIR_NAME;
        data->dev->input_name = WBCIR_NAME;
        data->dev->input_phys = "wbcir/cir0";
@@ -1073,9 +1045,38 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
        data->dev->priv = data;
        data->dev->dev.parent = &device->dev;
 
+       if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
+               dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+                       data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
+               err = -EBUSY;
+               goto exit_free_rc;
+       }
+
+       if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
+               dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+                       data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
+               err = -EBUSY;
+               goto exit_release_wbase;
+       }
+
+       if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
+               dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+                       data->sbase, data->sbase + SP_IOMEM_LEN - 1);
+               err = -EBUSY;
+               goto exit_release_ebase;
+       }
+
+       err = request_irq(data->irq, wbcir_irq_handler,
+                         IRQF_DISABLED, DRVNAME, device);
+       if (err) {
+               dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
+               err = -EBUSY;
+               goto exit_release_sbase;
+       }
+
        err = rc_register_device(data->dev);
        if (err)
-               goto exit_free_rc;
+               goto exit_free_irq;
 
        device_init_wakeup(&device->dev, 1);
 
@@ -1083,14 +1084,6 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
 
        return 0;
 
-exit_free_rc:
-       rc_free_device(data->dev);
-exit_unregister_led:
-       led_classdev_unregister(&data->led);
-exit_unregister_rxtrigger:
-       led_trigger_unregister_simple(data->rxtrigger);
-exit_unregister_txtrigger:
-       led_trigger_unregister_simple(data->txtrigger);
 exit_free_irq:
        free_irq(data->irq, device);
 exit_release_sbase:
@@ -1099,6 +1092,14 @@ exit_release_ebase:
        release_region(data->ebase, EHFUNC_IOMEM_LEN);
 exit_release_wbase:
        release_region(data->wbase, WAKEUP_IOMEM_LEN);
+exit_free_rc:
+       rc_free_device(data->dev);
+exit_unregister_led:
+       led_classdev_unregister(&data->led);
+exit_unregister_rxtrigger:
+       led_trigger_unregister_simple(data->rxtrigger);
+exit_unregister_txtrigger:
+       led_trigger_unregister_simple(data->txtrigger);
 exit_free_data:
        kfree(data);
        pnp_set_drvdata(device, NULL);
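
The ene_ir, fintek-cir, ite-cir, nvt-cir and winbond-cir hunks above all make the same move: request_region()/request_irq() are deferred until just before rc_register_device(), so a shared interrupt that fires the moment the IRQ is claimed finds a fully initialized device rather than half-filled state, and the error labels are reshuffled to unwind in the reverse of the new acquisition order. A structural sketch of that ordering, with a hypothetical device type and helpers (not any of the drivers verbatim):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <media/rc-core.h>

struct my_dev {				/* hypothetical driver state */
	spinlock_t lock;
	struct rc_dev *rdev;
	unsigned long io, io_len;
	int irq;
};

static irqreturn_t example_isr(int irq, void *cookie)
{
	/* touches my_dev fields, so they must be valid before request_irq() */
	return IRQ_HANDLED;
}

static int example_probe(struct my_dev *dev)
{
	int ret = -EBUSY;

	/* 1. finish every piece of software setup the ISR might depend on */
	spin_lock_init(&dev->lock);
	/* ... fill in dev->rdev callbacks, names, timeouts ... */

	/* 2. only then claim the resources that can fire the ISR */
	if (!request_region(dev->io, dev->io_len, "example"))
		goto err;
	if (request_irq(dev->irq, example_isr, IRQF_SHARED, "example", dev))
		goto err_region;

	/* 3. finally publish the device */
	ret = rc_register_device(dev->rdev);
	if (ret)
		goto err_irq;
	return 0;

err_irq:
	free_irq(dev->irq, dev);
err_region:
	release_region(dev->io, dev->io_len);
err:
	return ret;
}
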
index 3c315f94cc8512c33f6a43a264722411fcf43a93..2b5cd2145c39adbc5e05521ea71bdd9034ad3db9 100644 (file)
@@ -843,7 +843,7 @@ static int dvb_register(struct cx23885_tsport *port)
                        static struct xc2028_ctrl ctl = {
                                .fname   = XC3028L_DEFAULT_FIRMWARE,
                                .max_len = 64,
-                               .demod   = 5000,
+                               .demod   = XC3028_FE_DIBCOM52,
                                /* This is true for all demods with
                                        v36 firmware? */
                                .type    = XC2028_D2633,
index 514aea76eaa5b68b30acf66b0ce4fd4446421fb5..4c0394a8afd9150fc6746ee4a5fc1a8dd1bdc344 100644 (file)
@@ -284,12 +284,13 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev)
 
                hdpvr_config_call(dev, CTRL_START_STREAMING_VALUE, 0x00);
 
+               dev->status = STATUS_STREAMING;
+
                INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
                queue_work(dev->workqueue, &dev->worker);
 
                v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
                         "streaming started\n");
-               dev->status = STATUS_STREAMING;
 
                return 0;
        }
index e799331389b1b7dc3e92af6a15e8ef7469d5a89e..c4eca15baf61b02d779f7c5701cbecdb409c1e4f 100644 (file)
@@ -319,7 +319,17 @@ static struct tda829x_config tda829x_no_probe = {
        .probe_tuner = TDA829X_DONT_PROBE,
 };
 
+static struct tda18271_std_map hauppauge_tda18271_dvbt_std_map = {
+        .dvbt_6   = { .if_freq = 3300, .agc_mode = 3, .std = 4,
+                      .if_lvl = 1, .rfagc_top = 0x37, },
+        .dvbt_7   = { .if_freq = 3800, .agc_mode = 3, .std = 5,
+                      .if_lvl = 1, .rfagc_top = 0x37, },
+        .dvbt_8   = { .if_freq = 4300, .agc_mode = 3, .std = 6,
+                      .if_lvl = 1, .rfagc_top = 0x37, },
+};
+
 static struct tda18271_config hauppauge_tda18271_dvb_config = {
+       .std_map = &hauppauge_tda18271_dvbt_std_map,
        .gate    = TDA18271_GATE_ANALOG,
        .output_opt = TDA18271_OUTPUT_LT_OFF,
 };
index bdf19ada91725eb45a03d6b710691dd9778f6a97..e9babcb0887c23f7c5b37be06678ad30a98a3ac2 100644 (file)
@@ -36,7 +36,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
 static struct fimc_fmt fimc_formats[] = {
        {
                .name           = "RGB565",
-               .fourcc         = V4L2_PIX_FMT_RGB565X,
+               .fourcc         = V4L2_PIX_FMT_RGB565,
                .depth          = { 16 },
                .color          = S5P_FIMC_RGB565,
                .memplanes      = 1,
index 69822a4e727502b72f5ccaaa9ced649a27bdda44..c71369173fae6231735c3df7b123e9ff53135a46 100644 (file)
@@ -203,6 +203,66 @@ struct saa7164_board saa7164_boards[] = {
                        .i2c_reg_len    = REGLEN_8bit,
                } },
        },
+       [SAA7164_BOARD_HAUPPAUGE_HVR2200_4] = {
+               .name           = "Hauppauge WinTV-HVR2200",
+               .porta          = SAA7164_MPEG_DVB,
+               .portb          = SAA7164_MPEG_DVB,
+               .portc          = SAA7164_MPEG_ENCODER,
+               .portd          = SAA7164_MPEG_ENCODER,
+               .porte          = SAA7164_MPEG_VBI,
+               .portf          = SAA7164_MPEG_VBI,
+               .chiprev        = SAA7164_CHIP_REV3,
+               .unit           = {{
+                       .id             = 0x1d,
+                       .type           = SAA7164_UNIT_EEPROM,
+                       .name           = "4K EEPROM",
+                       .i2c_bus_nr     = SAA7164_I2C_BUS_0,
+                       .i2c_bus_addr   = 0xa0 >> 1,
+                       .i2c_reg_len    = REGLEN_8bit,
+               }, {
+                       .id             = 0x04,
+                       .type           = SAA7164_UNIT_TUNER,
+                       .name           = "TDA18271-1",
+                       .i2c_bus_nr     = SAA7164_I2C_BUS_1,
+                       .i2c_bus_addr   = 0xc0 >> 1,
+                       .i2c_reg_len    = REGLEN_8bit,
+               }, {
+                       .id             = 0x05,
+                       .type           = SAA7164_UNIT_ANALOG_DEMODULATOR,
+                       .name           = "TDA8290-1",
+                       .i2c_bus_nr     = SAA7164_I2C_BUS_1,
+                       .i2c_bus_addr   = 0x84 >> 1,
+                       .i2c_reg_len    = REGLEN_8bit,
+               }, {
+                       .id             = 0x1b,
+                       .type           = SAA7164_UNIT_TUNER,
+                       .name           = "TDA18271-2",
+                       .i2c_bus_nr     = SAA7164_I2C_BUS_2,
+                       .i2c_bus_addr   = 0xc0 >> 1,
+                       .i2c_reg_len    = REGLEN_8bit,
+               }, {
+                       .id             = 0x1c,
+                       .type           = SAA7164_UNIT_ANALOG_DEMODULATOR,
+                       .name           = "TDA8290-2",
+                       .i2c_bus_nr     = SAA7164_I2C_BUS_2,
+                       .i2c_bus_addr   = 0x84 >> 1,
+                       .i2c_reg_len    = REGLEN_8bit,
+               }, {
+                       .id             = 0x1e,
+                       .type           = SAA7164_UNIT_DIGITAL_DEMODULATOR,
+                       .name           = "TDA10048-1",
+                       .i2c_bus_nr     = SAA7164_I2C_BUS_1,
+                       .i2c_bus_addr   = 0x10 >> 1,
+                       .i2c_reg_len    = REGLEN_8bit,
+               }, {
+                       .id             = 0x1f,
+                       .type           = SAA7164_UNIT_DIGITAL_DEMODULATOR,
+                       .name           = "TDA10048-2",
+                       .i2c_bus_nr     = SAA7164_I2C_BUS_2,
+                       .i2c_bus_addr   = 0x12 >> 1,
+                       .i2c_reg_len    = REGLEN_8bit,
+               } },
+       },
        [SAA7164_BOARD_HAUPPAUGE_HVR2250] = {
                .name           = "Hauppauge WinTV-HVR2250",
                .porta          = SAA7164_MPEG_DVB,
@@ -426,6 +486,10 @@ struct saa7164_subid saa7164_subids[] = {
                .subvendor = 0x0070,
                .subdevice = 0x8851,
                .card      = SAA7164_BOARD_HAUPPAUGE_HVR2250_2,
+       }, {
+               .subvendor = 0x0070,
+               .subdevice = 0x8940,
+               .card      = SAA7164_BOARD_HAUPPAUGE_HVR2200_4,
        },
 };
 const unsigned int saa7164_idcount = ARRAY_SIZE(saa7164_subids);
@@ -469,6 +533,7 @@ void saa7164_gpio_setup(struct saa7164_dev *dev)
        case SAA7164_BOARD_HAUPPAUGE_HVR2200:
        case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
        case SAA7164_BOARD_HAUPPAUGE_HVR2200_3:
+       case SAA7164_BOARD_HAUPPAUGE_HVR2200_4:
        case SAA7164_BOARD_HAUPPAUGE_HVR2250:
        case SAA7164_BOARD_HAUPPAUGE_HVR2250_2:
        case SAA7164_BOARD_HAUPPAUGE_HVR2250_3:
@@ -549,6 +614,7 @@ void saa7164_card_setup(struct saa7164_dev *dev)
        case SAA7164_BOARD_HAUPPAUGE_HVR2200:
        case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
        case SAA7164_BOARD_HAUPPAUGE_HVR2200_3:
+       case SAA7164_BOARD_HAUPPAUGE_HVR2200_4:
        case SAA7164_BOARD_HAUPPAUGE_HVR2250:
        case SAA7164_BOARD_HAUPPAUGE_HVR2250_2:
        case SAA7164_BOARD_HAUPPAUGE_HVR2250_3:
index f65eab63ca871fb634be16d0f17a0457ee1d933c..d3779379197fa757daf7a8ee719b665a38a7ff1f 100644 (file)
@@ -475,6 +475,7 @@ int saa7164_dvb_register(struct saa7164_port *port)
        case SAA7164_BOARD_HAUPPAUGE_HVR2200:
        case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
        case SAA7164_BOARD_HAUPPAUGE_HVR2200_3:
+       case SAA7164_BOARD_HAUPPAUGE_HVR2200_4:
                i2c_bus = &dev->i2c_bus[port->nr + 1];
                switch (port->nr) {
                case 0:
index 16745d2fb349003deefb9d6cff205a68222fb3af..13bd27e9bd1cd20500f2765e318a17aa21d60eca 100644 (file)
@@ -83,6 +83,7 @@
 #define SAA7164_BOARD_HAUPPAUGE_HVR2200_3      6
 #define SAA7164_BOARD_HAUPPAUGE_HVR2250_2      7
 #define SAA7164_BOARD_HAUPPAUGE_HVR2250_3      8
+#define SAA7164_BOARD_HAUPPAUGE_HVR2200_4      9
 
 #define SAA7164_MAX_UNITS              8
 #define SAA7164_TS_NUMBER_OF_LINES     312
index b6eae48d7fb802f53a950b90ef3f9107f96e9271..1f962dc3a6a96f1ca8810db8294625c8ab2fed0d 100644 (file)
@@ -1960,7 +1960,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
 
        list_for_each_entry(stream, &dev->streams, list) {
                if (stream->intf == intf)
-                       return uvc_video_resume(stream);
+                       return uvc_video_resume(stream, reset);
        }
 
        uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
index 82dc041d99a84dd094bf00df988799ee07478fe0..06e2091d056470a14e2b411201f6171a5e657000 100644 (file)
@@ -65,6 +65,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
                        goto done;
                }
 
+               /* Prevent excessive memory consumption, as well as integer
+                * overflows.
+                */
+               if (xmap->menu_count == 0 ||
+                   xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+
                size = xmap->menu_count * sizeof(*map->menu_info);
                map->menu_info = kmalloc(size, GFP_KERNEL);
                if (map->menu_info == NULL) {
@@ -704,7 +713,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
                                        break;
                        }
                        pin = iterm->id;
-               } else if (pin < selector->bNrInPins) {
+               } else if (index < selector->bNrInPins) {
                        pin = selector->baSourceID[index];
                        list_for_each_entry(iterm, &chain->entities, chain) {
                                if (!UVC_ENTITY_IS_ITERM(iterm))
index 97e1c66a5039d2df105c4c82569614a2cf795e9f..fb32eb11ee536462ef8279b87750ca29e92edf18 100755 (executable)
@@ -1167,10 +1167,18 @@ int uvc_video_suspend(struct uvc_streaming *stream)
  * buffers, making sure userspace applications are notified of the problem
  * instead of waiting forever.
  */
-int uvc_video_resume(struct uvc_streaming *stream)
+int uvc_video_resume(struct uvc_streaming *stream, int reset)
 {
        int ret;
 
+       /* If the bus has been reset on resume, set the alternate setting to 0.
+        * This should be the default value, but some devices crash or otherwise
+        * misbehave if they don't receive a SET_INTERFACE request before any
+        * other video control request.
+        */
+       if (reset)
+               usb_set_interface(stream->dev->udev, stream->intfnum, 0);
+
        stream->frozen = 0;
 
        ret = uvc_commit_video(stream, &stream->ctrl);
index 06e24361b266a330f3b8073f74f6dc89c8f1adc9..4dc3c2de8eb549ab031c436ac65ad7163d2dc223 100755 (executable)
@@ -200,6 +200,7 @@ struct uvc_xu_control {
 
 /* Maximum allowed number of control mappings per device */
 #define UVC_MAX_CONTROL_MAPPINGS       1024
+#define UVC_MAX_CONTROL_MENU_ENTRIES   32
 
 /* Devices quirks */
 #define UVC_QUIRK_STATUS_INTERVAL      0x00000001
@@ -644,7 +645,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
 /* Video */
 extern int uvc_video_init(struct uvc_streaming *stream);
 extern int uvc_video_suspend(struct uvc_streaming *stream);
-extern int uvc_video_resume(struct uvc_streaming *stream);
+extern int uvc_video_resume(struct uvc_streaming *stream, int reset);
 extern int uvc_video_enable(struct uvc_streaming *stream, int enable);
 extern int uvc_probe_video(struct uvc_streaming *stream,
                struct uvc_streaming_control *probe);
index 5ea5514c94ff8f07284a553638854b49d0664b32..d27e88b33ea4e27a874bfdf215cf843b84bc2946 100644 (file)
@@ -2289,6 +2289,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
                struct v4l2_ext_controls *ctrls = parg;
 
                if (ctrls->count != 0) {
+                       if (ctrls->count > V4L2_CID_MAX_CTRLS) {
+                               ret = -EINVAL;
+                               break;
+                       }
                        *user_ptr = (void __user *)ctrls->controls;
                        *kernel_ptr = (void **)&ctrls->controls;
                        *array_size = sizeof(struct v4l2_ext_control)
index 155fa04078821d6fc8d5a5475225bba3a436ba06..e488a78a2fd65337d60c8214ee9df83514249fc7 100644 (file)
@@ -179,7 +179,7 @@ static struct pci_device_id cs5535_mfd_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl);
 
-static struct pci_driver cs5535_mfd_drv = {
+static struct pci_driver cs5535_mfd_driver = {
        .name = DRV_NAME,
        .id_table = cs5535_mfd_pci_tbl,
        .probe = cs5535_mfd_probe,
@@ -188,12 +188,12 @@ static struct pci_driver cs5535_mfd_drv = {
 
 static int __init cs5535_mfd_init(void)
 {
-       return pci_register_driver(&cs5535_mfd_drv);
+       return pci_register_driver(&cs5535_mfd_driver);
 }
 
 static void __exit cs5535_mfd_exit(void)
 {
-       pci_unregister_driver(&cs5535_mfd_drv);
+       pci_unregister_driver(&cs5535_mfd_driver);
 }
 
 module_init(cs5535_mfd_init);
index 0902523af62d47e33e139c1feab75e5b0ef2a49c..acf9dad686a14ef0b329d21bd0de1e49868b06aa 100644 (file)
@@ -122,7 +122,7 @@ static int mfd_add_device(struct device *parent, int id,
                }
 
                if (!cell->ignore_resource_conflicts) {
-                       ret = acpi_check_resource_conflict(res);
+                       ret = acpi_check_resource_conflict(&res[r]);
                        if (ret)
                                goto fail_res;
                }
index 3941ddcf15feff4bc45403569fa2296bd760ec36..834f824d3c11075b18098a5c0b64a1abf066cd69 100644 (file)
@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
        u8 ch_msb, ch_lsb;
        int ret;
 
-       if (!req)
+       if (!req || !twl4030_madc)
                return -EINVAL;
+
        mutex_lock(&twl4030_madc->lock);
        if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
                ret = -EINVAL;
@@ -530,13 +531,13 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
        if (ret) {
                dev_err(twl4030_madc->dev,
                        "unable to write sel register 0x%X\n", method->sel + 1);
-               return ret;
+               goto out;
        }
        ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
        if (ret) {
                dev_err(twl4030_madc->dev,
                        "unable to write sel register 0x%X\n", method->sel + 1);
-               return ret;
+               goto out;
        }
        /* Select averaging for all channels if do_avg is set */
        if (req->do_avg) {
@@ -546,7 +547,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
                        dev_err(twl4030_madc->dev,
                                "unable to write avg register 0x%X\n",
                                method->avg + 1);
-                       return ret;
+                       goto out;
                }
                ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
                                       ch_lsb, method->avg);
@@ -554,7 +555,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
                        dev_err(twl4030_madc->dev,
                                "unable to write sel reg 0x%X\n",
                                method->sel + 1);
-                       return ret;
+                       goto out;
                }
        }
        if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
        if (!madc)
                return -ENOMEM;
 
+       madc->dev = &pdev->dev;
+
        /*
         * Phoenix provides 2 interrupt lines. The first one is connected to
         * the OMAP. The other one can be connected to the other processor such
@@ -737,6 +740,28 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
                        TWL4030_BCI_BCICTL1);
                goto err_i2c;
        }
+
+       /* Check that MADC clock is on */
+       ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &regval, TWL4030_REG_GPBR1);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to read reg GPBR1 0x%X\n",
+                               TWL4030_REG_GPBR1);
+               goto err_i2c;
+       }
+
+       /* If MADC clk is not on, turn it on */
+       if (!(regval & TWL4030_GPBR1_MADC_HFCLK_EN)) {
+               dev_info(&pdev->dev, "clk disabled, enabling\n");
+               regval |= TWL4030_GPBR1_MADC_HFCLK_EN;
+               ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, regval,
+                                      TWL4030_REG_GPBR1);
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to write reg GPBR1 0x%X\n",
+                                       TWL4030_REG_GPBR1);
+                       goto err_i2c;
+               }
+       }
+
        platform_set_drvdata(pdev, madc);
        mutex_init(&madc->lock);
        ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
index fc68cb288615c458a7b87698834ccf957d84aee5..ab288e3a6bbaa340669746aeca523508990d4864 100644 (file)
@@ -61,10 +61,6 @@ config AD525X_DPOT_SPI
          To compile this driver as a module, choose M here: the
          module will be called ad525x_dpot-spi.
 
-config ANDROID_PMEM
-       bool "Android pmem allocator"
-       default y
-
 config ATMEL_PWM
        tristate "Atmel AT32/AT91 PWM support"
        depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
@@ -487,7 +483,7 @@ config BMP085
          module will be called bmp085.
 
 config PCH_PHUB
-       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB"
+       tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
        depends on PCI
        help
          This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
@@ -495,12 +491,13 @@ config PCH_PHUB
          processor. The Topcliff has MAC address and Option ROM data in SROM.
          This driver can access MAC address and Option ROM data in SROM.
 
-         This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7213 and ML7223.
-         ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-         for MP(Media Phone) use.
-         ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-         ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+         This driver also can be used for LAPIS Semiconductor's IOH,
+         ML7213/ML7223/ML7831.
+         ML7213 which is for IVI(In-Vehicle Infotainment) use.
+         ML7223 IOH is for MP(Media Phone) use.
+         ML7831 IOH is for general purpose use.
+         ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+         ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
          To compile this driver as a module, choose M here: the module will
          be called pch_phub.
index 47fa292d9a7b28f400c38f528d3ec3718a338d88..282485528a7395a299b6bcd18fb828812c23d430 100644 (file)
@@ -19,7 +19,6 @@ obj-$(CONFIG_PHANTOM)         += phantom.o
 obj-$(CONFIG_SENSORS_BH1780)   += bh1780gli.o
 obj-$(CONFIG_SENSORS_BH1770)   += bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
-obj-$(CONFIG_ANDROID_PMEM)     += pmem.o
 obj-$(CONFIG_SGI_IOC4)         += ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)       += kgdbts.o
index efec4139c3f68f512cded3fd51631e7097895d19..b1f16d6084aee5cc96394b3510e29f2d54cdc5ec 100644 (file)
@@ -244,6 +244,7 @@ static int __devinit cb710_probe(struct pci_dev *pdev,
        if (err)
                return err;
 
+       spin_lock_init(&chip->irq_lock);
        chip->pdev = pdev;
        chip->iobase = pcim_iomap_table(pdev)[0];
 
index bc685bfc4c33aaacf89a5dc7205943bd69f14f8d..87a390de054ce5fe8437fbe225700b4ba6849fd9 100644 (file)
@@ -262,7 +262,7 @@ static void __init reset_all_timers(void)
  * In other cases (such as with VSAless OpenFirmware), the system firmware
  * leaves timers available for us to use.
  */
-static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
+static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt)
 {
        struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
        unsigned long flags;
index 8cebec5e85eeb3d3c80bd998815a3e07f1a913ad..cdefef231fbdf740713f84f5cdbe62730f0d51f5 100644 (file)
@@ -133,12 +133,17 @@ static int force_hwbrks;
 static int hwbreaks_ok;
 static int hw_break_val;
 static int hw_break_val2;
+static int cont_instead_of_sstep;
+static unsigned long cont_thread_id;
+static unsigned long sstep_thread_id;
 #if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
 static int arch_needs_sstep_emulation = 1;
 #else
 static int arch_needs_sstep_emulation;
 #endif
+static unsigned long cont_addr;
 static unsigned long sstep_addr;
+static int restart_from_top_after_write;
 static int sstep_state;
 
 /* Storage for the registers, in GDB format. */
@@ -186,7 +191,8 @@ static int kgdbts_unreg_thread(void *ptr)
         */
        while (!final_ack)
                msleep_interruptible(1500);
-
+       /* Pause for any other threads to exit after final ack. */
+       msleep_interruptible(1000);
        if (configured)
                kgdb_unregister_io_module(&kgdbts_io_ops);
        configured = 0;
@@ -210,7 +216,7 @@ static unsigned long lookup_addr(char *arg)
        if (!strcmp(arg, "kgdbts_break_test"))
                addr = (unsigned long)kgdbts_break_test;
        else if (!strcmp(arg, "sys_open"))
-               addr = (unsigned long)sys_open;
+               addr = (unsigned long)do_sys_open;
        else if (!strcmp(arg, "do_fork"))
                addr = (unsigned long)do_fork;
        else if (!strcmp(arg, "hw_break_val"))
@@ -282,6 +288,16 @@ static void hw_break_val_write(void)
        hw_break_val++;
 }
 
+static int get_thread_id_continue(char *put_str, char *arg)
+{
+       char *ptr = &put_str[11];
+
+       if (put_str[1] != 'T' || put_str[2] != '0')
+               return 1;
+       kgdb_hex2long(&ptr, &cont_thread_id);
+       return 0;
+}
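get_thread_id_continue() evidently expects the stop reply as a full packet including the leading '$', e.g. "$T05thread:<id>;...", which is why the thread id is read starting at offset 11. A hypothetical stand-alone rendition of that parsing (sample packet and names are illustrative only):

#include <stdio.h>
#include <stdlib.h>

static int parse_stop_thread(const char *pkt, unsigned long *tid)
{
        if (pkt[1] != 'T' || pkt[2] != '0')
                return 1;                       /* not a T0x stop reply */
        *tid = strtoul(&pkt[11], NULL, 16);     /* hex id, ends at ';' */
        return 0;
}

int main(void)
{
        unsigned long tid;

        if (!parse_stop_thread("$T05thread:1a2b;#c3", &tid))
                printf("thread id 0x%lx\n", tid);       /* prints 0x1a2b */
        return 0;
}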
+
 static int check_and_rewind_pc(char *put_str, char *arg)
 {
        unsigned long addr = lookup_addr(arg);
@@ -298,13 +314,21 @@ static int check_and_rewind_pc(char *put_str, char *arg)
        if (addr + BREAK_INSTR_SIZE == ip)
                offset = -BREAK_INSTR_SIZE;
 #endif
-       if (strcmp(arg, "silent") && ip + offset != addr) {
+
+       if (arch_needs_sstep_emulation && sstep_addr &&
+           ip + offset == sstep_addr &&
+           ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) {
+               /* This is a special case for the emulated single step */
+               v2printk("Emul: rewind hit single step bp\n");
+               restart_from_top_after_write = 1;
+       } else if (strcmp(arg, "silent") && ip + offset != addr) {
                eprintk("kgdbts: BP mismatch %lx expected %lx\n",
                           ip + offset, addr);
                return 1;
        }
        /* Readjust the instruction pointer if needed */
        ip += offset;
+       cont_addr = ip;
 #ifdef GDB_ADJUSTS_BREAK_OFFSET
        instruction_pointer_set(&kgdbts_regs, ip);
 #endif
@@ -314,6 +338,8 @@ static int check_and_rewind_pc(char *put_str, char *arg)
 static int check_single_step(char *put_str, char *arg)
 {
        unsigned long addr = lookup_addr(arg);
+       static int matched_id;
+
        /*
         * From an arch indepent point of view the instruction pointer
         * should be on a different instruction
@@ -323,6 +349,29 @@ static int check_single_step(char *put_str, char *arg)
        gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
        v2printk("Singlestep stopped at IP: %lx\n",
                   instruction_pointer(&kgdbts_regs));
+
+       if (sstep_thread_id != cont_thread_id) {
+               /*
+                * Ensure we stopped in the same thread id as before, else the
+                * debugger should continue until the original thread that was
+                * single stepped is scheduled again, emulating gdb's behavior.
+                */
+               v2printk("ThrID does not match: %lx\n", cont_thread_id);
+               if (arch_needs_sstep_emulation) {
+                       if (matched_id &&
+                           instruction_pointer(&kgdbts_regs) != addr)
+                               goto continue_test;
+                       matched_id++;
+                       ts.idx -= 2;
+                       sstep_state = 0;
+                       return 0;
+               }
+               cont_instead_of_sstep = 1;
+               ts.idx -= 4;
+               return 0;
+       }
+continue_test:
+       matched_id = 0;
        if (instruction_pointer(&kgdbts_regs) == addr) {
                eprintk("kgdbts: SingleStep failed at %lx\n",
                           instruction_pointer(&kgdbts_regs));
@@ -364,10 +413,40 @@ static int got_break(char *put_str, char *arg)
        return 1;
 }
 
+static void get_cont_catch(char *arg)
+{
+       /* Always send detach because the test is completed at this point */
+       fill_get_buf("D");
+}
+
+static int put_cont_catch(char *put_str, char *arg)
+{
+       /* This is at the end of the test and we catch any and all input */
+       v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id);
+       ts.idx--;
+       return 0;
+}
+
+static int emul_reset(char *put_str, char *arg)
+{
+       if (strncmp(put_str, "$OK", 3))
+               return 1;
+       if (restart_from_top_after_write) {
+               restart_from_top_after_write = 0;
+               ts.idx = -1;
+       }
+       return 0;
+}
+
 static void emul_sstep_get(char *arg)
 {
        if (!arch_needs_sstep_emulation) {
-               fill_get_buf(arg);
+               if (cont_instead_of_sstep) {
+                       cont_instead_of_sstep = 0;
+                       fill_get_buf("c");
+               } else {
+                       fill_get_buf(arg);
+               }
                return;
        }
        switch (sstep_state) {
@@ -397,9 +476,11 @@ static void emul_sstep_get(char *arg)
 static int emul_sstep_put(char *put_str, char *arg)
 {
        if (!arch_needs_sstep_emulation) {
-               if (!strncmp(put_str+1, arg, 2))
-                       return 0;
-               return 1;
+               char *ptr = &put_str[11];
+               if (put_str[1] != 'T' || put_str[2] != '0')
+                       return 1;
+               kgdb_hex2long(&ptr, &sstep_thread_id);
+               return 0;
        }
        switch (sstep_state) {
        case 1:
@@ -410,8 +491,7 @@ static int emul_sstep_put(char *put_str, char *arg)
                v2printk("Stopped at IP: %lx\n",
                         instruction_pointer(&kgdbts_regs));
                /* Want to stop at IP + break instruction size by default */
-               sstep_addr = instruction_pointer(&kgdbts_regs) +
-                       BREAK_INSTR_SIZE;
+               sstep_addr = cont_addr + BREAK_INSTR_SIZE;
                break;
        case 2:
                if (strncmp(put_str, "$OK", 3)) {
@@ -423,6 +503,9 @@ static int emul_sstep_put(char *put_str, char *arg)
                if (strncmp(put_str, "$T0", 3)) {
                        eprintk("kgdbts: failed continue sstep\n");
                        return 1;
+               } else {
+                       char *ptr = &put_str[11];
+                       kgdb_hex2long(&ptr, &sstep_thread_id);
                }
                break;
        case 4:
@@ -501,10 +584,10 @@ static struct test_struct bad_read_test[] = {
 static struct test_struct singlestep_break_test[] = {
        { "?", "S0*" }, /* Clear break points */
        { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
-       { "c", "T0*", }, /* Continue */
+       { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
+       { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
        { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
        { "write", "OK", write_regs }, /* Write registers */
-       { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
        { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
        { "g", "kgdbts_break_test", NULL, check_single_step },
        { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
@@ -522,16 +605,16 @@ static struct test_struct singlestep_break_test[] = {
 static struct test_struct do_fork_test[] = {
        { "?", "S0*" }, /* Clear break points */
        { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
-       { "c", "T0*", }, /* Continue */
-       { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
-       { "write", "OK", write_regs }, /* Write registers */
+       { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
        { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */
+       { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
+       { "write", "OK", write_regs, emul_reset }, /* Write registers */
        { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
        { "g", "do_fork", NULL, check_single_step },
        { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
        { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
        { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
-       { "", "" },
+       { "", "", get_cont_catch, put_cont_catch },
 };
 
 /* Test for hitting a breakpoint at sys_open for what ever the number
@@ -540,16 +623,16 @@ static struct test_struct do_fork_test[] = {
 static struct test_struct sys_open_test[] = {
        { "?", "S0*" }, /* Clear break points */
        { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
-       { "c", "T0*", }, /* Continue */
-       { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
-       { "write", "OK", write_regs }, /* Write registers */
+       { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
        { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */
+       { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
+       { "write", "OK", write_regs, emul_reset }, /* Write registers */
        { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
        { "g", "sys_open", NULL, check_single_step },
        { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
        { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
        { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
-       { "", "" },
+       { "", "", get_cont_catch, put_cont_catch },
 };
 
 /*
@@ -692,8 +775,8 @@ static int run_simple_test(int is_get_char, int chr)
        /* This callback is a put char which is when kgdb sends data to
         * this I/O module.
         */
-       if (ts.tst[ts.idx].get[0] == '\0' &&
-               ts.tst[ts.idx].put[0] == '\0') {
+       if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' &&
+           !ts.tst[ts.idx].get_handler) {
                eprintk("kgdbts: ERROR: beyond end of test on"
                           " '%s' line %i\n", ts.name, ts.idx);
                return 0;
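The test tables above pair a packet for kgdb to receive ("get") with the expected reply ("put"), plus optional hooks that build the outgoing packet or validate the reply; the end-of-test check just above now also consults get_handler so the new catch-all { "", "", get_cont_catch, put_cont_catch } entries still run. A sketch of the entry layout these hunks appear to assume (inferred from the handlers, not quoted from kgdbts.c):

/* Assumed shape of one test step; treat field names as an inference. */
struct test_struct {
        char *get;                          /* packet for kgdb to receive */
        char *put;                          /* expected reply; "T0*" matches a prefix */
        void (*get_handler)(char *);        /* optionally builds the get packet */
        int (*put_handler)(char *, char *); /* optionally validates the reply */
};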
@@ -906,6 +989,17 @@ static void kgdbts_run_tests(void)
        if (ptr)
                sstep_test = simple_strtol(ptr+1, NULL, 10);
 
+       /* All HW break point tests */
+       if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
+               hwbreaks_ok = 1;
+               v1printk("kgdbts:RUN hw breakpoint test\n");
+               run_breakpoint_test(1);
+               v1printk("kgdbts:RUN hw write breakpoint test\n");
+               run_hw_break_test(1);
+               v1printk("kgdbts:RUN access write breakpoint test\n");
+               run_hw_break_test(0);
+       }
+
        /* required internal KGDB tests */
        v1printk("kgdbts:RUN plant and detach test\n");
        run_plant_and_detach_test(0);
@@ -923,35 +1017,11 @@ static void kgdbts_run_tests(void)
 
        /* ===Optional tests=== */
 
-       /* All HW break point tests */
-       if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
-               hwbreaks_ok = 1;
-               v1printk("kgdbts:RUN hw breakpoint test\n");
-               run_breakpoint_test(1);
-               v1printk("kgdbts:RUN hw write breakpoint test\n");
-               run_hw_break_test(1);
-               v1printk("kgdbts:RUN access write breakpoint test\n");
-               run_hw_break_test(0);
-       }
-
        if (nmi_sleep) {
                v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep);
                run_nmi_sleep_test(nmi_sleep);
        }
 
-#ifdef CONFIG_DEBUG_RODATA
-       /* Until there is an api to write to read-only text segments, use
-        * HW breakpoints for the remainder of any tests, else print a
-        * failure message if hw breakpoints do not work.
-        */
-       if (!(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT && hwbreaks_ok)) {
-               eprintk("kgdbts: HW breakpoints do not work,"
-                       "skipping remaining tests\n");
-               return;
-       }
-       force_hwbrks = 1;
-#endif /* CONFIG_DEBUG_RODATA */
-
        /* If the do_fork test is run it will be the last test that is
         * executed because a kernel thread will be spawned at the very
         * end to unregister the debug hooks.
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 5fe79df448380a620ad88282df81eb1cab5a860c..f51c81e1884addfd05b173c69a6a6d4ba5d1064b 100644 (file)
@@ -73,6 +73,9 @@
 #define PCI_DEVICE_ID_ROHM_ML7223_mPHUB        0x8012 /* for Bus-m */
 #define PCI_DEVICE_ID_ROHM_ML7223_nPHUB        0x8002 /* for Bus-n */
 
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801
+
 /* SROM ACCESS Macro */
 #define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
 
@@ -90,6 +93,7 @@
 #define PCH_PHUB_INTPIN_REG_WPERMIT_REG3       0x002C
 #define PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE   0x0040
 #define CLKCFG_REG_OFFSET                      0x500
+#define FUNCSEL_REG_OFFSET                     0x508
 
 #define PCH_PHUB_OROM_SIZE 15360
 
  * @intpin_reg_wpermit_reg3:           INTPIN_REG_WPERMIT register 3 val
  * @int_reduce_control_reg:            INT_REDUCE_CONTROL registers val
  * @clkcfg_reg:                                CLK CFG register val
+ * @funcsel_reg:                       Function select register value
  * @pch_phub_base_address:             Register base address
  * @pch_phub_extrom_base_address:      external rom base address
  * @pch_mac_start_address:             MAC address area start address
  * @pch_opt_rom_start_address:         Option ROM start address
  * @ioh_type:                          Save IOH type
+ * @pdev:                              pointer to pci device struct
  */
 struct pch_phub_reg {
        u32 phub_id_reg;
@@ -128,11 +134,13 @@ struct pch_phub_reg {
        u32 intpin_reg_wpermit_reg3;
        u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG];
        u32 clkcfg_reg;
+       u32 funcsel_reg;
        void __iomem *pch_phub_base_address;
        void __iomem *pch_phub_extrom_base_address;
        u32 pch_mac_start_address;
        u32 pch_opt_rom_start_address;
        int ioh_type;
+       struct pci_dev *pdev;
 };
 
 /* SROM SPEC for MAC address assignment offset */
@@ -211,6 +219,8 @@ static void pch_phub_save_reg_conf(struct pci_dev *pdev)
                        __func__, i, chip->int_reduce_control_reg[i]);
        }
        chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET);
+       if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
+               chip->funcsel_reg = ioread32(p + FUNCSEL_REG_OFFSET);
 }
 
 /* pch_phub_restore_reg_conf - restore register configuration */
@@ -271,6 +281,8 @@ static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
        }
 
        iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET);
+       if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
+               iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
 }
 
 /**
@@ -464,7 +476,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
        int retval;
        int i;
 
-       if (chip->ioh_type == 1) /* EG20T */
+       if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831 */
                retval = pch_phub_gbe_serial_rom_conf(chip);
        else    /* ML7223 */
                retval = pch_phub_gbe_serial_rom_conf_mp(chip);
@@ -491,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
        unsigned int orom_size;
        int ret;
        int err;
+       ssize_t rom_size;
 
        struct pch_phub_reg *chip =
                dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -502,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
        }
 
        /* Get Rom signature */
+       chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+       if (!chip->pch_phub_extrom_base_address)
+               goto exrom_map_err;
+
        pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
                                (unsigned char *)&rom_signature);
        rom_signature &= 0xff;
@@ -532,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
                goto return_err;
        }
 return_ok:
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
        mutex_unlock(&pch_phub_mutex);
        return addr_offset;
 
 return_err:
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+exrom_map_err:
        mutex_unlock(&pch_phub_mutex);
 return_err_nomutex:
        return err;
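This hunk, together with the following ones for the write, MAC show/store and probe paths, moves the option-ROM mapping from one pci_map_rom() at probe time to map-on-demand around each access, with pci_unmap_rom() on both the success and error exits. A minimal sketch of that pattern (hypothetical helper name):

#include <linux/io.h>
#include <linux/pci.h>

static int demo_read_rom_byte(struct pci_dev *pdev, size_t off, u8 *val)
{
        void __iomem *rom;
        size_t rom_size;

        rom = pci_map_rom(pdev, &rom_size);     /* map only for this access */
        if (!rom)
                return -ENOMEM;
        if (off >= rom_size) {
                pci_unmap_rom(pdev, rom);       /* unmap on the error path too */
                return -EINVAL;
        }
        *val = ioread8(rom + off);
        pci_unmap_rom(pdev, rom);               /* and on the success path */
        return 0;
}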
@@ -548,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
        int err;
        unsigned int addr_offset;
        int ret;
+       ssize_t rom_size;
        struct pch_phub_reg *chip =
                dev_get_drvdata(container_of(kobj, struct device, kobj));
 
@@ -564,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
                goto return_ok;
        }
 
+       chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+       if (!chip->pch_phub_extrom_base_address) {
+               err = -ENOMEM;
+               goto exrom_map_err;
+       }
+
        for (addr_offset = 0; addr_offset < count; addr_offset++) {
                if (PCH_PHUB_OROM_SIZE < off + addr_offset)
                        goto return_ok;
@@ -578,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
        }
 
 return_ok:
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
        mutex_unlock(&pch_phub_mutex);
        return addr_offset;
 
 return_err:
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+
+exrom_map_err:
        mutex_unlock(&pch_phub_mutex);
        return err;
 }
@@ -591,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
 {
        u8 mac[8];
        struct pch_phub_reg *chip = dev_get_drvdata(dev);
+       ssize_t rom_size;
+
+       chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+       if (!chip->pch_phub_extrom_base_address)
+               return -ENOMEM;
 
        pch_phub_read_gbe_mac_addr(chip, mac);
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
 
        return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
                                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
@@ -602,6 +639,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
 {
        u8 mac[6];
+       ssize_t rom_size;
        struct pch_phub_reg *chip = dev_get_drvdata(dev);
 
        if (count != 18)
@@ -611,7 +649,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
                (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3],
                (u32 *)&mac[4], (u32 *)&mac[5]);
 
+       chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+       if (!chip->pch_phub_extrom_base_address)
+               return -ENOMEM;
+
        pch_phub_write_gbe_mac_addr(chip, mac);
+       pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
 
        return count;
 }
@@ -634,7 +677,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
        int retval;
 
        int ret;
-       ssize_t rom_size;
        struct pch_phub_reg *chip;
 
        chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
@@ -671,19 +713,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
                "in pch_phub_base_address variable is %p\n", __func__,
                chip->pch_phub_base_address);
 
-       if (id->driver_data != 3) {
-               chip->pch_phub_extrom_base_address =\
-                                                  pci_map_rom(pdev, &rom_size);
-               if (chip->pch_phub_extrom_base_address == 0) {
-                       dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__);
-                       ret = -ENOMEM;
-                       goto err_pci_map;
-               }
-               dev_dbg(&pdev->dev, "%s : "
-                       "pci_map_rom SUCCESS and value in "
-                       "pch_phub_extrom_base_address variable is %p\n",
-                       __func__, chip->pch_phub_extrom_base_address);
-       }
+       chip->pdev = pdev; /* Save pci device struct */
 
        if (id->driver_data == 1) { /* EG20T PCH */
                retval = sysfs_create_file(&pdev->dev.kobj,
@@ -732,6 +762,8 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
                 * Device8(GbE)
                 */
                iowrite32(0x000a0000, chip->pch_phub_base_address + 0x14);
+               /* set the interrupt delay value */
+               iowrite32(0x25, chip->pch_phub_base_address + 0x140);
                chip->pch_opt_rom_start_address =\
                                                 PCH_PHUB_ROM_START_ADDR_ML7223;
                chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
@@ -749,11 +781,25 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
                 * Device6(SATA 2):f
                 */
                iowrite32(0x0000ffa0, chip->pch_phub_base_address + 0x14);
-               /* set the interrupt delay value */
-               iowrite32(0x25, chip->pch_phub_base_address + 0x140);
                chip->pch_opt_rom_start_address =\
                                                 PCH_PHUB_ROM_START_ADDR_ML7223;
                chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
+       } else if (id->driver_data == 5) { /* ML7831 */
+               retval = sysfs_create_file(&pdev->dev.kobj,
+                                          &dev_attr_pch_mac.attr);
+               if (retval)
+                       goto err_sysfs_create;
+
+               retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+               if (retval)
+                       goto exit_bin_attr;
+
+               /* set the prefetch value */
+               iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
+               /* set the interrupt delay value */
+               iowrite32(0x25, chip->pch_phub_base_address + 0x44);
+               chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
+               chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
        }
 
        chip->ioh_type = id->driver_data;
@@ -764,8 +810,6 @@ exit_bin_attr:
        sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
 
 err_sysfs_create:
-       pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
-err_pci_map:
        pci_iounmap(pdev, chip->pch_phub_base_address);
 err_pci_iomap:
        pci_release_regions(pdev);
@@ -783,7 +827,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev)
 
        sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
        sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
-       pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
        pci_iounmap(pdev, chip->pch_phub_base_address);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
@@ -838,6 +881,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2,  },
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3,  },
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4,  },
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5,  },
        { }
 };
 MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
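The driver_data value in this table is what probe saves as ioh_type, which is why the earlier hunks key behaviour off small integers: types 1 and 5 (EG20T, ML7831) take the EG20T serial-ROM path, and types 2 and 4 additionally save/restore FUNCSEL. An informal decoding of those values (descriptive strings, not taken from the driver):

static const char *demo_ioh_name(unsigned long ioh_type)
{
        switch (ioh_type) {
        case 1: return "Intel EG20T PCH";
        case 2: return "ML7213 IOH (IVI)";
        case 3: return "ML7223 IOH (Bus-m)";
        case 4: return "ML7223 IOH (Bus-n)";
        case 5: return "ML7831 IOH";
        default: return "unknown IOH type";
        }
}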
diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c
deleted file mode 100644 (file)
index 5b8b6bd..0000000
+++ /dev/null
@@ -1,1345 +0,0 @@
-/* drivers/android/pmem.c
- *
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/miscdevice.h>
-#include <linux/platform_device.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/mm.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/debugfs.h>
-#include <linux/android_pmem.h>
-#include <linux/mempolicy.h>
-#include <linux/sched.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/cacheflush.h>
-
-#define PMEM_MAX_DEVICES 10
-#define PMEM_MAX_ORDER 128
-#define PMEM_MIN_ALLOC PAGE_SIZE
-
-#define PMEM_DEBUG 1
-
-/* indicates that a refernce to this file has been taken via get_pmem_file,
- * the file should not be released until put_pmem_file is called */
-#define PMEM_FLAGS_BUSY 0x1
-/* indicates that this is a suballocation of a larger master range */
-#define PMEM_FLAGS_CONNECTED 0x1 << 1
-/* indicates this is a master and not a sub allocation and that it is mmaped */
-#define PMEM_FLAGS_MASTERMAP 0x1 << 2
-/* submap and unsubmap flags indicate:
- * 00: subregion has never been mmaped
- * 10: subregion has been mmaped, reference to the mm was taken
- * 11: subretion has ben released, refernece to the mm still held
- * 01: subretion has been released, reference to the mm has been released
- */
-#define PMEM_FLAGS_SUBMAP 0x1 << 3
-#define PMEM_FLAGS_UNSUBMAP 0x1 << 4
-
-
-struct pmem_data {
-       /* in alloc mode: an index into the bitmap
-        * in no_alloc mode: the size of the allocation */
-       int index;
-       /* see flags above for descriptions */
-       unsigned int flags;
-       /* protects this data field, if the mm_mmap sem will be held at the
-        * same time as this sem, the mm sem must be taken first (as this is
-        * the order for vma_open and vma_close ops */
-       struct rw_semaphore sem;
-       /* info about the mmaping process */
-       struct vm_area_struct *vma;
-       /* task struct of the mapping process */
-       struct task_struct *task;
-       /* process id of teh mapping process */
-       pid_t pid;
-       /* file descriptor of the master */
-       int master_fd;
-       /* file struct of the master */
-       struct file *master_file;
-       /* a list of currently available regions if this is a suballocation */
-       struct list_head region_list;
-       /* a linked list of data so we can access them for debugging */
-       struct list_head list;
-#if PMEM_DEBUG
-       int ref;
-#endif
-};
-
-struct pmem_bits {
-       unsigned allocated:1;           /* 1 if allocated, 0 if free */
-       unsigned order:7;               /* size of the region in pmem space */
-};
-
-struct pmem_region_node {
-       struct pmem_region region;
-       struct list_head list;
-};
-
-#define PMEM_DEBUG_MSGS 0
-#if PMEM_DEBUG_MSGS
-#define DLOG(fmt,args...) \
-       do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
-                   ##args); } \
-       while (0)
-#else
-#define DLOG(x...) do {} while (0)
-#endif
-
-struct pmem_info {
-       struct miscdevice dev;
-       /* physical start address of the remaped pmem space */
-       unsigned long base;
-       /* vitual start address of the remaped pmem space */
-       unsigned char __iomem *vbase;
-       /* total size of the pmem space */
-       unsigned long size;
-       /* number of entries in the pmem space */
-       unsigned long num_entries;
-       /* pfn of the garbage page in memory */
-       unsigned long garbage_pfn;
-       /* index of the garbage page in the pmem space */
-       int garbage_index;
-       /* the bitmap for the region indicating which entries are allocated
-        * and which are free */
-       struct pmem_bits *bitmap;
-       /* indicates the region should not be managed with an allocator */
-       unsigned no_allocator;
-       /* indicates maps of this region should be cached, if a mix of
-        * cached and uncached is desired, set this and open the device with
-        * O_SYNC to get an uncached region */
-       unsigned cached;
-       unsigned buffered;
-       /* in no_allocator mode the first mapper gets the whole space and sets
-        * this flag */
-       unsigned allocated;
-       /* for debugging, creates a list of pmem file structs, the
-        * data_list_lock should be taken before pmem_data->sem if both are
-        * needed */
-       struct mutex data_list_lock;
-       struct list_head data_list;
-       /* pmem_sem protects the bitmap array
-        * a write lock should be held when modifying entries in bitmap
-        * a read lock should be held when reading data from bits or
-        * dereferencing a pointer into bitmap
-        *
-        * pmem_data->sem protects the pmem data of a particular file
-        * Many of the function that require the pmem_data->sem have a non-
-        * locking version for when the caller is already holding that sem.
-        *
-        * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
-        * down(pmem_data->sem) => down(bitmap_sem)
-        */
-       struct rw_semaphore bitmap_sem;
-
-       long (*ioctl)(struct file *, unsigned int, unsigned long);
-       int (*release)(struct inode *, struct file *);
-};
-
-static struct pmem_info pmem[PMEM_MAX_DEVICES];
-static int id_count;
-
-#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
-#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
-#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
-#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
-#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
-#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
-#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
-#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
-       PMEM_LEN(id, index))
-#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase)
-#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
-       PMEM_LEN(id, index))
-#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
-#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
-#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
-       (!(data->flags & PMEM_FLAGS_UNSUBMAP)))
-
-static int pmem_release(struct inode *, struct file *);
-static int pmem_mmap(struct file *, struct vm_area_struct *);
-static int pmem_open(struct inode *, struct file *);
-static long pmem_ioctl(struct file *, unsigned int, unsigned long);
-
-struct file_operations pmem_fops = {
-       .release = pmem_release,
-       .mmap = pmem_mmap,
-       .open = pmem_open,
-       .unlocked_ioctl = pmem_ioctl,
-};
-
-static int get_id(struct file *file)
-{
-       return MINOR(file->f_dentry->d_inode->i_rdev);
-}
-
-int is_pmem_file(struct file *file)
-{
-       int id;
-
-       if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
-               return 0;
-       id = get_id(file);
-       if (unlikely(id >= PMEM_MAX_DEVICES))
-               return 0;
-       if (unlikely(file->f_dentry->d_inode->i_rdev !=
-            MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
-               return 0;
-       return 1;
-}
-
-static int has_allocation(struct file *file)
-{
-       struct pmem_data *data;
-       /* check is_pmem_file first if not accessed via pmem_file_ops */
-
-       if (unlikely(!file->private_data))
-               return 0;
-       data = (struct pmem_data *)file->private_data;
-       if (unlikely(data->index < 0))
-               return 0;
-       return 1;
-}
-
-static int is_master_owner(struct file *file)
-{
-       struct file *master_file;
-       struct pmem_data *data;
-       int put_needed, ret = 0;
-
-       if (!is_pmem_file(file) || !has_allocation(file))
-               return 0;
-       data = (struct pmem_data *)file->private_data;
-       if (PMEM_FLAGS_MASTERMAP & data->flags)
-               return 1;
-       master_file = fget_light(data->master_fd, &put_needed);
-       if (master_file && data->master_file == master_file)
-               ret = 1;
-       fput_light(master_file, put_needed);
-       return ret;
-}
-
-static int pmem_free(int id, int index)
-{
-       /* caller should hold the write lock on pmem_sem! */
-       int buddy, curr = index;
-       DLOG("index %d\n", index);
-
-       if (pmem[id].no_allocator) {
-               pmem[id].allocated = 0;
-               return 0;
-       }
-       /* clean up the bitmap, merging any buddies */
-       pmem[id].bitmap[curr].allocated = 0;
-       /* find a slots buddy Buddy# = Slot# ^ (1 << order)
-        * if the buddy is also free merge them
-        * repeat until the buddy is not free or end of the bitmap is reached
-        */
-       do {
-               buddy = PMEM_BUDDY_INDEX(id, curr);
-               if (PMEM_IS_FREE(id, buddy) &&
-                               PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
-                       PMEM_ORDER(id, buddy)++;
-                       PMEM_ORDER(id, curr)++;
-                       curr = min(buddy, curr);
-               } else {
-                       break;
-               }
-       } while (curr < pmem[id].num_entries);
-
-       return 0;
-}
-
-static void pmem_revoke(struct file *file, struct pmem_data *data);
-
-static int pmem_release(struct inode *inode, struct file *file)
-{
-       struct pmem_data *data = (struct pmem_data *)file->private_data;
-       struct pmem_region_node *region_node;
-       struct list_head *elt, *elt2;
-       int id = get_id(file), ret = 0;
-
-
-       mutex_lock(&pmem[id].data_list_lock);
-       /* if this file is a master, revoke all the memory in the connected
-        *  files */
-       if (PMEM_FLAGS_MASTERMAP & data->flags) {
-               struct pmem_data *sub_data;
-               list_for_each(elt, &pmem[id].data_list) {
-                       sub_data = list_entry(elt, struct pmem_data, list);
-                       down_read(&sub_data->sem);
-                       if (PMEM_IS_SUBMAP(sub_data) &&
-                           file == sub_data->master_file) {
-                               up_read(&sub_data->sem);
-                               pmem_revoke(file, sub_data);
-                       }  else
-                               up_read(&sub_data->sem);
-               }
-       }
-       list_del(&data->list);
-       mutex_unlock(&pmem[id].data_list_lock);
-
-
-       down_write(&data->sem);
-
-       /* if its not a conencted file and it has an allocation, free it */
-       if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
-               down_write(&pmem[id].bitmap_sem);
-               ret = pmem_free(id, data->index);
-               up_write(&pmem[id].bitmap_sem);
-       }
-
-       /* if this file is a submap (mapped, connected file), downref the
-        * task struct */
-       if (PMEM_FLAGS_SUBMAP & data->flags)
-               if (data->task) {
-                       put_task_struct(data->task);
-                       data->task = NULL;
-               }
-
-       file->private_data = NULL;
-
-       list_for_each_safe(elt, elt2, &data->region_list) {
-               region_node = list_entry(elt, struct pmem_region_node, list);
-               list_del(elt);
-               kfree(region_node);
-       }
-       BUG_ON(!list_empty(&data->region_list));
-
-       up_write(&data->sem);
-       kfree(data);
-       if (pmem[id].release)
-               ret = pmem[id].release(inode, file);
-
-       return ret;
-}
-
-static int pmem_open(struct inode *inode, struct file *file)
-{
-       struct pmem_data *data;
-       int id = get_id(file);
-       int ret = 0;
-
-       DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
-       /* setup file->private_data to indicate its unmapped */
-       /*  you can only open a pmem device one time */
-       if (file->private_data != NULL && file->private_data != &pmem[id].dev)
-               return -1;
-       data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
-       if (!data) {
-               printk("pmem: unable to allocate memory for pmem metadata.");
-               return -1;
-       }
-       data->flags = 0;
-       data->index = -1;
-       data->task = NULL;
-       data->vma = NULL;
-       data->pid = 0;
-       data->master_file = NULL;
-#if PMEM_DEBUG
-       data->ref = 0;
-#endif
-       INIT_LIST_HEAD(&data->region_list);
-       init_rwsem(&data->sem);
-
-       file->private_data = data;
-       INIT_LIST_HEAD(&data->list);
-
-       mutex_lock(&pmem[id].data_list_lock);
-       list_add(&data->list, &pmem[id].data_list);
-       mutex_unlock(&pmem[id].data_list_lock);
-       return ret;
-}
-
-static unsigned long pmem_order(unsigned long len)
-{
-       int i;
-
-       len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
-       len--;
-       for (i = 0; i < sizeof(len)*8; i++)
-               if (len >> i == 0)
-                       break;
-       return i;
-}
-
-static int pmem_allocate(int id, unsigned long len)
-{
-       /* caller should hold the write lock on pmem_sem! */
-       /* return the corresponding pdata[] entry */
-       int curr = 0;
-       int end = pmem[id].num_entries;
-       int best_fit = -1;
-       unsigned long order = pmem_order(len);
-
-       if (pmem[id].no_allocator) {
-               DLOG("no allocator");
-               if ((len > pmem[id].size) || pmem[id].allocated)
-                       return -1;
-               pmem[id].allocated = 1;
-               return len;
-       }
-
-       if (order > PMEM_MAX_ORDER)
-               return -1;
-       DLOG("order %lx\n", order);
-
-       /* look through the bitmap:
-        *      if you find a free slot of the correct order use it
-        *      otherwise, use the best fit (smallest with size > order) slot
-        */
-       while (curr < end) {
-               if (PMEM_IS_FREE(id, curr)) {
-                       if (PMEM_ORDER(id, curr) == (unsigned char)order) {
-                               /* set the not free bit and clear others */
-                               best_fit = curr;
-                               break;
-                       }
-                       if (PMEM_ORDER(id, curr) > (unsigned char)order &&
-                           (best_fit < 0 ||
-                            PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
-                               best_fit = curr;
-               }
-               curr = PMEM_NEXT_INDEX(id, curr);
-       }
-
-       /* if best_fit < 0, there are no suitable slots,
-        * return an error
-        */
-       if (best_fit < 0) {
-               printk("pmem: no space left to allocate!\n");
-               return -1;
-       }
-
-       /* now partition the best fit:
-        *      split the slot into 2 buddies of order - 1
-        *      repeat until the slot is of the correct order
-        */
-       while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
-               int buddy;
-               PMEM_ORDER(id, best_fit) -= 1;
-               buddy = PMEM_BUDDY_INDEX(id, best_fit);
-               PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
-       }
-       pmem[id].bitmap[best_fit].allocated = 1;
-       return best_fit;
-}
-
-static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot)
-{
-       int id = get_id(file);
-#ifdef pgprot_noncached
-       if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
-               return pgprot_noncached(vma_prot);
-#endif
-#ifdef pgprot_ext_buffered
-       else if (pmem[id].buffered)
-               return pgprot_ext_buffered(vma_prot);
-#endif
-       return vma_prot;
-}
-
-static unsigned long pmem_start_addr(int id, struct pmem_data *data)
-{
-       if (pmem[id].no_allocator)
-               return PMEM_START_ADDR(id, 0);
-       else
-               return PMEM_START_ADDR(id, data->index);
-
-}
-
-static void *pmem_start_vaddr(int id, struct pmem_data *data)
-{
-       return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
-}
-
-static unsigned long pmem_len(int id, struct pmem_data *data)
-{
-       if (pmem[id].no_allocator)
-               return data->index;
-       else
-               return PMEM_LEN(id, data->index);
-}
-
-static int pmem_map_garbage(int id, struct vm_area_struct *vma,
-                           struct pmem_data *data, unsigned long offset,
-                           unsigned long len)
-{
-       int i, garbage_pages = len >> PAGE_SHIFT;
-
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
-       for (i = 0; i < garbage_pages; i++) {
-               if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
-                   pmem[id].garbage_pfn))
-                       return -EAGAIN;
-       }
-       return 0;
-}
-
-static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
-                               struct pmem_data *data, unsigned long offset,
-                               unsigned long len)
-{
-       int garbage_pages;
-       DLOG("unmap offset %lx len %lx\n", offset, len);
-
-       BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
-
-       garbage_pages = len >> PAGE_SHIFT;
-       zap_page_range(vma, vma->vm_start + offset, len, NULL);
-       pmem_map_garbage(id, vma, data, offset, len);
-       return 0;
-}
-
-static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
-                             struct pmem_data *data, unsigned long offset,
-                             unsigned long len)
-{
-       DLOG("map offset %lx len %lx\n", offset, len);
-       BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
-       BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
-       BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
-       BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
-
-       if (io_remap_pfn_range(vma, vma->vm_start + offset,
-               (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
-               len, vma->vm_page_prot)) {
-               return -EAGAIN;
-       }
-       return 0;
-}
-
-static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
-                             struct pmem_data *data, unsigned long offset,
-                             unsigned long len)
-{
-       /* hold the mm semp for the vma you are modifying when you call this */
-       BUG_ON(!vma);
-       zap_page_range(vma, vma->vm_start + offset, len, NULL);
-       return pmem_map_pfn_range(id, vma, data, offset, len);
-}
-
-static void pmem_vma_open(struct vm_area_struct *vma)
-{
-       struct file *file = vma->vm_file;
-       struct pmem_data *data = file->private_data;
-       int id = get_id(file);
-       /* this should never be called as we don't support copying pmem
-        * ranges via fork */
-       BUG_ON(!has_allocation(file));
-       down_write(&data->sem);
-       /* remap the garbage pages, forkers don't get access to the data */
-       pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end);
-       up_write(&data->sem);
-}
-
-static void pmem_vma_close(struct vm_area_struct *vma)
-{
-       struct file *file = vma->vm_file;
-       struct pmem_data *data = file->private_data;
-
-       DLOG("current %u ppid %u file %p count %d\n", current->pid,
-            current->parent->pid, file, file_count(file));
-       if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
-               printk(KERN_WARNING "pmem: something is very wrong, you are "
-                      "closing a vm backing an allocation that doesn't "
-                      "exist!\n");
-               return;
-       }
-       down_write(&data->sem);
-       if (data->vma == vma) {
-               data->vma = NULL;
-               if ((data->flags & PMEM_FLAGS_CONNECTED) &&
-                   (data->flags & PMEM_FLAGS_SUBMAP))
-                       data->flags |= PMEM_FLAGS_UNSUBMAP;
-       }
-       /* the kernel is going to free this vma now anyway */
-       up_write(&data->sem);
-}
-
-static struct vm_operations_struct vm_ops = {
-       .open = pmem_vma_open,
-       .close = pmem_vma_close,
-};
-
-static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       struct pmem_data *data;
-       int index;
-       unsigned long vma_size =  vma->vm_end - vma->vm_start;
-       int ret = 0, id = get_id(file);
-
-       if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
-#if PMEM_DEBUG
-               printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
-                               " and a multiple of pages_size.\n");
-#endif
-               return -EINVAL;
-       }
-
-       data = (struct pmem_data *)file->private_data;
-       down_write(&data->sem);
-       /* check this file isn't already mmaped, for submaps check this file
-        * has never been mmaped */
-       if ((data->flags & PMEM_FLAGS_SUBMAP) ||
-           (data->flags & PMEM_FLAGS_UNSUBMAP)) {
-#if PMEM_DEBUG
-               printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
-                      "this file is already mmaped. %x\n", data->flags);
-#endif
-               ret = -EINVAL;
-               goto error;
-       }
-       /* if file->private_data == unalloced, alloc*/
-       if (data && data->index == -1) {
-               down_write(&pmem[id].bitmap_sem);
-               index = pmem_allocate(id, vma->vm_end - vma->vm_start);
-               up_write(&pmem[id].bitmap_sem);
-               data->index = index;
-       }
-       /* either no space was available or an error occured */
-       if (!has_allocation(file)) {
-               ret = -EINVAL;
-               printk("pmem: could not find allocation for map.\n");
-               goto error;
-       }
-
-       if (pmem_len(id, data) < vma_size) {
-#if PMEM_DEBUG
-               printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
-                      "size of backing region [%lu].\n", vma_size,
-                      pmem_len(id, data));
-#endif
-               ret = -EINVAL;
-               goto error;
-       }
-
-       vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
-       vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot);
-
-       if (data->flags & PMEM_FLAGS_CONNECTED) {
-               struct pmem_region_node *region_node;
-               struct list_head *elt;
-               if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
-                       printk("pmem: mmap failed in kernel!\n");
-                       ret = -EAGAIN;
-                       goto error;
-               }
-               list_for_each(elt, &data->region_list) {
-                       region_node = list_entry(elt, struct pmem_region_node,
-                                                list);
-                       DLOG("remapping file: %p %lx %lx\n", file,
-                               region_node->region.offset,
-                               region_node->region.len);
-                       if (pmem_remap_pfn_range(id, vma, data,
-                                                region_node->region.offset,
-                                                region_node->region.len)) {
-                               ret = -EAGAIN;
-                               goto error;
-                       }
-               }
-               data->flags |= PMEM_FLAGS_SUBMAP;
-               get_task_struct(current->group_leader);
-               data->task = current->group_leader;
-               data->vma = vma;
-#if PMEM_DEBUG
-               data->pid = current->pid;
-#endif
-               DLOG("submmapped file %p vma %p pid %u\n", file, vma,
-                    current->pid);
-       } else {
-               if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
-                       printk(KERN_INFO "pmem: mmap failed in kernel!\n");
-                       ret = -EAGAIN;
-                       goto error;
-               }
-               data->flags |= PMEM_FLAGS_MASTERMAP;
-               data->pid = current->pid;
-       }
-       vma->vm_ops = &vm_ops;
-error:
-       up_write(&data->sem);
-       return ret;
-}
-
-/* the following are the api for accessing pmem regions by other drivers
- * from inside the kernel */
-int get_pmem_user_addr(struct file *file, unsigned long *start,
-                  unsigned long *len)
-{
-       struct pmem_data *data;
-       if (!is_pmem_file(file) || !has_allocation(file)) {
-#if PMEM_DEBUG
-               printk(KERN_INFO "pmem: requested pmem data from invalid"
-                                 "file.\n");
-#endif
-               return -1;
-       }
-       data = (struct pmem_data *)file->private_data;
-       down_read(&data->sem);
-       if (data->vma) {
-               *start = data->vma->vm_start;
-               *len = data->vma->vm_end - data->vma->vm_start;
-       } else {
-               *start = 0;
-               *len = 0;
-       }
-       up_read(&data->sem);
-       return 0;
-}
-
-int get_pmem_addr(struct file *file, unsigned long *start,
-                 unsigned long *vstart, unsigned long *len)
-{
-       struct pmem_data *data;
-       int id;
-
-       if (!is_pmem_file(file) || !has_allocation(file)) {
-               return -1;
-       }
-
-       data = (struct pmem_data *)file->private_data;
-       if (data->index == -1) {
-#if PMEM_DEBUG
-               printk(KERN_INFO "pmem: requested pmem data from file with no "
-                      "allocation.\n");
-               return -1;
-#endif
-       }
-       id = get_id(file);
-
-       down_read(&data->sem);
-       *start = pmem_start_addr(id, data);
-       *len = pmem_len(id, data);
-       *vstart = (unsigned long)pmem_start_vaddr(id, data);
-       up_read(&data->sem);
-#if PMEM_DEBUG
-       down_write(&data->sem);
-       data->ref++;
-       up_write(&data->sem);
-#endif
-       return 0;
-}
-
-int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
-                 unsigned long *len, struct file **filp)
-{
-       struct file *file;
-
-       file = fget(fd);
-       if (unlikely(file == NULL)) {
-               printk(KERN_INFO "pmem: requested data from file descriptor "
-                      "that doesn't exist.");
-               return -1;
-       }
-
-       if (get_pmem_addr(file, start, vstart, len))
-               goto end;
-
-       if (filp)
-               *filp = file;
-       return 0;
-end:
-       fput(file);
-       return -1;
-}
-
-void put_pmem_file(struct file *file)
-{
-       struct pmem_data *data;
-       int id;
-
-       if (!is_pmem_file(file))
-               return;
-       id = get_id(file);
-       data = (struct pmem_data *)file->private_data;
-#if PMEM_DEBUG
-       down_write(&data->sem);
-       if (data->ref == 0) {
-               printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
-                      pmem[id].dev.name, data->pid);
-               BUG();
-       }
-       data->ref--;
-       up_write(&data->sem);
-#endif
-       fput(file);
-}
-
-void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
-{
-       struct pmem_data *data;
-       int id;
-       void *vaddr;
-       struct pmem_region_node *region_node;
-       struct list_head *elt;
-       void *flush_start, *flush_end;
-
-       if (!is_pmem_file(file) || !has_allocation(file)) {
-               return;
-       }
-
-       id = get_id(file);
-       data = (struct pmem_data *)file->private_data;
-       if (!pmem[id].cached || file->f_flags & O_SYNC)
-               return;
-
-       down_read(&data->sem);
-       vaddr = pmem_start_vaddr(id, data);
-       /* if this isn't a submmapped file, flush the whole thing */
-       if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
-               dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
-               goto end;
-       }
-       /* otherwise, flush the region of the file we are drawing */
-       list_for_each(elt, &data->region_list) {
-               region_node = list_entry(elt, struct pmem_region_node, list);
-               if ((offset >= region_node->region.offset) &&
-                   ((offset + len) <= (region_node->region.offset +
-                       region_node->region.len))) {
-                       flush_start = vaddr + region_node->region.offset;
-                       flush_end = flush_start + region_node->region.len;
-                       dmac_flush_range(flush_start, flush_end);
-                       break;
-               }
-       }
-end:
-       up_read(&data->sem);
-}
-
-static int pmem_connect(unsigned long connect, struct file *file)
-{
-       struct pmem_data *data = (struct pmem_data *)file->private_data;
-       struct pmem_data *src_data;
-       struct file *src_file;
-       int ret = 0, put_needed;
-
-       down_write(&data->sem);
-       /* retrieve the src file and check it is a pmem file with an alloc */
-       src_file = fget_light(connect, &put_needed);
-       DLOG("connect %p to %p\n", file, src_file);
-       if (!src_file) {
-               printk("pmem: src file not found!\n");
-               ret = -EINVAL;
-               goto err_no_file;
-       }
-       if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
-               printk(KERN_INFO "pmem: src file is not a pmem file or has no "
-                      "alloc!\n");
-               ret = -EINVAL;
-               goto err_bad_file;
-       }
-       src_data = (struct pmem_data *)src_file->private_data;
-
-       if (has_allocation(file) && (data->index != src_data->index)) {
-               printk("pmem: file is already mapped but doesn't match this"
-                      " src_file!\n");
-               ret = -EINVAL;
-               goto err_bad_file;
-       }
-       data->index = src_data->index;
-       data->flags |= PMEM_FLAGS_CONNECTED;
-       data->master_fd = connect;
-       data->master_file = src_file;
-
-err_bad_file:
-       fput_light(src_file, put_needed);
-err_no_file:
-       up_write(&data->sem);
-       return ret;
-}
-
-static void pmem_unlock_data_and_mm(struct pmem_data *data,
-                                   struct mm_struct *mm)
-{
-       up_write(&data->sem);
-       if (mm != NULL) {
-               up_write(&mm->mmap_sem);
-               mmput(mm);
-       }
-}
-
-static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
-                                struct mm_struct **locked_mm)
-{
-       int ret = 0;
-       struct mm_struct *mm = NULL;
-       *locked_mm = NULL;
-lock_mm:
-       down_read(&data->sem);
-       if (PMEM_IS_SUBMAP(data)) {
-               mm = get_task_mm(data->task);
-               if (!mm) {
-#if PMEM_DEBUG
-                       printk("pmem: can't remap task is gone!\n");
-#endif
-                       up_read(&data->sem);
-                       return -1;
-               }
-       }
-       up_read(&data->sem);
-
-       if (mm)
-               down_write(&mm->mmap_sem);
-
-       down_write(&data->sem);
-       /* check that the file didn't get mmaped before we could take the
-        * data sem, this should be safe b/c you can only submap each file
-        * once */
-       if (PMEM_IS_SUBMAP(data) && !mm) {
-               pmem_unlock_data_and_mm(data, mm);
-               up_write(&data->sem);
-               goto lock_mm;
-       }
-       /* now check that vma.mm is still there, it could have been
-        * deleted by vma_close before we could get the data->sem */
-       if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
-               /* might as well release this */
-               if (data->flags & PMEM_FLAGS_SUBMAP) {
-                       put_task_struct(data->task);
-                       data->task = NULL;
-                       /* lower the submap flag to show the mm is gone */
-                       data->flags &= ~(PMEM_FLAGS_SUBMAP);
-               }
-               pmem_unlock_data_and_mm(data, mm);
-               return -1;
-       }
-       *locked_mm = mm;
-       return ret;
-}
-
-int pmem_remap(struct pmem_region *region, struct file *file,
-                     unsigned operation)
-{
-       int ret;
-       struct pmem_region_node *region_node;
-       struct mm_struct *mm = NULL;
-       struct list_head *elt, *elt2;
-       int id = get_id(file);
-       struct pmem_data *data = (struct pmem_data *)file->private_data;
-
-       /* pmem region must be aligned on a page boundry */
-       if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
-                !PMEM_IS_PAGE_ALIGNED(region->len))) {
-#if PMEM_DEBUG
-               printk("pmem: request for unaligned pmem suballocation "
-                      "%lx %lx\n", region->offset, region->len);
-#endif
-               return -EINVAL;
-       }
-
-       /* if userspace requests a region of len 0, there's nothing to do */
-       if (region->len == 0)
-               return 0;
-
-       /* lock the mm and data */
-       ret = pmem_lock_data_and_mm(file, data, &mm);
-       if (ret)
-               return 0;
-
-       /* only the owner of the master file can remap the client fds
-        * that back in it */
-       if (!is_master_owner(file)) {
-#if PMEM_DEBUG
-               printk("pmem: remap requested from non-master process\n");
-#endif
-               ret = -EINVAL;
-               goto err;
-       }
-
-       /* check that the requested range is within the src allocation */
-       if (unlikely((region->offset > pmem_len(id, data)) ||
-                    (region->len > pmem_len(id, data)) ||
-                    (region->offset + region->len > pmem_len(id, data)))) {
-#if PMEM_DEBUG
-               printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
-#endif
-               ret = -EINVAL;
-               goto err;
-       }
-
-       if (operation == PMEM_MAP) {
-               region_node = kmalloc(sizeof(struct pmem_region_node),
-                             GFP_KERNEL);
-               if (!region_node) {
-                       ret = -ENOMEM;
-#if PMEM_DEBUG
-                       printk(KERN_INFO "No space to allocate metadata!");
-#endif
-                       goto err;
-               }
-               region_node->region = *region;
-               list_add(&region_node->list, &data->region_list);
-       } else if (operation == PMEM_UNMAP) {
-               int found = 0;
-               list_for_each_safe(elt, elt2, &data->region_list) {
-                       region_node = list_entry(elt, struct pmem_region_node,
-                                     list);
-                       if (region->len == 0 ||
-                           (region_node->region.offset == region->offset &&
-                           region_node->region.len == region->len)) {
-                               list_del(elt);
-                               kfree(region_node);
-                               found = 1;
-                       }
-               }
-               if (!found) {
-#if PMEM_DEBUG
-                       printk("pmem: Unmap region does not map any mapped "
-                               "region!");
-#endif
-                       ret = -EINVAL;
-                       goto err;
-               }
-       }
-
-       if (data->vma && PMEM_IS_SUBMAP(data)) {
-               if (operation == PMEM_MAP)
-                       ret = pmem_remap_pfn_range(id, data->vma, data,
-                                                  region->offset, region->len);
-               else if (operation == PMEM_UNMAP)
-                       ret = pmem_unmap_pfn_range(id, data->vma, data,
-                                                  region->offset, region->len);
-       }
-
-err:
-       pmem_unlock_data_and_mm(data, mm);
-       return ret;
-}
-
-static void pmem_revoke(struct file *file, struct pmem_data *data)
-{
-       struct pmem_region_node *region_node;
-       struct list_head *elt, *elt2;
-       struct mm_struct *mm = NULL;
-       int id = get_id(file);
-       int ret = 0;
-
-       data->master_file = NULL;
-       ret = pmem_lock_data_and_mm(file, data, &mm);
-       /* if lock_data_and_mm fails either the task that mapped the fd, or
-        * the vma that mapped it have already gone away, nothing more
-        * needs to be done */
-       if (ret)
-               return;
-       /* unmap everything */
-       /* delete the regions and region list nothing is mapped any more */
-       if (data->vma)
-               list_for_each_safe(elt, elt2, &data->region_list) {
-                       region_node = list_entry(elt, struct pmem_region_node,
-                                                list);
-                       pmem_unmap_pfn_range(id, data->vma, data,
-                                            region_node->region.offset,
-                                            region_node->region.len);
-                       list_del(elt);
-                       kfree(region_node);
-       }
-       /* delete the master file */
-       pmem_unlock_data_and_mm(data, mm);
-}
-
-static void pmem_get_size(struct pmem_region *region, struct file *file)
-{
-       struct pmem_data *data = (struct pmem_data *)file->private_data;
-       int id = get_id(file);
-
-       if (!has_allocation(file)) {
-               region->offset = 0;
-               region->len = 0;
-               return;
-       } else {
-               region->offset = pmem_start_addr(id, data);
-               region->len = pmem_len(id, data);
-       }
-       DLOG("offset %lx len %lx\n", region->offset, region->len);
-}
-
-
-static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct pmem_data *data;
-       int id = get_id(file);
-
-       switch (cmd) {
-       case PMEM_GET_PHYS:
-               {
-                       struct pmem_region region;
-                       DLOG("get_phys\n");
-                       if (!has_allocation(file)) {
-                               region.offset = 0;
-                               region.len = 0;
-                       } else {
-                               data = (struct pmem_data *)file->private_data;
-                               region.offset = pmem_start_addr(id, data);
-                               region.len = pmem_len(id, data);
-                       }
-                       //printk(KERN_INFO "pmem: request for physical address of pmem region "
-                       //              "from process %d.\n", current->pid);
-                       if (copy_to_user((void __user *)arg, &region,
-                                               sizeof(struct pmem_region)))
-                               return -EFAULT;
-                       break;
-               }
-       case PMEM_MAP:
-               {
-                       struct pmem_region region;
-                       if (copy_from_user(&region, (void __user *)arg,
-                                               sizeof(struct pmem_region)))
-                               return -EFAULT;
-                       data = (struct pmem_data *)file->private_data;
-                       return pmem_remap(&region, file, PMEM_MAP);
-               }
-               break;
-       case PMEM_UNMAP:
-               {
-                       struct pmem_region region;
-                       if (copy_from_user(&region, (void __user *)arg,
-                                               sizeof(struct pmem_region)))
-                               return -EFAULT;
-                       data = (struct pmem_data *)file->private_data;
-                       return pmem_remap(&region, file, PMEM_UNMAP);
-                       break;
-               }
-       case PMEM_GET_SIZE:
-               {
-                       struct pmem_region region;
-                       DLOG("get_size\n");
-                       pmem_get_size(&region, file);
-                       if (copy_to_user((void __user *)arg, &region,
-                                               sizeof(struct pmem_region)))
-                               return -EFAULT;
-                       break;
-               }
-       case PMEM_GET_TOTAL_SIZE:
-               {
-                       struct pmem_region region;
-                       DLOG("get total size\n");
-                       region.offset = 0;
-                       get_id(file);
-                       region.len = pmem[id].size;
-                       if (copy_to_user((void __user *)arg, &region,
-                                               sizeof(struct pmem_region)))
-                               return -EFAULT;
-                       break;
-               }
-       case PMEM_ALLOCATE:
-               {
-                       if (has_allocation(file))
-                               return -EINVAL;
-                       data = (struct pmem_data *)file->private_data;
-                       data->index = pmem_allocate(id, arg);
-                       break;
-               }
-       case PMEM_CONNECT:
-               DLOG("connect\n");
-               return pmem_connect(arg, file);
-               break;
-       case PMEM_CACHE_FLUSH:
-               {
-                       struct pmem_region region;
-                       DLOG("flush\n");
-                       if (copy_from_user(&region, (void __user *)arg,
-                                          sizeof(struct pmem_region)))
-                               return -EFAULT;
-                       flush_pmem_file(file, region.offset, region.len);
-                       break;
-               }
-       default:
-               if (pmem[id].ioctl)
-                       return pmem[id].ioctl(file, cmd, arg);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-#if PMEM_DEBUG
-static ssize_t debug_open(struct inode *inode, struct file *file)
-{
-       file->private_data = inode->i_private;
-       return 0;
-}
-
-static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
-                         loff_t *ppos)
-{
-       struct list_head *elt, *elt2;
-       struct pmem_data *data;
-       struct pmem_region_node *region_node;
-       int id = (int)file->private_data;
-       const int debug_bufmax = 4096;
-       static char buffer[4096];
-       int n = 0;
-
-       DLOG("debug open\n");
-       n = scnprintf(buffer, debug_bufmax,
-                     "pid #: mapped regions (offset, len) (offset,len)...\n");
-
-       mutex_lock(&pmem[id].data_list_lock);
-       list_for_each(elt, &pmem[id].data_list) {
-               data = list_entry(elt, struct pmem_data, list);
-               down_read(&data->sem);
-               n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
-                               data->pid);
-               list_for_each(elt2, &data->region_list) {
-                       region_node = list_entry(elt2, struct pmem_region_node,
-                                     list);
-                       n += scnprintf(buffer + n, debug_bufmax - n,
-                                       "(%lx,%lx) ",
-                                       region_node->region.offset,
-                                       region_node->region.len);
-               }
-               n += scnprintf(buffer + n, debug_bufmax - n, "\n");
-               up_read(&data->sem);
-       }
-       mutex_unlock(&pmem[id].data_list_lock);
-
-       n++;
-       buffer[n] = 0;
-       return simple_read_from_buffer(buf, count, ppos, buffer, n);
-}
-
-static struct file_operations debug_fops = {
-       .read = debug_read,
-       .open = debug_open,
-};
-#endif
-
-#if 0
-static struct miscdevice pmem_dev = {
-       .name = "pmem",
-       .fops = &pmem_fops,
-};
-#endif
-
-int pmem_setup(struct android_pmem_platform_data *pdata,
-              long (*ioctl)(struct file *, unsigned int, unsigned long),
-              int (*release)(struct inode *, struct file *))
-{
-       int err = 0;
-       int i, index = 0;
-       int id = id_count;
-       id_count++;
-
-       pmem[id].no_allocator = pdata->no_allocator;
-       pmem[id].cached = pdata->cached;
-       pmem[id].buffered = pdata->buffered;
-       pmem[id].base = pdata->start;
-       pmem[id].size = pdata->size;
-       pmem[id].ioctl = ioctl;
-       pmem[id].release = release;
-       init_rwsem(&pmem[id].bitmap_sem);
-       mutex_init(&pmem[id].data_list_lock);
-       INIT_LIST_HEAD(&pmem[id].data_list);
-       pmem[id].dev.name = pdata->name;
-       pmem[id].dev.minor = id;
-       pmem[id].dev.fops = &pmem_fops;
-       printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
-
-       err = misc_register(&pmem[id].dev);
-       if (err) {
-               printk(KERN_ALERT "Unable to register pmem driver!\n");
-               goto err_cant_register_device;
-       }
-       pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
-
-       pmem[id].bitmap = kmalloc(pmem[id].num_entries *
-                                 sizeof(struct pmem_bits), GFP_KERNEL);
-       if (!pmem[id].bitmap)
-               goto err_no_mem_for_metadata;
-
-       memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
-                                         pmem[id].num_entries);
-
-       for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
-               if ((pmem[id].num_entries) &  1<<i) {
-                       PMEM_ORDER(id, index) = i;
-                       index = PMEM_NEXT_INDEX(id, index);
-               }
-       }
-
-       if (pmem[id].cached)
-               pmem[id].vbase = ioremap_cached(pmem[id].base,
-                                               pmem[id].size);
-#ifdef ioremap_ext_buffered
-       else if (pmem[id].buffered)
-               pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
-                                                     pmem[id].size);
-#endif
-       else
-               pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
-
-       if (pmem[id].vbase == 0)
-               goto error_cant_remap;
-
-       pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
-       if (pmem[id].no_allocator)
-               pmem[id].allocated = 0;
-
-#if PMEM_DEBUG
-       debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
-                           &debug_fops);
-#endif
-       return 0;
-error_cant_remap:
-       kfree(pmem[id].bitmap);
-err_no_mem_for_metadata:
-       misc_deregister(&pmem[id].dev);
-err_cant_register_device:
-       return -1;
-}
-
-static int pmem_probe(struct platform_device *pdev)
-{
-       struct android_pmem_platform_data *pdata;
-
-       if (!pdev || !pdev->dev.platform_data) {
-               printk(KERN_ALERT "Unable to probe pmem!\n");
-               return -1;
-       }
-       pdata = pdev->dev.platform_data;
-       return pmem_setup(pdata, NULL, NULL);
-}
-
-
-static int pmem_remove(struct platform_device *pdev)
-{
-       int id = pdev->id;
-       __free_page(pfn_to_page(pmem[id].garbage_pfn));
-       misc_deregister(&pmem[id].dev);
-       return 0;
-}
-
-static struct platform_driver pmem_driver = {
-       .probe = pmem_probe,
-       .remove = pmem_remove,
-       .driver = { .name = "android_pmem" }
-};
-
-
-static int __init pmem_init(void)
-{
-       return platform_driver_register(&pmem_driver);
-}
-
-static void __exit pmem_exit(void)
-{
-       platform_driver_unregister(&pmem_driver);
-}
-
-module_init(pmem_init);
-module_exit(pmem_exit);
-
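
The block above removes the Android pmem character driver wholesale. For context, its ioctl interface (PMEM_GET_TOTAL_SIZE, PMEM_GET_SIZE, PMEM_MAP and friends, dispatched in pmem_ioctl() via copy_from_user()/copy_to_user() on a struct pmem_region) was driven from userspace roughly as in the hypothetical sketch below. The /dev/pmem node name and the <linux/android_pmem.h> header are assumptions about the surrounding tree, not something this diff provides.

    /*
     * Hypothetical userspace sketch of the ioctl interface removed above.
     * Assumes a /dev/pmem node and a matching <linux/android_pmem.h> header
     * (struct pmem_region and the PMEM_* ioctl numbers).
     */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/android_pmem.h>

    int main(void)
    {
            struct pmem_region region;
            void *buf;
            int fd = open("/dev/pmem", O_RDWR);

            if (fd < 0)
                    return 1;

            /* PMEM_GET_TOTAL_SIZE reports the size of the whole pool. */
            if (ioctl(fd, PMEM_GET_TOTAL_SIZE, &region) < 0)
                    goto out;
            printf("pmem pool: %lu bytes\n", region.len);

            /* In this driver mmap() also allocates backing pmem for an fd
             * that has no allocation yet. */
            buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (buf == MAP_FAILED)
                    goto out;

            /* PMEM_GET_SIZE reports this fd's allocation offset/length. */
            if (ioctl(fd, PMEM_GET_SIZE, &region) == 0)
                    printf("allocation: offset %#lx len %lu\n",
                           region.offset, region.len);

            munmap(buf, 4096);
    out:
            close(fd);
            return 0;
    }
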
index cfbddbef11de3b67c54be85797dd124843ca8596..43d073bc1d9c5fef61a1cb3a1e741a6cd6b6e0f9 100644 (file)
@@ -903,6 +903,6 @@ static void __exit spear_pcie_gadget_exit(void)
 }
 module_exit(spear_pcie_gadget_exit);
 
-MODULE_ALIAS("pcie-gadget-spear");
+MODULE_ALIAS("platform:pcie-gadget-spear");
 MODULE_AUTHOR("Pratyush Anand");
 MODULE_LICENSE("GPL");
index f7f7a3c9f3cd74ea10f4fe7f6f5d57585d20319d..7f9935ad89a2e7f33416f950896cb15fa32f38ea 100755 (executable)
@@ -247,6 +247,9 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
                goto idata_err;
        }
 
+       if (!idata->buf_bytes)
+               return idata;
+
        idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
                err = -ENOMEM;
@@ -293,25 +296,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        if (IS_ERR(idata))
                return PTR_ERR(idata);
 
-       cmd.opcode = idata->ic.opcode;
-       cmd.arg = idata->ic.arg;
-       cmd.flags = idata->ic.flags;
-
-       data.sg = &sg;
-       data.sg_len = 1;
-       data.blksz = idata->ic.blksz;
-       data.blocks = idata->ic.blocks;
-
-       sg_init_one(data.sg, idata->buf, idata->buf_bytes);
-
-       if (idata->ic.write_flag)
-               data.flags = MMC_DATA_WRITE;
-       else
-               data.flags = MMC_DATA_READ;
-
-       mrq.cmd = &cmd;
-       mrq.data = &data;
-
        md = mmc_blk_get(bdev->bd_disk);
        if (!md) {
                err = -EINVAL;
@@ -324,6 +308,48 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
                goto cmd_done;
        }
 
+       cmd.opcode = idata->ic.opcode;
+       cmd.arg = idata->ic.arg;
+       cmd.flags = idata->ic.flags;
+
+       if (idata->buf_bytes) {
+               data.sg = &sg;
+               data.sg_len = 1;
+               data.blksz = idata->ic.blksz;
+               data.blocks = idata->ic.blocks;
+
+               sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+               if (idata->ic.write_flag)
+                       data.flags = MMC_DATA_WRITE;
+               else
+                       data.flags = MMC_DATA_READ;
+
+               /* data.flags must already be set before doing this. */
+               mmc_set_data_timeout(&data, card);
+
+               /* Allow overriding the timeout_ns for empirical tuning. */
+               if (idata->ic.data_timeout_ns)
+                       data.timeout_ns = idata->ic.data_timeout_ns;
+
+               if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+                       /*
+                        * Pretend this is a data transfer and rely on the
+                        * host driver to compute timeout.  When all host
+                        * drivers support cmd.cmd_timeout for R1B, this
+                        * can be changed to:
+                        *
+                        *     mrq.data = NULL;
+                        *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+                        */
+                       data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+               }
+
+               mrq.data = &data;
+       }
+
+       mrq.cmd = &cmd;
+
        mmc_claim_host(card->host);
 
        if (idata->ic.is_acmd) {
@@ -332,24 +358,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
                        goto cmd_rel_host;
        }
 
-       /* data.flags must already be set before doing this. */
-       mmc_set_data_timeout(&data, card);
-       /* Allow overriding the timeout_ns for empirical tuning. */
-       if (idata->ic.data_timeout_ns)
-               data.timeout_ns = idata->ic.data_timeout_ns;
-
-       if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
-               /*
-                * Pretend this is a data transfer and rely on the host driver
-                * to compute timeout.  When all host drivers support
-                * cmd.cmd_timeout for R1B, this can be changed to:
-                *
-                *     mrq.data = NULL;
-                *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
-                */
-               data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
-       }
-
        mmc_wait_for_req(card->host, &mrq);
 
        if (cmd.error) {
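
The reordering above means mrq.data is only populated when idata->buf_bytes is non-zero, so commands without a data stage can now be issued through the block-device ioctl. A hypothetical userspace sketch follows; the /dev/mmcblk0 node is an assumption, and CMD0 is chosen only because it needs neither a response nor a buffer (issuing it for real would reset the card, so treat this as illustration only).

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/major.h>
    #include <linux/mmc/ioctl.h>

    int main(void)
    {
            struct mmc_ioc_cmd ic;
            int fd = open("/dev/mmcblk0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&ic, 0, sizeof(ic));
            ic.opcode = 0;          /* CMD0: no response, no data stage */
            ic.arg    = 0;
            ic.flags  = 0;
            ic.blksz  = 0;          /* buf_bytes == 0: kernel skips mrq.data */
            ic.blocks = 0;

            if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
                    perror("MMC_IOC_CMD");

            close(fd);
            return 0;
    }
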
index 21332db0e5d41780717fcfb7ec07bc56f36f886a..f18ba0aecca70c445661578d98dcfe3350727cc9 100755 (executable)
@@ -1107,7 +1107,7 @@ static void mmc_power_up(struct mmc_host *host)
        mmc_host_clk_release(host);
 }
 
-static void mmc_power_off(struct mmc_host *host)
+void mmc_power_off(struct mmc_host *host)
 {
        mmc_host_clk_hold(host);
 
@@ -1227,8 +1227,7 @@ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
 }
 
 /*
- * Remove the current bus handler from a host. Assumes that there are
- * no interesting cards left, so the bus is powered down.
+ * Remove the current bus handler from a host.
  */
 void mmc_detach_bus(struct mmc_host *host)
 {
@@ -1245,8 +1244,6 @@ void mmc_detach_bus(struct mmc_host *host)
 
        spin_unlock_irqrestore(&host->lock, flags);
 
-       mmc_power_off(host);
-
        mmc_bus_put(host);
 }
 
@@ -1894,6 +1891,7 @@ void mmc_stop_host(struct mmc_host *host)
 
                mmc_claim_host(host);
                mmc_detach_bus(host);
+               mmc_power_off(host);
                mmc_release_host(host);
                mmc_bus_put(host);
                return;
@@ -2019,6 +2017,7 @@ int mmc_suspend_host(struct mmc_host *host)
                                host->bus_ops->remove(host);
                        mmc_claim_host(host);
                        mmc_detach_bus(host);
+                       mmc_power_off(host);
                        mmc_release_host(host);
                        host->pm_flags = 0;
                        err = 0;
@@ -2123,6 +2122,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
                        host->bus_ops->remove(host);
 
                mmc_detach_bus(host);
+               mmc_power_off(host);
                mmc_release_host(host);
                host->pm_flags = 0;
                break;
index d9411ed2a39bf6490a5a788629d45a628e14e6fe..14664f1fb16fb560c526dd30817cc10ae6db1adf 100644 (file)
@@ -43,6 +43,7 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
                           bool cmd11);
 void mmc_set_timing(struct mmc_host *host, unsigned int timing);
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
+void mmc_power_off(struct mmc_host *host);
 
 static inline void mmc_delay(unsigned int ms)
 {
index f0216b8743e08ee6aa6d6972e33a9577e9d55953..4847c7392045e8db32eb25929708eed81db0be09 100755 (executable)
@@ -359,6 +359,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
                 * card has the Enhanced area enabled.  If so, export enhanced
                 * area offset and size to user by adding sysfs interface.
                 */
+               card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
                if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
                    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
                        u8 hc_erase_grp_sz =
@@ -405,6 +406,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
        if (card->ext_csd.rev >= 5)
                card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
 
+       card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
        if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
                card->erased_byte = 0xFF;
        else
@@ -848,7 +850,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                         *
                         * WARNING: eMMC rules are NOT the same as SD DDR
                         */
-                       if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
+                       if (ddr == MMC_1_2V_DDR_MODE) {
                                err = mmc_set_signal_voltage(host,
                                        MMC_SIGNAL_VOLTAGE_120, 0);
                                if (err)
@@ -911,6 +913,7 @@ static void mmc_detect(struct mmc_host *host)
 
                mmc_claim_host(host);
                mmc_detach_bus(host);
+               mmc_power_off(host);
                mmc_release_host(host);
        }
 }
index dd1bfdd97782024889ab9142e2a3ce66ce19270b..2ee448439041322e6f5581e6a4fd8cc503e67922 100755 (executable)
@@ -1074,6 +1074,7 @@ static void mmc_sd_detect(struct mmc_host *host)
 
                mmc_claim_host(host);
                mmc_detach_bus(host);
+               mmc_power_off(host);
                mmc_release_host(host);
        }
 }
index 6c67160189e45393231a96d100a12ef3a9e049e5..e1bf6274cf0c9e677825e3fab4d036ea64a5e1e2 100755 (executable)
@@ -622,6 +622,7 @@ out:
 
                mmc_claim_host(host);
                mmc_detach_bus(host);
+               mmc_power_off(host);
                mmc_release_host(host);
        }
 }
@@ -689,7 +690,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
        }
 
        if (!err && host->sdio_irqs)
-               mmc_signal_sdio_irq(host);
+               wake_up_process(host->sdio_irq_thread);
        mmc_release_host(host);
 
        /*
index 03ead028d2ce147ac9a33387814b040ec1e91e53..d58ae91533791cf4acbe3dd8352054e89bce24b3 100644 (file)
 
 #include "sdio_ops.h"
 
-static int process_sdio_pending_irqs(struct mmc_card *card)
+static int process_sdio_pending_irqs(struct mmc_host *host)
 {
+       struct mmc_card *card = host->card;
        int i, ret, count;
        unsigned char pending;
        struct sdio_func *func;
 
        /*
         * Optimization, if there is only 1 function interrupt registered
-        * call irq handler directly
+        * and we know an IRQ was signaled then call irq handler directly.
+        * Otherwise do the full probe.
         */
        func = card->sdio_single_irq;
-       if (func) {
+       if (func && host->sdio_irq_pending) {
                func->irq_handler(func);
                return 1;
        }
@@ -115,7 +117,8 @@ static int sdio_irq_thread(void *_host)
                ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
                if (ret)
                        break;
-               ret = process_sdio_pending_irqs(host->card);
+               ret = process_sdio_pending_irqs(host);
+               host->sdio_irq_pending = false;
                mmc_release_host(host);
 
                /*
index aa8039f473c485ad4fcbe7d9c5f5f16fcf6f3e25..b6cd3867f72321f5b639fac700bd07491f334852 100644 (file)
@@ -468,7 +468,14 @@ err:
 static inline unsigned int ns_to_clocks(struct atmel_mci *host,
                                        unsigned int ns)
 {
-       return (ns * (host->bus_hz / 1000000) + 999) / 1000;
+       /*
+        * It is easier here to use us instead of ns for the timeout,
+        * it prevents from overflows during calculation.
+        */
+       unsigned int us = DIV_ROUND_UP(ns, 1000);
+
+       /* Maximum clock frequency is host->bus_hz/2 */
+       return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
 }
 
 static void atmci_set_timeout(struct atmel_mci *host,
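
The rewritten ns_to_clocks() above avoids a 32-bit overflow: with, say, bus_hz = 50 MHz and a 100 ms (100 000 000 ns) timeout, the old expression ns * (bus_hz / 1000000) is 5 000 000 000 and wraps in an unsigned int, while the new form first rounds up to microseconds and multiplies by the cycle count per microsecond at the worst-case card clock of bus_hz / 2. A standalone demonstration, with illustrative numbers that are not taken from the patch:

    /* Demonstration of the overflow fixed by the atmel-mci hunk above;
     * the 50 MHz clock and 100 ms timeout are example values only. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int old_ns_to_clocks(unsigned int bus_hz, unsigned int ns)
    {
            return (ns * (bus_hz / 1000000) + 999) / 1000;  /* ns * 50 wraps */
    }

    static unsigned int new_ns_to_clocks(unsigned int bus_hz, unsigned int ns)
    {
            unsigned int us = DIV_ROUND_UP(ns, 1000);

            /* cycles per microsecond at the maximum card clock bus_hz / 2 */
            return us * DIV_ROUND_UP(bus_hz, 2000000);
    }

    int main(void)
    {
            unsigned int bus_hz = 50000000;   /* 50 MHz controller clock */
            unsigned int ns = 100000000;      /* 100 ms timeout */

            printf("old (wrapped): %u clocks\n", old_ns_to_clocks(bus_hz, ns));
            printf("new:           %u clocks\n", new_ns_to_clocks(bus_hz, ns));
            return 0;
    }
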
index fe140724a02eb878f83e45ab0654ec7e9dfeda43..9394d0b77ec57ec3d3d036c47fe8268bb57a4d9c 100644 (file)
@@ -557,7 +557,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
 {
        /* First check for errors */
-       if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+       if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+                     MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                u32 remain, success;
 
                /* Terminate the DMA transfer */
@@ -636,8 +637,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
        }
 
        if (!cmd->data || cmd->error) {
-               if (host->data)
+               if (host->data) {
+                       /* Terminate the DMA transfer */
+                       if (dma_inprogress(host))
+                               mmci_dma_data_error(host);
                        mmci_stop_data(host);
+               }
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
@@ -837,8 +842,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
 
                data = host->data;
-               if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
-                             MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+               if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+                             MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+                             MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);
 
                cmd = host->cmd;
index cc20e025932593db93deb84d3e27f05bf9ed89c0..e12fbc510bbda4a7b936fdd62b5ca37d39ba88cb 100644 (file)
@@ -731,6 +731,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                                "failed to config DMA channel. Falling back to PIO\n");
                        dma_release_channel(host->dma);
                        host->do_dma = 0;
+                       host->dma = NULL;
                }
        }
 
index ba31abee9487f80160b3845e1b1750a5339a1d1f..6fe8cede417986116f44bffc989b4f7264380b50 100644 (file)
@@ -139,8 +139,9 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
                imx_data->scratchpad = val;
                return;
        case SDHCI_COMMAND:
-               if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
-                       && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+               if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
+                    host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
+                   (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
                        val |= SDHCI_CMD_ABORTCMD;
                writel(val << 16 | imx_data->scratchpad,
                        host->ioaddr + SDHCI_TRANSFER_MODE);
@@ -244,8 +245,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
        }
        pltfm_host->priv = imx_data;
 
-       if (!cpu_is_mx25())
-               host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+       host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
 
        if (cpu_is_mx25() || cpu_is_mx35()) {
                /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
index 32aeb422d3895385bdaa48aa8db1f1d0f28923ab..d517a216eaa95bb83442db98bc37306035afe71e 100644 (file)
@@ -1340,8 +1340,7 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
                    (ios->timing == MMC_TIMING_UHS_SDR104) ||
                    (ios->timing == MMC_TIMING_UHS_DDR50) ||
-                   (ios->timing == MMC_TIMING_UHS_SDR25) ||
-                   (ios->timing == MMC_TIMING_UHS_SDR12))
+                   (ios->timing == MMC_TIMING_UHS_SDR25))
                        ctrl |= SDHCI_CTRL_HISPD;
 
                ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -2227,9 +2226,8 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
        /* Disable tuning since we are suspending */
        if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
            host->tuning_mode == SDHCI_TUNING_MODE_1) {
+               del_timer_sync(&host->tuning_timer);
                host->flags &= ~SDHCI_NEEDS_RETUNING;
-               mod_timer(&host->tuning_timer, jiffies +
-                       host->tuning_count * HZ);
        }
 
        ret = mmc_suspend_host(host->mmc);
index d4455ffbefd8a931317beaad43eefb563b10c62b..52f4b644766a9511c4714471063c7557b252a745 100644 (file)
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
 static int firmware_rom_wait_states = 0x1C;
 #endif
 
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
 MODULE_PARM_DESC(firmware_rom_wait_states,
                 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
 
index b78f23169d4e7dce23848eba02b7cf9b8f6cd43f..8cd983cdc6439b3ef8def896128a16d2d5186b5e 100644 (file)
@@ -284,6 +284,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
        dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
        dev->mtd.erasesize = erase_size;
        dev->mtd.writesize = 1;
+       dev->mtd.writebufsize = PAGE_SIZE;
        dev->mtd.type = MTD_RAM;
        dev->mtd.flags = MTD_CAP_RAM;
        dev->mtd.erase = block2mtd_erase;
index 772a0ff89e0f7e316279d82632d0893ad89b0f64..09d5b5aaea578e573128f87adb67fae0a6bf1588 100644 (file)
@@ -636,6 +636,7 @@ static int __init lart_flash_init (void)
    mtd.name = module_name;
    mtd.type = MTD_NORFLASH;
    mtd.writesize = 1;
+   mtd.writebufsize = 4;
    mtd.flags = MTD_CAP_NORFLASH;
    mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
    mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
index 35180e475c4c565fded31dc91d6fe05c91aec805..9fad104d4aab79ec171ce13bf35d05ba217a9132 100644 (file)
@@ -930,6 +930,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
 
        flash->mtd.dev.parent = &spi->dev;
        flash->page_size = info->page_size;
+       flash->mtd.writebufsize = flash->page_size;
 
        if (info->addr_width)
                flash->addr_width = info->addr_width;
index 1e2c430aaad241b112d4c33a3c90d1db1e3b3efe..867710a09a48f83084b532cbf6321a55ff4006a9 100644 (file)
@@ -406,6 +406,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
        flash->mtd.flags        = MTD_CAP_NORFLASH;
        flash->mtd.erasesize    = flash_info->erase_size;
        flash->mtd.writesize    = flash_info->page_size;
+       flash->mtd.writebufsize = flash_info->page_size;
        flash->mtd.size         = flash_info->page_size * flash_info->nr_pages;
        flash->mtd.erase        = sst25l_erase;
        flash->mtd.read         = sst25l_read;
index 783db0e48819dce8a0a1afaf31d7acb3b019fa0e..444d0bd8f23d0193fb11ad22534a57fcb543c659 100755 (executable)
@@ -230,7 +230,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
 
        mutex_lock(&dev->lock);
 
-       if (dev->open++)
+       if (dev->open)
                goto unlock;
 
        kref_get(&dev->ref);
@@ -250,6 +250,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
                goto error_release;
 
 unlock:
+       dev->open++;
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
index 3f92731a5b9ebf288ed83602c36972e5d32dee8d..9f8658ef946711469ef3625933ed6fd7c60959ef 100644 (file)
@@ -320,6 +320,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
                        ops.mode = MTD_OOB_RAW;
                        ops.datbuf = kbuf;
                        ops.oobbuf = NULL;
+                       ops.ooboffs = 0;
                        ops.len = len;
 
                        ret = mtd->write_oob(mtd, *ppos, &ops);
index e3e40f4403235540dc5ad5f6e3efd35aa981f647..43130e8aceac186698fb4d17668e3df6617acc98 100644 (file)
@@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
        size_t retlen;
 
        for (page = 0; page < cxt->oops_pages; page++) {
+               if (mtd->block_isbad &&
+                   mtd->block_isbad(mtd, page * record_size))
+                       continue;
                /* Assume the page is used */
                mark_page_used(cxt, page);
                ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
@@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 
        /* oops_page_used is a bit field */
        cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
-                       BITS_PER_LONG));
+                       BITS_PER_LONG) * sizeof(unsigned long));
        if (!cxt->oops_page_used) {
                printk(KERN_ERR "mtdoops: could not allocate page array\n");
                return;
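
The array allocated above is a bit field with one bit per oops page, so the vmalloc() size has to be the number of longs times sizeof(unsigned long), not just the number of longs. With 4096 oops pages on a 32-bit build (illustrative numbers), DIV_ROUND_UP(4096, 32) is 128 longs, i.e. 512 bytes; the old call asked for only 128 bytes, so the later mark_page_used() bit operations could write past the end of the allocation.
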
index 36f2ef83798689f133bb189ced14ea35bf70b4f9..24bb84534440d63e87b03e313b9be8f8e56ad0f4 100644 (file)
@@ -2104,14 +2104,22 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 
 /**
  * nand_fill_oob - [Internal] Transfer client buffer to oob
- * @chip:      nand chip structure
+ * @mtd:       MTD device structure
  * @oob:       oob data buffer
  * @len:       oob data write length
  * @ops:       oob ops structure
  */
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
-                                               struct mtd_oob_ops *ops)
+static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
+                             struct mtd_oob_ops *ops)
 {
+       struct nand_chip *chip = mtd->priv;
+
+       /*
+        * Initialise to all 0xFF, to avoid the possibility of left over OOB
+        * data from a previous OOB read.
+        */
+       memset(chip->oob_poi, 0xff, mtd->oobsize);
+
        switch (ops->mode) {
 
        case MTD_OOB_PLACE:
@@ -2208,10 +2216,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
            (chip->pagebuf << chip->page_shift) < (to + ops->len))
                chip->pagebuf = -1;
 
-       /* If we're not given explicit OOB data, let it be 0xFF */
-       if (likely(!oob))
-               memset(chip->oob_poi, 0xff, mtd->oobsize);
-
        /* Don't allow multipage oob writes with offset */
        if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
                return -EINVAL;
@@ -2233,8 +2237,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
 
                if (unlikely(oob)) {
                        size_t len = min(oobwritelen, oobmaxlen);
-                       oob = nand_fill_oob(chip, oob, len, ops);
+                       oob = nand_fill_oob(mtd, oob, len, ops);
                        oobwritelen -= len;
+               } else {
+                       /* We still need to erase leftover OOB data */
+                       memset(chip->oob_poi, 0xff, mtd->oobsize);
                }
 
                ret = chip->write_page(mtd, chip, wbuf, page, cached,
@@ -2408,10 +2415,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
        if (page == chip->pagebuf)
                chip->pagebuf = -1;
 
-       memset(chip->oob_poi, 0xff, mtd->oobsize);
-       nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
+       nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
        status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
-       memset(chip->oob_poi, 0xff, mtd->oobsize);
 
        if (status)
                return status;
index 400bd058c0c5e2056221a5d65ce0d0e581a58423..22234a1fe52916e3f553f212bef12f345b3a0455 100644 (file)
@@ -360,6 +360,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
 
                buf += mtd->oobsize + mtd->writesize;
                len -= mtd->writesize;
+               offs += mtd->writesize;
        }
        return 0;
 }
index 1fb3b3a805818edca37ed2e07bfd8bd90b3d0f4b..30689cc2b3c7d2183cecbb14f095c753c8bf62a1 100644 (file)
@@ -685,6 +685,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
                 * OOB, ignore such double bit errors
                 */
                if (is_buf_blank(buf, mtd->writesize))
+                       info->retcode = ERR_NONE;
+               else
                        mtd->ecc_stats.failed++;
        }
 
@@ -813,7 +815,7 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
        info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
        /* set info fields needed to read id */
        info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
-       info->reg_ndcr = ndcr;
+       info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
        info->cmdset = &default_cmdset;
 
        info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
@@ -882,7 +884,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
        struct pxa3xx_nand_info *info = mtd->priv;
        struct platform_device *pdev = info->pdev;
        struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
-       struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} };
+       struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
        const struct pxa3xx_nand_flash *f = NULL;
        struct nand_chip *chip = mtd->priv;
        uint32_t id = -1;
@@ -942,8 +944,10 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
        pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
        if (f->flash_width == 16)
                pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
+       pxa3xx_flash_ids[1].name = NULL;
+       def = pxa3xx_flash_ids;
 KEEP_CONFIG:
-       if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids))
+       if (nand_scan_ident(mtd, 1, def))
                return -ENODEV;
        /* calculate addressing information */
        info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1;
@@ -954,9 +958,9 @@ KEEP_CONFIG:
                info->row_addr_cycles = 2;
        mtd->name = mtd_names[0];
        chip->ecc.mode = NAND_ECC_HW;
-       chip->ecc.size = f->page_size;
+       chip->ecc.size = info->page_size;
 
-       chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
+       chip->options = (info->reg_ndcr & NDCR_DWIDTH_M) ? NAND_BUSWIDTH_16 : 0;
        chip->options |= NAND_NO_AUTOINCR;
        chip->options |= NAND_NO_READRDY;
 
index 7a87d07cd79f4bcae8dcf11241d0b052fa765cd9..4938bd0b024a96484a889c2f16c4b59566b30bf8 100644 (file)
@@ -297,6 +297,9 @@ static struct mtd_part_parser redboot_parser = {
        .name = "RedBoot",
 };
 
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("RedBoot");
+
 static int __init redboot_parser_init(void)
 {
        return register_mtd_parser(&redboot_parser);
index ed3d6cd2c6dca216e097bfd5491544283be5ac11..0e34d564941ac05af58f89c550aeea54e9410bec 100644 (file)
@@ -1256,7 +1256,7 @@ static void sm_remove_dev(struct mtd_blktrans_dev *dev)
 
 static struct mtd_blktrans_ops sm_ftl_ops = {
        .name           = "smblk",
-       .major          = -1,
+       .major          = 0,
        .part_bits      = SM_FTL_PARTN_BITS,
        .blksize        = SM_SECTOR_SIZE,
        .getgeo         = sm_getgeo,
index 531625fc9259cdd167f0ade4755cf1f71cdefaa2..129bad2e4080579bedaa22cc995d7718dead9b1c 100644 (file)
@@ -277,6 +277,12 @@ static int __init mtd_stresstest_init(void)
               (unsigned long long)mtd->size, mtd->erasesize,
               pgsize, ebcnt, pgcnt, mtd->oobsize);
 
+       if (ebcnt < 2) {
+               printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+               err = -ENOSPC;
+               goto out_put_mtd;
+       }
+
        /* Read or write up 2 eraseblocks at a time */
        bufsize = mtd->erasesize * 2;
 
@@ -315,6 +321,7 @@ out:
        kfree(bbt);
        vfree(writebuf);
        vfree(readbuf);
+out_put_mtd:
        put_mtd_device(mtd);
        if (err)
                printk(PRINT_PREF "error %d occurred\n", err);
index 191f3bb3c41a7440a6cf831655c6431f31ec55f6..cdea6692dea0a06cc27120c0fa5295beefcc94bb 100644 (file)
@@ -628,6 +628,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
        if (req->alignment != 1 && n)
                goto bad;
 
+       if (!req->name[0] || !req->name_len)
+               goto bad;
+
        if (req->name_len > UBI_VOL_NAME_MAX) {
                err = -ENAMETOOLONG;
                goto bad;
index 3f1a09c5c438dde93c11e48529cc2a3cec5d93a7..5f0e4c2d9cd3d7ff27034e41048ce4e9b519264b 100644 (file)
@@ -51,7 +51,10 @@ struct ubi_mkvol_req;
        pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
 
 /* Just a debugging messages not related to any specific UBI subsystem */
-#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
+#define dbg_msg(fmt, ...)                                    \
+       printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
+              current->pid, __func__, ##__VA_ARGS__)
+
 /* General debugging messages */
 #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
 /* Messages from the eraseblock association sub-system */
index 4be671815014ee3b054f2ba8f704781dfbc1f032..c696c9481c95674ebc3a9284487eab7e1dd2e24f 100644 (file)
@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
         * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
         * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
         * LEB is already locked, we just do not move it and return
-        * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
+        * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+        * we do not know the reasons of the contention - it may be just a
+        * normal I/O on this LEB, so we want to re-try.
         */
        err = leb_write_trylock(ubi, vol_id, lnum);
        if (err) {
                dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
-               return MOVE_CANCEL_RACE;
+               return MOVE_RETRY;
        }
 
        /*
index 2135a53732ffce0dc0b4b7806637449f7ddec1f6..0b49eadebc3616a810c27b865ad6e77508998660 100644 (file)
@@ -1174,7 +1174,7 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
 
        ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
        if (!ech)
-               goto out_slab;
+               goto out_si;
 
        vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
        if (!vidh)
@@ -1235,8 +1235,6 @@ out_vidh:
        ubi_free_vid_hdr(ubi, vidh);
 out_ech:
        kfree(ech);
-out_slab:
-       kmem_cache_destroy(si->scan_leb_slab);
 out_si:
        ubi_scan_destroy_si(si);
        return ERR_PTR(err);
@@ -1325,7 +1323,9 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
                }
        }
 
-       kmem_cache_destroy(si->scan_leb_slab);
+       if (si->scan_leb_slab)
+               kmem_cache_destroy(si->scan_leb_slab);
+
        kfree(si);
 }
 
index c6c22295898e97ef3fdbfa16fd4171ec650fb55a..bbfa88d459e03fbe7ca83b599ab51accb4fc5779 100644 (file)
@@ -121,6 +121,7 @@ enum {
  *                     PEB
  * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
  *                       target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
  */
 enum {
        MOVE_CANCEL_RACE = 1,
@@ -128,6 +129,7 @@ enum {
        MOVE_TARGET_RD_ERR,
        MOVE_TARGET_WR_ERR,
        MOVE_CANCEL_BITFLIPS,
+       MOVE_RETRY,
 };
 
 /**
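
Read together with the eba.c hunk above and the wear_leveling_worker() hunk below, MOVE_RETRY gives UBI a third outcome for a contended LEB: ubi_eba_copy_leb() now returns it when leb_write_trylock() fails, since ordinary I/O may simply be holding the lock, and the wear-levelling worker responds by setting scrubbing and taking the out_not_moved path instead of treating the contention as a cancelled race.
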
index ff2c4956eeff0aa08bbe0e528151104dd04fba68..25f18e9b643206912f1209d7fc96126eeeb07603 100644 (file)
@@ -386,7 +386,7 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
  */
 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
 {
-       int err, medium_ec;
+       int err;
        struct ubi_wl_entry *e, *first, *last;
 
        ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
@@ -424,7 +424,7 @@ retry:
                 * For unknown data we pick a physical eraseblock with medium
                 * erase counter. But we by no means can pick a physical
                 * eraseblock with erase counter greater or equivalent than the
-                * lowest erase counter plus %WL_FREE_MAX_DIFF.
+                * lowest erase counter plus %WL_FREE_MAX_DIFF/2.
                 */
                first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
                                        u.rb);
@@ -433,10 +433,8 @@ retry:
                if (last->ec - first->ec < WL_FREE_MAX_DIFF)
                        e = rb_entry(ubi->free.rb_node,
                                        struct ubi_wl_entry, u.rb);
-               else {
-                       medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
-                       e = find_wl_entry(&ubi->free, medium_ec);
-               }
+               else
+                       e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
                break;
        case UBI_SHORTTERM:
                /*
@@ -792,7 +790,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                        protect = 1;
                        goto out_not_moved;
                }
-
+               if (err == MOVE_RETRY) {
+                       scrubbing = 1;
+                       goto out_not_moved;
+               }
                if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
                    err == MOVE_TARGET_RD_ERR) {
                        /*
@@ -1046,7 +1047,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
        ubi_err("failed to erase PEB %d, error %d", pnum, err);
        kfree(wl_wrk);
-       kmem_cache_free(ubi_wl_entry_slab, e);
 
        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
            err == -EBUSY) {
@@ -1059,14 +1059,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        goto out_ro;
                }
                return err;
-       } else if (err != -EIO) {
+       }
+
+       kmem_cache_free(ubi_wl_entry_slab, e);
+       if (err != -EIO)
                /*
                 * If this is not %-EIO, we have no idea what to do. Scheduling
                 * this physical eraseblock for erasure again would cause
                 * errors again and again. Well, lets switch to R/O mode.
                 */
                goto out_ro;
-       }
 
        /* It is %-EIO, the PEB went bad */
 
index 8cc22568ebd3429282b8674b912cb8fe80eb7a03..41afc408077d70255caf874165bdfa02e03b0f9c 100644 (file)
@@ -1842,7 +1842,7 @@ vortex_timer(unsigned long data)
                ok = 1;
        }
 
-       if (!netif_carrier_ok(dev))
+       if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
                next_tick = 5*HZ;
 
        if (vp->medialock)
index 10c45051caeae12b33f522f3ad1f774ceafd143a..a8b82da3956ef81a6f3e52dc6159916a96b06d5a 100644 (file)
@@ -992,6 +992,11 @@ static inline void cp_start_hw (struct cp_private *cp)
        cpw8(Cmd, RxOn | TxOn);
 }
 
+static void cp_enable_irq(struct cp_private *cp)
+{
+       cpw16_f(IntrMask, cp_intr_mask);
+}
+
 static void cp_init_hw (struct cp_private *cp)
 {
        struct net_device *dev = cp->dev;
@@ -1031,8 +1036,6 @@ static void cp_init_hw (struct cp_private *cp)
 
        cpw16(MultiIntr, 0);
 
-       cpw16_f(IntrMask, cp_intr_mask);
-
        cpw8_f(Cfg9346, Cfg9346_Lock);
 }
 
@@ -1164,6 +1167,8 @@ static int cp_open (struct net_device *dev)
        if (rc)
                goto err_out_hw;
 
+       cp_enable_irq(cp);
+
        netif_carrier_off(dev);
        mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
        netif_start_queue(dev);
@@ -2052,6 +2057,7 @@ static int cp_resume (struct pci_dev *pdev)
        /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
        cp_init_rings_index (cp);
        cp_init_hw (cp);
+       cp_enable_irq(cp);
        netif_start_queue (dev);
 
        spin_lock_irqsave (&cp->lock, flags);
index 9c9eec33143b6108eb08758531bc4238558d8560..0a47cad725809569732c3633b5bf390d3e66dbb1 100755 (executable)
@@ -2564,7 +2564,7 @@ config S6GMAC
 source "drivers/net/stmmac/Kconfig"
 
 config PCH_GBE
-       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+       tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
        depends on PCI
        select MII
        ---help---
@@ -2577,10 +2577,11 @@ config PCH_GBE
          This driver enables Gigabit Ethernet function.
 
          This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7223.
-         ML7223 IOH is for MP(Media Phone) use.
-         ML7223 is companion chip for Intel Atom E6xx series.
-         ML7223 is completely compatible for Intel EG20T PCH.
+         Output Hub), ML7223/ML7831.
+         ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+         purpose use.
+         ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+         ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
 endif # NETDEV_1000
 
index 1269ba5d6e56d67b75d244d0bb89e8948d08a251..5e34e21f88894e01bc0e4a951006689dbf86d3e4 100644 (file)
@@ -2223,10 +2223,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
                        dev_info(&adapter->pdev->dev, "tx locked\n");
                return NETDEV_TX_LOCKED;
        }
-       if (skb->mark == 0x01)
-               type = atl1c_trans_high;
-       else
-               type = atl1c_trans_normal;
 
        if (atl1c_tpd_avail(adapter, type) < tpd_req) {
                /* no enough descriptor, just stop queue */
index cd5789ff372604b513d104bf7eb1cfa111324e4d..48c27d34eacd1ceb5aaa5a8ad4004ca4e8e2ec4d 100644 (file)
@@ -2476,7 +2476,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
                                        "pcie phy link down %x\n", status);
                        if (netif_running(adapter->netdev)) {   /* reset MAC */
                                iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-                               schedule_work(&adapter->pcie_dma_to_rst_task);
+                               schedule_work(&adapter->reset_dev_task);
                                return IRQ_HANDLED;
                        }
                }
@@ -2488,7 +2488,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
                                        "pcie DMA r/w error (status = 0x%x)\n",
                                        status);
                        iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-                       schedule_work(&adapter->pcie_dma_to_rst_task);
+                       schedule_work(&adapter->reset_dev_task);
                        return IRQ_HANDLED;
                }
 
@@ -2633,10 +2633,10 @@ static void atl1_down(struct atl1_adapter *adapter)
        atl1_clean_rx_ring(adapter);
 }
 
-static void atl1_tx_timeout_task(struct work_struct *work)
+static void atl1_reset_dev_task(struct work_struct *work)
 {
        struct atl1_adapter *adapter =
-               container_of(work, struct atl1_adapter, tx_timeout_task);
+               container_of(work, struct atl1_adapter, reset_dev_task);
        struct net_device *netdev = adapter->netdev;
 
        netif_device_detach(netdev);
@@ -3034,12 +3034,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
                    (unsigned long)adapter);
        adapter->phy_timer_pending = false;
 
-       INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
+       INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
 
        INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
 
-       INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
-
        err = register_netdev(netdev);
        if (err)
                goto err_common;
index 68de8cbfb3ec05da6244f1225ea23b9cef58b19c..c27b724a834b25f7c774701163c056a728eb4b57 100644 (file)
@@ -759,9 +759,8 @@ struct atl1_adapter {
        u16 link_speed;
        u16 link_duplex;
        spinlock_t lock;
-       struct work_struct tx_timeout_task;
+       struct work_struct reset_dev_task;
        struct work_struct link_chg_task;
-       struct work_struct pcie_dma_to_rst_task;
 
        struct timer_list phy_config_timer;
        bool phy_timer_pending;
index afb7f7dd1bb133901aab8f71c30bc2d86ef35fd7..2b7af060d49fbf1a30f468fd004ff88783220156 100644 (file)
@@ -193,7 +193,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
 {
        struct atlx_adapter *adapter = netdev_priv(netdev);
        /* Do the reset outside of interrupt context */
-       schedule_work(&adapter->tx_timeout_task);
+       schedule_work(&adapter->reset_dev_task);
 }
 
 /*
index 2df9276720a0be47f27f909518fd270e531bd535..5e725e07d61bbe0fc60062d21286279a306b3054 100644 (file)
@@ -871,16 +871,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
        }
 }
 
-/* hw is a boolean parameter that determines whether we should try and
- * set the hw address of the device as well as the hw address of the
- * net_device
- */
-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
+static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
 {
        struct net_device *dev = slave->dev;
        struct sockaddr s_addr;
 
-       if (!hw) {
+       if (slave->bond->params.mode == BOND_MODE_TLB) {
                memcpy(dev->dev_addr, addr, dev->addr_len);
                return 0;
        }
@@ -910,8 +906,8 @@ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct
        u8 tmp_mac_addr[ETH_ALEN];
 
        memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
-       alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
-       alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
+       alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
+       alb_set_slave_mac_addr(slave2, tmp_mac_addr);
 
 }
 
@@ -1058,8 +1054,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 
                /* Try setting slave mac to bond address and fall-through
                   to code handling that situation below... */
-               alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
-                                      bond->alb_info.rlb_enabled);
+               alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
        }
 
        /* The slave's address is equal to the address of the bond.
@@ -1095,8 +1090,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        }
 
        if (free_mac_slave) {
-               alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
-                                      bond->alb_info.rlb_enabled);
+               alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
 
                pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
                           bond->dev->name, slave->dev->name,
@@ -1452,8 +1446,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
 {
        int res;
 
-       res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
-                                    bond->alb_info.rlb_enabled);
+       res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
        if (res) {
                return res;
        }
@@ -1604,8 +1597,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
                alb_swap_mac_addr(bond, swap_slave, new_slave);
        } else {
                /* set the new_slave to the bond mac address */
-               alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
-                                      bond->alb_info.rlb_enabled);
+               alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
        }
 
        if (swap_slave) {
@@ -1665,8 +1657,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
                alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
                alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
        } else {
-               alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
-                                      bond->alb_info.rlb_enabled);
+               alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
                read_lock(&bond->lock);
                alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
index 9ea2f21443ee8ded27fe0582ccaf9a8bcb5df76a..e6da842cb288c5f95a3568a19e9403b5ba334601 100644 (file)
@@ -1500,6 +1500,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
        struct sk_buff *skb = *pskb;
        struct slave *slave;
        struct bonding *bond;
+       void (*recv_probe)(struct sk_buff *, struct bonding *,
+                               struct slave *);
 
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
@@ -1513,11 +1515,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
        if (bond->params.arp_interval)
                slave->dev->last_rx = jiffies;
 
-       if (bond->recv_probe) {
+       recv_probe = ACCESS_ONCE(bond->recv_probe);
+       if (recv_probe) {
                struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 
                if (likely(nskb)) {
-                       bond->recv_probe(nskb, bond, slave);
+                       recv_probe(nskb, bond, slave);
                        dev_kfree_skb(nskb);
                }
        }
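
The hunk above copies bond->recv_probe into a local with ACCESS_ONCE() before testing and calling it, so a writer clearing the pointer between the check and the call cannot trigger a NULL dereference, and the compiler cannot reload the field. A minimal user-space sketch of the same read-once pattern; the names are hypothetical and C11 atomics stand in for the kernel macro:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical hook type, analogous to bond->recv_probe. */
typedef void (*probe_fn)(const char *pkt);

/* Written by a control path, read by the frame handler. */
static _Atomic(probe_fn) recv_probe;

static void handle_frame(const char *pkt)
{
        /* Read the shared pointer exactly once into a local. */
        probe_fn fn = atomic_load_explicit(&recv_probe, memory_order_acquire);

        if (fn)                 /* the test and the call use the same snapshot */
                fn(pkt);
}

static void arp_probe(const char *pkt)
{
        printf("probe saw %s\n", pkt);
}

int main(void)
{
        atomic_store(&recv_probe, arp_probe);
        handle_frame("arp-reply");

        /* e.g. the bonding mode changes and the hook is cleared */
        atomic_store(&recv_probe, (probe_fn)NULL);
        handle_frame("arp-reply");      /* still safe: no NULL call */
        return 0;
}
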
@@ -1902,7 +1905,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                 "but new slave device does not support netpoll.\n",
                                 bond_dev->name);
                        res = -EBUSY;
-                       goto err_close;
+                       goto err_detach;
                }
        }
 #endif
@@ -1911,7 +1914,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        res = bond_create_slave_symlinks(bond_dev, slave_dev);
        if (res)
-               goto err_close;
+               goto err_detach;
 
        res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
                                         new_slave);
@@ -1932,6 +1935,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 err_dest_symlinks:
        bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
+err_detach:
+       write_lock_bh(&bond->lock);
+       bond_detach_slave(bond, new_slave);
+       write_unlock_bh(&bond->lock);
+
 err_close:
        dev_close(slave_dev);
 
@@ -3068,7 +3076,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
                                           trans_start + delta_in_ticks)) ||
                            bond->curr_active_slave != slave) {
                                slave->link = BOND_LINK_UP;
-                               bond->current_arp_slave = NULL;
+                               if (bond->current_arp_slave) {
+                                       bond_set_slave_inactive_flags(
+                                               bond->current_arp_slave);
+                                       bond->current_arp_slave = NULL;
+                               }
 
                                pr_info("%s: link status definitely up for interface %s.\n",
                                        bond->dev->name, slave->dev->name);
index 7e5cc0bd913da3f106d1feda23194c64b5a1d1d4..1f8a8244f23e218b7eca6f8287df627875898014 100644 (file)
@@ -688,7 +688,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
  *
  * We iterate from priv->tx_echo to priv->tx_next and check if the
  * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted package, stop looking for more.
+ * If we discover a not yet transmitted packet, stop looking for more.
  */
 static void c_can_do_tx(struct net_device *dev)
 {
@@ -700,7 +700,7 @@ static void c_can_do_tx(struct net_device *dev)
        for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
                msg_obj_no = get_tx_echo_msg_obj(priv);
                val = c_can_read_reg32(priv, &priv->regs->txrqst1);
-               if (!(val & (1 << msg_obj_no))) {
+               if (!(val & (1 << (msg_obj_no - 1)))) {
                        can_get_echo_skb(dev,
                                        msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
                        stats->tx_bytes += priv->read_reg(priv,
@@ -708,6 +708,8 @@ static void c_can_do_tx(struct net_device *dev)
                                        & IF_MCONT_DLC_MASK;
                        stats->tx_packets++;
                        c_can_inval_msg_object(dev, 0, msg_obj_no);
+               } else {
+                       break;
                }
        }
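
The tx-completion loop above now tests bit (msg_obj_no - 1) of the TXRQST value, matching 1-based message object numbers to 0-based request bits, and it breaks at the first object that is still pending instead of scanning past it. A stand-alone sketch of that loop shape; the object range and bitmask here are made up:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: TX message objects 1..4, TXRQST bit 0 <-> object 1. */
#define MSG_OBJ_TX_FIRST 1
#define MSG_OBJ_TX_LAST  4

static int tx_pending(uint32_t txrqst, unsigned int obj)
{
        return txrqst & (1u << (obj - 1));  /* obj is 1-based, bits are 0-based */
}

int main(void)
{
        uint32_t txrqst = 0x0c;         /* objects 3 and 4 still pending */
        unsigned int obj;

        for (obj = MSG_OBJ_TX_FIRST; obj <= MSG_OBJ_TX_LAST; obj++) {
                if (tx_pending(txrqst, obj)) {
                        /* Not transmitted yet; stop here, as the patch does. */
                        printf("object %u still pending, stop\n", obj);
                        break;
                }
                printf("object %u done, echo skb index %u\n",
                       obj, obj - MSG_OBJ_TX_FIRST);
        }
        return 0;
}
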
 
@@ -952,7 +954,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
        struct net_device *dev = napi->dev;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       irqstatus = priv->irqstatus;
        if (!irqstatus)
                goto end;
 
@@ -1030,12 +1032,11 @@ end:
 
 static irqreturn_t c_can_isr(int irq, void *dev_id)
 {
-       u16 irqstatus;
        struct net_device *dev = (struct net_device *)dev_id;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
-       if (!irqstatus)
+       priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+       if (!priv->irqstatus)
                return IRQ_NONE;
 
        /* disable all interrupts and schedule the NAPI */
@@ -1065,10 +1066,11 @@ static int c_can_open(struct net_device *dev)
                goto exit_irq_fail;
        }
 
+       napi_enable(&priv->napi);
+
        /* start the c_can controller */
        c_can_start(dev);
 
-       napi_enable(&priv->napi);
        netif_start_queue(dev);
 
        return 0;
index 9b7fbef3d09a1248cda69974ac9c3cc4cf9e464e..5f32d34af507e7a9d51c4b37f8add3cd6ff0ddcb 100644 (file)
@@ -76,6 +76,7 @@ struct c_can_priv {
        unsigned int tx_next;
        unsigned int tx_echo;
        void *priv;             /* for board-specific data */
+       u16 irqstatus;
 };
 
 struct net_device *alloc_c_can_dev(void);
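
Taken together, the c_can hunks above stop re-reading the interrupt register in the NAPI poll routine: the hard IRQ handler latches the value into the new priv->irqstatus field and the poll path consumes that copy, since a second read of the register can return a different or already-cleared value. A toy sketch of that latch-in-ISR, consume-in-poll split, using a hypothetical status register whose read has side effects:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical device: reading the status register has side effects, so the
 * value obtained in the IRQ handler cannot simply be read again later. */
static uint16_t hw_status = 0x0005;

static uint16_t read_status_reg(void)
{
        uint16_t val = hw_status;

        hw_status = 0;                  /* cleared by the read */
        return val;
}

struct priv {
        uint16_t irqstatus;             /* latched copy, as in the patch */
};

static int isr(struct priv *p)
{
        p->irqstatus = read_status_reg();
        if (!p->irqstatus)
                return 0;               /* IRQ_NONE */
        /* ...disable device interrupts and schedule the poll routine... */
        return 1;                       /* IRQ_HANDLED */
}

static void poll(struct priv *p)
{
        uint16_t irqstatus = p->irqstatus;      /* not read_status_reg() again */

        printf("poll handles status 0x%04x\n", irqstatus);
}

int main(void)
{
        struct priv p = { 0 };

        if (isr(&p))
                poll(&p);
        return 0;
}
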
index 3f2e12c3ac1bbaf44f42034d2a988ce9732aef43..015b5152b0decb63c950d6d13be806613d837144 100644 (file)
@@ -971,7 +971,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
        case (NETEVENT_REDIRECT):{
                struct netevent_redirect *nr = ctx;
                cxgb_redirect(nr->old, nr->new);
-               cxgb_neigh_update(nr->new->neighbour);
+               cxgb_neigh_update(dst_get_neighbour(nr->new));
                break;
        }
        default:
@@ -1116,8 +1116,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
        struct l2t_entry *e;
        struct t3c_tid_entry *te;
 
-       olddev = old->neighbour->dev;
-       newdev = new->neighbour->dev;
+       olddev = dst_get_neighbour(old)->dev;
+       newdev = dst_get_neighbour(new)->dev;
        if (!is_offloading(olddev))
                return;
        if (!is_offloading(newdev)) {
@@ -1134,7 +1134,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
        }
 
        /* Add new L2T entry */
-       e = t3_l2t_get(tdev, new->neighbour, newdev);
+       e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
        if (!e) {
                printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
                       __func__);
index dcc4a170b0f397ed5362341a8d5a62fb07d411a4..e5efe3aec0f4494c4d6d4d0de42ebf6ea8b192ee 100644 (file)
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
        int                     ret;
 
        /* free and bail if we are shutting down */
-       if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
+       if (unlikely(!netif_running(ndev))) {
                dev_kfree_skb_any(skb);
                return;
        }
@@ -1037,7 +1037,9 @@ static void emac_rx_handler(void *token, int len, int status)
 recycle:
        ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
                        skb_tailroom(skb), GFP_KERNEL);
-       if (WARN_ON(ret < 0))
+
+       WARN_ON(ret == -ENOMEM);
+       if (unlikely(ret < 0))
                dev_kfree_skb_any(skb);
 }
 
index 7615040df75621dda0684732017e4d21fac425eb..f470ab64b0931609f01b4e804dfa705324a69698 100644 (file)
@@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
                __davinci_mdio_reset(data);
                return -EAGAIN;
        }
+
+       reg = __raw_readl(&regs->user[0].access);
+       if ((reg & USERACCESS_GO) == 0)
+               return 0;
+
        dev_err(data->dev, "timed out waiting for user access\n");
        return -ETIMEDOUT;
 }
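
The hunk above re-reads the GO bit one last time after the wait has expired, so an access that completed just as the budget ran out is reported as success rather than -ETIMEDOUT. A generic stand-alone sketch of the poll-then-recheck pattern (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical hardware accessor: true while the GO bit is still set.
 * Here the operation happens to finish on the 4th read. */
static bool op_in_flight(void)
{
        static int reads;

        return ++reads < 4;
}

static int wait_for_completion_poll(unsigned int max_polls)
{
        while (max_polls--) {
                if (!op_in_flight())
                        return 0;
        }

        /* The budget ran out, but time has passed since the last poll
         * (the driver runs a reset check in between): look once more
         * before reporting a failure. */
        if (!op_in_flight())
                return 0;

        fprintf(stderr, "timed out waiting for user access\n");
        return -1;
}

int main(void)
{
        /* Three polls miss the completion; the final recheck catches it. */
        printf("result: %d\n", wait_for_completion_poll(3));
        return 0;
}
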
index 39cf9b9bd673cdd0d46aed0c970ca58cccbedc88..3fa19c1621127f1297a40deb665f0d1673c68d1a 100644 (file)
@@ -106,14 +106,14 @@ static int dummy_dev_init(struct net_device *dev)
        return 0;
 }
 
-static void dummy_dev_free(struct net_device *dev)
+static void dummy_dev_uninit(struct net_device *dev)
 {
        free_percpu(dev->dstats);
-       free_netdev(dev);
 }
 
 static const struct net_device_ops dummy_netdev_ops = {
        .ndo_init               = dummy_dev_init,
+       .ndo_uninit             = dummy_dev_uninit,
        .ndo_start_xmit         = dummy_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = set_multicast_list,
@@ -127,7 +127,7 @@ static void dummy_setup(struct net_device *dev)
 
        /* Initialize the device structure. */
        dev->netdev_ops = &dummy_netdev_ops;
-       dev->destructor = dummy_dev_free;
+       dev->destructor = free_netdev;
 
        /* Fill in device structure with ethernet-generic values. */
        dev->tx_queue_len = 0;
index 8676899120c301f67eab5de0ffb87b019f18281f..2c71884eb46e9937c4294e39650dfb0d136e97af 100644 (file)
@@ -150,6 +150,8 @@ struct e1000_buffer {
        unsigned long time_stamp;
        u16 length;
        u16 next_to_watch;
+       unsigned int segs;
+       unsigned int bytecount;
        u16 mapped_as_page;
 };
 
index 76e8af00d86dab7d79a65842dddadf6cfed337c8..99525f9b41b64623ede0994122804a8a109d83b6 100644 (file)
@@ -2798,7 +2798,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
        struct e1000_buffer *buffer_info;
        unsigned int len = skb_headlen(skb);
        unsigned int offset = 0, size, count = 0, i;
-       unsigned int f;
+       unsigned int f, bytecount, segs;
 
        i = tx_ring->next_to_use;
 
@@ -2899,7 +2899,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                }
        }
 
+       segs = skb_shinfo(skb)->gso_segs ?: 1;
+       /* multiply data chunks by size of headers */
+       bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
        tx_ring->buffer_info[i].skb = skb;
+       tx_ring->buffer_info[i].segs = segs;
+       tx_ring->buffer_info[i].bytecount = bytecount;
        tx_ring->buffer_info[first].next_to_watch = i;
 
        return count;
@@ -3573,14 +3579,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                        cleaned = (i == eop);
 
                        if (cleaned) {
-                               struct sk_buff *skb = buffer_info->skb;
-                               unsigned int segs, bytecount;
-                               segs = skb_shinfo(skb)->gso_segs ?: 1;
-                               /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * skb_headlen(skb)) +
-                                           skb->len;
-                               total_tx_packets += segs;
-                               total_tx_bytes += bytecount;
+                               total_tx_packets += buffer_info->segs;
+                               total_tx_bytes += buffer_info->bytecount;
                        }
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
                        tx_desc->upper.data = 0;
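
The two e1000 hunks above precompute the segment count and byte accounting in e1000_tx_map() and store them in the buffer_info entry, so the interrupt-time clean loop only sums cached values. The bytecount formula charges the headers once per additional TSO segment on top of skb->len; a small stand-alone check of that arithmetic with made-up sizes:

#include <stdio.h>

/* bytecount = (segs - 1) * header_len + skb_len: each additional TSO
 * segment carries its own copy of the headers on the wire. */
static unsigned int tx_bytecount(unsigned int segs, unsigned int header_len,
                                 unsigned int skb_len)
{
        return (segs - 1) * header_len + skb_len;
}

int main(void)
{
        /* Made-up TSO frame: 66-byte headers, 4 segments, 5898-byte skb. */
        printf("tso  bytecount = %u\n", tx_bytecount(4, 66, 5898));

        /* Non-TSO frame: gso_segs is 0 in the driver and treated as 1. */
        printf("1514 bytecount = %u\n", tx_bytecount(1, 66, 1514));
        return 0;
}
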
index 9549879e66a0382bc0031446ea23cacaa0cfb6fe..8a265f3528de4448e97b0b3fee7fe62cfb532deb 100644 (file)
@@ -311,6 +311,7 @@ struct e1000_adapter {
        u32 txd_cmd;
 
        bool detect_tx_hung;
+       bool tx_hang_recheck;
        u8 tx_timeout_factor;
 
        u32 tx_int_delay;
index 3310c3d477d78ee51fc13d7c1b45d26a4ca010ed..5430a9a4a28c207b8781b2c33f1d3e30cefbc9b1 100644 (file)
@@ -930,6 +930,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
        struct e1000_adapter *adapter = container_of(work,
                                                     struct e1000_adapter,
                                                     print_hang_task);
+       struct net_device *netdev = adapter->netdev;
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int i = tx_ring->next_to_clean;
        unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
@@ -941,6 +942,21 @@ static void e1000_print_hw_hang(struct work_struct *work)
        if (test_bit(__E1000_DOWN, &adapter->state))
                return;
 
+       if (!adapter->tx_hang_recheck &&
+           (adapter->flags2 & FLAG2_DMA_BURST)) {
+               /* May be blocked on write-back; flush pending descriptor
+                * writebacks to memory and detect again.
+                */
+               ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+               /* execute the writes immediately */
+               e1e_flush();
+               adapter->tx_hang_recheck = true;
+               return;
+       }
+       /* Real hang detected */
+       adapter->tx_hang_recheck = false;
+       netif_stop_queue(netdev);
+
        e1e_rphy(hw, PHY_STATUS, &phy_status);
        e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
        e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1054,10 +1070,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                if (tx_ring->buffer_info[i].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
                               + (adapter->tx_timeout_factor * HZ)) &&
-                   !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+                   !(er32(STATUS) & E1000_STATUS_TXOFF))
                        schedule_work(&adapter->print_hang_task);
-                       netif_stop_queue(netdev);
-               }
+               else
+                       adapter->tx_hang_recheck = false;
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
@@ -3678,6 +3694,7 @@ static int e1000_open(struct net_device *netdev)
 
        e1000_irq_enable(adapter);
 
+       adapter->tx_hang_recheck = false;
        netif_start_queue(netdev);
 
        adapter->idle_check = true;
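
The e1000e hunks above turn the hang report into a two-pass check when DMA burst is in use: the first suspicion only flushes pending descriptor write-backs and arms tx_hang_recheck, and the queue is stopped only if the suspicion persists on the next pass. A compact sketch of that recheck state machine; the helper names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct adapter {
        bool tx_hang_recheck;
        bool dma_burst;                 /* FLAG2_DMA_BURST in the driver */
};

/* Hypothetical stand-ins for the flush and stop-queue calls. */
static void flush_writebacks(void) { puts("flush pending write-backs"); }
static void stop_queue(void)       { puts("stop queue: real hang"); }

/* Clean routine suspects a hang on this pass. */
static void report_hang(struct adapter *a)
{
        if (!a->tx_hang_recheck && a->dma_burst) {
                flush_writebacks();     /* may only be delayed write-back */
                a->tx_hang_recheck = true;
                return;                 /* re-evaluate on the next pass */
        }
        a->tx_hang_recheck = false;
        stop_queue();
}

/* Clean routine found no hang on this pass. */
static void report_ok(struct adapter *a)
{
        a->tx_hang_recheck = false;
}

int main(void)
{
        struct adapter a = { .tx_hang_recheck = false, .dma_burst = true };

        report_hang(&a);                /* first suspicion: flush and rearm */
        report_ok(&a);                  /* it cleared up */
        report_hang(&a);                /* new suspicion: flush again */
        report_hang(&a);                /* still stuck: declare the hang */
        return 0;
}
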
index 2f433fbfca0cfa30e8e95fd38aefb901792fad17..51fba5fe94bda5b291f88b58e7cd6e563dd471bc 100644 (file)
@@ -1718,8 +1718,12 @@ static void enic_poll_controller(struct net_device *netdev)
                        enic_isr_msix_rq(enic->msix_entry[intr].vector,
                                &enic->napi[i]);
                }
-               intr = enic_msix_wq_intr(enic, i);
-               enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+
+               for (i = 0; i < enic->wq_count; i++) {
+                       intr = enic_msix_wq_intr(enic, i);
+                       enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+               }
+
                break;
        case VNIC_DEV_INTR_MODE_MSI:
                enic_isr_msi(enic->pdev->irq, enic);
index 19738143aa91b4ea46adc39c2074087abc692f00..1d1ccec60728fda8ec696c28b83bd38477ef9fff 100644 (file)
@@ -2228,19 +2228,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
                ((new_mtu) < IPV6_MIN_MTU))
                return -EINVAL;
 
-       if (new_mtu > 4000) {
-               jme->reg_rxcs &= ~RXCS_FIFOTHNP;
-               jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
-               jme_restart_rx_engine(jme);
-       } else {
-               jme->reg_rxcs &= ~RXCS_FIFOTHNP;
-               jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
-               jme_restart_rx_engine(jme);
-       }
 
        netdev->mtu = new_mtu;
        netdev_update_features(netdev);
 
+       jme_restart_rx_engine(jme);
        jme_reset_link(jme);
 
        return 0;
index e9aaeca96abc0cb07552bd4e23978bd0553ffc2b..fff885e9274e78550b0bf90327ee499c5e6a01e1 100644 (file)
@@ -734,7 +734,7 @@ enum jme_rxcs_values {
        RXCS_RETRYCNT_60        = 0x00000F00,
 
        RXCS_DEFAULT            = RXCS_FIFOTHTP_128T |
-                                 RXCS_FIFOTHNP_128QW |
+                                 RXCS_FIFOTHNP_16QW |
                                  RXCS_DMAREQSZ_128B |
                                  RXCS_RETRYGAP_256ns |
                                  RXCS_RETRYCNT_32,
index 61631cace913c5b3762a6eee5f73ed5f53c734a7..3eacbb4fff9ae4ae6066338b30de580387599c5a 100644 (file)
@@ -38,7 +38,7 @@
 #define        DRV_NAME        "ks8851_mll"
 
 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
-#define MAX_RECV_FRAMES                        32
+#define MAX_RECV_FRAMES                        255
 #define MAX_BUF_SIZE                   2048
 #define TX_BUF_SIZE                    2000
 #define RX_BUF_SIZE                    2000
index 41ea5920c158d37ef1c54580ae95326145ddb8dc..95b6664e93667d68b3d958d44998acbb9922fdf7 100644 (file)
@@ -5679,7 +5679,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
                memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
        }
 
-       memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
+       memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
 
        interrupt = hw_block_intr(hw);
 
index 2f3c48da5865677fb43c0b26e885c445e2091a30..735f726729d9b2c3866489fb79b788349ee89c24 100644 (file)
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
                dest = macvlan_hash_lookup(port, eth->h_dest);
                if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
                        /* send to lowerdev first for its network taps */
-                       vlan->forward(vlan->lowerdev, skb);
+                       dev_forward_skb(vlan->lowerdev, skb);
 
                        return NET_XMIT_SUCCESS;
                }
@@ -247,7 +247,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 
 xmit_world:
        skb->ip_summed = ip_summed;
-       skb_set_dev(skb, vlan->lowerdev);
+       skb->dev = vlan->lowerdev;
        return dev_queue_xmit(skb);
 }
 
index dfc82720065ac0d68bb657dbf19150ff7dd4c89e..4840ab7e7f8a5870074ec1d36398c0fc406ff388 100644 (file)
@@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt,
                return err;
        if (enabled < 0 || enabled > 1)
                return -EINVAL;
+       if (enabled == nt->enabled) {
+               printk(KERN_INFO "netconsole: network logging has already %s\n",
+                               nt->enabled ? "started" : "stopped");
+               return -EINVAL;
+       }
 
        if (enabled) {  /* 1 */
 
index eac3c5ca9731b8d9f15819a801654a8e76fd4917..236d00ec64cc770ad51f992ea456e386f90c9fc6 100644 (file)
@@ -39,6 +39,9 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCI_VENDOR_ID_ROHM                     0x10db
 #define PCI_DEVICE_ID_ROHM_ML7223_GBE          0x8013
 
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE          0x8802
+
 #define PCH_GBE_TX_WEIGHT         64
 #define PCH_GBE_RX_WEIGHT         64
 #define PCH_GBE_RX_BUFFER_WRITE   16
@@ -717,13 +720,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
        iowrite32(rdba, &hw->reg->RX_DSC_BASE);
        iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
        iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
-       /* Enables Receive DMA */
-       rxdma = ioread32(&hw->reg->DMA_CTRL);
-       rxdma |= PCH_GBE_RX_DMA_EN;
-       iowrite32(rxdma, &hw->reg->DMA_CTRL);
-       /* Enables Receive */
-       iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
 }
 
 /**
@@ -1097,6 +1093,19 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
        spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+       u32 rxdma;
+
+       /* Enables Receive DMA */
+       rxdma = ioread32(&hw->reg->DMA_CTRL);
+       rxdma |= PCH_GBE_RX_DMA_EN;
+       iowrite32(rxdma, &hw->reg->DMA_CTRL);
+       /* Enables Receive */
+       iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+       return;
+}
+
 /**
  * pch_gbe_intr - Interrupt Handler
  * @irq:   Interrupt number
@@ -1701,6 +1710,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
        struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
        int err;
 
+       /* Ensure we have a valid MAC */
+       if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+               pr_err("Error: Invalid MAC address\n");
+               return -EINVAL;
+       }
+
        /* hardware has been reset, we need to reload some things */
        pch_gbe_set_multi(netdev);
 
@@ -1717,6 +1732,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
        pch_gbe_alloc_tx_buffers(adapter, tx_ring);
        pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
        adapter->tx_queue_len = netdev->tx_queue_len;
+       pch_gbe_start_receive(&adapter->hw);
 
        mod_timer(&adapter->watchdog_timer, jiffies);
 
@@ -2118,7 +2134,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
                /* If no Tx and not enough Rx work done,
                 * exit the polling mode
                 */
-               if ((work_done < budget) || !netif_running(netdev))
+               if (work_done < budget)
                        poll_end_flag = true;
        }
 
@@ -2392,9 +2408,14 @@ static int pch_gbe_probe(struct pci_dev *pdev,
 
        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
        if (!is_valid_ether_addr(netdev->dev_addr)) {
-               dev_err(&pdev->dev, "Invalid MAC Address\n");
-               ret = -EIO;
-               goto err_free_adapter;
+               /*
+                * If the MAC is invalid (or just missing), display a warning
+                * but do not abort setting up the device. pch_gbe_up will
+                * prevent the interface from being brought up until a valid MAC
+                * is set.
+                */
+               dev_err(&pdev->dev, "Invalid MAC address, "
+                                   "interface disabled.\n");
        }
        setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
                    (unsigned long)adapter);
@@ -2452,6 +2473,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
+       {.vendor = PCI_VENDOR_ID_ROHM,
+        .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+        .class_mask = (0xFFFF00)
+        },
        /* required last entry */
        {0}
 };
index 5b5d90a47e29419eef6cb207066403e37e855efc..fb74ef9c81a2c9e9ac345391a239f9c95702432e 100644 (file)
@@ -320,10 +320,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
                        pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
                        hw->phy.autoneg_advertised = opt.def;
                } else {
-                       hw->phy.autoneg_advertised = AutoNeg;
-                       pch_gbe_validate_option(
-                               (int *)(&hw->phy.autoneg_advertised),
-                               &opt, adapter);
+                       int tmp = AutoNeg;
+
+                       pch_gbe_validate_option(&tmp, &opt, adapter);
+                       hw->phy.autoneg_advertised = tmp;
                }
        }
 
@@ -494,9 +494,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
                        .arg  = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
                                         .p = fc_list } }
                };
-               hw->mac.fc = FlowControl;
-               pch_gbe_validate_option((int *)(&hw->mac.fc),
-                                               &opt, adapter);
+               int tmp = FlowControl;
+
+               pch_gbe_validate_option(&tmp, &opt, adapter);
+               hw->mac.fc = tmp;
        }
 
        pch_gbe_check_copper_options(adapter);
index cb6e0b486b1e220864ba72c60decf9f53b0a05c2..364cd67bb70d124ee1c3528b9165c1a68c82ea77 100644 (file)
@@ -875,6 +875,7 @@ static void dp83640_remove(struct phy_device *phydev)
        struct dp83640_clock *clock;
        struct list_head *this, *next;
        struct dp83640_private *tmp, *dp83640 = phydev->priv;
+       struct sk_buff *skb;
 
        if (phydev->addr == BROADCAST_ADDR)
                return;
@@ -882,6 +883,12 @@ static void dp83640_remove(struct phy_device *phydev)
        enable_status_frames(phydev, false);
        cancel_work_sync(&dp83640->ts_work);
 
+       while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL)
+               kfree_skb(skb);
+
+       while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL)
+               skb_complete_tx_timestamp(skb, NULL);
+
        clock = dp83640_clock_get(dp83640->clock);
 
        if (dp83640 == clock->chosen) {
@@ -1060,7 +1067,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
        struct dp83640_private *dp83640 = phydev->priv;
 
        if (!dp83640->hwts_tx_en) {
-               kfree_skb(skb);
+               skb_complete_tx_timestamp(skb, NULL);
                return;
        }
        skb_queue_tail(&dp83640->tx_queue, skb);
index 47c8339a03595cb645c672fc262b76db4bf6cffb..2843c90f712f40c27c0688cb336dc87dbede054e 100644 (file)
@@ -241,7 +241,7 @@ MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
 
 static struct platform_driver mdio_ofgpio_driver = {
        .driver = {
-               .name = "mdio-gpio",
+               .name = "mdio-ofgpio",
                .owner = THIS_MODULE,
                .of_match_table = mdio_ofgpio_match,
        },
index 4609bc0e2f563104065e56f119f9f7a509f45e40..b890401cab93f87b2ccb6f1cee248f6a0f3b3aba 100644 (file)
@@ -968,7 +968,6 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
        proto = npindex_to_proto[npi];
        put_unaligned_be16(proto, pp);
 
-       netif_stop_queue(dev);
        skb_queue_tail(&ppp->file.xq, skb);
        ppp_xmit_process(ppp);
        return NETDEV_TX_OK;
@@ -1063,6 +1062,8 @@ ppp_xmit_process(struct ppp *ppp)
                   code that we can accept some more. */
                if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
                        netif_wake_queue(ppp->dev);
+               else
+                       netif_stop_queue(ppp->dev);
        }
        ppp_xmit_unlock(ppp);
 }
@@ -2019,14 +2020,22 @@ ppp_mp_reconstruct(struct ppp *ppp)
                        continue;
                }
                if (PPP_MP_CB(p)->sequence != seq) {
+                       u32 oldseq;
                        /* Fragment `seq' is missing.  If it is after
                           minseq, it might arrive later, so stop here. */
                        if (seq_after(seq, minseq))
                                break;
                        /* Fragment `seq' is lost, keep going. */
                        lost = 1;
+                       oldseq = seq;
                        seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
                                minseq + 1: PPP_MP_CB(p)->sequence;
+
+                       if (ppp->debug & 1)
+                               netdev_printk(KERN_DEBUG, ppp->dev,
+                                             "lost frag %u..%u\n",
+                                             oldseq, seq-1);
+
                        goto again;
                }
 
@@ -2071,6 +2080,10 @@ ppp_mp_reconstruct(struct ppp *ppp)
                        struct sk_buff *tmp2;
 
                        skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+                               if (ppp->debug & 1)
+                                       netdev_printk(KERN_DEBUG, ppp->dev,
+                                                     "discarding frag %u\n",
+                                                     PPP_MP_CB(p)->sequence);
                                __skb_unlink(p, list);
                                kfree_skb(p);
                        }
@@ -2086,6 +2099,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
                /* If we have discarded any fragments,
                   signal a receive error. */
                if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
+                       skb_queue_walk_safe(list, p, tmp) {
+                               if (p == head)
+                                       break;
+                               if (ppp->debug & 1)
+                                       netdev_printk(KERN_DEBUG, ppp->dev,
+                                                     "discarding frag %u\n",
+                                                     PPP_MP_CB(p)->sequence);
+                               __skb_unlink(p, list);
+                               kfree_skb(p);
+                       }
+
                        if (ppp->debug & 1)
                                netdev_printk(KERN_DEBUG, ppp->dev,
                                              "  missed pkts %u..%u\n",
index 1286fe212dc4585af3659ddb066d00e9283f2ce2..4b3a68b69a6744b96fc83658ee3f87d31c931776 100644 (file)
@@ -418,10 +418,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
        lock_sock(sk);
 
        opt->src_addr = sp->sa_addr.pptp;
-       if (add_chan(po)) {
-               release_sock(sk);
+       if (add_chan(po))
                error = -EBUSY;
-       }
 
        release_sock(sk);
        return error;
index ca4694e8a586554640158d6ea6a0993b064fe01b..1f421d73a88f1f4bd288bc63b802c6f618f647ba 100644 (file)
@@ -88,8 +88,8 @@ static struct rio_dev **rionet_active;
 #define dev_rionet_capable(dev) \
        is_rionet_capable(dev->src_ops, dev->dst_ops)
 
-#define RIONET_MAC_MATCH(x)    (*(u32 *)x == 0x00010001)
-#define RIONET_GET_DESTID(x)   (*(u16 *)(x + 4))
+#define RIONET_MAC_MATCH(x)    (!memcmp((x), "\00\01\00\01", 4))
+#define RIONET_GET_DESTID(x)   ((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))
 
 static int rionet_rx_clean(struct net_device *ndev)
 {
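
The macro rewrite above builds the 16-bit destination ID byte by byte, so the result no longer depends on host byte order or on the 2-byte alignment of the address (byte 4 is treated as the most-significant byte). A stand-alone comparison of the two forms with a made-up address:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-wise extraction, as in the new macro: byte 4 is the high byte,
 * with no alignment or host-endianness assumptions. */
static uint16_t get_destid(const uint8_t *mac)
{
        return (uint16_t)((mac[4] << 8) | mac[5]);
}

int main(void)
{
        /* Made-up RIONET-style address: 00:01:00:01 prefix plus a 16-bit ID. */
        uint8_t mac[6] = { 0x00, 0x01, 0x00, 0x01, 0x12, 0x34 };
        uint16_t raw;

        printf("byte-wise destid = 0x%04x\n", get_destid(mac));

        /* The old form read a u16 straight out of the buffer; memcpy is used
         * here to keep the demo well defined, but the value still comes out
         * in host byte order (0x3412 on little-endian machines). */
        memcpy(&raw, mac + 4, sizeof(raw));
        printf("raw 16-bit load  = 0x%04x\n", raw);
        return 0;
}
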
index 62e43649466eea9ed63676f52c9acf464ec05257..4004fc2477bed97766d3b4c4c101c90d0a7f3e64 100644 (file)
@@ -155,11 +155,10 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
                if (unlikely(!skb))
                        return -ENOMEM;
 
-               /* Adjust the SKB for padding and checksum */
+               /* Adjust the SKB for padding */
                skb_reserve(skb, NET_IP_ALIGN);
                rx_buf->len = skb_len - NET_IP_ALIGN;
                rx_buf->is_page = false;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                rx_buf->dma_addr = pci_map_single(efx->pci_dev,
                                                  skb->data, rx_buf->len,
@@ -498,6 +497,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 
                EFX_BUG_ON_PARANOID(!checksummed);
                rx_buf->u.skb = NULL;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                gro_result = napi_gro_receive(napi, skb);
        }
index 3ee41da130c2e683063799421bb58019dfa4621e..f2e31c87d9fe7157babb47ad33de8555e8703145 100644 (file)
@@ -94,6 +94,10 @@ static int disable_msi = 0;
 module_param(disable_msi, int, 0);
 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 
+static int legacy_pme = 0;
+module_param(legacy_pme, int, 0);
+MODULE_PARM_DESC(legacy_pme, "Legacy power management");
+
 static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
@@ -794,6 +798,13 @@ static void sky2_wol_init(struct sky2_port *sky2)
        /* Disable PiG firmware */
        sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
 
+       /* Needed by some broken BIOSes: use PCI rather than PCI-e for WOL */
+       if (legacy_pme) {
+               u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+               reg1 |= PCI_Y2_PME_LEGACY;
+               sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+       }
+
        /* block receiver */
        sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
 }
@@ -2333,8 +2344,13 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
                skb_copy_from_linear_data(re->skb, skb->data, length);
                skb->ip_summed = re->skb->ip_summed;
                skb->csum = re->skb->csum;
+               skb->rxhash = re->skb->rxhash;
+               skb->vlan_tci = re->skb->vlan_tci;
+
                pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
                                               length, PCI_DMA_FROMDEVICE);
+               re->skb->vlan_tci = 0;
+               re->skb->rxhash = 0;
                re->skb->ip_summed = CHECKSUM_NONE;
                skb_put(skb, length);
        }
@@ -2419,9 +2435,6 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
        struct sk_buff *skb = NULL;
        u16 count = (status & GMR_FS_LEN) >> 16;
 
-       if (status & GMR_FS_VLAN)
-               count -= VLAN_HLEN;     /* Account for vlan tag */
-
        netif_printk(sky2, rx_status, KERN_DEBUG, dev,
                     "rx slot %u status 0x%x len %d\n",
                     sky2->rx_next, status, length);
@@ -2429,6 +2442,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
        sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
        prefetch(sky2->rx_ring + sky2->rx_next);
 
+       if (vlan_tx_tag_present(re->skb))
+               count -= VLAN_HLEN;     /* Account for vlan tag */
+
        /* This chip has hardware problems that generates bogus status.
         * So do only marginal checking and expect higher level protocols
         * to handle crap frames.
@@ -2486,11 +2502,8 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
 }
 
 static inline void sky2_skb_rx(const struct sky2_port *sky2,
-                              u32 status, struct sk_buff *skb)
+                              struct sk_buff *skb)
 {
-       if (status & GMR_FS_VLAN)
-               __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
-
        if (skb->ip_summed == CHECKSUM_NONE)
                netif_receive_skb(skb);
        else
@@ -2544,6 +2557,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
        }
 }
 
+static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
+{
+       struct sk_buff *skb;
+
+       skb = sky2->rx_ring[sky2->rx_next].skb;
+       __vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
+}
+
 static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
 {
        struct sk_buff *skb;
@@ -2602,8 +2623,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
                        }
 
                        skb->protocol = eth_type_trans(skb, dev);
-
-                       sky2_skb_rx(sky2, status, skb);
+                       sky2_skb_rx(sky2, skb);
 
                        /* Stop after net poll weight */
                        if (++work_done >= to_do)
@@ -2611,11 +2631,11 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
                        break;
 
                case OP_RXVLAN:
-                       sky2->rx_tag = length;
+                       sky2_rx_tag(sky2, length);
                        break;
 
                case OP_RXCHKSVLAN:
-                       sky2->rx_tag = length;
+                       sky2_rx_tag(sky2, length);
                        /* fall through */
                case OP_RXCHKS:
                        if (likely(dev->features & NETIF_F_RXCSUM))
index 318c9ae7bf91acfcf306ca63fb36cdce1dd24546..a79a1662ea9e549bd4cd5fcaf70d7c8a8c09b840 100644 (file)
@@ -2236,7 +2236,6 @@ struct sky2_port {
        u16                  rx_pending;
        u16                  rx_data_size;
        u16                  rx_nfrags;
-       u16                  rx_tag;
 
        struct {
                unsigned long last;
index c6d47d10590c6955f0d76bd72680a90fb9bdfd41..3d12e8ce93937579768f3a7b32f738197e947acd 100644 (file)
@@ -1083,10 +1083,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
 
 /* Quickly dumps bad packets */
 static void
-smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
+smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
 {
-       unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
-
        if (likely(pktwords >= 4)) {
                unsigned int timeout = 500;
                unsigned int val;
@@ -1150,7 +1148,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
                        continue;
                }
 
-               skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
+               skb = netdev_alloc_skb(dev, pktwords << 2);
                if (unlikely(!skb)) {
                        SMSC_WARN(pdata, rx_err,
                                  "Unable to allocate skb for rx packet");
@@ -1160,14 +1158,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
                        break;
                }
 
-               skb->data = skb->head;
-               skb_reset_tail_pointer(skb);
+               pdata->ops->rx_readfifo(pdata,
+                                (unsigned int *)skb->data, pktwords);
 
                /* Align IP on 16B boundary */
                skb_reserve(skb, NET_IP_ALIGN);
                skb_put(skb, pktlength - 4);
-               pdata->ops->rx_readfifo(pdata,
-                                (unsigned int *)skb->head, pktwords);
                skb->protocol = eth_type_trans(skb, dev);
                skb_checksum_none_assert(skb);
                netif_receive_skb(skb);
@@ -1390,7 +1386,7 @@ static int smsc911x_open(struct net_device *dev)
        smsc911x_reg_write(pdata, FIFO_INT, temp);
 
        /* set RX Data offset to 2 bytes for alignment */
-       smsc911x_reg_write(pdata, RX_CFG, (2 << 8));
+       smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
 
        /* enable NAPI polling before enabling RX interrupts */
        napi_enable(&pdata->napi);
index ab5930099267ca55b3a0c7667de17a9e1ce3f5ce..361beb797d1ea08ef84a7f7b7f5e7937c2928711 100644 (file)
@@ -2363,7 +2363,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
                netif_device_detach(dev);
 
                /* Switch off MAC, remember WOL setting */
-               gp->asleep_wol = gp->wake_on_lan;
+               gp->asleep_wol = !!gp->wake_on_lan;
                gem_do_stop(dev, gp->asleep_wol);
        } else
                gp->asleep_wol = 0;
index 38f68594f76b0261212ff646cc4f816bc9d6905a..c4ab8a721b4ac1fde41d685e5df10006f5746756 100644 (file)
@@ -740,8 +740,13 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
-       /* check for RX/TX work to do */
-       if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+
+       /* check for TX work to do */
+       if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
+               work_exists = 1;
+
+       /* check for RX work to do */
+       if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;
 
@@ -5216,6 +5221,9 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
                        return work_done;
        }
 
+       if (!tnapi->rx_rcb_prod_idx)
+               return work_done;
+
        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
@@ -6626,6 +6634,12 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                 */
                switch (i) {
                default:
+                       if (tg3_flag(tp, ENABLE_RSS)) {
+                               tnapi->rx_rcb_prod_idx = NULL;
+                               break;
+                       }
+                       /* Fall through */
+               case 1:
                        tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
                        break;
                case 2:
@@ -15278,7 +15292,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
 
                cancel_work_sync(&tp->reset_task);
 
-               if (!tg3_flag(tp, USE_PHYLIB)) {
+               if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }
index 882f53f708df46040132740b885ab839f18e876a..82d43b214f935e4dd7b4ccfbbb28bbd23cc1eefb 100644 (file)
@@ -93,6 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
        /* no jumbogram (16K) support for now */
 
        dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;
+       dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
 
        return 0;
 }
index c924ea2bce07a4c06aa8d6ab5d9c23620b446915..544c309e0d9590febd679a0c561ea1bd70f7cbfd 100644 (file)
@@ -83,6 +83,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
        struct cdc_state                *info = (void *) &dev->data;
        int                             status;
        int                             rndis;
+       bool                            android_rndis_quirk = false;
        struct usb_driver               *driver = driver_of(intf);
        struct usb_cdc_mdlm_desc        *desc = NULL;
        struct usb_cdc_mdlm_detail_desc *detail = NULL;
@@ -195,6 +196,11 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
                                        info->control,
                                        info->u->bSlaveInterface0,
                                        info->data);
+                               /* fall back to hard-wiring for RNDIS */
+                               if (rndis) {
+                                       android_rndis_quirk = true;
+                                       goto next_desc;
+                               }
                                goto bad_desc;
                        }
                        if (info->control != intf) {
@@ -271,11 +277,15 @@ next_desc:
        /* Microsoft ActiveSync based and some regular RNDIS devices lack the
         * CDC descriptors, so we'll hard-wire the interfaces and not check
         * for descriptors.
+        *
+        * Some Android RNDIS devices have a CDC Union descriptor pointing
+        * to nonexistent interfaces.  Ignore that and attempt the same
+        * hard-wired 0 and 1 interfaces.
         */
-       if (rndis && !info->u) {
+       if (rndis && (!info->u || android_rndis_quirk)) {
                info->control = usb_ifnum_to_if(dev->udev, 0);
                info->data = usb_ifnum_to_if(dev->udev, 1);
-               if (!info->control || !info->data) {
+               if (!info->control || !info->data || info->control != intf) {
                        dev_dbg(&intf->dev,
                                "rndis: master #0/%p slave #1/%p\n",
                                info->control,
@@ -472,6 +482,7 @@ static const struct driver_info wwan_info = {
 /*-------------------------------------------------------------------------*/
 
 #define HUAWEI_VENDOR_ID       0x12D1
+#define NOVATEL_VENDOR_ID      0x1410
 
 static const struct usb_device_id      products [] = {
 /*
@@ -570,6 +581,13 @@ static const struct usb_device_id  products [] = {
        .driver_info = (unsigned long)&wwan_info,
 },
 
+/* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+       .driver_info            = 0,
+},
+
 /*
  * WHITELIST!!!
  *
@@ -582,6 +600,21 @@ static const struct usb_device_id  products [] = {
  * because of bugs/quirks in a given product (like Zaurus, above).
  */
 {
+       /* Novatel USB551L */
+       /* This match must come *before* the generic CDC-ETHER match so that
+        * we get FLAG_WWAN set on the device, since its descriptors are
+        * generic CDC-ETHER.
+        */
+       .match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
+                | USB_DEVICE_ID_MATCH_PRODUCT
+                | USB_DEVICE_ID_MATCH_INT_INFO,
+       .idVendor               = NOVATEL_VENDOR_ID,
+       .idProduct              = 0xB001,
+       .bInterfaceClass        = USB_CLASS_COMM,
+       .bInterfaceSubClass     = USB_CDC_SUBCLASS_ETHERNET,
+       .bInterfaceProtocol     = USB_CDC_PROTO_NONE,
+       .driver_info = (unsigned long)&wwan_info,
+}, {
        USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
                        USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long) &cdc_info,
index d3b9e958db0bd38c585c4d8fcfa6a04ba5c9bc2e..6a53161102c40c82778050dc37e30a797a4f6088 100644 (file)
@@ -229,23 +229,40 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
        if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
 
                if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
-                       struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
+                       struct usb_cdc_ncm_ndp_input_size *ndp_in_sz;
+
+                       ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL);
+                       if (!ndp_in_sz) {
+                               err = -ENOMEM;
+                               goto size_err;
+                       }
+
                        err = usb_control_msg(ctx->udev,
                                        usb_sndctrlpipe(ctx->udev, 0),
                                        USB_CDC_SET_NTB_INPUT_SIZE,
                                        USB_TYPE_CLASS | USB_DIR_OUT
                                         | USB_RECIP_INTERFACE,
-                                       0, iface_no, &ndp_in_sz, 8, 1000);
+                                       0, iface_no, ndp_in_sz, 8, 1000);
+                       kfree(ndp_in_sz);
                } else {
-                       __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+                       __le32 *dwNtbInMaxSize;
+                       dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize),
+                                       GFP_KERNEL);
+                       if (!dwNtbInMaxSize) {
+                               err = -ENOMEM;
+                               goto size_err;
+                       }
+                       *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
                        err = usb_control_msg(ctx->udev,
                                        usb_sndctrlpipe(ctx->udev, 0),
                                        USB_CDC_SET_NTB_INPUT_SIZE,
                                        USB_TYPE_CLASS | USB_DIR_OUT
                                         | USB_RECIP_INTERFACE,
-                                       0, iface_no, &dwNtbInMaxSize, 4, 1000);
+                                       0, iface_no, dwNtbInMaxSize, 4, 1000);
+                       kfree(dwNtbInMaxSize);
                }
-
+size_err:
                if (err < 0)
                        pr_debug("Setting NTB Input Size failed\n");
        }
@@ -326,19 +343,29 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
 
        /* set Max Datagram Size (MTU) */
        if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
-               __le16 max_datagram_size;
+               __le16 *max_datagram_size;
                u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+
+               max_datagram_size = kzalloc(sizeof(*max_datagram_size),
+                               GFP_KERNEL);
+               if (!max_datagram_size) {
+                       err = -ENOMEM;
+                       goto max_dgram_err;
+               }
+
                err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
                                USB_CDC_GET_MAX_DATAGRAM_SIZE,
                                USB_TYPE_CLASS | USB_DIR_IN
                                 | USB_RECIP_INTERFACE,
-                               0, iface_no, &max_datagram_size,
+                               0, iface_no, max_datagram_size,
                                2, 1000);
                if (err < 0) {
                        pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
                                                CDC_NCM_MIN_DATAGRAM_SIZE);
+                       kfree(max_datagram_size);
                } else {
-                       ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+                       ctx->max_datagram_size =
+                               le16_to_cpu(*max_datagram_size);
                        /* Check Eth descriptor value */
                        if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
                                if (ctx->max_datagram_size > eth_max_sz)
@@ -361,8 +388,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
                                                USB_TYPE_CLASS | USB_DIR_OUT
                                                 | USB_RECIP_INTERFACE,
                                                0,
-                                               iface_no, &max_datagram_size,
+                                               iface_no, max_datagram_size,
                                                2, 1000);
+                       kfree(max_datagram_size);
+max_dgram_err:
                        if (err < 0)
                                pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
                }
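
The cdc_ncm hunks above move the control-transfer payloads off the stack and onto the heap; buffers passed to usb_control_msg() may be mapped for DMA and therefore have to come from kmalloc-style memory, not the caller's stack. A hedged user-space sketch of the allocate / transfer / free shape used here, with a hypothetical do_control_transfer() standing in for the USB call:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for usb_control_msg(): in the kernel the buffer it
 * gets may be handed to the DMA engine, which is why it must be heap memory. */
static int do_control_transfer(void *buf, size_t len)
{
        memset(buf, 0, len);            /* pretend the transfer happened */
        return (int)len;
}

static int set_ntb_input_size(uint32_t rx_max)
{
        uint32_t *dw_ntb_in_max_size;
        int err;

        dw_ntb_in_max_size = calloc(1, sizeof(*dw_ntb_in_max_size));
        if (!dw_ntb_in_max_size)
                return -1;              /* -ENOMEM in the driver */

        *dw_ntb_in_max_size = rx_max;   /* cpu_to_le32() in the driver */
        err = do_control_transfer(dw_ntb_in_max_size,
                                  sizeof(*dw_ntb_in_max_size));
        free(dw_ntb_in_max_size);

        if (err < 0) {
                fprintf(stderr, "setting NTB input size failed\n");
                return err;
        }
        return 0;
}

int main(void)
{
        return set_ntb_input_size(16384) ? 1 : 0;
}
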
index 81126ff85e0576e40b4d01aaf404044ffacfe1df..9cf4e47e55bf3cbbcf0ca8bab6b2e16be295756f 100644 (file)
@@ -59,6 +59,8 @@
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4   0x1297
+#define USB_PRODUCT_IPHONE_4_VZW 0x129c
+#define USB_PRODUCT_IPHONE_4S  0x12a0
 
 #define IPHETH_USBINTF_CLASS    255
 #define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +100,14 @@ static struct usb_device_id ipheth_table[] = {
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
                IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, ipheth_table);
index 041fb7d43c4f7d077c8175c5d395901a1d92061c..ef3b236b51457d49bbd59ddef6a4dbd648c24dd1 100644 (file)
@@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf)
        usb_set_intfdata(intf, NULL);
        if (dev) {
                set_bit(RTL8150_UNPLUG, &dev->flags);
-               tasklet_disable(&dev->tl);
                tasklet_kill(&dev->tl);
                unregister_netdev(dev->netdev);
                unlink_all_urbs(dev);
index ed1b4321058487a62530442586d7f0c2f7628dff..864448b761201122252cb47aaccadea269627f8a 100644 (file)
@@ -943,7 +943,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 }
 
 static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
        .rx_urb_size = 8 * 1024,
        .whitelist = {
                .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -951,7 +951,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
        }
 };
 
-static const struct driver_info sierra_net_info_68A3 = {
+static const struct driver_info sierra_net_info_direct_ip = {
        .description = "Sierra Wireless USB-to-WWAN Modem",
        .flags = FLAG_WWAN | FLAG_SEND_ZLP,
        .bind = sierra_net_bind,
@@ -959,12 +959,18 @@ static const struct driver_info sierra_net_info_68A3 = {
        .status = sierra_net_status,
        .rx_fixup = sierra_net_rx_fixup,
        .tx_fixup = sierra_net_tx_fixup,
-       .data = (unsigned long)&sierra_net_info_data_68A3,
+       .data = (unsigned long)&sierra_net_info_data_direct_ip,
 };
 
 static const struct usb_device_id products[] = {
        {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
-       .driver_info = (unsigned long) &sierra_net_info_68A3},
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+       {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
+       .driver_info = (unsigned long) &sierra_net_info_direct_ip},
 
        {}, /* last item */
 };
index 15b3d6888ae9b1997d3c4f09b8968baacef782cc..de0de3ee6392956f0347bf1c32be970147a8e00b 100644 (file)
@@ -1049,6 +1049,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
        dev->net->flags |= IFF_MULTICAST;
        dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
+       dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
        return 0;
 }
 
index f74f3ce7152630fc7adaf43cc6ce107fc8dd6214..e5c15bbbe62fa19d40fbb4b76c2b5f6752164808 100644 (file)
@@ -1190,7 +1190,7 @@ static const struct driver_info smsc95xx_info = {
        .rx_fixup       = smsc95xx_rx_fixup,
        .tx_fixup       = smsc95xx_tx_fixup,
        .status         = smsc95xx_status,
-       .flags          = FLAG_ETHER | FLAG_SEND_ZLP,
+       .flags          = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
 };
 
 static const struct usb_device_id products[] = {
index 253def9b5feb9d3c5c4ef1abcfdb61c9af85282d..8a35534d2b53133160b6e94c42d8efc6cca47326 100644 (file)
@@ -280,17 +280,32 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
 }
 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
 
+/* The caller must hold list->lock */
+static void __usbnet_queue_skb(struct sk_buff_head *list,
+                       struct sk_buff *newsk, enum skb_state state)
+{
+       struct skb_data *entry = (struct skb_data *) newsk->cb;
+
+       __skb_queue_tail(list, newsk);
+       entry->state = state;
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
  * completion callbacks.  2.5 should have fixed those bugs...
  */
 
-static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
+static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+               struct sk_buff_head *list, enum skb_state state)
 {
        unsigned long           flags;
+       enum skb_state          old_state;
+       struct skb_data *entry = (struct skb_data *) skb->cb;
 
        spin_lock_irqsave(&list->lock, flags);
+       old_state = entry->state;
+       entry->state = state;
        __skb_unlink(skb, list);
        spin_unlock(&list->lock);
        spin_lock(&dev->done.lock);
@@ -298,6 +313,7 @@ static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_hea
        if (dev->done.qlen == 1)
                tasklet_schedule(&dev->bh);
        spin_unlock_irqrestore(&dev->done.lock, flags);
+       return old_state;
 }
 
 /* some work can't be done in tasklets, so we use keventd
@@ -338,7 +354,6 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
        entry = (struct skb_data *) skb->cb;
        entry->urb = urb;
        entry->dev = dev;
-       entry->state = rx_start;
        entry->length = 0;
 
        usb_fill_bulk_urb (urb, dev->udev, dev->in,
@@ -370,7 +385,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                        tasklet_schedule (&dev->bh);
                        break;
                case 0:
-                       __skb_queue_tail (&dev->rxq, skb);
+                       __usbnet_queue_skb(&dev->rxq, skb, rx_start);
                }
        } else {
                netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -421,16 +436,17 @@ static void rx_complete (struct urb *urb)
        struct skb_data         *entry = (struct skb_data *) skb->cb;
        struct usbnet           *dev = entry->dev;
        int                     urb_status = urb->status;
+       enum skb_state          state;
 
        skb_put (skb, urb->actual_length);
-       entry->state = rx_done;
+       state = rx_done;
        entry->urb = NULL;
 
        switch (urb_status) {
        /* success */
        case 0:
                if (skb->len < dev->net->hard_header_len) {
-                       entry->state = rx_cleanup;
+                       state = rx_cleanup;
                        dev->net->stats.rx_errors++;
                        dev->net->stats.rx_length_errors++;
                        netif_dbg(dev, rx_err, dev->net,
@@ -469,7 +485,7 @@ static void rx_complete (struct urb *urb)
                                  "rx throttle %d\n", urb_status);
                }
 block:
-               entry->state = rx_cleanup;
+               state = rx_cleanup;
                entry->urb = urb;
                urb = NULL;
                break;
@@ -480,17 +496,18 @@ block:
                // FALLTHROUGH
 
        default:
-               entry->state = rx_cleanup;
+               state = rx_cleanup;
                dev->net->stats.rx_errors++;
                netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
                break;
        }
 
-       defer_bh(dev, skb, &dev->rxq);
+       state = defer_bh(dev, skb, &dev->rxq, state);
 
        if (urb) {
                if (netif_running (dev->net) &&
-                   !test_bit (EVENT_RX_HALT, &dev->flags)) {
+                   !test_bit (EVENT_RX_HALT, &dev->flags) &&
+                   state != unlink_start) {
                        rx_submit (dev, urb, GFP_ATOMIC);
                        return;
                }
@@ -576,18 +593,34 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
 {
        unsigned long           flags;
-       struct sk_buff          *skb, *skbnext;
+       struct sk_buff          *skb;
        int                     count = 0;
 
        spin_lock_irqsave (&q->lock, flags);
-       skb_queue_walk_safe(q, skb, skbnext) {
+       while (!skb_queue_empty(q)) {
                struct skb_data         *entry;
                struct urb              *urb;
                int                     retval;
 
-               entry = (struct skb_data *) skb->cb;
+               skb_queue_walk(q, skb) {
+                       entry = (struct skb_data *) skb->cb;
+                       if (entry->state != unlink_start)
+                               goto found;
+               }
+               break;
+found:
+               entry->state = unlink_start;
                urb = entry->urb;
 
+               /*
+                * Take a reference on the URB so it cannot be freed
+                * while usb_unlink_urb() runs; unlinking always races
+                * with the .complete handler (including defer_bh), so
+                * dropping the last reference there could otherwise
+                * trigger a use-after-free inside usb_unlink_urb.
+                */
+               usb_get_urb(urb);
+               spin_unlock_irqrestore(&q->lock, flags);
                // during some PM-driven resume scenarios,
                // these (async) unlinks complete immediately
                retval = usb_unlink_urb (urb);
@@ -595,6 +628,8 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
                        netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
                else
                        count++;
+               usb_put_urb(urb);
+               spin_lock_irqsave(&q->lock, flags);
        }
        spin_unlock_irqrestore (&q->lock, flags);
        return count;
@@ -1025,9 +1060,7 @@ static void tx_complete (struct urb *urb)
        }
 
        usb_autopm_put_interface_async(dev->intf);
-       urb->dev = NULL;
-       entry->state = tx_done;
-       defer_bh(dev, skb, &dev->txq);
+       (void) defer_bh(dev, skb, &dev->txq, tx_done);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1080,7 +1113,6 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        entry = (struct skb_data *) skb->cb;
        entry->urb = urb;
        entry->dev = dev;
-       entry->state = tx_start;
        entry->length = length;
 
        usb_fill_bulk_urb (urb, dev->udev, dev->out,
@@ -1139,7 +1171,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                break;
        case 0:
                net->trans_start = jiffies;
-               __skb_queue_tail (&dev->txq, skb);
+               __usbnet_queue_skb(&dev->txq, skb, tx_start);
                if (dev->txq.qlen >= TX_QLEN (dev))
                        netif_stop_queue (net);
        }
index 1a2234c20514f88e22bd07dab26e4b25d4cc26ea..c1e6a446d13c09ece13ed73a9573f95ffb5902b0 100644 (file)
@@ -332,6 +332,11 @@ static const struct usb_device_id  products [] = {
        .driver_info = ZAURUS_PXA_INFO,
 },
 {
+       /* Motorola Rokr E6 */
+       USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6027, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &bogus_mdlm_info,
+}, {
        /* Motorola MOTOMAGX phones */
        USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
@@ -349,6 +354,13 @@ static const struct usb_device_id  products [] = {
        ZAURUS_MASTER_INTERFACE,
        .driver_info = OLYMPUS_MXL_INFO,
 },
+
+/* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long) &bogus_mdlm_info,
+},
        { },            // END
 };
 MODULE_DEVICE_TABLE(usb, products);
index 4bf7c6d4ab901347eabd65525c436be0934f41f2..6c0a3b0f0afda42caa8f20ede78d186e2094c9eb 100644 (file)
@@ -421,7 +421,9 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
        unregister_netdevice_queue(peer, head);
 }
 
-static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
+static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
+       [VETH_INFO_PEER]        = { .len = sizeof(struct ifinfomsg) },
+};
 
 static struct rtnl_link_ops veth_link_ops = {
        .kind           = DRV_NAME,
index 06daa9d6fee82c3354ced149e533f22bb7d6709f..c7e493461e0ad3d2f88d3ae84b798da92ae414ab 100644 (file)
@@ -2513,9 +2513,6 @@ static int velocity_close(struct net_device *dev)
        if (dev->irq != 0)
                free_irq(dev->irq, dev);
 
-       /* Power down the chip */
-       pci_set_power_state(vptr->pdev, PCI_D3hot);
-
        velocity_free_rings(vptr);
 
        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
index 67402350d0dffaae181015f816cf9c3d67313847..0ef676dcb9c32b15e11fccb2153398b6307af701 100644 (file)
@@ -830,13 +830,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                                        ctx->l4_hdr_size = ((struct tcphdr *)
                                           skb_transport_header(skb))->doff * 4;
                                else if (iph->protocol == IPPROTO_UDP)
-                                       /*
-                                        * Use tcp header size so that bytes to
-                                        * be copied are more than required by
-                                        * the device.
-                                        */
                                        ctx->l4_hdr_size =
-                                                       sizeof(struct tcphdr);
+                                                       sizeof(struct udphdr);
                                else
                                        ctx->l4_hdr_size = 0;
                        } else {
index e08d75e3f170a3ae523957fde0ae80b1e60e3a20..862be0500091e3033ec9bc8f074509947a908782 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.1.18.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.1.29.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01011200
+#define VMXNET3_DRIVER_VERSION_NUM      0x01011D00
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index 2edd8fe1c1f31c409cef10fedd62c5e9be7cc5b1..0a998638e1b52ccd1068ad7d4449a8901c140124 100644 (file)
@@ -606,7 +606,8 @@ static void i2400m_get_drvinfo(struct net_device *net_dev,
        struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
 
        strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
-       strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
+       strncpy(info->fw_version,
+               i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
        if (net_dev->dev.parent)
                strncpy(info->bus_info, dev_name(net_dev->dev.parent),
                        sizeof(info->bus_info) - 1);
index 0ca86f9ec4ed38531b0d59060d8f9fa49024dee6..182562952c792a9e347b9a0cc62744ee4e7a8f81 100644 (file)
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
        xenvif_get(vif);
 
        rtnl_lock();
-       if (netif_running(vif->dev))
-               xenvif_up(vif);
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        netif_carrier_on(vif->dev);
+       if (netif_running(vif->dev))
+               xenvif_up(vif);
        rtnl_unlock();
 
        return 0;
index dccd8636095cb361e2e0e1e2bb8fb7fdd57ecee2..f8c752e408a663d55adf7e84ca4fb42aa93d1d02 100644 (file)
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
        return err;
 }
 
+static int timer_mode;
+
 static int __init oprofile_init(void)
 {
        int err;
 
+       /* always init the architecture code to set up backtrace support */
        err = oprofile_arch_init(&oprofile_ops);
-       if (err < 0 || timer) {
-               printk(KERN_INFO "oprofile: using timer interrupt.\n");
+
+       timer_mode = err || timer;      /* fall back to timer mode on errors */
+       if (timer_mode) {
+               if (!err)
+                       oprofile_arch_exit();
                err = oprofile_timer_init(&oprofile_ops);
                if (err)
                        return err;
        }
-       return oprofilefs_register();
+
+       err = oprofilefs_register();
+       if (!err)
+               return 0;
+
+       /* failed */
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
+
+       return err;
 }
 
 
 static void __exit oprofile_exit(void)
 {
-       oprofile_timer_exit();
        oprofilefs_unregister();
-       oprofile_arch_exit();
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
 }
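
The rewritten oprofile_init()/oprofile_exit() above remember whether the timer fallback was used so that the error path and module exit tear down exactly what was brought up. The following stand-alone sketch shows only the error-triggered fallback; backend_a_*(), timer_*(), register_fs() and unregister_fs() are made-up stand-ins, not the oprofile functions.

#include <stdbool.h>
#include <stdio.h>

static bool timer_mode;

static int backend_a_init(void) { return -1; }          /* pretend hardware init fails */
static void backend_a_exit(void) { puts("backend A exit"); }
static int timer_init(void) { puts("timer init"); return 0; }
static void timer_exit(void) { puts("timer exit"); }
static int register_fs(void) { return 0; }
static void unregister_fs(void) { }

static int profiler_init(void)
{
        int err = backend_a_init();

        timer_mode = err != 0;          /* fall back to timer mode on errors */
        if (timer_mode) {
                err = timer_init();
                if (err)
                        return err;
        }

        err = register_fs();
        if (!err)
                return 0;

        /* failed: undo exactly what was set up */
        if (timer_mode)
                timer_exit();
        else
                backend_a_exit();
        return err;
}

static void profiler_exit(void)
{
        unregister_fs();
        if (timer_mode)
                timer_exit();
        else
                backend_a_exit();
}

int main(void)
{
        if (profiler_init() == 0)
                profiler_exit();
        return 0;
}
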
 
 
index 89f63456646fa4c67b6ee74f7c657917be05a425..84a208dbed939afa20743804d8e4d75ca5ff741a 100644 (file)
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
+       retval = 0;
        if (val)
                retval = oprofile_start();
        else
index e9ff6f7770be23a046cd5ce22b8fd504794b3907..1c0b799b30bca272c99e798e80833e3734e083a1 100644 (file)
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
 }
 
 
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows the write() syscall
+ * behaviour when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
 int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 {
        char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
        spin_lock_irqsave(&oprofilefs_lock, flags);
        *val = simple_strtoul(tmpbuf, NULL, 0);
        spin_unlock_irqrestore(&oprofilefs_lock, flags);
-       return 0;
+       return count;
 }
 
 
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&value, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(file->private_data, value);
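
The return-value change above makes oprofilefs_ulong_from_user() behave like write(2): a positive byte count on success, 0 for a zero-length write (leaving *val untouched), and a negative errno on failure, so callers simply bail out on retval <= 0. A small user-space sketch of that convention; ulong_from_buf() and setting_write() are invented names.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns count on success, 0 if count == 0 (*val untouched), -EINVAL on error. */
static int ulong_from_buf(unsigned long *val, const char *buf, size_t count)
{
        char tmp[32];

        if (count == 0)
                return 0;
        if (count >= sizeof(tmp))
                return -EINVAL;

        memcpy(tmp, buf, count);
        tmp[count] = '\0';
        *val = strtoul(tmp, NULL, 0);
        return (int)count;
}

static int setting;

static int setting_write(const char *buf, size_t count)
{
        unsigned long val;
        int retval = ulong_from_buf(&val, buf, count);

        if (retval <= 0)        /* error, or zero-length write: nothing to do */
                return retval;

        setting = (int)val;
        return retval;          /* report how many bytes were consumed */
}

int main(void)
{
        printf("%d\n", setting_write("42", 2));   /* prints 2 */
        printf("%d\n", setting_write("", 0));     /* prints 0, setting unchanged */
        printf("setting=%d\n", setting);          /* prints setting=42 */
        return 0;
}
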
index 3ef44624f5103ddaf405e76fcafd0afe6b27a132..878fba1265829cdab586a145d86a332b5ce32874 100644 (file)
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
        ops->start = oprofile_hrtimer_start;
        ops->stop = oprofile_hrtimer_stop;
        ops->cpu_type = "timer";
+       printk(KERN_INFO "oprofile: using timer interrupt.\n");
        return 0;
 }
 
index aca972bbfb4c8ba04e7fc74efd74636ebe49ea65..dd7e0c51a33e5e5f79eb9f11b444fe4667d2ba2d 100644 (file)
@@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
 
 static int is_shpc_capable(struct pci_dev *dev)
 {
-       if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
-                                               PCI_DEVICE_ID_AMD_GOLAM_7450))
+       if (dev->vendor == PCI_VENDOR_ID_AMD &&
+           dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
                return 1;
        if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
                return 0;
index 36547f0ce305e987c7a414a47d6a4e5806b239cb..75ba2311b54f3f37b62176a37e032c480be9f3f1 100644 (file)
@@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
        ctrl->pci_dev = pdev;  /* pci_dev of the P2P bridge */
        ctrl_dbg(ctrl, "Hotplug Controller:\n");
 
-       if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
-                               PCI_DEVICE_ID_AMD_GOLAM_7450)) {
+       if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+           pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
                /* amd shpc driver doesn't use Base Offset; assume 0 */
                ctrl->mmio_base = pci_resource_start(pdev, 0);
                ctrl->mmio_size = pci_resource_len(pdev, 0);
index f02c34d26d1b08226599cfa4c183da703b9486ed..0ec8930f31b8797812c4b2f795d9bb042c35acfb 100644 (file)
@@ -307,6 +307,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
        return (pte->val & 3) != 0;
 }
 
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+       return (pte->val & (1 << 7));
+}
+
 static inline int first_pte_in_page(struct dma_pte *pte)
 {
        return !((unsigned long)pte & ~VTD_PAGE_MASK);
@@ -578,17 +583,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
 
 static void domain_update_iommu_superpage(struct dmar_domain *domain)
 {
-       int i, mask = 0xf;
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu = NULL;
+       int mask = 0xf;
 
        if (!intel_iommu_superpage) {
                domain->iommu_superpage = 0;
                return;
        }
 
-       domain->iommu_superpage = 4; /* 1TiB */
-
-       for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
-               mask |= cap_super_page_val(g_iommus[i]->cap);
+       /* set iommu_superpage to the smallest common denominator */
+       for_each_active_iommu(iommu, drhd) {
+               mask &= cap_super_page_val(iommu->cap);
                if (!mask) {
                        break;
                }
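
The loop above replaces the old OR accumulation with an AND, so iommu_superpage ends up reflecting only page sizes that every active IOMMU supports. A toy illustration of that common-denominator masking; the bit values are made up, not the VT-d capability encoding.

#include <stdio.h>

int main(void)
{
        unsigned int caps[] = { 0x3 /* 2M+1G */, 0x1 /* 2M only */, 0x3 };
        unsigned int mask = 0xf;

        for (unsigned int i = 0; i < sizeof(caps) / sizeof(caps[0]); i++) {
                mask &= caps[i];
                if (!mask)
                        break;          /* no common superpage size at all */
        }
        printf("common superpage mask: 0x%x\n", mask);  /* prints 0x1 */
        return 0;
}
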
@@ -731,29 +737,23 @@ out:
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-                                     unsigned long pfn, int large_level)
+                                     unsigned long pfn, int target_level)
 {
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
-       int offset, target_level;
+       int offset;
 
        BUG_ON(!domain->pgd);
        BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
        parent = domain->pgd;
 
-       /* Search pte */
-       if (!large_level)
-               target_level = 1;
-       else
-               target_level = large_level;
-
        while (level > 0) {
                void *tmp_page;
 
                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
-               if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+               if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
                        break;
                if (level == target_level)
                        break;
@@ -817,13 +817,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
+static int dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
 {
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        unsigned int large_page = 1;
        struct dma_pte *first_pte, *pte;
+       int order;
 
        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -847,6 +848,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
                                   (void *)pte - (void *)first_pte);
 
        } while (start_pfn && start_pfn <= last_pfn);
+
+       order = (large_page - 1) * 9;
+       return order;
 }
 
 /* free page table pages. last level pte should already be cleared */
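
dma_pte_clear_range() above now reports the order of the region it cleared as (large_page - 1) * 9, i.e. 9 bits of address space per page-table level on top of the base page. A quick stand-alone check of that arithmetic, assuming a 4 KiB base page (VTD_PAGE_SHIFT = 12).

#include <stdio.h>

int main(void)
{
        const unsigned int page_shift = 12;               /* 4 KiB base page */

        for (unsigned int level = 1; level <= 3; level++) {
                unsigned int order = (level - 1) * 9;
                unsigned long long bytes = 1ULL << (page_shift + order);

                printf("level %u -> order %2u -> %llu KiB\n",
                       level, order, bytes >> 10);
        }
        return 0;   /* level 1: 4 KiB, level 2: 2048 KiB, level 3: 1048576 KiB (1 GiB) */
}
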
@@ -3740,6 +3744,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
                vm_domain_exit(dmar_domain);
                return -ENOMEM;
        }
+       domain_update_iommu_cap(dmar_domain);
        domain->priv = dmar_domain;
 
        return 0;
@@ -3865,14 +3870,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 {
        struct dmar_domain *dmar_domain = domain->priv;
        size_t size = PAGE_SIZE << gfp_order;
+       int order;
 
-       dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+       order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                            (iova + size - 1) >> VTD_PAGE_SHIFT);
 
        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;
 
-       return gfp_order;
+       return order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
index 2f10328bf661234437c1e862f7d4e1a96afeabb3..e1749825008d11e49dc94fc9dc7757c88fa4380c 100644 (file)
@@ -869,5 +869,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
 
 void pci_msi_init_pci_dev(struct pci_dev *dev)
 {
+       int pos;
        INIT_LIST_HEAD(&dev->msi_list);
+
+       /* Disable the MSI hardware to avoid screaming interrupts
+        * during boot.  This is the power-on reset default, so
+        * usually this should be a no-op.
+        */
+       pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+       if (pos)
+               msi_set_enable(dev, pos, 0);
+       msix_set_enable(dev, 0);
 }
index d36f41ea8cbfb9ccd61ebfc9c68700b98cb31136..56b04bc80a127ef263ba29aeebbd89b3542d022d 100644 (file)
@@ -393,7 +393,6 @@ static int __init acpi_pci_init(void)
 
        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
                printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
-               pcie_clear_aspm();
                pcie_no_aspm();
        }
 
index d549bbc93cddf7d55f7b1193692e7bb8a330ae87..bf401aead873dcfa7237f2f1a274f5aaec89d8fe 100644 (file)
@@ -1682,6 +1682,11 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
        if (target_state == PCI_POWER_ERROR)
                return -EIO;
 
+       /* Some devices mustn't be in D3 during system sleep */
+       if (target_state == PCI_D3hot &&
+                       (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
+               return 0;
+
        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
 
        error = pci_set_power_state(dev, target_state);
index 6892601fc76f402bd6f9cdacca44adcef82dc921..0ff0182f176a387e52707a8961ffc5ae38c83873 100644 (file)
@@ -68,7 +68,7 @@ struct pcie_link_state {
        struct aspm_latency acceptable[8];
 };
 
-static int aspm_disabled, aspm_force, aspm_clear_state;
+static int aspm_disabled, aspm_force;
 static bool aspm_support_enabled = true;
 static DEFINE_MUTEX(aspm_lock);
 static LIST_HEAD(link_list);
@@ -500,9 +500,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
        int pos;
        u32 reg32;
 
-       if (aspm_clear_state)
-               return -EINVAL;
-
        /*
         * Some functions in a slot might not all be PCIe functions,
         * very strange. Disable ASPM for the whole slot
@@ -511,6 +508,16 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
                pos = pci_pcie_cap(child);
                if (!pos)
                        return -EINVAL;
+
+               /*
+                * If ASPM is disabled then we're not going to change
+                * the BIOS state. It's safe to continue even if it's a
+                * pre-1.1 device.
+                */
+
+               if (aspm_disabled)
+                       continue;
+
                /*
                 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
                 * RBER bit to determine if a function is 1.1 version device
@@ -574,9 +581,6 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
            pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
                return;
 
-       if (aspm_disabled && !aspm_clear_state)
-               return;
-
        /* VIA has a strange chipset, root port is under a bridge */
        if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
            pdev->bus->self)
@@ -608,7 +612,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
         * the BIOS's expectation, we'll do so once pci_enable_device() is
         * called.
         */
-       if (aspm_policy != POLICY_POWERSAVE || aspm_clear_state) {
+       if (aspm_policy != POLICY_POWERSAVE) {
                pcie_config_aspm_path(link);
                pcie_set_clkpm(link, policy_to_clkpm_state(link));
        }
@@ -649,8 +653,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
        struct pci_dev *parent = pdev->bus->self;
        struct pcie_link_state *link, *root, *parent_link;
 
-       if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
-           !parent || !parent->link_state)
+       if (!pci_is_pcie(pdev) || !parent || !parent->link_state)
                return;
        if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
            (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -734,13 +737,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
  * pci_disable_link_state - disable pci device's link state, so the link will
  * never enter specific states
  */
-static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
+                                    bool force)
 {
        struct pci_dev *parent = pdev->bus->self;
        struct pcie_link_state *link;
 
-       if (aspm_disabled || !pci_is_pcie(pdev))
+       if (aspm_disabled && !force)
                return;
+
+       if (!pci_is_pcie(pdev))
+               return;
+
        if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
            pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
                parent = pdev;
@@ -768,16 +776,31 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
 
 void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
 {
-       __pci_disable_link_state(pdev, state, false);
+       __pci_disable_link_state(pdev, state, false, false);
 }
 EXPORT_SYMBOL(pci_disable_link_state_locked);
 
 void pci_disable_link_state(struct pci_dev *pdev, int state)
 {
-       __pci_disable_link_state(pdev, state, true);
+       __pci_disable_link_state(pdev, state, true, false);
 }
 EXPORT_SYMBOL(pci_disable_link_state);
 
+void pcie_clear_aspm(struct pci_bus *bus)
+{
+       struct pci_dev *child;
+
+       /*
+        * Clear any ASPM setup that the firmware has carried out on this bus
+        */
+       list_for_each_entry(child, &bus->devices, bus_list) {
+               __pci_disable_link_state(child, PCIE_LINK_STATE_L0S |
+                                        PCIE_LINK_STATE_L1 |
+                                        PCIE_LINK_STATE_CLKPM,
+                                        false, true);
+       }
+}
+
 static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
 {
        int i;
@@ -935,6 +958,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
 static int __init pcie_aspm_disable(char *str)
 {
        if (!strcmp(str, "off")) {
+               aspm_policy = POLICY_DEFAULT;
                aspm_disabled = 1;
                aspm_support_enabled = false;
                printk(KERN_INFO "PCIe ASPM is disabled\n");
@@ -947,16 +971,18 @@ static int __init pcie_aspm_disable(char *str)
 
 __setup("pcie_aspm=", pcie_aspm_disable);
 
-void pcie_clear_aspm(void)
-{
-       if (!aspm_force)
-               aspm_clear_state = 1;
-}
-
 void pcie_no_aspm(void)
 {
-       if (!aspm_force)
+       /*
+        * Disabling ASPM is intended to prevent the kernel from modifying
+        * existing hardware state, not to clear existing state. To that end:
+        * (a) set policy to POLICY_DEFAULT in order to avoid changing state
+        * (b) prevent userspace from changing policy
+        */
+       if (!aspm_force) {
+               aspm_policy = POLICY_DEFAULT;
                aspm_disabled = 1;
+       }
 }
 
 /**
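
The refactoring above funnels every caller through __pci_disable_link_state() with a "force" flag: when ASPM is disabled the kernel leaves the firmware's link state alone, unless a caller such as pcie_clear_aspm() explicitly forces a clear. A minimal stand-in showing only that gating; the flag and function names here are invented.

#include <stdbool.h>
#include <stdio.h>

static bool aspm_disabled;

static void set_link_state(int state, bool force)
{
        if (aspm_disabled && !force) {
                puts("ASPM disabled: leaving firmware state alone");
                return;
        }
        printf("programming link state 0x%x\n", state);
}

int main(void)
{
        aspm_disabled = true;           /* e.g. booted with pcie_aspm=off */
        set_link_state(0x3, false);     /* no-op: respects the firmware state */
        set_link_state(0x0, true);      /* forced clear still goes through */
        return 0;
}
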
index bafb3c3d4a8963e6a635d94a414e290e4998af40..5b3771a7a413a7df105cc8647b1e7e5f2e96194a 100644 (file)
@@ -657,6 +657,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
        dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
                secondary, subordinate, pass);
 
+       if (!primary && (primary != bus->number) && secondary && subordinate) {
+               dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
+               primary = bus->number;
+       }
+
        /* Check if setup is sensible at all */
        if (!pass &&
            (primary != bus->number || secondary <= bus->number)) {
index 1196f61a4ab6b2dabab0faacd1cbde65bf53af9a..975af4353e73bd429ad972bdafa76495765b42ad 100644 (file)
@@ -2745,20 +2745,6 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
        /* disable must be done via function #0 */
        if (PCI_FUNC(dev->devfn))
                return;
-
-       pci_read_config_byte(dev, 0xCB, &disable);
-
-       if (disable & 0x02)
-               return;
-
-       pci_read_config_byte(dev, 0xCA, &write_enable);
-       pci_write_config_byte(dev, 0xCA, 0x57);
-       pci_write_config_byte(dev, 0xCB, disable | 0x02);
-       pci_write_config_byte(dev, 0xCA, write_enable);
-
-       dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
-       dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
-
        /*
         * RICOH 0xe823 SD/MMC card reader fails to recognize
         * certain types of SD/MMC cards. Lowering the SD base
@@ -2781,6 +2767,20 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
 
                dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n");
        }
+
+       pci_read_config_byte(dev, 0xCB, &disable);
+
+       if (disable & 0x02)
+               return;
+
+       pci_read_config_byte(dev, 0xCA, &write_enable);
+       pci_write_config_byte(dev, 0xCA, 0x57);
+       pci_write_config_byte(dev, 0xCB, disable | 0x02);
+       pci_write_config_byte(dev, 0xCA, write_enable);
+
+       dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
+       dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
+
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
@@ -2822,6 +2822,66 @@ static void __devinit fixup_ti816x_class(struct pci_dev* dev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class);
 
+/*
+ * Some BIOS implementations leave the Intel GPU interrupts enabled,
+ * even though no one is handling them (e.g. the i915 driver is never loaded).
+ * Additionally the interrupt destination is not set up properly
+ * and the interrupt ends up -somewhere-.
+ *
+ * These spurious interrupts are "sticky" and the kernel disables
+ * the (shared) interrupt line after 100,000+ generated interrupts.
+ *
+ * Fix it by disabling the still-enabled interrupts.
+ * This resolves crashes often seen on monitor unplug.
+ */
+#define I915_DEIER_REG 0x4400c
+static void __devinit disable_igfx_irq(struct pci_dev *dev)
+{
+       void __iomem *regs = pci_iomap(dev, 0, 0);
+       if (regs == NULL) {
+               dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n");
+               return;
+       }
+
+       /* Check if any interrupt line is still enabled */
+       if (readl(regs + I915_DEIER_REG) != 0) {
+               dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; "
+                       "disabling\n");
+
+               writel(0, regs + I915_DEIER_REG);
+       }
+
+       pci_iounmap(dev, regs);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+
+/*
+ * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
+ * ASUS motherboards will cause memory corruption or a system crash
+ * if they are in D3 while the system is put into S3 sleep.
+ */
+static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
+{
+       const char *sys_info;
+       static const char good_Asus_board[] = "P8Z68-V";
+
+       if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
+               return;
+       if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
+               return;
+       sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+       if (sys_info && memcmp(sys_info, good_Asus_board,
+                       sizeof(good_Asus_board) - 1) == 0)
+               return;
+
+       dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
+       dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
+       device_set_wakeup_capable(&dev->dev, false);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
+
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
                          struct pci_fixup *end)
 {
index 492b7d807fe8f7a17eafcbe7aa3b9bbeb4fbd89d..d4e7a10522590ceb97a7592577dd7f09d502b91b 100644 (file)
@@ -400,9 +400,8 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data)
                        dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
                                pci_name(dev), i);
                        if (pci_claim_resource(dev, i)) {
-                               dev_err(&pdev->xdev->dev, "Could not claim "
-                                       "resource %s/%d! Device offline. Try "
-                                       "giving less than 4GB to domain.\n",
+                               dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! "
+                                       "Device offline. Try using e820_host=1 in the guest config.\n",
                                        pci_name(dev), i);
                        }
                }
index 749c2a16012c582bca165db93f14cf4c21ec5293..1932029de48d67cf7dd10ed18f2de11e40056c29 100644 (file)
@@ -1269,10 +1269,8 @@ static int pcmcia_bus_add(struct pcmcia_socket *skt)
 
 static int pcmcia_bus_early_resume(struct pcmcia_socket *skt)
 {
-       if (!verify_cis_cache(skt)) {
-               pcmcia_put_socket(skt);
+       if (!verify_cis_cache(skt))
                return 0;
-       }
 
        dev_dbg(&skt->dev, "cis mismatch - different card\n");
 
index e1c4938b301bbcdacbcc1303e9b8cde5bf288585..2080b223c74a9d1bc91faa71eeb36d6a46995664 100644 (file)
@@ -304,6 +304,10 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = {
        .wireless = 2,
 };
 
+static struct quirk_entry quirk_lenovo_ideapad_s205 = {
+       .wireless = 3,
+};
+
 /* The Aspire One has a dummy ACPI-WMI interface - disable it */
 static struct dmi_system_id __devinitdata acer_blacklist[] = {
        {
@@ -450,6 +454,24 @@ static struct dmi_system_id acer_quirks[] = {
                },
                .driver_data = &quirk_medion_md_98300,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "Lenovo Ideapad S205",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"),
+               },
+               .driver_data = &quirk_lenovo_ideapad_s205,
+       },
+       {
+               .callback = dmi_matched,
+               .ident = "Lenovo 3000 N200",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "0687A31"),
+               },
+               .driver_data = &quirk_fujitsu_amilo_li_1718,
+       },
        {}
 };
 
@@ -542,6 +564,12 @@ struct wmi_interface *iface)
                                return AE_ERROR;
                        *value = result & 0x1;
                        return AE_OK;
+               case 3:
+                       err = ec_read(0x78, &result);
+                       if (err)
+                               return AE_ERROR;
+                       *value = result & 0x1;
+                       return AE_OK;
                default:
                        err = ec_read(0xA, &result);
                        if (err)
@@ -648,6 +676,33 @@ static acpi_status AMW0_find_mailled(void)
        return AE_OK;
 }
 
+static int AMW0_set_cap_acpi_check_device_found;
+
+static acpi_status AMW0_set_cap_acpi_check_device_cb(acpi_handle handle,
+       u32 level, void *context, void **retval)
+{
+       AMW0_set_cap_acpi_check_device_found = 1;
+       return AE_OK;
+}
+
+static const struct acpi_device_id norfkill_ids[] = {
+       { "VPC2004", 0},
+       { "IBM0068", 0},
+       { "LEN0068", 0},
+       { "SNY5001", 0},        /* sony-laptop in charge */
+       { "", 0},
+};
+
+static int AMW0_set_cap_acpi_check_device(void)
+{
+       const struct acpi_device_id *id;
+
+       for (id = norfkill_ids; id->id[0]; id++)
+               acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb,
+                               NULL, NULL);
+       return AMW0_set_cap_acpi_check_device_found;
+}
+
 static acpi_status AMW0_set_capabilities(void)
 {
        struct wmab_args args;
@@ -661,7 +716,9 @@ static acpi_status AMW0_set_capabilities(void)
         * work.
         */
        if (wmi_has_guid(AMW0_GUID2)) {
-               interface->capability |= ACER_CAP_WIRELESS;
+               if ((quirks != &quirk_unknown) ||
+                   !AMW0_set_cap_acpi_check_device())
+                       interface->capability |= ACER_CAP_WIRELESS;
                return AE_OK;
        }
 
@@ -1265,9 +1322,15 @@ static void acer_rfkill_update(struct work_struct *ignored)
        u32 state;
        acpi_status status;
 
-       status = get_u32(&state, ACER_CAP_WIRELESS);
-       if (ACPI_SUCCESS(status))
-               rfkill_set_sw_state(wireless_rfkill, !state);
+       if (has_cap(ACER_CAP_WIRELESS)) {
+               status = get_u32(&state, ACER_CAP_WIRELESS);
+               if (ACPI_SUCCESS(status)) {
+                       if (quirks->wireless == 3)
+                               rfkill_set_hw_state(wireless_rfkill, !state);
+                       else
+                               rfkill_set_sw_state(wireless_rfkill, !state);
+               }
+       }
 
        if (has_cap(ACER_CAP_BLUETOOTH)) {
                status = get_u32(&state, ACER_CAP_BLUETOOTH);
@@ -1334,19 +1397,24 @@ static struct rfkill *acer_rfkill_register(struct device *dev,
 
 static int acer_rfkill_init(struct device *dev)
 {
-       wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN,
-               "acer-wireless", ACER_CAP_WIRELESS);
-       if (IS_ERR(wireless_rfkill))
-               return PTR_ERR(wireless_rfkill);
+       int err;
+
+       if (has_cap(ACER_CAP_WIRELESS)) {
+               wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN,
+                       "acer-wireless", ACER_CAP_WIRELESS);
+               if (IS_ERR(wireless_rfkill)) {
+                       err = PTR_ERR(wireless_rfkill);
+                       goto error_wireless;
+               }
+       }
 
        if (has_cap(ACER_CAP_BLUETOOTH)) {
                bluetooth_rfkill = acer_rfkill_register(dev,
                        RFKILL_TYPE_BLUETOOTH, "acer-bluetooth",
                        ACER_CAP_BLUETOOTH);
                if (IS_ERR(bluetooth_rfkill)) {
-                       rfkill_unregister(wireless_rfkill);
-                       rfkill_destroy(wireless_rfkill);
-                       return PTR_ERR(bluetooth_rfkill);
+                       err = PTR_ERR(bluetooth_rfkill);
+                       goto error_bluetooth;
                }
        }
 
@@ -1355,30 +1423,44 @@ static int acer_rfkill_init(struct device *dev)
                        RFKILL_TYPE_WWAN, "acer-threeg",
                        ACER_CAP_THREEG);
                if (IS_ERR(threeg_rfkill)) {
-                       rfkill_unregister(wireless_rfkill);
-                       rfkill_destroy(wireless_rfkill);
-                       rfkill_unregister(bluetooth_rfkill);
-                       rfkill_destroy(bluetooth_rfkill);
-                       return PTR_ERR(threeg_rfkill);
+                       err = PTR_ERR(threeg_rfkill);
+                       goto error_threeg;
                }
        }
 
        rfkill_inited = true;
 
-       if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID))
+       if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) &&
+           has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG))
                schedule_delayed_work(&acer_rfkill_work,
                        round_jiffies_relative(HZ));
 
        return 0;
+
+error_threeg:
+       if (has_cap(ACER_CAP_BLUETOOTH)) {
+               rfkill_unregister(bluetooth_rfkill);
+               rfkill_destroy(bluetooth_rfkill);
+       }
+error_bluetooth:
+       if (has_cap(ACER_CAP_WIRELESS)) {
+               rfkill_unregister(wireless_rfkill);
+               rfkill_destroy(wireless_rfkill);
+       }
+error_wireless:
+       return err;
 }
 
 static void acer_rfkill_exit(void)
 {
-       if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID))
+       if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) &&
+           has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG))
                cancel_delayed_work_sync(&acer_rfkill_work);
 
-       rfkill_unregister(wireless_rfkill);
-       rfkill_destroy(wireless_rfkill);
+       if (has_cap(ACER_CAP_WIRELESS)) {
+               rfkill_unregister(wireless_rfkill);
+               rfkill_destroy(wireless_rfkill);
+       }
 
        if (has_cap(ACER_CAP_BLUETOOTH)) {
                rfkill_unregister(bluetooth_rfkill);
index 16585756df8d247f56842cb425bbaceab3171ba2..ec85987b2246ad1c3423b9a0cce8c3367bfec1bb 100644 (file)
@@ -370,15 +370,17 @@ static u8 read_brightness(void)
                                  &sretval);
        if (!retval) {
                user_brightness = sretval.retval[0];
-               if (user_brightness != 0)
+               if (user_brightness > sabi_config->min_brightness)
                        user_brightness -= sabi_config->min_brightness;
+               else
+                       user_brightness = 0;
        }
        return user_brightness;
 }
 
 static void set_brightness(u8 user_brightness)
 {
-       u8 user_level = user_brightness - sabi_config->min_brightness;
+       u8 user_level = user_brightness + sabi_config->min_brightness;
 
        sabi_set_command(sabi_config->commands.set_brightness, user_level);
 }
@@ -630,6 +632,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                },
                .callback = dmi_check_cb,
        },
+       {
+               .ident = "R700",
+               .matches = {
+                     DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+                     DMI_MATCH(DMI_PRODUCT_NAME, "SR700"),
+                     DMI_MATCH(DMI_BOARD_NAME, "SR700"),
+               },
+               .callback = dmi_check_cb,
+       },
        {
                .ident = "R530/R730",
                .matches = {
@@ -675,6 +686,24 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "P460"),
                },
                .callback = dmi_check_cb,
+       },
+       {
+               .ident = "X520",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X520"),
+                       DMI_MATCH(DMI_BOARD_NAME, "X520"),
+               },
+               .callback = dmi_check_cb,
+       },
+       {
+               .ident = "R528/R728",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "R528/R728"),
+                       DMI_MATCH(DMI_BOARD_NAME, "R528/R728"),
+               },
+               .callback = dmi_check_cb,
        },
        { },
 };
@@ -760,7 +789,7 @@ static int __init samsung_init(void)
        sabi_iface = ioremap_nocache(ifaceP, 16);
        if (!sabi_iface) {
                pr_err("Can't remap %x\n", ifaceP);
-               goto exit;
+               goto error_no_signature;
        }
        if (debug) {
                printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
@@ -792,7 +821,8 @@ static int __init samsung_init(void)
        /* create a backlight device to talk to this one */
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_PLATFORM;
-       props.max_brightness = sabi_config->max_brightness;
+       props.max_brightness = sabi_config->max_brightness -
+                               sabi_config->min_brightness;
        backlight_device = backlight_device_register("samsung", &sdev->dev,
                                                     NULL, &backlight_ops,
                                                     &props);
@@ -811,7 +841,6 @@ static int __init samsung_init(void)
        if (retval)
                goto error_file_create;
 
-exit:
        return 0;
 
 error_file_create:
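
The samsung-laptop changes above expose a zero-based brightness range to user space (0 .. max - min) and translate to the firmware's min..max scale on reads and writes. A tiny sketch of that offset mapping; the min/max values here are made up.

#include <stdio.h>

static const unsigned int hw_min = 1, hw_max = 8;

static unsigned int hw_to_user(unsigned int hw)
{
        return hw > hw_min ? hw - hw_min : 0;   /* clamp at 0, as read_brightness() does */
}

static unsigned int user_to_hw(unsigned int user)
{
        return user + hw_min;
}

int main(void)
{
        unsigned int max_user = hw_max - hw_min;        /* what sysfs advertises */

        printf("user range: 0..%u\n", max_user);
        printf("user 0 -> hw %u, user %u -> hw %u\n",
               user_to_hw(0), max_user, user_to_hw(max_user));
        printf("hw %u -> user %u\n", hw_min, hw_to_user(hw_min));
        return 0;
}
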
index bbd182e178cb65682b7ed47684467184da4c9a03..35dae412635d982e69204f676aa27ed6fca6114d 100644 (file)
@@ -127,7 +127,7 @@ MODULE_PARM_DESC(minor,
                 "default is -1 (automatic)");
 #endif
 
-static int kbd_backlight;      /* = 1 */
+static int kbd_backlight = 1;
 module_param(kbd_backlight, int, 0444);
 MODULE_PARM_DESC(kbd_backlight,
                 "set this to 0 to disable keyboard backlight, "
index f23d5a84e7b1b61108bdb651b5a240bed3ee4ed0..9b88be42b6cd3a43ba9b2c1d1563446c7df43f16 100644 (file)
@@ -754,9 +754,13 @@ static void wmi_free_devices(void)
        struct wmi_block *wblock, *next;
 
        /* Delete devices for all the GUIDs */
-       list_for_each_entry_safe(wblock, next, &wmi_block_list, list)
+       list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+               list_del(&wblock->list);
                if (wblock->dev.class)
                        device_unregister(&wblock->dev);
+               else
+                       kfree(wblock);
+       }
 }
 
 static bool guid_already_parsed(const char *guid_string)
index ca84d5099ce7c5fa654a39fa38427f8209b9a03d..5f44b55159043cf1397067339108214cf509faf7 100644 (file)
@@ -320,9 +320,14 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
 {
        struct acpi_device *acpi = to_acpi_device(dev);
        struct pnp_dev *pnp = _pnp;
+       struct device *physical_device;
+
+       physical_device = acpi_get_physical_device(acpi->handle);
+       if (physical_device)
+               put_device(physical_device);
 
        /* true means it matched */
-       return !acpi_get_physical_device(acpi->handle)
+       return !physical_device
            && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
 }
 
index dfbd5a6cc58becc8cca321e4c6c34a043aebe81f..258fef272ea7d6b61faa565260c8fc7b7a4f995d 100644 (file)
@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
        }
 }
 
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd_nb.h>
+
+static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
+{
+       resource_size_t start, end;
+       struct pnp_resource *pnp_res;
+       struct resource *res;
+       struct resource mmconfig_res, *mmconfig;
+
+       mmconfig = amd_get_mmconfig_range(&mmconfig_res);
+       if (!mmconfig)
+               return;
+
+       list_for_each_entry(pnp_res, &dev->resources, list) {
+               res = &pnp_res->res;
+               if (res->end < mmconfig->start || res->start > mmconfig->end ||
+                   (res->start == mmconfig->start && res->end == mmconfig->end))
+                       continue;
+
+               dev_info(&dev->dev, FW_BUG
+                        "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
+                        res, mmconfig);
+               if (mmconfig->start < res->start) {
+                       start = mmconfig->start;
+                       end = res->start - 1;
+                       pnp_add_mem_resource(dev, start, end, 0);
+               }
+               if (mmconfig->end > res->end) {
+                       start = res->end + 1;
+                       end = mmconfig->end;
+                       pnp_add_mem_resource(dev, start, end, 0);
+               }
+               break;
+       }
+}
+#endif
+
 /*
  *  PnP Quirks
  *  Cards or devices that need some tweaking due to incomplete resource info
@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
        /* PnP resources that might overlap PCI BARs */
        {"PNP0c01", quirk_system_pci_resources},
        {"PNP0c02", quirk_system_pci_resources},
+#ifdef CONFIG_AMD_NB
+       {"PNP0c01", quirk_amd_mmconfig_area},
+#endif
        {""}
 };
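
quirk_amd_mmconfig_area() above adds reservations for whatever parts of the MMCONFIG window an existing PNP resource leaves uncovered. A self-contained sketch of that interval arithmetic; struct range, cover_window() and the addresses are invented for illustration.

#include <stdio.h>

struct range { unsigned long long start, end; };        /* inclusive bounds */

static void add_reservation(struct range r)
{
        printf("reserve [%#llx-%#llx]\n", r.start, r.end);
}

static void cover_window(struct range existing, struct range window)
{
        /* no overlap, or an exact cover: nothing to add */
        if (existing.end < window.start || existing.start > window.end ||
            (existing.start == window.start && existing.end == window.end))
                return;

        if (window.start < existing.start)
                add_reservation((struct range){ window.start, existing.start - 1 });
        if (window.end > existing.end)
                add_reservation((struct range){ existing.end + 1, window.end });
}

int main(void)
{
        struct range window   = { 0xe0000000, 0xefffffff };   /* MMCONFIG-style window */
        struct range existing = { 0xe0000000, 0xe7ffffff };   /* firmware reserved half */

        cover_window(existing, window);   /* reserves [0xe8000000-0xefffffff] */
        return 0;
}
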
 
index 1fefe82e12e3b94f59add3abb7af9db44047f1a8..91a783d72360b314094a134d5284a89e751b3ca9 100644 (file)
@@ -39,6 +39,7 @@ struct ds2780_device_info {
        struct device *dev;
        struct power_supply bat;
        struct device *w1_dev;
+       struct task_struct *mutex_holder;
 };
 
 enum current_types {
@@ -49,8 +50,8 @@ enum current_types {
 static const char model[] = "DS2780";
 static const char manufacturer[] = "Maxim/Dallas";
 
-static inline struct ds2780_device_info *to_ds2780_device_info(
-       struct power_supply *psy)
+static inline struct ds2780_device_info *
+to_ds2780_device_info(struct power_supply *psy)
 {
        return container_of(psy, struct ds2780_device_info, bat);
 }
@@ -60,17 +61,28 @@ static inline struct power_supply *to_power_supply(struct device *dev)
        return dev_get_drvdata(dev);
 }
 
-static inline int ds2780_read8(struct device *dev, u8 *val, int addr)
+static inline int ds2780_battery_io(struct ds2780_device_info *dev_info,
+       char *buf, int addr, size_t count, int io)
 {
-       return w1_ds2780_io(dev, val, addr, sizeof(u8), 0);
+       if (dev_info->mutex_holder == current)
+               return w1_ds2780_io_nolock(dev_info->w1_dev, buf, addr, count, io);
+       else
+               return w1_ds2780_io(dev_info->w1_dev, buf, addr, count, io);
+}
+
+static inline int ds2780_read8(struct ds2780_device_info *dev_info, u8 *val,
+       int addr)
+{
+       return ds2780_battery_io(dev_info, val, addr, sizeof(u8), 0);
 }
 
-static int ds2780_read16(struct device *dev, s16 *val, int addr)
+static int ds2780_read16(struct ds2780_device_info *dev_info, s16 *val,
+       int addr)
 {
        int ret;
        u8 raw[2];
 
-       ret = w1_ds2780_io(dev, raw, addr, sizeof(u8) * 2, 0);
+       ret = ds2780_battery_io(dev_info, raw, addr, sizeof(raw), 0);
        if (ret < 0)
                return ret;
 
@@ -79,16 +91,16 @@ static int ds2780_read16(struct device *dev, s16 *val, int addr)
        return 0;
 }
 
-static inline int ds2780_read_block(struct device *dev, u8 *val, int addr,
-       size_t count)
+static inline int ds2780_read_block(struct ds2780_device_info *dev_info,
+       u8 *val, int addr, size_t count)
 {
-       return w1_ds2780_io(dev, val, addr, count, 0);
+       return ds2780_battery_io(dev_info, val, addr, count, 0);
 }
 
-static inline int ds2780_write(struct device *dev, u8 *val, int addr,
-       size_t count)
+static inline int ds2780_write(struct ds2780_device_info *dev_info, u8 *val,
+       int addr, size_t count)
 {
-       return w1_ds2780_io(dev, val, addr, count, 1);
+       return ds2780_battery_io(dev_info, val, addr, count, 1);
 }
 
 static inline int ds2780_store_eeprom(struct device *dev, int addr)
@@ -122,7 +134,7 @@ static int ds2780_set_sense_register(struct ds2780_device_info *dev_info,
 {
        int ret;
 
-       ret = ds2780_write(dev_info->w1_dev, &conductance,
+       ret = ds2780_write(dev_info, &conductance,
                                DS2780_RSNSP_REG, sizeof(u8));
        if (ret < 0)
                return ret;
@@ -134,7 +146,7 @@ static int ds2780_set_sense_register(struct ds2780_device_info *dev_info,
 static int ds2780_get_rsgain_register(struct ds2780_device_info *dev_info,
        u16 *rsgain)
 {
-       return ds2780_read16(dev_info->w1_dev, rsgain, DS2780_RSGAIN_MSB_REG);
+       return ds2780_read16(dev_info, rsgain, DS2780_RSGAIN_MSB_REG);
 }
 
 /* Set RSGAIN value from 0 to 1.999 in steps of 0.001 */
@@ -144,8 +156,8 @@ static int ds2780_set_rsgain_register(struct ds2780_device_info *dev_info,
        int ret;
        u8 raw[] = {rsgain >> 8, rsgain & 0xFF};
 
-       ret = ds2780_write(dev_info->w1_dev, raw,
-                               DS2780_RSGAIN_MSB_REG, sizeof(u8) * 2);
+       ret = ds2780_write(dev_info, raw,
+                               DS2780_RSGAIN_MSB_REG, sizeof(raw));
        if (ret < 0)
                return ret;
 
@@ -167,7 +179,7 @@ static int ds2780_get_voltage(struct ds2780_device_info *dev_info,
         * Bits 2 - 0 of the voltage value are in bits 7 - 5 of the
         * voltage LSB register
         */
-       ret = ds2780_read16(dev_info->w1_dev, &voltage_raw,
+       ret = ds2780_read16(dev_info, &voltage_raw,
                                DS2780_VOLT_MSB_REG);
        if (ret < 0)
                return ret;
@@ -196,7 +208,7 @@ static int ds2780_get_temperature(struct ds2780_device_info *dev_info,
         * Bits 2 - 0 of the temperature value are in bits 7 - 5 of the
         * temperature LSB register
         */
-       ret = ds2780_read16(dev_info->w1_dev, &temperature_raw,
+       ret = ds2780_read16(dev_info, &temperature_raw,
                                DS2780_TEMP_MSB_REG);
        if (ret < 0)
                return ret;
@@ -222,13 +234,13 @@ static int ds2780_get_current(struct ds2780_device_info *dev_info,
         * The units of measurement for current are dependent on the value of
         * the sense resistor.
         */
-       ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
+       ret = ds2780_read8(dev_info, &sense_res_raw, DS2780_RSNSP_REG);
        if (ret < 0)
                return ret;
 
        if (sense_res_raw == 0) {
                dev_err(dev_info->dev, "sense resistor value is 0\n");
-               return -ENXIO;
+               return -EINVAL;
        }
        sense_res = 1000 / sense_res_raw;
 
@@ -248,7 +260,7 @@ static int ds2780_get_current(struct ds2780_device_info *dev_info,
         * Bits 7 - 0 of the current value are in bits 7 - 0 of the current
         * LSB register
         */
-       ret = ds2780_read16(dev_info->w1_dev, &current_raw, reg_msb);
+       ret = ds2780_read16(dev_info, &current_raw, reg_msb);
        if (ret < 0)
                return ret;
 
@@ -267,7 +279,7 @@ static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info,
         * The units of measurement for accumulated current are dependent on
         * the value of the sense resistor.
         */
-       ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
+       ret = ds2780_read8(dev_info, &sense_res_raw, DS2780_RSNSP_REG);
        if (ret < 0)
                return ret;
 
@@ -285,7 +297,7 @@ static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info,
         * Bits 7 - 0 of the ACR value are in bits 7 - 0 of the ACR
         * LSB register
         */
-       ret = ds2780_read16(dev_info->w1_dev, &current_raw, DS2780_ACR_MSB_REG);
+       ret = ds2780_read16(dev_info, &current_raw, DS2780_ACR_MSB_REG);
        if (ret < 0)
                return ret;
 
@@ -299,7 +311,7 @@ static int ds2780_get_capacity(struct ds2780_device_info *dev_info,
        int ret;
        u8 raw;
 
-       ret = ds2780_read8(dev_info->w1_dev, &raw, DS2780_RARC_REG);
+       ret = ds2780_read8(dev_info, &raw, DS2780_RARC_REG);
        if (ret < 0)
                return ret;
 
@@ -345,7 +357,7 @@ static int ds2780_get_charge_now(struct ds2780_device_info *dev_info,
         * Bits 7 - 0 of the RAAC value are in bits 7 - 0 of the RAAC
         * LSB register
         */
-       ret = ds2780_read16(dev_info->w1_dev, &charge_raw, DS2780_RAAC_MSB_REG);
+       ret = ds2780_read16(dev_info, &charge_raw, DS2780_RAAC_MSB_REG);
        if (ret < 0)
                return ret;
 
@@ -356,7 +368,7 @@ static int ds2780_get_charge_now(struct ds2780_device_info *dev_info,
 static int ds2780_get_control_register(struct ds2780_device_info *dev_info,
        u8 *control_reg)
 {
-       return ds2780_read8(dev_info->w1_dev, control_reg, DS2780_CONTROL_REG);
+       return ds2780_read8(dev_info, control_reg, DS2780_CONTROL_REG);
 }
 
 static int ds2780_set_control_register(struct ds2780_device_info *dev_info,
@@ -364,7 +376,7 @@ static int ds2780_set_control_register(struct ds2780_device_info *dev_info,
 {
        int ret;
 
-       ret = ds2780_write(dev_info->w1_dev, &control_reg,
+       ret = ds2780_write(dev_info, &control_reg,
                                DS2780_CONTROL_REG, sizeof(u8));
        if (ret < 0)
                return ret;
@@ -503,7 +515,7 @@ static ssize_t ds2780_get_sense_resistor_value(struct device *dev,
        struct power_supply *psy = to_power_supply(dev);
        struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
 
-       ret = ds2780_read8(dev_info->w1_dev, &sense_resistor, DS2780_RSNSP_REG);
+       ret = ds2780_read8(dev_info, &sense_resistor, DS2780_RSNSP_REG);
        if (ret < 0)
                return ret;
 
@@ -584,7 +596,7 @@ static ssize_t ds2780_get_pio_pin(struct device *dev,
        struct power_supply *psy = to_power_supply(dev);
        struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
 
-       ret = ds2780_read8(dev_info->w1_dev, &sfr, DS2780_SFR_REG);
+       ret = ds2780_read8(dev_info, &sfr, DS2780_SFR_REG);
        if (ret < 0)
                return ret;
 
@@ -611,7 +623,7 @@ static ssize_t ds2780_set_pio_pin(struct device *dev,
                return -EINVAL;
        }
 
-       ret = ds2780_write(dev_info->w1_dev, &new_setting,
+       ret = ds2780_write(dev_info, &new_setting,
                                DS2780_SFR_REG, sizeof(u8));
        if (ret < 0)
                return ret;
@@ -632,7 +644,7 @@ static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
                DS2780_EEPROM_BLOCK1_END -
                DS2780_EEPROM_BLOCK1_START + 1 - off);
 
-       return ds2780_read_block(dev_info->w1_dev, buf,
+       return ds2780_read_block(dev_info, buf,
                                DS2780_EEPROM_BLOCK1_START + off, count);
 }
 
@@ -650,7 +662,7 @@ static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
                DS2780_EEPROM_BLOCK1_END -
                DS2780_EEPROM_BLOCK1_START + 1 - off);
 
-       ret = ds2780_write(dev_info->w1_dev, buf,
+       ret = ds2780_write(dev_info, buf,
                                DS2780_EEPROM_BLOCK1_START + off, count);
        if (ret < 0)
                return ret;
@@ -685,9 +697,8 @@ static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
                DS2780_EEPROM_BLOCK0_END -
                DS2780_EEPROM_BLOCK0_START + 1 - off);
 
-       return ds2780_read_block(dev_info->w1_dev, buf,
+       return ds2780_read_block(dev_info, buf,
                                DS2780_EEPROM_BLOCK0_START + off, count);
-
 }
 
 static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
@@ -704,7 +715,7 @@ static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
                DS2780_EEPROM_BLOCK0_END -
                DS2780_EEPROM_BLOCK0_START + 1 - off);
 
-       ret = ds2780_write(dev_info->w1_dev, buf,
+       ret = ds2780_write(dev_info, buf,
                                DS2780_EEPROM_BLOCK0_START + off, count);
        if (ret < 0)
                return ret;
@@ -768,6 +779,7 @@ static int __devinit ds2780_battery_probe(struct platform_device *pdev)
        dev_info->bat.properties        = ds2780_battery_props;
        dev_info->bat.num_properties    = ARRAY_SIZE(ds2780_battery_props);
        dev_info->bat.get_property      = ds2780_battery_get_property;
+       dev_info->mutex_holder          = current;
 
        ret = power_supply_register(&pdev->dev, &dev_info->bat);
        if (ret) {
@@ -797,6 +809,8 @@ static int __devinit ds2780_battery_probe(struct platform_device *pdev)
                goto fail_remove_bin_file;
        }
 
+       dev_info->mutex_holder = NULL;
+
        return 0;
 
 fail_remove_bin_file:
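
The probe hunks above record the probing task in a new mutex_holder field and clear it once registration succeeds; the remove hunk below sets it again for teardown. The 1-Wire I/O helpers that consume the field are not part of this diff, so the following user-space analogue is only a guess at the kind of "holder may bypass the lock" guard such a field usually backs (all names below are made up):

#include <pthread.h>
#include <stdbool.h>

struct dev_ctx {
        pthread_mutex_t io_lock;
        pthread_t       holder;         /* thread allowed to bypass */
        bool            holder_set;
};

static int guarded_io(struct dev_ctx *ctx)
{
        bool need_lock = !(ctx->holder_set &&
                           pthread_equal(ctx->holder, pthread_self()));

        if (need_lock)
                pthread_mutex_lock(&ctx->io_lock);
        /* ... perform the bus transfer ... */
        if (need_lock)
                pthread_mutex_unlock(&ctx->io_lock);
        return 0;
}
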
@@ -816,6 +830,8 @@ static int __devexit ds2780_battery_remove(struct platform_device *pdev)
 {
        struct ds2780_device_info *dev_info = platform_get_drvdata(pdev);
 
+       dev_info->mutex_holder = current;
+
        /* remove attributes */
        sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
 
index cf3f9997546dc41d10a143390e834d0926720964..10451a15e8284f33be26996363c5ffd6c79ad56a 100644 (file)
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
 
 static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
 {
-       return 1; /* always round timer functions to one nanosecond */
+       tp->tv_sec = 0;
+       tp->tv_nsec = 1;
+       return 0;
 }
 
 static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
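
This hunk brings the getres callback in line with the POSIX convention: the resolution is returned through the timespec and the function returns 0 on success. A short consumer-side sketch; CLOCK_REALTIME stands in for a dynamic PTP clock id, which is an assumption of the example rather than something shown in the diff:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec res;

        if (clock_getres(CLOCK_REALTIME, &res) != 0) {
                perror("clock_getres");
                return 1;
        }
        /* A 1 ns PTP clock would report 0.000000001 here. */
        printf("resolution: %ld.%09ld s\n", (long)res.tv_sec, res.tv_nsec);
        return 0;
}
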
index d63fddb0fbb0d5ead8cadb323bfbff49b2a3e00f..acda58e6ef021753e8104402ca2c3b69f550c975 100644 (file)
@@ -195,7 +195,7 @@ static const unsigned int LDO12_suspend_table[] = {
 };
 
 static const unsigned int LDO13_table[] = {
-       1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, 0,
+       1200000, 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0,
 };
 
 static const unsigned int LDO13_suspend_table[] = {
@@ -388,10 +388,10 @@ static struct pm8607_regulator_info pm8607_regulator_info[] = {
        PM8607_LDO( 7,         LDO7, 0, 3, SUPPLIES_EN12, 1),
        PM8607_LDO( 8,         LDO8, 0, 3, SUPPLIES_EN12, 2),
        PM8607_LDO( 9,         LDO9, 0, 3, SUPPLIES_EN12, 3),
-       PM8607_LDO(10,        LDO10, 0, 3, SUPPLIES_EN12, 4),
+       PM8607_LDO(10,        LDO10, 0, 4, SUPPLIES_EN12, 4),
        PM8607_LDO(12,        LDO12, 0, 4, SUPPLIES_EN12, 5),
        PM8607_LDO(13, VIBRATOR_SET, 1, 3,  VIBRATOR_SET, 0),
-       PM8607_LDO(14,        LDO14, 0, 4, SUPPLIES_EN12, 6),
+       PM8607_LDO(14,        LDO14, 0, 3, SUPPLIES_EN12, 6),
 };
 
 static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
index ad6628ca94f41378bdddbe11f1286d54a64f4fd0..a8fb668c03a84937c58d0e7a19f26e0066c5df78 100644 (file)
@@ -688,7 +688,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
                }
 
                new_val++;
-       } while (desc->min + desc->step + new_val <= desc->max);
+       } while (desc->min + desc->step * new_val <= desc->max);
 
        new_idx = tmp_idx;
        new_val = tmp_val;
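
The one-character change here is an operator fix: candidate voltages are generated as min + step * selector, so the loop bound has to scale step by the selector instead of adding it once. A standalone illustration with made-up numbers (not the MAX8997's real table):

#include <stdio.h>

int main(void)
{
        const int min_uV = 650000, step_uV = 25000, max_uV = 1000000;

        /* With '+' instead of '*' the bound would be the constant
         * min + step and the loop would stop after one selector. */
        for (int sel = 0; min_uV + step_uV * sel <= max_uV; sel++)
                printf("sel %2d -> %d uV\n", sel, min_uV + step_uV * sel);
        return 0;
}
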
index 9166aa0a9df71c582425d89a7d6517bd22f01072..229b6f4bb8b4b1d8a088cbe60dbaa72287925cb8 100644 (file)
@@ -481,7 +481,7 @@ static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
        if (i >= info->n_voltages)
                i = info->n_voltages - 1;
 
-       *selector = info->voltages[i];
+       *selector = i;
 
        return write_field(hw, &info->voltage, i);
 }
index eb4c88316a15285f165f6230d144900ea4e86179..636a2ec218109e90791894301daea464299f4241 100644 (file)
@@ -227,11 +227,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                alarm->time.tm_hour = now.tm_hour;
 
        /* For simplicity, only support date rollover for now */
-       if (alarm->time.tm_mday == -1) {
+       if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
                alarm->time.tm_mday = now.tm_mday;
                missing = day;
        }
-       if (alarm->time.tm_mon == -1) {
+       if ((unsigned)alarm->time.tm_mon >= 12) {
                alarm->time.tm_mon = now.tm_mon;
                if (missing == none)
                        missing = month;
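
The old code trusted a -1 sentinel; the replacement range-checks the fields, so any out-of-range value read back from the hardware triggers the rollover handling. The same checks in isolation:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* A day-of-month is only meaningful in 1..31 and a month in 0..11;
 * anything else is treated as "not provided by the RTC". */
static bool mday_valid(int mday) { return mday >= 1 && mday <= 31; }
static bool mon_valid(int mon)   { return (unsigned)mon < 12; }

int main(void)
{
        struct tm alarm = { .tm_mday = -1, .tm_mon = 13 };

        printf("mday %d valid: %d\n", alarm.tm_mday, mday_valid(alarm.tm_mday));
        printf("mon  %d valid: %d\n", alarm.tm_mon, mon_valid(alarm.tm_mon));
        return 0;
}
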
@@ -762,6 +762,14 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        return 0;
 }
 
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+       if (!rtc->ops || !rtc->ops->alarm_irq_enable)
+               return;
+
+       rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
+}
+
 /**
  * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
  * @rtc rtc device
@@ -783,8 +791,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
                struct rtc_wkalrm alarm;
                int err;
                next = timerqueue_getnext(&rtc->timerqueue);
-               if (!next)
+               if (!next) {
+                       rtc_alarm_disable(rtc);
                        return;
+               }
                alarm.time = rtc_ktime_to_tm(next->expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
@@ -846,7 +856,8 @@ again:
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME)
                        goto again;
-       }
+       } else
+               rtc_alarm_disable(rtc);
 
        mutex_unlock(&rtc->ops_lock);
 }
index eda128fc1d38729ebb1a385c10fd30e6345e852b..64aedd8cc095810e4134ceb727538a24b2af6232 100644 (file)
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
 static struct rtc_class_ops m41t80_rtc_ops = {
        .read_time = m41t80_rtc_read_time,
        .set_time = m41t80_rtc_set_time,
+       /*
+        * XXX - m41t80 alarm functionality is reported broken.
+        * until it is fixed, don't register alarm functions.
+        *
        .read_alarm = m41t80_rtc_read_alarm,
        .set_alarm = m41t80_rtc_set_alarm,
+       */
        .proc = m41t80_rtc_proc,
+       /*
+        * See above comment on broken alarm
+        *
        .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+       */
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
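
Commenting the alarm callbacks out of the initializer leaves those rtc_class_ops members NULL, which the core treats as "operation not supported" (compare the !rtc->ops->alarm_irq_enable test added to rtc_alarm_disable() earlier). A tiny reminder of why that works with designated initializers:

#include <stdio.h>

struct clk_ops {
        int (*read_time)(void);
        int (*read_alarm)(void);        /* deliberately left unset */
};

static int read_time(void) { return 0; }

static const struct clk_ops ops = {
        .read_time = read_time,
        /* .read_alarm omitted -> implicitly NULL */
};

int main(void)
{
        printf("alarm supported: %s\n", ops.read_alarm ? "yes" : "no");
        return 0;
}
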
index ff1b84bd9bb56c81a55deb5f8b3ad3eb988fbbd0..1e80a48057e5dc58f7c219ac2e7d68cd9d06b96e 100644 (file)
@@ -312,6 +312,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
        int ret;
        struct pl031_local *ldata;
        struct rtc_class_ops *ops = id->data;
+       unsigned long time;
 
        ret = amba_request_regions(adev, NULL);
        if (ret)
@@ -339,11 +340,27 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
        dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
 
        /* Enable the clockwatch on ST Variants */
-       if ((ldata->hw_designer == AMBA_VENDOR_ST) &&
-           (ldata->hw_revision > 1))
+       if (ldata->hw_designer == AMBA_VENDOR_ST)
                writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
                       ldata->base + RTC_CR);
 
+       /*
+        * On ST PL031 variants, the RTC reset value does not provide correct
+        * weekday for 2000-01-01. Correct the erroneous sunday to saturday.
+        */
+       if (ldata->hw_designer == AMBA_VENDOR_ST) {
+               if (readl(ldata->base + RTC_YDR) == 0x2000) {
+                       time = readl(ldata->base + RTC_DR);
+                       if ((time &
+                            (RTC_MON_MASK | RTC_MDAY_MASK | RTC_WDAY_MASK))
+                           == 0x02120000) {
+                               time = time | (0x7 << RTC_WDAY_SHIFT);
+                               writel(0x2000, ldata->base + RTC_YLR);
+                               writel(time, ldata->base + RTC_LR);
+                       }
+               }
+       }
+
        ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
                                        THIS_MODULE);
        if (IS_ERR(ldata->rtc)) {
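
The fixup above rewrites the power-on weekday because 2000-01-01 fell on a Saturday, not a Sunday. Sakamoto's method is a quick independent check (0 = Sunday, 6 = Saturday):

#include <stdio.h>

/* Sakamoto's day-of-week algorithm for the Gregorian calendar. */
static int day_of_week(int y, int m, int d)
{
        static const int t[] = { 0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4 };

        if (m < 3)
                y -= 1;
        return (y + y / 4 - y / 100 + y / 400 + t[m - 1] + d) % 7;
}

int main(void)
{
        printf("2000-01-01 -> %d\n", day_of_week(2000, 1, 1)); /* 6 = Saturday */
        return 0;
}
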
index 30fb979d684d0043d2b1f6b0aff2fda6ebe8cfcd..cc2dd7fb289f8fb44bdc5e53719a03753eddf08c 100644 (file)
 #include <linux/hdreg.h>       /* HDIO_GETGEO                      */
 #include <linux/bio.h>
 #include <linux/module.h>
+#include <linux/compat.h>
 #include <linux/init.h>
 
 #include <asm/debug.h>
 #include <asm/idals.h>
 #include <asm/ebcdic.h>
-#include <asm/compat.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/cio.h>
index 72261e4c516de89e836a2afb3d646d24b947ecfd..9caeaea5d09936c882e7c057e33a54f2b09a7720 100644 (file)
@@ -13,6 +13,7 @@
 #define KMSG_COMPONENT "dasd"
 
 #include <linux/interrupt.h>
+#include <linux/compat.h>
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/blkpg.h>
index f6489eb7e976a8c0aa1851ebbb21901d689030c1..2150824303a529ca6da15ab10da9d6322eb5e0ba 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/compat.h>
 
 #include <asm/compat.h>
 #include <asm/ccwdev.h>
index 31a3ccbb6495c2e8efcdd34bbd8b3cacfbbf9154..84e569c9c150cb2e37ef5f10a539e7ba7838e4cb 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <linux/compat.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/slab.h>
index 5c567414c4bb81ee74250b01a4755d8c30da3dbc..cda9bd6e48e8d19743290a9fec017194a116655b 100644 (file)
@@ -87,6 +87,12 @@ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
        }
 }
 
+static ssize_t ccwgroup_online_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count);
+static ssize_t ccwgroup_online_show(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf);
 /*
  * Provide an 'ungroup' attribute so the user can remove group devices no
  * longer needed or accidentially created. Saves memory :)
@@ -134,6 +140,20 @@ out:
 }
 
 static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
+static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
+
+static struct attribute *ccwgroup_attrs[] = {
+       &dev_attr_online.attr,
+       &dev_attr_ungroup.attr,
+       NULL,
+};
+static struct attribute_group ccwgroup_attr_group = {
+       .attrs = ccwgroup_attrs,
+};
+static const struct attribute_group *ccwgroup_attr_groups[] = {
+       &ccwgroup_attr_group,
+       NULL,
+};
 
 static void
 ccwgroup_release (struct device *dev)
@@ -293,25 +313,17 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
        }
 
        dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
-
+       gdev->dev.groups = ccwgroup_attr_groups;
        rc = device_add(&gdev->dev);
        if (rc)
                goto error;
        get_device(&gdev->dev);
-       rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
-
-       if (rc) {
-               device_unregister(&gdev->dev);
-               goto error;
-       }
-
        rc = __ccwgroup_create_symlinks(gdev);
        if (!rc) {
                mutex_unlock(&gdev->reg_mutex);
                put_device(&gdev->dev);
                return 0;
        }
-       device_remove_file(&gdev->dev, &dev_attr_ungroup);
        device_unregister(&gdev->dev);
 error:
        for (i = 0; i < num_devices; i++)
@@ -423,7 +435,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
        int ret;
 
        if (!dev->driver)
-               return -ENODEV;
+               return -EINVAL;
 
        gdev = to_ccwgroupdev(dev);
        gdrv = to_ccwgroupdrv(dev->driver);
@@ -456,8 +468,6 @@ ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *b
        return sprintf(buf, online ? "1\n" : "0\n");
 }
 
-static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
-
 static int
 ccwgroup_probe (struct device *dev)
 {
@@ -469,12 +479,7 @@ ccwgroup_probe (struct device *dev)
        gdev = to_ccwgroupdev(dev);
        gdrv = to_ccwgroupdrv(dev->driver);
 
-       if ((ret = device_create_file(dev, &dev_attr_online)))
-               return ret;
-
        ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
-       if (ret)
-               device_remove_file(dev, &dev_attr_online);
 
        return ret;
 }
@@ -485,9 +490,6 @@ ccwgroup_remove (struct device *dev)
        struct ccwgroup_device *gdev;
        struct ccwgroup_driver *gdrv;
 
-       device_remove_file(dev, &dev_attr_online);
-       device_remove_file(dev, &dev_attr_ungroup);
-
        if (!dev->driver)
                return 0;
 
index e950f1ad4dd121934551c02673801aa3fa82daa5..ec760297dd383327d7980895a5912b3b0c6a4ca8 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/compat.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
index fd69da3fa6b44d0afe34e550f027680b9f8da6b0..e2c9ac5fcb363a8597f8c9635cb17728011e6c44 100644 (file)
@@ -2742,9 +2742,14 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 {
        int cast_type = RTN_UNSPEC;
-
-       if (skb_dst(skb) && skb_dst(skb)->neighbour) {
-               cast_type = skb_dst(skb)->neighbour->type;
+       struct neighbour *n = NULL;
+       struct dst_entry *dst;
+
+       dst = skb_dst(skb);
+       if (dst)
+               n = dst_get_neighbour(dst);
+       if (n) {
+               cast_type = n->type;
                if ((cast_type == RTN_BROADCAST) ||
                    (cast_type == RTN_MULTICAST) ||
                    (cast_type == RTN_ANYCAST))
@@ -2787,6 +2792,9 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                struct sk_buff *skb, int ipv, int cast_type)
 {
+       struct neighbour *n = NULL;
+       struct dst_entry *dst;
+
        memset(hdr, 0, sizeof(struct qeth_hdr));
        hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
        hdr->hdr.l3.ext_flags = 0;
@@ -2804,13 +2812,16 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
        }
 
        hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+       dst = skb_dst(skb);
+       if (dst)
+               n = dst_get_neighbour(dst);
        if (ipv == 4) {
                /* IPv4 */
                hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
                memset(hdr->hdr.l3.dest_addr, 0, 12);
-               if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
+               if (n) {
                        *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
-                           *((u32 *) skb_dst(skb)->neighbour->primary_key);
+                           *((u32 *) n->primary_key);
                } else {
                        /* fill in destination address used in ip header */
                        *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
@@ -2821,9 +2832,9 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
                if (card->info.type == QETH_CARD_TYPE_IQD)
                        hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
-               if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
+               if (n) {
                        memcpy(hdr->hdr.l3.dest_addr,
-                              skb_dst(skb)->neighbour->primary_key, 16);
+                              n->primary_key, 16);
                } else {
                        /* fill in destination address used in ip header */
                        memcpy(hdr->hdr.l3.dest_addr,
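
These qeth hunks, like the cxgbi ones further down, stop dereferencing skb_dst(skb)->neighbour directly and go through dst_get_neighbour() plus a local NULL check. A small sketch of the general payoff of such an accessor; the struct and field names here are illustrative, not the networking core's:

#include <stddef.h>
#include <stdio.h>

struct neigh { int type; };
struct route { struct neigh *neighbour; };

/* One accessor: if the lookup later needs RCU, a reference count or
 * a different backing field, only this helper has to change. */
static struct neigh *route_get_neighbour(const struct route *rt)
{
        return rt ? rt->neighbour : NULL;
}

int main(void)
{
        struct route rt = { .neighbour = NULL };

        printf("neighbour %s\n", route_get_neighbour(&rt) ? "present" : "absent");
        return 0;
}
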
index 303dde09d294944b7ed51ed4eac717b2cbdbc223..fab2c2592a9744e400971b73c264fd9f2c3ab7b9 100644 (file)
@@ -11,6 +11,7 @@
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/compat.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/miscdevice.h>
index 2a4991d6d4d59a297b9e9599bd1a72eac70ee24d..3a417dff1b89b88f9e9c77277387faaa31091b55 100644 (file)
@@ -57,6 +57,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
 {
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
+       /* if previous slave_alloc returned early, there is nothing to do */
+       if (!zfcp_sdev->port)
+               return;
+
        zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
        put_device(&zfcp_sdev->port->dev);
 }
index 3382475dc22dcf1ac4167aed67da6056539b2131..c7b6fed887350828b0d0ccd6678eb855c06f37bf 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
@@ -1108,6 +1109,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
                unique_id++;
        }
 
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                              PCIE_LINK_STATE_CLKPM);
+
        error = pci_enable_device(pdev);
        if (error)
                goto out;
index b2d661147a431be3bacc9b84f167ab281b44a0b2..143f2682bdab26f4ee547560b5149b4b1b6635d8 100644 (file)
@@ -985,7 +985,7 @@ static int init_act_open(struct cxgbi_sock *csk)
                csk->saddr.sin_addr.s_addr = chba->ipv4addr;
 
        csk->rss_qid = 0;
-       csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev);
+       csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
        if (!csk->l2t) {
                pr_err("NO l2t available.\n");
                return -EINVAL;
index f3a4cd7cf7828f663c773ac2819537fad2319ab1..ae13c4993aa378e2cc978f2215f73d6b938cd20b 100644 (file)
@@ -1160,7 +1160,7 @@ static int init_act_open(struct cxgbi_sock *csk)
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);
 
-       csk->l2t = cxgb4_l2t_get(lldi->l2t, csk->dst->neighbour, ndev, 0);
+       csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
        if (!csk->l2t) {
                pr_err("%s, cannot alloc l2t.\n", ndev->name);
                goto rel_resource;
index a2a9c7c6c6436c9f0c19e68575d656f7b7807c61..77ac217ad5ce0952d9960710b7aaf1aa93a9bdd5 100644 (file)
@@ -492,7 +492,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                goto err_out;
        }
        dst = &rt->dst;
-       ndev = dst->neighbour->dev;
+       ndev = dst_get_neighbour(dst)->dev;
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
                mtu = ndev->mtu;
                pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
-                       dst->neighbour->dev->name, ndev->name, mtu);
+                       dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
        }
 
        cdev = cxgbi_device_find_by_netdev(ndev, &port);
index 0119b814779744ccfc448bceddcaac3a7e195fcc..d973325ded2a047db22cf42b1ef6ba15d7f5c412 100644 (file)
@@ -398,7 +398,15 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
 
        spin_lock_irqsave(q->queue_lock, flags);
        sdev = q->queuedata;
-       if (sdev && sdev->scsi_dh_data)
+       if (!sdev) {
+               spin_unlock_irqrestore(q->queue_lock, flags);
+               err = SCSI_DH_NOSYS;
+               if (fn)
+                       fn(data, err);
+               return err;
+       }
+
+       if (sdev->scsi_dh_data)
                scsi_dh = sdev->scsi_dh_data->scsi_dh;
        dev = get_device(&sdev->sdev_gendev);
        if (!scsi_dh || !dev ||
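
The added early return still drives the completion callback with the error code, so a caller that only waits for the callback is not left hanging when the queue has no device behind it. A generic standalone sketch of that convention (names and error values are illustrative):

#include <stdio.h>

typedef void (*activate_done)(void *data, int err);

/* The callback fires exactly once, whether the request is rejected
 * immediately or completes later. */
static int activate(int device_present, activate_done fn, void *data)
{
        if (!device_present) {
                if (fn)
                        fn(data, -19);          /* "no such device" */
                return -19;
        }
        /* ... queue the real work; the callback runs on completion ... */
        if (fn)
                fn(data, 0);
        return 0;
}

static void done(void *data, int err)
{
        printf("%s: err=%d\n", (const char *)data, err);
}

int main(void)
{
        activate(0, done, "dead queue");
        activate(1, done, "live queue");
        return 0;
}
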
index 8885b3ef369aed632421136b8b39324d56c271d6..f829adcb3b79f64833e50b81da021651293c1588 100644 (file)
@@ -1561,6 +1561,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
        stats->InvalidCRCCount++;
        if (stats->InvalidCRCCount < 5)
                printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+       put_cpu();
        return -EINVAL;
 }
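
The single added line balances a get_cpu() taken earlier in the function (not shown in this hunk), so the CRC-error return no longer leaves preemption disabled. The shape of the fix, with a generic acquire/release pair standing in for get_cpu()/put_cpu():

#include <stdio.h>

static int depth;                               /* nesting we must unwind */
static void acquire(void) { depth++; }          /* think get_cpu() */
static void release(void) { depth--; }          /* think put_cpu() */

static int filter_frame(int crc_ok)
{
        acquire();
        if (!crc_ok) {
                release();              /* the fix: release on this path too */
                return -1;
        }
        /* ... count the good frame ... */
        release();
        return 0;
}

int main(void)
{
        filter_frame(0);
        filter_frame(1);
        printf("unbalanced acquires: %d\n", depth);     /* prints 0 */
        return 0;
}
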
 
index 4f7a5829ea4c0cbca2c7f5e76cb497b2c1708457..351dc0b86fab72dd788807589f7a699e49f480ae 100644 (file)
@@ -286,6 +286,7 @@ static void scsi_host_dev_release(struct device *dev)
 {
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct device *parent = dev->parent;
+       struct request_queue *q;
 
        scsi_proc_hostdir_rm(shost->hostt);
 
@@ -293,9 +294,11 @@ static void scsi_host_dev_release(struct device *dev)
                kthread_stop(shost->ehandler);
        if (shost->work_q)
                destroy_workqueue(shost->work_q);
-       if (shost->uspace_req_q) {
-               kfree(shost->uspace_req_q->queuedata);
-               scsi_free_queue(shost->uspace_req_q);
+       q = shost->uspace_req_q;
+       if (q) {
+               kfree(q->queuedata);
+               q->queuedata = NULL;
+               scsi_free_queue(q);
        }
 
        scsi_destroy_command_freelist(shost);
index 78c2e20b20258c3b6aa84ab1b0998f9e367ecc9a..58f99f44bd056515d4477bf1cbe1690c5ea08b55 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -1653,30 +1654,26 @@ static void figure_bus_target_lun(struct ctlr_info *h,
 
        if (is_logical_dev_addr_mode(lunaddrbytes)) {
                /* logical device */
-               if (unlikely(is_scsi_rev_5(h))) {
-                       /* p1210m, logical drives lun assignments
-                        * match SCSI REPORT LUNS data.
+               lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
+               if (is_msa2xxx(h, device)) {
+                       /* msa2xxx way, put logicals on bus 1
+                        * and match target/lun numbers box
+                        * reports.
                         */
-                       lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
-                       *bus = 0;
-                       *target = 0;
-                       *lun = (lunid & 0x3fff) + 1;
+                       *bus = 1;
+                       *target = (lunid >> 16) & 0x3fff;
+                       *lun = lunid & 0x00ff;
                } else {
-                       /* not p1210m... */
-                       lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
-                       if (is_msa2xxx(h, device)) {
-                               /* msa2xxx way, put logicals on bus 1
-                                * and match target/lun numbers box
-                                * reports.
-                                */
-                               *bus = 1;
-                               *target = (lunid >> 16) & 0x3fff;
-                               *lun = lunid & 0x00ff;
+                       if (likely(is_scsi_rev_5(h))) {
+                               /* All current smart arrays (circa 2011) */
+                               *bus = 0;
+                               *target = 0;
+                               *lun = (lunid & 0x3fff) + 1;
                        } else {
-                               /* Traditional smart array way. */
+                               /* Traditional old smart array way. */
                                *bus = 0;
-                               *lun = 0;
                                *target = lunid & 0x3fff;
+                               *lun = 0;
                        }
                }
        } else {
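
All three branches above decode the same 32-bit LUN id; only the bus/target/lun layout differs per controller family. The bit arithmetic on its own, with the field layout taken from the hunk and a made-up id:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t lunid = 0x00120034;            /* example value only */

        /* MSA2xxx-style: bus 1, target in bits 29..16, lun in bits 7..0 */
        printf("msa2xxx: bus 1, target %u, lun %u\n",
               (unsigned)((lunid >> 16) & 0x3fff), (unsigned)(lunid & 0x00ff));

        /* SCSI rev-5 logical drives: flat addressing, lun = (id & 0x3fff) + 1 */
        printf("rev5:    bus 0, target 0, lun %u\n",
               (unsigned)((lunid & 0x3fff) + 1));
        return 0;
}
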
@@ -3300,6 +3297,13 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= PCI_D0;
                pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+               /*
+                * The P600 requires a small delay when changing states.
+                * Otherwise we may think the board did not reset and we bail.
+                * This for kdump only and is particular to the P600.
+                */
+               msleep(500);
        }
        return 0;
 }
@@ -3880,6 +3884,10 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
                dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
                return -ENODEV;
        }
+
+       pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
+                              PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+
        err = pci_enable_device(h->pdev);
        if (err) {
                dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
@@ -4025,10 +4033,10 @@ static int hpsa_request_irq(struct ctlr_info *h,
 
        if (h->msix_vector || h->msi_vector)
                rc = request_irq(h->intr[h->intr_mode], msixhandler,
-                               IRQF_DISABLED, h->devname, h);
+                               0, h->devname, h);
        else
                rc = request_irq(h->intr[h->intr_mode], intxhandler,
-                               IRQF_DISABLED, h->devname, h);
+                               IRQF_SHARED, h->devname, h);
        if (rc) {
                dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
                       h->intr[h->intr_mode], h->devname);
index 888086c4e709497e6d8c7ae9e02dcf06b5bbca18..c5c7c3abb4b964b8b7c9b98bef05f65cee8f83ee 100644 (file)
@@ -8812,7 +8812,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
        uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
        if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
                ioa_cfg->needs_hard_reset = 1;
-       if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
+       if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
                ioa_cfg->needs_hard_reset = 1;
        if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
                ioa_cfg->ioa_unit_checked = 1;
index 61e0d09e2b57950dc47f875a782f58941f42495a..71b57ec3d9340976e049f5bf6bc27020bf92e4e3 100644 (file)
@@ -454,7 +454,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
        if (!orom)
                orom = isci_request_oprom(pdev);
 
-       for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+       for (i = 0; orom && i < num_controllers(pdev); i++) {
                if (sci_oem_parameters_validate(&orom->ctrl[i])) {
                        dev_warn(&pdev->dev,
                                 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
index d1de63312e7f3cc544b7487ad87a297449dc2c01..8efeb6b083213bb20d15ca3a9ca0a954515ec23f 100644 (file)
@@ -97,7 +97,7 @@
 #define SCU_MAX_COMPLETION_QUEUE_SHIFT   (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
 
 #define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
-#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE   (1024)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE   (1024U)
 #define SCU_INVALID_FRAME_INDEX             (0xFFFF)
 
 #define SCU_IO_REQUEST_MAX_SGE_SIZE         (0x00FFFFFF)
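
The U suffix pins the constant to an unsigned type; the SMP response handling later in this series clamps against SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4 with min_t(unsigned int, ...), so the constant then only ever mixes with other unsigned values. A two-line reminder of what the usual arithmetic conversions do when signed and unsigned operands meet in a comparison:

#include <stdio.h>

int main(void)
{
        int count = -1;                 /* e.g. a length that went negative */

        /* -1 is converted to a huge unsigned value before the compare,
         * so mixed-signedness bounds checks deserve explicit types. */
        if (count < 1024U)
                printf("below the bound\n");
        else
                printf("-1 compares as %u\n", (unsigned)count);
        return 0;
}
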
index 486b113c634a4810652f11a4aa197c551a07c390..38a99d2811411d102220a96bdf918579fb47546b 100644 (file)
@@ -678,7 +678,7 @@ static void apc_agent_timeout(unsigned long data)
        configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
 
        if (!configure_phy_mask)
-               return;
+               goto done;
 
        for (index = 0; index < SCI_MAX_PHYS; index++) {
                if ((configure_phy_mask & (1 << index)) == 0)
index b5d3a8c4d3297ebf87e72a8087d5277500abc17d..225b196800a2c4538e13f031e184f23a4465edfd 100644 (file)
@@ -1490,29 +1490,30 @@ sci_io_request_frame_handler(struct isci_request *ireq,
                return SCI_SUCCESS;
 
        case SCI_REQ_SMP_WAIT_RESP: {
-               struct smp_resp *rsp_hdr = &ireq->smp.rsp;
-               void *frame_header;
+               struct sas_task *task = isci_request_access_task(ireq);
+               struct scatterlist *sg = &task->smp_task.smp_resp;
+               void *frame_header, *kaddr;
+               u8 *rsp;
 
                sci_unsolicited_frame_control_get_header(&ihost->uf_control,
-                                                             frame_index,
-                                                             &frame_header);
-
-               /* byte swap the header. */
-               word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
-               sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
+                                                        frame_index,
+                                                        &frame_header);
+               kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+               rsp = kaddr + sg->offset;
+               sci_swab32_cpy(rsp, frame_header, 1);
 
-               if (rsp_hdr->frame_type == SMP_RESPONSE) {
+               if (rsp[0] == SMP_RESPONSE) {
                        void *smp_resp;
 
                        sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
-                                                                     frame_index,
-                                                                     &smp_resp);
+                                                                frame_index,
+                                                                &smp_resp);
 
-                       word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
-                               sizeof(u32);
-
-                       sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
-                                      smp_resp, word_cnt);
+                       word_cnt = (sg->length/4)-1;
+                       if (word_cnt > 0)
+                               word_cnt = min_t(unsigned int, word_cnt,
+                                                SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
+                       sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
 
                        ireq->scu_status = SCU_TASK_DONE_GOOD;
                        ireq->sci_status = SCI_SUCCESS;
@@ -1528,12 +1529,13 @@ sci_io_request_frame_handler(struct isci_request *ireq,
                                __func__,
                                ireq,
                                frame_index,
-                               rsp_hdr->frame_type);
+                               rsp[0]);
 
                        ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
                        ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
                        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                }
+               kunmap_atomic(kaddr, KM_IRQ0);
 
                sci_controller_release_frame(ihost, frame_index);
 
@@ -2603,18 +2605,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
                        status   = SAM_STAT_GOOD;
                        set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
 
-                       if (task->task_proto == SAS_PROTOCOL_SMP) {
-                               void *rsp = &request->smp.rsp;
-
-                               dev_dbg(&ihost->pdev->dev,
-                                       "%s: SMP protocol completion\n",
-                                       __func__);
-
-                               sg_copy_from_buffer(
-                                       &task->smp_task.smp_resp, 1,
-                                       rsp, sizeof(struct smp_resp));
-                       } else if (completion_status
-                                  == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+                       if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
 
                                /* This was an SSP / STP / SATA transfer.
                                 * There is a possibility that less data than
index 7a1d5a9778eba04c557ae68fe7a20147d4debd6a..58d70b6606efbc6e808e2ef6401c9d23fe68bbd2 100644 (file)
@@ -173,9 +173,6 @@ struct isci_request {
                                u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
                        };
                } ssp;
-               struct {
-                       struct smp_resp rsp;
-               } smp;
                struct {
                        struct isci_stp_request req;
                        struct host_to_dev_fis cmd;
index 462b15174d3f012c19de4d87c7fe98e7f4f2363b..dc26b4aea99e6fb636d3868071768cbf897ccc5b 100644 (file)
@@ -204,8 +204,6 @@ struct smp_req {
        u8 req_data[0];
 }  __packed;
 
-#define SMP_RESP_HDR_SZ        4
-
 /*
  * struct sci_sas_address - This structure depicts how a SAS address is
  *    represented by SCI.
index 16ad97df5ba6790b11eb40b1a4eef4fd90325cd1..e68fac69504bacea696e772f4778f5f33929924a 100644 (file)
@@ -192,13 +192,22 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
        phy->attached_sata_ps   = dr->attached_sata_ps;
        phy->attached_iproto = dr->iproto << 1;
        phy->attached_tproto = dr->tproto << 1;
-       memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
+       /* help some expanders that fail to zero sas_address in the 'no
+        * device' case
+        */
+       if (phy->attached_dev_type == NO_DEVICE ||
+           phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
+               memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+       else
+               memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
        phy->attached_phy_id = dr->attached_phy_id;
        phy->phy_change_count = dr->change_count;
        phy->routing_attr = dr->routing_attr;
        phy->virtual = dr->virtual;
        phy->last_da_index = -1;
 
+       phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
+       phy->phy->identify.device_type = phy->attached_dev_type;
        phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
        phy->phy->identify.target_port_protocols = phy->attached_tproto;
        phy->phy->identify.phy_identifier = phy_id;
@@ -1630,9 +1639,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
                int phy_change_count = 0;
 
                res = sas_get_phy_change_count(dev, i, &phy_change_count);
-               if (res)
-                       goto out;
-               else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
+               switch (res) {
+               case SMP_RESP_PHY_VACANT:
+               case SMP_RESP_NO_PHY:
+                       continue;
+               case SMP_RESP_FUNC_ACC:
+                       break;
+               default:
+                       return res;
+               }
+
+               if (phy_change_count != ex->ex_phy[i].phy_change_count) {
                        if (update)
                                ex->ex_phy[i].phy_change_count =
                                        phy_change_count;
@@ -1640,8 +1657,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
                        return 0;
                }
        }
-out:
-       return res;
+       return 0;
 }
 
 static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
index 2d8cdce7b2f5af14355233424e7092c218821da1..e6e30f4da1f6ded382506400cd18b9d58fc40455 100644 (file)
@@ -1906,7 +1906,6 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
 static enum
 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
 {
-       struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
        struct megasas_instance *instance;
        unsigned long flags;
 
@@ -1915,7 +1914,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
                return BLK_EH_NOT_HANDLED;
        }
 
-       instance = cmd->instance;
+       instance = (struct megasas_instance *)scmd->device->host->hostdata;
        if (!(instance->flag & MEGASAS_FW_BUSY)) {
                /* FW is busy, throttle IO */
                spin_lock_irqsave(instance->host->host_lock, flags);
index 83035bd1c489545f7bfcec76b1c5b90c1bafcd15..e10639bd73c387b8b817771631e0a1e5dd21902e 100644 (file)
@@ -66,6 +66,8 @@ static MPT_CALLBACK   mpt_callbacks[MPT_MAX_CALLBACKS];
 
 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
 
+#define MAX_HBA_QUEUE_DEPTH    30000
+#define MAX_CHAIN_DEPTH                100000
 static int max_queue_depth = -1;
 module_param(max_queue_depth, int, 0);
 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
@@ -1081,41 +1083,6 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
        return 0;
 }
 
-/**
- * _base_save_msix_table - backup msix vector table
- * @ioc: per adapter object
- *
- * This address an errata where diag reset clears out the table
- */
-static void
-_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
-{
-       int i;
-
-       if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
-               return;
-
-       for (i = 0; i < ioc->msix_vector_count; i++)
-               ioc->msix_table_backup[i] = ioc->msix_table[i];
-}
-
-/**
- * _base_restore_msix_table - this restores the msix vector table
- * @ioc: per adapter object
- *
- */
-static void
-_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
-{
-       int i;
-
-       if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
-               return;
-
-       for (i = 0; i < ioc->msix_vector_count; i++)
-               ioc->msix_table[i] = ioc->msix_table_backup[i];
-}
-
 /**
  * _base_check_enable_msix - checks MSIX capabable.
  * @ioc: per adapter object
@@ -1128,7 +1095,7 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
 {
        int base;
        u16 message_control;
-       u32 msix_table_offset;
+
 
        base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
        if (!base) {
@@ -1141,14 +1108,8 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
        pci_read_config_word(ioc->pdev, base + 2, &message_control);
        ioc->msix_vector_count = (message_control & 0x3FF) + 1;
 
-       /* get msix table  */
-       pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
-       msix_table_offset &= 0xFFFFFFF8;
-       ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
-
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
-           "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
-           ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
+           "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
        return 0;
 }
 
@@ -1162,8 +1123,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
 {
        if (ioc->msix_enable) {
                pci_disable_msix(ioc->pdev);
-               kfree(ioc->msix_table_backup);
-               ioc->msix_table_backup = NULL;
                ioc->msix_enable = 0;
        }
 }
@@ -1189,14 +1148,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
        if (_base_check_enable_msix(ioc) != 0)
                goto try_ioapic;
 
-       ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
-           sizeof(u32), GFP_KERNEL);
-       if (!ioc->msix_table_backup) {
-               dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
-                   "msix_table_backup failed!!!\n", ioc->name));
-               goto try_ioapic;
-       }
-
        memset(&entries, 0, sizeof(struct msix_entry));
        r = pci_enable_msix(ioc->pdev, &entries, 1);
        if (r) {
@@ -2149,8 +2100,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
                }
                if (ioc->chain_dma_pool)
                        pci_pool_destroy(ioc->chain_dma_pool);
-       }
-       if (ioc->chain_lookup) {
                free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
                ioc->chain_lookup = NULL;
        }
@@ -2168,9 +2117,7 @@ static int
 _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
 {
        struct mpt2sas_facts *facts;
-       u32 queue_size, queue_diff;
        u16 max_sge_elements;
-       u16 num_of_reply_frames;
        u16 chains_needed_per_io;
        u32 sz, total_sz;
        u32 retry_sz;
@@ -2197,7 +2144,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
                max_request_credit = (max_queue_depth < facts->RequestCredit)
                    ? max_queue_depth : facts->RequestCredit;
        else
-               max_request_credit = facts->RequestCredit;
+               max_request_credit = min_t(u16, facts->RequestCredit,
+                   MAX_HBA_QUEUE_DEPTH);
 
        ioc->hba_queue_depth = max_request_credit;
        ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2238,50 +2186,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
        }
        ioc->chains_needed_per_io = chains_needed_per_io;
 
-       /* reply free queue sizing - taking into account for events */
-       num_of_reply_frames = ioc->hba_queue_depth + 32;
-
-       /* number of replies frames can't be a multiple of 16 */
-       /* decrease number of reply frames by 1 */
-       if (!(num_of_reply_frames % 16))
-               num_of_reply_frames--;
-
-       /* calculate number of reply free queue entries
-        *  (must be multiple of 16)
-        */
-
-       /* (we know reply_free_queue_depth is not a multiple of 16) */
-       queue_size = num_of_reply_frames;
-       queue_size += 16 - (queue_size % 16);
-       ioc->reply_free_queue_depth = queue_size;
+       /* reply free queue sizing - taking into account for 64 FW events */
+       ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 
-       /* reply descriptor post queue sizing */
-       /* this size should be the number of request frames + number of reply
-        * frames
-        */
-
-       queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
-       /* round up to 16 byte boundary */
-       if (queue_size % 16)
-               queue_size += 16 - (queue_size % 16);
-
-       /* check against IOC maximum reply post queue depth */
-       if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
-               queue_diff = queue_size -
-                   facts->MaxReplyDescriptorPostQueueDepth;
-
-               /* round queue_diff up to multiple of 16 */
-               if (queue_diff % 16)
-                       queue_diff += 16 - (queue_diff % 16);
-
-               /* adjust hba_queue_depth, reply_free_queue_depth,
-                * and queue_size
-                */
-               ioc->hba_queue_depth -= (queue_diff / 2);
-               ioc->reply_free_queue_depth -= (queue_diff / 2);
-               queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+       /* align the reply post queue on the next 16 count boundary */
+       if (!ioc->reply_free_queue_depth % 16)
+               ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
+       else
+               ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
+                               32 - (ioc->reply_free_queue_depth % 16);
+       if (ioc->reply_post_queue_depth >
+           facts->MaxReplyDescriptorPostQueueDepth) {
+               ioc->reply_post_queue_depth = min_t(u16,
+                   (facts->MaxReplyDescriptorPostQueueDepth -
+                   (facts->MaxReplyDescriptorPostQueueDepth % 16)),
+                   (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
+               ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
+               ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
        }
-       ioc->reply_post_queue_depth = queue_size;
+
 
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
            "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
@@ -2367,15 +2290,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
            "depth(%d)\n", ioc->name, ioc->request,
            ioc->scsiio_depth));
 
-       /* loop till the allocation succeeds */
-       do {
-               sz = ioc->chain_depth * sizeof(struct chain_tracker);
-               ioc->chain_pages = get_order(sz);
-               ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
-                   GFP_KERNEL, ioc->chain_pages);
-               if (ioc->chain_lookup == NULL)
-                       ioc->chain_depth -= 100;
-       } while (ioc->chain_lookup == NULL);
+       ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+       sz = ioc->chain_depth * sizeof(struct chain_tracker);
+       ioc->chain_pages = get_order(sz);
+
+       ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+           GFP_KERNEL, ioc->chain_pages);
        ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
            ioc->request_sz, 16, 0);
        if (!ioc->chain_dma_pool) {
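
The sizing rework above replaces the shrink-until-it-fits loop with explicit clamps (MAX_HBA_QUEUE_DEPTH, MAX_CHAIN_DEPTH) and keeps the reply queues on 16-entry boundaries. A standalone sketch of the round-up-and-clamp arithmetic; note that C's ! binds tighter than %, so a guard spelled !depth % 16 evaluates (!depth) % 16, not depth % 16 == 0:

#include <stdio.h>

/* Round x up to the next multiple of 16 (already-aligned values stay put). */
static unsigned int roundup16(unsigned int x)
{
        return (x + 15u) & ~15u;
}

int main(void)
{
        unsigned int free_depth = 30000 + 64;   /* illustrative numbers */
        unsigned int max_post   = 65535;
        unsigned int post_depth = roundup16(free_depth + 1);

        if (post_depth > max_post)
                post_depth = max_post - (max_post % 16);

        printf("reply post depth %u, multiple of 16: %s\n",
               post_depth, post_depth % 16 ? "no" : "yes");
        return 0;
}
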
@@ -3136,7 +3056,7 @@ _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
        }
 
        pfacts = &ioc->pfacts[port];
-       memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
+       memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
        pfacts->PortNumber = mpi_reply.PortNumber;
        pfacts->VP_ID = mpi_reply.VP_ID;
        pfacts->VF_ID = mpi_reply.VF_ID;
@@ -3178,7 +3098,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
        }
 
        facts = &ioc->facts;
-       memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
+       memset(facts, 0, sizeof(struct mpt2sas_facts));
        facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
        facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
        facts->VP_ID = mpi_reply.VP_ID;
@@ -3513,9 +3433,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
        u32 hcb_size;
 
        printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
-
-       _base_save_msix_table(ioc);
-
        drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
            ioc->name));
 
@@ -3611,7 +3528,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
                goto out;
        }
 
-       _base_restore_msix_table(ioc);
        printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
        return 0;
 
@@ -3863,7 +3779,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
                goto out_free_resources;
 
        ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
-           sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
+           sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
        if (!ioc->pfacts) {
                r = -ENOMEM;
                goto out_free_resources;
index 41a57a7a5b3a7f106f17a9791f8b1e31d720a644..e1735f99f2384cd3df5d88ddd81bfe46f5df10ac 100644 (file)
@@ -626,8 +626,6 @@ struct mpt2sas_port_facts {
  * @wait_for_port_enable_to_complete:
  * @msix_enable: flag indicating msix is enabled
  * @msix_vector_count: number msix vectors
- * @msix_table: virt address to the msix table
- * @msix_table_backup: backup msix table
  * @scsi_io_cb_idx: shost generated commands
  * @tm_cb_idx: task management commands
  * @scsih_cb_idx: scsih internal commands
@@ -768,8 +766,6 @@ struct MPT2SAS_ADAPTER {
 
        u8              msix_enable;
        u16             msix_vector_count;
-       u32             *msix_table;
-       u32             *msix_table_backup;
        u32             ioc_reset_count;
 
        /* internal commands, callback index */
index 8dc2ad4a0a35763b90ee0664f6fff6965b9c0255..aa51195a7312221bb62b39ac519b164d4c0b8090 100644 (file)
@@ -974,8 +974,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
        if (list_empty(&ioc->free_chain_list)) {
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-               printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
-                   ioc->name);
+               dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
+                       "available\n", ioc->name));
                return NULL;
        }
        chain_req = list_entry(ioc->free_chain_list.next,
@@ -4145,7 +4145,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
        /* insert into event log */
        sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
             sizeof(Mpi2EventDataSasDeviceStatusChange_t);
-       event_reply = kzalloc(sz, GFP_KERNEL);
+       event_reply = kzalloc(sz, GFP_ATOMIC);
        if (!event_reply) {
                printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
@@ -6425,6 +6425,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
                        } else
                                sas_target_priv_data = NULL;
                        raid_device->responding = 1;
+                       spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
                        starget_printk(KERN_INFO, raid_device->starget,
                            "handle(0x%04x), wwid(0x%016llx)\n", handle,
                            (unsigned long long)raid_device->wwid);
@@ -6435,16 +6436,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
                         */
                        _scsih_init_warpdrive_properties(ioc, raid_device);
                        if (raid_device->handle == handle)
-                               goto out;
+                               return;
                        printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
                            raid_device->handle);
                        raid_device->handle = handle;
                        if (sas_target_priv_data)
                                sas_target_priv_data->handle = handle;
-                       goto out;
+                       return;
                }
        }
- out:
+
        spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 }
 
@@ -7211,6 +7212,7 @@ _scsih_remove(struct pci_dev *pdev)
        }
 
        sas_remove_host(shost);
+       mpt2sas_base_detach(ioc);
        list_del(&ioc->list);
        scsi_remove_host(shost);
        scsi_host_put(shost);
@@ -7318,22 +7320,27 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
        /* SAS Device List */
        list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
            list) {
-               spin_lock_irqsave(&ioc->sas_device_lock, flags);
-               list_move_tail(&sas_device->list, &ioc->sas_device_list);
-               spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
                if (ioc->hide_drives)
                        continue;
 
                if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
                    sas_device->sas_address_parent)) {
-                       _scsih_sas_device_remove(ioc, sas_device);
+                       list_del(&sas_device->list);
+                       kfree(sas_device);
+                       continue;
                } else if (!sas_device->starget) {
                        mpt2sas_transport_port_remove(ioc,
                            sas_device->sas_address,
                            sas_device->sas_address_parent);
-                       _scsih_sas_device_remove(ioc, sas_device);
+                       list_del(&sas_device->list);
+                       kfree(sas_device);
+                       continue;
+
                }
+               spin_lock_irqsave(&ioc->sas_device_lock, flags);
+               list_move_tail(&sas_device->list, &ioc->sas_device_list);
+               spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
        }
 }
 
index b31a8e3841d795154672cad902ec628bafb59c6f..d4ed9eb526572e07814e167eca89f94d22a3f73a 100644 (file)
 #ifndef SCSI_OSD_MAJOR
 #  define SCSI_OSD_MAJOR 260
 #endif
-#define SCSI_OSD_MAX_MINOR 64
+#define SCSI_OSD_MAX_MINOR MINORMASK
 
 static const char osd_name[] = "osd";
-static const char *osd_version_string = "open-osd 0.2.0";
+static const char *osd_version_string = "open-osd 0.2.1";
 
 MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
 MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
index 28d9c9d6b4b401f926eb62709efe9027e0d4cfaf..99fc45bb72dfedb715ebe7fd9281a46a90258b9c 100644 (file)
@@ -1380,16 +1380,19 @@ static int scsi_lld_busy(struct request_queue *q)
 {
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
-       struct scsi_target *starget;
 
        if (!sdev)
                return 0;
 
        shost = sdev->host;
-       starget = scsi_target(sdev);
 
-       if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
-           scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
+       /*
+        * Ignore host/starget busy state.
+        * Since block layer does not have a concept of fairness across
+        * multiple queues, congestion of host/starget needs to be handled
+        * in SCSI layer.
+        */
+       if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
                return 1;
 
        return 0;
@@ -1407,6 +1410,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 
        blk_start_request(req);
 
+       scmd_printk(KERN_INFO, cmd, "killing request\n");
+
        sdev = cmd->device;
        starget = scsi_target(sdev);
        shost = sdev->host;
@@ -1488,7 +1493,6 @@ static void scsi_request_fn(struct request_queue *q)
        struct request *req;
 
        if (!sdev) {
-               printk("scsi: killing requests for dead queue\n");
                while ((req = blk_peek_request(q)) != NULL)
                        scsi_kill_request(req, q);
                return;
@@ -1697,6 +1701,15 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 
 void scsi_free_queue(struct request_queue *q)
 {
+       unsigned long flags;
+
+       WARN_ON(q->queuedata);
+
+       /* cause scsi_request_fn() to kill all non-finished requests */
+       spin_lock_irqsave(q->queue_lock, flags);
+       q->request_fn(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
        blk_cleanup_queue(q);
 }
 
index d70e91ae60af199d8e64a027fa4382026e553f90..122a5a2020ad5f3d607611e96b370e5f90a226d6 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/pm_runtime.h>
+#include <linux/async.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -68,6 +69,19 @@ static int scsi_bus_resume_common(struct device *dev)
        return err;
 }
 
+static int scsi_bus_prepare(struct device *dev)
+{
+       if (scsi_is_sdev_device(dev)) {
+               /* sd probing uses async_schedule.  Wait until it finishes. */
+               async_synchronize_full();
+
+       } else if (scsi_is_host_device(dev)) {
+               /* Wait until async scanning is finished */
+               scsi_complete_async_scans();
+       }
+       return 0;
+}
+
 static int scsi_bus_suspend(struct device *dev)
 {
        return scsi_bus_suspend_common(dev, PMSG_SUSPEND);
@@ -86,6 +100,7 @@ static int scsi_bus_poweroff(struct device *dev)
 #else /* CONFIG_PM_SLEEP */
 
 #define scsi_bus_resume_common         NULL
+#define scsi_bus_prepare               NULL
 #define scsi_bus_suspend               NULL
 #define scsi_bus_freeze                        NULL
 #define scsi_bus_poweroff              NULL
@@ -194,6 +209,7 @@ void scsi_autopm_put_host(struct Scsi_Host *shost)
 #endif /* CONFIG_PM_RUNTIME */
 
 const struct dev_pm_ops scsi_bus_pm_ops = {
+       .prepare =              scsi_bus_prepare,
        .suspend =              scsi_bus_suspend,
        .resume =               scsi_bus_resume_common,
        .freeze =               scsi_bus_freeze,
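
The new .prepare step relies on the fact that sd probing runs through the kernel's async machinery; waiting there keeps system suspend from racing with a probe that is still in flight. A self-contained sketch of the async_schedule()/async_synchronize_full() pairing; the demo module and its work function are invented for illustration:

#include <linux/async.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Stand-in for the asynchronous half of a slow probe such as sd_probe(). */
static void demo_async_probe(void *data, async_cookie_t cookie)
{
        pr_info("async probe for %s finished\n", (char *)data);
}

static int __init demo_init(void)
{
        async_schedule(demo_async_probe, "diskA");

        /*
         * Block until every outstanding async callback has run;
         * this is the same call scsi_bus_prepare() makes for sd devices.
         */
        async_synchronize_full();
        return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");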
index 2a588955423a58f971d91472171830137adda6ce..5b475d0832cbf4520bdb5c8f7f8e5403ad8a1265 100644 (file)
@@ -110,6 +110,7 @@ extern void scsi_exit_procfs(void);
 #endif /* CONFIG_PROC_FS */
 
 /* scsi_scan.c */
+extern int scsi_complete_async_scans(void);
 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
                                   unsigned int, unsigned int, int);
 extern void scsi_forget_host(struct Scsi_Host *);
index 44e8ca398efa790083006131f5adb3bf37cd57aa..6e7ea4a2b7a1515c1a2f6efe9386836cf18659a9 100644 (file)
@@ -319,10 +319,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
        return sdev;
 
 out_device_destroy:
-       scsi_device_set_state(sdev, SDEV_DEL);
-       transport_destroy_device(&sdev->sdev_gendev);
-       put_device(&sdev->sdev_dev);
-       put_device(&sdev->sdev_gendev);
+       __scsi_remove_device(sdev);
 out:
        if (display_failure_msg)
                printk(ALLOC_FAILURE_MSG, __func__);
@@ -1818,6 +1815,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
        }
        spin_unlock(&async_scan_lock);
 
+       scsi_autopm_put_host(shost);
        scsi_host_put(shost);
        kfree(data);
 }
@@ -1844,7 +1842,6 @@ static int do_scan_async(void *_data)
 
        do_scsi_scan_host(shost);
        scsi_finish_async_scan(data);
-       scsi_autopm_put_host(shost);
        return 0;
 }
 
@@ -1872,7 +1869,7 @@ void scsi_scan_host(struct Scsi_Host *shost)
        p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
        if (IS_ERR(p))
                do_scan_async(data);
-       /* scsi_autopm_put_host(shost) is called in do_scan_async() */
+       /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
 }
 EXPORT_SYMBOL(scsi_scan_host);
 
index 74708fcaf82fe900c3a77689ccad11c91b48b173..ae781487461829ae190f41f52d6f3e267e1fa5bb 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/module.h>
 #include <linux/device.h>
-#include <scsi/scsi_scan.h>
+#include "scsi_priv.h"
 
 static int __init wait_scan_init(void)
 {
index 953773cb26d9a6204a52d56e75884622632c4c0c..7d8b5d8d74992a3dcea1e937e090b55fde4e5a85 100644 (file)
@@ -1073,6 +1073,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
        SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
                                                disk->disk_name, cmd));
 
+       error = scsi_verify_blk_ioctl(bdev, cmd);
+       if (error < 0)
+               return error;
+
        /*
         * If we are in the middle of error recovery, don't let anyone
         * else try and use this device.  Also, if error recovery fails, it
@@ -1095,7 +1099,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
                        error = scsi_ioctl(sdp, cmd, p);
                        break;
                default:
-                       error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
+                       error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
                        if (error != -ENOTTY)
                                break;
                        error = scsi_ioctl(sdp, cmd, p);
@@ -1265,6 +1269,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
                           unsigned int cmd, unsigned long arg)
 {
        struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+       int ret;
+
+       ret = scsi_verify_blk_ioctl(bdev, cmd);
+       if (ret < 0)
+               return -ENOIOCTLCMD;
 
        /*
         * If we are in the middle of error recovery, don't let anyone
@@ -1276,8 +1285,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
                return -ENODEV;
               
        if (sdev->host->hostt->compat_ioctl) {
-               int ret;
-
                ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
 
                return ret;
index 1871b8ae83ae7031f73084470278c7a89ce8da96..9b28f39bac26b06dd8ce2f91e00e045436855abe 100644 (file)
@@ -462,14 +462,16 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
 {
        struct st_request *SRpnt = req->end_io_data;
        struct scsi_tape *STp = SRpnt->stp;
+       struct bio *tmp;
 
        STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
        STp->buffer->cmdstat.residual = req->resid_len;
 
+       tmp = SRpnt->bio;
        if (SRpnt->waiting)
                complete(SRpnt->waiting);
 
-       blk_rq_unmap_user(SRpnt->bio);
+       blk_rq_unmap_user(tmp);
        __blk_put_request(req->q, req);
 }
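
The temporary 'tmp' exists because complete() may immediately wake the waiter, which can free the st_request; touching SRpnt->bio afterwards would then be a use-after-free. In schematic form:

/*
 * Ordering hazard the hunk removes (schematic, not the driver code):
 *
 *     complete(SRpnt->waiting);         // waiter runs and may free SRpnt
 *     blk_rq_unmap_user(SRpnt->bio);    // dereferences freed memory
 *
 * Caching the bio pointer before complete() closes that window.
 */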
 
index b4543f575f466fc3c7fed5f3edad05be9595d0cd..36d1ed7817ebf9d52631c970023d31b31ff494e0 100644 (file)
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
        struct sym_lcb *lp = sym_lp(tp, sdev->lun);
        unsigned long flags;
 
+       /* if slave_alloc returned before allocating a sym_lcb, return */
+       if (!lp)
+               return;
+
        spin_lock_irqsave(np->s.host->host_lock, flags);
 
        if (lp->busy_itlq || lp->busy_itl) {
index 2b39a90fc15d5d33603cfd922cb9f4591a375ce3..a93551bc5b7c6793e8fbf532f76f0ea375c7d878 100755 (executable)
@@ -318,7 +318,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
        }
 
        spi->master = master;
-       spi->dev.parent = dev;
+       spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        device_initialize(&spi->dev);
index d6620ad309ce9489f35358cf6644bd20b8e96888..c828151c419e83ada937f23a648989f1c975c6d8 100644 (file)
@@ -516,10 +516,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
 
 static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
-       ssb_pcicore_fix_sprom_core_index(pc);
+       struct ssb_device *pdev = pc->dev;
+       struct ssb_bus *bus = pdev->bus;
+
+       if (bus->bustype == SSB_BUSTYPE_PCI)
+               ssb_pcicore_fix_sprom_core_index(pc);
 
        /* Disable PCI interrupts. */
-       ssb_write32(pc->dev, SSB_INTVEC, 0);
+       ssb_write32(pdev, SSB_INTVEC, 0);
 
        /* Additional PCIe always once-executed workarounds */
        if (pc->dev->id.coreid == SSB_DEV_PCIE) {
index fa76ce7678a6e289be14f5d15b8d39b0306fc6fe..3e09d57dc6d95768da4a26f0286b3cbbd3641e52 100644 (file)
@@ -57,6 +57,8 @@ struct logger_reader {
        struct logger_log       *log;   /* associated log */
        struct list_head        list;   /* entry in logger_log's list */
        size_t                  r_off;  /* current read head offset */
+       bool                    r_all;  /* reader can read all entries */
+       int                     r_ver;  /* reader ABI version */
 };
 
 /* logger_offset - returns index 'n' into the log via (optimized) modulus */
@@ -86,25 +88,71 @@ static inline struct logger_log *file_get_log(struct file *file)
 }
 
 /*
- * get_entry_len - Grabs the length of the payload of the next entry starting
- * from 'off'.
+ * get_entry_header - returns a pointer to the logger_entry header within
+ * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
+ * be provided. Typically the return value will be a pointer within
+ * 'log->buffer'.  However, a pointer to 'scratch' may be returned if
+ * the log entry spans the end and beginning of the circular buffer.
+ */
+static struct logger_entry *get_entry_header(struct logger_log *log,
+               size_t off, struct logger_entry *scratch)
+{
+       size_t len = min(sizeof(struct logger_entry), log->size - off);
+       if (len != sizeof(struct logger_entry)) {
+               memcpy(((void *) scratch), log->buffer + off, len);
+               memcpy(((void *) scratch) + len, log->buffer,
+                       sizeof(struct logger_entry) - len);
+               return scratch;
+       }
+
+       return (struct logger_entry *) (log->buffer + off);
+}
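
A quick worked example of the wrap-around branch, assuming a hypothetical 4096-byte log and the 24-byte version-2 header:

/*
 * Entry header starting at off = 4090, log->size = 4096:
 *
 *     len = min(sizeof(struct logger_entry), 4096 - 4090) = 6
 *     memcpy(scratch,     log->buffer + 4090, 6);    // tail of the buffer
 *     memcpy(scratch + 6, log->buffer,        18);   // wraps to the start
 *
 * and the function returns 'scratch' instead of a pointer into the buffer.
 */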
+
+/*
+ * get_entry_msg_len - Grabs the length of the message of the entry
+ * starting from 'off'.
  *
  * Caller needs to hold log->mutex.
  */
-static __u32 get_entry_len(struct logger_log *log, size_t off)
+static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
 {
-       __u16 val;
+       struct logger_entry scratch;
+       struct logger_entry *entry;
 
-       switch (log->size - off) {
-       case 1:
-               memcpy(&val, log->buffer + off, 1);
-               memcpy(((char *) &val) + 1, log->buffer, 1);
-               break;
-       default:
-               memcpy(&val, log->buffer + off, 2);
+       entry = get_entry_header(log, off, &scratch);
+       return entry->len;
+}
+
+static size_t get_user_hdr_len(int ver)
+{
+       if (ver < 2)
+               return sizeof(struct user_logger_entry_compat);
+       else
+               return sizeof(struct logger_entry);
+}
+
+static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
+                                        char __user *buf)
+{
+       void *hdr;
+       size_t hdr_len;
+       struct user_logger_entry_compat v1;
+
+       if (ver < 2) {
+               v1.len      = entry->len;
+               v1.__pad    = 0;
+               v1.pid      = entry->pid;
+               v1.tid      = entry->tid;
+               v1.sec      = entry->sec;
+               v1.nsec     = entry->nsec;
+               hdr         = &v1;
+               hdr_len     = sizeof(struct user_logger_entry_compat);
+       } else {
+               hdr         = entry;
+               hdr_len     = sizeof(struct logger_entry);
        }
 
-       return sizeof(struct logger_entry) + val;
+       return copy_to_user(buf, hdr, hdr_len);
 }
 
 /*
@@ -118,15 +166,30 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
                                   char __user *buf,
                                   size_t count)
 {
+       struct logger_entry scratch;
+       struct logger_entry *entry;
        size_t len;
+       size_t msg_start;
 
        /*
-        * We read from the log in two disjoint operations. First, we read from
-        * the current read head offset up to 'count' bytes or to the end of
+        * First, copy the header to userspace, using the version of
+        * the header requested
+        */
+       entry = get_entry_header(log, reader->r_off, &scratch);
+       if (copy_header_to_user(reader->r_ver, entry, buf))
+               return -EFAULT;
+
+       count -= get_user_hdr_len(reader->r_ver);
+       buf += get_user_hdr_len(reader->r_ver);
+       msg_start = logger_offset(reader->r_off + sizeof(struct logger_entry));
+
+       /*
+        * We read from the msg in two disjoint operations. First, we read from
+        * the current msg head offset up to 'count' bytes or to the end of
         * the log, whichever comes first.
         */
-       len = min(count, log->size - reader->r_off);
-       if (copy_to_user(buf, log->buffer + reader->r_off, len))
+       len = min(count, log->size - msg_start);
+       if (copy_to_user(buf, log->buffer + msg_start, len))
                return -EFAULT;
 
        /*
@@ -137,9 +200,34 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
                if (copy_to_user(buf + len, log->buffer, count - len))
                        return -EFAULT;
 
-       reader->r_off = logger_offset(reader->r_off + count);
+       reader->r_off = logger_offset(reader->r_off +
+               sizeof(struct logger_entry) + count);
 
-       return count;
+       return count + get_user_hdr_len(reader->r_ver);
+}
+
+/*
+ * get_next_entry_by_uid - Starting at 'off', returns an offset into
+ * 'log->buffer' which contains the first entry readable by 'euid'
+ */
+static size_t get_next_entry_by_uid(struct logger_log *log,
+               size_t off, uid_t euid)
+{
+       while (off != log->w_off) {
+               struct logger_entry *entry;
+               struct logger_entry scratch;
+               size_t next_len;
+
+               entry = get_entry_header(log, off, &scratch);
+
+               if (entry->euid == euid)
+                       return off;
+
+               next_len = sizeof(struct logger_entry) + entry->len;
+               off = logger_offset(off + next_len);
+       }
+
+       return off;
 }
 
 /*
@@ -151,7 +239,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
  *     - If there are no log entries to read, blocks until log is written to
  *     - Atomically reads exactly one log entry
  *
- * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
+ * Will set errno to EINVAL if read
  * buffer is insufficient to hold next entry.
  */
 static ssize_t logger_read(struct file *file, char __user *buf,
@@ -191,6 +279,10 @@ start:
 
        mutex_lock(&log->mutex);
 
+       if (!reader->r_all)
+               reader->r_off = get_next_entry_by_uid(log,
+                       reader->r_off, current_euid());
+
        /* is there still something to read or did we race? */
        if (unlikely(log->w_off == reader->r_off)) {
                mutex_unlock(&log->mutex);
@@ -198,7 +290,8 @@ start:
        }
 
        /* get the size of the next entry */
-       ret = get_entry_len(log, reader->r_off);
+       ret = get_user_hdr_len(reader->r_ver) +
+               get_entry_msg_len(log, reader->r_off);
        if (count < ret) {
                ret = -EINVAL;
                goto out;
@@ -224,7 +317,8 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
        size_t count = 0;
 
        do {
-               size_t nr = get_entry_len(log, off);
+               size_t nr = sizeof(struct logger_entry) +
+                       get_entry_msg_len(log, off);
                off = logger_offset(off + nr);
                count += nr;
        } while (count < len);
@@ -336,7 +430,9 @@ ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
        header.tid = current->pid;
        header.sec = now.tv_sec;
        header.nsec = now.tv_nsec;
+       header.euid = current_euid();
        header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+       header.hdr_size = sizeof(struct logger_entry);
 
        /* null writes succeed, return zero */
        if (unlikely(!header.len))
@@ -409,6 +505,10 @@ static int logger_open(struct inode *inode, struct file *file)
                        return -ENOMEM;
 
                reader->log = log;
+               reader->r_ver = 1;
+               reader->r_all = in_egroup_p(inode->i_gid) ||
+                       capable(CAP_SYSLOG);
+
                INIT_LIST_HEAD(&reader->list);
 
                mutex_lock(&log->mutex);
@@ -463,6 +563,10 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)
        poll_wait(file, &log->wq, wait);
 
        mutex_lock(&log->mutex);
+       if (!reader->r_all)
+               reader->r_off = get_next_entry_by_uid(log,
+                       reader->r_off, current_euid());
+
        if (log->w_off != reader->r_off)
                ret |= POLLIN | POLLRDNORM;
        mutex_unlock(&log->mutex);
@@ -470,11 +574,25 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)
        return ret;
 }
 
+static long logger_set_version(struct logger_reader *reader, void __user *arg)
+{
+       int version;
+       if (copy_from_user(&version, arg, sizeof(int)))
+               return -EFAULT;
+
+       if ((version < 1) || (version > 2))
+               return -EINVAL;
+
+       reader->r_ver = version;
+       return 0;
+}
+
 static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct logger_log *log = file_get_log(file);
        struct logger_reader *reader;
-       long ret = -ENOTTY;
+       long ret = -EINVAL;
+       void __user *argp = (void __user *) arg;
 
        mutex_lock(&log->mutex);
 
@@ -499,8 +617,14 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        break;
                }
                reader = file->private_data;
+
+               if (!reader->r_all)
+                       reader->r_off = get_next_entry_by_uid(log,
+                               reader->r_off, current_euid());
+
                if (log->w_off != reader->r_off)
-                       ret = get_entry_len(log, reader->r_off);
+                       ret = get_user_hdr_len(reader->r_ver) +
+                               get_entry_msg_len(log, reader->r_off);
                else
                        ret = 0;
                break;
@@ -514,6 +638,22 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                log->head = log->w_off;
                ret = 0;
                break;
+       case LOGGER_GET_VERSION:
+               if (!(file->f_mode & FMODE_READ)) {
+                       ret = -EBADF;
+                       break;
+               }
+               reader = file->private_data;
+               ret = reader->r_ver;
+               break;
+       case LOGGER_SET_VERSION:
+               if (!(file->f_mode & FMODE_READ)) {
+                       ret = -EBADF;
+                       break;
+               }
+               reader = file->private_data;
+               ret = logger_set_version(reader, argp);
+               break;
        }
 
        mutex_unlock(&log->mutex);
@@ -534,8 +674,8 @@ static const struct file_operations logger_fops = {
 
 /*
  * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
- * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
- * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ * must be a power of two, and greater than
+ * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
  */
 #define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
 static unsigned char _buf_ ## VAR[SIZE]; \
index 2cb06e9d8f9821ea6aa98b63d2a79b4e3b171578..3f612a3b101c9d233b907b31ca518eac220ee473 100644 (file)
 #include <linux/types.h>
 #include <linux/ioctl.h>
 
-struct logger_entry {
+/*
+ * The userspace structure for version 1 of the logger_entry ABI.
+ * This structure is returned to userspace unless the caller requests
+ * an upgrade to a newer ABI version.
+ */
+struct user_logger_entry_compat {
        __u16           len;    /* length of the payload */
        __u16           __pad;  /* no matter what, we get 2 bytes of padding */
        __s32           pid;    /* generating process's pid */
@@ -30,14 +35,28 @@ struct logger_entry {
        char            msg[0]; /* the entry's payload */
 };
 
+/*
+ * The structure for version 2 of the logger_entry ABI.
+ * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION)
+ * is called with version >= 2
+ */
+struct logger_entry {
+       __u16           len;            /* length of the payload */
+       __u16           hdr_size;       /* sizeof(struct logger_entry_v2) */
+       __s32           pid;            /* generating process's pid */
+       __s32           tid;            /* generating process's tid */
+       __s32           sec;            /* seconds since Epoch */
+       __s32           nsec;           /* nanoseconds */
+       uid_t           euid;           /* effective UID of logger */
+       char            msg[0];         /* the entry's payload */
+};
+
 #define LOGGER_LOG_RADIO       "log_radio"     /* radio-related messages */
 #define LOGGER_LOG_EVENTS      "log_events"    /* system/hardware events */
 #define LOGGER_LOG_SYSTEM      "log_system"    /* system/framework messages */
 #define LOGGER_LOG_MAIN                "log_main"      /* everything else */
 
-#define LOGGER_ENTRY_MAX_LEN           (4*1024)
-#define LOGGER_ENTRY_MAX_PAYLOAD       \
-       (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+#define LOGGER_ENTRY_MAX_PAYLOAD       4076
 
 #define __LOGGERIO     0xAE
 
@@ -45,5 +64,7 @@ struct logger_entry {
 #define LOGGER_GET_LOG_LEN             _IO(__LOGGERIO, 2) /* used log len */
 #define LOGGER_GET_NEXT_ENTRY_LEN      _IO(__LOGGERIO, 3) /* next entry len */
 #define LOGGER_FLUSH_LOG               _IO(__LOGGERIO, 4) /* flush log */
+#define LOGGER_GET_VERSION             _IO(__LOGGERIO, 5) /* abi version */
+#define LOGGER_SET_VERSION             _IO(__LOGGERIO, 6) /* abi version */
 
 #endif /* _LINUX_LOGGER_H */
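
From userspace, the intended flow is to request the v2 ABI with LOGGER_SET_VERSION and then read whole entries, locating the payload through hdr_size rather than a hard-coded offset, which stays correct if the header grows again. A hedged sketch of such a reader; the device path is the conventional Android node and the header is assumed to be copied into the program's include path:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include "logger.h"                     /* the header patched above */

int main(void)
{
        unsigned char buf[8192];
        int ver = 2;
        int fd = open("/dev/log/main", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, LOGGER_SET_VERSION, &ver) == 0) {
                ssize_t n = read(fd, buf, sizeof(buf)); /* one whole entry */

                if (n > 0) {
                        struct logger_entry *e = (struct logger_entry *)buf;

                        /* payload starts e->hdr_size bytes in */
                        printf("pid %d, %d bytes: %.*s\n", e->pid,
                               (int)e->len, (int)e->len,
                               (char *)buf + e->hdr_size);
                }
        }
        close(fd);
        return 0;
}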
index cb42d899822e6b550ba2d1c1258205304a87dcbb..b278d62e4b894e867432864b491fa377059c8eec 100644 (file)
@@ -143,7 +143,7 @@ ram_console_write(struct console *console, const char *s, unsigned int count)
 static struct console ram_console = {
        .name   = "ram",
        .write  = ram_console_write,
-       .flags  = CON_PRINTBUFFER | CON_ENABLED,
+       .flags  = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
        .index  = -1,
 };
 
index 7bb7da7959a29e1a40692190ea918f84c74f17eb..63bafbb0980fee50191ef7493dbd3079c9d8cbbb 100644 (file)
@@ -355,7 +355,14 @@ static void send_data(struct asus_oled_dev *odev)
 
 static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
 {
-       while (count-- > 0 && val) {
+       odev->last_val = val;
+
+       if (val == 0) {
+               odev->buf_offs += count;
+               return 0;
+       }
+
+       while (count-- > 0) {
                size_t x = odev->buf_offs % odev->width;
                size_t y = odev->buf_offs / odev->width;
                size_t i;
@@ -406,7 +413,6 @@ static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
                        ;
                }
 
-               odev->last_val = val;
                odev->buf_offs++;
        }
 
@@ -805,10 +811,9 @@ error:
 
 static void __exit asus_oled_exit(void)
 {
+       usb_deregister(&oled_driver);
        class_remove_file(oled_class, &class_attr_version.attr);
        class_destroy(oled_class);
-
-       usb_deregister(&oled_driver);
 }
 
 module_init(asus_oled_init);
index 453492610613490702125c66411d189558501188..934e7f9a8673743fedf8008bf511cae6ba7c859b 100644 (file)
@@ -143,7 +143,6 @@ static bool wlc_bmac_validate_chip_access(struct wlc_hw_info *wlc_hw);
 static char *wlc_get_macaddr(struct wlc_hw_info *wlc_hw);
 static void wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init);
 static void wlc_mctrl_write(struct wlc_hw_info *wlc_hw);
-static void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool want, mbool flags);
 static void wlc_ucode_mute_override_set(struct wlc_hw_info *wlc_hw);
 static void wlc_ucode_mute_override_clear(struct wlc_hw_info *wlc_hw);
 static u32 wlc_wlintrsoff(struct wlc_info *wlc);
@@ -2725,7 +2724,7 @@ void wlc_intrsrestore(struct wlc_info *wlc, u32 macintmask)
        W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
 }
 
-static void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool on, mbool flags)
+void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool on, mbool flags)
 {
        u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 
index a5dccc273ac5adb409b80d441033d2adae617c4d..a2a4e7328ee43b5bbc45e798d211dd880a179652 100644 (file)
@@ -103,6 +103,7 @@ extern void wlc_bmac_macphyclk_set(struct wlc_hw_info *wlc_hw, bool clk);
 extern void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw);
 extern void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags);
 extern void wlc_bmac_reset(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool want, mbool flags);
 extern void wlc_bmac_init(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
                          bool mute);
 extern int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw);
index 4b4a31eff90c9f7cd2f4ae69552e2d47460d9faa..99250e29461fe03d33e6168a1673100c9f79bce9 100644 (file)
@@ -6145,6 +6145,7 @@ wlc_recvctl(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p)
 {
        int len_mpdu;
        struct ieee80211_rx_status rx_status;
+       struct ieee80211_hdr *hdr;
 
        memset(&rx_status, 0, sizeof(rx_status));
        prep_mac80211_status(wlc, rxh, p, &rx_status);
@@ -6154,6 +6155,13 @@ wlc_recvctl(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p)
        skb_pull(p, D11_PHY_HDR_LEN);
        __skb_trim(p, len_mpdu);
 
+       /* unmute transmit */
+       if (wlc->hw->suspended_fifos) {
+               hdr = (struct ieee80211_hdr *)p->data;
+               if (ieee80211_is_beacon(hdr->frame_control))
+                       wlc_bmac_mute(wlc->hw, false, 0);
+       }
+
        memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
        ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
        return;
index c20694e65157e00192223ebefffb2e0429a1689c..ea8d109bb35e059d17ff3f06b7b414e9a850d865 100644 (file)
@@ -280,7 +280,7 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
        if (ret == 0) {
                if (!try_module_get(dev->driver->module)) {
                        comedi_device_detach(dev);
-                       return -ENOSYS;
+                       ret = -ENOSYS;
                }
        }
 
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
        return ret;
 }
 
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+       struct comedi_async *async;
+       struct comedi_device *dev;
+
+       async = area->vm_private_data;
+       dev = async->subdevice->device;
+
+       mutex_lock(&dev->mutex);
+       async->mmap_count++;
+       mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
 {
        struct comedi_async *async;
        struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
 }
 
 static struct vm_operations_struct comedi_vm_ops = {
-       .close = comedi_unmap,
+       .open = comedi_vm_open,
+       .close = comedi_vm_close,
 };
 
 static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_async *async = NULL;
        unsigned long start = vma->vm_start;
        unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
        int i;
        int retval;
        struct comedi_subdevice *s;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+
+       dev_file_info = comedi_get_device_file_info(minor);
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
 {
        unsigned int mask = 0;
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *read_subdev;
        struct comedi_subdevice *write_subdev;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy)
                                break;
                        if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy) {
                                retval = 0;
                                break;
@@ -1885,11 +1924,17 @@ ok:
 static int comedi_close(struct inode *inode, struct file *file)
 {
        const unsigned minor = iminor(inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *s = NULL;
        int i;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
 
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
 static int comedi_fasync(int fd, struct file *file, int on)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
 
-       struct comedi_device *dev = dev_file_info->device;
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        return fasync_helper(fd, file, on, &dev->async_queue);
 }
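
The minor-to-device lookup with its two NULL checks is now open-coded in mmap, poll, read, write, close and fasync. A hypothetical helper, not part of the driver, that would express the pattern once using the same functions the hunks above call:

/* Hypothetical: collapse the repeated lookup into one call site. */
static struct comedi_device *comedi_dev_from_minor(unsigned minor)
{
        struct comedi_device_file_info *dev_file_info;

        dev_file_info = comedi_get_device_file_info(minor);
        if (dev_file_info == NULL)
                return NULL;
        return dev_file_info->device;   /* may itself be NULL */
}

/* Callers then reduce to:
 *
 *     dev = comedi_dev_from_minor(iminor(file->f_dentry->d_inode));
 *     if (dev == NULL)
 *             return -ENODEV;
 */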
index a01f9a07c988961c0389eb60ffb794a0ce87743f..5af82f4235bdddce71a4c56960cdfdecbc16f3bd 100644 (file)
@@ -218,6 +218,7 @@ struct vstor_packet {
 #define STORVSC_MAX_LUNS_PER_TARGET                    64
 #define STORVSC_MAX_TARGETS                            1
 #define STORVSC_MAX_CHANNELS                           1
+#define STORVSC_MAX_CMD_LEN                            16
 
 struct hv_storvsc_request;
 
index cb4a25b08313917889a9394e95176d584432abac..734076b41bf4b175483673d748c587cef6b5be01 100644 (file)
@@ -729,6 +729,8 @@ static int storvsc_probe(struct hv_device *device)
        host->max_id = STORVSC_MAX_TARGETS;
        /* max # of channels */
        host->max_channel = STORVSC_MAX_CHANNELS - 1;
+       /* max cmd length */
+       host->max_cmd_len = STORVSC_MAX_CMD_LEN;
 
        /* Register the HBA and start the scsi bus scan */
        ret = scsi_add_host(host, &device->device);
index dd9a3bb6aa0173f95f89c55e3d1316e0321fa4c5..a1176d91312545490e67afdb6ebd8575fd8529cb 100644 (file)
@@ -520,7 +520,9 @@ static int hmc5843_detect(struct i2c_client *client,
 /* Called when we have found a new HMC5843. */
 static void hmc5843_init_client(struct i2c_client *client)
 {
-       struct hmc5843_data *data = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct hmc5843_data *data = iio_priv(indio_dev);
+
        hmc5843_set_meas_conf(client, data->meas_conf);
        hmc5843_set_rate(client, data->rate);
        hmc5843_configure(client, data->operating_mode);
index 805df913bb6e164f68ae0c2fdc6efe933040da9d..21cbc9ae79c938ae3c1eba28f3f28bce187051f8 100644 (file)
@@ -836,25 +836,22 @@ static int hardware_init_port(void)
        return 0;
 }
 
-static int init_port(void)
+static int __devinit lirc_serial_probe(struct platform_device *dev)
 {
        int i, nlow, nhigh, result;
 
        result = request_irq(irq, irq_handler,
                             IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
                             LIRC_DRIVER_NAME, (void *)&hardware);
-
-       switch (result) {
-       case -EBUSY:
-               printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
-               return -EBUSY;
-       case -EINVAL:
-               printk(KERN_ERR LIRC_DRIVER_NAME
-                      ": Bad irq number or handler\n");
-               return -EINVAL;
-       default:
-               break;
-       };
+       if (result < 0) {
+               if (result == -EBUSY)
+                       printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n",
+                              irq);
+               else if (result == -EINVAL)
+                       printk(KERN_ERR LIRC_DRIVER_NAME
+                              ": Bad irq number or handler\n");
+               return result;
+       }
 
        /* Reserve io region. */
        /*
@@ -875,11 +872,14 @@ static int init_port(void)
                       ": or compile the serial port driver as module and\n");
                printk(KERN_WARNING LIRC_DRIVER_NAME
                       ": make sure this module is loaded first\n");
-               return -EBUSY;
+               result = -EBUSY;
+               goto exit_free_irq;
        }
 
-       if (hardware_init_port() < 0)
-               return -EINVAL;
+       if (hardware_init_port() < 0) {
+               result = -EINVAL;
+               goto exit_release_region;
+       }
 
        /* Initialize pulse/space widths */
        init_timing_params(duty_cycle, freq);
@@ -911,6 +911,28 @@ static int init_port(void)
 
        dprintk("Interrupt %d, port %04x obtained\n", irq, io);
        return 0;
+
+exit_release_region:
+       if (iommap != 0)
+               release_mem_region(iommap, 8 << ioshift);
+       else
+               release_region(io, 8);
+exit_free_irq:
+       free_irq(irq, (void *)&hardware);
+
+       return result;
+}
+
+static int __devexit lirc_serial_remove(struct platform_device *dev)
+{
+       free_irq(irq, (void *)&hardware);
+
+       if (iommap != 0)
+               release_mem_region(iommap, 8 << ioshift);
+       else
+               release_region(io, 8);
+
+       return 0;
 }
 
 static int set_use_inc(void *data)
@@ -1076,16 +1098,6 @@ static struct lirc_driver driver = {
 
 static struct platform_device *lirc_serial_dev;
 
-static int __devinit lirc_serial_probe(struct platform_device *dev)
-{
-       return 0;
-}
-
-static int __devexit lirc_serial_remove(struct platform_device *dev)
-{
-       return 0;
-}
-
 static int lirc_serial_suspend(struct platform_device *dev,
                               pm_message_t state)
 {
@@ -1112,10 +1124,8 @@ static int lirc_serial_resume(struct platform_device *dev)
 {
        unsigned long flags;
 
-       if (hardware_init_port() < 0) {
-               lirc_serial_exit();
+       if (hardware_init_port() < 0)
                return -EINVAL;
-       }
 
        spin_lock_irqsave(&hardware[type].lock, flags);
        /* Enable Interrupt */
@@ -1188,10 +1198,6 @@ static int __init lirc_serial_init_module(void)
 {
        int result;
 
-       result = lirc_serial_init();
-       if (result)
-               return result;
-
        switch (type) {
        case LIRC_HOMEBREW:
        case LIRC_IRDEO:
@@ -1211,8 +1217,7 @@ static int __init lirc_serial_init_module(void)
                break;
 #endif
        default:
-               result = -EINVAL;
-               goto exit_serial_exit;
+               return -EINVAL;
        }
        if (!softcarrier) {
                switch (type) {
@@ -1228,37 +1233,26 @@ static int __init lirc_serial_init_module(void)
                }
        }
 
-       result = init_port();
-       if (result < 0)
-               goto exit_serial_exit;
+       result = lirc_serial_init();
+       if (result)
+               return result;
+
        driver.features = hardware[type].features;
        driver.dev = &lirc_serial_dev->dev;
        driver.minor = lirc_register_driver(&driver);
        if (driver.minor < 0) {
                printk(KERN_ERR  LIRC_DRIVER_NAME
                       ": register_chrdev failed!\n");
-               result = -EIO;
-               goto exit_release;
+               lirc_serial_exit();
+               return -EIO;
        }
        return 0;
-exit_release:
-       release_region(io, 8);
-exit_serial_exit:
-       lirc_serial_exit();
-       return result;
 }
 
 static void __exit lirc_serial_exit_module(void)
 {
-       lirc_serial_exit();
-
-       free_irq(irq, (void *)&hardware);
-
-       if (iommap != 0)
-               release_mem_region(iommap, 8 << ioshift);
-       else
-               release_region(io, 8);
        lirc_unregister_driver(driver.minor);
+       lirc_serial_exit();
        dprintk("cleaned up module\n");
 }
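
The probe/remove split above follows the usual acquire-in-order, release-in-reverse shape: IRQ first, then the I/O region, then hardware setup, with each failure label unwinding only what was already taken. A compact, self-contained sketch of that shape; the take_*/drop_* stubs are placeholders for request_irq(), request_region() and hardware_init_port():

#include <linux/platform_device.h>

/* Placeholder resource helpers; each returns 0 on success. */
static int take_irq(void)     { return 0; }
static int take_region(void)  { return 0; }
static int init_hw(void)      { return 0; }
static void drop_region(void) { }
static void drop_irq(void)    { }

static int __devinit example_probe(struct platform_device *dev)
{
        int ret;

        ret = take_irq();
        if (ret)
                return ret;

        ret = take_region();
        if (ret)
                goto exit_free_irq;

        ret = init_hw();
        if (ret)
                goto exit_release_region;

        return 0;

exit_release_region:
        drop_region();
exit_free_irq:
        drop_irq();
        return ret;
}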
 
index ca098cabc2bc8141b68aee51f3f210fbfb8a7c7a..02fafecd47738a23dc9bf40de76e59b374653e4b 100644 (file)
@@ -916,9 +916,10 @@ static int qt2_ioctl(struct tty_struct *tty,
                dbg("%s() port %d, cmd == TIOCMIWAIT enter",
                        __func__, port->number);
                prev_msr_value = port_extra->shadowMSR  & QT2_SERIAL_MSR_MASK;
+               barrier();
+               __set_current_state(TASK_INTERRUPTIBLE);
                while (1) {
                        add_wait_queue(&port_extra->wait, &wait);
-                       set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                        dbg("%s(): port %d, cmd == TIOCMIWAIT here\n",
                                __func__, port->number);
@@ -926,9 +927,12 @@ static int qt2_ioctl(struct tty_struct *tty,
                        /* see if a signal woke us up */
                        if (signal_pending(current))
                                return -ERESTARTSYS;
+                       set_current_state(TASK_INTERRUPTIBLE);
                        msr_value = port_extra->shadowMSR & QT2_SERIAL_MSR_MASK;
-                       if (msr_value == prev_msr_value)
+                       if (msr_value == prev_msr_value) {
+                               __set_current_state(TASK_RUNNING);
                                return -EIO;  /* no change - error */
+                       }
                        if ((arg & TIOCM_RNG &&
                                ((prev_msr_value & QT2_SERIAL_MSR_RI) ==
                                        (msr_value & QT2_SERIAL_MSR_RI))) ||
@@ -941,6 +945,7 @@ static int qt2_ioctl(struct tty_struct *tty,
                                (arg & TIOCM_CTS &&
                                ((prev_msr_value & QT2_SERIAL_MSR_CTS) ==
                                        (msr_value & QT2_SERIAL_MSR_CTS)))) {
+                               __set_current_state(TASK_RUNNING);
                                return 0;
                        }
                } /* end infinite while */
index 21ce2af447b527c61ddab279ee6533a6c74ee69b..6d88d1a45f17f72d5959d336161c12da2266575f 100644 (file)
@@ -86,6 +86,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        {USB_DEVICE(0x0DF6, 0x0045)},
        {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
        {USB_DEVICE(0x0DF6, 0x004B)},
+       {USB_DEVICE(0x0DF6, 0x005B)},
+       {USB_DEVICE(0x0DF6, 0x005D)},
        {USB_DEVICE(0x0DF6, 0x0063)},
        /* Sweex */
        {USB_DEVICE(0x177F, 0x0154)},
index 12f5eba0355ca7e58dec234cde5bab992e128b71..48aa61eb9c7463894f175ceaa507492a5b3dc223 100644 (file)
@@ -24,7 +24,6 @@ static int debug;
 #define DRIVER_DESC "Quatech USB to Serial Driver"
 
 #define        USB_VENDOR_ID_QUATECH                   0x061d  /* Quatech VID */
-#define QUATECH_SSU100 0xC020  /* SSU100 */
 #define QUATECH_SSU200 0xC030  /* SSU200 */
 #define QUATECH_DSU100 0xC040  /* DSU100 */
 #define QUATECH_DSU200 0xC050  /* DSU200 */
@@ -127,7 +126,6 @@ static int debug;
 #define RS232_MODE          0x00
 
 static const struct usb_device_id serqt_id_table[] = {
-       {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU100)},
        {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU200)},
        {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU100)},
        {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU200)},
@@ -775,7 +773,6 @@ static int qt_startup(struct usb_serial *serial)
        }
 
        switch (serial->dev->descriptor.idProduct) {
-       case QUATECH_SSU100:
        case QUATECH_DSU100:
        case QUATECH_QSU100:
        case QUATECH_ESU100A:
index e42ce9dab7ac5cf15676b7ac935a0fa21dea5431..5c4b5d94450818ef5daf540ee7d785eb2d70de1d 100644 (file)
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 {
        struct usbip_device *ud = &vdev->ud;
        struct urb *urb;
+       unsigned long flags;
 
        spin_lock(&vdev->priv_lock);
        urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 
        usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
        usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
 
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 {
        struct vhci_unlink *unlink;
        struct urb *urb;
+       unsigned long flags;
 
        usbip_dump_header(pdu);
 
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
                urb->status = pdu->u.ret_unlink.status;
                pr_info("urb->status %d\n", urb->status);
 
-               spin_lock(&the_controller->lock);
+               spin_lock_irqsave(&the_controller->lock, flags);
                usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
 
                usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
                                     urb->status);
index e05fc25911474b244b57e3c20907e6d15f53f950..cd2456a3cbd824165358b93b3afbe26b559f3e76 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/err.h>
 #include <linux/switch.h>
+#include <linux/hrtimer.h>
 
 struct class *switch_class;
 static atomic_t device_count;
@@ -61,8 +62,9 @@ void switch_set_state(struct switch_dev *sdev, int state)
 {
        char name_buf[120];
        char state_buf[120];
+       char timestamp_buf[120];
        char *prop_buf;
-       char *envp[3];
+       char *envp[4];
        int env_offset = 0;
        int length;
 
@@ -87,6 +89,9 @@ void switch_set_state(struct switch_dev *sdev, int state)
                                        "SWITCH_STATE=%s", prop_buf);
                                envp[env_offset++] = state_buf;
                        }
+                       snprintf(timestamp_buf, sizeof(timestamp_buf),
+                                "SWITCH_TIME=%llu", ktime_to_ns(ktime_get()));
+                       envp[env_offset++] = timestamp_buf;
                        envp[env_offset] = NULL;
                        kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
                        free_page((unsigned long)prop_buf);
index 70c2e7fa66643f7a7cdf040c62d74e977d7b9d1b..f7c3cfbbabaced66d961f86b6ef30dd30640a0a3 100644 (file)
@@ -127,6 +127,24 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
                set_host_byte(sc, DID_NO_CONNECT);
                return NULL;
        }
+       /*
+        * Because some userspace code via scsi-generic do not memset their
+        * associated read buffers, go ahead and do that here for type
+        * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
+        * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
+        * by target core in transport_generic_allocate_tasks() ->
+        * transport_generic_cmd_sequencer().
+        */
+       if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
+           se_cmd->data_direction == DMA_FROM_DEVICE) {
+               struct scatterlist *sg = scsi_sglist(sc);
+               unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
+
+               if (buf != NULL) {
+                       memset(buf, 0, sg->length);
+                       kunmap(sg_page(sg));
+               }
+       }
 
        transport_device_setup_cmd(se_cmd);
        return se_cmd;
@@ -887,6 +905,9 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 
        sc->result = SAM_STAT_GOOD;
        set_host_byte(sc, DID_OK);
+       if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
+           (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
+               scsi_set_resid(sc, se_cmd->residual_count);
        sc->scsi_done(sc);
        return 0;
 }
@@ -912,6 +933,9 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
                sc->result = se_cmd->scsi_status;
 
        set_host_byte(sc, DID_OK);
+       if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
+           (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
+               scsi_set_resid(sc, se_cmd->residual_count);
        sc->scsi_done(sc);
        return 0;
 }
index 47abb42d9c36b04abf0f2e3b735465bf96e0fb88..c9674059213e963e88cea6b0b15598422ea2d73d 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/configfs.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
@@ -60,10 +61,30 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
        unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
        u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
                                    Target port group descriptor */
+       /*
+        * Need at least 4 bytes of response data or else we can't
+        * even fit the return data length.
+        */
+       if (cmd->data_length < 4) {
+               pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
+                       " too small\n", cmd->data_length);
+               return -EINVAL;
+       }
 
        spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
                        tg_pt_gp_list) {
+               /*
+                * Check if the Target port group and Target port descriptor list
+                * based on tg_pt_gp_members count will fit into the response payload.
+                * Otherwise, bump rd_len to let the initiator know we have exceeded
+                * the allocation length and the response is truncated.
+                */
+               if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
+                    cmd->data_length) {
+                       rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
+                       continue;
+               }
                /*
                 * PREF: Preferred target port bit, determine if this
                 * bit should be set for port group.
@@ -218,8 +239,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
                 * changed.
                 */
                if (primary) {
-                       tg_pt_id = ((ptr[2] << 8) & 0xff);
-                       tg_pt_id |= (ptr[3] & 0xff);
+                       tg_pt_id = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching target port group ID from
                         * the global tg_pt_gp list
@@ -260,8 +280,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
                         * the Target Port in question for the incoming
                         * SET_TARGET_PORT_GROUPS op.
                         */
-                       rtpi = ((ptr[2] << 8) & 0xff);
-                       rtpi |= (ptr[3] & 0xff);
+                       rtpi = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching relative target port identifier
                         * for the struct se_device storage object.
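
The expression being replaced here was simply broken: masking with 0xff after the left shift throws away the shifted byte, so the high byte of the 16-bit identifier was always lost. A standalone worked example, plain C compiled in userspace for clarity:

#include <stdio.h>

int main(void)
{
        unsigned char ptr[4] = { 0x00, 0x00, 0x12, 0x34 };
        unsigned short old_way, new_way;

        old_way  = ((ptr[2] << 8) & 0xff);   /* 0x1200 & 0xff == 0x00  */
        old_way |= (ptr[3] & 0xff);          /* so old_way   == 0x0034 */
        new_way  = (ptr[2] << 8) | ptr[3];   /* what get_unaligned_be16()
                                                yields here:   0x1234  */

        printf("old 0x%04x, new 0x%04x\n",
               (unsigned)old_way, (unsigned)new_way);
        return 0;
}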
index 7f19c8b7b84c704c0ab74ffdef7a19dca63737e2..05584010e70e2cf8a079d3e1d84d452b727a3bf7 100644 (file)
@@ -83,6 +83,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
                buf[1] = 0x80;
        buf[2] = dev->transport->get_device_rev(dev);
 
+       /*
+        * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
+        *
+        * SPC4 says:
+        *   A RESPONSE DATA FORMAT field set to 2h indicates that the
+        *   standard INQUIRY data is in the format defined in this
+        *   standard. Response data format values less than 2h are
+        *   obsolete. Response data format values greater than 2h are
+        *   reserved.
+        */
+       buf[3] = 2;
+
        /*
         * Enable SCCS and TPGS fields for Emulated ALUA
         */
@@ -94,7 +106,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
                return 0;
        }
 
-       buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
+       buf[7] = 0x2; /* CmdQue=1 */
 
        /*
         * Do not include vendor, product, reversion info in INQUIRY
index b662db3a320bb96f272cbf9a831c1c8813e89952..98e12d31c9c23f67e89bc1266e93fffb21138f56 100644 (file)
@@ -471,6 +471,7 @@ static int core_scsi3_pr_seq_non_holder(
        case READ_MEDIA_SERIAL_NUMBER:
        case REPORT_LUNS:
        case REQUEST_SENSE:
+       case PERSISTENT_RESERVE_IN:
                ret = 0; /*/ Allowed CDBs */
                break;
        default:
@@ -3079,7 +3080,7 @@ static int core_scsi3_pro_preempt(
                        if (!(calling_it_nexus))
                                core_scsi3_ua_allocate(pr_reg_nacl,
                                        pr_res_mapped_lun, 0x2A,
-                                       ASCQ_2AH_RESERVATIONS_PREEMPTED);
+                                       ASCQ_2AH_REGISTRATIONS_PREEMPTED);
                }
                spin_unlock(&pr_tmpl->registration_lock);
                /*
@@ -3191,7 +3192,7 @@ static int core_scsi3_pro_preempt(
                 *    additional sense code set to REGISTRATIONS PREEMPTED;
                 */
                core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
-                               ASCQ_2AH_RESERVATIONS_PREEMPTED);
+                               ASCQ_2AH_REGISTRATIONS_PREEMPTED);
        }
        spin_unlock(&pr_tmpl->registration_lock);
        /*
index 4b9b7169bdd96957d9a6054ea68ec99c897a2381..d3a7342317ea479323bd643cbf1f88e7d5e86c2c 100644 (file)
@@ -2777,10 +2777,15 @@ static inline u32 transport_get_sectors_6(
 
        /*
         * Everything else assume TYPE_DISK Sector CDB location.
-        * Use 8-bit sector value.
+        * Use 8-bit sector value.  SBC-3 says:
+        *
+        *   A TRANSFER LENGTH field set to zero specifies that 256
+        *   logical blocks shall be written.  Any other value
+        *   specifies the number of logical blocks that shall be
+        *   written.
         */
 type_disk:
-       return (u32)cdb[4];
+       return cdb[4] ? : 256;
 }
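
The "cdb[4] ? : 256" form encodes the SBC-3 rule quoted in the comment: a TRANSFER LENGTH of zero in a 6-byte CDB means 256 logical blocks, anything else means exactly that many. A tiny standalone check:

#include <stdio.h>

/* Same rule as transport_get_sectors_6(), written without the GNU ?: */
static unsigned int sectors_6(const unsigned char *cdb)
{
        return cdb[4] ? cdb[4] : 256;
}

int main(void)
{
        unsigned char read6_zero[6] = { 0x08, 0, 0, 0, 0x00, 0 };
        unsigned char read6_16[6]   = { 0x08, 0, 0, 0, 0x10, 0 };

        printf("%u %u\n", sectors_6(read6_zero), sectors_6(read6_16));
        /* prints: 256 16 */
        return 0;
}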
 
 static inline u32 transport_get_sectors_10(
@@ -5663,6 +5668,8 @@ int transport_send_check_condition_and_sense(
        case TCM_SECTOR_COUNT_TOO_MANY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID COMMAND OPERATION CODE */
@@ -5671,6 +5678,7 @@ int transport_send_check_condition_and_sense(
        case TCM_UNKNOWN_MODE_PAGE:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN CDB */
@@ -5679,6 +5687,7 @@ int transport_send_check_condition_and_sense(
        case TCM_CHECK_CONDITION_ABORT_CMD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* BUS DEVICE RESET FUNCTION OCCURRED */
@@ -5688,6 +5697,7 @@ int transport_send_check_condition_and_sense(
        case TCM_INCORRECT_AMOUNT_OF_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
@@ -5698,22 +5708,25 @@ int transport_send_check_condition_and_sense(
        case TCM_INVALID_CDB_FIELD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
-               /* ABORTED COMMAND */
-               buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* ILLEGAL REQUEST */
+               buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN CDB */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
                break;
        case TCM_INVALID_PARAMETER_LIST:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
-               /* ABORTED COMMAND */
-               buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* ILLEGAL REQUEST */
+               buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN PARAMETER LIST */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
                break;
        case TCM_UNEXPECTED_UNSOLICITED_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
@@ -5724,6 +5737,7 @@ int transport_send_check_condition_and_sense(
        case TCM_SERVICE_CRC_ERROR:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* PROTOCOL SERVICE CRC ERROR */
@@ -5734,6 +5748,7 @@ int transport_send_check_condition_and_sense(
        case TCM_SNACK_REJECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* READ ERROR */
@@ -5744,6 +5759,7 @@ int transport_send_check_condition_and_sense(
        case TCM_WRITE_PROTECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* DATA PROTECT */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
                /* WRITE PROTECTED */
@@ -5752,6 +5768,7 @@ int transport_send_check_condition_and_sense(
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* UNIT ATTENTION */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
                core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@@ -5761,6 +5778,7 @@ int transport_send_check_condition_and_sense(
        case TCM_CHECK_CONDITION_NOT_READY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* Not Ready */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
                transport_get_sense_codes(cmd, &asc, &ascq);
@@ -5771,6 +5789,7 @@ int transport_send_check_condition_and_sense(
        default:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT COMMUNICATION FAILURE */
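/*
 * Illustrative sketch of the fixed-format sense layout the hunks above fill
 * in.  Byte positions follow SPC fixed-format sense data; the driver's
 * SPC_*_OFFSET macros are assumed to correspond to these offsets.  An
 * ADDITIONAL SENSE LENGTH of 10 makes the descriptor 18 bytes in total.
 */
#include <stdint.h>
#include <string.h>

static void fill_fixed_sense(uint8_t *buf, uint8_t key, uint8_t asc,
                             uint8_t ascq)
{
        memset(buf, 0, 18);
        buf[0]  = 0x70;         /* current error, fixed format */
        buf[2]  = key;          /* sense key */
        buf[7]  = 10;           /* ADDITIONAL SENSE LENGTH (bytes 8..17) */
        buf[12] = asc;          /* additional sense code */
        buf[13] = ascq;         /* additional sense code qualifier */
}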
index b2a106729d4914eca2c275adb956a944939d9b99..3c3fa84e9effc276deed994cccc3118a73824598 100644 (file)
@@ -371,10 +371,12 @@ static void ft_send_resp_status(struct fc_lport *lport,
 
        fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
        sp = fr_seq(fp);
-       if (sp)
+       if (sp) {
                lport->tt.seq_send(lport, sp, fp);
-       else
+               lport->tt.exch_done(sp);
+       } else {
                lport->tt.frame_send(lport, fp);
+       }
 }
 
 /*
index 220579592c203a47c4441c5562f2f4fece0edd8b..34111486baac0362fc2e1f90e8696cd62c01de8e 100644 (file)
@@ -1113,8 +1113,10 @@ static int set_serial_info(struct async_struct * info,
                    (new_serial.close_delay != state->close_delay) ||
                    (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
                    ((new_serial.flags & ~ASYNC_USR_MASK) !=
-                    (state->flags & ~ASYNC_USR_MASK)))
+                    (state->flags & ~ASYNC_USR_MASK))) {
+                       tty_unlock();
                        return -EPERM;
+               }
                state->flags = ((state->flags & ~ASYNC_USR_MASK) |
                               (new_serial.flags & ASYNC_USR_MASK));
                info->flags = ((info->flags & ~ASYNC_USR_MASK) |
index 435f6facbc238606feeea4ce26b508ed34ddb6af..44fbebab5075f98f337d880288993b01ad9e7271 100644 (file)
@@ -46,6 +46,7 @@ static inline char __dcc_getchar(void)
 
        asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
                : "=r" (__c));
+       isb();
 
        return __c;
 }
@@ -55,6 +56,7 @@ static inline void __dcc_putchar(char c)
        asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
                : /* no output register */
                : "r" (c));
+       isb();
 }
 
 static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
index ba679ce0a77456b1ecbeb6e674b67fbbb4ab1dac..8f82f7ab3541e4156c19518ca4c341152754b954 100644 (file)
@@ -1330,7 +1330,7 @@ static void moxa_start(struct tty_struct *tty)
        if (ch == NULL)
                return;
 
-       if (!(ch->statusflags & TXSTOPPED))
+       if (!test_bit(TXSTOPPED, &ch->statusflags))
                return;
 
        MoxaPortTxEnable(ch);
index e809e9d4683c6d82197cbd569c12b1375df8be99..e18604b3fc7d6c00f13bb05079f48554dca143aa 100644 (file)
@@ -670,12 +670,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 
        nonseekable_open(inode, filp);
 
+       retval = tty_alloc_file(filp);
+       if (retval)
+               return retval;
+
        /* find a device that is not in use. */
        tty_lock();
        index = devpts_new_index(inode);
        tty_unlock();
-       if (index < 0)
-               return index;
+       if (index < 0) {
+               retval = index;
+               goto err_file;
+       }
 
        mutex_lock(&tty_mutex);
        tty_lock();
@@ -689,27 +695,27 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 
        set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
 
-       retval = tty_add_file(tty, filp);
-       if (retval)
-               goto out;
+       tty_add_file(tty, filp);
 
        retval = devpts_pty_new(inode, tty->link);
        if (retval)
-               goto out1;
+               goto err_release;
 
        retval = ptm_driver->ops->open(tty, filp);
        if (retval)
-               goto out2;
-out1:
+               goto err_release;
+
        tty_unlock();
-       return retval;
-out2:
+       return 0;
+err_release:
        tty_unlock();
        tty_release(inode, filp);
        return retval;
 out:
        devpts_kill_index(inode, index);
        tty_unlock();
+err_file:
+       tty_free_file(filp);
        return retval;
 }
 
index ff48fdb5c0bf3b9cc9dd1ff4ef32f3fc125bac6c..21098ed996352af78510a77062b33a577aeb85cf 100644 (file)
@@ -1459,51 +1459,61 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .vendor         = PCI_VENDOR_ID_INTEL,
                .device         = 0x8811,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        {
                .vendor         = PCI_VENDOR_ID_INTEL,
                .device         = 0x8812,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        {
                .vendor         = PCI_VENDOR_ID_INTEL,
                .device         = 0x8813,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        {
                .vendor         = PCI_VENDOR_ID_INTEL,
                .device         = 0x8814,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        {
                .vendor         = 0x10DB,
                .device         = 0x8027,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        {
                .vendor         = 0x10DB,
                .device         = 0x8028,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        {
                .vendor         = 0x10DB,
                .device         = 0x8029,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        {
                .vendor         = 0x10DB,
                .device         = 0x800C,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        {
                .vendor         = 0x10DB,
                .device         = 0x800D,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        {
                .vendor         = 0x10DB,
                .device         = 0x800D,
                .init           = pci_eg20t_init,
+               .setup          = pci_default_setup,
        },
        /*
         * Cronyx Omega PCI (PLX-chip based)
index 4763420c6a4af57a62a14b9177b7096c1cb20ca4..358f51500f67e39cfa57c3d499702d814b877052 100644 (file)
@@ -1650,7 +1650,7 @@ config SERIAL_IFX6X60
          Support for the IFX6x60 modem devices on Intel MID platforms.
 
 config SERIAL_PCH_UART
-       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) UART"
+       tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
        depends on PCI
        select SERIAL_CORE
        help
@@ -1658,12 +1658,12 @@ config SERIAL_PCH_UART
           which is an IOH (Input/Output Hub) for x86 embedded processors.
           If PCH_DMA is enabled, this PCH UART works in DMA mode.
 
-         This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7213 and ML7223.
-         ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-         for MP(Media Phone) use.
-         ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-         ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+         This driver can also be used for the LAPIS Semiconductor IOH
+         (Input/Output Hub) devices ML7213, ML7223 and ML7831.
+         ML7213 is for IVI (In-Vehicle Infotainment) use, ML7223 is for
+         MP (Media Phone) use and ML7831 is for general purpose use.
+         ML7213/ML7223/ML7831 are companion chips for the Intel Atom E6xx
+         series and are fully compatible with the Intel EG20T PCH.
 
 config SERIAL_MSM_SMD
        bool "Enable tty device interface for some SMD ports"
index 50bc5a5ac6533af4d9efceb1e21882eb489914ac..37db1d5898e05fa72c3435e0a66d3d0d0a99a0b3 100644 (file)
@@ -555,7 +555,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res_mem)
                port->mapbase = res_mem->start;
-       else if (platp->mapbase)
+       else if (platp)
                port->mapbase = platp->mapbase;
        else
                return -EINVAL;
@@ -563,7 +563,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
        res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (res_irq)
                port->irq = res_irq->start;
-       else if (platp->irq)
+       else if (platp)
                port->irq = platp->irq;
 
        /* Check platform data first so we can override device node data */
index f5f6831b0a640671d9e91c3d3e17a4d682e35578..21dc4b761eda409fcbc04a442a9e33724293b2c3 100644 (file)
@@ -1376,6 +1376,10 @@ static int pl011_startup(struct uart_port *port)
 
        uap->port.uartclk = clk_get_rate(uap->clk);
 
+       /* Clear pending error and receive interrupts */
+       writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
+              UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
+
        /*
         * Allocate the IRQ
         */
@@ -1410,10 +1414,6 @@ static int pl011_startup(struct uart_port *port)
        cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
        writew(cr, uap->port.membase + UART011_CR);
 
-       /* Clear pending error interrupts */
-       writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
-              uap->port.membase + UART011_ICR);
-
        /*
         * initialise the old status of the modem signals
         */
@@ -1428,6 +1428,9 @@ static int pl011_startup(struct uart_port *port)
         * as well.
         */
        spin_lock_irq(&uap->port.lock);
+       /* Clear out any spuriously appearing RX interrupts */
+        writew(UART011_RTIS | UART011_RXIS,
+               uap->port.membase + UART011_ICR);
        uap->im = UART011_RTIM;
        if (!pl011_dma_rx_running(uap))
                uap->im |= UART011_RXIM;
@@ -1733,9 +1736,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
 {
        struct uart_amba_port *uap = amba_ports[co->index];
        unsigned int status, old_cr, new_cr;
+       unsigned long flags;
+       int locked = 1;
 
        clk_enable(uap->clk);
 
+       local_irq_save(flags);
+       if (uap->port.sysrq)
+               locked = 0;
+       else if (oops_in_progress)
+               locked = spin_trylock(&uap->port.lock);
+       else
+               spin_lock(&uap->port.lock);
+
        /*
         *      First save the CR then disable the interrupts
         */
@@ -1755,6 +1768,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
        } while (status & UART01x_FR_BUSY);
        writew(old_cr, uap->port.membase + UART011_CR);
 
+       if (locked)
+               spin_unlock(&uap->port.lock);
+       local_irq_restore(flags);
+
        clk_disable(uap->clk);
 }
 
@@ -1906,6 +1923,10 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
        uap->port.line = i;
        pl011_dma_probe(uap);
 
+       /* Ensure interrupts from this UART are masked and cleared */
+       writew(0, uap->port.membase + UART011_IMSC);
+       writew(0xffff, uap->port.membase + UART011_ICR);
+
        snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
 
        amba_ports[i] = uap;
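/*
 * Minimal sketch of the console-write locking convention added to
 * pl011_console_write() above: skip the port lock when re-entered via
 * sysrq, only try-lock while an oops is in progress, and lock normally
 * otherwise.  The lock/trylock callbacks stand in for the real spinlock
 * primitives.
 */
static int console_lock_policy(int sysrq_active, int oops_in_progress,
                               int (*trylock)(void), void (*lock)(void))
{
        int locked = 1;

        if (sysrq_active)
                locked = 0;             /* the lock is already held */
        else if (oops_in_progress)
                locked = trylock();     /* never deadlock while oopsing */
        else
                lock();

        return locked;                  /* caller unlocks only if nonzero */
}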
index af9b7814965a461921d337047c068aed9bb5d051..b989495c763eb15fd548167610555227b3c01d62 100644 (file)
@@ -199,8 +199,9 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
 {
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
        unsigned int mode;
+       unsigned long flags;
 
-       spin_lock(&port->lock);
+       spin_lock_irqsave(&port->lock, flags);
 
        /* Disable interrupts */
        UART_PUT_IDR(port, atmel_port->tx_done_mask);
@@ -231,7 +232,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
        /* Enable interrupts */
        UART_PUT_IER(port, atmel_port->tx_done_mask);
 
-       spin_unlock(&port->lock);
+       spin_unlock_irqrestore(&port->lock, flags);
 
 }
 
index e6c3dbd781d61fd21e9dcafd3d85c1a0799f1f66..836fe2731234bbeabe690ef41ee230588cedea1b 100644 (file)
@@ -154,10 +154,9 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
                port->x_char = 0;
                return IRQ_HANDLED;
        }
-       if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
-               clps711xuart_stop_tx(port);
-               return IRQ_HANDLED;
-       }
+
+       if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+               goto disable_tx_irq;
 
        count = port->fifosize >> 1;
        do {
@@ -171,8 +170,11 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
 
-       if (uart_circ_empty(xmit))
-               clps711xuart_stop_tx(port);
+       if (uart_circ_empty(xmit)) {
+       disable_tx_irq:
+               disable_irq_nosync(TX_IRQ(port));
+               tx_enabled(port) = 0;
+       }
 
        return IRQ_HANDLED;
 }
index 225123b37f190de6ea448cba50be732ea20c3885..58be715913cdcc5d0e7f23a1a2a8c1128c118947 100644 (file)
@@ -4450,7 +4450,7 @@ static int __init rs_init(void)
 
 #if defined(CONFIG_ETRAX_RS485)
 #if defined(CONFIG_ETRAX_RS485_ON_PA)
-       if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit,
+       if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
                        rs485_pa_bit)) {
                printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
                        "RS485 pin\n");
@@ -4459,7 +4459,7 @@ static int __init rs_init(void)
        }
 #endif
 #if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
-       if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit,
+       if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
                        rs485_port_g_bit)) {
                printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
                        "RS485 pin\n");
index b704c8ce0d715bcb6555a263b4c450c154a66277..5b837e749c16662981d2b28e984f5426ddf34aa2 100644 (file)
@@ -183,10 +183,8 @@ struct jsm_board
 /* Our Read/Error/Write queue sizes */
 #define RQUEUEMASK     0x1FFF          /* 8 K - 1 */
 #define EQUEUEMASK     0x1FFF          /* 8 K - 1 */
-#define WQUEUEMASK     0x0FFF          /* 4 K - 1 */
 #define RQUEUESIZE     (RQUEUEMASK + 1)
 #define EQUEUESIZE     RQUEUESIZE
-#define WQUEUESIZE     (WQUEUEMASK + 1)
 
 
 /************************************************************************
@@ -226,10 +224,6 @@ struct jsm_channel {
        u16             ch_e_head;      /* Head location of the error queue */
        u16             ch_e_tail;      /* Tail location of the error queue */
 
-       u8              *ch_wqueue;     /* Our write queue buffer - malloc'ed */
-       u16             ch_w_head;      /* Head location of the write queue */
-       u16             ch_w_tail;      /* Tail location of the write queue */
-
        u64             ch_rxcount;     /* total of data received so far */
        u64             ch_txcount;     /* total of data transmitted so far */
 
@@ -378,7 +372,6 @@ extern int  jsm_debug;
  * Prototypes for non-static functions used in more than one module
  *
  *************************************************************************/
-int jsm_tty_write(struct uart_port *port);
 int jsm_tty_init(struct jsm_board *);
 int jsm_uart_port_init(struct jsm_board *);
 int jsm_remove_uart_port(struct jsm_board *);
index 96da17868cf3c46bad25d743c4a304e37fca2c30..6c12d94e6d3fe40c06dc3742bc312037044e754d 100644 (file)
@@ -211,7 +211,6 @@ static void __devexit jsm_remove_one(struct pci_dev *pdev)
                if (brd->channels[i]) {
                        kfree(brd->channels[i]->ch_rqueue);
                        kfree(brd->channels[i]->ch_equeue);
-                       kfree(brd->channels[i]->ch_wqueue);
                        kfree(brd->channels[i]);
                }
        }
@@ -270,6 +269,7 @@ static void jsm_io_resume(struct pci_dev *pdev)
        struct jsm_board *brd = pci_get_drvdata(pdev);
 
        pci_restore_state(pdev);
+       pci_save_state(pdev);
 
        jsm_uart_port_init(brd);
 }
index 4538c3e3646ecf6bc648cbf8acac37f6ca0c6338..bd6e84699e1194552e4e4f7ffc68d9db066e1a62 100644 (file)
@@ -496,12 +496,15 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
        int s;
        int qlen;
        u32 len_written = 0;
+       struct circ_buf *circ;
 
        if (!ch)
                return;
 
+       circ = &ch->uart_port.state->xmit;
+
        /* No data to write to the UART */
-       if (ch->ch_w_tail == ch->ch_w_head)
+       if (uart_circ_empty(circ))
                return;
 
        /* If port is "stopped", don't send any data to the UART */
@@ -517,11 +520,10 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
                if (ch->ch_cached_lsr & UART_LSR_THRE) {
                        ch->ch_cached_lsr &= ~(UART_LSR_THRE);
 
-                       writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
+                       writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx);
                        jsm_printk(WRITE, INFO, &ch->ch_bd->pci_dev,
-                                       "Tx data: %x\n", ch->ch_wqueue[ch->ch_w_head]);
-                       ch->ch_w_tail++;
-                       ch->ch_w_tail &= WQUEUEMASK;
+                                       "Tx data: %x\n", circ->buf[circ->head]);
+                       circ->tail = (circ->tail + 1) & (UART_XMIT_SIZE - 1);
                        ch->ch_txcount++;
                }
                return;
@@ -536,36 +538,36 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
        n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;
 
        /* cache head and tail of queue */
-       head = ch->ch_w_head & WQUEUEMASK;
-       tail = ch->ch_w_tail & WQUEUEMASK;
-       qlen = (head - tail) & WQUEUEMASK;
+       head = circ->head & (UART_XMIT_SIZE - 1);
+       tail = circ->tail & (UART_XMIT_SIZE - 1);
+       qlen = uart_circ_chars_pending(circ);
 
        /* Find minimum of the FIFO space, versus queue length */
        n = min(n, qlen);
 
        while (n > 0) {
 
-               s = ((head >= tail) ? head : WQUEUESIZE) - tail;
+               s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail;
                s = min(s, n);
 
                if (s <= 0)
                        break;
 
-               memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
+               memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s);
                /* Add and flip queue if needed */
-               tail = (tail + s) & WQUEUEMASK;
+               tail = (tail + s) & (UART_XMIT_SIZE - 1);
                n -= s;
                ch->ch_txcount += s;
                len_written += s;
        }
 
        /* Update the final tail */
-       ch->ch_w_tail = tail & WQUEUEMASK;
+       circ->tail = tail & (UART_XMIT_SIZE - 1);
 
        if (len_written >= ch->ch_t_tlevel)
                ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
 
-       if (!jsm_tty_write(&ch->uart_port))
+       if (uart_circ_empty(circ))
                uart_write_wakeup(&ch->uart_port);
 }
 
@@ -946,7 +948,6 @@ static void neo_param(struct jsm_channel *ch)
        if ((ch->ch_c_cflag & (CBAUD)) == 0) {
                ch->ch_r_head = ch->ch_r_tail = 0;
                ch->ch_e_head = ch->ch_e_tail = 0;
-               ch->ch_w_head = ch->ch_w_tail = 0;
 
                neo_flush_uart_write(ch);
                neo_flush_uart_read(ch);
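/*
 * Sketch of the ring-buffer arithmetic used above once the private write
 * queue is replaced by the generic circ_buf.  XMIT_SIZE stands in for
 * UART_XMIT_SIZE and is assumed to be a power of two, so masking with
 * (XMIT_SIZE - 1) wraps an index without a modulo.
 */
#define XMIT_SIZE 4096

/* Longest run that can be copied in one memcpy before hitting head or the
 * end of the buffer. */
static int contiguous_pending(int head, int tail)
{
        return ((head >= tail) ? head : XMIT_SIZE) - tail;
}

static int advance(int index, int count)
{
        return (index + count) & (XMIT_SIZE - 1);
}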
index 7a4a914ecff0b0b96186a61812528d2750d05f1a..434bd881fcae84dd26386502c4817eaf87dd2c21 100644 (file)
@@ -118,6 +118,19 @@ static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
        udelay(10);
 }
 
+/*
+ * jsm_tty_write()
+ *
+ * Take data from the user or kernel and send it out to the FEP.
+ * In here exists all the Transparent Print magic as well.
+ */
+static void jsm_tty_write(struct uart_port *port)
+{
+       struct jsm_channel *channel;
+       channel = container_of(port, struct jsm_channel, uart_port);
+       channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
+}
+
 static void jsm_tty_start_tx(struct uart_port *port)
 {
        struct jsm_channel *channel = (struct jsm_channel *)port;
@@ -216,14 +229,6 @@ static int jsm_tty_open(struct uart_port *port)
                        return -ENOMEM;
                }
        }
-       if (!channel->ch_wqueue) {
-               channel->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
-               if (!channel->ch_wqueue) {
-                       jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
-                               "unable to allocate write queue buf");
-                       return -ENOMEM;
-               }
-       }
 
        channel->ch_flags &= ~(CH_OPENING);
        /*
@@ -237,7 +242,6 @@ static int jsm_tty_open(struct uart_port *port)
         */
        channel->ch_r_head = channel->ch_r_tail = 0;
        channel->ch_e_head = channel->ch_e_tail = 0;
-       channel->ch_w_head = channel->ch_w_tail = 0;
 
        brd->bd_ops->flush_uart_write(channel);
        brd->bd_ops->flush_uart_read(channel);
@@ -836,75 +840,3 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
                }
        }
 }
-
-/*
- * jsm_tty_write()
- *
- * Take data from the user or kernel and send it out to the FEP.
- * In here exists all the Transparent Print magic as well.
- */
-int jsm_tty_write(struct uart_port *port)
-{
-       int bufcount;
-       int data_count = 0,data_count1 =0;
-       u16 head;
-       u16 tail;
-       u16 tmask;
-       u32 remain;
-       int temp_tail = port->state->xmit.tail;
-       struct jsm_channel *channel = (struct jsm_channel *)port;
-
-       tmask = WQUEUEMASK;
-       head = (channel->ch_w_head) & tmask;
-       tail = (channel->ch_w_tail) & tmask;
-
-       if ((bufcount = tail - head - 1) < 0)
-               bufcount += WQUEUESIZE;
-
-       bufcount = min(bufcount, 56);
-       remain = WQUEUESIZE - head;
-
-       data_count = 0;
-       if (bufcount >= remain) {
-               bufcount -= remain;
-               while ((port->state->xmit.head != temp_tail) &&
-               (data_count < remain)) {
-                       channel->ch_wqueue[head++] =
-                       port->state->xmit.buf[temp_tail];
-
-                       temp_tail++;
-                       temp_tail &= (UART_XMIT_SIZE - 1);
-                       data_count++;
-               }
-               if (data_count == remain) head = 0;
-       }
-
-       data_count1 = 0;
-       if (bufcount > 0) {
-               remain = bufcount;
-               while ((port->state->xmit.head != temp_tail) &&
-                       (data_count1 < remain)) {
-                       channel->ch_wqueue[head++] =
-                               port->state->xmit.buf[temp_tail];
-
-                       temp_tail++;
-                       temp_tail &= (UART_XMIT_SIZE - 1);
-                       data_count1++;
-
-               }
-       }
-
-       port->state->xmit.tail = temp_tail;
-
-       data_count += data_count1;
-       if (data_count) {
-               head &= tmask;
-               channel->ch_w_head = head;
-       }
-
-       if (data_count) {
-               channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
-       }
-
-       return data_count;
-}
index 7e02c9c344fee3496814ebc8b55bbe21280f618b..5b3d063a4aa10809f5f6dffe0d446bc0e9a3c456 100644 (file)
@@ -368,6 +368,8 @@ static void mxs_auart_settermios(struct uart_port *u,
 
        writel(ctrl, u->membase + AUART_LINECTRL);
        writel(ctrl2, u->membase + AUART_CTRL2);
+
+       uart_update_timeout(u, termios->c_cflag, baud);
 }
 
 static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
index 465210930890b72ecdcaf1c70167b7211f43ed9e..101eda9f1965c776f61728c2fca93bf77f97007d 100644 (file)
@@ -256,6 +256,8 @@ enum pch_uart_num_t {
        pch_ml7213_uart2,
        pch_ml7223_uart0,
        pch_ml7223_uart1,
+       pch_ml7831_uart0,
+       pch_ml7831_uart1,
 };
 
 static struct pch_uart_driver_data drv_dat[] = {
@@ -268,6 +270,8 @@ static struct pch_uart_driver_data drv_dat[] = {
        [pch_ml7213_uart2] = {PCH_UART_2LINE, 2},
        [pch_ml7223_uart0] = {PCH_UART_8LINE, 0},
        [pch_ml7223_uart1] = {PCH_UART_2LINE, 1},
+       [pch_ml7831_uart0] = {PCH_UART_8LINE, 0},
+       [pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
 };
 
 static unsigned int default_baud = 9600;
@@ -598,7 +602,8 @@ static void pch_request_dma(struct uart_port *port)
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
 
-       dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0)); /* Get DMA's dev
+       dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number,
+                                      PCI_DEVFN(0xa, 0)); /* Get DMA's dev
                                                                information */
        /* Set Tx DMA */
        param = &priv->param_tx;
@@ -625,6 +630,7 @@ static void pch_request_dma(struct uart_port *port)
                dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n",
                        __func__);
                dma_release_channel(priv->chan_tx);
+               priv->chan_tx = NULL;
                return;
        }
 
@@ -1212,8 +1218,7 @@ static void pch_uart_shutdown(struct uart_port *port)
                dev_err(priv->port.dev,
                        "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret);
 
-       if (priv->use_dma_flag)
-               pch_free_dma(port);
+       pch_free_dma(port);
 
        free_irq(priv->port.irq, priv);
 }
@@ -1277,6 +1282,7 @@ static void pch_uart_set_termios(struct uart_port *port,
        if (rtn)
                goto out;
 
+       pch_uart_set_mctrl(&priv->port, priv->port.mctrl);
        /* Don't rewrite B0 */
        if (tty_termios_baud_rate(termios))
                tty_termios_encode_baud_rate(termios, baud, baud);
@@ -1348,9 +1354,11 @@ static int pch_uart_verify_port(struct uart_port *port,
                        __func__);
                return -EOPNOTSUPP;
 #endif
-               priv->use_dma = 1;
                priv->use_dma_flag = 1;
                dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
+               if (!priv->use_dma)
+                       pch_request_dma(port);
+               priv->use_dma = 1;
        }
 
        return 0;
@@ -1545,6 +1553,10 @@ static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
         .driver_data = pch_ml7223_uart0},
        {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D),
         .driver_data = pch_ml7223_uart1},
+       {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811),
+        .driver_data = pch_ml7831_uart0},
+       {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812),
+        .driver_data = pch_ml7831_uart1},
        {0,},
 };
 
index 4302e6e3768e504d76f63d2be40198195c303290..81243a62dd590118f047ec02274ab2166188cb96 100644 (file)
@@ -100,6 +100,16 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
        int max_count = 256;
 
        do {
+               /* work around Errata #20 according to
+                * Intel(R) PXA27x Processor Family
+                * Specification Update (May 2005)
+                *
+                * Step 2:
+                * Disable the Receiver Time Out Interrupt via IER[RTOIE]
+                */
+               up->ier &= ~UART_IER_RTOIE;
+               serial_out(up, UART_IER, up->ier);
+
                ch = serial_in(up, UART_RX);
                flag = TTY_NORMAL;
                up->port.icount.rx++;
@@ -156,6 +166,16 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
                *status = serial_in(up, UART_LSR);
        } while ((*status & UART_LSR_DR) && (max_count-- > 0));
        tty_flip_buffer_push(tty);
+
+       /* work around Errata #20 according to
+        * Intel(R) PXA27x Processor Family
+        * Specification Update (May 2005)
+        *
+        * Step 6:
+        * No more data in FIFO: Re-enable RTO interrupt via IER[RTOIE]
+        */
+       up->ier |= UART_IER_RTOIE;
+       serial_out(up, UART_IER, up->ier);
 }
 
 static void transmit_chars(struct uart_pxa_port *up)
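/*
 * Sketch of the mask/unmask pattern used by the Errata #20 workaround
 * above: clear the receiver-timeout interrupt enable bit in the cached
 * IER copy before draining the FIFO and restore it afterwards.  The bit
 * position and the callbacks are placeholders, not the real driver
 * symbols.
 */
#define IER_RTOIE (1u << 4)     /* assumed bit position, for illustration */

static unsigned int ier_shadow;

static void drain_rx_with_rto_masked(void (*write_ier)(unsigned int),
                                     void (*drain_fifo)(void))
{
        ier_shadow &= ~IER_RTOIE;       /* step 2: disable RTO interrupt */
        write_ier(ier_shadow);

        drain_fifo();                   /* read out all pending characters */

        ier_shadow |= IER_RTOIE;        /* step 6: re-enable RTO interrupt */
        write_ier(ier_shadow);
}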
index 62ba22f7d318cf1bd2da8f9b394e92cdfeefe505..2a106a94cdd009246462c4f254311dd05a38bdaf 100644 (file)
@@ -2006,6 +2006,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
                if (port->tty && port->tty->termios && termios.c_cflag == 0)
                        termios = *(port->tty->termios);
 
+               if (console_suspend_enabled)
+                       uart_change_pm(state, 0);
                uport->ops->set_termios(uport, &termios, NULL);
                if (console_suspend_enabled)
                        console_start(uport->cons);
@@ -2326,6 +2328,7 @@ void uart_unregister_driver(struct uart_driver *drv)
        tty_unregister_driver(p);
        put_tty_driver(p);
        kfree(drv->state);
+       drv->state = NULL;
        drv->tty_driver = NULL;
 }
 
index ebd8629c108ddc06f6b731831dbe962442a1868f..bead17e5634a3da16368644d97d28aed2b471b8a 100644 (file)
@@ -953,17 +953,20 @@ static void sci_dma_tx_complete(void *arg)
        port->icount.tx += sg_dma_len(&s->sg_tx);
 
        async_tx_ack(s->desc_tx);
-       s->cookie_tx = -EINVAL;
        s->desc_tx = NULL;
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
 
        if (!uart_circ_empty(xmit)) {
+               s->cookie_tx = 0;
                schedule_work(&s->work_tx);
-       } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
-               u16 ctrl = sci_in(port, SCSCR);
-               sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+       } else {
+               s->cookie_tx = -EINVAL;
+               if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+                       u16 ctrl = sci_in(port, SCSCR);
+                       sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+               }
        }
 
        spin_unlock_irqrestore(&port->lock, flags);
@@ -1225,8 +1228,10 @@ static void sci_start_tx(struct uart_port *port)
        }
 
        if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
-           s->cookie_tx < 0)
+           s->cookie_tx < 0) {
+               s->cookie_tx = 0;
                schedule_work(&s->work_tx);
+       }
 #endif
 
        if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
index b6f92d3001add88088f470daafde7d981f3f2a26..b44aef078f10d6e2a94cc99e21bf451a63a41e26 100644 (file)
@@ -193,8 +193,7 @@ static inline struct tty_struct *file_tty(struct file *file)
        return ((struct tty_file_private *)file->private_data)->tty;
 }
 
-/* Associate a new file with the tty structure */
-int tty_add_file(struct tty_struct *tty, struct file *file)
+int tty_alloc_file(struct file *file)
 {
        struct tty_file_private *priv;
 
@@ -202,15 +201,36 @@ int tty_add_file(struct tty_struct *tty, struct file *file)
        if (!priv)
                return -ENOMEM;
 
+       file->private_data = priv;
+
+       return 0;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+       struct tty_file_private *priv = file->private_data;
+
        priv->tty = tty;
        priv->file = file;
-       file->private_data = priv;
 
        spin_lock(&tty_files_lock);
        list_add(&priv->list, &tty->tty_files);
        spin_unlock(&tty_files_lock);
+}
 
-       return 0;
+/**
+ * tty_free_file - free file->private_data
+ *
+ * This shall be used only for fail path handling when tty_add_file was not
+ * called yet.
+ */
+void tty_free_file(struct file *file)
+{
+       struct tty_file_private *priv = file->private_data;
+
+       file->private_data = NULL;
+       kfree(priv);
 }
 
 /* Delete file from its tty */
@@ -221,8 +241,7 @@ void tty_del_file(struct file *file)
        spin_lock(&tty_files_lock);
        list_del(&priv->list);
        spin_unlock(&tty_files_lock);
-       file->private_data = NULL;
-       kfree(priv);
+       tty_free_file(file);
 }
 
 
@@ -1811,6 +1830,10 @@ static int tty_open(struct inode *inode, struct file *filp)
        nonseekable_open(inode, filp);
 
 retry_open:
+       retval = tty_alloc_file(filp);
+       if (retval)
+               return -ENOMEM;
+
        noctty = filp->f_flags & O_NOCTTY;
        index  = -1;
        retval = 0;
@@ -1823,6 +1846,7 @@ retry_open:
                if (!tty) {
                        tty_unlock();
                        mutex_unlock(&tty_mutex);
+                       tty_free_file(filp);
                        return -ENXIO;
                }
                driver = tty_driver_kref_get(tty->driver);
@@ -1855,6 +1879,7 @@ retry_open:
                }
                tty_unlock();
                mutex_unlock(&tty_mutex);
+               tty_free_file(filp);
                return -ENODEV;
        }
 
@@ -1862,6 +1887,7 @@ retry_open:
        if (!driver) {
                tty_unlock();
                mutex_unlock(&tty_mutex);
+               tty_free_file(filp);
                return -ENODEV;
        }
 got_driver:
@@ -1872,6 +1898,8 @@ got_driver:
                if (IS_ERR(tty)) {
                        tty_unlock();
                        mutex_unlock(&tty_mutex);
+                       tty_driver_kref_put(driver);
+                       tty_free_file(filp);
                        return PTR_ERR(tty);
                }
        }
@@ -1887,15 +1915,11 @@ got_driver:
        tty_driver_kref_put(driver);
        if (IS_ERR(tty)) {
                tty_unlock();
+               tty_free_file(filp);
                return PTR_ERR(tty);
        }
 
-       retval = tty_add_file(tty, filp);
-       if (retval) {
-               tty_unlock();
-               tty_release(inode, filp);
-               return retval;
-       }
+       tty_add_file(tty, filp);
 
        check_tty_count(tty, "tty_open");
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
index ef925d5817139417764e7b3d6dfc2108d86971c5..a76c808afadc919820639f1337bd5a55a04e3bdb 100644 (file)
@@ -36,6 +36,7 @@
 
 #include <linux/kmod.h>
 #include <linux/nsproxy.h>
+#include <linux/ratelimit.h>
 
 /*
  *     This guards the refcounted line discipline lists. The lock
@@ -548,15 +549,16 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
 /**
  *     tty_ldisc_wait_idle     -       wait for the ldisc to become idle
  *     @tty: tty to wait for
+ *     @timeout: for how long to wait at most
  *
  *     Wait for the line discipline to become idle. The discipline must
  *     have been halted for this to guarantee it remains idle.
  */
-static int tty_ldisc_wait_idle(struct tty_struct *tty)
+static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
 {
-       int ret;
+       long ret;
        ret = wait_event_timeout(tty_ldisc_idle,
-                       atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
+                       atomic_read(&tty->ldisc->users) == 1, timeout);
        if (ret < 0)
                return ret;
        return ret > 0 ? 0 : -EBUSY;
@@ -666,7 +668,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        tty_ldisc_flush_works(tty);
 
-       retval = tty_ldisc_wait_idle(tty);
+       retval = tty_ldisc_wait_idle(tty, 5 * HZ);
 
        tty_lock();
        mutex_lock(&tty->ldisc_mutex);
@@ -763,8 +765,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
        if (IS_ERR(ld))
                return -1;
 
-       WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
-
        tty_ldisc_close(tty, tty->ldisc);
        tty_ldisc_put(tty->ldisc);
        tty->ldisc = NULL;
@@ -839,7 +839,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
        tty_unlock();
        cancel_work_sync(&tty->buf.work);
        mutex_unlock(&tty->ldisc_mutex);
-
+retry:
        tty_lock();
        mutex_lock(&tty->ldisc_mutex);
 
@@ -848,6 +848,22 @@ void tty_ldisc_hangup(struct tty_struct *tty)
           it means auditing a lot of other paths so this is
           a FIXME */
        if (tty->ldisc) {       /* Not yet closed */
+               if (atomic_read(&tty->ldisc->users) != 1) {
+                       char cur_n[TASK_COMM_LEN], tty_n[64];
+                       long timeout = 3 * HZ;
+                       tty_unlock();
+
+                       while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
+                               timeout = MAX_SCHEDULE_TIMEOUT;
+                               printk_ratelimited(KERN_WARNING
+                                       "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
+                                       __func__, get_task_comm(cur_n, current),
+                                       tty_name(tty, tty_n));
+                       }
+                       mutex_unlock(&tty->ldisc_mutex);
+                       goto retry;
+               }
+
                if (reset == 0) {
 
                        if (!tty_ldisc_reinit(tty, tty->termios->c_line))
index 33d37d230f8f4335ee9d85336bcb3816c6e968f0..a4aaca0e014de1bbe8dd07dc0692b77ebd35a47a 100644 (file)
@@ -227,7 +227,6 @@ int tty_port_block_til_ready(struct tty_port *port,
        int do_clocal = 0, retval;
        unsigned long flags;
        DEFINE_WAIT(wait);
-       int cd;
 
        /* block if port is in the process of being closed */
        if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
@@ -284,11 +283,14 @@ int tty_port_block_til_ready(struct tty_port *port,
                                retval = -ERESTARTSYS;
                        break;
                }
-               /* Probe the carrier. For devices with no carrier detect this
-                  will always return true */
-               cd = tty_port_carrier_raised(port);
+               /*
+                * Probe the carrier. For devices with no carrier detect
+                * tty_port_carrier_raised will always return true.
+                * Never ask drivers if CLOCAL is set, this causes troubles
+                * on some hardware.
+                */
                if (!(port->flags & ASYNC_CLOSING) &&
-                               (do_clocal || cd))
+                               (do_clocal || tty_port_carrier_raised(port)))
                        break;
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
index 45d3e80156d45e15d08a1fa164686031126adc4c..f3438083a2854a9f9a9e9d8b90f9c772af192b6d 100644 (file)
@@ -516,6 +516,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
        int err = 0, err1, i;
        struct uni_pagedir *p, *q;
 
+       /* Save original vc_uni_pagedir_loc in case we allocate a new one */
        p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
        if (p->readonly) return -EIO;
        
@@ -528,26 +529,57 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
                err1 = con_clear_unimap(vc, NULL);
                if (err1) return err1;
                
+               /*
+                * Since refcount was > 1, con_clear_unimap() allocated a
+                * new uni_pagedir for this vc, i.e. p != q.
+                */
                q = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
-               for (i = 0, l = 0; i < 32; i++)
+
+               /*
+                * uni_pgdir is a 32*32*64 table with rows allocated
+                * when its first entry is added.  The unicode value must
+                * still be incremented for empty rows.  We are copying
+                * entries from "p" (old) to "q" (new).
+                */
+               l = 0;          /* unicode value */
+               for (i = 0; i < 32; i++)
                if ((p1 = p->uni_pgdir[i]))
                        for (j = 0; j < 32; j++)
-                       if ((p2 = p1[j]))
+                       if ((p2 = p1[j])) {
                                for (k = 0; k < 64; k++, l++)
                                if (p2[k] != 0xffff) {
+                                       /*
+                                        * Found one, copy entry for unicode
+                                        * l with fontpos value p2[k].
+                                        */
                                        err1 = con_insert_unipair(q, l, p2[k]);
                                        if (err1) {
                                                p->refcount++;
                                                *vc->vc_uni_pagedir_loc = (unsigned long)p;
                                                con_release_unimap(q);
                                                kfree(q);
-                                               return err1; 
+                                               return err1;
                                        }
-                               }
-               p = q;
-       } else if (p == dflt)
+                               }
+                       } else {
+                               /* Account for row of 64 empty entries */
+                               l += 64;
+                       }
+               else
+                       /* Account for empty table */
+                       l += 32 * 64;
+
+               /*
+                * Finished copying font table, set vc_uni_pagedir to new table
+                */
+               p = q;
+       } else if (p == dflt) {
                dflt = NULL;
-       
+       }
+
+       /*
+        * Insert user specified unicode pairs into new table.
+        */
        while (ct--) {
                unsigned short unicode, fontpos;
                __get_user(unicode, &list->unicode);
@@ -557,11 +589,14 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
                list++;
        }
        
+       /*
+        * Merge with fontmaps of any other virtual consoles.
+        */
        if (con_unify_unimap(vc, p))
                return err;
 
        for (i = 0; i <= 3; i++)
-               set_inverse_transl(vc, p, i); /* Update all inverse translations */
+               set_inverse_transl(vc, p, i); /* Update inverse translations */
        set_inverse_trans_unicode(vc, p);
   
        return err;
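/*
 * Illustrative walk of the three-level unicode -> fontpos table described
 * in the comments above: 32 top-level rows, 32 second-level rows and 64
 * leaf entries, so entry (i, j, k) corresponds to unicode i*32*64 + j*64 + k.
 * That is why the copy loop adds 64 for an empty row and 32*64 for an
 * empty second-level table.
 */
#include <stdint.h>

static void walk_uni_pgdir(uint16_t ***pgdir,
                           void (*emit)(unsigned int unicode, uint16_t fontpos))
{
        unsigned int l = 0;             /* running unicode value */

        for (int i = 0; i < 32; i++) {
                uint16_t **p1 = pgdir[i];
                if (!p1) {
                        l += 32 * 64;   /* whole second-level table missing */
                        continue;
                }
                for (int j = 0; j < 32; j++) {
                        uint16_t *p2 = p1[j];
                        if (!p2) {
                                l += 64;        /* empty row of 64 entries */
                                continue;
                        }
                        for (int k = 0; k < 64; k++, l++)
                                if (p2[k] != 0xffff)    /* 0xffff == unused */
                                        emit(l, p2[k]);
                }
        }
}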
index 5e096f43bceaceb056098d69227d937501ef27bf..65447c5f91d7e0c1108d5f4db5d6eaf69af53234 100644 (file)
@@ -1463,7 +1463,6 @@ compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop,
        if (!perm && op->op != KD_FONT_OP_GET)
                return -EPERM;
        op->data = compat_ptr(((struct compat_console_font_op *)op)->data);
-       op->flags |= KD_FONT_FLAG_OLD;
        i = con_font_op(vc, op);
        if (i)
                return i;
index dac7676ce21bb6d9121d8fa4d178ea5cc0708438..496e06e5fa6696f8b74aa547bc9f04d8b877bb08 100644 (file)
@@ -498,6 +498,14 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
 
        usb_autopm_put_interface(acm->control);
 
+       /*
+        * Unthrottle device in case the TTY was closed while throttled.
+        */
+       spin_lock_irq(&acm->read_lock);
+       acm->throttled = 0;
+       acm->throttle_req = 0;
+       spin_unlock_irq(&acm->read_lock);
+
        if (acm_submit_read_urbs(acm, GFP_KERNEL))
                goto bail_out;
 
@@ -539,7 +547,6 @@ static void acm_port_down(struct acm *acm)
 {
        int i;
 
-       mutex_lock(&open_mutex);
        if (acm->dev) {
                usb_autopm_get_interface(acm->control);
                acm_set_control(acm, acm->ctrlout = 0);
@@ -551,14 +558,23 @@ static void acm_port_down(struct acm *acm)
                acm->control->needs_remote_wakeup = 0;
                usb_autopm_put_interface(acm->control);
        }
-       mutex_unlock(&open_mutex);
 }
 
 static void acm_tty_hangup(struct tty_struct *tty)
 {
-       struct acm *acm = tty->driver_data;
+       struct acm *acm;
+
+       mutex_lock(&open_mutex);
+       acm = tty->driver_data;
+
+       if (!acm)
+               goto out;
+
        tty_port_hangup(&acm->port);
        acm_port_down(acm);
+
+out:
+       mutex_unlock(&open_mutex);
 }
 
 static void acm_tty_close(struct tty_struct *tty, struct file *filp)
@@ -569,8 +585,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
           shutdown */
        if (!acm)
                return;
+
+       mutex_lock(&open_mutex);
        if (tty_port_close_start(&acm->port, tty, filp) == 0) {
-               mutex_lock(&open_mutex);
                if (!acm->dev) {
                        tty_port_tty_set(&acm->port, NULL);
                        acm_tty_unregister(acm);
@@ -582,6 +599,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
        acm_port_down(acm);
        tty_port_close_end(&acm->port, tty);
        tty_port_tty_set(&acm->port, NULL);
+       mutex_unlock(&open_mutex);
 }
 
 static int acm_tty_write(struct tty_struct *tty,
@@ -1181,6 +1199,8 @@ made_compressed_probe:
                i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
                if (i < 0) {
                        kfree(acm->country_codes);
+                       acm->country_codes = NULL;
+                       acm->country_code_size = 0;
                        goto skip_countries;
                }
 
@@ -1189,6 +1209,8 @@ made_compressed_probe:
                if (i < 0) {
                        device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
                        kfree(acm->country_codes);
+                       acm->country_codes = NULL;
+                       acm->country_code_size = 0;
                        goto skip_countries;
                }
        }
@@ -1456,6 +1478,16 @@ static const struct usb_device_id acm_ids[] = {
        },
        { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
        },
+       /* Motorola H24 HSPA module: */
+       { USB_DEVICE(0x22b8, 0x2d91) }, /* modem                                */
+       { USB_DEVICE(0x22b8, 0x2d92) }, /* modem           + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port                      */
+       { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d96) }, /* modem                         + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d97) }, /* modem           + diagnostics + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port               + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
        { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
        .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
                                           data interface instead of
@@ -1534,6 +1566,9 @@ static const struct usb_device_id acm_ids[] = {
        { NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */
        { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
 
+       /* Support for Owen devices */
+       { USB_DEVICE(0x03eb, 0x0030), }, /* Owen SI30 */
+
        /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
 
        /* Support Lego NXT using pbLua firmware */
index 2b9ff518b509c3ca45fa7e064aa5be56fbae36ae..5a244cfbeb4222f9114b68c162d14d3efb80cad9 100644 (file)
@@ -57,6 +57,8 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
 
 #define WDM_MAX                        16
 
+/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
+#define WDM_DEFAULT_BUFSIZE    256
 
 static DEFINE_MUTEX(wdm_mutex);
 
@@ -88,7 +90,8 @@ struct wdm_device {
        int                     count;
        dma_addr_t              shandle;
        dma_addr_t              ihandle;
-       struct mutex            lock;
+       struct mutex            wlock;
+       struct mutex            rlock;
        wait_queue_head_t       wait;
        struct work_struct      rxwork;
        int                     werr;
@@ -105,8 +108,9 @@ static void wdm_out_callback(struct urb *urb)
        spin_lock(&desc->iuspin);
        desc->werr = urb->status;
        spin_unlock(&desc->iuspin);
-       clear_bit(WDM_IN_USE, &desc->flags);
        kfree(desc->outbuf);
+       desc->outbuf = NULL;
+       clear_bit(WDM_IN_USE, &desc->flags);
        wake_up(&desc->wait);
 }
 
@@ -309,7 +313,7 @@ static ssize_t wdm_write
        if (we < 0)
                return -EIO;
 
-       desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
+       buf = kmalloc(count, GFP_KERNEL);
        if (!buf) {
                rv = -ENOMEM;
                goto outnl;
@@ -323,7 +327,7 @@ static ssize_t wdm_write
        }
 
        /* concurrent writes and disconnect */
-       r = mutex_lock_interruptible(&desc->lock);
+       r = mutex_lock_interruptible(&desc->wlock);
        rv = -ERESTARTSYS;
        if (r) {
                kfree(buf);
@@ -373,10 +377,12 @@ static ssize_t wdm_write
        req->wIndex = desc->inum;
        req->wLength = cpu_to_le16(count);
        set_bit(WDM_IN_USE, &desc->flags);
+       desc->outbuf = buf;
 
        rv = usb_submit_urb(desc->command, GFP_KERNEL);
        if (rv < 0) {
                kfree(buf);
+               desc->outbuf = NULL;
                clear_bit(WDM_IN_USE, &desc->flags);
                dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
        } else {
@@ -386,7 +392,7 @@ static ssize_t wdm_write
 out:
        usb_autopm_put_interface(desc->intf);
 outnp:
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->wlock);
 outnl:
        return rv < 0 ? rv : count;
 }
@@ -394,16 +400,17 @@ outnl:
 static ssize_t wdm_read
 (struct file *file, char __user *buffer, size_t count, loff_t *ppos)
 {
-       int rv, cntr = 0;
+       int rv, cntr;
        int i = 0;
        struct wdm_device *desc = file->private_data;
 
 
-       rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */
+       rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */
        if (rv < 0)
                return -ERESTARTSYS;
 
-       if (desc->length == 0) {
+       cntr = ACCESS_ONCE(desc->length);
+       if (cntr == 0) {
                desc->read = 0;
 retry:
                if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
@@ -453,17 +460,20 @@ retry:
                        spin_unlock_irq(&desc->iuspin);
                        goto retry;
                }
-               clear_bit(WDM_READ, &desc->flags);
+               cntr = desc->length;
                spin_unlock_irq(&desc->iuspin);
        }
 
-       cntr = count > desc->length ? desc->length : count;
+       if (cntr > count)
+               cntr = count;
        rv = copy_to_user(buffer, desc->ubuf, cntr);
        if (rv > 0) {
                rv = -EFAULT;
                goto err;
        }
 
+       spin_lock_irq(&desc->iuspin);
+
        for (i = 0; i < desc->length - cntr; i++)
                desc->ubuf[i] = desc->ubuf[i + cntr];
 
@@ -471,10 +481,13 @@ retry:
        /* in case we had outstanding data */
        if (!desc->length)
                clear_bit(WDM_READ, &desc->flags);
+
+       spin_unlock_irq(&desc->iuspin);
+
        rv = cntr;
 
 err:
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->rlock);
        return rv;
 }
 
@@ -498,7 +511,7 @@ static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait)
 
        spin_lock_irqsave(&desc->iuspin, flags);
        if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
-               mask = POLLERR;
+               mask = POLLHUP | POLLERR;
                spin_unlock_irqrestore(&desc->iuspin, flags);
                goto desc_out;
        }
@@ -540,7 +553,8 @@ static int wdm_open(struct inode *inode, struct file *file)
        }
        intf->needs_remote_wakeup = 1;
 
-       mutex_lock(&desc->lock);
+       /* using write lock to protect desc->count */
+       mutex_lock(&desc->wlock);
        if (!desc->count++) {
                desc->werr = 0;
                desc->rerr = 0;
@@ -553,7 +567,7 @@ static int wdm_open(struct inode *inode, struct file *file)
        } else {
                rv = 0;
        }
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->wlock);
        usb_autopm_put_interface(desc->intf);
 out:
        mutex_unlock(&wdm_mutex);
@@ -565,9 +579,11 @@ static int wdm_release(struct inode *inode, struct file *file)
        struct wdm_device *desc = file->private_data;
 
        mutex_lock(&wdm_mutex);
-       mutex_lock(&desc->lock);
+
+       /* using write lock to protect desc->count */
+       mutex_lock(&desc->wlock);
        desc->count--;
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->wlock);
 
        if (!desc->count) {
                dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
@@ -630,7 +646,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
        struct usb_cdc_dmm_desc *dmhd;
        u8 *buffer = intf->altsetting->extra;
        int buflen = intf->altsetting->extralen;
-       u16 maxcom = 0;
+       u16 maxcom = WDM_DEFAULT_BUFSIZE;
 
        if (!buffer)
                goto out;
@@ -665,7 +681,8 @@ next_desc:
        desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
        if (!desc)
                goto out;
-       mutex_init(&desc->lock);
+       mutex_init(&desc->rlock);
+       mutex_init(&desc->wlock);
        spin_lock_init(&desc->iuspin);
        init_waitqueue_head(&desc->wait);
        desc->wMaxCommand = maxcom;
@@ -716,7 +733,7 @@ next_desc:
                goto err;
 
        desc->inbuf = usb_alloc_coherent(interface_to_usbdev(intf),
-                                        desc->bMaxPacketSize0,
+                                        desc->wMaxCommand,
                                         GFP_KERNEL,
                                         &desc->response->transfer_dma);
        if (!desc->inbuf)
@@ -779,11 +796,13 @@ static void wdm_disconnect(struct usb_interface *intf)
        /* to terminate pending flushes */
        clear_bit(WDM_IN_USE, &desc->flags);
        spin_unlock_irqrestore(&desc->iuspin, flags);
-       mutex_lock(&desc->lock);
+       wake_up_all(&desc->wait);
+       mutex_lock(&desc->rlock);
+       mutex_lock(&desc->wlock);
        kill_urbs(desc);
        cancel_work_sync(&desc->rxwork);
-       mutex_unlock(&desc->lock);
-       wake_up_all(&desc->wait);
+       mutex_unlock(&desc->wlock);
+       mutex_unlock(&desc->rlock);
        if (!desc->count)
                cleanup(desc);
        mutex_unlock(&wdm_mutex);
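Disconnect now has to quiesce both paths, so it takes the two new mutexes back to back; wdm_suspend() and wdm_pre_reset() below follow the same order. Read as a convention, the ordering could be captured in a pair of helpers like these (hypothetical, not part of the patch):

static void wdm_block_io(struct wdm_device *desc)
{
	mutex_lock(&desc->rlock);
	mutex_lock(&desc->wlock);
}

static void wdm_unblock_io(struct wdm_device *desc)
{
	mutex_unlock(&desc->wlock);
	mutex_unlock(&desc->rlock);
}

Taking rlock before wlock everywhere keeps the lock ordering consistent and avoids an ABBA deadlock between a disconnect/reset and concurrent read/write calls.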
@@ -798,8 +817,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
        dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
 
        /* if this is an autosuspend the caller does the locking */
-       if (!(message.event & PM_EVENT_AUTO))
-               mutex_lock(&desc->lock);
+       if (!(message.event & PM_EVENT_AUTO)) {
+               mutex_lock(&desc->rlock);
+               mutex_lock(&desc->wlock);
+       }
        spin_lock_irq(&desc->iuspin);
 
        if ((message.event & PM_EVENT_AUTO) &&
@@ -815,8 +836,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
                kill_urbs(desc);
                cancel_work_sync(&desc->rxwork);
        }
-       if (!(message.event & PM_EVENT_AUTO))
-               mutex_unlock(&desc->lock);
+       if (!(message.event & PM_EVENT_AUTO)) {
+               mutex_unlock(&desc->wlock);
+               mutex_unlock(&desc->rlock);
+       }
 
        return rv;
 }
@@ -854,7 +877,8 @@ static int wdm_pre_reset(struct usb_interface *intf)
 {
        struct wdm_device *desc = usb_get_intfdata(intf);
 
-       mutex_lock(&desc->lock);
+       mutex_lock(&desc->rlock);
+       mutex_lock(&desc->wlock);
        kill_urbs(desc);
 
        /*
@@ -876,7 +900,8 @@ static int wdm_post_reset(struct usb_interface *intf)
        int rv;
 
        rv = recover_from_urb_loss(desc);
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->wlock);
+       mutex_unlock(&desc->rlock);
        return 0;
 }
 
index 37518dfdeb987188089eb43c3147417ce14615a9..ca3c303eed81a70db92fb6b5c2420d9468fa4446 100644 (file)
@@ -292,17 +292,14 @@ static struct async *async_getcompleted(struct dev_state *ps)
 static struct async *async_getpending(struct dev_state *ps,
                                             void __user *userurb)
 {
-       unsigned long flags;
        struct async *as;
 
-       spin_lock_irqsave(&ps->lock, flags);
        list_for_each_entry(as, &ps->async_pending, asynclist)
                if (as->userurb == userurb) {
                        list_del_init(&as->asynclist);
-                       spin_unlock_irqrestore(&ps->lock, flags);
                        return as;
                }
-       spin_unlock_irqrestore(&ps->lock, flags);
+
        return NULL;
 }
 
@@ -357,6 +354,7 @@ static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr)
 __releases(ps->lock)
 __acquires(ps->lock)
 {
+       struct urb *urb;
        struct async *as;
 
        /* Mark all the pending URBs that match bulk_addr, up to but not
@@ -379,8 +377,11 @@ __acquires(ps->lock)
        list_for_each_entry(as, &ps->async_pending, asynclist) {
                if (as->bulk_status == AS_UNLINK) {
                        as->bulk_status = 0;            /* Only once */
+                       urb = as->urb;
+                       usb_get_urb(urb);
                        spin_unlock(&ps->lock);         /* Allow completions */
-                       usb_unlink_urb(as->urb);
+                       usb_unlink_urb(urb);
+                       usb_put_urb(urb);
                        spin_lock(&ps->lock);
                        goto rescan;
                }
@@ -407,7 +408,7 @@ static void async_completed(struct urb *urb)
                sinfo.si_errno = as->status;
                sinfo.si_code = SI_ASYNCIO;
                sinfo.si_addr = as->userurb;
-               pid = as->pid;
+               pid = get_pid(as->pid);
                uid = as->uid;
                euid = as->euid;
                secid = as->secid;
@@ -422,15 +423,18 @@ static void async_completed(struct urb *urb)
                cancel_bulk_urbs(ps, as->bulk_addr);
        spin_unlock(&ps->lock);
 
-       if (signr)
+       if (signr) {
                kill_pid_info_as_uid(sinfo.si_signo, &sinfo, pid, uid,
                                      euid, secid);
+               put_pid(pid);
+       }
 
        wake_up(&ps->wait);
 }
 
 static void destroy_async(struct dev_state *ps, struct list_head *list)
 {
+       struct urb *urb;
        struct async *as;
        unsigned long flags;
 
@@ -438,10 +442,13 @@ static void destroy_async(struct dev_state *ps, struct list_head *list)
        while (!list_empty(list)) {
                as = list_entry(list->next, struct async, asynclist);
                list_del_init(&as->asynclist);
+               urb = as->urb;
+               usb_get_urb(urb);
 
                /* drop the spinlock so the completion handler can run */
                spin_unlock_irqrestore(&ps->lock, flags);
-               usb_kill_urb(as->urb);
+               usb_kill_urb(urb);
+               usb_put_urb(urb);
                spin_lock_irqsave(&ps->lock, flags);
        }
        spin_unlock_irqrestore(&ps->lock, flags);
@@ -607,9 +614,10 @@ static int findintfep(struct usb_device *dev, unsigned int ep)
 }
 
 static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
-                          unsigned int index)
+                          unsigned int request, unsigned int index)
 {
        int ret = 0;
+       struct usb_host_interface *alt_setting;
 
        if (ps->dev->state != USB_STATE_UNAUTHENTICATED
         && ps->dev->state != USB_STATE_ADDRESS
@@ -618,6 +626,19 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
        if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype))
                return 0;
 
+       /*
+        * check for the special corner case 'get_device_id' in the printer
+        * class specification, where wIndex is (interface << 8 | altsetting)
+        * instead of just interface
+        */
+       if (requesttype == 0xa1 && request == 0) {
+               alt_setting = usb_find_alt_setting(ps->dev->actconfig,
+                                                  index >> 8, index & 0xff);
+               if (alt_setting
+                && alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER)
+                       index >>= 8;
+       }
+
        index &= 0xff;
        switch (requesttype & USB_RECIP_MASK) {
        case USB_RECIP_ENDPOINT:
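The printer-class corner case handled above packs two fields into wIndex instead of the usual bare interface number. A small illustrative decoder (not part of the patch):

/* GET_DEVICE_ID: bmRequestType 0xa1, bRequest 0,
 * wIndex = (interface << 8) | altsetting
 */
static inline void printer_decode_windex(u16 windex, u8 *intf, u8 *altsetting)
{
	*intf = windex >> 8;
	*altsetting = windex & 0xff;
}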
@@ -770,7 +791,8 @@ static int proc_control(struct dev_state *ps, void __user *arg)
 
        if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
                return -EFAULT;
-       ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.wIndex);
+       ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.bRequest,
+                             ctrl.wIndex);
        if (ret)
                return ret;
        wLength = ctrl.wLength;         /* To suppress 64k PAGE_SIZE warning */
@@ -1100,7 +1122,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
                        kfree(dr);
                        return -EINVAL;
                }
-               ret = check_ctrlrecip(ps, dr->bRequestType,
+               ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest,
                                      le16_to_cpup(&dr->wIndex));
                if (ret) {
                        kfree(dr);
@@ -1335,12 +1357,24 @@ static int proc_submiturb(struct dev_state *ps, void __user *arg)
 
 static int proc_unlinkurb(struct dev_state *ps, void __user *arg)
 {
+       struct urb *urb;
        struct async *as;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ps->lock, flags);
        as = async_getpending(ps, arg);
-       if (!as)
+       if (!as) {
+               spin_unlock_irqrestore(&ps->lock, flags);
                return -EINVAL;
-       usb_kill_urb(as->urb);
+       }
+
+       urb = as->urb;
+       usb_get_urb(urb);
+       spin_unlock_irqrestore(&ps->lock, flags);
+
+       usb_kill_urb(urb);
+       usb_put_urb(urb);
+
        return 0;
 }
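proc_unlinkurb() here, and cancel_bulk_urbs() and destroy_async() above, all adopt the same idiom: take a reference on the URB while ps->lock is held, drop the lock so the completion handler can run (and possibly free the struct async), act on the URB, then drop the reference. The idiom in isolation, with a hypothetical helper name:

static void pin_and_kill(struct dev_state *ps, struct async *as)
{
	struct urb *urb;
	unsigned long flags;

	spin_lock_irqsave(&ps->lock, flags);
	urb = usb_get_urb(as->urb);	/* pin while the list lock is held */
	spin_unlock_irqrestore(&ps->lock, flags);

	usb_kill_urb(urb);		/* may sleep; 'as' may already be gone */
	usb_put_urb(urb);
}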
 
index 34e3da5aa72a2463863b04e3b3befb4aba02505d..75b4bc03e2e1a13d2d852ebce3ae1a19df2f8b79 100644 (file)
@@ -1583,7 +1583,7 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
        dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
                        __func__, atomic_read(&intf->dev.power.usage_count),
                        status);
-       if (status > 0)
+       if (status > 0 || status == -EINPROGRESS)
                status = 0;
        return status;
 }
@@ -1668,6 +1668,11 @@ int usb_runtime_suspend(struct device *dev)
                return -EAGAIN;
 
        status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+
+       /* Allow a retry if autosuspend failed temporarily */
+       if (status == -EAGAIN || status == -EBUSY)
+               usb_mark_last_busy(udev);
+
        /* The PM core reacts badly unless the return code is 0,
         * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
         */
index ce22f4a84ed0ade731c6586f6a51d01433f67d45..6c1642b382fdbdac30af6ff646a73874151782bd 100644 (file)
@@ -187,7 +187,10 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                return -ENODEV;
        dev->current_state = PCI_D0;
 
-       if (!dev->irq) {
+       /* The xHCI driver supports MSI and MSI-X,
+        * so don't fail if the BIOS doesn't provide a legacy IRQ.
+        */
+       if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
                dev_err(&dev->dev,
                        "Found HC with no IRQ.  Check BIOS/PCI %s setup!\n",
                        pci_name(dev));
index 103b92a5805713014a75c8d3231b21a867b0ed50..cc8b9c57150a95f8b464a5cc69047a2ce3f025db 100755 (executable)
@@ -1387,11 +1387,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
                                        ret = -EAGAIN;
                                else
                                        urb->transfer_flags |= URB_DMA_MAP_SG;
-                               if (n != urb->num_sgs) {
-                                       urb->num_sgs = n;
+                               urb->num_mapped_sgs = n;
+                               if (n != urb->num_sgs)
                                        urb->transfer_flags |=
                                                        URB_DMA_SG_COMBINED;
-                               }
                        } else if (urb->sg) {
                                struct scatterlist *sg = urb->sg;
                                urb->transfer_dma = dma_map_page(
@@ -1764,6 +1763,8 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
                struct usb_interface *iface = usb_ifnum_to_if(udev,
                                cur_alt->desc.bInterfaceNumber);
 
+               if (!iface)
+                       return -EINVAL;
                if (iface->resetting_device) {
                        /*
                         * The USB core just reset the device, so the xHCI host
@@ -2435,8 +2436,10 @@ int usb_add_hcd(struct usb_hcd *hcd,
                        && device_can_wakeup(&hcd->self.root_hub->dev))
                dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
 
-       /* enable irqs just before we start the controller */
-       if (usb_hcd_is_primary_hcd(hcd)) {
+       /* enable irqs just before we start the controller,
+        * if the BIOS provides legacy PCI irqs.
+        */
+       if (usb_hcd_is_primary_hcd(hcd) && irqnum) {
                retval = usb_hcd_request_irqs(hcd, irqnum, irqflags);
                if (retval)
                        goto err_request_irq;
index 44728488411571fde463faac9df013d864e07e7c..a33954b424efbf24cf005a53651aaf68a957357a 100755 (executable)
@@ -705,10 +705,26 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
        if (type == HUB_INIT3)
                goto init3;
 
-       /* After a resume, port power should still be on.
+       /* A superspeed hub, except for the root hub, uses its Hub Depth
+        * value as an offset into the route string to locate the bits that
+        * identify its downstream port number. The hub driver therefore has
+        * to send a Set Hub Depth request to a superspeed hub after the hub
+        * has been configured, both during initialization and after a reset.
+        *
+        * After a resume, port power should still be on.
         * For any other type of activation, turn it on.
         */
        if (type != HUB_RESUME) {
+               if (hdev->parent && hub_is_superspeed(hdev)) {
+                       ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+                                       HUB_SET_DEPTH, USB_RT_HUB,
+                                       hdev->level - 1, 0, NULL, 0,
+                                       USB_CTRL_SET_TIMEOUT);
+                       if (ret < 0)
+                               dev_err(hub->intfdev,
+                                               "set hub depth failed\n");
+               }
 
                /* Speed up system boot by using a delayed_work for the
                 * hub's initial power-up delays.  This is pretty awkward
@@ -813,6 +829,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                                        USB_PORT_FEAT_C_PORT_LINK_STATE);
                }
 
+               if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
+                               hub_is_superspeed(hub->hdev)) {
+                       need_debounce_delay = true;
+                       clear_port_feature(hub->hdev, port1,
+                                       USB_PORT_FEAT_C_BH_PORT_RESET);
+               }
                /* We can forget about a "removed" device when there's a
                 * physical disconnect or the connect status changes.
                 */
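The Set Hub Depth request moved into hub_activate() above programs hdev->level - 1 into the hub. Per the USB 3.0 spec the 20-bit route string carries 4 bits per hub tier, so the programmed depth tells the hub which nibble names its own downstream port; a purely illustrative helper (not from the patch):

static inline unsigned int route_string_port(u32 route, unsigned int hub_depth)
{
	/* 4 bits per tier; hub_depth is the value sent with HUB_SET_DEPTH */
	return (route >> (4 * hub_depth)) & 0xf;
}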
@@ -981,18 +1003,6 @@ static int hub_configure(struct usb_hub *hub,
                goto fail;
        }
 
-       if (hub_is_superspeed(hdev) && (hdev->parent != NULL)) {
-               ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
-                               HUB_SET_DEPTH, USB_RT_HUB,
-                               hdev->level - 1, 0, NULL, 0,
-                               USB_CTRL_SET_TIMEOUT);
-
-               if (ret < 0) {
-                       message = "can't set hub depth";
-                       goto fail;
-               }
-       }
-
        /* Request the entire hub descriptor.
         * hub->descriptor can handle USB_MAXCHILDREN ports,
         * but the hub can/will return fewer bytes here.
@@ -1637,7 +1647,6 @@ void usb_disconnect(struct usb_device **pdev)
 {
        struct usb_device       *udev = *pdev;
        int                     i;
-       struct usb_hcd          *hcd = bus_to_hcd(udev->bus);
 
        if (!udev) {
                pr_debug ("%s nodev\n", __func__);
@@ -1665,9 +1674,7 @@ void usb_disconnect(struct usb_device **pdev)
         * so that the hardware is now fully quiesced.
         */
        dev_dbg (&udev->dev, "unregistering device\n");
-       mutex_lock(hcd->bandwidth_mutex);
        usb_disable_device(udev, 0);
-       mutex_unlock(hcd->bandwidth_mutex);
        usb_hcd_synchronize_unlinks(udev);
 
        usb_remove_ep_devs(&udev->ep0);
index 0b5ec234c787ff907cf677c25dbf4f407a418642..806060ca9322286717163f226989b80138e72c35 100644 (file)
@@ -308,7 +308,8 @@ static void sg_complete(struct urb *urb)
                                retval = usb_unlink_urb(io->urbs [i]);
                                if (retval != -EINPROGRESS &&
                                    retval != -ENODEV &&
-                                   retval != -EBUSY)
+                                   retval != -EBUSY &&
+                                   retval != -EIDRM)
                                        dev_err(&io->dev->dev,
                                                "%s, unlink --> %d\n",
                                                __func__, retval);
@@ -317,7 +318,6 @@ static void sg_complete(struct urb *urb)
                }
                spin_lock(&io->lock);
        }
-       urb->dev = NULL;
 
        /* on the last completion, signal usb_sg_wait() */
        io->bytes += urb->actual_length;
@@ -524,7 +524,6 @@ void usb_sg_wait(struct usb_sg_request *io)
                case -ENXIO:    /* hc didn't queue this one */
                case -EAGAIN:
                case -ENOMEM:
-                       io->urbs[i]->dev = NULL;
                        retval = 0;
                        yield();
                        break;
@@ -542,7 +541,6 @@ void usb_sg_wait(struct usb_sg_request *io)
 
                        /* fail any uncompleted urbs */
                default:
-                       io->urbs[i]->dev = NULL;
                        io->urbs[i]->status = retval;
                        dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
                                __func__, retval);
@@ -593,7 +591,10 @@ void usb_sg_cancel(struct usb_sg_request *io)
                        if (!io->urbs [i]->dev)
                                continue;
                        retval = usb_unlink_urb(io->urbs [i]);
-                       if (retval != -EINPROGRESS && retval != -EBUSY)
+                       if (retval != -EINPROGRESS
+                                       && retval != -ENODEV
+                                       && retval != -EBUSY
+                                       && retval != -EIDRM)
                                dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
                                        __func__, retval);
                }
@@ -1135,8 +1136,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
  * Deallocates hcd/hardware state for the endpoints (nuking all or most
  * pending urbs) and usbcore state for the interfaces, so that usbcore
  * must usb_set_configuration() before any interfaces could be used.
- *
- * Must be called with hcd->bandwidth_mutex held.
  */
 void usb_disable_device(struct usb_device *dev, int skip_ep0)
 {
@@ -1189,7 +1188,9 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
                        usb_disable_endpoint(dev, i + USB_DIR_IN, false);
                }
                /* Remove endpoints from the host controller internal state */
+               mutex_lock(hcd->bandwidth_mutex);
                usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+               mutex_unlock(hcd->bandwidth_mutex);
                /* Second pass: remove endpoint pointers */
        }
        for (i = skip_ep0; i < 16; ++i) {
@@ -1749,7 +1750,6 @@ free_interfaces:
        /* if it's already configured, clear out old state first.
         * getting rid of old interfaces means unbinding their drivers.
         */
-       mutex_lock(hcd->bandwidth_mutex);
        if (dev->state != USB_STATE_ADDRESS)
                usb_disable_device(dev, 1);     /* Skip ep0 */
 
@@ -1762,6 +1762,7 @@ free_interfaces:
         * host controller will not allow submissions to dropped endpoints.  If
         * this call fails, the device state is unchanged.
         */
+       mutex_lock(hcd->bandwidth_mutex);
        ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
        if (ret < 0) {
                mutex_unlock(hcd->bandwidth_mutex);
@@ -1802,7 +1803,6 @@ free_interfaces:
                intfc = cp->intf_cache[i];
                intf->altsetting = intfc->altsetting;
                intf->num_altsetting = intfc->num_altsetting;
-               intf->intf_assoc = find_iad(dev, cp, i);
                kref_get(&intfc->ref);
 
                alt = usb_altnum_to_altsetting(intf, 0);
@@ -1815,6 +1815,8 @@ free_interfaces:
                if (!alt)
                        alt = &intf->altsetting[0];
 
+               intf->intf_assoc =
+                       find_iad(dev, cp, alt->desc.bInterfaceNumber);
                intf->cur_altsetting = alt;
                usb_enable_interface(dev, intf, true);
                intf->dev.parent = &dev->dev;
index 81ce6a8e1d94a4b6c7a86c6dbb17a64c1b6e69bf..32d3adc315f5357c01b137a725bba3e7d18fe1da 100644 (file)
@@ -38,6 +38,54 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Logitech Webcam C200 */
+       { USB_DEVICE(0x046d, 0x0802), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C250 */
+       { USB_DEVICE(0x046d, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C300 */
+       { USB_DEVICE(0x046d, 0x0805), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam B/C500 */
+       { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C600 */
+       { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam Pro 9000 */
+       { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C905 */
+       { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C210 */
+       { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C260 */
+       { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C310 */
+       { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C910 */
+       { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C160 */
+       { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Webcam C270 */
+       { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Quickcam Pro 9000 */
+       { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Quickcam E3500 */
+       { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Logitech Quickcam Vision Pro */
+       { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech Harmony 700-series */
        { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
 
@@ -69,6 +117,15 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x06a3, 0x0006), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
+       { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Guillemot Webcam Hercules Dualpix Exchange */
+       { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
+
+       /* Midiman M-Audio Keystation 88es */
+       { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* M-Systems Flash Disk Pioneers */
        { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
index 30ce90921f4b306af30abf4a5c4d7062e3791894..e222aceb3251e7851c692238ac293e3a9b2e75d7 100644 (file)
@@ -558,7 +558,7 @@ config USB_DWC_OTG
        select USB_GADGET_SELECTED
 
 config USB_GADGET_EG20T
-       boolean "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
+       tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7831) UDC"
        depends on PCI
        select USB_GADGET_DUALSPEED
        help
@@ -576,8 +576,9 @@ config USB_GADGET_EG20T
 
          This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is
          for IVI(In-Vehicle Infotainment) use.
-         ML7213 is companion chip for Intel Atom E6xx series.
-         ML7213 is completely compatible for Intel EG20T PCH.
+         ML7831 is for general purpose use.
+         ML7213/ML7831 are companion chips for the Intel Atom E6xx series.
+         ML7213/ML7831 are fully compatible with the Intel EG20T PCH.
 
 config USB_EG20T
        tristate
index 703d281f0688d0e955a023002300913dcf4129e1..c5bca332c2a195fe2424ff20b2f2610561672f14 100644 (file)
@@ -45,6 +45,7 @@
 #include "epautoconf.c"
 #include "composite.c"
 
+#include "f_audio_source.c"
 #include "f_mass_storage.c"
 #include "u_serial.c"
 #include "f_acm.c"
@@ -82,6 +83,11 @@ struct android_usb_function {
        int (*init)(struct android_usb_function *, struct usb_composite_dev *);
        /* Optional: cleanup during gadget unbind */
        void (*cleanup)(struct android_usb_function *);
+       /* Optional: called when the function is added to the list of
+        * enabled functions */
+       void (*enable)(struct android_usb_function *);
+       /* Optional: called when it is removed */
+       void (*disable)(struct android_usb_function *);
 
        int (*bind_config)(struct android_usb_function *, struct usb_configuration *);
 
@@ -100,6 +106,7 @@ struct android_dev {
        struct device *dev;
 
        bool enabled;
+       int disable_depth;
        struct mutex mutex;
        bool connected;
        bool sw_connected;
@@ -153,8 +160,6 @@ static struct usb_configuration android_config_driver = {
        .label          = "android",
        .unbind         = android_unbind_config,
        .bConfigurationValue = 1,
-       .bmAttributes   = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
-       .bMaxPower      = 0xFA, /* 500ma */
 };
 
 static void android_work(struct work_struct *data)
@@ -184,18 +189,55 @@ static void android_work(struct work_struct *data)
        }
 }
 
+static void android_enable(struct android_dev *dev)
+{
+       struct usb_composite_dev *cdev = dev->cdev;
+
+       BUG_ON(!mutex_is_locked(&dev->mutex));
+       BUG_ON(!dev->disable_depth);
+
+       if (--dev->disable_depth == 0) {
+               usb_add_config(cdev, &android_config_driver,
+                                       android_bind_config);
+               usb_gadget_connect(cdev->gadget);
+       }
+}
+
+static void android_disable(struct android_dev *dev)
+{
+       struct usb_composite_dev *cdev = dev->cdev;
+
+       BUG_ON(!mutex_is_locked(&dev->mutex));
+
+       if (dev->disable_depth++ == 0) {
+               usb_gadget_disconnect(cdev->gadget);
+               /* Cancel pending control requests */
+               usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
+               usb_remove_config(cdev, &android_config_driver);
+       }
+}
 
 /*-------------------------------------------------------------------------*/
 /* Supported functions initialization */
 
+struct adb_data {
+       bool opened;
+       bool enabled;
+};
+
 static int adb_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
 {
+       f->config = kzalloc(sizeof(struct adb_data), GFP_KERNEL);
+       if (!f->config)
+               return -ENOMEM;
+
        return adb_setup();
 }
 
 static void adb_function_cleanup(struct android_usb_function *f)
 {
        adb_cleanup();
+       kfree(f->config);
 }
 
 static int adb_function_bind_config(struct android_usb_function *f, struct usb_configuration *c)
@@ -203,13 +245,69 @@ static int adb_function_bind_config(struct android_usb_function *f, struct usb_c
        return adb_bind_config(c);
 }
 
+static void adb_android_function_enable(struct android_usb_function *f)
+{
+       struct android_dev *dev = _android_dev;
+       struct adb_data *data = f->config;
+
+       data->enabled = true;
+
+       /* Disable the gadget until adbd is ready */
+       if (!data->opened)
+               android_disable(dev);
+}
+
+static void adb_android_function_disable(struct android_usb_function *f)
+{
+       struct android_dev *dev = _android_dev;
+       struct adb_data *data = f->config;
+
+       data->enabled = false;
+
+       /* Balance the disable that was called in closed_callback */
+       if (!data->opened)
+               android_enable(dev);
+}
+
 static struct android_usb_function adb_function = {
        .name           = "adb",
+       .enable         = adb_android_function_enable,
+       .disable        = adb_android_function_disable,
        .init           = adb_function_init,
        .cleanup        = adb_function_cleanup,
        .bind_config    = adb_function_bind_config,
 };
 
+static void adb_ready_callback(void)
+{
+       struct android_dev *dev = _android_dev;
+       struct adb_data *data = adb_function.config;
+
+       mutex_lock(&dev->mutex);
+
+       data->opened = true;
+
+       if (data->enabled)
+               android_enable(dev);
+
+       mutex_unlock(&dev->mutex);
+}
+
+static void adb_closed_callback(void)
+{
+       struct android_dev *dev = _android_dev;
+       struct adb_data *data = adb_function.config;
+
+       mutex_lock(&dev->mutex);
+
+       data->opened = false;
+
+       if (data->enabled)
+               android_disable(dev);
+
+       mutex_unlock(&dev->mutex);
+}
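android_enable()/android_disable() turn the old direct connect/disconnect calls into a depth counter: the configuration is added and the gadget connected only when disable_depth drops to zero, and the adb ready/closed callbacks above use it to keep the gadget off the bus until adbd has actually opened the adb device node. A hypothetical trace of how the counter nests (illustrative, not code from the patch):

static void example_enable_sequence(struct android_dev *dev)
{
	mutex_lock(&dev->mutex);

	/* init() leaves disable_depth at 1, so the gadget starts disconnected */
	android_disable(dev);	/* 1 -> 2: adb enabled before adbd opened it */
	android_enable(dev);	/* 2 -> 1: enable_store(), still disconnected */
	android_enable(dev);	/* 1 -> 0: adb_ready_callback(), now connected */

	mutex_unlock(&dev->mutex);
}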
+
 
 #define MAX_ACM_INSTANCES 4
 struct acm_function_config {
@@ -645,6 +743,67 @@ static struct android_usb_function accessory_function = {
        .ctrlrequest    = accessory_function_ctrlrequest,
 };
 
+static int audio_source_function_init(struct android_usb_function *f,
+                       struct usb_composite_dev *cdev)
+{
+       struct audio_source_config *config;
+
+       config = kzalloc(sizeof(struct audio_source_config), GFP_KERNEL);
+       if (!config)
+               return -ENOMEM;
+       config->card = -1;
+       config->device = -1;
+       f->config = config;
+       return 0;
+}
+
+static void audio_source_function_cleanup(struct android_usb_function *f)
+{
+       kfree(f->config);
+}
+
+static int audio_source_function_bind_config(struct android_usb_function *f,
+                                               struct usb_configuration *c)
+{
+       struct audio_source_config *config = f->config;
+
+       return audio_source_bind_config(c, config);
+}
+
+static void audio_source_function_unbind_config(struct android_usb_function *f,
+                                               struct usb_configuration *c)
+{
+       struct audio_source_config *config = f->config;
+
+       config->card = -1;
+       config->device = -1;
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct audio_source_config *config = f->config;
+
+       /* print PCM card and device numbers */
+       return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(pcm, S_IRUGO | S_IWUSR, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+       &dev_attr_pcm,
+       NULL
+};
+
+static struct android_usb_function audio_source_function = {
+       .name           = "audio_source",
+       .init           = audio_source_function_init,
+       .cleanup        = audio_source_function_cleanup,
+       .bind_config    = audio_source_function_bind_config,
+       .unbind_config  = audio_source_function_unbind_config,
+       .attributes     = audio_source_function_attributes,
+};
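The pcm attribute exposes the ALSA card and device numbers as "card device\n" so userspace can locate the capture stream once audio_source is active. A hypothetical userspace-side reader; the sysfs path is an assumption based on the usual android_usb layout, not something stated in this patch:

#include <stdio.h>

static int read_audio_source_pcm(int *card, int *device)
{
	FILE *f = fopen("/sys/class/android_usb/android0/f_audio_source/pcm", "r");

	if (!f)
		return -1;
	if (fscanf(f, "%d %d", card, device) != 2) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}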
 
 static struct android_usb_function *supported_functions[] = {
        &adb_function,
@@ -654,6 +813,7 @@ static struct android_usb_function *supported_functions[] = {
        &rndis_function,
        &mass_storage_function,
        &accessory_function,
+       &audio_source_function,
        NULL
 };
 
@@ -836,13 +996,13 @@ static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
 {
        struct android_dev *dev = dev_get_drvdata(pdev);
        struct usb_composite_dev *cdev = dev->cdev;
+       struct android_usb_function *f;
        int enabled = 0;
 
        mutex_lock(&dev->mutex);
 
        sscanf(buff, "%d", &enabled);
        if (enabled && !dev->enabled) {
-               cdev->next_string_id = 0;
                /* update values in composite driver's copy of device descriptor */
                cdev->desc.idVendor = device_desc.idVendor;
                cdev->desc.idProduct = device_desc.idProduct;
@@ -850,15 +1010,18 @@ static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
                cdev->desc.bDeviceClass = device_desc.bDeviceClass;
                cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
                cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
-               usb_add_config(cdev, &android_config_driver,
-                                       android_bind_config);
-               usb_gadget_connect(cdev->gadget);
+               list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+                       if (f->enable)
+                               f->enable(f);
+               }
+               android_enable(dev);
                dev->enabled = true;
        } else if (!enabled && dev->enabled) {
-               usb_gadget_disconnect(cdev->gadget);
-               /* Cancel pending control requests */
-               usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
-               usb_remove_config(cdev, &android_config_driver);
+               android_disable(dev);
+               list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+                       if (f->disable)
+                               f->disable(f);
+               }
                dev->enabled = false;
        } else {
                pr_err("android_usb: already %s\n",
@@ -922,10 +1085,7 @@ field ## _store(struct device *dev, struct device_attribute *attr,        \
                const char *buf, size_t size)                           \
 {                                                                      \
        if (size >= sizeof(buffer)) return -EINVAL;                     \
-       if (sscanf(buf, "%s", buffer) == 1) {                           \
-               return size;                                            \
-       }                                                               \
-       return -1;                                                      \
+       return strlcpy(buffer, buf, sizeof(buffer));                    \
 }                                                                      \
 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
 
@@ -1036,7 +1196,6 @@ static int android_bind(struct usb_composite_dev *cdev)
                device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
        }
 
-       usb_gadget_set_selfpowered(gadget);
        dev->cdev = cdev;
 
        return 0;
@@ -1110,6 +1269,11 @@ static void android_disconnect(struct usb_gadget *gadget)
        unsigned long flags;
 
        composite_disconnect(gadget);
+       /* accessory HID support can be active while the
+        * accessory function is not actually enabled,
+        * so we need to inform it when we are disconnected.
+        */
+       acc_disconnect();
 
        spin_lock_irqsave(&cdev->lock, flags);
        dev->connected = 0;
@@ -1154,6 +1318,7 @@ static int __init init(void)
        if (!dev)
                return -ENOMEM;
 
+       dev->disable_depth = 1;
        dev->functions = supported_functions;
        INIT_LIST_HEAD(&dev->enabled_functions);
        INIT_WORK(&dev->work, android_work);
index 6182f6d0451925763728a63570b95a61843553df..2e295e8c5567cefe7ea90db0bd17f66a045f262d 100755 (executable)
@@ -565,7 +565,7 @@ done:
        return status;
 }
 
-static int remove_config(struct usb_composite_dev *cdev,
+static int unbind_config(struct usb_composite_dev *cdev,
                              struct usb_configuration *config)
 {
        while (!list_empty(&config->functions)) {
@@ -580,7 +580,6 @@ static int remove_config(struct usb_composite_dev *cdev,
                        /* may free memory for "f" */
                }
        }
-       list_del(&config->list);
        if (config->unbind) {
                DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
                config->unbind(config);
@@ -599,9 +598,11 @@ int usb_remove_config(struct usb_composite_dev *cdev,
        if (cdev->config == config)
                reset_config(cdev);
 
+       list_del(&config->list);
+
        spin_unlock_irqrestore(&cdev->lock, flags);
 
-       return remove_config(cdev, config);
+       return unbind_config(cdev, config);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1085,7 +1086,8 @@ composite_unbind(struct usb_gadget *gadget)
                struct usb_configuration        *c;
                c = list_first_entry(&cdev->configs,
                                struct usb_configuration, list);
-               remove_config(cdev, c);
+               list_del(&c->list);
+               unbind_config(cdev, c);
        }
        if (composite->unbind)
                composite->unbind(cdev);
index 51ce8dfb5dcad2ed07bf563047bec8edf3d049f3..ee78a24d7ad512b947cde9707f2d347a36c92948 100755 (executable)
@@ -33,6 +33,8 @@
 #include <linux/device.h>
 #include <linux/miscdevice.h>
 
+#include <linux/hid.h>
+#include <linux/hiddev.h>
 #include <linux/usb.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/f_accessory.h>
@@ -40,7 +42,7 @@
 #define BULK_BUFFER_SIZE    16384
 #define ACC_STRING_SIZE     256
 
-#define PROTOCOL_VERSION    1
+#define PROTOCOL_VERSION    2
 
 /* String IDs */
 #define INTERFACE_STRING_INDEX 0
 #define TX_REQ_MAX 4
 #define RX_REQ_MAX 2
 
+struct acc_hid_dev {
+       struct list_head        list;
+       struct hid_device *hid;
+       struct acc_dev *dev;
+       /* accessory defined ID */
+       int id;
+       /* HID report descriptor */
+       u8 *report_desc;
+       /* length of HID report descriptor */
+       int report_desc_len;
+       /* number of bytes of report_desc we have received so far */
+       int report_desc_offset;
+};
+
 struct acc_dev {
        struct usb_function function;
        struct usb_composite_dev *cdev;
@@ -78,6 +94,8 @@ struct acc_dev {
        /* set to 1 if we have a pending start request */
        int start_requested;
 
+       int audio_mode;
+
        /* synchronize access to our device file */
        atomic_t open_excl;
 
@@ -87,7 +105,21 @@ struct acc_dev {
        wait_queue_head_t write_wq;
        struct usb_request *rx_req[RX_REQ_MAX];
        int rx_done;
-       struct delayed_work work;
+
+       /* delayed work for handling ACCESSORY_START */
+       struct delayed_work start_work;
+
+       /* worker for registering and unregistering hid devices */
+       struct work_struct hid_work;
+
+       /* list of active HID devices */
+       struct list_head        hid_list;
+
+       /* list of new HID devices to register */
+       struct list_head        new_hid_list;
+
+       /* list of dead HID devices to unregister */
+       struct list_head        dead_hid_list;
 };
 
 static struct usb_interface_descriptor acc_interface_desc = {
@@ -296,6 +328,160 @@ static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
        }
 }
 
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+               struct usb_request *req)
+{
+       struct acc_hid_dev *hid = req->context;
+       struct acc_dev *dev = hid->dev;
+       int length = req->actual;
+
+       if (req->status != 0) {
+               pr_err("acc_complete_set_hid_report_desc, err %d\n",
+                       req->status);
+               return;
+       }
+
+       memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+       hid->report_desc_offset += length;
+       if (hid->report_desc_offset == hid->report_desc_len) {
+               /* After we have received the entire report descriptor
+                * we schedule work to initialize the HID device
+                */
+               schedule_work(&dev->hid_work);
+       }
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+               struct usb_request *req)
+{
+       struct acc_hid_dev *hid = req->context;
+       int length = req->actual;
+
+       if (req->status != 0) {
+               pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+               return;
+       }
+
+       hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+       struct acc_hid_dev *hdev = hid->driver_data;
+
+       hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+       return 0;
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+       return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+       return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+       .parse = acc_hid_parse,
+       .start = acc_hid_start,
+       .stop = acc_hid_stop,
+       .open = acc_hid_open,
+       .close = acc_hid_close,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+               int id, int desc_len)
+{
+       struct acc_hid_dev *hdev;
+
+       hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+       if (!hdev)
+               return NULL;
+       hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+       if (!hdev->report_desc) {
+               kfree(hdev);
+               return NULL;
+       }
+       hdev->dev = dev;
+       hdev->id = id;
+       hdev->report_desc_len = desc_len;
+
+       return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+       struct acc_hid_dev *hid;
+
+       list_for_each_entry(hid, list, list) {
+               if (hid->id == id)
+                       return hid;
+       }
+       return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+       struct acc_hid_dev *hid;
+       unsigned long flags;
+
+       /* report descriptor length must be > 0 */
+       if (desc_length <= 0)
+               return -EINVAL;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       /* replace HID if one already exists with this ID */
+       hid = acc_hid_get(&dev->hid_list, id);
+       if (!hid)
+               hid = acc_hid_get(&dev->new_hid_list, id);
+       if (hid)
+               list_move(&hid->list, &dev->dead_hid_list);
+
+       hid = acc_hid_new(dev, id, desc_length);
+       if (!hid) {
+               spin_unlock_irqrestore(&dev->lock, flags);
+               return -ENOMEM;
+       }
+
+       list_add(&hid->list, &dev->new_hid_list);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       /* schedule work to register the HID device */
+       schedule_work(&dev->hid_work);
+       return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+       struct acc_hid_dev *hid;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       hid = acc_hid_get(&dev->hid_list, id);
+       if (!hid)
+               hid = acc_hid_get(&dev->new_hid_list, id);
+       if (!hid) {
+               spin_unlock_irqrestore(&dev->lock, flags);
+               return -EINVAL;
+       }
+
+       list_move(&hid->list, &dev->dead_hid_list);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       schedule_work(&dev->hid_work);
+       return 0;
+}
+
 static int __init create_bulk_endpoints(struct acc_dev *dev,
                                struct usb_endpoint_descriptor *in_desc,
                                struct usb_endpoint_descriptor *out_desc)
@@ -353,7 +539,7 @@ static int __init create_bulk_endpoints(struct acc_dev *dev,
        return 0;
 
 fail:
-       printk(KERN_ERR "acc_bind() could not allocate requests\n");
+       pr_err("acc_bind() could not allocate requests\n");
        while ((req = req_get(dev, &dev->tx_idle)))
                acc_request_free(req, dev->ep_in);
        for (i = 0; i < RX_REQ_MAX; i++)
@@ -510,6 +696,8 @@ static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
                break;
        case ACCESSORY_IS_START_REQUESTED:
                return dev->start_requested;
+       case ACCESSORY_GET_AUDIO_MODE:
+               return dev->audio_mode;
        }
        if (!src)
                return -EINVAL;
@@ -540,7 +728,7 @@ static int acc_release(struct inode *ip, struct file *fp)
        return 0;
 }
 
-/* file operations for /dev/acc_usb */
+/* file operations for /dev/usb_accessory */
 static const struct file_operations acc_fops = {
        .owner = THIS_MODULE,
        .read = acc_read,
@@ -550,23 +738,47 @@ static const struct file_operations acc_fops = {
        .release = acc_release,
 };
 
+static int acc_hid_probe(struct hid_device *hdev,
+               const struct hid_device_id *id)
+{
+       int ret;
+
+       ret = hid_parse(hdev);
+       if (ret)
+               return ret;
+       return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
 static struct miscdevice acc_device = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "usb_accessory",
        .fops = &acc_fops,
 };
 
+static const struct hid_device_id acc_hid_table[] = {
+       { HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+       { }
+};
+
+static struct hid_driver acc_hid_driver = {
+       .name = "USB accessory",
+       .id_table = acc_hid_table,
+       .probe = acc_hid_probe,
+};
 
 static int acc_ctrlrequest(struct usb_composite_dev *cdev,
                                const struct usb_ctrlrequest *ctrl)
 {
        struct acc_dev  *dev = _acc_dev;
        int     value = -EOPNOTSUPP;
+       struct acc_hid_dev *hid;
+       int offset;
        u8 b_requestType = ctrl->bRequestType;
        u8 b_request = ctrl->bRequest;
        u16     w_index = le16_to_cpu(ctrl->wIndex);
        u16     w_value = le16_to_cpu(ctrl->wValue);
        u16     w_length = le16_to_cpu(ctrl->wLength);
+       unsigned long flags;
 
 /*
        printk(KERN_INFO "acc_ctrlrequest "
@@ -579,20 +791,56 @@ static int acc_ctrlrequest(struct usb_composite_dev *cdev,
                if (b_request == ACCESSORY_START) {
                        dev->start_requested = 1;
                        schedule_delayed_work(
-                               &dev->work, msecs_to_jiffies(10));
+                               &dev->start_work, msecs_to_jiffies(10));
                        value = 0;
                } else if (b_request == ACCESSORY_SEND_STRING) {
                        dev->string_index = w_index;
                        cdev->gadget->ep0->driver_data = dev;
                        cdev->req->complete = acc_complete_set_string;
                        value = w_length;
+               } else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+                               w_index == 0 && w_length == 0) {
+                       dev->audio_mode = w_value;
+                       value = 0;
+               } else if (b_request == ACCESSORY_REGISTER_HID) {
+                       value = acc_register_hid(dev, w_value, w_index);
+               } else if (b_request == ACCESSORY_UNREGISTER_HID) {
+                       value = acc_unregister_hid(dev, w_value);
+               } else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+                       spin_lock_irqsave(&dev->lock, flags);
+                       hid = acc_hid_get(&dev->new_hid_list, w_value);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       if (!hid) {
+                               value = -EINVAL;
+                               goto err;
+                       }
+                       offset = w_index;
+                       if (offset != hid->report_desc_offset
+                               || offset + w_length > hid->report_desc_len) {
+                               value = -EINVAL;
+                               goto err;
+                       }
+                       cdev->req->context = hid;
+                       cdev->req->complete = acc_complete_set_hid_report_desc;
+                       value = w_length;
+               } else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+                       spin_lock_irqsave(&dev->lock, flags);
+                       hid = acc_hid_get(&dev->hid_list, w_value);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       if (!hid) {
+                               value = -EINVAL;
+                               goto err;
+                       }
+                       cdev->req->context = hid;
+                       cdev->req->complete = acc_complete_send_hid_event;
+                       value = w_length;
                }
        } else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
                if (b_request == ACCESSORY_GET_PROTOCOL) {
                        *((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
                        value = sizeof(u16);
 
-                       /* clear any strings left over from a previous session */
+                       /* clear strings left over from a previous session */
                        memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
                        memset(dev->model, 0, sizeof(dev->model));
                        memset(dev->description, 0, sizeof(dev->description));
@@ -600,6 +848,7 @@ static int acc_ctrlrequest(struct usb_composite_dev *cdev,
                        memset(dev->uri, 0, sizeof(dev->uri));
                        memset(dev->serial, 0, sizeof(dev->serial));
                        dev->start_requested = 0;
+                       dev->audio_mode = 0;
                }
        }
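The handler above defines the accessory-side protocol for HID injection: ACCESSORY_REGISTER_HID carries the HID id in wValue and the total report descriptor length in wIndex, ACCESSORY_SET_HID_REPORT_DESC then uploads the descriptor in chunks with wIndex holding the running byte offset, and ACCESSORY_SEND_HID_EVENT delivers input reports. A hypothetical host-side sketch using libusb; the numeric request codes are assumptions (the authoritative values are the ACCESSORY_* constants in <linux/usb/f_accessory.h>), and the 64-byte chunk size is arbitrary:

#include <stdint.h>
#include <libusb.h>

#define ACC_REGISTER_HID		54	/* assumed value */
#define ACC_SET_HID_REPORT_DESC		56	/* assumed value */

static int acc_send_hid_desc(libusb_device_handle *h, uint16_t hid_id,
			     unsigned char *desc, uint16_t len)
{
	uint16_t off = 0;
	int r;

	/* register the HID device: wValue = id, wIndex = descriptor length */
	r = libusb_control_transfer(h,
			LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
			ACC_REGISTER_HID, hid_id, len, NULL, 0, 1000);
	if (r < 0)
		return r;

	/* upload the report descriptor; wIndex is the running offset */
	while (off < len) {
		uint16_t chunk = (uint16_t)(len - off > 64 ? 64 : len - off);

		r = libusb_control_transfer(h,
				LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
				ACC_SET_HID_REPORT_DESC, hid_id, off,
				desc + off, chunk, 1000);
		if (r < 0)
			return r;
		off += chunk;
	}
	return 0;
}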
 
@@ -612,6 +861,7 @@ static int acc_ctrlrequest(struct usb_composite_dev *cdev,
                                __func__);
        }
 
+err:
        if (value == -EOPNOTSUPP)
                VDBG(cdev,
                        "unknown class-specific control req "
@@ -631,6 +881,10 @@ acc_function_bind(struct usb_configuration *c, struct usb_function *f)
 
        DBG(cdev, "acc_function_bind dev: %p\n", dev);
 
+       ret = hid_register_driver(&acc_hid_driver);
+       if (ret)
+               return ret;
+
        dev->start_requested = 0;
 
        /* allocate interface ID(s) */
@@ -659,6 +913,36 @@ acc_function_bind(struct usb_configuration *c, struct usb_function *f)
        return 0;
 }
 
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+       struct acc_hid_dev *hid;
+       struct list_head *entry, *temp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       list_for_each_safe(entry, temp, &dev->hid_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               list_del(&hid->list);
+               list_add(&hid->list, &dev->dead_hid_list);
+       }
+       list_for_each_safe(entry, temp, &dev->new_hid_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               list_del(&hid->list);
+               list_add(&hid->list, &dev->dead_hid_list);
+       }
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+       hid_unregister_driver(&acc_hid_driver);
+       kill_all_hid_devices(dev);
+}
+
 static void
 acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
 {
@@ -670,14 +954,104 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
                acc_request_free(req, dev->ep_in);
        for (i = 0; i < RX_REQ_MAX; i++)
                acc_request_free(dev->rx_req[i], dev->ep_out);
+
+       acc_hid_unbind(dev);
 }
 
-static void acc_work(struct work_struct *data)
+static void acc_start_work(struct work_struct *data)
 {
        char *envp[2] = { "ACCESSORY=START", NULL };
        kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
 }
 
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+       struct hid_device *hid;
+       int ret;
+
+       hid = hid_allocate_device();
+       if (IS_ERR(hid))
+               return PTR_ERR(hid);
+
+       hid->ll_driver = &acc_hid_ll_driver;
+       hid->dev.parent = acc_device.this_device;
+
+       hid->bus = BUS_USB;
+       hid->vendor = HID_ANY_ID;
+       hid->product = HID_ANY_ID;
+       hid->driver_data = hdev;
+       ret = hid_add_device(hid);
+       if (ret) {
+               pr_err("can't add hid device: %d\n", ret);
+               hid_destroy_device(hid);
+               return ret;
+       }
+
+       hdev->hid = hid;
+       return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+       kfree(hid->report_desc);
+       kfree(hid);
+}
+
+static void acc_hid_work(struct work_struct *data)
+{
+       struct acc_dev *dev = _acc_dev;
+       struct list_head        *entry, *temp;
+       struct acc_hid_dev *hid;
+       struct list_head        new_list, dead_list;
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&new_list);
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* move hids that are ready for initialization to new_list */
+       list_for_each_safe(entry, temp, &dev->new_hid_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               if (hid->report_desc_offset == hid->report_desc_len)
+                       list_move(&hid->list, &new_list);
+       }
+
+       if (list_empty(&dev->dead_hid_list)) {
+               INIT_LIST_HEAD(&dead_list);
+       } else {
+               /* move all of dev->dead_hid_list to dead_list */
+               dead_list.prev = dev->dead_hid_list.prev;
+               dead_list.next = dev->dead_hid_list.next;
+               dead_list.next->prev = &dead_list;
+               dead_list.prev->next = &dead_list;
+               INIT_LIST_HEAD(&dev->dead_hid_list);
+       }
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       /* register new HID devices */
+       list_for_each_safe(entry, temp, &new_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               if (acc_hid_init(hid)) {
+                       pr_err("can't add HID device %p\n", hid);
+                       acc_hid_delete(hid);
+               } else {
+                       spin_lock_irqsave(&dev->lock, flags);
+                       list_move(&hid->list, &dev->hid_list);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+               }
+       }
+
+       /* remove dead HID devices */
+       list_for_each_safe(entry, temp, &dead_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               list_del(&hid->list);
+               if (hid->hid)
+                       hid_destroy_device(hid->hid);
+               acc_hid_delete(hid);
+       }
+}
+
 static int acc_function_set_alt(struct usb_function *f,
                unsigned intf, unsigned alt)
 {
@@ -769,7 +1143,11 @@ static int acc_setup(void)
        init_waitqueue_head(&dev->write_wq);
        atomic_set(&dev->open_excl, 0);
        INIT_LIST_HEAD(&dev->tx_idle);
-       INIT_DELAYED_WORK(&dev->work, acc_work);
+       INIT_LIST_HEAD(&dev->hid_list);
+       INIT_LIST_HEAD(&dev->new_hid_list);
+       INIT_LIST_HEAD(&dev->dead_hid_list);
+       INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+       INIT_WORK(&dev->hid_work, acc_hid_work);
 
        /* _acc_dev must be set before calling usb_gadget_register_driver */
        _acc_dev = dev;
@@ -782,10 +1160,16 @@ static int acc_setup(void)
 
 err:
        kfree(dev);
-       printk(KERN_ERR "USB accessory gadget driver failed to initialize\n");
+       pr_err("USB accessory gadget driver failed to initialize\n");
        return ret;
 }
 
+static void acc_disconnect(void)
+{
+       /* unregister all HID devices if USB is disconnected */
+       kill_all_hid_devices(_acc_dev);
+}
+
 static void acc_cleanup(void)
 {
        misc_deregister(&acc_device);
index 83e1f59385a2f3f697b5433a00c6b2e1796502ae..de61944d780f3937710dc81c050d603360a51510 100755 (executable)
@@ -111,6 +111,8 @@ static struct usb_descriptor_header *hs_adb_descs[] = {
        NULL,
 };
 
+static void adb_ready_callback(void);
+static void adb_closed_callback(void);
 
 /* temporary variable used between adb_open() and adb_gadget_bind() */
 static struct adb_dev *_adb_dev;
@@ -406,7 +408,7 @@ static ssize_t adb_write(struct file *fp, const char __user *buf,
 
 static int adb_open(struct inode *ip, struct file *fp)
 {
-       printk(KERN_INFO "adb_open\n");
+       pr_info("adb_open\n");
        if (!_adb_dev)
                return -ENODEV;
 
@@ -418,12 +420,17 @@ static int adb_open(struct inode *ip, struct file *fp)
        /* clear the error latch */
        _adb_dev->error = 0;
 
+       adb_ready_callback();
+
        return 0;
 }
 
 static int adb_release(struct inode *ip, struct file *fp)
 {
-       printk(KERN_INFO "adb_release\n");
+       pr_info("adb_release\n");
+
+       adb_closed_callback();
+
        adb_unlock(&_adb_dev->open_excl);
        return 0;
 }
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
new file mode 100644 (file)
index 0000000..23a7511
--- /dev/null
@@ -0,0 +1,825 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#define SAMPLE_RATE 44100
+/* Each frame is two 16 bit integers (one per channel) */
+#define BYTES_PER_FRAME 4
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 256
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE     0
+#define AUDIO_AS_INTERFACE     1
+#define AUDIO_NUM_INTERFACES   2
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bNumEndpoints =        0,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH        UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+       + UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+       + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+       .bLength =              UAC_DT_AC_HEADER_LENGTH,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_HEADER,
+       .bcdADC =               __constant_cpu_to_le16(0x0100),
+       .wTotalLength =         __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+       .bInCollection =        AUDIO_NUM_INTERFACES,
+       .baInterfaceNr = {
+               [0] =           AUDIO_AC_INTERFACE,
+               [1] =           AUDIO_AS_INTERFACE,
+       }
+};
+
+#define INPUT_TERMINAL_ID      1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+       .bLength =              UAC_DT_INPUT_TERMINAL_SIZE,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_INPUT_TERMINAL,
+       .bTerminalID =          INPUT_TERMINAL_ID,
+       .wTerminalType =        UAC_INPUT_TERMINAL_MICROPHONE,
+       .bAssocTerminal =       0,
+       .wChannelConfig =       0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID                2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+       .bLength                = UAC_DT_FEATURE_UNIT_SIZE(0),
+       .bDescriptorType        = USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype     = UAC_FEATURE_UNIT,
+       .bUnitID                = FEATURE_UNIT_ID,
+       .bSourceID              = INPUT_TERMINAL_ID,
+       .bControlSize           = 2,
+};
+
+#define OUTPUT_TERMINAL_ID     3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+       .bLength                = UAC_DT_OUTPUT_TERMINAL_SIZE,
+       .bDescriptorType        = USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype     = UAC_OUTPUT_TERMINAL,
+       .bTerminalID            = OUTPUT_TERMINAL_ID,
+       .wTerminalType          = UAC_TERMINAL_STREAMING,
+       .bAssocTerminal         = FEATURE_UNIT_ID,
+       .bSourceID              = FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bAlternateSetting =    0,
+       .bNumEndpoints =        0,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bAlternateSetting =    1,
+       .bNumEndpoints =        1,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+       .bLength =              UAC_DT_AS_HEADER_SIZE,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_AS_GENERAL,
+       .bTerminalLink =        INPUT_TERMINAL_ID,
+       .bDelay =               1,
+       .wFormatTag =           UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+       .bLength =              UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_FORMAT_TYPE,
+       .bFormatType =          UAC_FORMAT_TYPE_I,
+       .bSubframeSize =        2,
+       .bBitResolution =       16,
+       .bSamFreqType =         1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc  = {
+       .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_SYNC_SYNC
+                               | USB_ENDPOINT_XFER_ISOC,
+       .wMaxPacketSize =       __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+       .bInterval =            4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc  = {
+       .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_SYNC_SYNC
+                               | USB_ENDPOINT_XFER_ISOC,
+       .wMaxPacketSize =       __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+       .bInterval =            1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+       .bLength =              UAC_ISO_ENDPOINT_DESC_SIZE,
+       .bDescriptorType =      USB_DT_CS_ENDPOINT,
+       .bDescriptorSubtype =   UAC_EP_GENERAL,
+       .bmAttributes =         1,
+       .bLockDelayUnits =      1,
+       .wLockDelay =           __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+       (struct usb_descriptor_header *)&ac_interface_desc,
+       (struct usb_descriptor_header *)&ac_header_desc,
+
+       (struct usb_descriptor_header *)&input_terminal_desc,
+       (struct usb_descriptor_header *)&output_terminal_desc,
+       (struct usb_descriptor_header *)&feature_unit_desc,
+
+       (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+       (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+       (struct usb_descriptor_header *)&as_header_desc,
+
+       (struct usb_descriptor_header *)&as_type_i_desc,
+
+       (struct usb_descriptor_header *)&hs_as_in_ep_desc,
+       (struct usb_descriptor_header *)&as_iso_in_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+       (struct usb_descriptor_header *)&ac_interface_desc,
+       (struct usb_descriptor_header *)&ac_header_desc,
+
+       (struct usb_descriptor_header *)&input_terminal_desc,
+       (struct usb_descriptor_header *)&output_terminal_desc,
+       (struct usb_descriptor_header *)&feature_unit_desc,
+
+       (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+       (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+       (struct usb_descriptor_header *)&as_header_desc,
+
+       (struct usb_descriptor_header *)&as_type_i_desc,
+
+       (struct usb_descriptor_header *)&fs_as_in_ep_desc,
+       (struct usb_descriptor_header *)&as_iso_in_desc,
+       NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+       .info =                 SNDRV_PCM_INFO_MMAP |
+                               SNDRV_PCM_INFO_MMAP_VALID |
+                               SNDRV_PCM_INFO_BATCH |
+                               SNDRV_PCM_INFO_INTERLEAVED |
+                               SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+       .formats                = SNDRV_PCM_FMTBIT_S16_LE,
+       .channels_min           = 2,
+       .channels_max           = 2,
+       .rate_min               = SAMPLE_RATE,
+       .rate_max               = SAMPLE_RATE,
+
+       .buffer_bytes_max =     1024 * 1024,
+       .period_bytes_min =     64,
+       .period_bytes_max =     512 * 1024,
+       .periods_min =          2,
+       .periods_max =          1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+       int     card;
+       int     device;
+};
+
+struct audio_dev {
+       struct usb_function             func;
+       struct snd_card                 *card;
+       struct snd_pcm                  *pcm;
+       struct snd_pcm_substream *substream;
+
+       struct list_head                idle_reqs;
+       struct usb_ep                   *in_ep;
+       struct usb_endpoint_descriptor  *in_desc;
+
+       spinlock_t                      lock;
+
+       /* beginning, end and current position in our buffer */
+       void                            *buffer_start;
+       void                            *buffer_end;
+       void                            *buffer_pos;
+
+       /* byte size of a "period" */
+       unsigned int                    period;
+       /* bytes sent since last call to snd_pcm_period_elapsed */
+       unsigned int                    period_offset;
+       /* time we started playing */
+       ktime_t                         start_time;
+       /* number of frames sent since start_time */
+       s64                             frames_sent;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+       return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+       struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+       if (!req)
+               return NULL;
+
+       req->buf = kmalloc(buffer_size, GFP_KERNEL);
+       if (!req->buf) {
+               usb_ep_free_request(ep, req);
+               return NULL;
+       }
+       req->length = buffer_size;
+       return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+       if (req) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+       }
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       list_add_tail(&req->list, &audio->idle_reqs);
+       spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+       unsigned long flags;
+       struct usb_request *req;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       if (list_empty(&audio->idle_reqs)) {
+               req = NULL;
+       } else {
+               req = list_first_entry(&audio->idle_reqs, struct usb_request,
+                               list);
+               list_del(&req->list);
+       }
+       spin_unlock_irqrestore(&audio->lock, flags);
+       return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+       struct snd_pcm_runtime *runtime;
+       struct usb_request *req;
+       int length, length1, length2, ret;
+       s64 msecs;
+       s64 frames;
+       ktime_t now;
+
+       /* audio->substream will be null if we have been closed */
+       if (!audio->substream)
+               return;
+       /* audio->buffer_pos will be null if we have been stopped */
+       if (!audio->buffer_pos)
+               return;
+
+       runtime = audio->substream->runtime;
+
+       /* compute number of frames to send */
+       now = ktime_get();
+       msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time);
+       do_div(msecs, 1000000);
+       frames = msecs * SAMPLE_RATE;
+       do_div(frames, 1000);
+
+       /* Readjust our frames_sent if we fall too far behind.
+        * If we get too far behind it is better to drop some frames than
+        * to keep sending data too fast in an attempt to catch up.
+        */
+       if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+               audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+       frames -= audio->frames_sent;
+
+       /* We need to send something to keep the pipeline going */
+       if (frames <= 0)
+               frames = FRAMES_PER_MSEC;
+
+       while (frames > 0) {
+               req = audio_req_get(audio);
+               if (!req)
+                       break;
+
+               length = frames_to_bytes(runtime, frames);
+               if (length > IN_EP_MAX_PACKET_SIZE)
+                       length = IN_EP_MAX_PACKET_SIZE;
+
+               if (audio->buffer_pos + length > audio->buffer_end)
+                       length1 = audio->buffer_end - audio->buffer_pos;
+               else
+                       length1 = length;
+               memcpy(req->buf, audio->buffer_pos, length1);
+               if (length1 < length) {
+                       /* Wrap around and copy remaining length
+                        * at beginning of buffer.
+                        */
+                       length2 = length - length1;
+                       memcpy(req->buf + length1, audio->buffer_start,
+                                       length2);
+                       audio->buffer_pos = audio->buffer_start + length2;
+               } else {
+                       audio->buffer_pos += length1;
+                       if (audio->buffer_pos >= audio->buffer_end)
+                               audio->buffer_pos = audio->buffer_start;
+               }
+
+               req->length = length;
+               ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+               if (ret < 0) {
+                       pr_err("usb_ep_queue failed ret: %d\n", ret);
+                       audio_req_put(audio, req);
+                       break;
+               }
+
+               frames -= bytes_to_frames(runtime, length);
+               audio->frames_sent += bytes_to_frames(runtime, length);
+       }
+}
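
Editor's note: the pacing above works in whole milliseconds: after t ms of playback the function expects to have queued t * 44100 / 1000 frames (for example, 250 ms corresponds to 11025 frames), and each 256-byte request carries at most 256 / 4 = 64 frames. A hedged sketch of just that calculation, with a hypothetical helper name:

static s64 frames_due(ktime_t start, ktime_t now)
{
	s64 msecs = ktime_to_ns(now) - ktime_to_ns(start);
	s64 frames;

	do_div(msecs, 1000000);		/* elapsed time in whole ms */
	frames = msecs * SAMPLE_RATE;
	do_div(frames, 1000);		/* ms * 44100 / 1000 frames */
	return frames;
}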
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+       /* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+       struct audio_dev *audio = req->context;
+
+       pr_debug("audio_data_complete req->status %d req->actual %d\n",
+               req->status, req->actual);
+
+       audio_req_put(audio, req);
+
+       if (!audio->buffer_start)
+               return;
+
+       audio->period_offset += req->actual;
+       if (audio->period_offset >= audio->period) {
+               snd_pcm_period_elapsed(audio->substream);
+               audio->period_offset = 0;
+       }
+       audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+               const struct usb_ctrlrequest *ctrl)
+{
+       int value = -EOPNOTSUPP;
+       u16 ep = le16_to_cpu(ctrl->wIndex);
+       u16 len = le16_to_cpu(ctrl->wLength);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+
+       pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+                       ctrl->bRequest, w_value, len, ep);
+
+       switch (ctrl->bRequest) {
+       case UAC_SET_CUR:
+       case UAC_SET_MIN:
+       case UAC_SET_MAX:
+       case UAC_SET_RES:
+               value = len;
+               break;
+       default:
+               break;
+       }
+
+       return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+               const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev = f->config->cdev;
+       int value = -EOPNOTSUPP;
+       u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+       u16 len = le16_to_cpu(ctrl->wLength);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+       u8 *buf = cdev->req->buf;
+
+       pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+                       ctrl->bRequest, w_value, len, ep);
+
+       if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
+               switch (ctrl->bRequest) {
+               case UAC_GET_CUR:
+               case UAC_GET_MIN:
+               case UAC_GET_MAX:
+               case UAC_GET_RES:
+                       /* return our sample rate */
+                       buf[0] = (u8)SAMPLE_RATE;
+                       buf[1] = (u8)(SAMPLE_RATE >> 8);
+                       buf[2] = (u8)(SAMPLE_RATE >> 16);
+                       value = 3;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return value;
+}
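
Editor's note: the GET_* handlers above answer sample-rate queries with the UAC 1.0 three-byte little-endian encoding: 44100 is 0x00AC44, so the bytes written are 0x44, 0xAC, 0x00. A hedged sketch of the packing, with a hypothetical helper name:

static void pack_sample_rate(u8 buf[3], u32 rate)
{
	buf[0] = rate & 0xff;		/* 0x44 for 44100 */
	buf[1] = (rate >> 8) & 0xff;	/* 0xAC */
	buf[2] = (rate >> 16) & 0xff;	/* 0x00 */
}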
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev = f->config->cdev;
+       struct usb_request *req = cdev->req;
+       int value = -EOPNOTSUPP;
+       u16 w_index = le16_to_cpu(ctrl->wIndex);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+       u16 w_length = le16_to_cpu(ctrl->wLength);
+
+       /* composite driver infrastructure handles everything; interface
+        * activation uses set_alt().
+        */
+       switch (ctrl->bRequestType) {
+       case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+               value = audio_set_endpoint_req(f, ctrl);
+               break;
+
+       case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+               value = audio_get_endpoint_req(f, ctrl);
+               break;
+       }
+
+       /* respond with data transfer or status phase? */
+       if (value >= 0) {
+               pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+               req->zero = 0;
+               req->length = value;
+               req->complete = audio_control_complete;
+               value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+               if (value < 0)
+                       pr_err("audio response on err %d\n", value);
+       }
+
+       /* device either stalls (value < 0) or reports success */
+       return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+       struct audio_dev *audio = func_to_audio(f);
+
+       pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+       usb_ep_enable(audio->in_ep, audio->in_desc);
+       return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+       struct audio_dev        *audio = func_to_audio(f);
+
+       pr_debug("audio_disable\n");
+       usb_ep_disable(audio->in_ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+       u8 *sam_freq;
+       int rate;
+
+       /* Set channel numbers */
+       input_terminal_desc.bNrChannels = 2;
+       as_type_i_desc.bNrChannels = 2;
+
+       /* Set sample rates */
+       rate = SAMPLE_RATE;
+       sam_freq = as_type_i_desc.tSamFreq[0];
+       memcpy(sam_freq, &rate, 3);
+}
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       struct audio_dev *audio = func_to_audio(f);
+       int status;
+       struct usb_ep *ep;
+       struct usb_request *req;
+       int i;
+
+       audio_build_desc(audio);
+
+       /* allocate instance-specific interface IDs, and patch descriptors */
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       ac_interface_desc.bInterfaceNumber = status;
+
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       as_interface_alt_0_desc.bInterfaceNumber = status;
+       as_interface_alt_1_desc.bInterfaceNumber = status;
+
+       status = -ENODEV;
+
+       /* allocate our endpoint */
+       ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+       if (!ep)
+               goto fail;
+       audio->in_ep = ep;
+       ep->driver_data = audio; /* claim */
+
+       if (gadget_is_dualspeed(c->cdev->gadget))
+               hs_as_in_ep_desc.bEndpointAddress =
+                       fs_as_in_ep_desc.bEndpointAddress;
+
+       f->descriptors = fs_audio_desc;
+       f->hs_descriptors = hs_audio_desc;
+
+       for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+               req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+               if (req) {
+                       req->context = audio;
+                       req->complete = audio_data_complete;
+                       audio_req_put(audio, req);
+               } else
+                       status = -ENOMEM;
+       }
+
+fail:
+       return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct audio_dev *audio = func_to_audio(f);
+       struct usb_request *req;
+
+       while ((req = audio_req_get(audio)))
+               audio_request_free(req, audio->in_ep);
+
+       snd_card_free_when_closed(audio->card);
+       kfree(audio);
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+       audio->start_time = ktime_get();
+       audio->frames_sent = 0;
+       audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       audio->buffer_start = NULL;
+       audio->buffer_end = NULL;
+       audio->buffer_pos = NULL;
+       spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct audio_dev *audio = substream->private_data;
+
+       runtime->private_data = audio;
+       runtime->hw = audio_hw_info;
+       snd_pcm_limit_hw_rates(runtime);
+       runtime->hw.channels_max = 2;
+
+       audio->substream = substream;
+       return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+       struct audio_dev *audio = substream->private_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       audio->substream = NULL;
+       spin_unlock_irqrestore(&audio->lock, flags);
+
+       return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+                               struct snd_pcm_hw_params *params)
+{
+       unsigned int channels = params_channels(params);
+       unsigned int rate = params_rate(params);
+
+       if (rate != SAMPLE_RATE)
+               return -EINVAL;
+       if (channels != 2)
+               return -EINVAL;
+
+       return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+               params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+       return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct audio_dev *audio = runtime->private_data;
+
+       audio->period = snd_pcm_lib_period_bytes(substream);
+       audio->period_offset = 0;
+       audio->buffer_start = runtime->dma_area;
+       audio->buffer_end = audio->buffer_start
+               + snd_pcm_lib_buffer_bytes(substream);
+       audio->buffer_pos = audio->buffer_start;
+
+       return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct audio_dev *audio = runtime->private_data;
+       ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+       /* return offset of next frame to fill in our buffer */
+       return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+                                       int cmd)
+{
+       struct audio_dev *audio = substream->runtime->private_data;
+       int ret = 0;
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+               audio_pcm_playback_start(audio);
+               break;
+
+       case SNDRV_PCM_TRIGGER_STOP:
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+               audio_pcm_playback_stop(audio);
+               break;
+
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static struct snd_pcm_ops audio_playback_ops = {
+       .open           = audio_pcm_open,
+       .close          = audio_pcm_close,
+       .ioctl          = snd_pcm_lib_ioctl,
+       .hw_params      = audio_pcm_hw_params,
+       .hw_free        = audio_pcm_hw_free,
+       .prepare        = audio_pcm_prepare,
+       .trigger        = audio_pcm_playback_trigger,
+       .pointer        = audio_pcm_pointer,
+};
+
+int audio_source_bind_config(struct usb_configuration *c,
+               struct audio_source_config *config)
+{
+       struct audio_dev *audio;
+       struct snd_card *card;
+       struct snd_pcm *pcm;
+       int err;
+
+       config->card = -1;
+       config->device = -1;
+
+       audio = kzalloc(sizeof *audio, GFP_KERNEL);
+       if (!audio)
+               return -ENOMEM;
+
+       audio->func.name = "audio_source";
+
+       spin_lock_init(&audio->lock);
+
+       audio->func.bind = audio_bind;
+       audio->func.unbind = audio_unbind;
+       audio->func.set_alt = audio_set_alt;
+       audio->func.setup = audio_setup;
+       audio->func.disable = audio_disable;
+       audio->in_desc = &fs_as_in_ep_desc;
+
+       INIT_LIST_HEAD(&audio->idle_reqs);
+
+       err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+                       THIS_MODULE, 0, &card);
+       if (err)
+               goto snd_card_fail;
+
+       snd_card_set_dev(card, &c->cdev->gadget->dev);
+
+       err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+       if (err)
+               goto pcm_fail;
+       pcm->private_data = audio;
+       pcm->info_flags = 0;
+       audio->pcm = pcm;
+
+       strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+       snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+       snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+                               NULL, 0, 64 * 1024);
+
+       strlcpy(card->driver, "audio_source", sizeof(card->driver));
+       strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+       strlcpy(card->longname, "USB accessory audio source",
+               sizeof(card->longname));
+
+       err = snd_card_register(card);
+       if (err)
+               goto register_fail;
+
+       err = usb_add_function(c, &audio->func);
+       if (err)
+               goto add_fail;
+
+       config->card = pcm->card->number;
+       config->device = pcm->device;
+       audio->card = card;
+       return 0;
+
+add_fail:
+register_fail:
+pcm_fail:
+       snd_card_free(audio->card);
+snd_card_fail:
+       kfree(audio);
+       return err;
+}
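
Editor's note: a hedged usage sketch of the exported entry point above. audio_source_bind_config() and struct audio_source_config are the ones defined in this file; the enclosing function name is made up, standing in for a composite configuration's bind callback (such as the Android composite driver elsewhere in this series).

static struct audio_source_config audio_config = {
	.card = -1,
	.device = -1,
};

static int example_bind_audio(struct usb_configuration *c)
{
	int err;

	err = audio_source_bind_config(c, &audio_config);
	if (err)
		return err;

	/* audio_config.card / audio_config.device now identify the ALSA
	 * PCM that user space opens to stream audio to the host. */
	pr_info("audio_source on card %d device %d\n",
		audio_config.card, audio_config.device);
	return 0;
}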
index 19fffccc370d3af1e368b5ccd8a98b844c89fa1d..1cefb9f160711ccd9b854c102788016132b6a961 100644 (file)
@@ -720,7 +720,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
        if (code == FUNCTIONFS_INTERFACE_REVMAP) {
                struct ffs_function *func = ffs->func;
                ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
-       } else if (gadget->ops->ioctl) {
+       } else if (gadget && gadget->ops->ioctl) {
                ret = gadget->ops->ioctl(gadget, code, value);
        } else {
                ret = -ENOTTY;
index b37960f9e75312a106f5b8d969e3ba151072553f..0e64a47cd6b2a878fc2f7ffc67d8aceb90bc39ac 100644 (file)
@@ -373,7 +373,7 @@ int __init loopback_add(struct usb_composite_dev *cdev, bool autoresume)
 
        /* support autoresume for remote wakeup testing */
        if (autoresume)
-               sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+               loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
 
        /* support OTG systems */
        if (gadget_is_otg(cdev->gadget)) {
index ef0cd7fbe1a02728a6f91808e55401447ba15f12..daea926edef345f4a105113620fd363cd6e53e80 100755 (executable)
@@ -2390,7 +2390,7 @@ unknown_cmnd:
                common->data_size_from_cmnd = 0;
                sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
                reply = check_command(common, common->cmnd_size,
-                                     DATA_DIR_UNKNOWN, 0xff, 0, unknown);
+                                     DATA_DIR_UNKNOWN, ~0, 0, unknown);
                if (reply == 0) {
                        common->curlun->sense_data = SS_INVALID_COMMAND;
                        reply = -EINVAL;
index d03b11b51c8edd1410c67cd521aee53a4a2c95c1..96adf45d44c285e0940cf4566972a4e3ea3780ff 100644 (file)
@@ -755,8 +755,6 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f)
        rndis_deregister(rndis->config);
        rndis_exit();
 
-       rndis_string_defs[0].id = 0;
-
        if (gadget_is_dualspeed(c->cdev->gadget))
                usb_free_descriptors(f->hs_descriptors);
        usb_free_descriptors(f->descriptors);
@@ -796,14 +794,14 @@ rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        if (!can_support_rndis(c) || !ethaddr)
                return -EINVAL;
 
+       /* setup RNDIS itself */
+       status = rndis_init();
+       if (status < 0)
+               return status;
+
        /* maybe allocate device-global string IDs */
        if (rndis_string_defs[0].id == 0) {
 
-               /* ... and setup RNDIS itself */
-               status = rndis_init();
-               if (status < 0)
-                       return status;
-
                /* control interface label */
                status = usb_string_id(c->cdev);
                if (status < 0)
index 0360f56221ea21ed3eda1fc93cfde2ecd76bc08c..e358130a48572648f52942b7b3dd79d46fa36567 100644 (file)
@@ -2553,7 +2553,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
                fsg->data_size_from_cmnd = 0;
                sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
                if ((reply = check_command(fsg, fsg->cmnd_size,
-                               DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
+                               DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
                        fsg->curlun->sense_data = SS_INVALID_COMMAND;
                        reply = -EINVAL;
                }
index 4e4833168087565215274dd1dab672628cd303b9..44d789d27cf331702b9a62a32e737d73dec4f7cc 100644 (file)
@@ -717,6 +717,8 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
                lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
                lastreq->tail->next_td_ptr =
                        cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
+               /* Ensure the dTD's next-dTD pointer update is visible before priming */
+               wmb();
                /* Read prime bit, if 1 goto done */
                if (fsl_readl(&dr_regs->endpointprime) & bitmask)
                        goto out;
@@ -767,7 +769,7 @@ out:
  * @is_last: return flag if it is the last dTD of the request
  * return: pointer to the built dTD */
 static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
-               dma_addr_t *dma, int *is_last)
+               dma_addr_t *dma, int *is_last, gfp_t gfp_flags)
 {
        u32 swap_temp;
        struct ep_td_struct *dtd;
@@ -776,7 +778,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
        *length = min(req->req.length - req->req.actual,
                        (unsigned)EP_MAX_LENGTH_TRANSFER);
 
-       dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma);
+       dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma);
        if (dtd == NULL)
                return dtd;
 
@@ -826,7 +828,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
 }
 
 /* Generate dtd chain for a request */
-static int fsl_req_to_dtd(struct fsl_req *req)
+static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags)
 {
        unsigned        count;
        int             is_last;
@@ -835,7 +837,7 @@ static int fsl_req_to_dtd(struct fsl_req *req)
        dma_addr_t dma;
 
        do {
-               dtd = fsl_build_dtd(req, &count, &dma, &is_last);
+               dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags);
                if (dtd == NULL)
                        return -ENOMEM;
 
@@ -909,13 +911,11 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
        req->req.actual = 0;
        req->dtd_count = 0;
 
-       spin_lock_irqsave(&udc->lock, flags);
-
        /* build dtds and push them to device queue */
-       if (!fsl_req_to_dtd(req)) {
+       if (!fsl_req_to_dtd(req, gfp_flags)) {
+               spin_lock_irqsave(&udc->lock, flags);
                fsl_queue_td(ep, req);
        } else {
-               spin_unlock_irqrestore(&udc->lock, flags);
                return -ENOMEM;
        }
 
@@ -1294,7 +1294,7 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
                        ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        req->mapped = 1;
 
-       if (fsl_req_to_dtd(req) == 0)
+       if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0)
                fsl_queue_td(ep, req);
        else
                return -ENOMEM;
@@ -1378,7 +1378,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
        req->mapped = 1;
 
        /* prime the data phase */
-       if ((fsl_req_to_dtd(req) == 0))
+       if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
                fsl_queue_td(ep, req);
        else                    /* no mem */
                goto stall;
index 2523e54097bd3529dce275c9a8ef1f0cebff57c9..79afa8256cb3874aabccd836a5d946099dc36d10 100644 (file)
@@ -69,9 +69,9 @@ static struct usb_device_descriptor device_desc = {
        /* .bDeviceClass =              USB_CLASS_COMM, */
        /* .bDeviceSubClass =   0, */
        /* .bDeviceProtocol =   0, */
-       .bDeviceClass =         0xEF,
-       .bDeviceSubClass =      2,
-       .bDeviceProtocol =      1,
+       .bDeviceClass =         USB_CLASS_PER_INTERFACE,
+       .bDeviceSubClass =      0,
+       .bDeviceProtocol =      0,
        /* .bMaxPacketSize0 = f(hardware) */
 
        /* Vendor and product id can be overridden by module parameters.  */
index a56876aaf76cdc1dfbec0a331f70739a2a27b04f..febadaa2a80d8a32fbc1040ace61979134b9e20b 100644 (file)
@@ -1050,6 +1050,8 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 // FIXME don't call this with the spinlock held ...
                                if (copy_to_user (buf, dev->req->buf, len))
                                        retval = -EFAULT;
+                               else
+                                       retval = len;
                                clean_req (dev->gadget->ep0, dev->req);
                                /* NOTE userspace can't yet choose to stall */
                        }
index 68dbcc3e4cc2cb005bd2606a8bb64a4c35a12d81..1852c8a20c3d4c5dc6860cf9dbedac8bcae2075c 100644 (file)
@@ -320,6 +320,7 @@ struct pch_udc_ep {
  * @registered:                driver registered with system
  * @suspended:         driver in suspended state
  * @connected:         gadget driver associated
+ * @vbus_session:      VBUS session state (1 while VBUS is active)
  * @set_cfg_not_acked: pending acknowledgement for setup
  * @waiting_zlp_ack:   pending acknowledgement for ZLP
  * @data_requests:     DMA pool for data requests
@@ -346,6 +347,7 @@ struct pch_udc_dev {
                        registered:1,
                        suspended:1,
                        connected:1,
+                       vbus_session:1,
                        set_cfg_not_acked:1,
                        waiting_zlp_ack:1;
        struct pci_pool         *data_requests;
@@ -363,6 +365,7 @@ struct pch_udc_dev {
 #define PCI_DEVICE_ID_INTEL_EG20T_UDC  0x8808
 #define PCI_VENDOR_ID_ROHM             0x10DB
 #define PCI_DEVICE_ID_ML7213_IOH_UDC   0x801D
+#define PCI_DEVICE_ID_ML7831_IOH_UDC   0x8808
 
 static const char      ep0_string[] = "ep0in";
 static DEFINE_SPINLOCK(udc_stall_spinlock);    /* stall spin lock */
@@ -561,6 +564,29 @@ static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
        pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
 }
 
+/**
+ * pch_udc_reconnect() - Initialize the USB device controller and clear
+ *                       the disconnect status.
+ * @dev:               Reference to pch_udc_dev structure
+ */
+static void pch_udc_init(struct pch_udc_dev *dev);
+static void pch_udc_reconnect(struct pch_udc_dev *dev)
+{
+       pch_udc_init(dev);
+
+       /* enable device interrupts */
+       /* pch_udc_enable_interrupts() */
+       pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
+                       UDC_DEVINT_UR | UDC_DEVINT_ENUM);
+
+       /* Clear the disconnect */
+       pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+       pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
+       mdelay(1);
+       /* Resume USB signalling */
+       pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+}
+
 /**
  * pch_udc_vbus_session() - set or clear the disconnect status.
  * @dev:       Reference to pch_udc_dev structure
@@ -571,10 +597,18 @@ static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
                                          int is_active)
 {
-       if (is_active)
-               pch_udc_clear_disconnect(dev);
-       else
+       if (is_active) {
+               pch_udc_reconnect(dev);
+               dev->vbus_session = 1;
+       } else {
+               if (dev->driver && dev->driver->disconnect) {
+                       spin_unlock(&dev->lock);
+                       dev->driver->disconnect(&dev->gadget);
+                       spin_lock(&dev->lock);
+               }
                pch_udc_set_disconnect(dev);
+               dev->vbus_session = 0;
+       }
 }
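
Editor's note: the unlock/notify/relock pattern above recurs in pch_udc_pcd_pullup(), the reset handler and the suspend path later in this patch; the lock is dropped so the gadget driver's disconnect callback can call back into the UDC (which takes dev->lock) without deadlocking. A hedged sketch that factors it into one place; the helper name is hypothetical.

static void pch_udc_notify_disconnect(struct pch_udc_dev *dev)
{
	if (!dev->driver || !dev->driver->disconnect)
		return;

	/* drop the controller lock so the callback can re-enter the UDC */
	spin_unlock(&dev->lock);
	dev->driver->disconnect(&dev->gadget);
	spin_lock(&dev->lock);
}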
 
 /**
@@ -1134,7 +1168,17 @@ static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
        if (!gadget)
                return -EINVAL;
        dev = container_of(gadget, struct pch_udc_dev, gadget);
-       pch_udc_vbus_session(dev, is_on);
+       if (is_on) {
+               pch_udc_reconnect(dev);
+       } else {
+               if (dev->driver && dev->driver->disconnect) {
+                       spin_unlock(&dev->lock);
+                       dev->driver->disconnect(&dev->gadget);
+                       spin_lock(&dev->lock);
+               }
+               pch_udc_set_disconnect(dev);
+       }
+
        return 0;
 }
 
@@ -2338,8 +2382,11 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
                /* Complete request queue */
                empty_req_queue(ep);
        }
-       if (dev->driver && dev->driver->disconnect)
+       if (dev->driver && dev->driver->disconnect) {
+               spin_unlock(&dev->lock);
                dev->driver->disconnect(&dev->gadget);
+               spin_lock(&dev->lock);
+       }
 }
 
 /**
@@ -2374,6 +2421,11 @@ static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
        pch_udc_set_dma(dev, DMA_DIR_TX);
        pch_udc_set_dma(dev, DMA_DIR_RX);
        pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
+
+       /* enable device interrupts */
+       pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
+                                       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
+                                       UDC_DEVINT_SI | UDC_DEVINT_SC);
 }
 
 /**
@@ -2475,8 +2527,24 @@ static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
        if (dev_intr & UDC_DEVINT_SC)
                pch_udc_svc_cfg_interrupt(dev);
        /* USB Suspend interrupt */
-       if (dev_intr & UDC_DEVINT_US)
+       if (dev_intr & UDC_DEVINT_US) {
+               if (dev->driver
+                       && dev->driver->suspend) {
+                       spin_unlock(&dev->lock);
+                       dev->driver->suspend(&dev->gadget);
+                       spin_lock(&dev->lock);
+               }
+
+               if (dev->vbus_session == 0) {
+                       if (dev->driver && dev->driver->disconnect) {
+                               spin_unlock(&dev->lock);
+                               dev->driver->disconnect(&dev->gadget);
+                               spin_lock(&dev->lock);
+                       }
+                       pch_udc_reconnect(dev);
+               }
                dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
+       }
        /* Clear the SOF interrupt, if enabled */
        if (dev_intr & UDC_DEVINT_SOF)
                dev_dbg(&dev->pdev->dev, "SOF\n");
@@ -2502,6 +2570,14 @@ static irqreturn_t pch_udc_isr(int irq, void *pdev)
        dev_intr = pch_udc_read_device_interrupts(dev);
        ep_intr = pch_udc_read_ep_interrupts(dev);
 
+       /* On a hot plug the controller may hang up; detect that case here. */
+       if (dev_intr == ep_intr)
+               if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
+                       dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
+                       /* The controller is reset */
+                       pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
+                       return IRQ_HANDLED;
+               }
        if (dev_intr)
                /* Clear device interrupts */
                pch_udc_write_device_interrupts(dev, dev_intr);
@@ -2915,8 +2991,10 @@ static int pch_udc_probe(struct pci_dev *pdev,
        }
        pch_udc = dev;
        /* initialize the hardware */
-       if (pch_udc_pcd_init(dev))
+       if (pch_udc_pcd_init(dev)) {
+               retval = -ENODEV;
                goto finished;
+       }
        if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
                        dev)) {
                dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
@@ -2971,6 +3049,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
                .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
                .class_mask = 0xffffffff,
        },
+       {
+               PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
+               .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+               .class_mask = 0xffffffff,
+       },
        { 0 },
 };
 
index 271ef94668e719867d12c00a9ce9c1681c825f23..88a464cc96c08cc4ed96830366b89bf4e215bbae 100644 (file)
@@ -1602,7 +1602,7 @@ cleanup(void)
        if (status)
                ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
 
-       unregister_chrdev_region(g_printer_devno, 2);
+       unregister_chrdev_region(g_printer_devno, 1);
        class_destroy(usb_gadget_class);
        mutex_unlock(&usb_printer_gadget.lock_printer_io);
 }
index d3cdffea9c8a33e71dd8fb5bacaf41c7aaf240cd..bbfbde7415954148d97cb7921306e993e679bc0d 100644 (file)
@@ -1147,11 +1147,15 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
 
 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
 
+static bool rndis_initialized;
 
 int rndis_init(void)
 {
        u8 i;
 
+       if (rndis_initialized)
+               return 0;
+
        for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
                char name [20];
@@ -1178,6 +1182,7 @@ int rndis_init(void)
                INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
        }
 
+       rndis_initialized = true;
        return 0;
 }
 
@@ -1186,7 +1191,13 @@ void rndis_exit(void)
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
        u8 i;
        char name[20];
+#endif
 
+       if (!rndis_initialized)
+               return;
+       rndis_initialized = false;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
        for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
                sprintf(name, NAME_TEMPLATE, i);
                remove_proc_entry(name, NULL);
index 5b7919460fd2302c1d79ed7c5e5fff42f351d80e..01a23c1197f645092513d8e60333f1f80050b640 100644 (file)
@@ -29,7 +29,7 @@
 
 struct uvc_request_data
 {
-       unsigned int length;
+       __s32 length;
        __u8 data[60];
 };
 
index 5e807f083bc820e1251fbb7a08e8c023ed462edd..992f66b88c81ba3383ba971dfab16026246b78c3 100644 (file)
@@ -41,7 +41,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
        if (data->length < 0)
                return usb_ep_set_halt(cdev->gadget->ep0);
 
-       req->length = min(uvc->event_length, data->length);
+       req->length = min_t(unsigned int, uvc->event_length, data->length);
        req->zero = data->length < uvc->event_length;
        req->dma = DMA_ADDR_INVALID;
 
index 40a844c1dbb4945044a9afe2d94d2097bad29af3..3e2ccb0dd255f8d2608accda5263f5dbaa0e1b11 100644 (file)
@@ -808,7 +808,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
        next += temp;
 
        temp = scnprintf (next, size, "uframe %04x\n",
-                       ehci_readl(ehci, &ehci->regs->frame_index));
+                       ehci_read_frame_index(ehci));
        size -= temp;
        next += temp;
 
index f380bf97e5af71d614092c300d1e786949da1254..fc93d57609ac7a29ce5e47b5820b860ed386666a 100644 (file)
@@ -125,7 +125,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
         */
        if (pdata->init && pdata->init(pdev)) {
                retval = -ENODEV;
-               goto err3;
+               goto err4;
        }
 
        /* Enable USB controller, 83xx or 8536 */
@@ -216,6 +216,8 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
                               unsigned int port_offset)
 {
        u32 portsc;
+       struct usb_hcd *hcd = ehci_to_hcd(ehci);
+       void __iomem *non_ehci = hcd->regs;
 
        portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
        portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
@@ -231,6 +233,8 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
                portsc |= PORT_PTS_PTW;
                /* fall through */
        case FSL_USB2_PHY_UTMI:
+               /* enable UTMI PHY */
+               setbits32(non_ehci + FSL_SOC_USB_CTRL, CTRL_UTMI_PHY_EN);
                portsc |= PORT_PTS_UTMI;
                break;
        case FSL_USB2_PHY_NONE:
index 4918062211656c276f5904cc50d45605076ed026..bea5013ab7f5584cb0a293d9469345b313452108 100644 (file)
@@ -45,5 +45,6 @@
 #define FSL_SOC_USB_PRICTRL    0x40c   /* NOTE: big-endian */
 #define FSL_SOC_USB_SICTRL     0x410   /* NOTE: big-endian */
 #define FSL_SOC_USB_CTRL       0x500   /* NOTE: big-endian */
+#define CTRL_UTMI_PHY_EN       (1<<9)
 #define SNOOP_SIZE_2GB         0x1e
 #endif                         /* _EHCI_FSL_H */
index 9ff9abc7e3aacf52eded9e444bdedf0a86d08cf1..f89f77f1b339f4a73200a95750f3c5200fb84b84 100644 (file)
@@ -761,6 +761,35 @@ static int ehci_run (struct usb_hcd *hcd)
        return 0;
 }
 
+static int __maybe_unused ehci_setup (struct usb_hcd *hcd)
+{
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+       int retval;
+
+       ehci->regs = (void __iomem *)ehci->caps +
+           HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+       dbg_hcs_params(ehci, "reset");
+       dbg_hcc_params(ehci, "reset");
+
+       /* cache this readonly data; minimize chip reads */
+       ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+       ehci->sbrn = HCD_USB2;
+
+       retval = ehci_halt(ehci);
+       if (retval)
+               return retval;
+
+       /* data structure init */
+       retval = ehci_init(hcd);
+       if (retval)
+               return retval;
+
+       ehci_reset(ehci);
+
+       return 0;
+}
+
 /*-------------------------------------------------------------------------*/
 
 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
@@ -779,8 +808,13 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
                goto dead;
        }
 
+       /*
+        * We don't use STS_FLR, but some controllers don't like it to
+        * remain on, so mask it out along with the other status bits.
+        */
+       masked_status = status & (INTR_MASK | STS_FLR);
+
        /* Shared IRQ? */
-       masked_status = status & INTR_MASK;
        if (!masked_status || unlikely(hcd->state == HC_STATE_HALT)) {
                spin_unlock(&ehci->lock);
                return IRQ_NONE;
@@ -831,7 +865,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
                pcd_status = status;
 
                /* resume root hub? */
-               if (!(cmd & CMD_RUN))
+               if (hcd->state == HC_STATE_SUSPENDED)
                        usb_hcd_resume_root_hub(hcd);
 
                /* get per-port change detect bits */
@@ -1159,8 +1193,7 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
 static int ehci_get_frame (struct usb_hcd *hcd)
 {
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
-       return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
-               ehci->periodic_size;
+       return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
 }
 
 /*-------------------------------------------------------------------------*/
index 0f3a724800847da4c227d2c58f95bc7d1df5b6d2..f5d7fed4a4c0d728ece11c7678091d7bb3b5fd28 100644 (file)
@@ -1120,7 +1120,19 @@ static int ehci_hub_control (
                        if (!selector || selector > 5)
                                goto error;
                        ehci_quiesce(ehci);
+
+                       /* Put all enabled ports into suspend */
+                       while (ports--) {
+                               u32 __iomem *sreg =
+                                               &ehci->regs->port_status[ports];
+
+                               temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
+                               if (temp & PORT_PE)
+                                       ehci_writel(ehci, temp | PORT_SUSPEND,
+                                                       sreg);
+                       }
                        ehci_halt(ehci);
+                       temp = ehci_readl(ehci, status_reg);
                        temp |= selector << 16;
                        ehci_writel(ehci, temp, status_reg);
                        break;
index 1102ce65a3a9eff69fee9c612f65f4a6ead9c3a9..f76831480c6ef6a35bab6ca2169f21112ee172e0 100644 (file)
@@ -224,6 +224,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                        pci_dev_put(p_smbus);
                }
                break;
+       case PCI_VENDOR_ID_NETMOS:
+               /* MosChip frame-index-register bug */
+               ehci_info(ehci, "applying MosChip frame-index workaround\n");
+               ehci->frame_index_bug = 1;
+               break;
        }
 
        /* optional debug port, normally in the first BAR */
@@ -352,7 +357,9 @@ static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
 {
        return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
                pdev->vendor == PCI_VENDOR_ID_INTEL &&
-               pdev->device == 0x1E26;
+               (pdev->device == 0x1E26 ||
+                pdev->device == 0x8C2D ||
+                pdev->device == 0x8C26);
 }
 
 static void ehci_enable_xhci_companion(void)
index 271987952e338e2f60ea14f7d21c88a81f75981a..e4dd26a8b2b14f6529f5f6e8c39908ef7059bab2 100644 (file)
@@ -649,7 +649,7 @@ qh_urb_transaction (
        /*
         * data transfer stage:  buffer setup
         */
-       i = urb->num_sgs;
+       i = urb->num_mapped_sgs;
        if (len > 0 && i > 0) {
                sg = urb->sg;
                buf = sg_dma_address(sg);
index 6c9fbe352f7359e7ff6c6c0d8aab1d0bf3c35ccb..8949b239deccac8ab31f7a0a2c1560abef8f3575 100644 (file)
 
 static int ehci_get_frame (struct usb_hcd *hcd);
 
+#ifdef CONFIG_PCI
+
+static unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
+{
+       unsigned uf;
+
+       /*
+        * The MosChip MCS9990 controller updates its microframe counter
+        * a little before the frame counter, and occasionally we will read
+        * the invalid intermediate value.  Avoid problems by checking the
+        * microframe number (the low-order 3 bits); if they are 0 then
+        * re-read the register to get the correct value.
+        */
+       uf = ehci_readl(ehci, &ehci->regs->frame_index);
+       if (unlikely(ehci->frame_index_bug && ((uf & 7) == 0)))
+               uf = ehci_readl(ehci, &ehci->regs->frame_index);
+       return uf;
+}
+
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -482,7 +503,7 @@ static int enable_periodic (struct ehci_hcd *ehci)
        ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
 
        /* make sure ehci_work scans these */
-       ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
+       ehci->next_uframe = ehci_read_frame_index(ehci)
                % (ehci->periodic_size << 3);
        if (unlikely(ehci->broken_periodic))
                ehci->last_periodic_enable = ktime_get_real();
@@ -1412,7 +1433,7 @@ iso_stream_schedule (
                goto fail;
        }
 
-       now = ehci_readl(ehci, &ehci->regs->frame_index) & (mod - 1);
+       now = ehci_read_frame_index(ehci) & (mod - 1);
 
        /* Typical case: reuse current schedule, stream is still active.
         * Hopefully there are no gaps from the host falling behind
@@ -1458,30 +1479,36 @@ iso_stream_schedule (
         * jump until after the queue is primed.
         */
        else {
+               int done = 0;
                start = SCHEDULE_SLOP + (now & ~0x07);
 
                /* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */
 
-               /* find a uframe slot with enough bandwidth */
-               next = start + period;
-               for (; start < next; start++) {
-
+               /* find a uframe slot with enough bandwidth.
+                * Early uframes are more precious because full-speed
+                * iso IN transfers can't use late uframes,
+                * and therefore they should be allocated last.
+                */
+               next = start;
+               start += period;
+               do {
+                       start--;
                        /* check schedule: enough space? */
                        if (stream->highspeed) {
                                if (itd_slot_ok(ehci, mod, start,
                                                stream->usecs, period))
-                                       break;
+                                       done = 1;
                        } else {
                                if ((start % 8) >= 6)
                                        continue;
                                if (sitd_slot_ok(ehci, mod, stream,
                                                start, sched, period))
-                                       break;
+                                       done = 1;
                        }
-               }
+               } while (start > next && !done);
 
                /* no room in the schedule */
-               if (start == next) {
+               if (!done) {
                        ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
                                urb, now, now + mod);
                        status = -ENOSPC;
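
The rewritten loop above scans the candidate window backwards and stops at the first slot with enough bandwidth, so the latest usable microframe is taken and the more precious early uframes stay free for full-speed iso IN transfers. A standalone sketch of that search order, with fits() standing in for the itd_slot_ok()/sitd_slot_ok() checks:

#include <stdbool.h>

/* Return the latest slot in [next, next + period) that fits, or -1 if none
 * does; the do/while mirrors the rewrite above, including the combined
 * window-bound/done exit condition. */
static int pick_latest_fitting_slot(unsigned int next, unsigned int period,
                                    bool (*fits)(unsigned int slot))
{
        unsigned int slot = next + period;
        int done = 0;

        do {
                slot--;
                if (fits(slot))
                        done = 1;
        } while (slot > next && !done);

        return done ? (int)slot : -1;
}
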
@@ -2279,7 +2306,7 @@ scan_periodic (struct ehci_hcd *ehci)
         */
        now_uframe = ehci->next_uframe;
        if (HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
-               clock = ehci_readl(ehci, &ehci->regs->frame_index);
+               clock = ehci_read_frame_index(ehci);
                clock_frame = (clock >> 3) & (ehci->periodic_size - 1);
        } else  {
                clock = now_uframe + mod - 1;
@@ -2458,8 +2485,7 @@ restart:
                                        || ehci->periodic_sched == 0)
                                break;
                        ehci->next_uframe = now_uframe;
-                       now = ehci_readl(ehci, &ehci->regs->frame_index) &
-                                       (mod - 1);
+                       now = ehci_read_frame_index(ehci) & (mod - 1);
                        if (now_uframe == now)
                                break;
 
index 9706c2b64a947b7fee05df62ced3bfb8594082ce..db0d14e47e382a96dac820afd6164c592169e25c 100644 (file)
@@ -137,6 +137,7 @@ struct ehci_hcd {                   /* one per controller */
        unsigned                fs_i_thresh:1;  /* Intel iso scheduling */
        unsigned                use_dummy_qh:1; /* AMD Frame List table quirk*/
        unsigned                has_synopsys_hc_bug:1; /* Synopsys HC */
+       unsigned                frame_index_bug:1; /* MosChip (AKA NetMos) */
 
        /* required for usb32 quirk */
        #define OHCI_CTRL_HCFS          (3 << 6)
@@ -755,6 +756,22 @@ static inline void ehci_sync_mem()
 
 /*-------------------------------------------------------------------------*/
 
+#ifdef CONFIG_PCI
+
+/* For working around the MosChip frame-index-register bug */
+static unsigned ehci_read_frame_index(struct ehci_hcd *ehci);
+
+#else
+
+static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
+{
+       return ehci_readl(ehci, &ehci->regs->frame_index);
+}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
 #ifndef DEBUG
 #define STUB_DEBUG_FILES
 #endif /* DEBUG */
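
Taken together, the hunks above add ehci_read_frame_index() behind CONFIG_PCI (with a plain ehci_readl() fallback inlined here) so every reader of the frame index goes through the MosChip workaround: if the low three microframe bits read as zero while the quirk flag is set, the register is read a second time. A standalone sketch of that re-read pattern; read_reg() and the flag parameter are inventions of the sketch:

#include <stdbool.h>
#include <stdint.h>

/* Read a frame-index-style register on hardware whose low three bits
 * (the microframe count) may briefly be inconsistent with the upper bits:
 * when the bug flag is set and the microframe field reads as 0, read once
 * more and trust the second value. */
static uint32_t read_frame_index(uint32_t (*read_reg)(void), bool frame_index_bug)
{
        uint32_t uf = read_reg();

        if (frame_index_bug && (uf & 7) == 0)
                uf = read_reg();
        return uf;
}
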
index a42ef380e917ddd4c347418880467180e3929b2e..2df851b4bc7c1c474acdc65dc6e8143f38dbf7d1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Freescale QUICC Engine USB Host Controller Driver
  *
- * Copyright (c) Freescale Semicondutor, Inc. 2006.
+ * Copyright (c) Freescale Semicondutor, Inc. 2006, 2011.
  *               Shlomi Gridish <gridish@freescale.com>
  *               Jerry Huang <Chang-Ming.Huang@freescale.com>
  * Copyright (c) Logic Product Development, Inc. 2007
@@ -810,9 +810,11 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
                ed->dev_addr = usb_pipedevice(urb->pipe);
                ed->max_pkt_size = usb_maxpacket(urb->dev, urb->pipe,
                        usb_pipeout(urb->pipe));
+               /* setup stage */
                td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++, FHCI_TA_SETUP,
                        USB_TD_TOGGLE_DATA0, urb->setup_packet, 8, 0, 0, true);
 
+               /* data stage */
                if (data_len > 0) {
                        td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
                                usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
@@ -820,9 +822,18 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
                                USB_TD_TOGGLE_DATA1, data, data_len, 0, 0,
                                true);
                }
-               td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
-                       usb_pipeout(urb->pipe) ? FHCI_TA_IN : FHCI_TA_OUT,
-                       USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+
+               /* status stage */
+               if (data_len > 0)
+                       td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+                               (usb_pipeout(urb->pipe) ? FHCI_TA_IN :
+                                                         FHCI_TA_OUT),
+                               USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+               else
+                       td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
+                               FHCI_TA_IN,
+                               USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
+
                urb_state = US_CTRL_SETUP;
                break;
        case FHCI_TF_ISO:
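
The hunk above splits the control URB into explicit setup, data and status stages, and picks the status-stage direction per the USB control-transfer rules: IN when there is no data stage, otherwise the opposite of the data stage. A small standalone sketch of that rule; the enum is invented here and corresponds to FHCI_TA_IN/FHCI_TA_OUT in the driver:

#include <stdbool.h>
#include <stddef.h>

enum xfer_dir { XFER_IN, XFER_OUT };

/* Direction of a control transfer's status stage, per the logic above:
 * no data stage -> status is IN; otherwise status runs opposite to data. */
static enum xfer_dir status_stage_dir(bool data_stage_is_out, size_t data_len)
{
        if (data_len == 0)
                return XFER_IN;
        return data_stage_is_out ? XFER_IN : XFER_OUT;
}
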
index 55d3d5859ac5667da2332a3a782588e4541bbdc9..840beda66dd94aa378c506918ffad093e1ba4052 100644 (file)
@@ -1583,6 +1583,9 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
        int retval = 0;
 
        spin_lock_irqsave(&priv->lock, spinflags);
+       retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+       if (retval)
+               goto out;
 
        qh = urb->ep->hcpriv;
        if (!qh) {
index f9cf3f04b7424bd2299dc52f1662cdd6443e050b..23107e230530a4b4aaeec1800f28824686b90e7f 100644 (file)
@@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd)
        struct ohci_hcd *ohci;
 
        ohci = hcd_to_ohci (hcd);
-       ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
-       ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+       ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
 
-       /* If the SHUTDOWN quirk is set, don't put the controller in RESET */
-       ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ?
-                       OHCI_CTRL_RWC | OHCI_CTRL_HCFS :
-                       OHCI_CTRL_RWC);
-       ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
+       /* Software reset, after which the controller goes into SUSPEND */
+       ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
+       ohci_readl(ohci, &ohci->regs->cmdstatus);       /* flush the writes */
+       udelay(10);
 
-       /* flush the writes */
-       (void) ohci_readl (ohci, &ohci->regs->control);
+       ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
 }
 
 static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
index 9154615292db7a98ad5126c0fc5c9446b90bf119..2f00040fc408c78a4ef1e9bdab138344249d1fa5 100644 (file)
@@ -356,10 +356,7 @@ static void ohci_finish_controller_resume(struct usb_hcd *hcd)
                msleep(20);
        }
 
-       /* Does the root hub have a port wakeup pending? */
-       if (ohci_readl(ohci, &ohci->regs->intrstatus) &
-                       (OHCI_INTR_RD | OHCI_INTR_RHSC))
-               usb_hcd_resume_root_hub(hcd);
+       usb_hcd_resume_root_hub(hcd);
 }
 
 /* Carry out polling-, autostop-, and autoresume-related state changes */
index ad8166c681e2894424b81850aa3a366108d8d11c..bc01b064585ac9da1d232106586d71937b78db66 100644 (file)
@@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
        return 0;
 }
 
-/* nVidia controllers continue to drive Reset signalling on the bus
- * even after system shutdown, wasting power.  This flag tells the
- * shutdown routine to leave the controller OPERATIONAL instead of RESET.
- */
-static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
-{
-       struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-       struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
-       /* Evidently nVidia fixed their later hardware; this is a guess at
-        * the changeover point.
-        */
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB          0x026d
-
-       if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
-               ohci->flags |= OHCI_QUIRK_SHUTDOWN;
-               ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
-       }
-
-       return 0;
-}
-
 static void sb800_prefetch(struct ohci_hcd *ohci, int on)
 {
        struct pci_dev *pdev;
@@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = {
                PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
                .driver_data = (unsigned long)ohci_quirk_amd700,
        },
-       {
-               PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
-               .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
-       },
 
        /* FIXME for some of the early AMD 760 southbridges, OHCI
         * won't work at all.  blacklist them.
index 35e5fd640ce75388c7f373544c62e965f34c0111..0795b934d00c9709800948c137a1aa84fd96e47d 100644 (file)
@@ -403,7 +403,6 @@ struct ohci_hcd {
 #define        OHCI_QUIRK_HUB_POWER    0x100                   /* distrust firmware power/oc setup */
 #define        OHCI_QUIRK_AMD_PLL      0x200                   /* AMD PLL quirk*/
 #define        OHCI_QUIRK_AMD_PREFETCH 0x400                   /* pre-fetch for ISO transfer */
-#define        OHCI_QUIRK_SHUTDOWN     0x800                   /* nVidia power bug */
        // there are also chip quirks/bugs in init logic
 
        struct work_struct      nec_work;       /* Worker for NEC quirk */
index 629a96813fd66a9ebde456a2232a572c13f13dba..0757b1934da53f3733781cc8cd9f4d7035fe2d0d 100644 (file)
@@ -36,6 +36,7 @@
 #define OHCI_INTRENABLE                0x10
 #define OHCI_INTRDISABLE       0x14
 #define OHCI_FMINTERVAL                0x34
+#define OHCI_HCFS              (3 << 6)        /* hc functional state */
 #define OHCI_HCR               (1 << 0)        /* host controller reset */
 #define OHCI_OCR               (1 << 3)        /* ownership change request */
 #define OHCI_CTRL_RWC          (1 << 9)        /* remote wakeup connected */
@@ -465,6 +466,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
 {
        void __iomem *base;
        u32 control;
+       u32 fminterval;
+       int cnt;
 
        if (!mmio_resource_enabled(pdev, 0))
                return;
@@ -497,41 +500,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
        }
 #endif
 
-       /* reset controller, preserving RWC (and possibly IR) */
-       writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
-       readl(base + OHCI_CONTROL);
+       /* disable interrupts */
+       writel((u32) ~0, base + OHCI_INTRDISABLE);
 
-       /* Some NVIDIA controllers stop working if kept in RESET for too long */
-       if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
-               u32 fminterval;
-               int cnt;
+       /* Reset the USB bus, if the controller isn't already in RESET */
+       if (control & OHCI_HCFS) {
+               /* Go into RESET, preserving RWC (and possibly IR) */
+               writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+               readl(base + OHCI_CONTROL);
 
-               /* drive reset for at least 50 ms (7.1.7.5) */
+               /* drive bus reset for at least 50 ms (7.1.7.5) */
                msleep(50);
+       }
 
-               /* software reset of the controller, preserving HcFmInterval */
-               fminterval = readl(base + OHCI_FMINTERVAL);
-               writel(OHCI_HCR, base + OHCI_CMDSTATUS);
-
-               /* reset requires max 10 us delay */
-               for (cnt = 30; cnt > 0; --cnt) {        /* ... allow extra time */
-                       if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
-                               break;
-                       udelay(1);
-               }
-               writel(fminterval, base + OHCI_FMINTERVAL);
+       /* software reset of the controller, preserving HcFmInterval */
+       fminterval = readl(base + OHCI_FMINTERVAL);
+       writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 
-               /* Now we're in the SUSPEND state with all devices reset
-                * and wakeups and interrupts disabled
-                */
+       /* reset requires max 10 us delay */
+       for (cnt = 30; cnt > 0; --cnt) {        /* ... allow extra time */
+               if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
+                       break;
+               udelay(1);
        }
+       writel(fminterval, base + OHCI_FMINTERVAL);
 
-       /*
-        * disable interrupts
-        */
-       writel(~(u32)0, base + OHCI_INTRDISABLE);
-       writel(~(u32)0, base + OHCI_INTRSTATUS);
-
+       /* Now the controller is safely in SUSPEND and nothing can wake it up */
        iounmap(base);
 }
 
@@ -626,7 +620,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
        void __iomem *base, *op_reg_base;
        u32     hcc_params, cap, val;
        u8      offset, cap_length;
-       int     wait_time, delta, count = 256/4;
+       int     wait_time, count = 256/4;
 
        if (!mmio_resource_enabled(pdev, 0))
                return;
@@ -672,11 +666,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
                writel(val, op_reg_base + EHCI_USBCMD);
 
                wait_time = 2000;
-               delta = 100;
                do {
                        writel(0x3f, op_reg_base + EHCI_USBSTS);
-                       udelay(delta);
-                       wait_time -= delta;
+                       udelay(100);
+                       wait_time -= 100;
                        val = readl(op_reg_base + EHCI_USBSTS);
                        if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
                                break;
@@ -718,12 +711,28 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
        return -ETIMEDOUT;
 }
 
-bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+#define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI    0x8C31
+
+bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev)
 {
        return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
                pdev->vendor == PCI_VENDOR_ID_INTEL &&
                pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
 }
+
+/* The Intel Lynx Point chipset also has switchable ports. */
+bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev)
+{
+       return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+               pdev->vendor == PCI_VENDOR_ID_INTEL &&
+               pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI;
+}
+
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+{
+       return usb_is_intel_ppt_switchable_xhci(pdev) ||
+               usb_is_intel_lpt_switchable_xhci(pdev);
+}
 EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
 
 /*
@@ -831,9 +840,13 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
                }
        }
 
-       /* Disable any BIOS SMIs */
-       writel(XHCI_LEGACY_DISABLE_SMI,
-                       base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+       val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+       /* Mask off (turn off) any enabled SMIs */
+       val &= XHCI_LEGACY_DISABLE_SMI;
+       /* Mask all SMI events bits, RW1C */
+       val |= XHCI_LEGACY_SMI_EVENTS;
+       /* Disable any BIOS SMIs and clear all SMI events */
+       writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
 
        if (usb_is_intel_switchable_xhci(pdev))
                usb_enable_xhci_ports(pdev);
@@ -873,6 +886,22 @@ hc_init:
 
 static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
 {
+       /* Skip NetLogic MIPS SoC's internal PCI USB controller.
+        * This device does not need/support EHCI/OHCI handoff.
+        */
+       if (pdev->vendor == 0x184e)     /* vendor Netlogic */
+               return;
+       if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
+                       pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
+                       pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
+                       pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
+               return;
+
+       if (pci_enable_device(pdev) < 0) {
+               dev_warn(&pdev->dev, "Can't enable PCI device, "
+                               "BIOS handoff failed.\n");
+               return;
+       }
        if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
                quirk_usb_handoff_uhci(pdev);
        else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
@@ -881,5 +910,6 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
                quirk_usb_disable_ehci(pdev);
        else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
                quirk_usb_handoff_xhci(pdev);
+       pci_disable_device(pdev);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
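
The handoff code above polls status registers in fixed 100 µs steps until the controller reports halted (or reads back as all ones, meaning the device is gone) or a time budget runs out; the separate delta variable is dropped in favor of the constant. A userspace-flavored sketch of that bounded poll, assuming a read_status() callback and usleep() in place of udelay(); the HALTED bit value is a stand-in, and the status-acknowledge write done inside the EHCI loop is left out of the sketch:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define STS_HALTED  (1u << 12)          /* stand-in for the controller's halted bit */

/* Poll until the status shows halted, reads as all-ones (device vanished),
 * or the microsecond budget is spent; returns true if the wait succeeded. */
static bool wait_for_halt(uint32_t (*read_status)(void), int budget_us)
{
        do {
                usleep(100);            /* the kernel code uses udelay(100) */
                budget_us -= 100;
                uint32_t val = read_status();

                if (val == UINT32_MAX || (val & STS_HALTED))
                        return true;
        } while (budget_us > 0);
        return false;
}
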
index 84ed28b34f934d86ecef647d73580309baf3f2b8..82539913ad847d1e007662a5b665daf0927b524c 100644 (file)
@@ -943,7 +943,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
        if (usb_pipein(urb->pipe))
                status |= TD_CTRL_SPD;
 
-       i = urb->num_sgs;
+       i = urb->num_mapped_sgs;
        if (len > 0 && i > 0) {
                sg = urb->sg;
                data = sg_dma_address(sg);
index d6e175428618d4b31f69bd3d323e7c5aef93deae..76083ae9213800cf48ecc1fa2208def7c1cf4542 100644 (file)
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
 {
        qset->td_start = qset->td_end = qset->ntds = 0;
 
-       qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+       qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
@@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
 
        remaining = urb->transfer_buffer_length;
 
-       for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+       for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
                dma_addr_t dma_addr;
                size_t dma_remaining;
                dma_addr_t sp, ep;
@@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
 
        remaining = urb->transfer_buffer_length;
 
-       for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+       for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
                size_t len;
                size_t sg_remaining;
                void *orig;
index ce5c9e51748e7632dd3480428675fc4d73fce004..4206f6bef6fb4dc0ce3f2a827a57819ce2ebed6a 100644 (file)
@@ -62,8 +62,9 @@
 /* USB Legacy Support Control and Status Register  - section 7.1.2 */
 /* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
 #define XHCI_LEGACY_CONTROL_OFFSET     (0x04)
-/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
-#define        XHCI_LEGACY_DISABLE_SMI         ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define        XHCI_LEGACY_DISABLE_SMI         ((0x7 << 1) + (0xff << 5) + (0x7 << 17))
+#define XHCI_LEGACY_SMI_EVENTS         (0x7 << 29)
 
 /* command register values to disable interrupts and halt the HC */
 /* start/stop HC execution - do not write unless HC is halted*/
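
The widened XHCI_LEGACY_DISABLE_SMI mask and the new XHCI_LEGACY_SMI_EVENTS mask pair with the read-modify-write added to quirk_usb_handoff_xhci(): preserved bits are kept, the SMI enables are cleared, and the RW1C event bits are written as ones so any pending events are acknowledged. A standalone sketch of that computation reusing the same mask values:

#include <stdint.h>

#define XHCI_LEGACY_DISABLE_SMI   ((0x7u << 1) + (0xffu << 5) + (0x7u << 17))
#define XHCI_LEGACY_SMI_EVENTS    (0x7u << 29)

/* Given the current legacy control/status value, compute the value to write
 * back: preserved bits kept, SMI-enable bits cleared, RW1C event bits set so
 * the write clears them. */
static uint32_t legacy_smi_disable(uint32_t cur)
{
        uint32_t val = cur & XHCI_LEGACY_DISABLE_SMI;

        val |= XHCI_LEGACY_SMI_EVENTS;
        return val;
}
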
index 723f8231193d95cb92cad93adcc3c47e917b8933..7520ebb44548b63adc620614e44915e519c7de98 100644 (file)
@@ -75,7 +75,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
         */
        memset(port_removable, 0, sizeof(port_removable));
        for (i = 0; i < ports; i++) {
-               portsc = xhci_readl(xhci, xhci->usb3_ports[i]);
+               portsc = xhci_readl(xhci, xhci->usb2_ports[i]);
                /* If a device is removable, PORTSC reports a 0, same as in the
                 * hub descriptor DeviceRemovable bits.
                 */
@@ -392,6 +392,20 @@ static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array)
        return max_ports;
 }
 
+/* Test and clear port RWC bit */
+void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+                               int port_id, u32 port_bit)
+{
+       u32 temp;
+
+       temp = xhci_readl(xhci, port_array[port_id]);
+       if (temp & port_bit) {
+               temp = xhci_port_state_to_neutral(temp);
+               temp |= port_bit;
+               xhci_writel(xhci, temp, port_array[port_id]);
+       }
+}
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                u16 wIndex, char *buf, u16 wLength)
 {
@@ -938,12 +952,8 @@ int xhci_bus_resume(struct usb_hcd *hcd)
                        spin_lock_irqsave(&xhci->lock, flags);
 
                        /* Clear PLC */
-                       temp = xhci_readl(xhci, port_array[port_index]);
-                       if (temp & PORT_PLC) {
-                               temp = xhci_port_state_to_neutral(temp);
-                               temp |= PORT_PLC;
-                               xhci_writel(xhci, temp, port_array[port_index]);
-                       }
+                       xhci_test_and_clear_bit(xhci, port_array, port_index,
+                                               PORT_PLC);
 
                        slot_id = xhci_find_slot_id_by_port(hcd,
                                        xhci, port_index + 1);
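
xhci_test_and_clear_bit(), factored out above, handles write-1-to-clear port-change bits: writing the register back verbatim would acknowledge every pending change, so the value is first neutralized (all RW1C bits masked off, which is what xhci_port_state_to_neutral() does in the driver) and then only the requested bit is set. Unlike the blanket event-clear in the pci-quirks hunk earlier, this acknowledges exactly one bit. A standalone sketch with a fake register; the RW1C mask value here is invented:

#include <stdint.h>

#define RW1C_CHANGE_BITS  0x00fe0000u   /* invented mask: the write-1-to-clear bits */

static void test_and_clear_bit32(volatile uint32_t *reg, uint32_t bit)
{
        uint32_t v = *reg;

        if (v & bit) {
                v &= ~RW1C_CHANGE_BITS; /* "neutral" value: acknowledge nothing by default */
                v |= bit;               /* ...then acknowledge exactly the requested bit */
                *reg = v;
        }
}
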
index fcb7f7efc86db903544aa8601fb20e8e874f4b33..b455f4ca076a9a464a5f5c0b56b01c825bba2193 100644 (file)
@@ -81,7 +81,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-               struct xhci_segment *next, bool link_trbs)
+               struct xhci_segment *next, bool link_trbs, bool isoc)
 {
        u32 val;
 
@@ -97,7 +97,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
                val &= ~TRB_TYPE_BITMASK;
                val |= TRB_TYPE(TRB_LINK);
                /* Always set the chain bit with 0.95 hardware */
-               if (xhci_link_trb_quirk(xhci))
+               /* Set chain bit for isoc rings on AMD 0.96 host */
+               if (xhci_link_trb_quirk(xhci) ||
+                               (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
                        val |= TRB_CHAIN;
                prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
        }
@@ -112,18 +114,20 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
        struct xhci_segment *seg;
        struct xhci_segment *first_seg;
 
-       if (!ring || !ring->first_seg)
+       if (!ring)
                return;
-       first_seg = ring->first_seg;
-       seg = first_seg->next;
-       xhci_dbg(xhci, "Freeing ring at %p\n", ring);
-       while (seg != first_seg) {
-               struct xhci_segment *next = seg->next;
-               xhci_segment_free(xhci, seg);
-               seg = next;
+       if (ring->first_seg) {
+               first_seg = ring->first_seg;
+               seg = first_seg->next;
+               xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+               while (seg != first_seg) {
+                       struct xhci_segment *next = seg->next;
+                       xhci_segment_free(xhci, seg);
+                       seg = next;
+               }
+               xhci_segment_free(xhci, first_seg);
+               ring->first_seg = NULL;
        }
-       xhci_segment_free(xhci, first_seg);
-       ring->first_seg = NULL;
        kfree(ring);
 }
 
@@ -152,7 +156,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-               unsigned int num_segs, bool link_trbs, gfp_t flags)
+               unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
 {
        struct xhci_ring        *ring;
        struct xhci_segment     *prev;
@@ -178,12 +182,12 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                next = xhci_segment_alloc(xhci, flags);
                if (!next)
                        goto fail;
-               xhci_link_segments(xhci, prev, next, link_trbs);
+               xhci_link_segments(xhci, prev, next, link_trbs, isoc);
 
                prev = next;
                num_segs--;
        }
-       xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+       xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
 
        if (link_trbs) {
                /* See section 4.9.2.1 and 6.4.4.1 */
@@ -229,14 +233,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-               struct xhci_ring *ring)
+               struct xhci_ring *ring, bool isoc)
 {
        struct xhci_segment     *seg = ring->first_seg;
        do {
                memset(seg->trbs, 0,
                                sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
                /* All endpoint rings have link TRBs */
-               xhci_link_segments(xhci, seg, seg->next, 1);
+               xhci_link_segments(xhci, seg, seg->next, 1, isoc);
                seg = seg->next;
        } while (seg != ring->first_seg);
        xhci_initialize_ring_info(ring);
@@ -540,7 +544,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
         */
        for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
                stream_info->stream_rings[cur_stream] =
-                       xhci_ring_alloc(xhci, 1, true, mem_flags);
+                       xhci_ring_alloc(xhci, 1, true, false, mem_flags);
                cur_ring = stream_info->stream_rings[cur_stream];
                if (!cur_ring)
                        goto cleanup_rings;
@@ -765,7 +769,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
        }
 
        /* Allocate endpoint 0 ring */
-       dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+       dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
        if (!dev->eps[0].ring)
                goto fail;
 
@@ -871,7 +875,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        struct xhci_virt_device *dev;
        struct xhci_ep_ctx      *ep0_ctx;
        struct xhci_slot_ctx    *slot_ctx;
-       struct xhci_input_control_ctx *ctrl_ctx;
        u32                     port_num;
        struct usb_device *top_dev;
 
@@ -883,12 +886,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
                return -EINVAL;
        }
        ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
        slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
-       /* 2) New slot context and endpoint 0 context are valid*/
-       ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
-
        /* 3) Only the control endpoint is valid - one endpoint context */
        slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route);
        switch (udev->speed) {
@@ -1003,26 +1002,42 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
 }
 
 /*
- * Convert bInterval expressed in frames (in 1-255 range) to exponent of
+ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
  * microframes, rounded down to nearest power of 2.
  */
-static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
-               struct usb_host_endpoint *ep)
+static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
+               struct usb_host_endpoint *ep, unsigned int desc_interval,
+               unsigned int min_exponent, unsigned int max_exponent)
 {
        unsigned int interval;
 
-       interval = fls(8 * ep->desc.bInterval) - 1;
-       interval = clamp_val(interval, 3, 10);
-       if ((1 << interval) != 8 * ep->desc.bInterval)
+       interval = fls(desc_interval) - 1;
+       interval = clamp_val(interval, min_exponent, max_exponent);
+       if ((1 << interval) != desc_interval)
                dev_warn(&udev->dev,
                         "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
                         ep->desc.bEndpointAddress,
                         1 << interval,
-                        8 * ep->desc.bInterval);
+                        desc_interval);
 
        return interval;
 }
 
+static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
+               struct usb_host_endpoint *ep)
+{
+       return xhci_microframes_to_exponent(udev, ep,
+                       ep->desc.bInterval, 0, 15);
+}
+
+
+static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
+               struct usb_host_endpoint *ep)
+{
+       return xhci_microframes_to_exponent(udev, ep,
+                       ep->desc.bInterval * 8, 3, 10);
+}
+
 /* Return the polling or NAK interval.
  *
  * The polling interval is expressed in "microframes".  If xHCI's Interval field
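
xhci_microframes_to_exponent(), introduced above, reduces both interval parsers to one rule: take the floor of log2 of the interval expressed in microframes (frames are scaled by 8 first) and clamp the exponent to the range allowed for the endpoint's speed. A standalone sketch with fls() open-coded and the two parameterizations from the hunk exercised; the printed values are only illustrative:

#include <stdio.h>

static unsigned int fls32(unsigned int x)       /* position of highest set bit, 1-based */
{
        unsigned int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

static unsigned int uframes_to_exponent(unsigned int desc_interval,
                                        unsigned int min_exp,
                                        unsigned int max_exp)
{
        unsigned int exp = fls32(desc_interval) - 1;   /* floor(log2(interval)) */

        if (exp < min_exp)
                exp = min_exp;
        if (exp > max_exp)
                exp = max_exp;
        return exp;
}

int main(void)
{
        /* full/low speed: bInterval is in frames, so scale by 8; clamp to 3..10 */
        printf("frames:      bInterval=3   -> 2^%u uframes\n",
               uframes_to_exponent(3 * 8, 3, 10));      /* 2^4 = 16 */
        /* high-speed control/bulk NAK rate: already in microframes; clamp to 0..15 */
        printf("microframes: bInterval=255 -> 2^%u uframes\n",
               uframes_to_exponent(255, 0, 15));        /* 2^7 = 128 */
        return 0;
}
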
@@ -1041,7 +1056,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
                /* Max NAK rate */
                if (usb_endpoint_xfer_control(&ep->desc) ||
                    usb_endpoint_xfer_bulk(&ep->desc)) {
-                       interval = ep->desc.bInterval;
+                       interval = xhci_parse_microframe_interval(udev, ep);
                        break;
                }
                /* Fall through - SS and HS isoc/int have same decoding */
@@ -1175,10 +1190,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
         */
        if (usb_endpoint_xfer_isoc(&ep->desc))
                virt_dev->eps[ep_index].new_ring =
-                       xhci_ring_alloc(xhci, 8, true, mem_flags);
+                       xhci_ring_alloc(xhci, 8, true, true, mem_flags);
        else
                virt_dev->eps[ep_index].new_ring =
-                       xhci_ring_alloc(xhci, 1, true, mem_flags);
+                       xhci_ring_alloc(xhci, 1, true, false, mem_flags);
        if (!virt_dev->eps[ep_index].new_ring) {
                /* Attempt to use the ring cache */
                if (virt_dev->num_rings_cached == 0)
@@ -1187,7 +1202,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                        virt_dev->ring_cache[virt_dev->num_rings_cached];
                virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
                virt_dev->num_rings_cached--;
-               xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+               xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+                       usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
        }
        virt_dev->eps[ep_index].skip = false;
        ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -1493,11 +1509,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        int i;
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
-       if (xhci->ir_set) {
-               xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-               xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
-               xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
-       }
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
                pci_free_consistent(pdev, size,
@@ -1509,7 +1520,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        xhci->event_ring = NULL;
        xhci_dbg(xhci, "Freed event ring\n");
 
-       xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
+       xhci->cmd_ring_reserved_trbs = 0;
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
        xhci->cmd_ring = NULL;
@@ -1538,7 +1549,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        xhci->medium_streams_pool = NULL;
        xhci_dbg(xhci, "Freed medium stream array pool\n");
 
-       xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
        if (xhci->dcbaa)
                pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
                                xhci->dcbaa, xhci->dcbaa->dma);
@@ -2001,7 +2011,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
                goto fail;
 
        /* Set up the command ring to have one segments for now. */
-       xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+       xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
        if (!xhci->cmd_ring)
                goto fail;
        xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2032,7 +2042,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
         * the event ring segment table (ERST).  Section 4.9.3.
         */
        xhci_dbg(xhci, "// Allocating event ring\n");
-       xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+       xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+                                               flags);
        if (!xhci->event_ring)
                goto fail;
        if (xhci_check_trb_in_td_math(xhci, flags) < 0)
@@ -2106,6 +2117,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 fail:
        xhci_warn(xhci, "Couldn't initialize memory\n");
+       xhci_halt(xhci);
+       xhci_reset(xhci);
        xhci_mem_cleanup(xhci);
        return -ENOMEM;
 }
index cb16de213f6491bb062c41e7cd91277f9fc0b605..39e230f2dd1b5b08aa5f8ceab14309b76359bd79 100644 (file)
@@ -123,11 +123,15 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
                xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u "
                                "has broken MSI implementation\n",
                                pdev->revision);
+               xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        }
 
        if (pdev->vendor == PCI_VENDOR_ID_NEC)
                xhci->quirks |= XHCI_NEC_HOST;
 
+       if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
+               xhci->quirks |= XHCI_AMD_0x96_HOST;
+
        /* AMD PLL quirk */
        if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
                xhci->quirks |= XHCI_AMD_PLL_FIX;
@@ -142,6 +146,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
                xhci->quirks |= XHCI_RESET_ON_RESUME;
                xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
        }
+       if (pdev->vendor == PCI_VENDOR_ID_VIA)
+               xhci->quirks |= XHCI_RESET_ON_RESUME;
 
        /* Make sure the HC is halted. */
        retval = xhci_halt(xhci);
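
The probe-time checks added to xhci_pci_setup() follow the usual quirk pattern: match on PCI identity (and controller revision) once, record each behavioral deviation as a bit, and let the hot paths test the bit instead of re-matching IDs. A standalone sketch of that pattern; the struct and bit positions are inventions of the sketch, while the vendor IDs are the standard PCI ones:

#include <stdint.h>

#define PCI_VENDOR_ID_AMD   0x1022
#define PCI_VENDOR_ID_VIA   0x1106

#define QUIRK_AMD_0x96_HOST     (1u << 0)   /* bit positions invented for the sketch */
#define QUIRK_RESET_ON_RESUME   (1u << 1)

struct host {
        uint16_t vendor;
        uint16_t hci_version;   /* e.g. 0x96 for an xHCI 0.96 controller */
        uint32_t quirks;
};

static void apply_pci_quirks(struct host *h)
{
        if (h->vendor == PCI_VENDOR_ID_AMD && h->hci_version == 0x96)
                h->quirks |= QUIRK_AMD_0x96_HOST;
        if (h->vendor == PCI_VENDOR_ID_VIA)
                h->quirks |= QUIRK_RESET_ON_RESUME;
}
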
index d0871ea687dc16840169533098b3c08e70807237..de3c1513fdb246915274c69d3b3ac0a14d5e6e37 100644 (file)
@@ -187,7 +187,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  *                     prepare_transfer()?
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-               bool consumer, bool more_trbs_coming)
+               bool consumer, bool more_trbs_coming, bool isoc)
 {
        u32 chain;
        union xhci_trb *next;
@@ -214,11 +214,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                                if (!chain && !more_trbs_coming)
                                        break;
 
-                               /* If we're not dealing with 0.95 hardware,
+                               /* If we're not dealing with 0.95 hardware or
+                                * isoc rings on AMD 0.96 host,
                                 * carry over the chain bit of the previous TRB
                                 * (which may mean the chain bit is cleared).
                                 */
-                               if (!xhci_link_trb_quirk(xhci)) {
+                               if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+                                               && !xhci_link_trb_quirk(xhci)) {
                                        next->link.control &=
                                                cpu_to_le32(~TRB_CHAIN);
                                        next->link.control |=
@@ -817,23 +819,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        struct xhci_ring *ring;
        struct xhci_td *cur_td;
        int ret, i, j;
+       unsigned long flags;
 
        ep = (struct xhci_virt_ep *) arg;
        xhci = ep->xhci;
 
-       spin_lock(&xhci->lock);
+       spin_lock_irqsave(&xhci->lock, flags);
 
        ep->stop_cmds_pending--;
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
                                "xHCI as DYING, exiting.\n");
-               spin_unlock(&xhci->lock);
+               spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
        if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
                xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
                                "exiting.\n");
-               spin_unlock(&xhci->lock);
+               spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }
 
@@ -845,11 +848,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        xhci->xhc_state |= XHCI_STATE_DYING;
        /* Disable interrupts from the host controller and start halting it */
        xhci_quiesce(xhci);
-       spin_unlock(&xhci->lock);
+       spin_unlock_irqrestore(&xhci->lock, flags);
 
        ret = xhci_halt(xhci);
 
-       spin_lock(&xhci->lock);
+       spin_lock_irqsave(&xhci->lock, flags);
        if (ret < 0) {
                /* This is bad; the host is not responding to commands and it's
                 * not allowing itself to be halted.  At least interrupts are
@@ -897,7 +900,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
                        }
                }
        }
-       spin_unlock(&xhci->lock);
+       spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_dbg(xhci, "Calling usb_hc_died()\n");
        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
        xhci_dbg(xhci, "xHCI host controller is dead.\n");
@@ -1215,6 +1218,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
  *
  * Returns a zero-based port number, which is suitable for indexing into each of
  * the split roothubs' port arrays and bus state arrays.
+ * Add one to it in order to call xhci_find_slot_id_by_port.
  */
 static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
                struct xhci_hcd *xhci, u32 port_id)
@@ -1337,7 +1341,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
                        temp |= PORT_LINK_STROBE | XDEV_U0;
                        xhci_writel(xhci, temp, port_array[faked_port_index]);
                        slot_id = xhci_find_slot_id_by_port(hcd, xhci,
-                                       faked_port_index);
+                                       faked_port_index + 1);
                        if (!slot_id) {
                                xhci_dbg(xhci, "slot_id is zero\n");
                                goto cleanup;
@@ -1345,10 +1349,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
                        xhci_ring_device(xhci, slot_id);
                        xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
                        /* Clear PORT_PLC */
-                       temp = xhci_readl(xhci, port_array[faked_port_index]);
-                       temp = xhci_port_state_to_neutral(temp);
-                       temp |= PORT_PLC;
-                       xhci_writel(xhci, temp, port_array[faked_port_index]);
+                       xhci_test_and_clear_bit(xhci, port_array,
+                                               faked_port_index, PORT_PLC);
                } else {
                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
                        bus_state->resume_done[faked_port_index] = jiffies +
@@ -1359,6 +1361,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
                }
        }
 
+       if (hcd->speed != HCD_USB3)
+               xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
+                                       PORT_PLC);
+
 cleanup:
        /* Update event ring dequeue pointer before dropping the lock */
        inc_deq(xhci, xhci->event_ring, true);
@@ -1733,8 +1739,12 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        /* handle completion code */
        switch (trb_comp_code) {
        case COMP_SUCCESS:
-               frame->status = 0;
-               break;
+               if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+                       frame->status = 0;
+                       break;
+               }
+               if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
+                       trb_comp_code = COMP_SHORT_TX;
        case COMP_SHORT_TX:
                frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
                                -EREMOTEIO : 0;
@@ -1750,6 +1760,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                break;
        case COMP_DEV_ERR:
        case COMP_STALL:
+       case COMP_TX_ERR:
                frame->status = -EPROTO;
                skip_td = true;
                break;
@@ -1832,13 +1843,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
        switch (trb_comp_code) {
        case COMP_SUCCESS:
                /* Double check that the HW transferred everything. */
-               if (event_trb != td->last_trb) {
+               if (event_trb != td->last_trb ||
+                               TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
                        xhci_warn(xhci, "WARN Successful completion "
                                        "on short TX\n");
                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                                *status = -EREMOTEIO;
                        else
                                *status = 0;
+                       if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
+                               trb_comp_code = COMP_SHORT_TX;
                } else {
                        *status = 0;
                }
@@ -1940,8 +1954,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        int status = -EINPROGRESS;
        struct urb_priv *urb_priv;
        struct xhci_ep_ctx *ep_ctx;
+       struct list_head *tmp;
        u32 trb_comp_code;
        int ret = 0;
+       int td_num = 0;
 
        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        xdev = xhci->devs[slot_id];
@@ -1963,6 +1979,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                return -ENODEV;
        }
 
+       /* Count current td numbers if ep->skip is set */
+       if (ep->skip) {
+               list_for_each(tmp, &ep_ring->td_list)
+                       td_num++;
+       }
+
        event_dma = le64_to_cpu(event->buffer);
        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
        /* Look for common error cases */
@@ -1971,6 +1993,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         * transfer type
         */
        case COMP_SUCCESS:
+               if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+                       break;
+               if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+                       trb_comp_code = COMP_SHORT_TX;
+               else
+                       xhci_warn(xhci, "WARN Successful completion on short TX: "
+                                       "needs XHCI_TRUST_TX_LENGTH quirk?\n");
        case COMP_SHORT_TX:
                break;
        case COMP_STOP:
@@ -2074,7 +2103,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                        goto cleanup;
                }
 
+               /* We've skipped all the TDs on the ep ring when ep->skip set */
+               if (ep->skip && td_num == 0) {
+                       ep->skip = false;
+                       xhci_dbg(xhci, "All tds on the ep_ring skipped. "
+                                               "Clear skip flag.\n");
+                       ret = 0;
+                       goto cleanup;
+               }
+
                td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+               if (ep->skip)
+                       td_num--;
 
                /* Is this a TRB in the currently executing TD? */
                event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
@@ -2327,7 +2367,7 @@ hw_died:
                u32 irq_pending;
                /* Acknowledge the PCI interrupt */
                irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-               irq_pending |= 0x3;
+               irq_pending |= IMAN_IP;
                xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
        }
 
@@ -2398,7 +2438,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  *                     prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-               bool consumer, bool more_trbs_coming,
+               bool consumer, bool more_trbs_coming, bool isoc,
                u32 field1, u32 field2, u32 field3, u32 field4)
 {
        struct xhci_generic_trb *trb;
@@ -2408,7 +2448,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
        trb->field[3] = cpu_to_le32(field4);
-       inc_enq(xhci, ring, consumer, more_trbs_coming);
+       inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
 }
 
 /*
@@ -2416,7 +2456,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  * FIXME allocate segments if the ring is full.
  */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-               u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+               u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
 {
        /* Make sure the endpoint has been added to xHC schedule */
        switch (ep_state) {
@@ -2458,10 +2498,11 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                next = ring->enqueue;
 
                while (last_trb(xhci, ring, ring->enq_seg, next)) {
-                       /* If we're not dealing with 0.95 hardware,
-                        * clear the chain bit.
+                       /* If we're not dealing with 0.95 hardware or isoc rings
+                        * on AMD 0.96 host, clear the chain bit.
                         */
-                       if (!xhci_link_trb_quirk(xhci))
+                       if (!xhci_link_trb_quirk(xhci) && !(isoc &&
+                                       (xhci->quirks & XHCI_AMD_0x96_HOST)))
                                next->link.control &= cpu_to_le32(~TRB_CHAIN);
                        else
                                next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2494,6 +2535,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
                unsigned int num_trbs,
                struct urb *urb,
                unsigned int td_index,
+               bool isoc,
                gfp_t mem_flags)
 {
        int ret;
@@ -2511,7 +2553,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
        ret = prepare_ring(xhci, ep_ring,
                           le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-                          num_trbs, mem_flags);
+                          num_trbs, isoc, mem_flags);
        if (ret)
                return ret;
 
@@ -2544,7 +2586,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
        struct scatterlist *sg;
 
        sg = NULL;
-       num_sgs = urb->num_sgs;
+       num_sgs = urb->num_mapped_sgs;
        temp = urb->transfer_buffer_length;
 
        xhci_dbg(xhci, "count sg list trbs: \n");
@@ -2728,13 +2770,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                return -EINVAL;
 
        num_trbs = count_sg_trbs_needed(xhci, urb);
-       num_sgs = urb->num_sgs;
+       num_sgs = urb->num_mapped_sgs;
        total_packet_count = roundup(urb->transfer_buffer_length,
                        le16_to_cpu(urb->ep->desc.wMaxPacketSize));
 
        trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
-                       num_trbs, urb, 0, mem_flags);
+                       num_trbs, urb, 0, false, mem_flags);
        if (trb_buff_len < 0)
                return trb_buff_len;
 
@@ -2829,7 +2871,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        more_trbs_coming = true;
                else
                        more_trbs_coming = false;
-               queue_trb(xhci, ep_ring, false, more_trbs_coming,
+               queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
@@ -2920,7 +2962,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
-                       num_trbs, urb, 0, mem_flags);
+                       num_trbs, urb, 0, false, mem_flags);
        if (ret < 0)
                return ret;
 
@@ -2992,7 +3034,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        more_trbs_coming = true;
                else
                        more_trbs_coming = false;
-               queue_trb(xhci, ep_ring, false, more_trbs_coming,
+               queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
@@ -3052,7 +3094,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                num_trbs++;
        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
-                       num_trbs, urb, 0, mem_flags);
+                       num_trbs, urb, 0, false, mem_flags);
        if (ret < 0)
                return ret;
 
@@ -3085,7 +3127,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
        }
 
-       queue_trb(xhci, ep_ring, false, true,
+       queue_trb(xhci, ep_ring, false, true, false,
                  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
                  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
                  TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3105,7 +3147,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        if (urb->transfer_buffer_length > 0) {
                if (setup->bRequestType & USB_DIR_IN)
                        field |= TRB_DIR_IN;
-               queue_trb(xhci, ep_ring, false, true,
+               queue_trb(xhci, ep_ring, false, true, false,
                                lower_32_bits(urb->transfer_dma),
                                upper_32_bits(urb->transfer_dma),
                                length_field,
@@ -3121,7 +3163,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                field = 0;
        else
                field = TRB_DIR_IN;
-       queue_trb(xhci, ep_ring, false, false,
+       queue_trb(xhci, ep_ring, false, false, false,
                        0,
                        0,
                        TRB_INTR_TARGET(0),
@@ -3270,7 +3312,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
 
                ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-                               urb->stream_id, trbs_per_td, urb, i, mem_flags);
+                               urb->stream_id, trbs_per_td, urb, i, true,
+                               mem_flags);
                if (ret < 0) {
                        if (i == 0)
                                return ret;
@@ -3340,7 +3383,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                remainder |
                                TRB_INTR_TARGET(0);
 
-                       queue_trb(xhci, ep_ring, false, more_trbs_coming,
+                       queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
@@ -3354,7 +3397,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                /* Check TD length */
                if (running_total != td_len) {
                        xhci_err(xhci, "ISOC TD length unmatch\n");
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto cleanup;
                }
        }
 
@@ -3422,7 +3466,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
         * Do not insert any td of the urb to the ring if the check failed.
         */
        ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-                          num_trbs, mem_flags);
+                          num_trbs, true, mem_flags);
        if (ret)
                return ret;
 
@@ -3481,7 +3525,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                reserved_trbs++;
 
        ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
-                       reserved_trbs, GFP_ATOMIC);
+                       reserved_trbs, false, GFP_ATOMIC);
        if (ret < 0) {
                xhci_err(xhci, "ERR: No room for command on command ring\n");
                if (command_must_succeed)
@@ -3489,8 +3533,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                                        "unfailable commands failed.\n");
                return ret;
        }
-       queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
-                       field4 | xhci->cmd_ring->cycle_state);
+       queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
+                       field3, field4 | xhci->cmd_ring->cycle_state);
        return 0;
 }
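
Several completion handlers above (handle_tx_event, process_isoc_td, process_bulk_intr_td) gain the same normalization: a SUCCESS event that still reports untransferred bytes is treated as a short transfer, re-labelled as COMP_SHORT_TX when the XHCI_TRUST_TX_LENGTH quirk is set and merely warned about otherwise, with the switch case deliberately falling through. A standalone sketch of that control flow; the enum and the residual parameter are inventions of the sketch:

#include <stdbool.h>
#include <stdio.h>

enum comp_code { COMP_SUCCESS, COMP_SHORT_TX, COMP_STALL };

/* Returns true when the transfer should be completed as "short". */
static bool completed_short(enum comp_code code, unsigned int residual,
                            bool trust_tx_length)
{
        switch (code) {
        case COMP_SUCCESS:
                if (residual == 0)
                        return false;            /* genuinely complete */
                if (!trust_tx_length)
                        fprintf(stderr, "successful completion on short TX\n");
                /* fall through: handle it as a short transfer either way */
        case COMP_SHORT_TX:
                return true;
        default:
                return false;
        }
}
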
 
index 7ea48b342aab80186433003039bb159207452412..16ec4d3890b353f7d90f4bfc1d7c7e4bd6a16345 100644 (file)
@@ -444,6 +444,11 @@ int xhci_run(struct usb_hcd *hcd)
 
        if (ret) {
 legacy_irq:
+               if (!pdev->irq) {
+                       xhci_err(xhci, "No msi-x/msi found and "
+                                       "no IRQ in BIOS\n");
+                       return -EINVAL;
+               }
                /* fall back to legacy interrupt*/
                ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                                        hcd->irq_descr, hcd);
@@ -605,11 +610,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
        xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
-       xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-       xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
        xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
        xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+       xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+       xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
 }
 
 static void xhci_restore_registers(struct xhci_hcd *xhci)
@@ -618,10 +623,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
        xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
-       xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
-       xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
        xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+       xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+       xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+       xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
 }
 
 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
@@ -657,7 +663,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
-               memset(seg->trbs, 0, SEGMENT_SIZE);
+               memset(seg->trbs, 0,
+                       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+               seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+                       cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);
 
@@ -721,8 +730,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_CSS;
        xhci_writel(xhci, command, &xhci->op_regs->command);
-       if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
-               xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+       if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
+               xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
@@ -749,7 +758,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        u32                     command, temp = 0;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
-       int                     retval;
+       int                     retval = 0;
 
        /* Wait a bit if either of the roothubs need to settle from the
         * transition into bus suspend.
@@ -759,6 +768,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                                xhci->bus_state[1].next_statechange))
                msleep(100);
 
+       set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+       set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
        spin_lock_irq(&xhci->lock);
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                hibernated = true;
@@ -774,8 +786,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                command |= CMD_CRS;
                xhci_writel(xhci, command, &xhci->op_regs->command);
                if (handshake(xhci, &xhci->op_regs->status,
-                             STS_RESTORE, 0, 10*100)) {
-                       xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
+                             STS_RESTORE, 0, 10 * 1000)) {
+                       xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
@@ -828,20 +840,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                        return retval;
                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
-               if (retval)
-                       goto failed_restart;
-
-               xhci_dbg(xhci, "Start the secondary HCD\n");
-               retval = xhci_run(secondary_hcd);
                if (!retval) {
-                       set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-                       set_bit(HCD_FLAG_HW_ACCESSIBLE,
-                                       &xhci->shared_hcd->flags);
+                       xhci_dbg(xhci, "Start the secondary HCD\n");
+                       retval = xhci_run(secondary_hcd);
                }
-failed_restart:
                hcd->state = HC_STATE_SUSPENDED;
                xhci->shared_hcd->state = HC_STATE_SUSPENDED;
-               return retval;
+               goto done;
        }
 
        /* step 4: set Run/Stop bit */
@@ -860,11 +865,14 @@ failed_restart:
         * Running endpoints by ringing their doorbells
         */
 
-       set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-       set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
-
        spin_unlock_irq(&xhci->lock);
-       return 0;
+
+ done:
+       if (retval == 0) {
+               usb_hcd_resume_root_hub(hcd);
+               usb_hcd_resume_root_hub(xhci->shared_hcd);
+       }
+       return retval;
 }
 #endif /* CONFIG_PM */
 
@@ -1566,6 +1574,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
                /* FIXME: can we allocate more resources for the HC? */
                break;
        case COMP_BW_ERR:
+       case COMP_2ND_BW_ERR:
                dev_warn(&udev->dev, "Not enough bandwidth "
                                "for new device state.\n");
                ret = -ENOSPC;
@@ -1889,6 +1898,12 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
        ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
        ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
        ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+       /* Don't issue the command if there's no endpoints to update. */
+       if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
+                       ctrl_ctx->drop_flags == 0)
+               return 0;
+
        xhci_dbg(xhci, "New Input Control Context:\n");
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
        xhci_dbg_ctx(xhci, virt_dev->in_ctx,
@@ -2175,8 +2190,7 @@ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
                if (ret < 0)
                        return ret;
 
-               max_streams = USB_SS_MAX_STREAMS(
-                               eps[i]->ss_ep_comp.bmAttributes);
+               max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
                if (max_streams < (*num_streams - 1)) {
                        xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
                                        eps[i]->desc.bEndpointAddress,
@@ -2869,6 +2883,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        /* Otherwise, update the control endpoint ring enqueue pointer. */
        else
                xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
+       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+       ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+       ctrl_ctx->drop_flags = 0;
+
        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
        xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 
@@ -2950,7 +2968,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
                + 1;
        /* Zero the input context control for later use */
-       ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->add_flags = 0;
        ctrl_ctx->drop_flags = 0;
 
index d8bbf5ccb10d97872fb79aeb4b8aa00f6dad2cea..25c3e26b999c76cee5cd3c88166602e2fa4dfc1e 100644 (file)
@@ -205,6 +205,10 @@ struct xhci_op_regs {
 #define CMD_PM_INDEX   (1 << 11)
 /* bits 12:31 are reserved (and should be preserved on writes). */
 
+/* IMAN - Interrupt Management Register */
+#define IMAN_IP                (1 << 1)
+#define IMAN_IE                (1 << 0)
+
 /* USBSTS - USB status - status bitmasks */
 /* HC not running - set to 1 when run/stop bit is cleared. */
 #define STS_HALT       XHCI_STS_HALT
@@ -900,7 +904,6 @@ struct xhci_transfer_event {
 /* Invalid Stream ID Error */
 #define COMP_STRID_ERR 34
 /* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
-/* FIXME - check for this */
 #define COMP_2ND_BW_ERR        35
 /* Split Transaction Error */
 #define        COMP_SPLIT_ERR  36
@@ -1311,6 +1314,8 @@ struct xhci_hcd {
 #define XHCI_EP_LIMIT_QUIRK    (1 << 5)
 #define XHCI_BROKEN_MSI                (1 << 6)
 #define XHCI_RESET_ON_RESUME   (1 << 7)
+#define XHCI_AMD_0x96_HOST     (1 << 9)
+#define XHCI_TRUST_TX_LENGTH   (1 << 10)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
@@ -1565,6 +1570,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, unsigned int stream_id);
 
 /* xHCI roothub code */
+void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+                               int port_id, u32 port_bit);
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
                char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
index fe1d44319d0a1eb2d373c14c6fccbfdfedd505c9..8f725f651915437bd46562067e9169d7e8873dd9 100644 (file)
@@ -55,8 +55,9 @@ static int isight_firmware_load(struct usb_interface *intf,
 
        ptr = firmware->data;
 
+       buf[0] = 0x01;
        if (usb_control_msg
-           (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\1", 1,
+           (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
             300) != 1) {
                printk(KERN_ERR
                       "Failed to initialise isight firmware loader\n");
@@ -100,8 +101,9 @@ static int isight_firmware_load(struct usb_interface *intf,
                }
        }
 
+       buf[0] = 0x00;
        if (usb_control_msg
-           (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1,
+           (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
             300) != 1) {
                printk(KERN_ERR "isight firmware loading completion failed\n");
                ret = -ENODEV;
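
Both isight hunks stop passing the string literals "\1" and "\0" straight to usb_control_msg() and stage the byte in the driver's kmalloc'd buf instead: the data buffer handed to usb_control_msg() must be DMA-capable kernel memory, which a string constant in rodata need not be. A minimal sketch of the same pattern, assuming only the standard usb_control_msg() signature (request values mirror the hunks above, error handling trimmed):

#include <linux/slab.h>
#include <linux/usb.h>

static int isight_send_byte(struct usb_device *dev, u8 value)
{
        u8 *buf = kmalloc(1, GFP_KERNEL);       /* heap memory is DMA-able */
        int ret;

        if (!buf)
                return -ENOMEM;
        buf[0] = value;
        ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40,
                              0xe600, 0, buf, 1, 300);
        kfree(buf);
        return ret == 1 ? 0 : -EIO;
}
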
index 417b8f207e8b126ac5b2ee33bab0dc5fdaa232b0..59689fa2f7c1fab7cf4a35eebd74c375a33d1c91 100644 (file)
@@ -24,7 +24,7 @@
 
 #define VENDOR_ID      0x0fc5
 #define PRODUCT_ID     0x1227
-#define MAXLEN         6
+#define MAXLEN         8
 
 /* table of devices that work with this driver */
 static const struct usb_device_id id_table[] = {
index bb10846affc30b39a1f7514acf5084174cb43f37..5707f56d8046a59017b95d44a559165abab95a96 100644 (file)
@@ -1023,7 +1023,10 @@ test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
                case 13:        /* short read, resembling case 10 */
                        req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
                        /* last data packet "should" be DATA1, not DATA0 */
-                       len = 1024 - udev->descriptor.bMaxPacketSize0;
+                       if (udev->speed == USB_SPEED_SUPER)
+                               len = 1024 - 512;
+                       else
+                               len = 1024 - udev->descriptor.bMaxPacketSize0;
                        expected = -EREMOTEIO;
                        break;
                case 14:        /* short read; try to fill the last packet */
@@ -1382,11 +1385,15 @@ static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
 
 static int halt_simple(struct usbtest_dev *dev)
 {
-       int             ep;
-       int             retval = 0;
-       struct urb      *urb;
+       int                     ep;
+       int                     retval = 0;
+       struct urb              *urb;
+       struct usb_device       *udev = testdev_to_usbdev(dev);
 
-       urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
+       if (udev->speed == USB_SPEED_SUPER)
+               urb = simple_alloc_urb(udev, 0, 1024);
+       else
+               urb = simple_alloc_urb(udev, 0, 512);
        if (urb == NULL)
                return -ENOMEM;
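
The two usbtest hunks account for SuperSpeed endpoint sizes: a SuperSpeed control endpoint always uses 512-byte packets and a SuperSpeed bulk endpoint 1024-byte packets, and at that speed bMaxPacketSize0 in the device descriptor is an exponent (the value 9 means 2^9 = 512), so subtracting the raw field from 1024 no longer yields "one packet short". A small helper stating the encoding, taken from the USB 2.0/3.0 descriptor rules rather than from this driver:

#include <linux/usb/ch9.h>

/* Hedged sketch: bytes in an endpoint-zero packet for a given speed. */
static unsigned int ep0_maxpacket(enum usb_device_speed speed, u8 bMaxPacketSize0)
{
        if (speed == USB_SPEED_SUPER)
                return 1 << bMaxPacketSize0;    /* exponent: 9 -> 512 bytes */
        return bMaxPacketSize0;                 /* FS/HS: the byte count itself */
}
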
 
index ac5bfd619e62ab4dad5d05772ece6fcc13744159..2504694455f3a27544a969d4dd85fe11a067e03e 100644 (file)
@@ -99,9 +99,7 @@ static void yurex_delete(struct kref *kref)
        usb_put_dev(dev->udev);
        if (dev->cntl_urb) {
                usb_kill_urb(dev->cntl_urb);
-               if (dev->cntl_req)
-                       usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
-                               dev->cntl_req, dev->cntl_urb->setup_dma);
+               kfree(dev->cntl_req);
                if (dev->cntl_buffer)
                        usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
                                dev->cntl_buffer, dev->cntl_urb->transfer_dma);
@@ -234,9 +232,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
        }
 
        /* allocate buffer for control req */
-       dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
-                                          GFP_KERNEL,
-                                          &dev->cntl_urb->setup_dma);
+       dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL);
        if (!dev->cntl_req) {
                err("Could not allocate cntl_req");
                goto error;
@@ -286,7 +282,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
                         usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
                         dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
                         dev, 1);
-       dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+       dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
                retval = -EIO;
                err("Could not submitting URB");
index a09dbd243eb370972bab11e2868eb08b588a15dd..a04b2ff9dd83263083f51fceb703655721e081bb 100644 (file)
@@ -1101,7 +1101,7 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
                nevents = mon_bin_queued(rp);
 
                sp = (struct mon_bin_stats __user *)arg;
-               if (put_user(rp->cnt_lost, &sp->dropped))
+               if (put_user(ndropped, &sp->dropped))
                        return -EFAULT;
                if (put_user(nevents, &sp->queued))
                        return -EFAULT;
index dce7182e1dfeabe59df62b9e7ae3d38e2cb806a3..a0232a77c05bae4a80920e0b0b8b2128accf7cbe 100644 (file)
@@ -2078,8 +2078,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (status < 0)
                goto fail3;
 
-       pm_runtime_put(musb->controller);
-
        status = musb_init_debugfs(musb);
        if (status < 0)
                goto fail4;
index 548338c2147c50fd0fb01f4e78e763a169799a2a..99ceaef23327fd13f9f2e9f334a90e4a98afc812 100644 (file)
@@ -576,6 +576,15 @@ void musb_g_tx(struct musb *musb, u8 epnum)
 
                if (request->actual == request->length) {
                        musb_g_giveback(musb_ep, request, 0);
+                       /*
+                        * In the giveback function the MUSB lock is
+                        * released and acquired after sometime. During
+                        * this time period the INDEX register could get
+                        * changed by the gadget_queue function especially
+                        * on SMP systems. Reselect the INDEX to be sure
+                        * we are reading/modifying the right registers
+                        */
+                       musb_ep_select(mbase, epnum);
                        req = musb_ep->desc ? next_request(musb_ep) : NULL;
                        if (!req) {
                                dev_dbg(musb->controller, "%s idle now\n",
@@ -968,6 +977,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
                }
 #endif
                musb_g_giveback(musb_ep, request, 0);
+               /*
+                * In the giveback function the MUSB lock is
+                * released and acquired after sometime. During
+                * this time period the INDEX register could get
+                * changed by the gadget_queue function especially
+                * on SMP systems. Reselect the INDEX to be sure
+                * we are reading/modifying the right registers
+                */
+               musb_ep_select(mbase, epnum);
 
                req = next_request(musb_ep);
                if (!req)
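
Both gadget paths get the same fix, and the new comments spell out why: musb_g_giveback() releases the controller lock around the class driver's completion callback and re-acquires it afterwards, and in that window another context (a gadget queueing a request on another endpoint, especially on SMP) can rewrite the MUSB INDEX register, so any indexed CSR access that follows would hit the wrong endpoint. Re-selecting the endpoint right after giveback closes that window. A compressed sketch of the pattern, not the driver's exact code:

/* Hedged sketch: complete a request, then keep working on the same
 * endpoint's indexed registers (identifiers mirror the driver). */
static void complete_and_reselect(struct musb *musb, struct musb_ep *musb_ep,
                                  struct musb_request *req, u8 epnum)
{
        void __iomem *mbase = musb->mregs;

        /* drops musb->lock around the completion callback, then retakes it;
         * INDEX may have been rewritten by another CPU in between */
        musb_g_giveback(musb_ep, &req->request, 0);

        /* so re-select before any further indexed access */
        musb_ep_select(mbase, epnum);
}
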
index c5d4c44d0ffa334246e801c131862bf318ba29cc..6958ab9b99bea17f314c6e48090a78fb3811cf6f 100644 (file)
@@ -295,7 +295,8 @@ static int musb_otg_notifications(struct notifier_block *nb,
 
 static int omap2430_musb_init(struct musb *musb)
 {
-       u32 l, status = 0;
+       u32 l;
+       int status = 0;
        struct device *dev = musb->controller;
        struct musb_hdrc_platform_data *plat = dev->platform_data;
        struct omap_musb_board_data *data = plat->board_data;
@@ -312,7 +313,7 @@ static int omap2430_musb_init(struct musb *musb)
 
        status = pm_runtime_get_sync(dev);
        if (status < 0) {
-               dev_err(dev, "pm_runtime_get_sync FAILED");
+               dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
                goto err1;
        }
 
@@ -464,14 +465,14 @@ static int __init omap2430_probe(struct platform_device *pdev)
                goto err2;
        }
 
+       pm_runtime_enable(&pdev->dev);
+
        ret = platform_device_add(musb);
        if (ret) {
                dev_err(&pdev->dev, "failed to register musb device\n");
                goto err2;
        }
 
-       pm_runtime_enable(&pdev->dev);
-
        return 0;
 
 err2:
index 5cdb9d912275193bd7f5a8929742c4199a45c5c4..18e875b92e001d1c290f2b95e498eea77c6b72cb 100644 (file)
@@ -42,7 +42,7 @@ static int debug;
  * Version information
  */
 
-#define DRIVER_VERSION "v0.6"
+#define DRIVER_VERSION "v0.7"
 #define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
 #define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
 #define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
@@ -380,10 +380,6 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
                goto err_out;
        }
 
-       /* setup termios */
-       if (tty)
-               ark3116_set_termios(tty, port, NULL);
-
        /* remove any data still left: also clears error state */
        ark3116_read_reg(serial, UART_RX, buf);
 
@@ -406,6 +402,10 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
        /* enable DMA */
        ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
 
+       /* setup termios */
+       if (tty)
+               ark3116_set_termios(tty, port, NULL);
+
 err_out:
        kfree(buf);
        return result;
index fd67cc53545bcdfcb37772e6536f6f884bd711fc..aa0c43f1473aa3fe60cc1ad0bbc15f0d0c2584d2 100644 (file)
@@ -39,6 +39,8 @@ static void cp210x_get_termios(struct tty_struct *,
        struct usb_serial_port *port);
 static void cp210x_get_termios_port(struct usb_serial_port *port,
        unsigned int *cflagp, unsigned int *baudp);
+static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
+                                                       struct ktermios *);
 static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
                                                        struct ktermios*);
 static int cp210x_tiocmget(struct tty_struct *);
@@ -47,6 +49,7 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port,
                unsigned int, unsigned int);
 static void cp210x_break_ctl(struct tty_struct *, int);
 static int cp210x_startup(struct usb_serial *);
+static void cp210x_release(struct usb_serial *);
 static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
 
 static int debug;
@@ -79,6 +82,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
        { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
        { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
+       { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
        { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
        { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
        { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
@@ -92,6 +96,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
+       { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */
        { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
        { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
        { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
@@ -118,6 +123,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
        { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
        { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
@@ -133,16 +140,23 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
        { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
        { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+       { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
+       { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
        { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+       { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
        { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
 
 MODULE_DEVICE_TABLE(usb, id_table);
 
+struct cp210x_port_private {
+       __u8                    bInterfaceNumber;
+};
+
 static struct usb_driver cp210x_driver = {
        .name           = "cp210x",
        .probe          = usb_serial_probe,
@@ -168,6 +182,7 @@ static struct usb_serial_driver cp210x_device = {
        .tiocmget               = cp210x_tiocmget,
        .tiocmset               = cp210x_tiocmset,
        .attach                 = cp210x_startup,
+       .release                = cp210x_release,
        .dtr_rts                = cp210x_dtr_rts
 };
 
@@ -200,6 +215,8 @@ static struct usb_serial_driver cp210x_device = {
 #define CP210X_EMBED_EVENTS    0x15
 #define CP210X_GET_EVENTSTATE  0x16
 #define CP210X_SET_CHARS       0x19
+#define CP210X_GET_BAUDRATE    0x1D
+#define CP210X_SET_BAUDRATE    0x1E
 
 /* CP210X_IFC_ENABLE */
 #define UART_ENABLE            0x0001
@@ -253,6 +270,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
                unsigned int *data, int size)
 {
        struct usb_serial *serial = port->serial;
+       struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
        __le32 *buf;
        int result, i, length;
 
@@ -268,7 +286,8 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
        /* Issue the request, attempting to read 'size' bytes */
        result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
                                request, REQTYPE_DEVICE_TO_HOST, 0x0000,
-                               0, buf, size, 300);
+                               port_priv->bInterfaceNumber, buf, size,
+                               USB_CTRL_GET_TIMEOUT);
 
        /* Convert data into an array of integers */
        for (i = 0; i < length; i++)
@@ -296,6 +315,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
                unsigned int *data, int size)
 {
        struct usb_serial *serial = port->serial;
+       struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
        __le32 *buf;
        int result, i, length;
 
@@ -317,12 +337,14 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
                result = usb_control_msg(serial->dev,
                                usb_sndctrlpipe(serial->dev, 0),
                                request, REQTYPE_HOST_TO_DEVICE, 0x0000,
-                               0, buf, size, 300);
+                               port_priv->bInterfaceNumber, buf, size,
+                               USB_CTRL_SET_TIMEOUT);
        } else {
                result = usb_control_msg(serial->dev,
                                usb_sndctrlpipe(serial->dev, 0),
                                request, REQTYPE_HOST_TO_DEVICE, data[0],
-                               0, NULL, 0, 300);
+                               port_priv->bInterfaceNumber, NULL, 0,
+                               USB_CTRL_SET_TIMEOUT);
        }
 
        kfree(buf);
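
cp210x_get_config() and cp210x_set_config() now put the port's interface number into wIndex (and use the core's USB_CTRL_GET_TIMEOUT/USB_CTRL_SET_TIMEOUT instead of a bare 300 ms), so that multi-interface parts such as the dual-UART CP2105 address the right channel; single-port devices sit on interface 0, so the previously hardwired zero keeps working for them. A hedged sketch of the request shape only, reusing the driver's CP210X_GET_BAUDRATE and REQTYPE_DEVICE_TO_HOST defines:

static int cp210x_read_baud(struct usb_device *udev, u8 iface, u32 *baud)
{
        __le32 *buf = kmalloc(sizeof(*buf), GFP_KERNEL);        /* DMA-able */
        int ret;

        if (!buf)
                return -ENOMEM;
        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                              CP210X_GET_BAUDRATE, REQTYPE_DEVICE_TO_HOST,
                              0, iface, buf, sizeof(*buf),
                              USB_CTRL_GET_TIMEOUT);
        if (ret == sizeof(*buf))
                *baud = le32_to_cpu(*buf);
        kfree(buf);
        return ret == sizeof(*buf) ? 0 : -EIO;
}
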
@@ -353,8 +375,8 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port,
  * Quantises the baud rate as per AN205 Table 1
  */
 static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
-       if      (baud <= 56)       baud = 0;
-       else if (baud <= 300)      baud = 300;
+       if (baud <= 300)
+               baud = 300;
        else if (baud <= 600)      baud = 600;
        else if (baud <= 1200)     baud = 1200;
        else if (baud <= 1800)     baud = 1800;
@@ -382,17 +404,15 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
        else if (baud <= 491520)   baud = 460800;
        else if (baud <= 567138)   baud = 500000;
        else if (baud <= 670254)   baud = 576000;
-       else if (baud <= 1053257)  baud = 921600;
-       else if (baud <= 1474560)  baud = 1228800;
-       else if (baud <= 2457600)  baud = 1843200;
-       else                       baud = 3686400;
+       else if (baud < 1000000)
+               baud = 921600;
+       else if (baud > 2000000)
+               baud = 2000000;
        return baud;
 }
 
 static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-       int result;
-
        dbg("%s - port %d", __func__, port->number);
 
        if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) {
@@ -401,13 +421,14 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
                return -EPROTO;
        }
 
-       result = usb_serial_generic_open(tty, port);
-       if (result)
-               return result;
-
        /* Configure the termios structure */
        cp210x_get_termios(tty, port);
-       return 0;
+
+       /* The baud rate must be initialised on cp2104 */
+       if (tty)
+               cp210x_change_speed(tty, port, NULL);
+
+       return usb_serial_generic_open(tty, port);
 }
 
 static void cp210x_close(struct usb_serial_port *port)
@@ -459,10 +480,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
 
        dbg("%s - port %d", __func__, port->number);
 
-       cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2);
-       /* Convert to baudrate */
-       if (baud)
-               baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
+       cp210x_get_config(port, CP210X_GET_BAUDRATE, &baud, 4);
 
        dbg("%s - baud rate = %d", __func__, baud);
        *baudp = baud;
@@ -576,11 +594,64 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
        *cflagp = cflag;
 }
 
+/*
+ * CP2101 supports the following baud rates:
+ *
+ *     300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800,
+ *     38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600
+ *
+ * CP2102 and CP2103 support the following additional rates:
+ *
+ *     4000, 16000, 51200, 64000, 76800, 153600, 250000, 256000, 500000,
+ *     576000
+ *
+ * The device will map a requested rate to a supported one, but the result
+ * of requests for rates greater than 1053257 is undefined (see AN205).
+ *
+ * CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud,
+ * respectively, with an error less than 1%. The actual rates are determined
+ * by
+ *
+ *     div = round(freq / (2 x prescale x request))
+ *     actual = freq / (2 x prescale x div)
+ *
+ * For CP2104 and CP2105 freq is 48Mhz and prescale is 4 for request <= 365bps
+ * or 1 otherwise.
+ * For CP2110 freq is 24Mhz and prescale is 4 for request <= 300bps or 1
+ * otherwise.
+ */
+static void cp210x_change_speed(struct tty_struct *tty,
+               struct usb_serial_port *port, struct ktermios *old_termios)
+{
+       u32 baud;
+
+       baud = tty->termios->c_ospeed;
+
+       /* This maps the requested rate to a rate valid on cp2102 or cp2103,
+        * or to an arbitrary rate in [1M,2M].
+        *
+        * NOTE: B0 is not implemented.
+        */
+       baud = cp210x_quantise_baudrate(baud);
+
+       dbg("%s - setting baud rate to %u", __func__, baud);
+       if (cp210x_set_config(port, CP210X_SET_BAUDRATE, &baud,
+                                                       sizeof(baud))) {
+               dev_warn(&port->dev, "failed to set baud rate to %u\n", baud);
+               if (old_termios)
+                       baud = old_termios->c_ospeed;
+               else
+                       baud = 9600;
+       }
+
+       tty_encode_baud_rate(tty, baud, baud);
+}
+
 static void cp210x_set_termios(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old_termios)
 {
        unsigned int cflag, old_cflag;
-       unsigned int baud = 0, bits;
+       unsigned int bits;
        unsigned int modem_ctl[4];
 
        dbg("%s - port %d", __func__, port->number);
@@ -591,20 +662,9 @@ static void cp210x_set_termios(struct tty_struct *tty,
        tty->termios->c_cflag &= ~CMSPAR;
        cflag = tty->termios->c_cflag;
        old_cflag = old_termios->c_cflag;
-       baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
-
-       /* If the baud rate is to be updated*/
-       if (baud != tty_termios_baud_rate(old_termios) && baud != 0) {
-               dbg("%s - Setting baud rate to %d baud", __func__,
-                               baud);
-               if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
-                                       ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
-                       dbg("Baud rate requested not supported by device");
-                       baud = tty_termios_baud_rate(old_termios);
-               }
-       }
-       /* Report back the resulting baud rate */
-       tty_encode_baud_rate(tty, baud, baud);
+
+       if (tty->termios->c_ospeed != old_termios->c_ospeed)
+               cp210x_change_speed(tty, port, old_termios);
 
        /* If the number of data bits is to be updated */
        if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
@@ -784,11 +844,39 @@ static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
 
 static int cp210x_startup(struct usb_serial *serial)
 {
+       struct cp210x_port_private *port_priv;
+       int i;
+
        /* cp210x buffers behave strangely unless device is reset */
        usb_reset_device(serial->dev);
+
+       for (i = 0; i < serial->num_ports; i++) {
+               port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
+               if (!port_priv)
+                       return -ENOMEM;
+
+               memset(port_priv, 0x00, sizeof(*port_priv));
+               port_priv->bInterfaceNumber =
+                   serial->interface->cur_altsetting->desc.bInterfaceNumber;
+
+               usb_set_serial_port_data(serial->port[i], port_priv);
+       }
+
        return 0;
 }
 
+static void cp210x_release(struct usb_serial *serial)
+{
+       struct cp210x_port_private *port_priv;
+       int i;
+
+       for (i = 0; i < serial->num_ports; i++) {
+               port_priv = usb_get_serial_port_data(serial->port[i]);
+               kfree(port_priv);
+               usb_set_serial_port_data(serial->port[i], NULL);
+       }
+}
+
 static int __init cp210x_init(void)
 {
        int retval;
index f968a3debf1d9a5586b957d1f2d09cea21e126cb..86fc8fcf27816266262052bcda63942a6f68b04a 100644 (file)
@@ -156,6 +156,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
  * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
  */
 static struct usb_device_id id_table_combined [] = {
+       { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
@@ -186,6 +187,7 @@ static struct usb_device_id id_table_combined [] = {
                .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_FTX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -206,6 +208,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_XF_640_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_642_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_DSS20_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_URBAN_0_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_URBAN_1_PID) },
        { USB_DEVICE(FTDI_NF_RIC_VID, FTDI_NF_RIC_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_VNHCPCUSB_D_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MTXORB_0_PID) },
@@ -532,6 +536,10 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_6_PID) },
        { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_7_PID) },
        { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
+       { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_1_PID) },
+       { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_2_PID) },
+       { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_3_PID) },
+       { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_4_PID) },
        { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
        { USB_DEVICE(OCT_VID, OCT_US101_PID) },
        { USB_DEVICE(OCT_VID, OCT_DK201_PID) },
@@ -727,11 +735,13 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
        { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
+       { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
        { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -744,6 +754,8 @@ static struct usb_device_id id_table_combined [] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, LMI_LM3S_ICDI_BOARD_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
@@ -790,14 +802,18 @@ static struct usb_device_id id_table_combined [] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) },
        { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
        { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
        { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
+       { USB_DEVICE(PI_VID, PI_E861_PID) },
        { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
        { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, TI_XDS100V2_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
        { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
        { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
@@ -829,11 +845,16 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
        { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
                .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
+       { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
@@ -856,7 +877,8 @@ static const char *ftdi_chip_name[] = {
        [FT232RL] = "FT232RL",
        [FT2232H] = "FT2232H",
        [FT4232H] = "FT4232H",
-       [FT232H]  = "FT232H"
+       [FT232H]  = "FT232H",
+       [FTX]     = "FT-X"
 };
 
 
@@ -1154,7 +1176,8 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
                break;
        case FT232BM: /* FT232BM chip */
        case FT2232C: /* FT2232C chip */
-       case FT232RL:
+       case FT232RL: /* FT232RL chip */
+       case FTX:     /* FT-X series */
                if (baud <= 3000000) {
                        __u16 product_id = le16_to_cpu(
                                port->serial->dev->descriptor.idProduct);
@@ -1320,8 +1343,7 @@ static int set_serial_info(struct tty_struct *tty,
                goto check_and_exit;
        }
 
-       if ((new_serial.baud_base != priv->baud_base) &&
-           (new_serial.baud_base < 9600)) {
+       if (new_serial.baud_base != priv->baud_base) {
                mutex_unlock(&priv->cfg_lock);
                return -EINVAL;
        }
@@ -1441,10 +1463,14 @@ static void ftdi_determine_type(struct usb_serial_port *port)
        } else if (version < 0x900) {
                /* Assume it's an FT232RL */
                priv->chip_type = FT232RL;
-       } else {
+       } else if (version < 0x1000) {
                /* Assume it's an FT232H */
                priv->chip_type = FT232H;
+       } else {
+               /* Assume it's an FT-X series device */
+               priv->chip_type = FTX;
        }
+
        dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
 }
 
@@ -1572,7 +1598,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
                     priv->chip_type == FT232RL ||
                     priv->chip_type == FT2232H ||
                     priv->chip_type == FT4232H ||
-                    priv->chip_type == FT232H)) {
+                    priv->chip_type == FT232H ||
+                    priv->chip_type == FTX)) {
                        retval = device_create_file(&port->dev,
                                                    &dev_attr_latency_timer);
                }
@@ -1594,7 +1621,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
                    priv->chip_type == FT232RL ||
                    priv->chip_type == FT2232H ||
                    priv->chip_type == FT4232H ||
-                    priv->chip_type == FT232H) {
+                   priv->chip_type == FT232H ||
+                   priv->chip_type == FTX) {
                        device_remove_file(&port->dev, &dev_attr_latency_timer);
                }
        }
@@ -1745,7 +1773,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
 
        dbg("%s", __func__);
 
-       if (strcmp(udev->manufacturer, "CALAO Systems") == 0)
+       if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
+           (udev->product && !strcmp(udev->product, "BeagleBone/XDS100")))
                return ftdi_jtag_probe(serial);
 
        return 0;
@@ -1810,6 +1839,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
 
 static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
+       struct ktermios dummy;
        struct usb_device *dev = port->serial->dev;
        struct ftdi_private *priv = usb_get_serial_port_data(port);
        int result;
@@ -1828,8 +1858,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
           This is same behaviour as serial.c/rs_open() - Kuba */
 
        /* ftdi_set_termios  will send usb control messages */
-       if (tty)
-               ftdi_set_termios(tty, port, tty->termios);
+       if (tty) {
+               memset(&dummy, 0, sizeof(dummy));
+               ftdi_set_termios(tty, port, &dummy);
+       }
 
        /* Start reading from the device */
        result = usb_serial_generic_open(tty, port);
@@ -2075,13 +2107,19 @@ static void ftdi_set_termios(struct tty_struct *tty,
 
        cflag = termios->c_cflag;
 
-       /* FIXME -For this cut I don't care if the line is really changing or
-          not  - so just do the change regardless  - should be able to
-          compare old_termios and tty->termios */
+       if (old_termios->c_cflag == termios->c_cflag
+           && old_termios->c_ispeed == termios->c_ispeed
+           && old_termios->c_ospeed == termios->c_ospeed)
+               goto no_c_cflag_changes;
+
        /* NOTE These routines can get interrupted by
           ftdi_sio_read_bulk_callback  - need to examine what this means -
           don't see any problems yet */
 
+       if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) ==
+           (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
+               goto no_data_parity_stop_changes;
+
        /* Set number of data bits, parity, stop bits */
 
        urb_value = 0;
@@ -2122,6 +2160,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
        }
 
        /* Now do the baudrate */
+no_data_parity_stop_changes:
        if ((cflag & CBAUD) == B0) {
                /* Disable flow control */
                if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -2149,6 +2188,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
 
        /* Set flow control */
        /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */
+no_c_cflag_changes:
        if (cflag & CRTSCTS) {
                dbg("%s Setting to CRTSCTS flow control", __func__);
                if (usb_control_msg(dev,
@@ -2239,6 +2279,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
        case FT2232H:
        case FT4232H:
        case FT232H:
+       case FTX:
                len = 2;
                break;
        default:
index 19584faa86f92c35ce94fcdf345c5d591da58187..ed58c6fa8dbe1f2614e425524f8056e6315a8ede 100644 (file)
@@ -157,7 +157,8 @@ enum ftdi_chip_type {
        FT232RL = 5,
        FT2232H = 6,
        FT4232H = 7,
-       FT232H  = 8
+       FT232H  = 8,
+       FTX     = 9,
 };
 
 enum ftdi_sio_baudrate {
index 19156d1049fe22134e745158a6dc64da1a9d0a1b..d27d7d777ea361205ef58bfe3ec39a6ab96cc958 100644 (file)
 #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
 #define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
 #define FTDI_232H_PID  0x6014 /* Single channel hi-speed device */
+#define FTDI_FTX_PID   0x6015 /* FT-X series (FT201X, FT230X, FT231X, etc) */
 #define FTDI_SIO_PID   0x8372  /* Product Id SIO application of 8U100AX */
 #define FTDI_232RL_PID  0xFBFA  /* Product ID for FT232RL */
 
 
 /*** third-party PIDs (using FTDI_VID) ***/
 
+#define FTDI_LUMEL_PD12_PID    0x6002
+
 /*
  * Marvell OpenRD Base, Client
  * http://www.open-rd.org
 /* www.candapter.com Ewert Energy Systems CANdapter device */
 #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
 
+/*
+ * Texas Instruments XDS100v2 JTAG / BeagleBone A3
+ * http://processors.wiki.ti.com/index.php/XDS100
+ * http://beagleboard.org/bone
+ */
+#define TI_XDS100V2_PID                0xa6d0
+
 #define FTDI_NXTCAM_PID                0xABB8 /* NXTCam for Mindstorms NXT */
 
 /* US Interface Navigator (http://www.usinterface.com/) */
@@ -54,6 +64,7 @@
 /* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
 #define LMI_LM3S_DEVEL_BOARD_PID       0xbcd8
 #define LMI_LM3S_EVAL_BOARD_PID                0xbcd9
+#define LMI_LM3S_ICDI_BOARD_PID                0xbcda
 
 #define FTDI_TURTELIZER_PID    0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
 
 #define FTDI_TACTRIX_OPENPORT_13S_PID  0xCC49  /* OpenPort 1.3 Subaru */
 #define FTDI_TACTRIX_OPENPORT_13U_PID  0xCC4A  /* OpenPort 1.3 Universal */
 
+#define FTDI_DISTORTEC_JTAG_LOCK_PICK_PID      0xCFF8
+
 /* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
 /* the VID is the standard ftdi vid (FTDI_VID) */
 #define FTDI_SCS_DEVICE_0_PID 0xD010    /* SCS PTC-IIusb */
 
 /* Propox devices */
 #define FTDI_PROPOX_JTAGCABLEII_PID    0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID    0xD739
 
 /* Lenz LI-USB Computer Interface. */
 #define FTDI_LENZ_LIUSB_PID    0xD780
 #define PROTEGO_SPECIAL_4      0xFC73  /* special/unknown device */
 
 /*
- * DSS-20 Sync Station for Sony Ericsson P800
+ * Sony Ericsson product ids
  */
-#define FTDI_DSS20_PID          0xFC82
+#define FTDI_DSS20_PID         0xFC82  /* DSS-20 Sync Station for Sony Ericsson P800 */
+#define FTDI_URBAN_0_PID       0xFC8A  /* Sony Ericsson Urban, uart #0 */
+#define FTDI_URBAN_1_PID       0xFC8B  /* Sony Ericsson Urban, uart #1 */
 
 /* www.irtrans.de device */
 #define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
 #define ADI_GNICE_PID          0xF000
 #define ADI_GNICEPLUS_PID      0xF001
 
+/*
+ * Microchip Technology, Inc.
+ *
+ * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by:
+ * Hornby Elite - Digital Command Control Console
+ * http://www.hornby.com/hornby-dcc/controllers/
+ */
+#define MICROCHIP_VID          0x04D8
+#define MICROCHIP_USB_BOARD_PID        0x000A /* CDC RS-232 Emulation Demo */
+
 /*
  * RATOC REX-USB60F
  */
 #define SEALEVEL_2803_6_PID    0X2863  /* SeaLINK+8 (2803) Port 6 */
 #define SEALEVEL_2803_7_PID    0X2873  /* SeaLINK+8 (2803) Port 7 */
 #define SEALEVEL_2803_8_PID    0X2883  /* SeaLINK+8 (2803) Port 8 */
+#define SEALEVEL_2803R_1_PID   0Xa02a  /* SeaLINK+8 (2803-ROHS) Port 1+2 */
+#define SEALEVEL_2803R_2_PID   0Xa02b  /* SeaLINK+8 (2803-ROHS) Port 3+4 */
+#define SEALEVEL_2803R_3_PID   0Xa02c  /* SeaLINK+8 (2803-ROHS) Port 5+6 */
+#define SEALEVEL_2803R_4_PID   0Xa02d  /* SeaLINK+8 (2803-ROHS) Port 7+8 */
 
 /*
  * JETI SPECTROMETER SPECBOS 1201
 #define RTSYSTEMS_VID                  0x2100  /* Vendor ID */
 #define RTSYSTEMS_SERIAL_VX7_PID       0x9e52  /* Serial converter for VX-7 Radios using FT232RL */
 #define RTSYSTEMS_CT29B_PID            0x9e54  /* CT29B Radio Cable */
+#define RTSYSTEMS_RTS01_PID            0x9e57  /* USB-RTS01 Radio Cable */
+
+
+/*
+ * Physik Instrumente
+ * http://www.physikinstrumente.com/en/products/
+ */
+#define PI_VID              0x1a72  /* Vendor ID */
+#define PI_E861_PID         0x1008  /* E-861 piezo controller USB connection */
 
 /*
  * Bayer Ascensia Contour blood glucose meter USB-converter cable.
 /* USB-Nano-485*/
 #define FTDI_CTI_NANO_PID      0xF60B
 
+/*
+ * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de
+ */
+/* TagTracer MIFARE*/
+#define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID   0xF7C0
+
+/*
+ * Rainforest Automation
+ */
+/* ZigBee controller */
+#define FTDI_RF_R106           0x8A28
 
+/*
+ * Product: HCP HIT GPRS modem
+ * Manufacturer: HCP d.o.o.
+ * ATI command output: Cinterion MC55i
+ */
+#define FTDI_CINTERION_MC55I_PID       0xA951
index e4db5ad2bc55f1c37f2da2b15c086534e98248c7..9f0b2bff8ee48e70950794ad367a94f3566b82bc 100644 (file)
@@ -215,8 +215,10 @@ retry:
        clear_bit(i, &port->write_urbs_free);
        result = usb_submit_urb(urb, GFP_ATOMIC);
        if (result) {
-               dev_err(&port->dev, "%s - error submitting urb: %d\n",
+               if (!port->port.console) {
+                       dev_err(&port->dev, "%s - error submitting urb: %d\n",
                                                __func__, result);
+               }
                set_bit(i, &port->write_urbs_free);
                spin_lock_irqsave(&port->lock, flags);
                port->tx_bytes -= count;
index 0aac00afb5c8212403466a3ede7a49b78f63a2e7..8a90d58ee96d16f885d120190684d64640fe4971 100644 (file)
@@ -2677,15 +2677,7 @@ cleanup:
 
 static void edge_disconnect(struct usb_serial *serial)
 {
-       int i;
-       struct edgeport_port *edge_port;
-
        dbg("%s", __func__);
-
-       for (i = 0; i < serial->num_ports; ++i) {
-               edge_port = usb_get_serial_port_data(serial->port[i]);
-               edge_remove_sysfs_attrs(edge_port->port);
-       }
 }
 
 static void edge_release(struct usb_serial *serial)
@@ -2764,6 +2756,7 @@ static struct usb_serial_driver edgeport_1port_device = {
        .disconnect             = edge_disconnect,
        .release                = edge_release,
        .port_probe             = edge_create_sysfs_attrs,
+       .port_remove            = edge_remove_sysfs_attrs,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
        .tiocmget               = edge_tiocmget,
@@ -2795,6 +2788,7 @@ static struct usb_serial_driver edgeport_2port_device = {
        .disconnect             = edge_disconnect,
        .release                = edge_release,
        .port_probe             = edge_create_sysfs_attrs,
+       .port_remove            = edge_remove_sysfs_attrs,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
        .tiocmget               = edge_tiocmget,
index ba0d28727ccb1e6628c0492432558cb6314a61f7..42de17b7037481e397c1cdcd651587e381f042bb 100644 (file)
@@ -359,13 +359,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
                        MCT_U232_SET_REQUEST_TYPE,
                        0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
                        WDR_TIMEOUT);
-       if (rc < 0)
-               dev_err(&serial->dev->dev,
-                       "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+       kfree(buf);
+
        dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
 
-       kfree(buf);
-       return rc;
+       if (rc < 0) {
+               dev_err(&serial->dev->dev,
+                       "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+               return rc;
+       }
+       return 0;
 } /* mct_u232_set_modem_ctrl */
 
 static int mct_u232_get_modem_stat(struct usb_serial *serial,
index 7b50aa12275277fb4e9da7e874551d1fbb3380e6..fdbeb6a0e9055a6bdb8ab5b7c942700dae241db8 100644 (file)
 
 #define CLK_MULTI_REGISTER         ((__u16)(0x02))
 #define CLK_START_VALUE_REGISTER   ((__u16)(0x03))
+#define GPIO_REGISTER              ((__u16)(0x07))
 
 #define SERIAL_LCR_DLAB            ((__u16)(0x0080))
 
@@ -205,7 +206,7 @@ static const struct usb_device_id moschip_port_id_table[] = {
        {}                      /* terminating entry */
 };
 
-static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
+static const struct usb_device_id moschip_id_table_combined[] = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -1103,14 +1104,25 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
        mos7840_port->read_urb = port->read_urb;
 
        /* set up our bulk in urb */
-
-       usb_fill_bulk_urb(mos7840_port->read_urb,
-                         serial->dev,
-                         usb_rcvbulkpipe(serial->dev,
-                                         port->bulk_in_endpointAddress),
-                         port->bulk_in_buffer,
-                         mos7840_port->read_urb->transfer_buffer_length,
-                         mos7840_bulk_in_callback, mos7840_port);
+       if ((serial->num_ports == 2)
+               && ((((__u16)port->number -
+                       (__u16)(port->serial->minor)) % 2) != 0)) {
+               usb_fill_bulk_urb(mos7840_port->read_urb,
+                       serial->dev,
+                       usb_rcvbulkpipe(serial->dev,
+                               (port->bulk_in_endpointAddress) + 2),
+                       port->bulk_in_buffer,
+                       mos7840_port->read_urb->transfer_buffer_length,
+                       mos7840_bulk_in_callback, mos7840_port);
+       } else {
+               usb_fill_bulk_urb(mos7840_port->read_urb,
+                       serial->dev,
+                       usb_rcvbulkpipe(serial->dev,
+                               port->bulk_in_endpointAddress),
+                       port->bulk_in_buffer,
+                       mos7840_port->read_urb->transfer_buffer_length,
+                       mos7840_bulk_in_callback, mos7840_port);
+       }
 
        dbg("mos7840_open: bulkin endpoint is %d",
            port->bulk_in_endpointAddress);
@@ -1521,13 +1533,25 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
        memcpy(urb->transfer_buffer, current_position, transfer_size);
 
        /* fill urb with data and submit  */
-       usb_fill_bulk_urb(urb,
-                         serial->dev,
-                         usb_sndbulkpipe(serial->dev,
-                                         port->bulk_out_endpointAddress),
-                         urb->transfer_buffer,
-                         transfer_size,
-                         mos7840_bulk_out_data_callback, mos7840_port);
+       if ((serial->num_ports == 2)
+               && ((((__u16)port->number -
+                       (__u16)(port->serial->minor)) % 2) != 0)) {
+               usb_fill_bulk_urb(urb,
+                       serial->dev,
+                       usb_sndbulkpipe(serial->dev,
+                               (port->bulk_out_endpointAddress) + 2),
+                       urb->transfer_buffer,
+                       transfer_size,
+                       mos7840_bulk_out_data_callback, mos7840_port);
+       } else {
+               usb_fill_bulk_urb(urb,
+                       serial->dev,
+                       usb_sndbulkpipe(serial->dev,
+                               port->bulk_out_endpointAddress),
+                       urb->transfer_buffer,
+                       transfer_size,
+                       mos7840_bulk_out_data_callback, mos7840_port);
+       }
 
        data1 = urb->transfer_buffer;
        dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress);
@@ -1840,7 +1864,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
 
        } else {
 #ifdef HW_flow_control
-               / *setting h/w flow control bit to 0 */
+               /setting h/w flow control bit to 0 */
                Data = 0xb;
                mos7840_port->shadowMCR = Data;
                status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
@@ -2310,19 +2334,26 @@ static int mos7840_ioctl(struct tty_struct *tty,
 
 static int mos7840_calc_num_ports(struct usb_serial *serial)
 {
-       int mos7840_num_ports = 0;
-
-       dbg("numberofendpoints: cur %d, alt %d",
-           (int)serial->interface->cur_altsetting->desc.bNumEndpoints,
-           (int)serial->interface->altsetting->desc.bNumEndpoints);
-       if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) {
-               mos7840_num_ports = serial->num_ports = 2;
-       } else if (serial->interface->cur_altsetting->desc.bNumEndpoints == 9) {
+       __u16 Data = 0x00;
+       int ret = 0;
+       int mos7840_num_ports;
+
+       ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+               MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &Data,
+               VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+
+       if ((Data & 0x01) == 0) {
+               mos7840_num_ports = 2;
+               serial->num_bulk_in = 2;
+               serial->num_bulk_out = 2;
+               serial->num_ports = 2;
+       } else {
+               mos7840_num_ports = 4;
                serial->num_bulk_in = 4;
                serial->num_bulk_out = 4;
-               mos7840_num_ports = serial->num_ports = 4;
+               serial->num_ports = 4;
        }
-       dbg ("mos7840_num_ports = %d", mos7840_num_ports);
+
        return mos7840_num_ports;
 }
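
The rewritten mos7840_calc_num_ports() stops guessing the port count from the number of endpoints and instead reads the device's GPIO register over the control pipe (MCS_RDREQ / MCS_RD_RTYPE / GPIO_REGISTER and the timeout are the driver's own definitions): bit 0 clear means a two-port chip, bit 0 set means four ports, and the bulk endpoint counts are set to match. A minimal userspace illustration of just that decision, with a hard-coded register value standing in for the usb_control_msg() read:

#include <stdio.h>
#include <stdint.h>

/* Bit 0 of the GPIO register selects between the 2-port and 4-port
 * variants; the value is faked here, in the driver it comes back from
 * the vendor read request. */
static int ports_from_gpio(uint16_t gpio)
{
        return (gpio & 0x01) ? 4 : 2;
}

int main(void)
{
        printf("gpio=0x00 -> %d ports\n", ports_from_gpio(0x00)); /* 2 */
        printf("gpio=0x01 -> %d ports\n", ports_from_gpio(0x01)); /* 4 */
        return 0;
}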
 
index 60f38d5e64fce24761dacdbb7871fb57e9c55e68..0a8c1e64b247763dd4130a53b49888be39e66ad7 100644 (file)
@@ -315,7 +315,7 @@ static int omninet_write_room(struct tty_struct *tty)
        int room = 0; /* Default: no room */
 
        /* FIXME: no consistent locking for write_urb_busy */
-       if (wport->write_urb_busy)
+       if (!wport->write_urb_busy)
                room = wport->bulk_out_size - OMNINET_HEADERLEN;
 
        dbg("%s - returns %d", __func__, room);
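
The omninet change fixes an inverted test: write room should only be reported while the single write urb is idle, otherwise the driver advertises space it cannot use yet. A tiny sketch of the corrected semantics; OMNINET_HEADERLEN is assumed to be 4 and the buffer size is illustrative:

#include <stdio.h>
#include <stdbool.h>

#define OMNINET_HEADERLEN 4     /* header size assumed for the example */

/* Room is available only when the write urb is NOT busy. */
static int omninet_room(bool write_urb_busy, int bulk_out_size)
{
        return write_urb_busy ? 0 : bulk_out_size - OMNINET_HEADERLEN;
}

int main(void)
{
        printf("idle urb: %d bytes\n", omninet_room(false, 64)); /* 60 */
        printf("busy urb: %d bytes\n", omninet_room(true, 64));  /* 0 */
        return 0;
}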
index 8e84303422304d4b1a9aacb172f2a9635a09d5db..e9c5474b8e4a620f26651aebd51a584ee848b727 100644 (file)
@@ -47,6 +47,7 @@
 /* Function prototypes */
 static int  option_probe(struct usb_serial *serial,
                        const struct usb_device_id *id);
+static void option_release(struct usb_serial *serial);
 static int option_send_setup(struct usb_serial_port *port);
 static void option_instat_callback(struct urb *urb);
 
@@ -150,12 +151,14 @@ static void option_instat_callback(struct urb *urb);
 #define HUAWEI_PRODUCT_E14AC                   0x14AC
 #define HUAWEI_PRODUCT_K3806                   0x14AE
 #define HUAWEI_PRODUCT_K4605                   0x14C6
+#define HUAWEI_PRODUCT_K5005                   0x14C8
 #define HUAWEI_PRODUCT_K3770                   0x14C9
 #define HUAWEI_PRODUCT_K3771                   0x14CA
 #define HUAWEI_PRODUCT_K4510                   0x14CB
 #define HUAWEI_PRODUCT_K4511                   0x14CC
 #define HUAWEI_PRODUCT_ETS1220                 0x1803
 #define HUAWEI_PRODUCT_E353                    0x1506
+#define HUAWEI_PRODUCT_E173S                   0x1C05
 
 #define QUANTA_VENDOR_ID                       0x0408
 #define QUANTA_PRODUCT_Q101                    0xEA02
@@ -306,6 +309,9 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_VENDOR_ID                                0x1bc7
 #define TELIT_PRODUCT_UC864E                   0x1003
 #define TELIT_PRODUCT_UC864G                   0x1004
+#define TELIT_PRODUCT_CC864_DUAL               0x1005
+#define TELIT_PRODUCT_CC864_SINGLE             0x1006
+#define TELIT_PRODUCT_DE910_DUAL               0x1010
 
 /* ZTE PRODUCTS */
 #define ZTE_VENDOR_ID                          0x19d2
@@ -317,6 +323,9 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_AC2726                     0xfff5
 #define ZTE_PRODUCT_AC100                      0x0094
 #define ZTE_PRODUCT_AC8710T                    0xffff
+#define ZTE_PRODUCT_MC2718                     0xffe8
+#define ZTE_PRODUCT_AD3812                     0xffeb
+#define ZTE_PRODUCT_MC2716                     0xffed
 
 #define BENQ_VENDOR_ID                         0x04a5
 #define BENQ_PRODUCT_H10                       0x4068
@@ -431,7 +440,7 @@ static void option_instat_callback(struct urb *urb);
 #define SC8800G_VENDOR_ID      0x067b
 #define SC8800G_PRODUCT_ID     0x2303
 
-/* YUGA products  www.yuga-info.com*/
+/* YUGA products  www.yuga-info.com gavin.kx@qq.com */
 #define YUGA_VENDOR_ID                         0x257A
 #define YUGA_PRODUCT_CEM600                    0x1601
 #define YUGA_PRODUCT_CEM610                    0x1602
@@ -448,6 +457,8 @@ static void option_instat_callback(struct urb *urb);
 #define YUGA_PRODUCT_CEU516                    0x160C
 #define YUGA_PRODUCT_CEU528                    0x160D
 #define YUGA_PRODUCT_CEU526                    0x160F
+#define YUGA_PRODUCT_CEU881                    0x161F
+#define YUGA_PRODUCT_CEU882                    0x162F
 
 #define YUGA_PRODUCT_CWM600                    0x2601
 #define YUGA_PRODUCT_CWM610                    0x2602
@@ -463,23 +474,41 @@ static void option_instat_callback(struct urb *urb);
 #define YUGA_PRODUCT_CWU518                    0x260B
 #define YUGA_PRODUCT_CWU516                    0x260C
 #define YUGA_PRODUCT_CWU528                    0x260D
+#define YUGA_PRODUCT_CWU581                    0x260E
 #define YUGA_PRODUCT_CWU526                    0x260F
-
-#define YUGA_PRODUCT_CLM600                    0x2601
-#define YUGA_PRODUCT_CLM610                    0x2602
-#define YUGA_PRODUCT_CLM500                    0x2603
-#define YUGA_PRODUCT_CLM510                    0x2604
-#define YUGA_PRODUCT_CLM800                    0x2605
-#define YUGA_PRODUCT_CLM900                    0x2606
-
-#define YUGA_PRODUCT_CLU718                    0x2607
-#define YUGA_PRODUCT_CLU716                    0x2608
-#define YUGA_PRODUCT_CLU728                    0x2609
-#define YUGA_PRODUCT_CLU726                    0x260A
-#define YUGA_PRODUCT_CLU518                    0x260B
-#define YUGA_PRODUCT_CLU516                    0x260C
-#define YUGA_PRODUCT_CLU528                    0x260D
-#define YUGA_PRODUCT_CLU526                    0x260F
+#define YUGA_PRODUCT_CWU582                    0x261F
+#define YUGA_PRODUCT_CWU583                    0x262F
+
+#define YUGA_PRODUCT_CLM600                    0x3601
+#define YUGA_PRODUCT_CLM610                    0x3602
+#define YUGA_PRODUCT_CLM500                    0x3603
+#define YUGA_PRODUCT_CLM510                    0x3604
+#define YUGA_PRODUCT_CLM800                    0x3605
+#define YUGA_PRODUCT_CLM900                    0x3606
+
+#define YUGA_PRODUCT_CLU718                    0x3607
+#define YUGA_PRODUCT_CLU716                    0x3608
+#define YUGA_PRODUCT_CLU728                    0x3609
+#define YUGA_PRODUCT_CLU726                    0x360A
+#define YUGA_PRODUCT_CLU518                    0x360B
+#define YUGA_PRODUCT_CLU516                    0x360C
+#define YUGA_PRODUCT_CLU528                    0x360D
+#define YUGA_PRODUCT_CLU526                    0x360F
+
+/* Viettel products */
+#define VIETTEL_VENDOR_ID                      0x2262
+#define VIETTEL_PRODUCT_VT1000                 0x0002
+
+/* ZD Incorporated */
+#define ZD_VENDOR_ID                           0x0685
+#define ZD_PRODUCT_7000                                0x7000
+
+/* LG products */
+#define LG_VENDOR_ID                           0x1004
+#define LG_PRODUCT_L02C                                0x618f
+
+/* MediaTek products */
+#define MEDIATEK_VENDOR_ID                     0x0e8d
 
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
@@ -488,31 +517,66 @@ enum option_blacklist_reason {
                OPTION_BLACKLIST_RESERVED_IF = 2
 };
 
+#define MAX_BL_NUM  8
 struct option_blacklist_info {
-       const u32 infolen;      /* number of interface numbers on blacklist */
-       const u8  *ifaceinfo;   /* pointer to the array holding the numbers */
-       enum option_blacklist_reason reason;
+       /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
+       const unsigned long sendsetup;
+       /* bitfield of interface numbers for OPTION_BLACKLIST_RESERVED_IF */
+       const unsigned long reserved;
 };
 
-static const u8 four_g_w14_no_sendsetup[] = { 0, 1 };
 static const struct option_blacklist_info four_g_w14_blacklist = {
-       .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup),
-       .ifaceinfo = four_g_w14_no_sendsetup,
-       .reason = OPTION_BLACKLIST_SENDSETUP
+       .sendsetup = BIT(0) | BIT(1),
 };
 
-static const u8 alcatel_x200_no_sendsetup[] = { 0, 1 };
 static const struct option_blacklist_info alcatel_x200_blacklist = {
-       .infolen = ARRAY_SIZE(alcatel_x200_no_sendsetup),
-       .ifaceinfo = alcatel_x200_no_sendsetup,
-       .reason = OPTION_BLACKLIST_SENDSETUP
+       .sendsetup = BIT(0) | BIT(1),
+};
+
+static const struct option_blacklist_info zte_0037_blacklist = {
+       .sendsetup = BIT(0) | BIT(1),
 };
 
-static const u8 zte_k3765_z_no_sendsetup[] = { 0, 1, 2 };
 static const struct option_blacklist_info zte_k3765_z_blacklist = {
-       .infolen = ARRAY_SIZE(zte_k3765_z_no_sendsetup),
-       .ifaceinfo = zte_k3765_z_no_sendsetup,
-       .reason = OPTION_BLACKLIST_SENDSETUP
+       .sendsetup = BIT(0) | BIT(1) | BIT(2),
+       .reserved = BIT(4),
+};
+
+static const struct option_blacklist_info zte_ad3812_z_blacklist = {
+       .sendsetup = BIT(0) | BIT(1) | BIT(2),
+};
+
+static const struct option_blacklist_info zte_mc2718_z_blacklist = {
+       .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_mc2716_z_blacklist = {
+       .sendsetup = BIT(1) | BIT(2) | BIT(3),
+};
+
+static const struct option_blacklist_info huawei_cdc12_blacklist = {
+       .reserved = BIT(1) | BIT(2),
+};
+
+static const struct option_blacklist_info net_intf1_blacklist = {
+       .reserved = BIT(1),
+};
+
+static const struct option_blacklist_info net_intf3_blacklist = {
+       .reserved = BIT(3),
+};
+
+static const struct option_blacklist_info net_intf4_blacklist = {
+       .reserved = BIT(4),
+};
+
+static const struct option_blacklist_info net_intf5_blacklist = {
+       .reserved = BIT(5),
+};
+
+static const struct option_blacklist_info zte_mf626_blacklist = {
+       .sendsetup = BIT(0) | BIT(1),
+       .reserved = BIT(4),
 };
 
 static const struct usb_device_id option_ids[] = {
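
The blacklist rework above replaces the per-device array of interface numbers with two fixed-width bitmasks, so one structure carries both the send_setup list and the reserved-interface list and a new entry stays a one-liner. A self-contained sketch of the lookup; BIT() and the MAX_BL_NUM bound mimic the kernel definitions, and the mask values are the zte_mf626 entry from above:

#include <stdio.h>
#include <stdbool.h>

#define BIT(n)      (1UL << (n))
#define MAX_BL_NUM  8

struct blacklist {
        unsigned long sendsetup;   /* interfaces that must not get send_setup */
        unsigned long reserved;    /* interfaces the serial driver must not bind */
};

/* Same idea as the is_blacklisted() helper later in this diff:
 * test one interface number against the chosen bitmask. */
static bool listed(unsigned long mask, unsigned ifnum)
{
        return ifnum <= MAX_BL_NUM && (mask & BIT(ifnum));
}

int main(void)
{
        struct blacklist mf626 = { .sendsetup = BIT(0) | BIT(1), .reserved = BIT(4) };

        printf("ifnum 1 sendsetup-blocked: %d\n", listed(mf626.sendsetup, 1)); /* 1 */
        printf("ifnum 4 reserved:          %d\n", listed(mf626.reserved, 4));  /* 1 */
        printf("ifnum 2 reserved:          %d\n", listed(mf626.reserved, 2));  /* 0 */
        return 0;
}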
@@ -617,12 +681,21 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -632,9 +705,14 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -660,6 +738,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
@@ -721,12 +800,17 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
@@ -741,57 +825,63 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
-       /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
-         0xff, 0xff), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist },
+         0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&zte_0037_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
-       /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
@@ -800,11 +890,23 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0088, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0089, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0090, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0091, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0092, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0093, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
@@ -818,23 +920,20 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
@@ -1009,17 +1108,27 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+         0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
-         0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
+
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
@@ -1029,6 +1138,12 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xF006, 0xff, 0xff, 0xff) },
        { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_AC100)},
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
+        .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
+        .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+        .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
        { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -1162,6 +1277,18 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
+       { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
+       { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) },        /* MediaTek MT6276M modem & app port */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1205,7 +1332,7 @@ static struct usb_serial_driver option_1port_device = {
        .ioctl             = usb_wwan_ioctl,
        .attach            = usb_wwan_startup,
        .disconnect        = usb_wwan_disconnect,
-       .release           = usb_wwan_release,
+       .release           = option_release,
        .read_int_callback = option_instat_callback,
 #ifdef CONFIG_PM
        .suspend           = usb_wwan_suspend,
@@ -1215,35 +1342,6 @@ static struct usb_serial_driver option_1port_device = {
 
 static int debug;
 
-/* per port private data */
-
-#define N_IN_URB 4
-#define N_OUT_URB 4
-#define IN_BUFLEN 4096
-#define OUT_BUFLEN 4096
-
-struct option_port_private {
-       /* Input endpoints and buffer for this port */
-       struct urb *in_urbs[N_IN_URB];
-       u8 *in_buffer[N_IN_URB];
-       /* Output endpoints and buffer for this port */
-       struct urb *out_urbs[N_OUT_URB];
-       u8 *out_buffer[N_OUT_URB];
-       unsigned long out_busy;         /* Bit vector of URBs in use */
-       int opened;
-       struct usb_anchor delayed;
-
-       /* Settings for the port */
-       int rts_state;  /* Handshaking pins (outputs) */
-       int dtr_state;
-       int cts_state;  /* Handshaking pins (inputs) */
-       int dsr_state;
-       int dcd_state;
-       int ri_state;
-
-       unsigned long tx_start_time[N_OUT_URB];
-};
-
 /* Functions used by new usb-serial code. */
 static int __init option_init(void)
 {
@@ -1275,10 +1373,35 @@ static void __exit option_exit(void)
 module_init(option_init);
 module_exit(option_exit);
 
+static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
+                          const struct option_blacklist_info *blacklist)
+{
+       unsigned long num;
+       const unsigned long *intf_list;
+
+       if (blacklist) {
+               if (reason == OPTION_BLACKLIST_SENDSETUP)
+                       intf_list = &blacklist->sendsetup;
+               else if (reason == OPTION_BLACKLIST_RESERVED_IF)
+                       intf_list = &blacklist->reserved;
+               else {
+                       BUG_ON(reason);
+                       return false;
+               }
+
+               for_each_set_bit(num, intf_list, MAX_BL_NUM + 1) {
+                       if (num == ifnum)
+                               return true;
+               }
+       }
+       return false;
+}
+
 static int option_probe(struct usb_serial *serial,
                        const struct usb_device_id *id)
 {
        struct usb_wwan_intf_private *data;
+
        /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
        if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
                serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
@@ -1291,13 +1414,14 @@ static int option_probe(struct usb_serial *serial,
                serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
                return -ENODEV;
 
-       /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */
-       if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
-               (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
-                       serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 ||
-                       serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) &&
-               (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 ||
-                       serial->interface->cur_altsetting->desc.bInterfaceNumber == 2))
+       /* Don't bind reserved interfaces (like network ones) which often have
+        * the same class/subclass/protocol as the serial interfaces.  Look at
+        * the Windows driver .INF files for reserved interface numbers.
+        */
+       if (is_blacklisted(
+               serial->interface->cur_altsetting->desc.bInterfaceNumber,
+               OPTION_BLACKLIST_RESERVED_IF,
+               (const struct option_blacklist_info *) id->driver_info))
                return -ENODEV;
 
        /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
@@ -1307,7 +1431,6 @@ static int option_probe(struct usb_serial *serial,
                return -ENODEV;
 
        data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
-
        if (!data)
                return -ENOMEM;
        data->send_setup = option_send_setup;
@@ -1316,21 +1439,13 @@ static int option_probe(struct usb_serial *serial,
        return 0;
 }
 
-static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
-                               const struct option_blacklist_info *blacklist)
+static void option_release(struct usb_serial *serial)
 {
-       const u8  *info;
-       int i;
+       struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
 
-       if (blacklist) {
-               info = blacklist->ifaceinfo;
+       usb_wwan_release(serial);
 
-               for (i = 0; i < blacklist->infolen; i++) {
-                       if (info[i] == ifnum)
-                               return blacklist->reason;
-               }
-       }
-       return OPTION_BLACKLIST_NONE;
+       kfree(priv);
 }
 
 static void option_instat_callback(struct urb *urb)
@@ -1338,7 +1453,8 @@ static void option_instat_callback(struct urb *urb)
        int err;
        int status = urb->status;
        struct usb_serial_port *port =  urb->context;
-       struct option_port_private *portdata = usb_get_serial_port_data(port);
+       struct usb_wwan_port_private *portdata =
+                                       usb_get_serial_port_data(port);
        static int err_times = 0;
 
        dbg("%s", __func__);
@@ -1409,14 +1525,13 @@ static int option_send_setup(struct usb_serial_port *port)
        struct usb_serial *serial = port->serial;
        struct usb_wwan_intf_private *intfdata =
                (struct usb_wwan_intf_private *) serial->private;
-       struct option_port_private *portdata;
+       struct usb_wwan_port_private *portdata;
        int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
        int val = 0;
        dbg("%s", __func__);
 
-       if (is_blacklisted(ifNum,
-                          (struct option_blacklist_info *) intfdata->private)
-           == OPTION_BLACKLIST_SENDSETUP) {
+       if (is_blacklisted(ifNum, OPTION_BLACKLIST_SENDSETUP,
+                       (struct option_blacklist_info *) intfdata->private)) {
                dbg("No send_setup on blacklisted interface #%d\n", ifNum);
                return -EIO;
        }
index 1d33260de014f0ee3c39546567a2e61b867b7ebf..5aa7172e049f1c64ed40d048452fa847c1d3928c 100644 (file)
@@ -91,7 +91,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
        { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
        { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
-       { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
+       { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
        { }                                     /* Terminating entry */
 };
 
@@ -424,7 +424,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
        control = priv->line_control;
        if ((cflag & CBAUD) == B0)
                priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
-       else
+       else if ((old_termios->c_cflag & CBAUD) == B0)
                priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
        if (control != priv->line_control) {
                control = priv->line_control;
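
The pl2303 termios fix only reasserts DTR/RTS when the baud rate is raised out of B0 (hang-up), instead of on every termios change, so an unrelated attribute change no longer toggles the handshake lines. A sketch of the transition logic with the control bits reduced to plain flags; the CONTROL_DTR/CONTROL_RTS values are placeholders standing in for the driver's definitions:

#include <stdio.h>
#include <stdbool.h>

#define CONTROL_DTR 0x01        /* placeholder bit values for the example */
#define CONTROL_RTS 0x02

/* new_b0/old_b0: whether the new/old termios request baud B0 (hang up). */
static unsigned update_lines(unsigned line_control, bool new_b0, bool old_b0)
{
        if (new_b0)
                line_control &= ~(CONTROL_DTR | CONTROL_RTS);   /* drop on B0 */
        else if (old_b0)
                line_control |= CONTROL_DTR | CONTROL_RTS;      /* raise only when leaving B0 */
        return line_control;    /* otherwise leave the lines untouched */
}

int main(void)
{
        printf("B38400 -> B38400: 0x%x\n", update_lines(CONTROL_DTR, false, false));
        printf("B38400 -> B0:     0x%x\n", update_lines(CONTROL_DTR | CONTROL_RTS, true, false));
        printf("B0 -> B38400:     0x%x\n", update_lines(0, false, true));
        return 0;
}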
index ca0d237683b38f6985f3d62de82b2af30b1dda10..c38b8c00c06fddd4c71f3508df13f592d2b4480b 100644 (file)
 #define ADLINK_VENDOR_ID               0x0b63
 #define ADLINK_ND6530_PRODUCT_ID       0x6530
 
-/* WinChipHead USB->RS 232 adapter */
-#define WINCHIPHEAD_VENDOR_ID          0x4348
-#define WINCHIPHEAD_USBSER_PRODUCT_ID  0x5523
+/* SMART USB Serial Adapter */
+#define SMART_VENDOR_ID        0x0b8c
+#define SMART_PRODUCT_ID       0x2303
+
index 30b73e68a904d65e7dadb3fdeee8acd73f65607c..a34819884c1ad6b82e732f069e7392f0272d766f 100644 (file)
@@ -36,6 +36,7 @@
 #define UTSTARCOM_PRODUCT_UM175_V1             0x3712
 #define UTSTARCOM_PRODUCT_UM175_V2             0x3714
 #define UTSTARCOM_PRODUCT_UM175_ALLTEL         0x3715
+#define PANTECH_PRODUCT_UML190_VZW             0x3716
 #define PANTECH_PRODUCT_UML290_VZW             0x3718
 
 /* CMOTECH devices */
@@ -67,7 +68,11 @@ static struct usb_device_id id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
-       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) },  /* NMEA */
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) },  /* WMC */
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },  /* DIAG */
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 27f9ae4dffdf861af3c6774ff7d0436e0a6a830e..7cd2c2699355003ed5bc46763071b052cabce468 100644 (file)
 
 static int debug;
 
+#define DEVICE_G1K(v, p) \
+       USB_DEVICE(v, p), .driver_info = 1
+
 static const struct usb_device_id id_table[] = {
-       {USB_DEVICE(0x05c6, 0x9211)},   /* Acer Gobi QDL device */
-       {USB_DEVICE(0x05c6, 0x9212)},   /* Acer Gobi Modem Device */
-       {USB_DEVICE(0x03f0, 0x1f1d)},   /* HP un2400 Gobi Modem Device */
-       {USB_DEVICE(0x03f0, 0x201d)},   /* HP un2400 Gobi QDL Device */
-       {USB_DEVICE(0x04da, 0x250d)},   /* Panasonic Gobi Modem device */
-       {USB_DEVICE(0x04da, 0x250c)},   /* Panasonic Gobi QDL device */
-       {USB_DEVICE(0x413c, 0x8172)},   /* Dell Gobi Modem device */
-       {USB_DEVICE(0x413c, 0x8171)},   /* Dell Gobi QDL device */
-       {USB_DEVICE(0x1410, 0xa001)},   /* Novatel Gobi Modem device */
-       {USB_DEVICE(0x1410, 0xa008)},   /* Novatel Gobi QDL device */
-       {USB_DEVICE(0x0b05, 0x1776)},   /* Asus Gobi Modem device */
-       {USB_DEVICE(0x0b05, 0x1774)},   /* Asus Gobi QDL device */
-       {USB_DEVICE(0x19d2, 0xfff3)},   /* ONDA Gobi Modem device */
-       {USB_DEVICE(0x19d2, 0xfff2)},   /* ONDA Gobi QDL device */
-       {USB_DEVICE(0x1557, 0x0a80)},   /* OQO Gobi QDL device */
-       {USB_DEVICE(0x05c6, 0x9001)},   /* Generic Gobi Modem device */
-       {USB_DEVICE(0x05c6, 0x9002)},   /* Generic Gobi Modem device */
-       {USB_DEVICE(0x05c6, 0x9202)},   /* Generic Gobi Modem device */
-       {USB_DEVICE(0x05c6, 0x9203)},   /* Generic Gobi Modem device */
-       {USB_DEVICE(0x05c6, 0x9222)},   /* Generic Gobi Modem device */
-       {USB_DEVICE(0x05c6, 0x9008)},   /* Generic Gobi QDL device */
-       {USB_DEVICE(0x05c6, 0x9009)},   /* Generic Gobi Modem device */
-       {USB_DEVICE(0x05c6, 0x9201)},   /* Generic Gobi QDL device */
-       {USB_DEVICE(0x05c6, 0x9221)},   /* Generic Gobi QDL device */
-       {USB_DEVICE(0x05c6, 0x9231)},   /* Generic Gobi QDL device */
-       {USB_DEVICE(0x1f45, 0x0001)},   /* Unknown Gobi QDL device */
+       /* Gobi 1000 devices */
+       {DEVICE_G1K(0x05c6, 0x9211)},   /* Acer Gobi QDL device */
+       {DEVICE_G1K(0x05c6, 0x9212)},   /* Acer Gobi Modem Device */
+       {DEVICE_G1K(0x03f0, 0x1f1d)},   /* HP un2400 Gobi Modem Device */
+       {DEVICE_G1K(0x03f0, 0x201d)},   /* HP un2400 Gobi QDL Device */
+       {DEVICE_G1K(0x04da, 0x250d)},   /* Panasonic Gobi Modem device */
+       {DEVICE_G1K(0x04da, 0x250c)},   /* Panasonic Gobi QDL device */
+       {DEVICE_G1K(0x413c, 0x8172)},   /* Dell Gobi Modem device */
+       {DEVICE_G1K(0x413c, 0x8171)},   /* Dell Gobi QDL device */
+       {DEVICE_G1K(0x1410, 0xa001)},   /* Novatel Gobi Modem device */
+       {DEVICE_G1K(0x1410, 0xa008)},   /* Novatel Gobi QDL device */
+       {DEVICE_G1K(0x0b05, 0x1776)},   /* Asus Gobi Modem device */
+       {DEVICE_G1K(0x0b05, 0x1774)},   /* Asus Gobi QDL device */
+       {DEVICE_G1K(0x19d2, 0xfff3)},   /* ONDA Gobi Modem device */
+       {DEVICE_G1K(0x19d2, 0xfff2)},   /* ONDA Gobi QDL device */
+       {DEVICE_G1K(0x1557, 0x0a80)},   /* OQO Gobi QDL device */
+       {DEVICE_G1K(0x05c6, 0x9001)},   /* Generic Gobi Modem device */
+       {DEVICE_G1K(0x05c6, 0x9002)},   /* Generic Gobi Modem device */
+       {DEVICE_G1K(0x05c6, 0x9202)},   /* Generic Gobi Modem device */
+       {DEVICE_G1K(0x05c6, 0x9203)},   /* Generic Gobi Modem device */
+       {DEVICE_G1K(0x05c6, 0x9222)},   /* Generic Gobi Modem device */
+       {DEVICE_G1K(0x05c6, 0x9008)},   /* Generic Gobi QDL device */
+       {DEVICE_G1K(0x05c6, 0x9009)},   /* Generic Gobi Modem device */
+       {DEVICE_G1K(0x05c6, 0x9201)},   /* Generic Gobi QDL device */
+       {DEVICE_G1K(0x05c6, 0x9221)},   /* Generic Gobi QDL device */
+       {DEVICE_G1K(0x05c6, 0x9231)},   /* Generic Gobi QDL device */
+       {DEVICE_G1K(0x1f45, 0x0001)},   /* Unknown Gobi QDL device */
+
+       /* Gobi 2000 devices */
+       {USB_DEVICE(0x1410, 0xa010)},   /* Novatel Gobi 2000 QDL device */
+       {USB_DEVICE(0x1410, 0xa011)},   /* Novatel Gobi 2000 QDL device */
+       {USB_DEVICE(0x1410, 0xa012)},   /* Novatel Gobi 2000 QDL device */
+       {USB_DEVICE(0x1410, 0xa013)},   /* Novatel Gobi 2000 QDL device */
+       {USB_DEVICE(0x1410, 0xa014)},   /* Novatel Gobi 2000 QDL device */
        {USB_DEVICE(0x413c, 0x8185)},   /* Dell Gobi 2000 QDL device (N0218, VU936) */
        {USB_DEVICE(0x413c, 0x8186)},   /* Dell Gobi 2000 Modem device (N0218, VU936) */
        {USB_DEVICE(0x05c6, 0x9208)},   /* Generic Gobi 2000 QDL device */
@@ -79,10 +90,29 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1199, 0x9008)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
        {USB_DEVICE(0x1199, 0x9009)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
        {USB_DEVICE(0x1199, 0x900a)},   /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+       {USB_DEVICE(0x1199, 0x9011)},   /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
        {USB_DEVICE(0x16d8, 0x8001)},   /* CMDTech Gobi 2000 QDL device (VU922) */
        {USB_DEVICE(0x16d8, 0x8002)},   /* CMDTech Gobi 2000 Modem device (VU922) */
        {USB_DEVICE(0x05c6, 0x9204)},   /* Gobi 2000 QDL device */
        {USB_DEVICE(0x05c6, 0x9205)},   /* Gobi 2000 Modem device */
+
+       /* Gobi 3000 devices */
+       {USB_DEVICE(0x03f0, 0x371d)},   /* HP un2430 Gobi 3000 QDL */
+       {USB_DEVICE(0x05c6, 0x920c)},   /* Gobi 3000 QDL */
+       {USB_DEVICE(0x05c6, 0x920d)},   /* Gobi 3000 Composite */
+       {USB_DEVICE(0x1410, 0xa020)},   /* Novatel Gobi 3000 QDL */
+       {USB_DEVICE(0x1410, 0xa021)},   /* Novatel Gobi 3000 Composite */
+       {USB_DEVICE(0x413c, 0x8193)},   /* Dell Gobi 3000 QDL */
+       {USB_DEVICE(0x413c, 0x8194)},   /* Dell Gobi 3000 Composite */
+       {USB_DEVICE(0x1199, 0x9010)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9012)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9013)},   /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+       {USB_DEVICE(0x1199, 0x9014)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9015)},   /* Sierra Wireless Gobi 3000 Modem device */
+       {USB_DEVICE(0x1199, 0x9018)},   /* Sierra Wireless Gobi 3000 QDL */
+       {USB_DEVICE(0x1199, 0x9019)},   /* Sierra Wireless Gobi 3000 Modem device */
+       {USB_DEVICE(0x12D1, 0x14F0)},   /* Sony Gobi 3000 QDL */
+       {USB_DEVICE(0x12D1, 0x14F1)},   /* Sony Gobi 3000 Composite */
        { }                             /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, id_table);
@@ -104,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
        int retval = -ENODEV;
        __u8 nintf;
        __u8 ifnum;
+       bool is_gobi1k = id->driver_info ? true : false;
 
        dbg("%s", __func__);
+       dbg("Is Gobi 1000 = %d", is_gobi1k);
 
        nintf = serial->dev->actconfig->desc.bNumInterfaces;
        dbg("Num Interfaces = %d", nintf);
@@ -153,15 +185,25 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 
        case 3:
        case 4:
-               /* Composite mode */
-               /* ifnum == 0 is a broadband network adapter */
-               if (ifnum == 1) {
-                       /*
-                        * Diagnostics Monitor (serial line 9600 8N1)
-                        * Qualcomm DM protocol
-                        * use "libqcdm" (ModemManager) for communication
-                        */
-                       dbg("Diagnostics Monitor found");
+               /* Composite mode; don't bind to the QMI/net interface as that
+                * gets handled by other drivers.
+                */
+
+               /* Gobi 1K USB layout:
+                * 0: serial port (doesn't respond)
+                * 1: serial port (doesn't respond)
+                * 2: AT-capable modem port
+                * 3: QMI/net
+                *
+                * Gobi 2K+ USB layout:
+                * 0: QMI/net
+                * 1: DM/DIAG (use libqcdm from ModemManager for communication)
+                * 2: AT-capable modem port
+                * 3: NMEA
+                */
+
+               if (ifnum == 1 && !is_gobi1k) {
+                       dbg("Gobi 2K+ DM/DIAG interface found");
                        retval = usb_set_interface(serial->dev, ifnum, 0);
                        if (retval < 0) {
                                dev_err(&serial->dev->dev,
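
The layout comment above is the heart of this qcserial change: on Gobi 1000 hardware interfaces 1 and 3 are a dead serial port and the QMI/net function, while on Gobi 2000/3000 they are the DM/DIAG and NMEA ports, so the probe needs to know the generation (the DEVICE_G1K table entries set driver_info to flag generation 1, which qcprobe turns into is_gobi1k). A compact sketch of just the decision the two hunks here make; handling of the other interface numbers sits elsewhere in qcprobe() and is not modelled:

#include <stdio.h>
#include <stdbool.h>

/* Interfaces 1 and 3 are only claimed on Gobi 2K+ parts (DM/DIAG and
 * NMEA); on Gobi 1K they are skipped, per the layout comment above. */
static bool claim_dm_or_nmea(int ifnum, bool is_gobi1k)
{
        return (ifnum == 1 || ifnum == 3) && !is_gobi1k;
}

int main(void)
{
        for (int ifnum = 1; ifnum <= 3; ifnum += 2)
                printf("ifnum %d: gobi1k claim=%d, gobi2k+ claim=%d\n", ifnum,
                       claim_dm_or_nmea(ifnum, true),
                       claim_dm_or_nmea(ifnum, false));
        return 0;
}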
@@ -180,13 +222,13 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
                                retval = -ENODEV;
                                kfree(data);
                        }
-               } else if (ifnum==3) {
+               } else if (ifnum==3 && !is_gobi1k) {
                        /*
                         * NMEA (serial line 9600 8N1)
                         * # echo "\$GPS_START" > /dev/ttyUSBx
                         * # echo "\$GPS_STOP"  > /dev/ttyUSBx
                         */
-                       dbg("NMEA GPS interface found");
+                       dbg("Gobi 2K+ NMEA GPS interface found");
                        retval = usb_set_interface(serial->dev, ifnum, 0);
                        if (retval < 0) {
                                dev_err(&serial->dev->dev,
index d5d136a53b61a52ca50cb2dd33fa983009b53b37..d5476389df04efb3d7a8fe4c59137e452457f1a2 100644 (file)
@@ -221,7 +221,7 @@ static const struct sierra_iface_info typeB_interface_list = {
 };
 
 /* 'blacklist' of interfaces not served by this driver */
-static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
+static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 };
 static const struct sierra_iface_info direct_ip_interface_blacklist = {
        .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
        .ifaceinfo = direct_ip_non_serial_ifaces,
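
sierra keeps the older array-style blacklist: each entry lists the interface numbers that belong to the Direct IP network function, and the probe walks the array before binding; this hunk only extends the list with interfaces 19 and 20. A minimal version of that scan, with sierra_iface_info abridged to the two fields used here and the helper shape assumed to match the driver's:

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>

struct iface_info {
        size_t infolen;               /* number of entries in ifaceinfo */
        const uint8_t *ifaceinfo;     /* interface numbers to skip */
};

static const uint8_t direct_ip_non_serial[] = { 7, 8, 9, 10, 11, 19, 20 };
static const struct iface_info direct_ip_blacklist = {
        .infolen = sizeof(direct_ip_non_serial),
        .ifaceinfo = direct_ip_non_serial,
};

/* Linear scan over the listed interface numbers. */
static bool skip_iface(uint8_t ifnum, const struct iface_info *info)
{
        for (size_t i = 0; i < info->infolen; i++)
                if (info->ifaceinfo[i] == ifnum)
                        return true;
        return false;
}

int main(void)
{
        printf("ifnum 7:  skip=%d\n", skip_iface(7, &direct_ip_blacklist));  /* 1 */
        printf("ifnum 19: skip=%d\n", skip_iface(19, &direct_ip_blacklist)); /* 1, new here */
        printf("ifnum 3:  skip=%d\n", skip_iface(3, &direct_ip_blacklist));  /* 0 */
        return 0;
}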
@@ -298,9 +298,16 @@ static const struct usb_device_id id_table[] = {
        /* Sierra Wireless HSPA Non-Composite Device */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
        { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
+       { USB_DEVICE(0x1199, 0x68A2),   /* Sierra Wireless MC77xx in QMI mode */
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
        { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       /* AT&T Direct IP LTE modems */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+         .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+       },
        { USB_DEVICE(0x0f3d, 0x68A3),   /* Airprime/Sierra Wireless Direct IP modems */
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
index ea8445689c8501f82795a9fe29c95b9fbcd73183..2856474123eb191b0107c7c467bf2d808470af11 100644 (file)
@@ -165,7 +165,7 @@ static unsigned int product_5052_count;
 /* the array dimension is the number of default entries plus */
 /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
 /* null entry */
-static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
        { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
        { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
        { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -179,6 +179,8 @@ static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
        { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
 };
 
 static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
@@ -188,7 +190,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
        { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
 };
 
-static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1] = {
        { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
        { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
        { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -206,6 +208,8 @@ static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1]
        { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+       { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
        { }
 };
 
index 2aac1953993b1c9508283a8ecfef303c9fa26f1e..b353e7e3d4809d17478478eae47fa2cc75a0d5a6 100644 (file)
@@ -37,6 +37,7 @@
 #define TI_5152_BOOT_PRODUCT_ID                0x5152  /* no EEPROM, no firmware */
 #define TI_5052_EEPROM_PRODUCT_ID      0x505A  /* EEPROM, no firmware */
 #define TI_5052_FIRMWARE_PRODUCT_ID    0x505F  /* firmware is running */
+#define FRI2_PRODUCT_ID                        0x5053  /* Fish River Island II */
 
 /* Multi-Tech vendor and product ids */
 #define MTS_VENDOR_ID                  0x06E0
 #define MTS_MT9234ZBA_PRODUCT_ID       0xF115
 #define MTS_MT9234ZBAOLD_PRODUCT_ID    0x0319
 
+/* Abbott Diabetics vendor and product ids */
+#define ABBOTT_VENDOR_ID               0x1a61
+#define ABBOTT_PRODUCT_ID              0x3410
+
 /* Commands */
 #define TI_GET_VERSION                 0x01
 #define TI_GET_PORT_STATUS             0x02
index 49d860078b9367d3f7bda27769cf31d18df93ac1..eb7f76f3327fbfa18a7e3f88b800ff3250e4cb99 100755 (executable)
@@ -702,12 +702,14 @@ exit:
 static struct usb_serial_driver *search_serial_device(
                                        struct usb_interface *iface)
 {
-       const struct usb_device_id *id;
+       const struct usb_device_id *id = NULL;
        struct usb_serial_driver *drv;
+       struct usb_driver *driver = to_usb_driver(iface->dev.driver);
 
        /* Check if the usb id matches a known device */
        list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
-               id = get_iface_id(drv, iface);
+               if (drv->usb_driver == driver)
+                       id = get_iface_id(drv, iface);
                if (id)
                        return drv;
        }
@@ -1116,6 +1118,12 @@ int usb_serial_probe(struct usb_interface *interface,
                SEW868_USB = 0;
 #endif
 
+       /* Avoid race with tty_open and serial_install by setting the
+        * disconnected flag and not clearing it until all ports have been
+        * registered.
+        */
+       serial->disconnected = 1;
+
        if (get_free_serial(serial, num_ports, &minor) == NULL) {
                dev_err(&interface->dev, "No more free serial devices\n");
                goto probe_error;
@@ -1140,6 +1148,8 @@ int usb_serial_probe(struct usb_interface *interface,
                }
        }
 
+       serial->disconnected = 0;
+
        usb_serial_console_init(debug, minor);
 
 exit:
index fc310f75eada97c0c64da9d5886882eef9631dbf..0fded39e3b3e28a3c51af4b9ed6cc22de047a9b2 100644 (file)
@@ -58,7 +58,9 @@
 
 void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
 {
-       /* Pad the SCSI command with zeros out to 12 bytes
+       /*
+        * Pad the SCSI command with zeros out to 12 bytes.  If the
+        * command already is 12 bytes or longer, leave it alone.
         *
         * NOTE: This only works because a scsi_cmnd struct field contains
         * a unsigned char cmnd[16], so we know we have storage available
@@ -66,9 +68,6 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
        for (; srb->cmd_len<12; srb->cmd_len++)
                srb->cmnd[srb->cmd_len] = 0;
 
-       /* set command length to 12 bytes */
-       srb->cmd_len = 12;
-
        /* send the command to the transport layer */
        usb_stor_invoke_transport(srb, us);
 }
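
Dropping the unconditional assignment of cmd_len means commands that are already 12 bytes or longer are forwarded untouched; only shorter ones are zero-padded by the loop. A standalone sketch of that padding step, outside the driver:

/* zero-pad a SCSI CDB out to 12 bytes; longer CDBs are left as-is */
static void pad_cdb_to_12(unsigned char *cdb, unsigned int *cdb_len)
{
        while (*cdb_len < 12)
                cdb[(*cdb_len)++] = 0;
}
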
index e8ae21b2d387c11a1a1038ae98855d9db1228dc1..ff32390d61e5d489314b72bec6ff3e3d178b5b01 100644 (file)
@@ -691,6 +691,9 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
                int temp_result;
                struct scsi_eh_save ses;
                int sense_size = US_SENSE_SIZE;
+               struct scsi_sense_hdr sshdr;
+               const u8 *scdd;
+               u8 fm_ili;
 
                /* device supports and needs bigger sense buffer */
                if (us->fflags & US_FL_SANE_SENSE)
@@ -774,32 +777,30 @@ Retry_Sense:
                        srb->sense_buffer[7] = (US_SENSE_SIZE - 8);
                }
 
+               scsi_normalize_sense(srb->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+                                    &sshdr);
+
                US_DEBUGP("-- Result from auto-sense is %d\n", temp_result);
                US_DEBUGP("-- code: 0x%x, key: 0x%x, ASC: 0x%x, ASCQ: 0x%x\n",
-                         srb->sense_buffer[0],
-                         srb->sense_buffer[2] & 0xf,
-                         srb->sense_buffer[12], 
-                         srb->sense_buffer[13]);
+                         sshdr.response_code, sshdr.sense_key,
+                         sshdr.asc, sshdr.ascq);
 #ifdef CONFIG_USB_STORAGE_DEBUG
-               usb_stor_show_sense(
-                         srb->sense_buffer[2] & 0xf,
-                         srb->sense_buffer[12], 
-                         srb->sense_buffer[13]);
+               usb_stor_show_sense(sshdr.sense_key, sshdr.asc, sshdr.ascq);
 #endif
 
                /* set the result so the higher layers expect this data */
                srb->result = SAM_STAT_CHECK_CONDITION;
 
+               scdd = scsi_sense_desc_find(srb->sense_buffer,
+                                           SCSI_SENSE_BUFFERSIZE, 4);
+               fm_ili = (scdd ? scdd[3] : srb->sense_buffer[2]) & 0xA0;
+
                /* We often get empty sense data.  This could indicate that
                 * everything worked or that there was an unspecified
                 * problem.  We have to decide which.
                 */
-               if (    /* Filemark 0, ignore EOM, ILI 0, no sense */
-                               (srb->sense_buffer[2] & 0xaf) == 0 &&
-                       /* No ASC or ASCQ */
-                               srb->sense_buffer[12] == 0 &&
-                               srb->sense_buffer[13] == 0) {
-
+               if (sshdr.sense_key == 0 && sshdr.asc == 0 && sshdr.ascq == 0 &&
+                   fm_ili == 0) {
                        /* If things are really okay, then let's show that.
                         * Zero out the sense buffer so the higher layers
                         * won't realize we did an unsolicited auto-sense.
@@ -814,7 +815,10 @@ Retry_Sense:
                         */
                        } else {
                                srb->result = DID_ERROR << 16;
-                               srb->sense_buffer[2] = HARDWARE_ERROR;
+                               if ((sshdr.response_code & 0x72) == 0x72)
+                                       srb->sense_buffer[1] = HARDWARE_ERROR;
+                               else
+                                       srb->sense_buffer[2] = HARDWARE_ERROR;
                        }
                }
        }
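
scsi_normalize_sense() parses both fixed-format and descriptor-format sense data into a struct scsi_sense_hdr, and scsi_sense_desc_find() locates a given descriptor (type 4 carries the filemark/EOM/ILI bits used above), which is what lets the open-coded sense_buffer[2]/[12]/[13] accesses go away. A hedged sketch of the simplified "is the sense empty?" check, not the driver code itself:

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

/* true if the auto-sense data carries no key/ASC/ASCQ at all */
static bool sense_is_empty(const u8 *sense)
{
        struct scsi_sense_hdr sshdr;

        if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr))
                return true;            /* not even a valid sense header */
        return sshdr.sense_key == 0 && sshdr.asc == 0 && sshdr.ascq == 0;
}
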
index 3041a974faf39278ef8fad4033e089f8e64b7b9c..591f57fcf97d3e0f69e07838b810f34069b6b1ba 100644 (file)
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV(  0x1370, 0x6828, 0x0110, 0x0110,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV(  0x13fe, 0x3600, 0x0100, 0x0100,
+               "Kingston",
+               "DT 101 G2",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BULK_IGNORE_TAG ),
+
 /* Reported by Francesco Foresti <frafore@tiscali.it> */
 UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                "Super Top",
@@ -1878,6 +1885,13 @@ UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Jesse Feddema <jdfeddema@gmail.com> */
+UNUSUAL_DEV(  0x177f, 0x0400, 0x0000, 0x0000,
+               "Yarvik",
+               "PMP400",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+
 /* Reported by Hans de Goede <hdegoede@redhat.com>
  * These Appotech controllers are found in Picture Frames, they provide a
  * (buggy) emulation of a cdrom drive which contains the windows software
index 0ca095820f3e7ca10c98e27a4eaeea728e5c533e..db51ba16dc0755d6b7876c115abd10eba6e876c7 100644 (file)
@@ -788,15 +788,19 @@ static void quiesce_and_remove_host(struct us_data *us)
        struct Scsi_Host *host = us_to_host(us);
 
        /* If the device is really gone, cut short reset delays */
-       if (us->pusb_dev->state == USB_STATE_NOTATTACHED)
+       if (us->pusb_dev->state == USB_STATE_NOTATTACHED) {
                set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
+               wake_up(&us->delay_wait);
+       }
 
-       /* Prevent SCSI-scanning (if it hasn't started yet)
-        * and wait for the SCSI-scanning thread to stop.
+       /* Prevent SCSI scanning (if it hasn't started yet)
+        * or wait for the SCSI-scanning routine to stop.
         */
-       set_bit(US_FLIDX_DONT_SCAN, &us->dflags);
-       wake_up(&us->delay_wait);
-       wait_for_completion(&us->scanning_done);
+       cancel_delayed_work_sync(&us->scan_dwork);
+
+       /* Balance autopm calls if scanning was cancelled */
+       if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags))
+               usb_autopm_put_interface_no_suspend(us->pusb_intf);
 
        /* Removing the host will perform an orderly shutdown: caches
         * synchronized, disks spun down, etc.
@@ -823,42 +827,28 @@ static void release_everything(struct us_data *us)
        scsi_host_put(us_to_host(us));
 }
 
-/* Thread to carry out delayed SCSI-device scanning */
-static int usb_stor_scan_thread(void * __us)
+/* Delayed-work routine to carry out SCSI-device scanning */
+static void usb_stor_scan_dwork(struct work_struct *work)
 {
-       struct us_data *us = (struct us_data *)__us;
+       struct us_data *us = container_of(work, struct us_data,
+                       scan_dwork.work);
        struct device *dev = &us->pusb_intf->dev;
 
-       dev_dbg(dev, "device found\n");
+       dev_dbg(dev, "starting scan\n");
 
-       set_freezable();
-       /* Wait for the timeout to expire or for a disconnect */
-       if (delay_use > 0) {
-               dev_dbg(dev, "waiting for device to settle "
-                               "before scanning\n");
-               wait_event_freezable_timeout(us->delay_wait,
-                               test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
-                               delay_use * HZ);
+       /* For bulk-only devices, determine the max LUN value */
+       if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) {
+               mutex_lock(&us->dev_mutex);
+               us->max_lun = usb_stor_Bulk_max_lun(us);
+               mutex_unlock(&us->dev_mutex);
        }
+       scsi_scan_host(us_to_host(us));
+       dev_dbg(dev, "scan complete\n");
 
-       /* If the device is still connected, perform the scanning */
-       if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) {
-
-               /* For bulk-only devices, determine the max LUN value */
-               if (us->protocol == USB_PR_BULK &&
-                               !(us->fflags & US_FL_SINGLE_LUN)) {
-                       mutex_lock(&us->dev_mutex);
-                       us->max_lun = usb_stor_Bulk_max_lun(us);
-                       mutex_unlock(&us->dev_mutex);
-               }
-               scsi_scan_host(us_to_host(us));
-               dev_dbg(dev, "scan complete\n");
-
-               /* Should we unbind if no devices were detected? */
-       }
+       /* Should we unbind if no devices were detected? */
 
        usb_autopm_put_interface(us->pusb_intf);
-       complete_and_exit(&us->scanning_done, 0);
+       clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
 }
 
 static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
@@ -905,7 +895,7 @@ int usb_stor_probe1(struct us_data **pus,
        init_completion(&us->cmnd_ready);
        init_completion(&(us->notify));
        init_waitqueue_head(&us->delay_wait);
-       init_completion(&us->scanning_done);
+       INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork);
 
        /* Associate the us_data structure with the USB device */
        result = associate_dev(us, intf);
@@ -936,7 +926,6 @@ EXPORT_SYMBOL_GPL(usb_stor_probe1);
 /* Second part of general USB mass-storage probing */
 int usb_stor_probe2(struct us_data *us)
 {
-       struct task_struct *th;
        int result;
        struct device *dev = &us->pusb_intf->dev;
 
@@ -977,20 +966,14 @@ int usb_stor_probe2(struct us_data *us)
                goto BadDevice;
        }
 
-       /* Start up the thread for delayed SCSI-device scanning */
-       th = kthread_create(usb_stor_scan_thread, us, "usb-stor-scan");
-       if (IS_ERR(th)) {
-               dev_warn(dev,
-                               "Unable to start the device-scanning thread\n");
-               complete(&us->scanning_done);
-               quiesce_and_remove_host(us);
-               result = PTR_ERR(th);
-               goto BadDevice;
-       }
-
+       /* Submit the delayed_work for SCSI-device scanning */
        usb_autopm_get_interface_no_resume(us->pusb_intf);
-       wake_up_process(th);
+       set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
 
+       if (delay_use > 0)
+               dev_dbg(dev, "waiting for device to settle before scanning\n");
+       queue_delayed_work(system_freezable_wq, &us->scan_dwork,
+                       delay_use * HZ);
        return 0;
 
        /* We come here if there are any problems */
@@ -1063,6 +1046,7 @@ static struct usb_driver usb_storage_driver = {
        .id_table =     usb_storage_usb_ids,
        .supports_autosuspend = 1,
        .soft_unbind =  1,
+       .no_dynamic_id = 1,
 };
 
 static int __init usb_stor_init(void)
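
The scan-thread removal is a kthread-to-delayed_work conversion: the settle delay becomes the delay argument of queue_delayed_work() on system_freezable_wq (so scanning still freezes across suspend), and disconnect replaces the wake-up/wait-for-completion dance with cancel_delayed_work_sync(). A minimal sketch of the pattern with hypothetical names:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_dev {
        struct delayed_work scan_dwork;
        bool scan_done;
};

static void my_scan_fn(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev,
                        scan_dwork.work);

        /* ... perform the deferred scan ... */
        dev->scan_done = true;
}

static void my_probe(struct my_dev *dev, unsigned int delay_secs)
{
        INIT_DELAYED_WORK(&dev->scan_dwork, my_scan_fn);
        queue_delayed_work(system_freezable_wq, &dev->scan_dwork,
                        delay_secs * HZ);
}

static void my_disconnect(struct my_dev *dev)
{
        /* waits for a running scan and cancels a still-pending one */
        cancel_delayed_work_sync(&dev->scan_dwork);
}
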
index 7b0f2113632efb52ada464fc17dd83a74f34ea5a..75f70f04f37b89ba0ee63495298bc995da0024e4 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/blkdev.h>
 #include <linux/completion.h>
 #include <linux/mutex.h>
+#include <linux/workqueue.h>
 #include <scsi/scsi_host.h>
 
 struct us_data;
@@ -72,7 +73,7 @@ struct us_unusual_dev {
 #define US_FLIDX_DISCONNECTING 3       /* disconnect in progress   */
 #define US_FLIDX_RESETTING     4       /* device reset in progress */
 #define US_FLIDX_TIMED_OUT     5       /* SCSI midlayer timed out  */
-#define US_FLIDX_DONT_SCAN     6       /* don't scan (disconnect)  */
+#define US_FLIDX_SCAN_PENDING  6       /* scanning not yet done    */
 #define US_FLIDX_REDO_READ10   7       /* redo READ(10) command    */
 #define US_FLIDX_READ10_WORKED 8       /* previous READ(10) succeeded */
 
@@ -147,8 +148,8 @@ struct us_data {
        /* mutual exclusion and synchronization structures */
        struct completion       cmnd_ready;      /* to sleep thread on      */
        struct completion       notify;          /* thread begin/end        */
-       wait_queue_head_t       delay_wait;      /* wait during scan, reset */
-       struct completion       scanning_done;   /* wait for scan thread    */
+       wait_queue_head_t       delay_wait;      /* wait during reset       */
+       struct delayed_work     scan_dwork;      /* for async scanning      */
 
        /* subdriver information */
        void                    *extra;          /* Any extra data          */
index 2babcd4fbfc13b6bebd4238444925eca6f99e401..86685e9949872e3cce86a38892c4cc14bb01e643 100644 (file)
@@ -645,7 +645,8 @@ void hwarc_neep_cb(struct urb *urb)
                dev_err(dev, "NEEP: URB error %d\n", urb->status);
        }
        result = usb_submit_urb(urb, GFP_ATOMIC);
-       if (result < 0) {
+       if (result < 0 && result != -ENODEV && result != -EPERM) {
+               /* ignoring unrecoverable errors */
                dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
                        result);
                goto error;
index 697e56a5bcdd0c7e4233774ce2670b70ae8a9373..47146c8943395222a9b71ba524427e9af50ac259 100644 (file)
@@ -106,6 +106,7 @@ struct uwb_rc_neh {
        u8 evt_type;
        __le16 evt;
        u8 context;
+       u8 completed;
        uwb_rc_cmd_cb_f cb;
        void *arg;
 
@@ -408,6 +409,7 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
        struct device *dev = &rc->uwb_dev.dev;
        struct uwb_rc_neh *neh;
        struct uwb_rceb *notif;
+       unsigned long flags;
 
        if (rceb->bEventContext == 0) {
                notif = kmalloc(size, GFP_ATOMIC);
@@ -421,7 +423,11 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
        } else {
                neh = uwb_rc_neh_lookup(rc, rceb);
                if (neh) {
-                       del_timer_sync(&neh->timer);
+                       spin_lock_irqsave(&rc->neh_lock, flags);
+                       /* to guard against a timeout */
+                       neh->completed = 1;
+                       del_timer(&neh->timer);
+                       spin_unlock_irqrestore(&rc->neh_lock, flags);
                        uwb_rc_neh_cb(neh, rceb, size);
                } else
                        dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
@@ -567,6 +573,10 @@ static void uwb_rc_neh_timer(unsigned long arg)
        unsigned long flags;
 
        spin_lock_irqsave(&rc->neh_lock, flags);
+       if (neh->completed) {
+               spin_unlock_irqrestore(&rc->neh_lock, flags);
+               return;
+       }
        if (neh->context)
                __uwb_rc_neh_rm(rc, neh);
        else
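
del_timer_sync() must not be called while holding the lock the timer handler itself takes, so the hunks above switch to plain del_timer() plus a completed flag that the timeout path re-checks under rc->neh_lock. A generic sketch of that guard, using a hypothetical structure and the 3.0-era timer callback signature:

#include <linux/spinlock.h>
#include <linux/timer.h>

struct pending_evt {
        spinlock_t lock;
        struct timer_list timer;        /* armed elsewhere via setup_timer() */
        int completed;
};

static void evt_complete(struct pending_evt *e)
{
        unsigned long flags;

        spin_lock_irqsave(&e->lock, flags);
        e->completed = 1;               /* timeout handler will now back off */
        del_timer(&e->timer);           /* non-sync, so safe under the lock */
        spin_unlock_irqrestore(&e->lock, flags);
}

static void evt_timeout(unsigned long arg)
{
        struct pending_evt *e = (struct pending_evt *)arg;
        unsigned long flags;

        spin_lock_irqsave(&e->lock, flags);
        if (e->completed) {
                spin_unlock_irqrestore(&e->lock, flags);
                return;
        }
        /* ... tear the timed-out request down ... */
        spin_unlock_irqrestore(&e->lock, flags);
}
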
index 4484c721f0f9b1ce6a160b5d7e450d0245b2e772..c2ceae4bb6ca3c72a3dfd234ef7e3f6a5e061fbd 100644 (file)
@@ -1085,7 +1085,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
         */
        lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
 
-       sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+       sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
        lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
        if (sinfo->atmel_lcdfb_power_control)
                sinfo->atmel_lcdfb_power_control(0);
index 772f6015219a91764a78a481071b2830824e4272..6f54f7436a97171972c83c048ee191f42641e452 100644 (file)
@@ -271,7 +271,7 @@ static int tosa_lcd_resume(struct spi_device *spi)
 }
 #else
 #define tosa_lcd_suspend       NULL
-#define tosa_lcd_reume NULL
+#define tosa_lcd_resume NULL
 #endif
 
 static struct spi_driver tosa_lcd_driver = {
index caaa27d4a46a95c76da928a4353cd7d8087d1a95..cb09aa1fa138618c13278de877d5ecfb9b845582 100644 (file)
 #define CARMINEFB_DEFAULT_VIDEO_MODE   1
 
 static unsigned int fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
-module_param(fb_mode, uint, 444);
+module_param(fb_mode, uint, 0444);
 MODULE_PARM_DESC(fb_mode, "Initial video mode as integer.");
 
 static char *fb_mode_str;
-module_param(fb_mode_str, charp, 444);
+module_param(fb_mode_str, charp, 0444);
 MODULE_PARM_DESC(fb_mode_str, "Initial video mode in characters.");
 
 /*
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(fb_mode_str, "Initial video mode in characters.");
  * 0b010 Display 1
  */
 static int fb_displays = CARMINE_USE_DISPLAY0 | CARMINE_USE_DISPLAY1;
-module_param(fb_displays, int, 444);
+module_param(fb_displays, int, 0444);
 MODULE_PARM_DESC(fb_displays, "Bit mode, which displays are used");
 
 struct carmine_hw {
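
module_param() takes an octal sysfs mode as its third argument, so the decimal 444 above actually requested the odd mode 0674; 0444 gives the intended world-readable entry under /sys/module/<name>/parameters. A hedged one-liner with a hypothetical parameter:

#include <linux/moduleparam.h>

static unsigned int ex_mode = 1;        /* hypothetical parameter */
module_param(ex_mode, uint, 0444);      /* octal: world-readable, nobody writes */
MODULE_PARM_DESC(ex_mode, "Example read-only module parameter");
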
index d9369a700ddae00098e6ebc8460843f0e5e2f551..ae7c4211f7154577f7b35c249cd7ab140452607b 100755 (executable)
@@ -1658,6 +1658,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
        if (ret)
                return -EINVAL;
 
+       unlink_framebuffer(fb_info);
        if (fb_info->pixmap.addr &&
            (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
                kfree(fb_info->pixmap.addr);
@@ -1665,7 +1666,6 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
        registered_fb[i] = NULL;
        num_registered_fb--;
        fb_cleanup_device(fb_info);
-       device_destroy(fb_class, MKDEV(FB_MAJOR, i));
        event.info = fb_info;
        fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
 
@@ -1674,6 +1674,22 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
        return 0;
 }
 
+int unlink_framebuffer(struct fb_info *fb_info)
+{
+       int i;
+
+       i = fb_info->node;
+       if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
+               return -EINVAL;
+
+       if (fb_info->dev) {
+               device_destroy(fb_class, MKDEV(FB_MAJOR, i));
+               fb_info->dev = NULL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(unlink_framebuffer);
+
 void remove_conflicting_framebuffers(struct apertures_struct *a,
                                     const char *name, bool primary)
 {
@@ -1745,8 +1761,6 @@ void fb_set_suspend(struct fb_info *info, int state)
 {
        struct fb_event event;
 
-       if (!lock_fb_info(info))
-               return;
        event.info = info;
        if (state) {
                fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
@@ -1755,7 +1769,6 @@ void fb_set_suspend(struct fb_info *info, int state)
                info->state = FBINFO_STATE_RUNNING;
                fb_notifier_call_chain(FB_EVENT_RESUME, &event);
        }
-       unlock_fb_info(info);
 }
 
 /**
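
unlink_framebuffer() splits the device_destroy() step out of do_unregister_framebuffer() so that hot-unpluggable drivers (udlfb, further below) can drop the /dev/fbN node at disconnect time while the fb_info itself stays alive until the last user closes it. A hedged usage sketch for a hypothetical driver:

#include <linux/fb.h>

/* called from the USB disconnect path of a hypothetical fbdev driver */
static void ex_fb_disconnect(struct fb_info *info)
{
        unlink_framebuffer(info);       /* remove the device node now */
        /* unregister_framebuffer() is deferred until the last close */
}
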
index 04251ce89184bbfc752e2d927f48d0f534841c7b..67afa9c2289d539e281bb831aa2260c0fba1205b 100644 (file)
@@ -399,9 +399,12 @@ static ssize_t store_fbstate(struct device *device,
 
        state = simple_strtoul(buf, &last, 0);
 
+       if (!lock_fb_info(fb_info))
+               return -ENODEV;
        console_lock();
        fb_set_suspend(fb_info, (int)state);
        console_unlock();
+       unlock_fb_info(fb_info);
 
        return count;
 }
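
With the lock_fb_info()/unlock_fb_info() pair removed from fb_set_suspend(), its callers now own the locking, and the order is fb_info lock first, console lock second, exactly as the store_fbstate() hunk above does. A compliant caller, sketched:

#include <linux/console.h>
#include <linux/fb.h>

static int ex_set_fb_suspend(struct fb_info *info, int state)
{
        if (!lock_fb_info(info))
                return -ENODEV;         /* framebuffer already gone */
        console_lock();
        fb_set_suspend(info, state);
        console_unlock();
        unlock_fb_info(info);
        return 0;
}
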
index cb163a5397beab1a592b836db2c273db3266351c..3251a0236d5639660f47fdca631f98bd5299015c 100644 (file)
@@ -100,36 +100,32 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                          u_int transp, struct fb_info *info)
 {
        struct offb_par *par = (struct offb_par *) info->par;
-       int i, depth;
-       u32 *pal = info->pseudo_palette;
-
-       depth = info->var.bits_per_pixel;
-       if (depth == 16)
-               depth = (info->var.green.length == 5) ? 15 : 16;
-
-       if (regno > 255 ||
-           (depth == 16 && regno > 63) ||
-           (depth == 15 && regno > 31))
-               return 1;
-
-       if (regno < 16) {
-               switch (depth) {
-               case 15:
-                       pal[regno] = (regno << 10) | (regno << 5) | regno;
-                       break;
-               case 16:
-                       pal[regno] = (regno << 11) | (regno << 5) | regno;
-                       break;
-               case 24:
-                       pal[regno] = (regno << 16) | (regno << 8) | regno;
-                       break;
-               case 32:
-                       i = (regno << 8) | regno;
-                       pal[regno] = (i << 16) | i;
-                       break;
+
+       if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+               u32 *pal = info->pseudo_palette;
+               u32 cr = red >> (16 - info->var.red.length);
+               u32 cg = green >> (16 - info->var.green.length);
+               u32 cb = blue >> (16 - info->var.blue.length);
+               u32 value;
+
+               if (regno >= 16)
+                       return -EINVAL;
+
+               value = (cr << info->var.red.offset) |
+                       (cg << info->var.green.offset) |
+                       (cb << info->var.blue.offset);
+               if (info->var.transp.length > 0) {
+                       u32 mask = (1 << info->var.transp.length) - 1;
+                       mask <<= info->var.transp.offset;
+                       value |= mask;
                }
+               pal[regno] = value;
+               return 0;
        }
 
+       if (regno > 255)
+               return -EINVAL;
+
        red >>= 8;
        green >>= 8;
        blue >>= 8;
@@ -381,7 +377,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
                                int pitch, unsigned long address,
                                int foreign_endian, struct device_node *dp)
 {
-       unsigned long res_size = pitch * height * (depth + 7) / 8;
+       unsigned long res_size = pitch * height;
        struct offb_par *par = &default_par;
        unsigned long res_start = address;
        struct fb_fix_screeninfo *fix;
index b0555f4f0a78b534c52cd427d871603d3424969f..fadd6a0836c769103b50ecee127320e44166a551 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/string.h>
+#include <linux/gpio.h>
 #include <video/omapdss.h>
 #if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
        defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
@@ -40,6 +41,9 @@
 #include "hdmi.h"
 #include "dss_features.h"
 
+#define HDMI_DEFAULT_REGN 15
+#define HDMI_DEFAULT_REGM2 1
+
 static struct {
        struct mutex lock;
        struct omap_display_platform_data *pdata;
@@ -51,6 +55,9 @@ static struct {
        u8 edid_set;
        bool custom_set;
        struct hdmi_config cfg;
+
+       int hpd_gpio;
+       bool phy_tx_enabled;
 } hdmi;
 
 /*
@@ -275,6 +282,47 @@ static int hdmi_pll_reset(void)
        return 0;
 }
 
+static int hdmi_check_hpd_state(void)
+{
+       unsigned long flags;
+       bool hpd;
+       int r;
+       /* this should be in ti_hdmi_4xxx_ip private data */
+       static DEFINE_SPINLOCK(phy_tx_lock);
+
+       spin_lock_irqsave(&phy_tx_lock, flags);
+
+       hpd = gpio_get_value(hdmi.hpd_gpio);
+
+       if (hpd == hdmi.phy_tx_enabled) {
+               spin_unlock_irqrestore(&phy_tx_lock, flags);
+               return 0;
+       }
+
+       if (hpd)
+               r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_TXON);
+       else
+               r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_LDOON);
+
+       if (r) {
+               DSSERR("Failed to %s PHY TX power\n",
+                               hpd ? "enable" : "disable");
+               goto err;
+       }
+
+       hdmi.phy_tx_enabled = hpd;
+err:
+       spin_unlock_irqrestore(&phy_tx_lock, flags);
+       return r;
+}
+
+static irqreturn_t hpd_irq_handler(int irq, void *data)
+{
+       hdmi_check_hpd_state();
+
+       return IRQ_HANDLED;
+}
+
 static int hdmi_phy_init(void)
 {
        u16 r = 0;
@@ -283,10 +331,6 @@ static int hdmi_phy_init(void)
        if (r)
                return r;
 
-       r = hdmi_set_phy_pwr(HDMI_PHYPWRCMD_TXON);
-       if (r)
-               return r;
-
        /*
         * Read address 0 in order to get the SCP reset done completed
         * Dummy access performed to make sure reset is done
@@ -308,6 +352,23 @@ static int hdmi_phy_init(void)
        /* Write to phy address 3 to change the polarity control */
        REG_FLD_MOD(HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
 
+       r = request_threaded_irq(gpio_to_irq(hdmi.hpd_gpio),
+                       NULL, hpd_irq_handler,
+                       IRQF_DISABLED | IRQF_TRIGGER_RISING |
+                       IRQF_TRIGGER_FALLING, "hpd", NULL);
+       if (r) {
+               DSSERR("HPD IRQ request failed\n");
+               hdmi_set_phy_pwr(HDMI_PHYPWRCMD_OFF);
+               return r;
+       }
+
+       r = hdmi_check_hpd_state();
+       if (r) {
+               free_irq(gpio_to_irq(hdmi.hpd_gpio), NULL);
+               hdmi_set_phy_pwr(HDMI_PHYPWRCMD_OFF);
+               return r;
+       }
+
        return 0;
 }
 
@@ -358,7 +419,9 @@ static int hdmi_pll_program(struct hdmi_pll_info *fmt)
 
 static void hdmi_phy_off(void)
 {
+       free_irq(gpio_to_irq(hdmi.hpd_gpio), NULL);
        hdmi_set_phy_pwr(HDMI_PHYPWRCMD_OFF);
+       hdmi.phy_tx_enabled = false;
 }
 
 static int hdmi_core_ddc_edid(u8 *pedid, int ext)
@@ -1069,7 +1132,11 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
         * Input clock is predivided by N + 1
         * out put of which is reference clk
         */
-       pi->regn = dssdev->clocks.hdmi.regn;
+       if (dssdev->clocks.hdmi.regn == 0)
+               pi->regn = HDMI_DEFAULT_REGN;
+       else
+               pi->regn = dssdev->clocks.hdmi.regn;
+
        refclk = clkin / (pi->regn + 1);
 
        /*
@@ -1077,7 +1144,11 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
         * Multiplying by 100 to avoid fractional part removal
         */
        pi->regm = (phy * 100 / (refclk)) / 100;
-       pi->regm2 = dssdev->clocks.hdmi.regm2;
+
+       if (dssdev->clocks.hdmi.regm2 == 0)
+               pi->regm2 = HDMI_DEFAULT_REGM2;
+       else
+               pi->regm2 = dssdev->clocks.hdmi.regm2;
 
        /*
         * fractional multiplier is remainder of the difference between
@@ -1225,12 +1296,15 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
 
 int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
 {
+       struct omap_dss_hdmi_data *priv = dssdev->data;
        int r = 0;
 
        DSSDBG("ENTER hdmi_display_enable\n");
 
        mutex_lock(&hdmi.lock);
 
+       hdmi.hpd_gpio = priv->hpd_gpio;
+
        r = omap_dss_start_device(dssdev);
        if (r) {
                DSSERR("failed to start device\n");
index 7d54e2c612f774c292088507d24bba47e8d4f605..647ba984f00f6b890c0fc7b9960c0ece05c31c9f 100644 (file)
@@ -1111,6 +1111,7 @@ static long sh_hdmi_clk_configure(struct sh_hdmi *hdmi, unsigned long hdmi_rate,
 static void sh_hdmi_edid_work_fn(struct work_struct *work)
 {
        struct sh_hdmi *hdmi = container_of(work, struct sh_hdmi, edid_work.work);
+       struct fb_info *info;
        struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
        struct sh_mobile_lcdc_chan *ch;
        int ret;
@@ -1123,8 +1124,9 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
 
        mutex_lock(&hdmi->mutex);
 
+       info = hdmi->info;
+
        if (hdmi->hp_state == HDMI_HOTPLUG_CONNECTED) {
-               struct fb_info *info = hdmi->info;
                unsigned long parent_rate = 0, hdmi_rate;
 
                ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
@@ -1148,42 +1150,45 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
 
                ch = info->par;
 
-               console_lock();
+               if (lock_fb_info(info)) {
+                       console_lock();
 
-               /* HDMI plug in */
-               if (!sh_hdmi_must_reconfigure(hdmi) &&
-                   info->state == FBINFO_STATE_RUNNING) {
-                       /*
-                        * First activation with the default monitor - just turn
-                        * on, if we run a resume here, the logo disappears
-                        */
-                       if (lock_fb_info(info)) {
+                       /* HDMI plug in */
+                       if (!sh_hdmi_must_reconfigure(hdmi) &&
+                           info->state == FBINFO_STATE_RUNNING) {
+                               /*
+                                * First activation with the default monitor - just turn
+                                * on, if we run a resume here, the logo disappears
+                                */
                                info->var.width = hdmi->var.width;
                                info->var.height = hdmi->var.height;
                                sh_hdmi_display_on(hdmi, info);
-                               unlock_fb_info(info);
+                       } else {
+                               /* New monitor or have to wake up */
+                               fb_set_suspend(info, 0);
                        }
-               } else {
-                       /* New monitor or have to wake up */
-                       fb_set_suspend(info, 0);
-               }
 
-               console_unlock();
+                       console_unlock();
+                       unlock_fb_info(info);
+               }
        } else {
                ret = 0;
-               if (!hdmi->info)
+               if (!info)
                        goto out;
 
                hdmi->monspec.modedb_len = 0;
                fb_destroy_modedb(hdmi->monspec.modedb);
                hdmi->monspec.modedb = NULL;
 
-               console_lock();
+               if (lock_fb_info(info)) {
+                       console_lock();
 
-               /* HDMI disconnect */
-               fb_set_suspend(hdmi->info, 1);
+                       /* HDMI disconnect */
+                       fb_set_suspend(info, 1);
 
-               console_unlock();
+                       console_unlock();
+                       unlock_fb_info(info);
+               }
        }
 
 out:
index 816a4fda04f5b9c0637721054a5c7aba1a456932..415e9b2f43dec4777ea054007e81ec018bf40437 100644 (file)
@@ -1666,7 +1666,7 @@ static void dlfb_usb_disconnect(struct usb_interface *interface)
        for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
                device_remove_file(info->dev, &fb_device_attrs[i]);
        device_remove_bin_file(info->dev, &edid_attr);
-
+       unlink_framebuffer(info);
        usb_set_intfdata(interface, NULL);
 
        /* if clients still have us open, will be freed on last close */
index 7f8472cc993b2908e2ebc695cac102242dccc921..881358859d8322baf08942082dec5ea4f8cc0e9e 100644 (file)
@@ -815,8 +815,15 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
        par->pmi_setpal = pmi_setpal;
        par->ypan = ypan;
 
-       if (par->pmi_setpal || par->ypan)
-               uvesafb_vbe_getpmi(task, par);
+       if (par->pmi_setpal || par->ypan) {
+               if (__supported_pte_mask & _PAGE_NX) {
+                       par->pmi_setpal = par->ypan = 0;
+                       printk(KERN_WARNING "uvesafb: NX protection is active, "
+                               "better not use the PMI\n");
+               } else {
+                       uvesafb_vbe_getpmi(task, par);
+               }
+       }
 #else
        /* The protected mode interface is not available on non-x86. */
        par->pmi_setpal = par->ypan = 0;
index 61b0bd596b85db28553cbae06bcdeb8f6d4128ca..1603023e3aa51e405f06679fff967f37738a1c8c 100644 (file)
 #define M1200X720_R60_VSP       POSITIVE
 
 /* 1200x900@60 Sync Polarity (DCON) */
-#define M1200X900_R60_HSP       NEGATIVE
-#define M1200X900_R60_VSP       NEGATIVE
+#define M1200X900_R60_HSP       POSITIVE
+#define M1200X900_R60_VSP       POSITIVE
 
 /* 1280x600@60 Sync Polarity (GTF Mode) */
 #define M1280x600_R60_HSP       NEGATIVE
index ae35cfdeb37cd0de853c9f1c82b1fc7614011fed..013884543e91a88f37e30806ee8da6fd31821d79 100644 (file)
 
 #include <linux/types.h>
 
+
+#define VIA_PITCH_SIZE (1<<3)
+#define VIA_PITCH_MAX  0x3FF8
+
+
 void via_set_primary_address(u32 addr);
 void via_set_secondary_address(u32 addr);
 void via_set_primary_pitch(u32 pitch);
index cf43c80d27f6e47659d3add9aac2c22c2bc02e36..dd1276e886f3ceb942b40fe545c1d8092f955d4f 100644 (file)
@@ -151,7 +151,8 @@ static void viafb_update_fix(struct fb_info *info)
 
        info->fix.visual =
                bpp == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
-       info->fix.line_length = (info->var.xres_virtual * bpp / 8 + 7) & ~7;
+       info->fix.line_length = ALIGN(info->var.xres_virtual * bpp / 8,
+               VIA_PITCH_SIZE);
 }
 
 static void viafb_setup_fixinfo(struct fb_fix_screeninfo *fix,
@@ -238,8 +239,12 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
                depth = 24;
 
        viafb_fill_var_color_info(var, depth);
-       line = (var->xres_virtual * var->bits_per_pixel / 8 + 7) & ~7;
-       if (line * var->yres_virtual > ppar->memsize)
+       if (var->xres_virtual < var->xres)
+               var->xres_virtual = var->xres;
+
+       line = ALIGN(var->xres_virtual * var->bits_per_pixel / 8,
+               VIA_PITCH_SIZE);
+       if (line > VIA_PITCH_MAX || line * var->yres_virtual > ppar->memsize)
                return -EINVAL;
 
        /* Based on var passed in to calculate the refresh,
@@ -348,8 +353,9 @@ static int viafb_pan_display(struct fb_var_screeninfo *var,
        struct fb_info *info)
 {
        struct viafb_par *viapar = info->par;
-       u32 vram_addr = (var->yoffset * var->xres_virtual + var->xoffset)
-               * (var->bits_per_pixel / 8) + viapar->vram_addr;
+       u32 vram_addr = viapar->vram_addr
+               + var->yoffset * info->fix.line_length
+               + var->xoffset * info->var.bits_per_pixel / 8;
 
        DEBUG_MSG(KERN_DEBUG "viafb_pan_display, address = %d\n", vram_addr);
        if (!viafb_dual_fb) {
index 4bcc8b82640be13af193478aaa2c4699bc609d2b..ecb925411e09f85031692c0e71f6fbbab349653f 100644 (file)
@@ -590,11 +590,11 @@ static struct virtio_config_ops virtio_pci_config_ops = {
 
 static void virtio_pci_release_dev(struct device *_d)
 {
-       struct virtio_device *dev = container_of(_d, struct virtio_device,
-                                                dev);
-       struct virtio_pci_device *vp_dev = to_vp_device(dev);
-
-       kfree(vp_dev);
+       /*
+        * No need for a release method as we allocate/free
+        * all devices together with the pci devices.
+        * Provide an empty one to avoid getting a warning from core.
+        */
 }
 
 /* the PCI probing function */
@@ -682,6 +682,7 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
        pci_iounmap(pci_dev, vp_dev->ioaddr);
        pci_release_regions(pci_dev);
        pci_disable_device(pci_dev);
+       kfree(vp_dev);
 }
 
 #ifdef CONFIG_PM
index 274c8f38303f5748478c47309c6e255b5c69ead1..505b17d8c67ce2ccf0ec07915d7849d5832f6215 100644 (file)
 #include "../w1_family.h"
 #include "w1_ds2780.h"
 
-int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
-                       int io)
+static int w1_ds2780_do_io(struct device *dev, char *buf, int addr,
+                       size_t count, int io)
 {
        struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
 
-       if (!dev)
-               return -ENODEV;
+       if (addr > DS2780_DATA_SIZE || addr < 0)
+               return 0;
 
-       mutex_lock(&sl->master->mutex);
-
-       if (addr > DS2780_DATA_SIZE || addr < 0) {
-               count = 0;
-               goto out;
-       }
        count = min_t(int, count, DS2780_DATA_SIZE - addr);
 
        if (w1_reset_select_slave(sl) == 0) {
@@ -47,7 +41,6 @@ int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
                        w1_write_8(sl->master, W1_DS2780_WRITE_DATA);
                        w1_write_8(sl->master, addr);
                        w1_write_block(sl->master, buf, count);
-                       /* XXX w1_write_block returns void, not n_written */
                } else {
                        w1_write_8(sl->master, W1_DS2780_READ_DATA);
                        w1_write_8(sl->master, addr);
@@ -55,13 +48,42 @@ int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
                }
        }
 
-out:
+       return count;
+}
+
+int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
+                       int io)
+{
+       struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+       int ret;
+
+       if (!dev)
+               return -ENODEV;
+
+       mutex_lock(&sl->master->mutex);
+
+       ret = w1_ds2780_do_io(dev, buf, addr, count, io);
+
        mutex_unlock(&sl->master->mutex);
 
-       return count;
+       return ret;
 }
 EXPORT_SYMBOL(w1_ds2780_io);
 
+int w1_ds2780_io_nolock(struct device *dev, char *buf, int addr, size_t count,
+                       int io)
+{
+       int ret;
+
+       if (!dev)
+               return -ENODEV;
+
+       ret = w1_ds2780_do_io(dev, buf, addr, count, io);
+
+       return ret;
+}
+EXPORT_SYMBOL(w1_ds2780_io_nolock);
+
 int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
 {
        struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
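
The w1_ds2780 rework factors the transfer into a helper that assumes the bus mutex is held, then exports a locking wrapper plus a _nolock variant for callers that already own the mutex. A generic sketch of that split, with hypothetical names:

#include <linux/mutex.h>
#include <linux/types.h>

struct ex_dev {
        struct mutex lock;
};

/* core transfer; caller must hold ex_dev->lock */
static int ex_do_io(struct ex_dev *dev, char *buf, size_t count)
{
        /* ... bus access elided ... */
        return count;
}

/* locking wrapper for ordinary callers */
int ex_io(struct ex_dev *dev, char *buf, size_t count)
{
        int ret;

        mutex_lock(&dev->lock);
        ret = ex_do_io(dev, buf, count);
        mutex_unlock(&dev->lock);
        return ret;
}

/* for callers that already hold dev->lock */
int ex_io_nolock(struct ex_dev *dev, char *buf, size_t count)
{
        return ex_do_io(dev, buf, count);
}
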
index a1fba79eb1b54ea990dc9cf6923452e482a6e3e2..7373793650216cca9eed315cf9875c40863aa726 100644 (file)
 
 extern int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
                        int io);
+extern int w1_ds2780_io_nolock(struct device *dev, char *buf, int addr,
+                       size_t count, int io);
 extern int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd);
 
 #endif /* !_W1_DS2780_H */
index 8cb26855bfede30eeb6782f693a29add442cdaa1..d4ab797cf756ffc829b743576ee563fbb90a8a4e 100644 (file)
@@ -216,6 +216,7 @@ static int __devinit cru_detect(unsigned long map_entry,
 
        cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
 
+       set_memory_x((unsigned long)bios32_map, 2);
        asminline_call(&cmn_regs, bios32_entrypoint);
 
        if (cmn_regs.u1.ral != 0) {
@@ -233,8 +234,11 @@ static int __devinit cru_detect(unsigned long map_entry,
                if ((physical_bios_base + physical_bios_offset)) {
                        cru_rom_addr =
                                ioremap(cru_physical_address, cru_length);
-                       if (cru_rom_addr)
+                       if (cru_rom_addr) {
+                               set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
+                                       (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
                                retval = 0;
+                       }
                }
 
                printk(KERN_DEBUG "hpwdt: CRU Base Address:   0x%lx\n",
index 30df85d8fca860d69a75079951feb9bb725944f1..a5493f8acf95a290700f3a3460ddf788234da4ea 100644 (file)
@@ -1026,7 +1026,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
        if (irq < 0)
                return irq;
 
-       irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
+       irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
index f6832f46aea4a4861368465321cc8423f997cdfc..e1c4c6e5b469c44449f68e9e841d264eb08dbc48 100644 (file)
@@ -135,7 +135,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
                /* Grant foreign access to the page. */
                gref->gref_id = gnttab_grant_foreign_access(op->domid,
                        pfn_to_mfn(page_to_pfn(gref->page)), readonly);
-               if (gref->gref_id < 0) {
+               if ((int)gref->gref_id < 0) {
                        rc = gref->gref_id;
                        goto undo;
                }
@@ -280,7 +280,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
                goto out;
        }
 
-       gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY);
+       gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
        if (!gref_ids) {
                rc = -ENOMEM;
                goto out;
index f914b26cf0c2efdfac05b8af9e6d9b8a37124dfc..b4e830eb3f26bc4848e9980636fde7a995b49875 100644 (file)
@@ -664,7 +664,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
        vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
 
        if (use_ptemod)
-               vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP;
+               vma->vm_flags |= VM_DONTCOPY;
 
        vma->vm_private_data = map;
 
index 6e8c15a23201a3a280948cf25142001347a0d6cb..fd60dffeb0fc79f72d535ea9bc9a8931c5153477 100644 (file)
@@ -162,7 +162,7 @@ void __init xen_swiotlb_init(int verbose)
        /*
         * Get IO TLB memory from any location.
         */
-       xen_io_tlb_start = alloc_bootmem(bytes);
+       xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        if (!xen_io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
 
@@ -278,9 +278,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
        /*
         * Ensure that the address returned is DMA'ble
         */
-       if (!dma_capable(dev, dev_addr, size))
-               panic("map_single: bounce buffer is not DMA'ble");
-
+       if (!dma_capable(dev, dev_addr, size)) {
+               swiotlb_tbl_unmap_single(dev, map, size, dir);
+               dev_addr = 0;
+       }
        return dev_addr;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
index b6a2690c9d49922676e80bc7eab65b5fffc00a29..560f2176aa6c042372f4c6d55dab1546ccc674a0 100644 (file)
@@ -132,7 +132,7 @@ static int read_backend_details(struct xenbus_device *xendev)
        return xenbus_read_otherend_details(xendev, "backend-id", "backend");
 }
 
-static int is_device_connecting(struct device *dev, void *data)
+static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential)
 {
        struct xenbus_device *xendev = to_xenbus_device(dev);
        struct device_driver *drv = data;
@@ -149,16 +149,41 @@ static int is_device_connecting(struct device *dev, void *data)
        if (drv && (dev->driver != drv))
                return 0;
 
+       if (ignore_nonessential) {
+               /* With older QEMU, for PVonHVM guests the guest config files
+                * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0']
+                * which is nonsensical as there is no PV FB (there can be
+                * a PVKB) running as HVM guest. */
+
+               if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0))
+                       return 0;
+
+               if ((strncmp(xendev->nodename, "device/vfb", 10) == 0))
+                       return 0;
+       }
        xendrv = to_xenbus_driver(dev->driver);
        return (xendev->state < XenbusStateConnected ||
                (xendev->state == XenbusStateConnected &&
                 xendrv->is_ready && !xendrv->is_ready(xendev)));
 }
+static int essential_device_connecting(struct device *dev, void *data)
+{
+       return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */);
+}
+static int non_essential_device_connecting(struct device *dev, void *data)
+{
+       return is_device_connecting(dev, data, false);
+}
 
-static int exists_connecting_device(struct device_driver *drv)
+static int exists_essential_connecting_device(struct device_driver *drv)
 {
        return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-                               is_device_connecting);
+                               essential_device_connecting);
+}
+static int exists_non_essential_connecting_device(struct device_driver *drv)
+{
+       return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+                               non_essential_device_connecting);
 }
 
 static int print_device_status(struct device *dev, void *data)
@@ -189,6 +214,23 @@ static int print_device_status(struct device *dev, void *data)
 /* We only wait for device setup after most initcalls have run. */
 static int ready_to_wait_for_devices;
 
+static bool wait_loop(unsigned long start, unsigned int max_delay,
+                    unsigned int *seconds_waited)
+{
+       if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) {
+               if (!*seconds_waited)
+                       printk(KERN_WARNING "XENBUS: Waiting for "
+                              "devices to initialise: ");
+               *seconds_waited += 5;
+               printk("%us...", max_delay - *seconds_waited);
+               if (*seconds_waited == max_delay)
+                       return true;
+       }
+
+       schedule_timeout_interruptible(HZ/10);
+
+       return false;
+}
 /*
  * On a 5-minute timeout, wait for all devices currently configured.  We need
  * to do this to guarantee that the filesystems and / or network devices
@@ -212,19 +254,14 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
        if (!ready_to_wait_for_devices || !xen_domain())
                return;
 
-       while (exists_connecting_device(drv)) {
-               if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
-                       if (!seconds_waited)
-                               printk(KERN_WARNING "XENBUS: Waiting for "
-                                      "devices to initialise: ");
-                       seconds_waited += 5;
-                       printk("%us...", 300 - seconds_waited);
-                       if (seconds_waited == 300)
-                               break;
-               }
-
-               schedule_timeout_interruptible(HZ/10);
-       }
+       while (exists_non_essential_connecting_device(drv))
+               if (wait_loop(start, 30, &seconds_waited))
+                       break;
+
+       /* Skips PVKB and PVFB check.*/
+       while (exists_essential_connecting_device(drv))
+               if (wait_loop(start, 270, &seconds_waited))
+                       break;
 
        if (seconds_waited)
                printk("\n");
index 5534690075aff4d6b0713d04b8d98dd07ef1fba9..daee5db4bef8a38f56eb46194ff0c0b60fc9e863 100644 (file)
@@ -801,6 +801,12 @@ static int process_msg(void)
                goto out;
        }
 
+       if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
+               kfree(msg);
+               err = -EINVAL;
+               goto out;
+       }
+
        body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
        if (body == NULL) {
                kfree(msg);
index 5a9b6843bac1103056161b11def45db9463bc404..1f3624d3b24fa028344caa97a13cd519ae1102b2 100644 (file)
@@ -109,7 +109,7 @@ struct afs_call {
        unsigned                reply_size;     /* current size of reply */
        unsigned                first_offset;   /* offset into mapping[first] */
        unsigned                last_to;        /* amount of mapping[last] */
-       unsigned short          offset;         /* offset into received data store */
+       unsigned                offset;         /* offset into received data store */
        unsigned char           unmarshall;     /* unmarshalling phase */
        bool                    incoming;       /* T if incoming call */
        bool                    send_pages;     /* T if data from mapping should be sent */
index e45a323aebb4468711de395fd06e5f0f9d2833d0..8ad8c2a0703a120c2dde7f425225f1d515b13f0c 100644 (file)
@@ -314,6 +314,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        struct msghdr msg;
        struct kvec iov[1];
        int ret;
+       struct sk_buff *skb;
 
        _enter("%x,{%d},", addr->s_addr, ntohs(call->port));
 
@@ -380,6 +381,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 
 error_do_abort:
        rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
+       while ((skb = skb_dequeue(&call->rx_queue)))
+               afs_free_skb(skb);
        rxrpc_kernel_end_call(rxcall);
        call->rxcall = NULL;
 error_kill_call:
index e29ec485af255822b8414be128fc8ef66da9a204..278ed7dc71bbc11cbadb4603fe40507e75d20741 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
        call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-static inline void get_ioctx(struct kioctx *kioctx)
-{
-       BUG_ON(atomic_read(&kioctx->users) <= 0);
-       atomic_inc(&kioctx->users);
-}
-
 static inline int try_get_ioctx(struct kioctx *kioctx)
 {
        return atomic_inc_not_zero(&kioctx->users);
@@ -273,7 +267,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        mm = ctx->mm = current->mm;
        atomic_inc(&mm->mm_count);
 
-       atomic_set(&ctx->users, 1);
+       atomic_set(&ctx->users, 2);
        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->ring_info.ring_lock);
        init_waitqueue_head(&ctx->wait);
@@ -527,11 +521,16 @@ static void aio_fput_routine(struct work_struct *data)
                        fput(req->ki_filp);
 
                /* Link the iocb into the context's free list */
+               rcu_read_lock();
                spin_lock_irq(&ctx->ctx_lock);
                really_put_req(ctx, req);
+               /*
+                * at that point ctx might've been killed, but actual
+                * freeing is RCU'd
+                */
                spin_unlock_irq(&ctx->ctx_lock);
+               rcu_read_unlock();
 
-               put_ioctx(ctx);
                spin_lock_irq(&fput_lock);
        }
        spin_unlock_irq(&fput_lock);
@@ -562,7 +561,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
         * this function will be executed w/out any aio kthread wakeup.
         */
        if (unlikely(!fput_atomic(req->ki_filp))) {
-               get_ioctx(ctx);
                spin_lock(&fput_lock);
                list_add(&req->ki_list, &fput_head);
                spin_unlock(&fput_lock);
@@ -1256,10 +1254,10 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        ret = PTR_ERR(ioctx);
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
-               if (!ret)
+               if (!ret) {
+                       put_ioctx(ioctx);
                        return 0;
-
-               get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+               }
                io_destroy(ioctx);
        }
 
@@ -1397,6 +1395,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
        if (ret < 0)
                goto out;
 
+       ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
+       if (ret < 0)
+               goto out;
+
        kiocb->ki_nr_segs = kiocb->ki_nbytes;
        kiocb->ki_cur_seg = 0;
        /* ki_nbytes/left now reflect bytes instead of segs */
@@ -1408,11 +1410,17 @@ out:
        return ret;
 }
 
-static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
+static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb)
 {
+       int bytes;
+
+       bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
+       if (bytes < 0)
+               return bytes;
+
        kiocb->ki_iovec = &kiocb->ki_inline_vec;
        kiocb->ki_iovec->iov_base = kiocb->ki_buf;
-       kiocb->ki_iovec->iov_len = kiocb->ki_left;
+       kiocb->ki_iovec->iov_len = bytes;
        kiocb->ki_nr_segs = 1;
        kiocb->ki_cur_seg = 0;
        return 0;
@@ -1437,10 +1445,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
                if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
                        kiocb->ki_left)))
                        break;
-               ret = security_file_permission(file, MAY_READ);
-               if (unlikely(ret))
-                       break;
-               ret = aio_setup_single_vector(kiocb);
+               ret = aio_setup_single_vector(READ, file, kiocb);
                if (ret)
                        break;
                ret = -EINVAL;
@@ -1455,10 +1460,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
                if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
                        kiocb->ki_left)))
                        break;
-               ret = security_file_permission(file, MAY_WRITE);
-               if (unlikely(ret))
-                       break;
-               ret = aio_setup_single_vector(kiocb);
+               ret = aio_setup_single_vector(WRITE, file, kiocb);
                if (ret)
                        break;
                ret = -EINVAL;
@@ -1469,9 +1471,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
                ret = -EBADF;
                if (unlikely(!(file->f_mode & FMODE_READ)))
                        break;
-               ret = security_file_permission(file, MAY_READ);
-               if (unlikely(ret))
-                       break;
                ret = aio_setup_vectored_rw(READ, kiocb, compat);
                if (ret)
                        break;
@@ -1483,9 +1482,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
                ret = -EBADF;
                if (unlikely(!(file->f_mode & FMODE_WRITE)))
                        break;
-               ret = security_file_permission(file, MAY_WRITE);
-               if (unlikely(ret))
-                       break;
                ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
                if (ret)
                        break;
index 475f9c597cb7c0dd86bce6a0c42602377d33fc14..756d3286bee5b1dd4211f73d7932da84f36c3586 100644 (file)
@@ -278,6 +278,17 @@ int autofs4_fill_super(struct super_block *, void *, int);
 struct autofs_info *autofs4_new_ino(struct autofs_sb_info *);
 void autofs4_clean_ino(struct autofs_info *);
 
+static inline int autofs_prepare_pipe(struct file *pipe)
+{
+       if (!pipe->f_op || !pipe->f_op->write)
+               return -EINVAL;
+       if (!S_ISFIFO(pipe->f_dentry->d_inode->i_mode))
+               return -EINVAL;
+       /* We want a packet pipe */
+       pipe->f_flags |= O_DIRECT;
+       return 0;
+}
+
 /* Queue management functions */
 
 int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify);
index 509fe1eb66ae31babcceda94ab8fd0a04268ebcb..de542716245e0cf0648f07f11ac2001f1e61e115 100644 (file)
@@ -376,7 +376,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
                        err = -EBADF;
                        goto out;
                }
-               if (!pipe->f_op || !pipe->f_op->write) {
+               if (autofs_prepare_pipe(pipe) < 0) {
                        err = -EPIPE;
                        fput(pipe);
                        goto out;
index 180fa2425e49310e9674a227c13297a1718f53fa..7c26678e2cac9e018d779393bad6f43f0fe76797 100644 (file)
@@ -292,7 +292,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
                printk("autofs: could not open pipe file descriptor\n");
                goto fail_dput;
        }
-       if (!pipe->f_op || !pipe->f_op->write)
+       if (autofs_prepare_pipe(pipe) < 0)
                goto fail_fput;
        sbi->pipe = pipe;
        sbi->pipefd = pipefd;
index 25435987d6ae1cdcbbcfea4e9bf4d744c55bab44..813ea10fdde3dad53135c6ec6ac3332815bb9732 100644 (file)
@@ -90,7 +90,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
 
        return (bytes > 0);
 }
-       
+
 static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
                                 struct autofs_wait_queue *wq,
                                 int type)
index 303983fabfd63391f3961a58be352303b7af0064..618493e44ae0953a6321924d0c294e699b3ba841 100644 (file)
@@ -796,7 +796,16 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
 #if defined(CONFIG_X86) || defined(CONFIG_ARM)
-                       load_bias = 0;
+                       /* Memory randomization might have been switched off
+                        * at runtime via sysctl.
+                        * If that is the case, retain the original non-zero
+                        * load_bias value in order to establish proper
+                        * non-randomized mappings.
+                        */
+                       if (current->flags & PF_RANDOMIZE)
+                               load_bias = 0;
+                       else
+                               load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
 #else
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
 #endif
@@ -1413,7 +1422,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
        for (i = 1; i < view->n; ++i) {
                const struct user_regset *regset = &view->regsets[i];
                do_thread_regset_writeback(t->task, regset);
-               if (regset->core_note_type &&
+               if (regset->core_note_type && regset->get &&
                    (!regset->active || regset->active(t->task, regset))) {
                        int ret;
                        size_t size = regset->n * regset->size;
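
Two independent fixes here: the ELF loader keeps a non-zero load_bias when PF_RANDOMIZE is clear, so ET_DYN binaries run without randomization are still mapped at the conventional base rather than wherever mmap places a zero-based request, and core dumping skips regsets that have no ->get method. The first is easy to observe from userspace; a tiny sketch (build it as a position-independent executable, which most modern toolchains do by default):

    /* Run it a few times, then run it under "setarch $(uname -m) -R ./a.out"
     * to disable address-space randomization and observe a stable, non-zero
     * load address, which is the case the load_bias change above preserves. */
    #include <stdio.h>

    int main(void)
    {
        printf("main() is mapped at %p\n", (void *)main);
        return 0;
    }
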
index fd91640369dbccefe18e020d55ed44cd3eee129b..d6093f91c48cf3d1556b89cef0e479877fdd76e6 100755 (executable)
@@ -66,7 +66,7 @@ static void bdev_inode_switch_bdi(struct inode *inode,
        spin_unlock(&inode_wb_list_lock);
 }
 
-static sector_t max_block(struct block_device *bdev)
+sector_t blkdev_max_block(struct block_device *bdev)
 {
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);
@@ -137,7 +137,7 @@ static int
 blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
 {
-       if (iblock >= max_block(I_BDEV(inode))) {
+       if (iblock >= blkdev_max_block(I_BDEV(inode))) {
                if (create)
                        return -EIO;
 
@@ -159,7 +159,7 @@ static int
 blkdev_get_blocks(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
 {
-       sector_t end_block = max_block(I_BDEV(inode));
+       sector_t end_block = blkdev_max_block(I_BDEV(inode));
        unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
 
        if ((iblock + max_blocks) > end_block) {
@@ -1077,6 +1077,7 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
 static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 {
        struct gendisk *disk;
+       struct module *owner;
        int ret;
        int partno;
        int perm = 0;
@@ -1102,6 +1103,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
        disk = get_gendisk(bdev->bd_dev, &partno);
        if (!disk)
                goto out;
+       owner = disk->fops->owner;
 
        disk_block_events(disk);
        mutex_lock_nested(&bdev->bd_mutex, for_part);
@@ -1129,8 +1131,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                        bdev->bd_disk = NULL;
                                        mutex_unlock(&bdev->bd_mutex);
                                        disk_unblock_events(disk);
-                                       module_put(disk->fops->owner);
                                        put_disk(disk);
+                                       module_put(owner);
                                        goto restart;
                                }
                        }
@@ -1149,8 +1151,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                         * The latter is necessary to prevent ghost
                         * partitions on a removed medium.
                         */
-                       if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
-                               rescan_partitions(disk, bdev);
+                       if (bdev->bd_invalidated) {
+                               if (!ret)
+                                       rescan_partitions(disk, bdev);
+                               else if (ret == -ENOMEDIUM)
+                                       invalidate_partitions(disk, bdev);
+                       }
                        if (ret)
                                goto out_clear;
                } else {
@@ -1180,14 +1186,18 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                        if (bdev->bd_disk->fops->open)
                                ret = bdev->bd_disk->fops->open(bdev, mode);
                        /* the same as first opener case, read comment there */
-                       if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
-                               rescan_partitions(bdev->bd_disk, bdev);
+                       if (bdev->bd_invalidated) {
+                               if (!ret)
+                                       rescan_partitions(bdev->bd_disk, bdev);
+                               else if (ret == -ENOMEDIUM)
+                                       invalidate_partitions(bdev->bd_disk, bdev);
+                       }
                        if (ret)
                                goto out_unlock_bdev;
                }
                /* only one opener holds refs to the module and disk */
-               module_put(disk->fops->owner);
                put_disk(disk);
+               module_put(owner);
        }
        bdev->bd_openers++;
        if (for_part)
@@ -1207,8 +1217,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
  out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
        disk_unblock_events(disk);
-       module_put(disk->fops->owner);
        put_disk(disk);
+       module_put(owner);
  out:
        bdput(bdev);
 
@@ -1434,14 +1444,15 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
        if (!bdev->bd_openers) {
                struct module *owner = disk->fops->owner;
 
-               put_disk(disk);
-               module_put(owner);
                disk_put_part(bdev->bd_part);
                bdev->bd_part = NULL;
                bdev->bd_disk = NULL;
                if (bdev != bdev->bd_contains)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
+
+               put_disk(disk);
+               module_put(owner);
        }
        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
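
The repeated pattern in these hunks is caching disk->fops->owner in a local before the final put_disk(): put_disk() can drop the last reference to the gendisk, after which reading disk->fops->owner for module_put() would be a use-after-free; the hunks also split the -ENOMEDIUM case so a removed medium invalidates partitions instead of rescanning them. A userspace sketch of the "save the field, then release the object" ordering, with stand-in types since the real ones are kernel-internal:

    #include <stdio.h>
    #include <stdlib.h>

    struct module_stub { const char *name; };   /* stands in for struct module */

    static void module_put_stub(struct module_stub *m)
    {
        printf("module_put(%s)\n", m->name);
    }

    struct disk_stub {
        struct module_stub *owner;
        int ref;
    };

    static void put_disk_stub(struct disk_stub *d)
    {
        if (--d->ref == 0)
            free(d);    /* last reference: d->owner must not be read after this */
    }

    int main(void)
    {
        static struct module_stub drv = { "sd_mod" };
        struct disk_stub *disk = malloc(sizeof(*disk));

        if (!disk)
            return 1;
        disk->owner = &drv;
        disk->ref = 1;

        struct module_stub *owner = disk->owner;   /* cache first, as the hunks do */
        put_disk_stub(disk);                       /* may free the disk */
        module_put_stub(owner);                    /* safe: uses the cached copy */
        return 0;
    }
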
index 3b859a3e6a0e9354a653e324f8c08ba85d45f959..66179bcb16f6dc1711c793ca757e458523d52812 100644 (file)
@@ -1972,7 +1972,7 @@ BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
 
 static inline bool btrfs_root_readonly(struct btrfs_root *root)
 {
-       return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY;
+       return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
 }
 
 /* struct btrfs_super_block */
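
root_item.flags is stored little-endian on disk, so masking it with a host-order constant is only correct on little-endian CPUs; the fix converts the constant with cpu_to_le64() before the AND. A userspace illustration with <endian.h> (the flag value below is a stand-in for BTRFS_ROOT_SUBVOL_RDONLY, used only to show the mismatch):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SUBVOL_RDONLY_FLAG 1ULL   /* illustrative stand-in for the real flag */

    int main(void)
    {
        uint64_t flags_le = htole64(SUBVOL_RDONLY_FLAG);   /* as stored on disk */

        /* Naive test: right on little-endian hosts, silently wrong on big-endian. */
        printf("host-order mask: %d\n", (flags_le & SUBVOL_RDONLY_FLAG) != 0);
        /* Fixed test: convert the constant (or the field) so both sides match. */
        printf("converted mask : %d\n", (flags_le & htole64(SUBVOL_RDONLY_FLAG)) != 0);
        return 0;
    }
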
index afc8303517ff01565efd0916a48cdc77fd46d787..cddd5382fa3e0496221768bdb32347581a4a4701 100644 (file)
@@ -968,6 +968,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);
+       sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
 
        do {
                if (!buffer_mapped(bh)) {
@@ -976,7 +977,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
-                       set_buffer_mapped(bh);
+                       if (block < end_block)
+                               set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
index 6255fa812c7a28b2071ced3f7585cb2359e6a3c4..7cb9dd2253186dcb07180a7ea975a838b8355893 100644 (file)
@@ -43,6 +43,7 @@
 
 #define CIFS_MIN_RCV_POOL 4
 
+#define MAX_REOPEN_ATT 5 /* these many maximum attempts to reopen a file */
 /*
  * default attribute cache timeout (jiffies)
  */
index 2451627c0158680c0a0cc96e843bd4e3a7bad940..b7758094770cf8f60fb01fb36c0c3350e4550f69 100644 (file)
@@ -2767,10 +2767,10 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 
 /*
  * When the server doesn't allow large posix writes, only allow a wsize of
- * 128k minus the size of the WRITE_AND_X header. That allows for a write up
+ * 2^17-1 minus the size of the WRITE_AND_X header. That allows for a write up
  * to the maximum size described by RFC1002.
  */
-#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4)
+#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
 
 /*
  * The default wsize is 1M. find_get_pages seems to return a maximum of 256
@@ -3004,7 +3004,7 @@ cifs_get_volume_info(char *mount_data, const char *devname)
 int
 cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
 {
-       int rc = 0;
+       int rc;
        int xid;
        struct cifs_ses *pSesInfo;
        struct cifs_tcon *tcon;
@@ -3033,6 +3033,7 @@ try_mount_again:
                FreeXid(xid);
        }
 #endif
+       rc = 0;
        tcon = NULL;
        pSesInfo = NULL;
        srvTcp = NULL;
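
The RFC1002 session message carries a 17-bit length field, so the largest frame is 2^17 - 1 bytes, not a full 128 KiB; the macro is tightened by that one byte. The second hunk re-initialises rc after the try_mount_again label so a retried mount cannot inherit a stale error code. A quick check of the arithmetic (sizeof(WRITE_REQ) is a kernel-side structure size, so a placeholder value is used here purely to show the delta):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int write_req_size = 63;   /* hypothetical header size */

        unsigned int old_wsize = 128 * 1024 - write_req_size + 4;
        unsigned int new_wsize = (1u << 17) - 1 - write_req_size + 4;

        printf("old=%u new=%u delta=%u\n",
               old_wsize, new_wsize, old_wsize - new_wsize);   /* delta = 1 */
        return 0;
    }
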
index 16cdd6da227a7a3303e2ded6531b7a73914156db..ed5c07b0cdb174260e994000096de97dabe227df 100644 (file)
@@ -583,10 +583,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
                         * If either that or op not supported returned, follow
                         * the normal lookup.
                         */
-                       if ((rc == 0) || (rc == -ENOENT))
+                       switch (rc) {
+                       case 0:
+                               /*
+                                * The server may allow us to open things like
+                                * FIFOs, but the client isn't set up to deal
+                                * with that. If it's not a regular file, just
+                                * close it and proceed as if it were a normal
+                                * lookup.
+                                */
+                               if (newInode && !S_ISREG(newInode->i_mode)) {
+                                       CIFSSMBClose(xid, pTcon, fileHandle);
+                                       break;
+                               }
+                       case -ENOENT:
                                posix_open = true;
-                       else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+                       case -EOPNOTSUPP:
+                               break;
+                       default:
                                pTcon->broken_posix_open = true;
+                       }
                }
                if (!posix_open)
                        rc = cifs_get_inode_info_unix(&newInode, full_path,
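
The rewritten error handling relies on deliberate switch fall-through: a successful POSIX open of a non-regular file is closed and drops back to the normal lookup, a regular file or -ENOENT keeps posix_open set, -EOPNOTSUPP falls back quietly, and anything else marks the tcon's POSIX open as broken. A compact userspace model of just that control flow (it mirrors the branching only, not the real cifs data structures):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool decide_posix_open(int rc, bool regular_file)
    {
        bool posix_open = false;

        switch (rc) {
        case 0:
            if (!regular_file)
                break;        /* close the handle, fall back to a normal lookup */
            /* fall through */
        case -ENOENT:
            posix_open = true;
            /* fall through */
        case -EOPNOTSUPP:
            break;
        default:
            /* server mishandles POSIX opens; remembered via broken_posix_open */
            break;
        }
        return posix_open;
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               decide_posix_open(0, true),              /* 1 */
               decide_posix_open(0, false),             /* 0 */
               decide_posix_open(-ENOENT, false),       /* 1 */
               decide_posix_open(-EOPNOTSUPP, false));  /* 0 */
        return 0;
    }
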
index a9b4a24f2a16ba0b4f9073e98043d51bf1f43e67..9040cb0695cdc7cc6a7db7bf97048dd245664a78 100644 (file)
@@ -973,10 +973,11 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
                                        bool fsuid_only)
 {
-       struct cifsFileInfo *open_file;
+       struct cifsFileInfo *open_file, *inv_file = NULL;
        struct cifs_sb_info *cifs_sb;
        bool any_available = false;
        int rc;
+       unsigned int refind = 0;
 
        /* Having a null inode here (because mapping->host was set to zero by
        the VFS or MM) should not happen but we had reports of on oops (due to
@@ -996,40 +997,25 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
 
        spin_lock(&cifs_file_list_lock);
 refind_writable:
+       if (refind > MAX_REOPEN_ATT) {
+               spin_unlock(&cifs_file_list_lock);
+               return NULL;
+       }
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (!any_available && open_file->pid != current->tgid)
                        continue;
                if (fsuid_only && open_file->uid != current_fsuid())
                        continue;
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
-                       cifsFileInfo_get(open_file);
-
                        if (!open_file->invalidHandle) {
                                /* found a good writable file */
+                               cifsFileInfo_get(open_file);
                                spin_unlock(&cifs_file_list_lock);
                                return open_file;
+                       } else {
+                               if (!inv_file)
+                                       inv_file = open_file;
                        }
-
-                       spin_unlock(&cifs_file_list_lock);
-
-                       /* Had to unlock since following call can block */
-                       rc = cifs_reopen_file(open_file, false);
-                       if (!rc)
-                               return open_file;
-
-                       /* if it fails, try another handle if possible */
-                       cFYI(1, "wp failed on reopen file");
-                       cifsFileInfo_put(open_file);
-
-                       spin_lock(&cifs_file_list_lock);
-
-                       /* else we simply continue to the next entry. Thus
-                          we do not loop on reopen errors.  If we
-                          can not reopen the file, for example if we
-                          reconnected to a server with another client
-                          racing to delete or lock the file we would not
-                          make progress if we restarted before the beginning
-                          of the loop here. */
                }
        }
        /* couldn't find useable FH with same pid, try any available */
@@ -1037,7 +1023,30 @@ refind_writable:
                any_available = true;
                goto refind_writable;
        }
+
+       if (inv_file) {
+               any_available = false;
+               cifsFileInfo_get(inv_file);
+       }
+
        spin_unlock(&cifs_file_list_lock);
+
+       if (inv_file) {
+               rc = cifs_reopen_file(inv_file, false);
+               if (!rc)
+                       return inv_file;
+               else {
+                       spin_lock(&cifs_file_list_lock);
+                       list_move_tail(&inv_file->flist,
+                                       &cifs_inode->openFileList);
+                       spin_unlock(&cifs_file_list_lock);
+                       cifsFileInfo_put(inv_file);
+                       spin_lock(&cifs_file_list_lock);
+                       ++refind;
+                       goto refind_writable;
+               }
+       }
+
        return NULL;
 }
 
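
Rather than reopening stale handles while walking the open-file list (and risking an endless loop when a reopen keeps failing), the rewritten find_writable_file() first prefers a valid writable handle, remembers one invalid candidate, reopens it outside the spinlock, and bounds the whole retry with MAX_REOPEN_ATT. The shape of that bounded retry, with a hypothetical try_reopen() standing in for cifs_reopen_file():

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_REOPEN_ATT 5   /* same bound the new cifsglob.h constant imposes */

    static bool try_reopen(unsigned int attempt)
    {
        return attempt == 3;   /* pretend only the fourth attempt succeeds */
    }

    int main(void)
    {
        unsigned int refind = 0;

        for (;;) {
            if (refind > MAX_REOPEN_ATT) {
                puts("no writable handle");   /* the function returns NULL */
                return 1;
            }
            if (try_reopen(refind)) {
                printf("reopened after %u retries\n", refind);
                return 0;
            }
            refind++;   /* bounded, unlike the old open-ended reopen path */
        }
    }
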
index a7b2dcd4a53ec1b5404729ea29e2e9ff79fabdeb..745e5cdca8f7f75b27b2d56d631051c11733f9e4 100644 (file)
@@ -562,7 +562,16 @@ int cifs_get_file_info(struct file *filp)
 
        xid = GetXid();
        rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
-       if (rc == -EOPNOTSUPP || rc == -EINVAL) {
+       switch (rc) {
+       case 0:
+               cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
+               break;
+       case -EREMOTE:
+               cifs_create_dfs_fattr(&fattr, inode->i_sb);
+               rc = 0;
+               break;
+       case -EOPNOTSUPP:
+       case -EINVAL:
                /*
                 * FIXME: legacy server -- fall back to path-based call?
                 * for now, just skip revalidating and mark inode for
@@ -570,18 +579,14 @@ int cifs_get_file_info(struct file *filp)
                 */
                rc = 0;
                CIFS_I(inode)->time = 0;
+       default:
                goto cgfi_exit;
-       } else if (rc == -EREMOTE) {
-               cifs_create_dfs_fattr(&fattr, inode->i_sb);
-               rc = 0;
-       } else if (rc)
-               goto cgfi_exit;
+       }
 
        /*
         * don't bother with SFU junk here -- just mark inode as needing
         * revalidation.
         */
-       cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
        fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
        fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
        cifs_fattr_to_inode(inode, &fattr);
index d3e619692ee0f0437e26d9d91d61545859f248bc..0cfae19129bdef93342e26cdc4dab75c1f14223a 100644 (file)
@@ -244,16 +244,15 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
        /* copy user */
        /* BB what about null user mounts - check that we do this BB */
        /* copy user */
-       if (ses->user_name != NULL)
+       if (ses->user_name != NULL) {
                strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
+               bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
+       }
        /* else null user mount */
-
-       bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
        *bcc_ptr = 0;
        bcc_ptr++; /* account for null termination */
 
        /* copy domain */
-
        if (ses->domainName != NULL) {
                strncpy(bcc_ptr, ses->domainName, 256);
                bcc_ptr += strnlen(ses->domainName, 256);
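
Previously the buffer cursor was advanced by strnlen(ses->user_name, ...) even on "null user" mounts, i.e. when user_name is NULL, which dereferences a NULL pointer; the advance now happens only inside the non-NULL branch. A small userspace sketch of the guarded copy-and-advance (names and sizes are illustrative, not the real BCC layout):

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <string.h>

    static size_t append_name(char *dst, const char *name, size_t max)
    {
        size_t len = 0;

        if (name != NULL) {          /* the fix: only touch name when it exists */
            strncpy(dst, name, max);
            len = strnlen(name, max);
        }
        dst[len] = '\0';             /* always NUL-terminate, as the BCC area does */
        return len + 1;              /* account for the terminator */
    }

    int main(void)
    {
        char buf[32];

        printf("%zu %zu\n", append_name(buf, "guest", 16),
               append_name(buf, NULL, 16));   /* 6 1, and no crash for NULL */
        return 0;
    }
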
index fbdcbca40725ab483f9b238c561c6dfb240db0bf..0b51cfc9291a810f194b8b8ea01b2f58f80cb22b 100644 (file)
@@ -241,6 +241,7 @@ static void dentry_lru_add(struct dentry *dentry)
 static void __dentry_lru_del(struct dentry *dentry)
 {
        list_del_init(&dentry->d_lru);
+       dentry->d_flags &= ~DCACHE_SHRINK_LIST;
        dentry->d_sb->s_nr_dentry_unused--;
        dentry_stat.nr_unused--;
 }
@@ -753,6 +754,7 @@ relock:
                        spin_unlock(&dentry->d_lock);
                } else {
                        list_move_tail(&dentry->d_lru, &tmp);
+                       dentry->d_flags |= DCACHE_SHRINK_LIST;
                        spin_unlock(&dentry->d_lock);
                        if (!--cnt)
                                break;
@@ -1144,14 +1146,18 @@ resume:
                /* 
                 * move only zero ref count dentries to the end 
                 * of the unused list for prune_dcache
+                *
+                * Those which are presently on the shrink list, being processed
+                * by shrink_dentry_list(), shouldn't be moved.  Otherwise the
+                * loop in shrink_dcache_parent() might not make any progress
+                * and loop forever.
                 */
-               if (!dentry->d_count) {
+               if (dentry->d_count) {
+                       dentry_lru_del(dentry);
+               } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
                        dentry_lru_move_tail(dentry);
                        found++;
-               } else {
-                       dentry_lru_del(dentry);
                }
-
                /*
                 * We can return to the caller if we have found some (this
                 * ensures forward progress). We'll be coming back to find
@@ -2427,6 +2433,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                        if (d_ancestor(alias, dentry)) {
                                /* Check for loops */
                                actual = ERR_PTR(-ELOOP);
+                               spin_unlock(&inode->i_lock);
                        } else if (IS_ROOT(alias)) {
                                /* Is this an anonymous mountpoint that we
                                 * could splice into our tree? */
@@ -2436,7 +2443,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
                                goto found;
                        } else {
                                /* Nope, but we must(!) avoid directory
-                                * aliasing */
+                                * aliasing. This drops inode->i_lock */
                                actual = __d_unalias(inode, dentry, alias);
                        }
                        write_sequnlock(&rename_lock);
@@ -2487,16 +2494,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 /**
  * prepend_path - Prepend path string to a buffer
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buffer: pointer to the end of the buffer
  * @buflen: pointer to buffer length
  *
  * Caller holds the rename_lock.
- *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
  */
-static int prepend_path(const struct path *path, struct path *root,
+static int prepend_path(const struct path *path,
+                       const struct path *root,
                        char **buffer, int *buflen)
 {
        struct dentry *dentry = path->dentry;
@@ -2531,10 +2536,10 @@ static int prepend_path(const struct path *path, struct path *root,
                dentry = parent;
        }
 
-out:
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
+out:
        br_read_unlock(vfsmount_lock);
        return error;
 
@@ -2548,15 +2553,17 @@ global_root:
                WARN(1, "Root dentry has weird name <%.*s>\n",
                     (int) dentry->d_name.len, dentry->d_name.name);
        }
-       root->mnt = vfsmnt;
-       root->dentry = dentry;
+       if (!slash)
+               error = prepend(buffer, buflen, "/", 1);
+       if (!error)
+               error = vfsmnt->mnt_ns ? 1 : 2;
        goto out;
 }
 
 /**
  * __d_path - return the path of a dentry
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buf: buffer to return value in
  * @buflen: buffer length
  *
@@ -2567,10 +2574,10 @@ global_root:
  *
  * "buflen" should be positive.
  *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
+ * If the path is not reachable from the supplied root, return %NULL.
  */
-char *__d_path(const struct path *path, struct path *root,
+char *__d_path(const struct path *path,
+              const struct path *root,
               char *buf, int buflen)
 {
        char *res = buf + buflen;
@@ -2581,7 +2588,28 @@ char *__d_path(const struct path *path, struct path *root,
        error = prepend_path(path, root, &res, &buflen);
        write_sequnlock(&rename_lock);
 
-       if (error)
+       if (error < 0)
+               return ERR_PTR(error);
+       if (error > 0)
+               return NULL;
+       return res;
+}
+
+char *d_absolute_path(const struct path *path,
+              char *buf, int buflen)
+{
+       struct path root = {};
+       char *res = buf + buflen;
+       int error;
+
+       prepend(&res, &buflen, "\0", 1);
+       write_seqlock(&rename_lock);
+       error = prepend_path(path, &root, &res, &buflen);
+       write_sequnlock(&rename_lock);
+
+       if (error > 1)
+               error = -EINVAL;
+       if (error < 0)
                return ERR_PTR(error);
        return res;
 }
@@ -2589,8 +2617,9 @@ char *__d_path(const struct path *path, struct path *root,
 /*
  * same as __d_path but appends "(deleted)" for unlinked files.
  */
-static int path_with_deleted(const struct path *path, struct path *root,
-                                char **buf, int *buflen)
+static int path_with_deleted(const struct path *path,
+                            const struct path *root,
+                            char **buf, int *buflen)
 {
        prepend(buf, buflen, "\0", 1);
        if (d_unlinked(path->dentry)) {
@@ -2627,7 +2656,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        /*
@@ -2642,9 +2670,8 @@ char *d_path(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (error)
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error < 0)
                res = ERR_PTR(error);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2665,7 +2692,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        if (path->dentry->d_op && path->dentry->d_op->d_dname)
@@ -2673,9 +2699,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (!error && !path_equal(&tmp, &root))
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error > 0)
                error = prepend_unreachable(&res, &buflen);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2806,19 +2831,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
        write_seqlock(&rename_lock);
        if (!d_unlinked(pwd.dentry)) {
                unsigned long len;
-               struct path tmp = root;
                char *cwd = page + PAGE_SIZE;
                int buflen = PAGE_SIZE;
 
                prepend(&cwd, &buflen, "\0", 1);
-               error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+               error = prepend_path(&pwd, &root, &cwd, &buflen);
                write_sequnlock(&rename_lock);
 
-               if (error)
+               if (error < 0)
                        goto out;
 
                /* Unreachable from current root */
-               if (!path_equal(&tmp, &root)) {
+               if (error > 0) {
                        error = prepend_unreachable(&cwd, &buflen);
                        if (error)
                                goto out;
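
The dcache changes make prepend_path() treat the supplied root as const and report "not reachable from this root" through a positive return value instead of silently rewriting the caller's root; __d_path() now returns NULL for unreachable paths, and the new d_absolute_path() builds a path against an empty root. All of them construct the string backwards into the end of the caller's buffer. A userspace sketch of that backwards-prepend technique, essentially the same prepend() helper as in the hunks:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Prepend "str" of length "len" in front of *buffer, moving the cursor left. */
    static int prepend(char **buffer, int *buflen, const char *str, int len)
    {
        *buflen -= len;
        if (*buflen < 0)
            return -ENAMETOOLONG;
        *buffer -= len;
        memcpy(*buffer, str, len);
        return 0;
    }

    int main(void)
    {
        char buf[64];
        char *res = buf + sizeof(buf);
        int buflen = sizeof(buf);
        /* Leaf component first, the way the kernel walks d_parent upwards. */
        const char *components[] = { "passwd", "etc" };

        prepend(&res, &buflen, "\0", 1);
        for (unsigned int i = 0; i < 2; i++) {
            prepend(&res, &buflen, components[i], (int)strlen(components[i]));
            prepend(&res, &buflen, "/", 1);
        }
        printf("%s\n", res);   /* prints /etc/passwd */
        return 0;
    }
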
index 58609bde3b9fc076187afa3317582788f2f6bc7f..c6602d24517682c56bd2928c390174b834a05534 100644 (file)
@@ -417,17 +417,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
                        (unsigned long long)(extent_base + extent_offset), rc);
                goto out;
        }
-       if (unlikely(ecryptfs_verbosity > 0)) {
-               ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
-                               "with iv:\n");
-               ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
-               ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
-                               "encryption:\n");
-               ecryptfs_dump_hex((char *)
-                                 (page_address(page)
-                                  + (extent_offset * crypt_stat->extent_size)),
-                                 8);
-       }
        rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
                                          page, (extent_offset
                                                 * crypt_stat->extent_size),
@@ -440,14 +429,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
                goto out;
        }
        rc = 0;
-       if (unlikely(ecryptfs_verbosity > 0)) {
-               ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; "
-                       "rc = [%d]\n",
-                       (unsigned long long)(extent_base + extent_offset), rc);
-               ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
-                               "encryption:\n");
-               ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
-       }
 out:
        return rc;
 }
@@ -543,17 +524,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
                        (unsigned long long)(extent_base + extent_offset), rc);
                goto out;
        }
-       if (unlikely(ecryptfs_verbosity > 0)) {
-               ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
-                               "with iv:\n");
-               ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
-               ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
-                               "decryption:\n");
-               ecryptfs_dump_hex((char *)
-                                 (page_address(enc_extent_page)
-                                  + (extent_offset * crypt_stat->extent_size)),
-                                 8);
-       }
        rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
                                          (extent_offset
                                           * crypt_stat->extent_size),
@@ -567,16 +537,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
                goto out;
        }
        rc = 0;
-       if (unlikely(ecryptfs_verbosity > 0)) {
-               ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16llx]; "
-                       "rc = [%d]\n",
-                       (unsigned long long)(extent_base + extent_offset), rc);
-               ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
-                               "decryption:\n");
-               ecryptfs_dump_hex((char *)(page_address(page)
-                                          + (extent_offset
-                                             * crypt_stat->extent_size)), 8);
-       }
 out:
        return rc;
 }
@@ -1618,7 +1578,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
                rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
                if (rc) {
                        printk(KERN_DEBUG "Valid eCryptfs headers not found in "
-                              "file header region or xattr region\n");
+                              "file header region or xattr region, inode %lu\n",
+                               ecryptfs_inode->i_ino);
                        rc = -EINVAL;
                        goto out;
                }
@@ -1627,7 +1588,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
                                                ECRYPTFS_DONT_VALIDATE_HEADER_SIZE);
                if (rc) {
                        printk(KERN_DEBUG "Valid eCryptfs headers not found in "
-                              "file xattr region either\n");
+                              "file xattr region either, inode %lu\n",
+                               ecryptfs_inode->i_ino);
                        rc = -EINVAL;
                }
                if (crypt_stat->mount_crypt_stat->flags
@@ -1638,7 +1600,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
                               "crypto metadata only in the extended attribute "
                               "region, but eCryptfs was mounted without "
                               "xattr support enabled. eCryptfs will not treat "
-                              "this like an encrypted file.\n");
+                              "this like an encrypted file, inode %lu\n",
+                               ecryptfs_inode->i_ino);
                        rc = -EINVAL;
                }
        }
@@ -1943,7 +1906,7 @@ static unsigned char *portable_filename_chars = ("-.0123456789ABCD"
 
 /* We could either offset on every reverse map or just pad some 0x00's
  * at the front here */
-static const unsigned char filename_rev_map[] = {
+static const unsigned char filename_rev_map[256] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
@@ -1959,7 +1922,7 @@ static const unsigned char filename_rev_map[] = {
        0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */
        0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */
        0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */
-       0x3D, 0x3E, 0x3F
+       0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */
 };
 
 /**
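
Two changes in this file: the verbose debug paths that hex-dumped IVs and the first bytes of plaintext and ciphertext are removed (they leaked sensitive material into the kernel log), and filename_rev_map is now declared with an explicit 256 entries so that indexing it with any byte from an encoded filename stays in bounds; bytes above the last initialised entry used to read past the end of the array. The hazard, in miniature (map values are illustrative, not the real encoding):

    #include <stdio.h>

    /* Old layout: only 123 entries, so any byte value above 122 would index
     * past the end of the array. */
    static const unsigned char short_map[123] = { ['A'] = 0x00, ['a'] = 0x26 };
    /* New layout: one slot per possible byte value; the tail stays 0x00. */
    static const unsigned char full_map[256]  = { ['A'] = 0x00, ['a'] = 0x26 };

    int main(void)
    {
        unsigned char c = 0xC3;   /* e.g. a UTF-8 lead byte in a damaged name */

        /* short_map[c] would be an out-of-bounds read; full_map[c] is simply 0. */
        printf("full_map[0x%02X] = 0x%02X\n", c, full_map[c]);
        (void)short_map;
        return 0;
    }
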
index 4ec9eb00a241fb56adcec03587d8b3c8ae506d40..0c1a652700499abcf3239a715c2635361946720b 100644 (file)
@@ -139,6 +139,27 @@ out:
        return rc;
 }
 
+static void ecryptfs_vma_close(struct vm_area_struct *vma)
+{
+       filemap_write_and_wait(vma->vm_file->f_mapping);
+}
+
+static const struct vm_operations_struct ecryptfs_file_vm_ops = {
+       .close          = ecryptfs_vma_close,
+       .fault          = filemap_fault,
+};
+
+static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       int rc;
+
+       rc = generic_file_mmap(file, vma);
+       if (!rc)
+               vma->vm_ops = &ecryptfs_file_vm_ops;
+
+       return rc;
+}
+
 struct kmem_cache *ecryptfs_file_info_cache;
 
 /**
@@ -348,7 +369,7 @@ const struct file_operations ecryptfs_main_fops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-       .mmap = generic_file_mmap,
+       .mmap = ecryptfs_file_mmap,
        .open = ecryptfs_open,
        .flush = ecryptfs_flush,
        .release = ecryptfs_release,
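
generic_file_mmap() is wrapped so shared mappings get a ->close() that calls filemap_write_and_wait(): pages dirtied through an mmap are pushed down to the lower (encrypted) file when the mapping is torn down instead of waiting for some later writeback. The userspace-visible analogue is flushing a shared mapping explicitly before unmapping it; a minimal sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("demo.txt", O_RDWR | O_CREAT, 0600);

        if (fd < 0 || ftruncate(fd, 4096) < 0)
            return 1;

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
            return 1;
        memcpy(p, "hello", 5);

        /* Explicit flush before unmapping; the hunk above arranges the
         * equivalent flush automatically when the last mapping goes away. */
        msync(p, 4096, MS_SYNC);
        munmap(p, 4096);
        close(fd);
        return 0;
    }
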
index 4a4fad7fb851233e2e69298f8187cce712e00038..2717329386d858ba9173a76e3d377efa89614394 100644 (file)
@@ -854,18 +854,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
                size_t num_zeros = (PAGE_CACHE_SIZE
                                    - (ia->ia_size & ~PAGE_CACHE_MASK));
 
-
-               /*
-                * XXX(truncate) this should really happen at the begginning
-                * of ->setattr.  But the code is too messy to that as part
-                * of a larger patch.  ecryptfs is also totally missing out
-                * on the inode_change_ok check at the beginning of
-                * ->setattr while would include this.
-                */
-               rc = inode_newsize_ok(inode, ia->ia_size);
-               if (rc)
-                       goto out;
-
                if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
                        truncate_setsize(inode, ia->ia_size);
                        lower_ia->ia_size = ia->ia_size;
@@ -915,6 +903,28 @@ out:
        return rc;
 }
 
+static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset)
+{
+       struct ecryptfs_crypt_stat *crypt_stat;
+       loff_t lower_oldsize, lower_newsize;
+
+       crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
+       lower_oldsize = upper_size_to_lower_size(crypt_stat,
+                                                i_size_read(inode));
+       lower_newsize = upper_size_to_lower_size(crypt_stat, offset);
+       if (lower_newsize > lower_oldsize) {
+               /*
+                * The eCryptfs inode and the new *lower* size are mixed here
+                * because we may not have the lower i_mutex held and/or it may
+                * not be appropriate to call inode_newsize_ok() with inodes
+                * from other filesystems.
+                */
+               return inode_newsize_ok(inode, lower_newsize);
+       }
+
+       return 0;
+}
+
 /**
  * ecryptfs_truncate
  * @dentry: The ecryptfs layer dentry
@@ -931,6 +941,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
        struct iattr lower_ia = { .ia_valid = 0 };
        int rc;
 
+       rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length);
+       if (rc)
+               return rc;
+
        rc = truncate_upper(dentry, &ia, &lower_ia);
        if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
                struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
@@ -1012,6 +1026,16 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
                }
        }
        mutex_unlock(&crypt_stat->cs_mutex);
+
+       rc = inode_change_ok(inode, ia);
+       if (rc)
+               goto out;
+       if (ia->ia_valid & ATTR_SIZE) {
+               rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size);
+               if (rc)
+                       goto out;
+       }
+
        if (S_ISREG(inode->i_mode)) {
                rc = filemap_write_and_wait(inode->i_mapping);
                if (rc)
@@ -1095,6 +1119,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
        }
 
        rc = vfs_setxattr(lower_dentry, name, value, size, flags);
+       if (!rc)
+               fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
 out:
        return rc;
 }
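
The new ecryptfs_inode_newsize_ok() translates a requested upper (decrypted) size into the lower (encrypted) file size it would require and runs that through inode_newsize_ok(), and ecryptfs_setattr() now performs inode_change_ok() and the size check up front, so an over-large truncate fails early instead of partway through zero-filling. A simplified model of the size translation (header and extent sizes below are assumptions, not the real upper_size_to_lower_size() arithmetic):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified: metadata header plus data rounded up to whole extents. */
    static uint64_t upper_to_lower(uint64_t upper, uint64_t header, uint64_t extent)
    {
        uint64_t extents = (upper + extent - 1) / extent;

        return header + extents * extent;
    }

    int main(void)
    {
        uint64_t limit = 1ULL << 31;            /* pretend lower-fs size limit */
        uint64_t upper = (1ULL << 31) - 100;
        uint64_t lower = upper_to_lower(upper, 8192, 4096);

        /* The upper size alone looks fine, but the lower file it implies does not. */
        printf("upper ok: %d, lower ok: %d\n", upper <= limit, lower <= limit);
        return 0;
    }
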
index 940a82e63dc3fcdc52797f9176842fdbf391d272..0dc5a3d554a44ac782627eff0053117387af03aa 100644 (file)
@@ -409,11 +409,47 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
        ssize_t sz = 0;
        char *data;
        uid_t euid = current_euid();
+       unsigned char packet_size_peek[3];
        int rc;
 
-       if (count == 0)
+       if (count == 0) {
                goto out;
+       } else if (count == (1 + 4)) {
+               /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
+               goto memdup;
+       } else if (count < (1 + 4 + 1)
+                  || count > (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
+                              + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)) {
+               printk(KERN_WARNING "%s: Acceptable packet size range is "
+                      "[%d-%lu], but amount of data written is [%zu].",
+                      __func__, (1 + 4 + 1),
+                      (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
+                       + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES), count);
+               return -EINVAL;
+       }
+
+       if (copy_from_user(packet_size_peek, (buf + 1 + 4),
+                          sizeof(packet_size_peek))) {
+               printk(KERN_WARNING "%s: Error while inspecting packet size\n",
+                      __func__);
+               return -EFAULT;
+       }
+
+       rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
+                                         &packet_size_length);
+       if (rc) {
+               printk(KERN_WARNING "%s: Error parsing packet length; "
+                      "rc = [%d]\n", __func__, rc);
+               return rc;
+       }
+
+       if ((1 + 4 + packet_size_length + packet_size) != count) {
+               printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
+                      packet_size);
+               return -EINVAL;
+       }
 
+memdup:
        data = memdup_user(buf, count);
        if (IS_ERR(data)) {
                printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
@@ -435,23 +471,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
                }
                memcpy(&counter_nbo, &data[i], 4);
                seq = be32_to_cpu(counter_nbo);
-               i += 4;
-               rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
-                                                 &packet_size_length);
-               if (rc) {
-                       printk(KERN_WARNING "%s: Error parsing packet length; "
-                              "rc = [%d]\n", __func__, rc);
-                       goto out_free;
-               }
-               i += packet_size_length;
-               if ((1 + 4 + packet_size_length + packet_size) != count) {
-                       printk(KERN_WARNING "%s: (1 + packet_size_length([%zd])"
-                              " + packet_size([%zd]))([%zd]) != "
-                              "count([%zd]). Invalid packet format.\n",
-                              __func__, packet_size_length, packet_size,
-                              (1 + packet_size_length + packet_size), count);
-                       goto out_free;
-               }
+               i += 4 + packet_size_length;
                rc = ecryptfs_miscdev_response(&data[i], packet_size,
                                               euid, current_user_ns(),
                                               task_pid(current), seq);
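
ecryptfs_miscdev_write() now bounds-checks count and peeks at the packet-length field in the user buffer before copying everything with memdup_user(), so malformed writes are rejected with -EINVAL up front rather than after parsing an already-copied buffer. A simplified version of that framing check (the real size field is a variable-length encoding parsed by ecryptfs_parse_packet_length(); the one-byte field and the type value below are assumptions for illustration):

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for ecryptfs_parse_packet_length(). */
    static int parse_len(const unsigned char *p, size_t *len, size_t *len_len)
    {
        *len = p[0];
        *len_len = 1;
        return 0;
    }

    static int validate(const unsigned char *buf, size_t count)
    {
        size_t len, len_len;

        if (count < 1 + 4 + 1)
            return -1;                       /* too short to hold any packet */
        if (parse_len(buf + 1 + 4, &len, &len_len))
            return -1;
        if (1 + 4 + len_len + len != count)
            return -1;                       /* declared size disagrees with count */
        return 0;
    }

    int main(void)
    {
        unsigned char good[] = { 0x13, 0, 0, 0, 1, 3, 'a', 'b', 'c' };
        unsigned char bad[]  = { 0x13, 0, 0, 0, 1, 9, 'a', 'b', 'c' };

        printf("good=%d bad=%d\n", validate(good, sizeof(good)),
               validate(bad, sizeof(bad)));   /* good=0 bad=-1 */
        return 0;
    }
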
index 3745f7c2b9c214756b778ab40a00af325c0e9e99..608c1c3fde1b2d452d141da3a124b5434326f75d 100644 (file)
@@ -130,13 +130,18 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
                pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
                size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
                size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
-               size_t total_remaining_bytes = ((offset + size) - pos);
+               loff_t total_remaining_bytes = ((offset + size) - pos);
+
+               if (fatal_signal_pending(current)) {
+                       rc = -EINTR;
+                       break;
+               }
 
                if (num_bytes > total_remaining_bytes)
                        num_bytes = total_remaining_bytes;
                if (pos < offset) {
                        /* remaining zeros to write, up to destination offset */
-                       size_t total_remaining_zeros = (offset - pos);
+                       loff_t total_remaining_zeros = (offset - pos);
 
                        if (num_bytes > total_remaining_zeros)
                                num_bytes = total_remaining_zeros;
@@ -193,15 +198,19 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
                }
                pos += num_bytes;
        }
-       if ((offset + size) > ecryptfs_file_size) {
-               i_size_write(ecryptfs_inode, (offset + size));
+       if (pos > ecryptfs_file_size) {
+               i_size_write(ecryptfs_inode, pos);
                if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
-                       rc = ecryptfs_write_inode_size_to_metadata(
+                       int rc2;
+
+                       rc2 = ecryptfs_write_inode_size_to_metadata(
                                                                ecryptfs_inode);
-                       if (rc) {
+                       if (rc2) {
                                printk(KERN_ERR "Problem with "
                                       "ecryptfs_write_inode_size_to_metadata; "
-                                      "rc = [%d]\n", rc);
+                                      "rc = [%d]\n", rc2);
+                               if (!rc)
+                                       rc = rc2;
                                goto out;
                        }
                }
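
Three small hardenings: the remaining-byte counters become loff_t so large offsets do not truncate where size_t is 32 bits, a fatal signal now breaks out of the zero-fill loop, and a failure while updating the size metadata no longer clobbers an earlier success code. The truncation risk is easy to show; the sketch below models what a 32-bit size_t would store for a large offset:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        long long offset = 6LL * 1024 * 1024 * 1024;   /* 6 GiB target offset */
        long long pos = 0;

        uint32_t as_size_t_32 = (uint32_t)(offset - pos);   /* 32-bit size_t view */
        long long as_loff_t = offset - pos;

        printf("32-bit size_t: %u bytes\n", as_size_t_32);   /* wraps to 2 GiB */
        printf("loff_t       : %lld bytes\n", as_loff_t);
        return 0;
    }
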
index f9cfd168fbe2d4d408800e964268a8d40a3ec9ac..35a852a2682f5c9e4625f2feff3f7996e13f6bfa 100644 (file)
  * simultaneous inserts (A into B and B into A) from racing and
  * constructing a cycle without either insert observing that it is
  * going to.
+ * It is necessary to acquire multiple "ep->mtx"es at once in the
+ * case when one epoll fd is added to another. In this case, we
+ * always acquire the locks in the order of nesting (i.e. after
+ * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
+ * before e2->mtx). Since we disallow cycles of epoll file
+ * descriptors, this ensures that the mutexes are well-ordered. In
+ * order to communicate this nesting to lockdep, when walking a tree
+ * of epoll file descriptors, we use the current recursion depth as
+ * the lockdep subkey.
  * It is possible to drop the "ep->mtx" and to use the global
  * mutex "epmutex" (together with "ep->lock") to have it working,
  * but having "ep->mtx" will make the interface more scalable.
@@ -188,6 +197,12 @@ struct eventpoll {
 
        /* The user that created the eventpoll descriptor */
        struct user_struct *user;
+
+       struct file *file;
+
+       /* used to optimize loop detection check */
+       int visited;
+       struct list_head visited_list_link;
 };
 
 /* Wait structure used by the poll hooks */
@@ -246,6 +261,15 @@ static struct kmem_cache *epi_cache __read_mostly;
 /* Slab cache used to allocate "struct eppoll_entry" */
 static struct kmem_cache *pwq_cache __read_mostly;
 
+/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
+static LIST_HEAD(visited_list);
+
+/*
+ * List of files with newly added links, where we may need to limit the number
+ * of emanating paths. Protected by the epmutex.
+ */
+static LIST_HEAD(tfile_check_list);
+
 #ifdef CONFIG_SYSCTL
 
 #include <linux/sysctl.h>
@@ -267,6 +291,12 @@ ctl_table epoll_table[] = {
 };
 #endif /* CONFIG_SYSCTL */
 
+static const struct file_operations eventpoll_fops;
+
+static inline int is_file_epoll(struct file *f)
+{
+       return f->f_op == &eventpoll_fops;
+}
 
 /* Setup the structure that is used as key for the RB tree */
 static inline void ep_set_ffd(struct epoll_filefd *ffd,
@@ -290,6 +320,11 @@ static inline int ep_is_linked(struct list_head *p)
        return !list_empty(p);
 }
 
+static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
+{
+       return container_of(p, struct eppoll_entry, wait);
+}
+
 /* Get the "struct epitem" from a wait queue pointer */
 static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
 {
@@ -437,6 +472,18 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
        put_cpu();
 }
 
+static void ep_remove_wait_queue(struct eppoll_entry *pwq)
+{
+       wait_queue_head_t *whead;
+
+       rcu_read_lock();
+       /* If it is cleared by POLLFREE, it should be rcu-safe */
+       whead = rcu_dereference(pwq->whead);
+       if (whead)
+               remove_wait_queue(whead, &pwq->wait);
+       rcu_read_unlock();
+}
+
 /*
  * This function unregisters poll callbacks from the associated file
  * descriptor.  Must be called with "mtx" held (or "epmutex" if called from
@@ -451,7 +498,7 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
                pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
 
                list_del(&pwq->llink);
-               remove_wait_queue(pwq->whead, &pwq->wait);
+               ep_remove_wait_queue(pwq);
                kmem_cache_free(pwq_cache, pwq);
        }
 }
@@ -464,13 +511,15 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
  * @ep: Pointer to the epoll private data structure.
  * @sproc: Pointer to the scan callback.
  * @priv: Private opaque data passed to the @sproc callback.
+ * @depth: The current depth of recursive f_op->poll calls.
  *
  * Returns: The same integer error code returned by the @sproc callback.
  */
 static int ep_scan_ready_list(struct eventpoll *ep,
                              int (*sproc)(struct eventpoll *,
                                           struct list_head *, void *),
-                             void *priv)
+                             void *priv,
+                             int depth)
 {
        int error, pwake = 0;
        unsigned long flags;
@@ -481,7 +530,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
         * We need to lock this because we could be hit by
         * eventpoll_release_file() and epoll_ctl().
         */
-       mutex_lock(&ep->mtx);
+       mutex_lock_nested(&ep->mtx, depth);
 
        /*
         * Steal the ready list, and re-init the original one to the
@@ -670,7 +719,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 
 static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
 {
-       return ep_scan_ready_list(priv, ep_read_events_proc, NULL);
+       return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
 }
 
 static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
@@ -700,12 +749,6 @@ static const struct file_operations eventpoll_fops = {
        .llseek         = noop_llseek,
 };
 
-/* Fast test to see if the file is an evenpoll file */
-static inline int is_file_epoll(struct file *f)
-{
-       return f->f_op == &eventpoll_fops;
-}
-
 /*
  * This is called from eventpoll_release() to unlink files from the eventpoll
  * interface. We need to have this facility to cleanup correctly files that are
@@ -737,7 +780,7 @@ void eventpoll_release_file(struct file *file)
 
                ep = epi->ep;
                list_del_init(&epi->fllink);
-               mutex_lock(&ep->mtx);
+               mutex_lock_nested(&ep->mtx, 0);
                ep_remove(ep, epi);
                mutex_unlock(&ep->mtx);
        }
@@ -816,6 +859,17 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
        struct epitem *epi = ep_item_from_wait(wait);
        struct eventpoll *ep = epi->ep;
 
+       if ((unsigned long)key & POLLFREE) {
+               ep_pwq_from_wait(wait)->whead = NULL;
+               /*
+                * whead = NULL above can race with ep_remove_wait_queue()
+                * which can do another remove_wait_queue() after us, so we
+                * can't use __remove_wait_queue(). whead->lock is held by
+                * the caller.
+                */
+               list_del_init(&wait->task_list);
+       }
+
        spin_lock_irqsave(&ep->lock, flags);
 
        /*
@@ -915,6 +969,103 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
        rb_insert_color(&epi->rbn, &ep->rbr);
 }
 
+
+
+#define PATH_ARR_SIZE 5
+/*
+ * These are the number paths of length 1 to 5, that we are allowing to emanate
+ * from a single file of interest. For example, we allow 1000 paths of length
+ * 1, to emanate from each file of interest. This essentially represents the
+ * potential wakeup paths, which need to be limited in order to avoid massive
+ * uncontrolled wakeup storms. The common use case should be a single ep which
+ * is connected to n file sources. In this case each file source has 1 path
+ * of length 1. Thus, the numbers below should be more than sufficient. These
+ * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
+ * and delete can't add additional paths. Protected by the epmutex.
+ */
+static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
+static int path_count[PATH_ARR_SIZE];
+
+static int path_count_inc(int nests)
+{
+       /* Allow an arbitrary number of depth 1 paths */
+       if (nests == 0)
+               return 0;
+
+       if (++path_count[nests] > path_limits[nests])
+               return -1;
+       return 0;
+}
+
+static void path_count_init(void)
+{
+       int i;
+
+       for (i = 0; i < PATH_ARR_SIZE; i++)
+               path_count[i] = 0;
+}
+
+static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
+{
+       int error = 0;
+       struct file *file = priv;
+       struct file *child_file;
+       struct epitem *epi;
+
+       list_for_each_entry(epi, &file->f_ep_links, fllink) {
+               child_file = epi->ep->file;
+               if (is_file_epoll(child_file)) {
+                       if (list_empty(&child_file->f_ep_links)) {
+                               if (path_count_inc(call_nests)) {
+                                       error = -1;
+                                       break;
+                               }
+                       } else {
+                               error = ep_call_nested(&poll_loop_ncalls,
+                                                       EP_MAX_NESTS,
+                                                       reverse_path_check_proc,
+                                                       child_file, child_file,
+                                                       current);
+                       }
+                       if (error != 0)
+                               break;
+               } else {
+                       printk(KERN_ERR "reverse_path_check_proc: "
+                               "file is not an ep!\n");
+               }
+       }
+       return error;
+}
+
+/**
+ * reverse_path_check - The tfile_check_list is list of file *, which have
+ *                      links that are proposed to be newly added. We need to
+ *                      make sure that those added links don't add too many
+ *                      paths such that we will spend all our time waking up
+ *                      eventpoll objects.
+ *
+ * Returns: Returns zero if the proposed links don't create too many paths,
+ *         -1 otherwise.
+ */
+static int reverse_path_check(void)
+{
+       int length = 0;
+       int error = 0;
+       struct file *current_file;
+
+       /* let's call this for all tfiles */
+       list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
+               length++;
+               path_count_init();
+               error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+                                       reverse_path_check_proc, current_file,
+                                       current_file, current);
+               if (error)
+                       break;
+       }
+       return error;
+}
+
 /*
  * Must be called with "mtx" held.
  */
@@ -976,6 +1127,11 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
         */
        ep_rbtree_insert(ep, epi);
 
+       /* now check if we've created too many backpaths */
+       error = -EINVAL;
+       if (reverse_path_check())
+               goto error_remove_epi;
+
        /* We have to drop the new item inside our item list to keep track of it */
        spin_lock_irqsave(&ep->lock, flags);
 
@@ -1000,6 +1156,14 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
        return 0;
 
+error_remove_epi:
+       spin_lock(&tfile->f_lock);
+       if (ep_is_linked(&epi->fllink))
+               list_del_init(&epi->fllink);
+       spin_unlock(&tfile->f_lock);
+
+       rb_erase(&epi->rbn, &ep->rbr);
+
 error_unregister:
        ep_unregister_pollwait(ep, epi);
 
@@ -1134,7 +1298,7 @@ static int ep_send_events(struct eventpoll *ep,
        esed.maxevents = maxevents;
        esed.events = events;
 
-       return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
+       return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
 }
 
 static inline struct timespec ep_set_mstimeout(long ms)
@@ -1264,18 +1428,36 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
        int error = 0;
        struct file *file = priv;
        struct eventpoll *ep = file->private_data;
+       struct eventpoll *ep_tovisit;
        struct rb_node *rbp;
        struct epitem *epi;
 
-       mutex_lock(&ep->mtx);
+       mutex_lock_nested(&ep->mtx, call_nests + 1);
+       ep->visited = 1;
+       list_add(&ep->visited_list_link, &visited_list);
        for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
                epi = rb_entry(rbp, struct epitem, rbn);
                if (unlikely(is_file_epoll(epi->ffd.file))) {
+                       ep_tovisit = epi->ffd.file->private_data;
+                       if (ep_tovisit->visited)
+                               continue;
                        error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
-                                              ep_loop_check_proc, epi->ffd.file,
-                                              epi->ffd.file->private_data, current);
+                                       ep_loop_check_proc, epi->ffd.file,
+                                       ep_tovisit, current);
                        if (error != 0)
                                break;
+               } else {
+                       /*
+                        * If we've reached a file that is not associated with
+                        * an ep, then we need to check if the newly added
+                        * links are going to add too many wakeup paths. We do
+                        * this by adding it to the tfile_check_list, if it's
+                        * not already there, and calling reverse_path_check()
+                        * during ep_insert().
+                        */
+                       if (list_empty(&epi->ffd.file->f_tfile_llink))
+                               list_add(&epi->ffd.file->f_tfile_llink,
+                                        &tfile_check_list);
                }
        }
        mutex_unlock(&ep->mtx);
@@ -1296,8 +1478,31 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
  */
 static int ep_loop_check(struct eventpoll *ep, struct file *file)
 {
-       return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+       int ret;
+       struct eventpoll *ep_cur, *ep_next;
+
+       ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
                              ep_loop_check_proc, file, ep, current);
+       /* clear visited list */
+       list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
+                                                       visited_list_link) {
+               ep_cur->visited = 0;
+               list_del(&ep_cur->visited_list_link);
+       }
+       return ret;
+}
+
+static void clear_tfile_check_list(void)
+{
+       struct file *file;
+
+       /* first clear the tfile_check_list */
+       while (!list_empty(&tfile_check_list)) {
+               file = list_first_entry(&tfile_check_list, struct file,
+                                       f_tfile_llink);
+               list_del_init(&file->f_tfile_llink);
+       }
+       INIT_LIST_HEAD(&tfile_check_list);
 }
 
 /*
@@ -1305,8 +1510,9 @@ static int ep_loop_check(struct eventpoll *ep, struct file *file)
  */
 SYSCALL_DEFINE1(epoll_create1, int, flags)
 {
-       int error;
+       int error, fd;
        struct eventpoll *ep = NULL;
+       struct file *file;
 
        /* Check the EPOLL_* constant for consistency.  */
        BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
@@ -1323,11 +1529,25 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
         * Creates all the items needed to setup an eventpoll file. That is,
         * a file structure and a free file descriptor.
         */
-       error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
+       fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
+       if (fd < 0) {
+               error = fd;
+               goto out_free_ep;
+       }
+       file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
                                 O_RDWR | (flags & O_CLOEXEC));
-       if (error < 0)
-               ep_free(ep);
-
+       if (IS_ERR(file)) {
+               error = PTR_ERR(file);
+               goto out_free_fd;
+       }
+       fd_install(fd, file);
+       ep->file = file;
+       return fd;
+
+out_free_fd:
+       put_unused_fd(fd);
+out_free_ep:
+       ep_free(ep);
        return error;
 }
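
The rework above splits descriptor allocation (get_unused_fd_flags) from file creation (anon_inode_getfile) so that ep->file can be recorded before fd_install() publishes the descriptor, with separate unwind labels for each step. The user-visible contract is unchanged; a minimal userspace check (a sketch, not part of the patch) that EPOLL_CLOEXEC still reaches the descriptor through the O_CLOEXEC plumbing:

#include <sys/epoll.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int epfd = epoll_create1(EPOLL_CLOEXEC);
        if (epfd < 0) {
                perror("epoll_create1");
                return 1;
        }
        int fdflags = fcntl(epfd, F_GETFD);
        printf("FD_CLOEXEC set: %s\n", (fdflags & FD_CLOEXEC) ? "yes" : "no");
        close(epfd);
        return 0;
}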
 
@@ -1393,23 +1613,29 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
        /*
         * When we insert an epoll file descriptor, inside another epoll file
         * descriptor, there is the change of creating closed loops, which are
-        * better be handled here, than in more critical paths.
+        * better be handled here, than in more critical paths. While we are
+        * checking for loops we also determine the list of files reachable
+        * and hang them on the tfile_check_list, so we can check that we
+        * haven't created too many possible wakeup paths.
         *
-        * We hold epmutex across the loop check and the insert in this case, in
-        * order to prevent two separate inserts from racing and each doing the
-        * insert "at the same time" such that ep_loop_check passes on both
-        * before either one does the insert, thereby creating a cycle.
+        * We need to hold the epmutex across both ep_insert and ep_remove
+        * b/c we want to make sure we are looking at a coherent view of
+        * epoll network.
         */
-       if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
+       if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
                mutex_lock(&epmutex);
                did_lock_epmutex = 1;
-               error = -ELOOP;
-               if (ep_loop_check(ep, tfile) != 0)
-                       goto error_tgt_fput;
+       }
+       if (op == EPOLL_CTL_ADD) {
+               if (is_file_epoll(tfile)) {
+                       error = -ELOOP;
+                       if (ep_loop_check(ep, tfile) != 0)
+                               goto error_tgt_fput;
+               } else
+                       list_add(&tfile->f_tfile_llink, &tfile_check_list);
        }
 
-
-       mutex_lock(&ep->mtx);
+       mutex_lock_nested(&ep->mtx, 0);
 
        /*
         * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
@@ -1426,6 +1652,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                        error = ep_insert(ep, &epds, tfile, fd);
                } else
                        error = -EEXIST;
+               clear_tfile_check_list();
                break;
        case EPOLL_CTL_DEL:
                if (epi)
@@ -1444,7 +1671,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
        mutex_unlock(&ep->mtx);
 
 error_tgt_fput:
-       if (unlikely(did_lock_epmutex))
+       if (did_lock_epmutex)
                mutex_unlock(&epmutex);
 
        fput(tfile);
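
The epoll_ctl() changes above take epmutex for both ADD and DEL, run ep_loop_check() when an epoll descriptor is added to another, and collect the reachable non-epoll files on tfile_check_list so reverse_path_check() can cap the number of wakeup paths during ep_insert(). The cycle rejection is observable from userspace; a short demonstration (illustrative only, assumes a kernel with this check):

#include <sys/epoll.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int ep1 = epoll_create1(0);
        int ep2 = epoll_create1(0);
        struct epoll_event ev = { .events = EPOLLIN };

        ev.data.fd = ep2;
        if (epoll_ctl(ep1, EPOLL_CTL_ADD, ep2, &ev) != 0)
                perror("ep2 -> ep1");           /* nesting one instance is permitted */

        ev.data.fd = ep1;
        if (epoll_ctl(ep2, EPOLL_CTL_ADD, ep1, &ev) != 0)
                printf("ep1 -> ep2 rejected: %s\n", strerror(errno)); /* expect ELOOP */

        close(ep1);
        close(ep2);
        return 0;
}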
index bfc2dc43681d41c6b54fe405174cc480de1663e9..0b3da7cc8aba919fff46a72b16d63df4d0637269 100644 (file)
@@ -561,8 +561,12 @@ got:
        if (IS_DIRSYNC(inode))
                handle->h_sync = 1;
        if (insert_inode_locked(inode) < 0) {
-               err = -EINVAL;
-               goto fail_drop;
+               /*
+                * Likely a bitmap corruption causing inode to be allocated
+                * twice.
+                */
+               err = -EIO;
+               goto fail;
        }
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
index 3451d23c3bae3eedd75d8504694a22735e7dc291..db9ba1a3f7f8a0f58370a31d24180e2dad31f678 100644 (file)
@@ -1568,7 +1568,13 @@ static int ext3_ordered_writepage(struct page *page,
        int err;
 
        J_ASSERT(PageLocked(page));
-       WARN_ON_ONCE(IS_RDONLY(inode));
+       /*
+        * We don't want to warn for emergency remount. The condition is
+        * ordered to avoid dereferencing inode->i_sb in non-error case to
+        * avoid slow-downs.
+        */
+       WARN_ON_ONCE(IS_RDONLY(inode) &&
+                    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
 
        /*
         * We give up here if we're reentered, because it might be for a
@@ -1642,7 +1648,13 @@ static int ext3_writeback_writepage(struct page *page,
        int err;
 
        J_ASSERT(PageLocked(page));
-       WARN_ON_ONCE(IS_RDONLY(inode));
+       /*
+        * We don't want to warn for emergency remount. The condition is
+        * ordered to avoid dereferencing inode->i_sb in non-error case to
+        * avoid slow-downs.
+        */
+       WARN_ON_ONCE(IS_RDONLY(inode) &&
+                    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
 
        if (ext3_journal_current_handle())
                goto out_fail;
@@ -1684,7 +1696,13 @@ static int ext3_journalled_writepage(struct page *page,
        int err;
 
        J_ASSERT(PageLocked(page));
-       WARN_ON_ONCE(IS_RDONLY(inode));
+       /*
+        * We don't want to warn for emergency remount. The condition is
+        * ordered to avoid dereferencing inode->i_sb in non-error case to
+        * avoid slow-downs.
+        */
+       WARN_ON_ONCE(IS_RDONLY(inode) &&
+                    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
 
        if (ext3_journal_current_handle())
                goto no_write;
index 354619a1aed949ee3b4bb14a4757334d0f2d673f..1a34c1c84604528cc8ba49e3e8dcbca65a356cc9 100644 (file)
@@ -175,6 +175,7 @@ struct mpage_da_data {
  */
 #define        EXT4_IO_END_UNWRITTEN   0x0001
 #define EXT4_IO_END_ERROR      0x0002
+#define EXT4_IO_END_QUEUED     0x0004
 
 struct ext4_io_page {
        struct page     *p_page;
@@ -357,8 +358,7 @@ struct flex_groups {
 
 /* Flags that should be inherited by new inodes from their parent. */
 #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
-                          EXT4_SYNC_FL | EXT4_IMMUTABLE_FL | EXT4_APPEND_FL |\
-                          EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
+                          EXT4_SYNC_FL | EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
                           EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\
                           EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL)
 
index 5802fa1dab18f4a8aac358bd79f3ae92109daf02..95af6f8785018e1afc684518ed81e829a8186024 100644 (file)
@@ -261,43 +261,45 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
 /* super.c */
 int ext4_force_commit(struct super_block *sb);
 
-static inline int ext4_should_journal_data(struct inode *inode)
+/*
+ * Ext4 inode journal modes
+ */
+#define EXT4_INODE_JOURNAL_DATA_MODE   0x01 /* journal data mode */
+#define EXT4_INODE_ORDERED_DATA_MODE   0x02 /* ordered data mode */
+#define EXT4_INODE_WRITEBACK_DATA_MODE 0x04 /* writeback data mode */
+
+static inline int ext4_inode_journal_mode(struct inode *inode)
 {
        if (EXT4_JOURNAL(inode) == NULL)
-               return 0;
-       if (!S_ISREG(inode->i_mode))
-               return 1;
-       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
-               return 1;
-       if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
-               return 1;
-       return 0;
+               return EXT4_INODE_WRITEBACK_DATA_MODE;  /* writeback */
+       /* We do not support data journalling with delayed allocation */
+       if (!S_ISREG(inode->i_mode) ||
+           test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+               return EXT4_INODE_JOURNAL_DATA_MODE;    /* journal data */
+       if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
+           !test_opt(inode->i_sb, DELALLOC))
+               return EXT4_INODE_JOURNAL_DATA_MODE;    /* journal data */
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+               return EXT4_INODE_ORDERED_DATA_MODE;    /* ordered */
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
+               return EXT4_INODE_WRITEBACK_DATA_MODE;  /* writeback */
+       else
+               BUG();
+}
+
+static inline int ext4_should_journal_data(struct inode *inode)
+{
+       return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE;
 }
 
 static inline int ext4_should_order_data(struct inode *inode)
 {
-       if (EXT4_JOURNAL(inode) == NULL)
-               return 0;
-       if (!S_ISREG(inode->i_mode))
-               return 0;
-       if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
-               return 0;
-       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
-               return 1;
-       return 0;
+       return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE;
 }
 
 static inline int ext4_should_writeback_data(struct inode *inode)
 {
-       if (EXT4_JOURNAL(inode) == NULL)
-               return 1;
-       if (!S_ISREG(inode->i_mode))
-               return 0;
-       if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
-               return 0;
-       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
-               return 1;
-       return 0;
+       return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE;
 }
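
The hunk above replaces three overlapping predicates with one classifier, ext4_inode_journal_mode(), whose result is a flag word, so each ext4_should_*_data() helper becomes a single bit test, and the per-inode journal-data attribute is ignored when delayed allocation is enabled. A standalone model of that shape (the mock fields below stand in for the real superblock and inode state, and the delalloc special case is omitted):

#include <stdio.h>

#define JOURNAL_DATA_MODE   0x01
#define ORDERED_DATA_MODE   0x02
#define WRITEBACK_DATA_MODE 0x04

/* Mock inputs; in the kernel these come from the inode and superblock. */
struct mock_inode {
        int has_journal;
        int is_regular;
        int mount_data_mode;    /* one of the three modes above */
        int journal_data_flag;  /* per-inode "journal data" attribute */
};

static int inode_journal_mode(const struct mock_inode *i)
{
        if (!i->has_journal)
                return WRITEBACK_DATA_MODE;
        if (!i->is_regular || i->mount_data_mode == JOURNAL_DATA_MODE ||
            i->journal_data_flag)
                return JOURNAL_DATA_MODE;
        return i->mount_data_mode;      /* ordered or writeback */
}

static int should_journal_data(const struct mock_inode *i)
{
        return inode_journal_mode(i) & JOURNAL_DATA_MODE;
}

static int should_order_data(const struct mock_inode *i)
{
        return inode_journal_mode(i) & ORDERED_DATA_MODE;
}

int main(void)
{
        struct mock_inode i = { 1, 1, ORDERED_DATA_MODE, 0 };
        printf("journal=%d ordered=%d\n",
               !!should_journal_data(&i), !!should_order_data(&i));
        return 0;
}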
 
 /*
index f3aacb32059f81ca951c2d47728e18ef8e04975e..611647b28a4dd3c0e0b7fdc5b5693a3ccdd5270e 100644 (file)
@@ -341,6 +341,8 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);
 
+       if (len == 0)
+               return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
 }
 
@@ -2844,7 +2846,7 @@ static int ext4_split_extent_at(handle_t *handle,
                if (err)
                        goto fix_extent_len;
                /* update the extent length and mark as initialized */
-               ex->ee_len = cpu_to_le32(ee_len);
+               ex->ee_len = cpu_to_le16(ee_len);
                ext4_ext_try_to_merge(inode, path, ex);
                err = ext4_ext_dirty(handle, inode, path + depth);
                goto out;
index 21bb2f61e50223c2da0946c4b48db0e4c947e1a7..412469b241a8c3eba95307839e113b1142c24d22 100644 (file)
@@ -1021,8 +1021,12 @@ got:
        if (IS_DIRSYNC(inode))
                ext4_handle_sync(handle);
        if (insert_inode_locked(inode) < 0) {
-               err = -EINVAL;
-               goto fail_drop;
+               /*
+                * Likely a bitmap corruption causing inode to be allocated
+                * twice.
+                */
+               err = -EIO;
+               goto fail;
        }
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
index c94774c3276e6f9f1665597cfbad4c596445e6ee..c1e6a7263893b086d66026c8d7bcc5f9d3a1e82f 100644 (file)
@@ -190,9 +190,6 @@ void ext4_evict_inode(struct inode *inode)
 
        trace_ext4_evict_inode(inode);
 
-       mutex_lock(&inode->i_mutex);
-       ext4_flush_completed_IO(inode);
-       mutex_unlock(&inode->i_mutex);
        ext4_ioend_wait(inode);
 
        if (inode->i_nlink) {
@@ -2129,8 +2126,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                                        clear_buffer_unwritten(bh);
                                }
 
-                               /* skip page if block allocation undone */
-                               if (buffer_delay(bh) || buffer_unwritten(bh))
+                               /*
+                                * skip page if block allocation undone and
+                                * block is dirty
+                                */
+                               if (ext4_bh_delay_or_unwritten(NULL, bh))
                                        skip_page = 1;
                                bh = bh->b_this_page;
                                block_start += bh->b_size;
@@ -3212,13 +3212,14 @@ static int ext4_da_write_end(struct file *file,
        int write_mode = (int)(unsigned long)fsdata;
 
        if (write_mode == FALL_BACK_TO_NONDELALLOC) {
-               if (ext4_should_order_data(inode)) {
+               switch (ext4_inode_journal_mode(inode)) {
+               case EXT4_INODE_ORDERED_DATA_MODE:
                        return ext4_ordered_write_end(file, mapping, pos,
                                        len, copied, page, fsdata);
-               } else if (ext4_should_writeback_data(inode)) {
+               case EXT4_INODE_WRITEBACK_DATA_MODE:
                        return ext4_writeback_write_end(file, mapping, pos,
                                        len, copied, page, fsdata);
-               } else {
+               default:
                        BUG();
                }
        }
@@ -3234,7 +3235,7 @@ static int ext4_da_write_end(struct file *file,
         */
 
        new_i_size = pos + copied;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
+       if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
                if (ext4_da_should_update_i_disksize(page, end)) {
                        down_write(&EXT4_I(inode)->i_data_sem);
                        if (new_i_size > EXT4_I(inode)->i_disksize) {
@@ -3510,12 +3511,17 @@ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retry:
-       if (rw == READ && ext4_should_dioread_nolock(inode))
+       if (rw == READ && ext4_should_dioread_nolock(inode)) {
+               if (unlikely(!list_empty(&ei->i_completed_io_list))) {
+                       mutex_lock(&inode->i_mutex);
+                       ext4_flush_completed_IO(inode);
+                       mutex_unlock(&inode->i_mutex);
+               }
                ret = __blockdev_direct_IO(rw, iocb, inode,
                                 inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
                                 ext4_get_block, NULL, NULL, 0);
-       else {
+       } else {
                ret = blockdev_direct_IO(rw, iocb, inode,
                                 inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
@@ -3913,18 +3919,25 @@ static const struct address_space_operations ext4_da_aops = {
 
 void ext4_set_aops(struct inode *inode)
 {
-       if (ext4_should_order_data(inode) &&
-               test_opt(inode->i_sb, DELALLOC))
-               inode->i_mapping->a_ops = &ext4_da_aops;
-       else if (ext4_should_order_data(inode))
-               inode->i_mapping->a_ops = &ext4_ordered_aops;
-       else if (ext4_should_writeback_data(inode) &&
-                test_opt(inode->i_sb, DELALLOC))
-               inode->i_mapping->a_ops = &ext4_da_aops;
-       else if (ext4_should_writeback_data(inode))
-               inode->i_mapping->a_ops = &ext4_writeback_aops;
-       else
+       switch (ext4_inode_journal_mode(inode)) {
+       case EXT4_INODE_ORDERED_DATA_MODE:
+               if (test_opt(inode->i_sb, DELALLOC))
+                       inode->i_mapping->a_ops = &ext4_da_aops;
+               else
+                       inode->i_mapping->a_ops = &ext4_ordered_aops;
+               break;
+       case EXT4_INODE_WRITEBACK_DATA_MODE:
+               if (test_opt(inode->i_sb, DELALLOC))
+                       inode->i_mapping->a_ops = &ext4_da_aops;
+               else
+                       inode->i_mapping->a_ops = &ext4_writeback_aops;
+               break;
+       case EXT4_INODE_JOURNAL_DATA_MODE:
                inode->i_mapping->a_ops = &ext4_journalled_aops;
+               break;
+       default:
+               BUG();
+       }
 }
 
 /*
index 808c554e773fdc2658c4708f1697edabab665acc..4cbe1c2c99687ff04ea816e69f974f0bdb6d4fd4 100644 (file)
@@ -35,7 +35,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                handle_t *handle = NULL;
                int err, migrate = 0;
                struct ext4_iloc iloc;
-               unsigned int oldflags;
+               unsigned int oldflags, mask, i;
                unsigned int jflag;
 
                if (!inode_owner_or_capable(inode))
@@ -112,9 +112,14 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                if (err)
                        goto flags_err;
 
-               flags = flags & EXT4_FL_USER_MODIFIABLE;
-               flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
-               ei->i_flags = flags;
+               for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
+                       if (!(mask & EXT4_FL_USER_MODIFIABLE))
+                               continue;
+                       if (mask & flags)
+                               ext4_set_inode_flag(inode, i);
+                       else
+                               ext4_clear_inode_flag(inode, i);
+               }
 
                ext4_set_inode_flags(inode);
                inode->i_ctime = ext4_current_time(inode);
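
Instead of overwriting ei->i_flags wholesale, the ioctl hunk above walks all 32 bits and updates only the user-modifiable ones through ext4_set_inode_flag()/ext4_clear_inode_flag(), so privileged bits are never clobbered by the store. The same loop in plain C, with an illustrative (non-ext4) mask:

#include <stdio.h>

#define USER_MODIFIABLE 0x000000ffu     /* illustrative mask, not ext4's */

static unsigned int apply_user_flags(unsigned int current_flags,
                                     unsigned int requested)
{
        unsigned int i, mask;

        for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
                if (!(mask & USER_MODIFIABLE))
                        continue;                /* privileged bits keep their value */
                if (requested & mask)
                        current_flags |= mask;   /* ext4_set_inode_flag() in the patch */
                else
                        current_flags &= ~mask;  /* ext4_clear_inode_flag() */
        }
        return current_flags;
}

int main(void)
{
        /* Bit 8 (0x100) is outside the modifiable mask and must survive untouched. */
        printf("0x%x\n", apply_user_flags(0x00000103u, 0x00000010u)); /* -> 0x110 */
        return 0;
}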
index 0f1be7f1637636532ef48ceffee046467a570e0a..b6adf68a5c02047bd978f02b833052d0e0de7dab 100644 (file)
@@ -2528,6 +2528,9 @@ int ext4_mb_release(struct super_block *sb)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
+       if (sbi->s_proc)
+               remove_proc_entry("mb_groups", sbi->s_proc);
+
        if (sbi->s_group_info) {
                for (i = 0; i < ngroups; i++) {
                        grinfo = ext4_get_group_info(sb, i);
@@ -2575,8 +2578,6 @@ int ext4_mb_release(struct super_block *sb)
        }
 
        free_percpu(sbi->s_locality_groups);
-       if (sbi->s_proc)
-               remove_proc_entry("mb_groups", sbi->s_proc);
 
        return 0;
 }
@@ -4583,6 +4584,7 @@ do_more:
                 */
                new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
                if (!new_entry) {
+                       ext4_mb_unload_buddy(&e4b);
                        err = -ENOMEM;
                        goto error_return;
                }
index 458a394f6d3cd7aa4c65cb1693d16ade82e4416d..3d36d5a1e19a108104252bd435a2d9a6543e226b 100644 (file)
@@ -1589,7 +1589,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                        dxtrace(dx_show_index("node", frames[1].entries));
                        dxtrace(dx_show_index("node",
                               ((struct dx_node *) bh2->b_data)->entries));
-                       err = ext4_handle_dirty_metadata(handle, inode, bh2);
+                       err = ext4_handle_dirty_metadata(handle, dir, bh2);
                        if (err)
                                goto journal_error;
                        brelse (bh2);
@@ -1615,7 +1615,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                        if (err)
                                goto journal_error;
                }
-               err = ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+               err = ext4_handle_dirty_metadata(handle, dir, frames[0].bh);
                if (err) {
                        ext4_std_error(inode->i_sb, err);
                        goto cleanup;
@@ -1866,7 +1866,7 @@ retry:
        ext4_set_de_type(dir->i_sb, de, S_IFDIR);
        inode->i_nlink = 2;
        BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-       err = ext4_handle_dirty_metadata(handle, dir, dir_block);
+       err = ext4_handle_dirty_metadata(handle, inode, dir_block);
        if (err)
                goto out_clear_inode;
        err = ext4_mark_inode_dirty(handle, inode);
@@ -2540,7 +2540,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
                                                cpu_to_le32(new_dir->i_ino);
                BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
-               retval = ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
+               retval = ext4_handle_dirty_metadata(handle, old_inode, dir_bh);
                if (retval) {
                        ext4_std_error(old_dir->i_sb, retval);
                        goto end_rename;
index 97e5e98fd42a291f79c4c314b21c5b8427ac3e63..d99d74aca8a70c3affeaf1ecb6b34d9630f8cf1d 100644 (file)
@@ -142,7 +142,23 @@ static void ext4_end_io_work(struct work_struct *work)
        unsigned long           flags;
        int                     ret;
 
-       mutex_lock(&inode->i_mutex);
+       if (!mutex_trylock(&inode->i_mutex)) {
+               /*
+                * Requeue the work instead of waiting so that the work
+                * items queued after this can be processed.
+                */
+               queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
+               /*
+                * To prevent the ext4-dio-unwritten thread from keeping
+                * requeueing end_io requests and occupying cpu for too long,
+                * yield the cpu if it sees an end_io request that has already
+                * been requeued.
+                */
+               if (io->flag & EXT4_IO_END_QUEUED)
+                       yield();
+               io->flag |= EXT4_IO_END_QUEUED;
+               return;
+       }
        ret = ext4_end_io_nolock(io);
        if (ret < 0) {
                mutex_unlock(&inode->i_mutex);
@@ -389,6 +405,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
                block_end = block_start + blocksize;
                if (block_start >= len) {
+                       /*
+                        * Comments copied from block_write_full_page_endio:
+                        *
+                        * The page straddles i_size.  It must be zeroed out on
+                        * each and every writepage invocation because it may
+                        * be mmapped.  "A file is mapped in multiples of the
+                        * page size.  For a file that is not a multiple of
+                        * the  page size, the remaining memory is zeroed when
+                        * mapped, and writes to that region are not written
+                        * out to the file."
+                        */
+                       zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
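
In the ext4_end_io_work() hunk above, the worker stops blocking on i_mutex: when the mutex is contended it requeues the item, and once an item carries EXT4_IO_END_QUEUED it yields so the worker thread cannot spin requeueing forever. A userspace analogue of that control flow (pthreads stand in for the kernel mutex and workqueue; none of the names below are ext4 API):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define WORK_QUEUED 0x1

struct work_item {
        int flags;
};

static pthread_mutex_t resource_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if the item was processed, 0 if it had to be re-queued. */
static int process_or_requeue(struct work_item *w)
{
        if (pthread_mutex_trylock(&resource_lock) != 0) {
                /* Lock is busy: hand the item back to the queue instead of waiting. */
                if (w->flags & WORK_QUEUED)
                        sched_yield();          /* already bounced once: give up the CPU */
                w->flags |= WORK_QUEUED;
                return 0;
        }
        /* ... do the real completion work here ... */
        pthread_mutex_unlock(&resource_lock);
        return 1;
}

int main(void)
{
        struct work_item w = { 0 };

        pthread_mutex_lock(&resource_lock);     /* simulate contention */
        printf("first attempt processed: %d\n", process_or_requeue(&w));
        pthread_mutex_unlock(&resource_lock);
        printf("second attempt processed: %d\n", process_or_requeue(&w));
        return 0;
}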
index 111ed9d3c546620009f2e5e96d587ca677df9efd..113b10768445861525cf42d64fbb3437c771121f 100644 (file)
@@ -433,6 +433,7 @@ void __ext4_error(struct super_block *sb, const char *function,
        printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
               sb->s_id, function, line, current->comm, &vaf);
        va_end(args);
+       save_error_info(sb, function, line);
 
        ext4_handle_error(sb);
 }
@@ -1113,9 +1114,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",block_validity");
 
        if (!test_opt(sb, INIT_INODE_TABLE))
-               seq_puts(seq, ",noinit_inode_table");
+               seq_puts(seq, ",noinit_itable");
        else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
-               seq_printf(seq, ",init_inode_table=%u",
+               seq_printf(seq, ",init_itable=%u",
                           (unsigned) sbi->s_li_wait_mult);
 
        ext4_show_quota_options(seq, sb);
@@ -1291,8 +1292,7 @@ enum {
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
        Opt_dioread_nolock, Opt_dioread_lock,
-       Opt_discard, Opt_nodiscard,
-       Opt_init_inode_table, Opt_noinit_inode_table,
+       Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
 };
 
 static const match_table_t tokens = {
@@ -1365,9 +1365,9 @@ static const match_table_t tokens = {
        {Opt_dioread_lock, "dioread_lock"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
-       {Opt_init_inode_table, "init_itable=%u"},
-       {Opt_init_inode_table, "init_itable"},
-       {Opt_noinit_inode_table, "noinit_itable"},
+       {Opt_init_itable, "init_itable=%u"},
+       {Opt_init_itable, "init_itable"},
+       {Opt_noinit_itable, "noinit_itable"},
        {Opt_err, NULL},
 };
 
@@ -1844,7 +1844,7 @@ set_qf_format:
                case Opt_dioread_lock:
                        clear_opt(sb, DIOREAD_NOLOCK);
                        break;
-               case Opt_init_inode_table:
+               case Opt_init_itable:
                        set_opt(sb, INIT_INODE_TABLE);
                        if (args[0].from) {
                                if (match_int(&args[0], &option))
@@ -1855,7 +1855,7 @@ set_qf_format:
                                return 0;
                        sbi->s_li_wait_mult = option;
                        break;
-               case Opt_noinit_inode_table:
+               case Opt_noinit_itable:
                        clear_opt(sb, INIT_INODE_TABLE);
                        break;
                default:
@@ -1958,17 +1958,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
        struct ext4_group_desc *gdp = NULL;
        ext4_group_t flex_group_count;
        ext4_group_t flex_group;
-       int groups_per_flex = 0;
+       unsigned int groups_per_flex = 0;
        size_t size;
        int i;
 
        sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
-       groups_per_flex = 1 << sbi->s_log_groups_per_flex;
-
-       if (groups_per_flex < 2) {
+       if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
                sbi->s_log_groups_per_flex = 0;
                return 1;
        }
+       groups_per_flex = 1 << sbi->s_log_groups_per_flex;
 
        /* We allocate both existing and potentially added groups */
        flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
@@ -3620,7 +3619,8 @@ no_journal:
                goto failed_mount4;
        }
 
-       ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
+       if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
+               sb->s_flags |= MS_RDONLY;
 
        /* determine the minimum size of new large inodes, if present */
        if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
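
The ext4_fill_flex_info() hunk above range-checks s_log_groups_per_flex, an untrusted on-disk byte, before using it as a shift count; values outside 1..31 now fall back to the "no flex groups" path instead of producing a meaningless groups_per_flex. A minimal version of the same guard:

#include <stdio.h>

/* Returns groups per flex group, or 0 if the on-disk exponent is bogus. */
static unsigned int groups_per_flex(int log_groups_per_flex)
{
        if (log_groups_per_flex < 1 || log_groups_per_flex > 31)
                return 0;                       /* treat as "flex_bg effectively off" */
        return 1u << log_groups_per_flex;
}

int main(void)
{
        printf("%u %u %u\n", groups_per_flex(4),    /* 16 */
                             groups_per_flex(36),   /* rejected -> 0 */
                             groups_per_flex(0));   /* rejected -> 0 */
        return 0;
}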
index c757adc972506d672c78b7de03e654ede7eb8b1e..c2865cc3101e71d72e65304b8e2b27768814dfad 100644 (file)
@@ -487,18 +487,19 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                ext4_free_blocks(handle, inode, bh, 0, 1,
                                 EXT4_FREE_BLOCKS_METADATA |
                                 EXT4_FREE_BLOCKS_FORGET);
+               unlock_buffer(bh);
        } else {
                le32_add_cpu(&BHDR(bh)->h_refcount, -1);
+               if (ce)
+                       mb_cache_entry_release(ce);
+               unlock_buffer(bh);
                error = ext4_handle_dirty_metadata(handle, inode, bh);
                if (IS_SYNC(inode))
                        ext4_handle_sync(handle);
                dquot_free_block(inode, 1);
                ea_bdebug(bh, "refcount now=%d; releasing",
                          le32_to_cpu(BHDR(bh)->h_refcount));
-               if (ce)
-                       mb_cache_entry_release(ce);
        }
-       unlock_buffer(bh);
 out:
        ext4_std_error(inode->i_sb, error);
        return;
@@ -820,8 +821,14 @@ inserted:
                        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                                goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
 
+                       /*
+                        * take i_data_sem because we will test
+                        * i_delalloc_reserved_flag in ext4_mb_new_blocks
+                        */
+                       down_read((&EXT4_I(inode)->i_data_sem));
                        block = ext4_new_meta_blocks(handle, inode, goal, 0,
                                                     NULL, &error);
+                       up_read((&EXT4_I(inode)->i_data_sem));
                        if (error)
                                goto cleanup;
 
index fb6fc955331299df218dc4d9c560a122a0690e10..c858b5c83209bbe2ca16f56e93be195db46f6da7 100644 (file)
@@ -1516,7 +1516,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;
 
-       while (num) {
+       while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
                struct page *page;
                unsigned int this_num;
 
@@ -1530,6 +1530,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 
                num -= this_num;
                total_len += this_num;
+               index++;
        }
        req->misc.retrieve_in.offset = outarg->offset;
        req->misc.retrieve_in.size = total_len;
index d501607145951d7b5f4b1ac62e230eae27e6e452..c04a025c677f56edc39c4b389ff8539f47eca2f0 100644 (file)
@@ -858,6 +858,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
                if (stat) {
                        generic_fillattr(inode, stat);
                        stat->mode = fi->orig_i_mode;
+                       stat->ino = fi->orig_ino;
                }
        }
 
index b788becada76bf8616512fb766525fc89950eeec..f6215501097d428ee8baa9c13415bc7201530d20 100644 (file)
@@ -82,6 +82,9 @@ struct fuse_inode {
            preserve the original mode */
        mode_t orig_i_mode;
 
+       /** 64 bit inode number */
+       u64 orig_ino;
+
        /** Version of last attribute change */
        u64 attr_version;
 
index 38f84cd48b67d057798f8f75fe5c8f22f12b10dc..69a1e0f04f4468e8da03d477ad8a4d343e579a83 100644 (file)
@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        fi->nlookup = 0;
        fi->attr_version = 0;
        fi->writectr = 0;
+       fi->orig_ino = 0;
        INIT_LIST_HEAD(&fi->write_files);
        INIT_LIST_HEAD(&fi->queued_writes);
        INIT_LIST_HEAD(&fi->writepages);
@@ -140,6 +141,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
        return 0;
 }
 
+/*
+ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
+ * so that it will fit.
+ */
+static ino_t fuse_squash_ino(u64 ino64)
+{
+       ino_t ino = (ino_t) ino64;
+       if (sizeof(ino_t) < sizeof(u64))
+               ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
+       return ino;
+}
+
 void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
                                   u64 attr_valid)
 {
@@ -149,7 +162,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
        fi->attr_version = ++fc->attr_version;
        fi->i_time = attr_valid;
 
-       inode->i_ino     = attr->ino;
+       inode->i_ino     = fuse_squash_ino(attr->ino);
        inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
        inode->i_nlink   = attr->nlink;
        inode->i_uid     = attr->uid;
@@ -175,6 +188,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
        fi->orig_i_mode = inode->i_mode;
        if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
                inode->i_mode &= ~S_ISVTX;
+
+       fi->orig_ino = attr->ino;
 }
 
 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
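
fuse_squash_ino() above folds a 64-bit server inode number into ino_t by XOR-ing the high half into the low half when ino_t is only 32 bits wide, which keeps more of the value than plain truncation, while the original 64-bit number is preserved in orig_ino for stat(). The same helper lifted into standalone C with fixed-width types (small_ino_t is an illustrative stand-in for a 32-bit ino_t):

#include <inttypes.h>
#include <stdio.h>

typedef uint32_t small_ino_t;

static small_ino_t squash_ino(uint64_t ino64)
{
        small_ino_t ino = (small_ino_t)ino64;

        if (sizeof(small_ino_t) < sizeof(uint64_t))
                ino ^= ino64 >> (sizeof(uint64_t) - sizeof(small_ino_t)) * 8;
        return ino;
}

int main(void)
{
        /* low word 0x7, high word 0x5: 0x7 ^ 0x5 = 0x2 */
        printf("%" PRIu32 "\n", squash_ino(0x0000000500000007ULL));
        return 0;
}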
index 3ebc437736febb4e0ef37a3612ea6501c7eac59a..1cbdeea1db4441b21c386458046d0b080101bafb 100644 (file)
@@ -46,11 +46,26 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
        case HFS_EXT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
                                    mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
+               if (HFS_I(tree->inode)->alloc_blocks >
+                                       HFS_I(tree->inode)->first_blocks) {
+                       printk(KERN_ERR "hfs: invalid btree extent records\n");
+                       unlock_new_inode(tree->inode);
+                       goto free_inode;
+               }
+
                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        case HFS_CAT_CNID:
                hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
                                    mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
+
+               if (!HFS_I(tree->inode)->first_blocks) {
+                       printk(KERN_ERR "hfs: invalid btree extent records "
+                                                               "(0 size).\n");
+                       unlock_new_inode(tree->inode);
+                       goto free_inode;
+               }
+
                tree->inode->i_mapping->a_ops = &hfs_btree_aops;
                break;
        default:
@@ -59,11 +74,6 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
        }
        unlock_new_inode(tree->inode);
 
-       if (!HFS_I(tree->inode)->first_blocks) {
-               printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
-               goto free_inode;
-       }
-
        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
index e673a88b8ae7560b14796b611aa57b267193411f..b1ce4c7ad3fb4f33e12ff4c8e320368b851e52c8 100644 (file)
@@ -40,6 +40,8 @@ int hfs_mac2asc(struct super_block *sb, char *out, const struct hfs_name *in)
 
        src = in->name;
        srclen = in->len;
+       if (srclen > HFS_NAMELEN)
+               srclen = HFS_NAMELEN;
        dst = out;
        dstlen = HFS_MAX_NAMELEN;
        if (nls_io) {
index b4ba1b3193336ead42c6ffbfbbd365e058cf8f1e..408073ae7a272fa52063120402a227edc243217f 100644 (file)
@@ -360,6 +360,10 @@ int hfsplus_rename_cat(u32 cnid,
        err = hfs_brec_find(&src_fd);
        if (err)
                goto out;
+       if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
+               err = -EIO;
+               goto out;
+       }
 
        hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
                                src_fd.entrylength);
index 4df5059c25da67c4b1ddd2a868057951e6e54d60..159f5ebf519a9b41bd67dfcf5acf93339bb89695 100644 (file)
@@ -146,6 +146,11 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                filp->f_pos++;
                /* fall through */
        case 1:
+               if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
+                       err = -EIO;
+                       goto out;
+               }
+
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                        fd.entrylength);
                if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
@@ -177,6 +182,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        err = -EIO;
                        goto out;
                }
+
+               if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
+                       err = -EIO;
+                       goto out;
+               }
+
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                        fd.entrylength);
                type = be16_to_cpu(entry.type);
index 7b8112da285af85a68fc41a53cd80d47ff92109b..aac1563174ed3fa3ce8f2afcaa789e81d42950af 100644 (file)
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct bio *bio;
        int ret = 0;
-       unsigned int io_size;
+       u64 io_size;
        loff_t start;
        int offset;
 
index 85c098a499f33ce858bdfaf85f76f053bd1b9376..9d71c95b193e357a867b1e8c9184aa1e1e58495c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/statfs.h>
 #include <linux/types.h>
 #include <linux/pid_namespace.h>
+#include <linux/namei.h>
 #include <asm/uaccess.h>
 #include "os.h"
 
index 7aafeb8fa3005eb14fd09f75d94c17dbeefe8481..8b0c87530b04e810fa36783564713fa149f9a822 100644 (file)
@@ -238,17 +238,10 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
        loff_t isize;
        ssize_t retval = 0;
 
-       mutex_lock(&inode->i_mutex);
-
        /* validate length */
        if (len == 0)
                goto out;
 
-       isize = i_size_read(inode);
-       if (!isize)
-               goto out;
-
-       end_index = (isize - 1) >> huge_page_shift(h);
        for (;;) {
                struct page *page;
                unsigned long nr, ret;
@@ -256,18 +249,21 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
 
                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
+               isize = i_size_read(inode);
+               if (!isize)
+                       goto out;
+               end_index = (isize - 1) >> huge_page_shift(h);
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
-                       if (nr <= offset) {
+                       if (nr <= offset)
                                goto out;
-                       }
                }
                nr = nr - offset;
 
                /* Find the page */
-               page = find_get_page(mapping, index);
+               page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
@@ -279,17 +275,18 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
                        else
                                ra = 0;
                } else {
+                       unlock_page(page);
+
                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
                        ret = ra;
+                       page_cache_release(page);
                }
                if (ra < 0) {
                        if (retval == 0)
                                retval = ra;
-                       if (page)
-                               page_cache_release(page);
                        goto out;
                }
 
@@ -299,16 +296,12 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);
 
-               if (page)
-                       page_cache_release(page);
-
                /* short read or no more work */
                if ((ret != nr) || (len == 0))
                        break;
        }
 out:
        *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
-       mutex_unlock(&inode->i_mutex);
        return retval;
 }
 
index e2d4285fbe90ebcc511a96574283418bc933634d..9f36384e2e8779739451d97ed1d8182ad358bc69 100644 (file)
@@ -1131,6 +1131,14 @@ static int journal_get_superblock(journal_t *journal)
                goto out;
        }
 
+       if (be32_to_cpu(sb->s_first) == 0 ||
+           be32_to_cpu(sb->s_first) >= journal->j_maxlen) {
+               printk(KERN_WARNING
+                       "JBD: Invalid start block of journal: %u\n",
+                       be32_to_cpu(sb->s_first));
+               goto out;
+       }
+
        return 0;
 
 out:
index eef6979821a4c8db91f48854c46f81c291ef4c27..36c2e800e734b18452e2853e15619761109bafc6 100644 (file)
@@ -683,7 +683,7 @@ start_journal_io:
        if (commit_transaction->t_need_data_flush &&
            (journal->j_fs_dev != journal->j_dev) &&
            (journal->j_flags & JBD2_BARRIER))
-               blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+               blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
 
        /* Done it all: now write the commit record asynchronously. */
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
@@ -819,7 +819,7 @@ wait_for_iobuf:
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
            journal->j_flags & JBD2_BARRIER) {
-               blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
+               blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
        }
 
        if (err)
index 0dfa5b598e68fa3f358f043c442eb39558bf6f52..40c5fb73e9c696cbede4bbb5b6a66c372bf3d125 100644 (file)
@@ -1251,6 +1251,14 @@ static int journal_get_superblock(journal_t *journal)
                goto out;
        }
 
+       if (be32_to_cpu(sb->s_first) == 0 ||
+           be32_to_cpu(sb->s_first) >= journal->j_maxlen) {
+               printk(KERN_WARNING
+                       "JBD2: Invalid start block of journal: %u\n",
+                       be32_to_cpu(sb->s_first));
+               goto out;
+       }
+
        return 0;
 
 out:
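
The matching journal_get_superblock() hunks above (JBD and JBD2) add the same sanity check: s_first is read from disk in big-endian form and must be non-zero and smaller than j_maxlen, otherwise the journal superblock is rejected. A minimal standalone form of the check (assumes glibc's <endian.h> for be32toh/htobe32):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Accept an on-disk (big-endian) s_first only if 0 < s_first < maxlen. */
static int journal_start_block_valid(uint32_t s_first_be, uint32_t maxlen)
{
        uint32_t s_first = be32toh(s_first_be);

        return s_first != 0 && s_first < maxlen;
}

int main(void)
{
        printf("%d %d\n",
               journal_start_block_valid(htobe32(1), 8192),   /* 1: ok */
               journal_start_block_valid(htobe32(0), 8192));  /* 0: corrupt */
        return 0;
}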
index 2d7109414cdd6b7a4d21bdb2e738ff20581523a4..9baa39ea6be2ddfce3e069b672430791b6cf352c 100644 (file)
@@ -1902,6 +1902,8 @@ zap_buffer_unlocked:
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
+       clear_buffer_delay(bh);
+       clear_buffer_unwritten(bh);
        bh->b_bdev = NULL;
        return may_free;
 }
index 31dce611337cffcf67e8330bb2f6b6e8608bde1e..4bbd5211bb326d78830c49aba249720cfde36be8 100644 (file)
@@ -225,8 +225,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
                        return 0;
 
                D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n"));
-               spin_lock(&c->erase_completion_lock);
                mutex_lock(&c->alloc_sem);
+               spin_lock(&c->erase_completion_lock);
        }
 
        /* First, work out which block we're garbage-collecting */
index f848b52c67b19e565567168a2bd5810fcc5f0a6c..046bb77c60131fd2bebdbfde16fc2f939f70ea4f 100644 (file)
@@ -241,7 +241,7 @@ static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat)
        p = xdr_inline_decode(xdr, 4);
        if (unlikely(p == NULL))
                goto out_overflow;
-       if (unlikely(*p > nlm4_failed))
+       if (unlikely(ntohl(*p) > ntohl(nlm4_failed)))
                goto out_bad_xdr;
        *stat = *p;
        return 0;
index 180ac34feb9a8630e3bbeff633b06d858a1420f8..36057cedac62f0f3fc13e333fe845a72c980474a 100644 (file)
@@ -236,7 +236,7 @@ static int decode_nlm_stat(struct xdr_stream *xdr,
        p = xdr_inline_decode(xdr, 4);
        if (unlikely(p == NULL))
                goto out_overflow;
-       if (unlikely(*p > nlm_lck_denied_grace_period))
+       if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period)))
                goto out_enum;
        *stat = *p;
        return 0;
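
Both decode_nlm*_stat() hunks above fix a byte-order bug in status validation: *p and the nlm status constants are big-endian on the wire, and comparing them as raw host integers on a little-endian machine orders them incorrectly, so an out-of-range (untrusted) status could slip past the check. A quick illustration (the value 24 is only a stand-in for the largest valid status):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t limit_be = htonl(24);   /* stand-in for nlm_lck_denied_grace_period */
        uint32_t wire_be  = htonl(256);  /* bogus, out-of-range status from the wire */

        /* Raw comparison of big-endian words: wrong on little-endian hosts. */
        printf("raw:   bogus > limit? %d\n", wire_be > limit_be);
        /* Comparison after ntohl(): correct on any host. */
        printf("ntohl: bogus > limit? %d\n", ntohl(wire_be) > ntohl(limit_be));
        return 0;
}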
index abfff9d7979dc60b57646b6129cdb7dd279c6041..1743064cd0cee5930e7c284ccade5a5771255736 100644 (file)
@@ -440,7 +440,7 @@ static int param_set_##name(const char *val, struct kernel_param *kp)       \
        __typeof__(type) num = which_strtol(val, &endp, 0);             \
        if (endp == val || *endp || num < (min) || num > (max))         \
                return -EINVAL;                                         \
-       *((int *) kp->arg) = num;                                       \
+       *((type *) kp->arg) = num;                                      \
        return 0;                                                       \
 }
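
The one-character change above matters because param_set_##name() is instantiated for parameter types other than int; storing the parsed value through an int cast writes the wrong number of bytes when the variable behind kp->arg is, say, an unsigned long. A userspace rendition of the corrected macro (struct fake_kernel_param and the other names below are made up for illustration):

#include <stdio.h>
#include <stdlib.h>

struct fake_kernel_param { void *arg; };

#define DEFINE_PARAM_SETTER(name, type, which_strtol, min, max)              \
static int param_set_##name(const char *val, struct fake_kernel_param *kp)   \
{                                                                             \
        char *endp;                                                           \
        type num = which_strtol(val, &endp, 0);                               \
        if (endp == val || *endp || num < (min) || num > (max))               \
                return -1;                                                    \
        *((type *) kp->arg) = num;   /* the fix: cast to 'type', not int */   \
        return 0;                                                             \
}

DEFINE_PARAM_SETTER(timeout, unsigned long, strtoul, 1UL, 3600UL)

int main(void)
{
        unsigned long timeout = 0;
        struct fake_kernel_param kp = { &timeout };

        param_set_timeout("300", &kp);
        printf("timeout=%lu\n", timeout);   /* 300, full width written */
        return 0;
}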
 
index b456c7a2e6b4c48800351ca03f9b7788cb76d1e5..16bda6cd602d8ff95d9eb06f7f2c124a9dd29b71 100644 (file)
@@ -136,7 +136,7 @@ static int do_getname(const char __user *filename, char *page)
        return retval;
 }
 
-static char *getname_flags(const char __user * filename, int flags)
+static char *getname_flags(const char __user *filename, int flags, int *empty)
 {
        char *tmp, *result;
 
@@ -147,6 +147,8 @@ static char *getname_flags(const char __user * filename, int flags)
 
                result = tmp;
                if (retval < 0) {
+                       if (retval == -ENOENT && empty)
+                               *empty = 1;
                        if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
                                __putname(tmp);
                                result = ERR_PTR(retval);
@@ -159,7 +161,7 @@ static char *getname_flags(const char __user * filename, int flags)
 
 char *getname(const char __user * filename)
 {
-       return getname_flags(filename, 0);
+       return getname_flags(filename, 0, 0);
 }
 
 #ifdef CONFIG_AUDITSYSCALL
@@ -779,17 +781,20 @@ static int follow_automount(struct path *path, unsigned flags,
        if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_CONTINUE))
                return -EISDIR; /* we actually want to stop here */
 
-       /* We want to mount if someone is trying to open/create a file of any
-        * type under the mountpoint, wants to traverse through the mountpoint
-        * or wants to open the mounted directory.
+       /* We don't want to mount if someone's just doing a stat -
+        * unless they're stat'ing a directory and appended a '/' to
+        * the name.
         *
-        * We don't want to mount if someone's just doing a stat and they've
-        * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
-        * appended a '/' to the name.
+        * We do, however, want to mount if someone wants to open or
+        * create a file of any type under the mountpoint, wants to
+        * traverse through the mountpoint or wants to open the
+        * mounted directory.  Also, autofs may mark negative dentries
+        * as being automount points.  These will need the attentions
+        * of the daemon to instantiate them before they can be used.
         */
-       if (!(flags & LOOKUP_FOLLOW) &&
-           !(flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY |
-                      LOOKUP_OPEN | LOOKUP_CREATE)))
+       if (!(flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY |
+                    LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+           path->dentry->d_inode)
                return -EISDIR;
 
        current->total_link_count++;
@@ -905,7 +910,7 @@ static int follow_managed(struct path *path, unsigned flags)
                mntput(path->mnt);
        if (ret == -EISDIR)
                ret = 0;
-       return ret;
+       return ret < 0 ? ret : need_mntput;
 }
 
 int follow_down_one(struct path *path)
@@ -953,6 +958,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                        break;
                path->mnt = mounted;
                path->dentry = mounted->mnt_root;
+               nd->flags |= LOOKUP_JUMPED;
                nd->seq = read_seqcount_begin(&path->dentry->d_seq);
                /*
                 * Update the inode too. We don't need to re-check the
@@ -1227,6 +1233,8 @@ retry:
                path_put_conditional(path, nd);
                return err;
        }
+       if (err)
+               nd->flags |= LOOKUP_JUMPED;
        *inode = path->dentry->d_inode;
        return 0;
 }
@@ -1747,11 +1755,11 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
        return __lookup_hash(&this, base, NULL);
 }
 
-int user_path_at(int dfd, const char __user *name, unsigned flags,
-                struct path *path)
+int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
+                struct path *path, int *empty)
 {
        struct nameidata nd;
-       char *tmp = getname_flags(name, flags);
+       char *tmp = getname_flags(name, flags, empty);
        int err = PTR_ERR(tmp);
        if (!IS_ERR(tmp)) {
 
@@ -1765,6 +1773,12 @@ int user_path_at(int dfd, const char __user *name, unsigned flags,
        return err;
 }
 
+int user_path_at(int dfd, const char __user *name, unsigned flags,
+                struct path *path)
+{
+       return user_path_at_empty(dfd, name, flags, path, 0);
+}
+
 static int user_path_parent(int dfd, const char __user *path,
                        struct nameidata *nd, char **name)
 {
@@ -2095,7 +2109,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
                /* sayonara */
                error = complete_walk(nd);
                if (error)
-                       return ERR_PTR(-ECHILD);
+                       return ERR_PTR(error);
 
                error = -ENOTDIR;
                if (nd->flags & LOOKUP_DIRECTORY) {
@@ -2107,6 +2121,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        }
 
        /* create side of things */
+       /*
+        * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been
+        * cleared when we got to the last component we are about to look up
+        */
        error = complete_walk(nd);
        if (error)
                return ERR_PTR(error);
@@ -2175,6 +2193,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        if (error < 0)
                goto exit_dput;
 
+       if (error)
+               nd->flags |= LOOKUP_JUMPED;
+
        error = -ENOENT;
        if (!path->dentry->d_inode)
                goto exit_dput;
@@ -2184,6 +2205,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
 
        path_to_nameidata(path, nd);
        nd->inode = path->dentry->d_inode;
+       /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
+       error = complete_walk(nd);
+       if (error)
+               return ERR_PTR(error);
        error = -EISDIR;
        if (S_ISDIR(nd->inode->i_mode))
                goto exit;
index fe59bd145d214b0157b75dd7b441c91e24dcb56c..b3d8f51c6fa379794acd9ef622a7f81c05a4e5a4 100644 (file)
@@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v)
        if (err)
                goto out;
        seq_putc(m, ' ');
-       seq_path_root(m, &mnt_path, &root, " \t\n\\");
-       if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
-               /*
-                * Mountpoint is outside root, discard that one.  Ugly,
-                * but less so than trying to do that in iterator in a
-                * race-free way (due to renames).
-                */
-               return SEQ_SKIP;
-       }
+
+       /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+       err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+       if (err)
+               goto out;
+
        seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
        show_mnt_opts(m, mnt);
 
@@ -1109,6 +1106,7 @@ static int show_vfsstat(struct seq_file *m, void *v)
 
        /* device */
        if (mnt->mnt_sb->s_op->show_devname) {
+               seq_puts(m, "device ");
                err = mnt->mnt_sb->s_op->show_devname(m, mnt);
        } else {
                if (mnt->mnt_devname) {
@@ -1246,8 +1244,9 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
                list_del_init(&p->mnt_expire);
                list_del_init(&p->mnt_list);
                __touch_mnt_namespace(p->mnt_ns);
+               if (p->mnt_ns)
+                       __mnt_make_shortterm(p);
                p->mnt_ns = NULL;
-               __mnt_make_shortterm(p);
                list_del_init(&p->mnt_child);
                if (p->mnt_parent != p) {
                        p->mnt_parent->mnt_ghosts++;
@@ -1757,7 +1756,7 @@ static int do_loopback(struct path *path, char *old_name,
                return err;
        if (!old_name || !*old_name)
                return -EINVAL;
-       err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
+       err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
        if (err)
                return err;
 
@@ -2724,3 +2723,8 @@ struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
        return vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
 }
 EXPORT_SYMBOL_GPL(kern_mount_data);
+
+bool our_mnt(struct vfsmount *mnt)
+{
+       return check_mnt(mnt);
+}
index aaa09e948a9cf642ff2fc7f80505b5bca2eaafc5..b5c826e17b6ab1c3ae045955eff1df2fee1e4eda 100644 (file)
@@ -324,7 +324,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
        dprintk("%s enter. slotid %d seqid %d\n",
                __func__, args->csa_slotid, args->csa_sequenceid);
 
-       if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
+       if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
                return htonl(NFS4ERR_BADSLOT);
 
        slot = tbl->slots + args->csa_slotid;
index 321a66bc3846453602c10e6bc5838dea88721a40..ecabbd8f6ee50989bdd4e9476a4f2cae5c7dde09 100644 (file)
@@ -466,6 +466,17 @@ static void nfs_delegation_run_state_manager(struct nfs_client *clp)
                nfs4_schedule_state_manager(clp);
 }
 
+void nfs_remove_bad_delegation(struct inode *inode)
+{
+       struct nfs_delegation *delegation;
+
+       delegation = nfs_detach_delegation(NFS_I(inode), NFS_SERVER(inode));
+       if (delegation) {
+               nfs_inode_find_state_and_recover(inode, &delegation->stateid);
+               nfs_free_delegation(delegation);
+       }
+}
+
 /**
  * nfs_expire_all_delegation_types
  * @clp: client to process
index d9322e490c56ff98a39e79295186653e0e80589e..691a796091842bfcde8de961a8d1b97a1190cd70 100644 (file)
@@ -45,6 +45,7 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
 void nfs_handle_cb_pathdown(struct nfs_client *clp);
 int nfs_client_return_marked_delegations(struct nfs_client *clp);
 int nfs_delegations_present(struct nfs_client *clp);
+void nfs_remove_bad_delegation(struct inode *inode);
 
 void nfs_delegation_mark_reclaim(struct nfs_client *clp);
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
index f91c62d48ff76772eafc36cf8eca3e821d54d9b5..462a00601737d8e025cd85752406ff7619508c27 100644 (file)
@@ -1458,12 +1458,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
                                res = NULL;
                                goto out;
                        /* This turned out not to be a regular file */
+                       case -EISDIR:
                        case -ENOTDIR:
                                goto no_open;
                        case -ELOOP:
                                if (!(nd->intent.open.flags & O_NOFOLLOW))
                                        goto no_open;
-                       /* case -EISDIR: */
                        /* case -EINVAL: */
                        default:
                                res = ERR_CAST(inode);
index 2f093ed16980016cff7d3ece166b23d99de2e790..dd2f13077bee4e7f7976bb030f7ece01470aa880 100644 (file)
@@ -887,3 +887,35 @@ static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
                        file->f_path.dentry->d_name.name, arg);
        return -EINVAL;
 }
+
+#ifdef CONFIG_NFS_V4
+static int
+nfs4_file_open(struct inode *inode, struct file *filp)
+{
+       /*
+        * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to
+        * this point, then something is very wrong
+        */
+       dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp);
+       return -ENOTDIR;
+}
+
+const struct file_operations nfs4_file_operations = {
+       .llseek         = nfs_file_llseek,
+       .read           = do_sync_read,
+       .write          = do_sync_write,
+       .aio_read       = nfs_file_read,
+       .aio_write      = nfs_file_write,
+       .mmap           = nfs_file_mmap,
+       .open           = nfs4_file_open,
+       .flush          = nfs_file_flush,
+       .release        = nfs_file_release,
+       .fsync          = nfs_file_fsync,
+       .lock           = nfs_lock,
+       .flock          = nfs_flock,
+       .splice_read    = nfs_file_splice_read,
+       .splice_write   = nfs_file_splice_write,
+       .check_flags    = nfs_check_flags,
+       .setlease       = nfs_setlease,
+};
+#endif /* CONFIG_NFS_V4 */
index 6f4850deb272857ae5829251d64bf7aabf702130..c48f9f6ad72a8adb9b60359dd15702244018733a 100644 (file)
@@ -291,7 +291,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
                 */
                inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
                if (S_ISREG(inode->i_mode)) {
-                       inode->i_fop = &nfs_file_operations;
+                       inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
                        inode->i_data.a_ops = &nfs_file_aops;
                        inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
                } else if (S_ISDIR(inode->i_mode)) {
index 38053d823eb061060cdccdd904ebe9f830ee0c7f..771741f14799de09ac17636fe5658a934e9d5adf 100644 (file)
@@ -853,6 +853,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
        .dentry_ops     = &nfs_dentry_operations,
        .dir_inode_ops  = &nfs3_dir_inode_operations,
        .file_inode_ops = &nfs3_file_inode_operations,
+       .file_ops       = &nfs_file_operations,
        .getroot        = nfs3_proc_get_root,
        .getattr        = nfs3_proc_getattr,
        .setattr        = nfs3_proc_setattr,
index c4a69833dd0d5b1abf3223e6e63f2153606ef9c5..e1c1365ba832d0c4066fe3326d8388bdfb98bef0 100644 (file)
@@ -209,6 +209,7 @@ struct nfs4_exception {
        long timeout;
        int retry;
        struct nfs4_state *state;
+       struct inode *inode;
 };
 
 struct nfs4_state_recovery_ops {
@@ -344,6 +345,8 @@ extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t);
 extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t);
 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
+extern void nfs_inode_find_state_and_recover(struct inode *inode,
+               const nfs4_stateid *stateid);
 extern void nfs4_schedule_lease_recovery(struct nfs_client *);
 extern void nfs4_schedule_state_manager(struct nfs_client *);
 extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
index 614c4d287d72c6521b08356fcfee4379f753249b..75af81211e4f953db666ea96f183a0f73f5100e4 100644 (file)
@@ -428,6 +428,14 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
 
        dprintk("--> %s\n", __func__);
 
+       /* FIXME: remove this check when layout segment support is added */
+       if (lgr->range.offset != 0 ||
+           lgr->range.length != NFS4_MAX_UINT64) {
+               dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
+                       __func__);
+               goto out;
+       }
+
        if (fl->pattern_offset > lgr->range.offset) {
                dprintk("%s pattern_offset %lld too large\n",
                                __func__, fl->pattern_offset);
index 92cfd2e113136f36ff1106b373565d376cf7c85e..b7a7e5fe401991009dd66172eb8c17f9dbbc5201 100644 (file)
@@ -94,6 +94,8 @@ static int nfs4_map_errors(int err)
        case -NFS4ERR_BADOWNER:
        case -NFS4ERR_BADNAME:
                return -EINVAL;
+       case -NFS4ERR_SHARE_DENIED:
+               return -EACCES;
        default:
                dprintk("%s could not handle NFSv4 error %d\n",
                                __func__, -err);
@@ -254,15 +256,28 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
 {
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_state *state = exception->state;
+       struct inode *inode = exception->inode;
        int ret = errorcode;
 
        exception->retry = 0;
        switch(errorcode) {
                case 0:
                        return 0;
+               case -NFS4ERR_OPENMODE:
+                       if (nfs_have_delegation(inode, FMODE_READ)) {
+                               nfs_inode_return_delegation(inode);
+                               exception->retry = 1;
+                               return 0;
+                       }
+                       if (state == NULL)
+                               break;
+                       nfs4_schedule_stateid_recovery(server, state);
+                       goto wait_on_recovery;
+               case -NFS4ERR_DELEG_REVOKED:
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_BAD_STATEID:
-               case -NFS4ERR_OPENMODE:
+                       if (state != NULL)
+                               nfs_remove_bad_delegation(state->inode);
                        if (state == NULL)
                                break;
                        nfs4_schedule_stateid_recovery(server, state);
@@ -1305,8 +1320,11 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
                                 * The show must go on: exit, but mark the
                                 * stateid as needing recovery.
                                 */
+                       case -NFS4ERR_DELEG_REVOKED:
                        case -NFS4ERR_ADMIN_REVOKED:
                        case -NFS4ERR_BAD_STATEID:
+                               nfs_inode_find_state_and_recover(state->inode,
+                                               stateid);
                                nfs4_schedule_stateid_recovery(server, state);
                        case -EKEYEXPIRED:
                                /*
@@ -1755,6 +1773,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
                        nfs_setattr_update_inode(state->inode, sattr);
                nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
        }
+       nfs_revalidate_inode(server, state->inode);
        nfs4_opendata_put(opendata);
        nfs4_put_state_owner(sp);
        *res = state;
@@ -1862,7 +1881,10 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
                           struct nfs4_state *state)
 {
        struct nfs_server *server = NFS_SERVER(inode);
-       struct nfs4_exception exception = { };
+       struct nfs4_exception exception = {
+               .state = state,
+               .inode = inode,
+       };
        int err;
        do {
                err = nfs4_handle_exception(server,
@@ -3678,8 +3700,11 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
        if (task->tk_status >= 0)
                return 0;
        switch(task->tk_status) {
+               case -NFS4ERR_DELEG_REVOKED:
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_BAD_STATEID:
+                       if (state != NULL)
+                               nfs_remove_bad_delegation(state->inode);
                case -NFS4ERR_OPENMODE:
                        if (state == NULL)
                                break;
@@ -4402,7 +4427,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
 {
        struct nfs_server *server = NFS_SERVER(state->inode);
-       struct nfs4_exception exception = { };
+       struct nfs4_exception exception = {
+               .inode = state->inode,
+       };
        int err;
 
        do {
@@ -4420,7 +4447,9 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
 {
        struct nfs_server *server = NFS_SERVER(state->inode);
-       struct nfs4_exception exception = { };
+       struct nfs4_exception exception = {
+               .inode = state->inode,
+       };
        int err;
 
        err = nfs4_set_lock_state(state, request);
@@ -4484,7 +4513,10 @@ out:
 
 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
-       struct nfs4_exception exception = { };
+       struct nfs4_exception exception = {
+               .state = state,
+               .inode = state->inode,
+       };
        int err;
 
        do {
@@ -4529,6 +4561,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
 
        if (state == NULL)
                return -ENOLCK;
+       /*
+        * Don't rely on the VFS having checked the file open mode,
+        * since it won't do this for flock() locks.
+        */
+       switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
+       case F_RDLCK:
+               if (!(filp->f_mode & FMODE_READ))
+                       return -EBADF;
+               break;
+       case F_WRLCK:
+               if (!(filp->f_mode & FMODE_WRITE))
+                       return -EBADF;
+       }
+
        do {
                status = nfs4_proc_setlk(state, cmd, request);
                if ((status != -EAGAIN) || IS_SETLK(cmd))
@@ -4577,6 +4623,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
                                 * The show must go on: exit, but mark the
                                 * stateid as needing recovery.
                                 */
+                       case -NFS4ERR_DELEG_REVOKED:
                        case -NFS4ERR_ADMIN_REVOKED:
                        case -NFS4ERR_BAD_STATEID:
                        case -NFS4ERR_OPENMODE:
@@ -6008,6 +6055,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
        .dentry_ops     = &nfs4_dentry_operations,
        .dir_inode_ops  = &nfs4_dir_inode_operations,
        .file_inode_ops = &nfs4_file_inode_operations,
+       .file_ops       = &nfs4_file_operations,
        .getroot        = nfs4_proc_get_root,
        .getattr        = nfs4_proc_getattr,
        .setattr        = nfs4_proc_setattr,
index e97dd219f84f4e205cf0a583fdb247d4ba01d807..c6e2769f65bb99932ec230d56ac261dfaff85504 100644 (file)
@@ -1069,6 +1069,33 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
        nfs4_schedule_state_manager(clp);
 }
 
+void nfs_inode_find_state_and_recover(struct inode *inode,
+               const nfs4_stateid *stateid)
+{
+       struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+       struct nfs_inode *nfsi = NFS_I(inode);
+       struct nfs_open_context *ctx;
+       struct nfs4_state *state;
+       bool found = false;
+
+       spin_lock(&inode->i_lock);
+       list_for_each_entry(ctx, &nfsi->open_files, list) {
+               state = ctx->state;
+               if (state == NULL)
+                       continue;
+               if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
+                       continue;
+               if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
+                       continue;
+               nfs4_state_mark_reclaim_nograce(clp, state);
+               found = true;
+       }
+       spin_unlock(&inode->i_lock);
+       if (found)
+               nfs4_schedule_state_manager(clp);
+}
+
+
 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
 {
        struct inode *inode = state->inode;
@@ -1519,16 +1546,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
        if (!flags)
                return;
-       else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+       if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
-       else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+       if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
                            SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED |
                            SEQ4_STATUS_LEASE_MOVED))
                nfs41_handle_state_revoked(clp);
-       else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+       if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
                nfs41_handle_recallable_state_revoked(clp);
-       else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+       if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
                            SEQ4_STATUS_BACKCHANNEL_FAULT |
                            SEQ4_STATUS_CB_PATH_DOWN_SESSION))
                nfs41_handle_cb_path_down(clp);
index 1d1dc1ee3943cbf115b389dff315b7e5c0f841b4..75fe694d78dea9c6604913ddb0c34258e8b1a276 100644 (file)
@@ -1006,7 +1006,8 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
 static struct pnfs_layoutdriver_type objlayout_type = {
        .id = LAYOUT_OSD2_OBJECTS,
        .name = "LAYOUT_OSD2_OBJECTS",
-       .flags                   = PNFS_LAYOUTRET_ON_SETATTR,
+       .flags                   = PNFS_LAYOUTRET_ON_SETATTR |
+                                  PNFS_LAYOUTRET_ON_ERROR,
 
        .alloc_layout_hdr        = objlayout_alloc_layout_hdr,
        .free_layout_hdr         = objlayout_free_layout_hdr,
index 1d06f8e2adeaec53fd8dc71c4e1a9cae2baef9c3..fefa1224aff3f9d7a2ffcf86876184840207fc90 100644 (file)
@@ -294,9 +294,11 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
        dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof);
        rdata = state->rpcdata;
        rdata->task.tk_status = status;
-       if (status >= 0) {
+       if (likely(status >= 0)) {
                rdata->res.count = status;
                rdata->res.eof = eof;
+       } else {
+               rdata->pnfs_error = status;
        }
        objlayout_iodone(state);
        /* must not use state after this point */
@@ -380,15 +382,17 @@ objlayout_write_done(struct objlayout_io_state *state, ssize_t status,
        wdata = state->rpcdata;
        state->status = status;
        wdata->task.tk_status = status;
-       if (status >= 0) {
+       if (likely(status >= 0)) {
                wdata->res.count = status;
                wdata->verf.committed = state->committed;
                dprintk("%s: Return status %d committed %d\n",
                        __func__, wdata->task.tk_status,
                        wdata->verf.committed);
-       } else
+       } else {
+               wdata->pnfs_error = status;
                dprintk("%s: Return status %d\n",
                        __func__, wdata->task.tk_status);
+       }
        objlayout_iodone(state);
        /* must not use state after this point */
 
index a726c0afa76ea364348d8514623b023ca454a711..99518872f42ccdbdd14180beb8c814bc315752ca 100644 (file)
@@ -980,7 +980,8 @@ pnfs_update_layout(struct inode *ino,
                arg.offset -= pg_offset;
                arg.length += pg_offset;
        }
-       arg.length = PAGE_CACHE_ALIGN(arg.length);
+       if (arg.length != NFS4_MAX_UINT64)
+               arg.length = PAGE_CACHE_ALIGN(arg.length);
 
        lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
        if (!lseg && first) {
@@ -1118,6 +1119,14 @@ pnfs_ld_write_done(struct nfs_write_data *data)
                data->mds_ops->rpc_release(data);
                return 0;
        }
+       if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
+                                       PNFS_LAYOUTRET_ON_ERROR) {
+               /* Don't lo_commit on error; the server will need to
+                * perform a file recovery.
+                */
+               clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(data->inode)->flags);
+               pnfs_return_layout(data->inode);
+       }
 
        dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
                data->pnfs_error);
@@ -1166,6 +1175,10 @@ pnfs_ld_read_done(struct nfs_read_data *data)
                return 0;
        }
 
+       if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
+                                               PNFS_LAYOUTRET_ON_ERROR)
+               pnfs_return_layout(data->inode);
+
        dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
                data->pnfs_error);
        status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
index 9d147d963bd950c44ca386c4a9d3a2a9647cbd49..bb8b3247f29c6fc37bfa3ee094923c13060ae582 100644 (file)
@@ -68,6 +68,7 @@ enum {
 enum layoutdriver_policy_flags {
        /* Should the pNFS client commit and return the layout upon a setattr */
        PNFS_LAYOUTRET_ON_SETATTR       = 1 << 0,
+       PNFS_LAYOUTRET_ON_ERROR         = 1 << 1,
 };
 
 struct nfs4_deviceid_node;
index ac40b8535d7e0e7493f13063afdbe57169837c9e..f48125da198a2d5a50bc0861805212a129427920 100644 (file)
@@ -710,6 +710,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
        .dentry_ops     = &nfs_dentry_operations,
        .dir_inode_ops  = &nfs_dir_inode_operations,
        .file_inode_ops = &nfs_file_inode_operations,
+       .file_ops       = &nfs_file_operations,
        .getroot        = nfs_proc_get_root,
        .getattr        = nfs_proc_getattr,
        .setattr        = nfs_proc_setattr,
index ce40e5c568ba393ec297fbdbc4d288b750491e5a..8e7b61d5829a0a1637938bf9d0468887b469bdc6 100644 (file)
@@ -904,10 +904,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
                data->auth_flavor_len   = 1;
                data->version           = version;
                data->minorversion      = 0;
+               security_init_mnt_opts(&data->lsm_opts);
        }
        return data;
 }
 
+static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
+{
+       if (data) {
+               kfree(data->client_address);
+               kfree(data->mount_server.hostname);
+               kfree(data->nfs_server.export_path);
+               kfree(data->nfs_server.hostname);
+               kfree(data->fscache_uniq);
+               security_free_mnt_opts(&data->lsm_opts);
+               kfree(data);
+       }
+}
+
 /*
  * Sanity-check a server address provided by the mount command.
  *
@@ -2218,9 +2232,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
        data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
        mntfh = nfs_alloc_fhandle();
        if (data == NULL || mntfh == NULL)
-               goto out_free_fh;
-
-       security_init_mnt_opts(&data->lsm_opts);
+               goto out;
 
        /* Validate the mount data */
        error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
@@ -2232,8 +2244,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
 #ifdef CONFIG_NFS_V4
        if (data->version == 4) {
                mntroot = nfs4_try_mount(flags, dev_name, data);
-               kfree(data->client_address);
-               kfree(data->nfs_server.export_path);
                goto out;
        }
 #endif /* CONFIG_NFS_V4 */
@@ -2284,13 +2294,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
        s->s_flags |= MS_ACTIVE;
 
 out:
-       kfree(data->nfs_server.hostname);
-       kfree(data->mount_server.hostname);
-       kfree(data->fscache_uniq);
-       security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
+       nfs_free_parsed_mount_data(data);
        nfs_free_fhandle(mntfh);
-       kfree(data);
        return mntroot;
 
 out_err_nosb:
@@ -2613,9 +2618,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
 
        mntfh = nfs_alloc_fhandle();
        if (data == NULL || mntfh == NULL)
-               goto out_free_fh;
-
-       security_init_mnt_opts(&data->lsm_opts);
+               goto out;
 
        /* Get a volume representation */
        server = nfs4_create_server(data, mntfh);
@@ -2663,13 +2666,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
 
        s->s_flags |= MS_ACTIVE;
 
-       security_free_mnt_opts(&data->lsm_opts);
        nfs_free_fhandle(mntfh);
        return mntroot;
 
 out:
-       security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
        nfs_free_fhandle(mntfh);
        return ERR_PTR(error);
 
@@ -2694,11 +2694,15 @@ static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
        char *root_devname;
        size_t len;
 
-       len = strlen(hostname) + 3;
+       len = strlen(hostname) + 5;
        root_devname = kmalloc(len, GFP_KERNEL);
        if (root_devname == NULL)
                return ERR_PTR(-ENOMEM);
-       snprintf(root_devname, len, "%s:/", hostname);
+       /* Does the hostname need to be enclosed in brackets? */
+       if (strchr(hostname, ':'))
+               snprintf(root_devname, len, "[%s]:/", hostname);
+       else
+               snprintf(root_devname, len, "%s:/", hostname);
        root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
        kfree(root_devname);
        return root_mnt;
@@ -2793,7 +2797,7 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
                goto out_put_mnt_ns;
 
        ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
-                       export_path, LOOKUP_FOLLOW, nd);
+                       export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, nd);
 
        nfs_referral_loop_unprotect();
        put_mnt_ns(ns_private);
@@ -2855,7 +2859,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
 
        data = nfs_alloc_parsed_mount_data(4);
        if (data == NULL)
-               goto out_free_data;
+               goto out;
 
        /* Validate the mount data */
        error = nfs4_validate_mount_data(raw_data, data, dev_name);
@@ -2869,12 +2873,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
                error = PTR_ERR(res);
 
 out:
-       kfree(data->client_address);
-       kfree(data->nfs_server.export_path);
-       kfree(data->nfs_server.hostname);
-       kfree(data->fscache_uniq);
-out_free_data:
-       kfree(data);
+       nfs_free_parsed_mount_data(data);
        dprintk("<-- nfs4_mount() = %d%s\n", error,
                        error != 0 ? " [error]" : "");
        return res;
index 727168059684e92b22b0c1015fc229668a4bf07d..f2f80c005c023d4911490555a874bb872a7dad55 100644 (file)
@@ -428,7 +428,6 @@ static void
 nfs_mark_request_dirty(struct nfs_page *req)
 {
        __set_page_dirty_nobuffers(req->wb_page);
-       __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -762,6 +761,8 @@ int nfs_updatepage(struct file *file, struct page *page,
        status = nfs_writepage_setup(ctx, page, offset, count);
        if (status < 0)
                nfs_set_pageerror(page);
+       else
+               __set_page_dirty_nobuffers(page);
 
        dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
                        status, (long long)i_size_read(inode));
@@ -1525,6 +1526,10 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
        int flags = FLUSH_SYNC;
        int ret = 0;
 
+       /* no commits means nothing needs to be done */
+       if (!nfsi->ncommit)
+               return ret;
+
        if (wbc->sync_mode == WB_SYNC_NONE) {
                /* Don't commit yet if this is a non-blocking flush and there
                 * are a lot of outstanding writes for this mapping.
@@ -1659,34 +1664,20 @@ out_error:
 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
                struct page *page)
 {
-       struct nfs_page *req;
-       int ret;
+       /*
+        * If PagePrivate is set, then the page is currently associated with
+        * an in-progress read or write request. Don't try to migrate it.
+        *
+        * FIXME: we could do this in principle, but we'll need a way to ensure
+        *        that we can safely release the inode reference while holding
+        *        the page lock.
+        */
+       if (PagePrivate(page))
+               return -EBUSY;
 
        nfs_fscache_release_page(page, GFP_KERNEL);
 
-       req = nfs_find_and_lock_request(page, false);
-       ret = PTR_ERR(req);
-       if (IS_ERR(req))
-               goto out;
-
-       ret = migrate_page(mapping, newpage, page);
-       if (!req)
-               goto out;
-       if (ret)
-               goto out_unlock;
-       page_cache_get(newpage);
-       spin_lock(&mapping->host->i_lock);
-       req->wb_page = newpage;
-       SetPagePrivate(newpage);
-       set_page_private(newpage, (unsigned long)req);
-       ClearPagePrivate(page);
-       set_page_private(page, 0);
-       spin_unlock(&mapping->host->i_lock);
-       page_cache_release(page);
-out_unlock:
-       nfs_clear_page_tag_locked(req);
-out:
-       return ret;
+       return migrate_page(mapping, newpage, page);
 }
 #endif
 
index b9566e46219f3ac8af1e9ab6916aadece4a02177..4b470f6043ec9277287ebf775fd3f1ab8ee3b8b7 100644 (file)
@@ -88,7 +88,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
        struct svc_expkey key;
        struct svc_expkey *ek = NULL;
 
-       if (mesg[mlen-1] != '\n')
+       if (mlen < 1 || mesg[mlen-1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;
 
index 08c6e36ab2eb05d8c28f68394a326573a7f52658..43f46cd9edea84e18040381a2647a54ca59debb0 100644 (file)
@@ -803,13 +803,13 @@ encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
        return p;
 }
 
-static int
+static __be32
 compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
                const char *name, int namlen)
 {
        struct svc_export       *exp;
        struct dentry           *dparent, *dchild;
-       int rv = 0;
+       __be32 rv = nfserr_noent;
 
        dparent = cd->fh.fh_dentry;
        exp  = cd->fh.fh_export;
@@ -817,26 +817,20 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
        if (isdotent(name, namlen)) {
                if (namlen == 2) {
                        dchild = dget_parent(dparent);
-                       if (dchild == dparent) {
-                               /* filesystem root - cannot return filehandle for ".." */
-                               dput(dchild);
-                               return -ENOENT;
-                       }
+                       /* filesystem root - cannot return filehandle for ".." */
+                       if (dchild == dparent)
+                               goto out;
                } else
                        dchild = dget(dparent);
        } else
                dchild = lookup_one_len(name, dparent, namlen);
        if (IS_ERR(dchild))
-               return -ENOENT;
-       rv = -ENOENT;
+               return rv;
        if (d_mountpoint(dchild))
                goto out;
-       rv = fh_compose(fhp, exp, dchild, &cd->fh);
-       if (rv)
-               goto out;
        if (!dchild->d_inode)
                goto out;
-       rv = 0;
+       rv = fh_compose(fhp, exp, dchild, &cd->fh);
 out:
        dput(dchild);
        return rv;
@@ -845,7 +839,7 @@ out:
 static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
 {
        struct svc_fh   fh;
-       int err;
+       __be32 err;
 
        fh_init(&fh, NFS3_FHSIZE);
        err = compose_entry_fh(cd, &fh, name, namlen);
index 3a6dbd70b34b57146cacfb8e22d0601481ec5349..d06a02c1b1a3d9135749ca655687097dbab0ef88 100644 (file)
@@ -156,6 +156,8 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
                !(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
                return nfserr_inval;
 
+       accmode |= NFSD_MAY_READ_IF_EXEC;
+
        if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
                accmode |= NFSD_MAY_READ;
        if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
@@ -682,7 +684,7 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        readdir->rd_bmval[1] &= nfsd_suppattrs1(cstate->minorversion);
        readdir->rd_bmval[2] &= nfsd_suppattrs2(cstate->minorversion);
 
-       if ((cookie > ~(u32)0) || (cookie == 1) || (cookie == 2) ||
+       if ((cookie == 1) || (cookie == 2) ||
            (cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
                return nfserr_bad_cookie;
 
@@ -810,6 +812,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
              struct nfsd4_setattr *setattr)
 {
        __be32 status = nfs_ok;
+       int err;
 
        if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
                nfs4_lock_state();
@@ -821,9 +824,9 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                        return status;
                }
        }
-       status = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
-       if (status)
-               return status;
+       err = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
+       if (err)
+               return nfserrno(err);
        status = nfs_ok;
 
        status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
@@ -921,7 +924,7 @@ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        count = 4 + (verify->ve_attrlen >> 2);
        buf = kmalloc(count << 2, GFP_KERNEL);
        if (!buf)
-               return nfserr_resource;
+               return nfserr_jukebox;
 
        status = nfsd4_encode_fattr(&cstate->current_fh,
                                    cstate->current_fh.fh_export,
index ffb59ef6f82f71a3813139bde209f53bccd54aca..be2681481704e4f12a74d0feb73211eec2bf71ab 100644 (file)
@@ -88,7 +88,7 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
        struct xdr_netobj cksum;
        struct hash_desc desc;
        struct scatterlist sg;
-       __be32 status = nfserr_resource;
+       __be32 status = nfserr_jukebox;
 
        dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
                        clname->len, clname->data);
index 3b8ad35561b377a66e9513a1e03a88367419aec0..92f7eb7c58630b660c7a2770e1359b244deba365 100644 (file)
@@ -188,8 +188,15 @@ static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
 {
        if (atomic_dec_and_test(&fp->fi_access[oflag])) {
-               nfs4_file_put_fd(fp, O_RDWR);
                nfs4_file_put_fd(fp, oflag);
+               /*
+                * It's also safe to get rid of the RDWR open *if*
+                * we no longer have need of the other kind of access
+                * or if we already have the other kind of open:
+                */
+               if (fp->fi_fds[1-oflag]
+                       || atomic_read(&fp->fi_access[1 - oflag]) == 0)
+                       nfs4_file_put_fd(fp, O_RDWR);
        }
 }
 
@@ -1903,7 +1910,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
         * of 5 bullet points, labeled as CASE0 - CASE4 below.
         */
        unconf = find_unconfirmed_client_by_str(dname, strhashval);
-       status = nfserr_resource;
+       status = nfserr_jukebox;
        if (!conf) {
                /*
                 * RFC 3530 14.2.33 CASE 4:
@@ -2440,7 +2447,7 @@ renew:
        if (open->op_stateowner == NULL) {
                sop = alloc_init_open_stateowner(strhashval, clp, open);
                if (sop == NULL)
-                       return nfserr_resource;
+                       return nfserr_jukebox;
                open->op_stateowner = sop;
        }
        list_del_init(&sop->so_close_lru);
@@ -2576,7 +2583,7 @@ nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_stateid **stpp,
 
        stp = nfs4_alloc_stateid();
        if (stp == NULL)
-               return nfserr_resource;
+               return nfserr_jukebox;
 
        status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
        if (status) {
@@ -2807,7 +2814,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                status = nfserr_bad_stateid;
                if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
                        goto out;
-               status = nfserr_resource;
+               status = nfserr_jukebox;
                fp = alloc_init_file(ino);
                if (fp == NULL)
                        goto out;
@@ -3381,8 +3388,9 @@ static inline void nfs4_file_downgrade(struct nfs4_stateid *stp, unsigned int to
        int i;
 
        for (i = 1; i < 4; i++) {
-               if (test_bit(i, &stp->st_access_bmap) && !(i & to_access)) {
-                       nfs4_file_put_access(stp->st_file, i);
+               if (test_bit(i, &stp->st_access_bmap)
+                                       && ((i & to_access) != i)) {
+                       nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(i));
                        __clear_bit(i, &stp->st_access_bmap);
                }
        }
@@ -3413,6 +3421,8 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
        if (!access_valid(od->od_share_access, cstate->minorversion)
                        || !deny_valid(od->od_share_deny))
                return nfserr_inval;
+       /* We don't yet support WANT bits: */
+       od->od_share_access &= NFS4_SHARE_ACCESS_MASK;
 
        nfs4_lock_state();
        if ((status = nfs4_preprocess_seqid_op(cstate,
@@ -3840,7 +3850,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                /* XXX: Do we need to check for duplicate stateowners on
                 * the same file, or should they just be allowed (and
                 * create new stateids)? */
-               status = nfserr_resource;
+               status = nfserr_jukebox;
                lock_sop = alloc_init_lock_stateowner(strhashval,
                                open_sop->so_client, open_stp, lock);
                if (lock_sop == NULL)
@@ -3924,9 +3934,9 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        case (EDEADLK):
                status = nfserr_deadlock;
                break;
-       default:        
+       default:
                dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
-               status = nfserr_resource;
+               status = nfserrno(err);
                break;
        }
 out:
@@ -3946,16 +3956,14 @@ out:
  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
  * inode operation.)
  */
-static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
+static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
 {
        struct file *file;
-       int err;
-
-       err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
-       if (err)
-               return err;
-       err = vfs_test_lock(file, lock);
-       nfsd_close(file);
+       __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
+       if (!err) {
+               err = nfserrno(vfs_test_lock(file, lock));
+               nfsd_close(file);
+       }
        return err;
 }
 
@@ -3968,7 +3976,6 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 {
        struct inode *inode;
        struct file_lock file_lock;
-       int error;
        __be32 status;
 
        if (locks_in_grace())
@@ -4020,12 +4027,10 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 
        nfs4_transform_lock_offset(&file_lock);
 
-       status = nfs_ok;
-       error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
-       if (error) {
-               status = nfserrno(error);
+       status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
+       if (status)
                goto out;
-       }
+
        if (file_lock.fl_type != F_UNLCK) {
                status = nfserr_denied;
                nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
index 990181103214de4f45d305753aea2daa064f44c0..6c740974bfe0c0042235f88aa92434d193edb48d 100644 (file)
@@ -1548,6 +1548,18 @@ static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
                                                                \
        save = resp->p;
 
+static bool seqid_mutating_err(__be32 err)
+{
+       /* rfc 3530 section 8.1.5: */
+       return  err != nfserr_stale_clientid &&
+               err != nfserr_stale_stateid &&
+               err != nfserr_bad_stateid &&
+               err != nfserr_bad_seqid &&
+               err != nfserr_bad_xdr &&
+               err != nfserr_resource &&
+               err != nfserr_nofilehandle;
+}
+
 /*
  * Routine for encoding the result of a "seqid-mutating" NFSv4 operation.  This
  * is where sequence id's are incremented, and the replay cache is filled.
index 6bd2f3c21f2b7195b355be089084bc8222176a45..858c7baea2d2ffb290aad4e9691cb4bd54493f67 100644 (file)
@@ -447,12 +447,6 @@ struct nfs4_stateid {
 #define WR_STATE               0x00000020
 #define CLOSE_STATE             0x00000040
 
-#define seqid_mutating_err(err)                       \
-       (((err) != nfserr_stale_clientid) &&    \
-       ((err) != nfserr_bad_seqid) &&          \
-       ((err) != nfserr_stale_stateid) &&      \
-       ((err) != nfserr_bad_stateid))
-
 struct nfsd4_compound_state;
 
 extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
index fd0acca5370a5da699fe62a5d94d0b6e81f67b78..acf88aea211a04beb05754143126d84f3a47a9b3 100644 (file)
@@ -2114,7 +2114,8 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
 
        /* Allow read access to binaries even when mode 111 */
        if (err == -EACCES && S_ISREG(inode->i_mode) &&
-           acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE))
+            (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
+             acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
                err = inode_permission(inode, MAY_EXEC);
 
        return err? nfserrno(err) : 0;
index e0bbac04d1dd01ec899e86666258c7c5405d2e8e..a22e40e278612ecdafa9f13e36c583c151436894 100644 (file)
@@ -25,6 +25,7 @@
 #define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
 #define NFSD_MAY_NOT_BREAK_LEASE 512
 #define NFSD_MAY_BYPASS_GSS    1024
+#define NFSD_MAY_READ_IF_EXEC  2048
 
 #define NFSD_MAY_CREATE                (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
 #define NFSD_MAY_REMOVE                (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
index 41d6743d303c2a7923e5d09bc1df1fcf41514c29..3e654273cfc28e04bf6c3dd608314eeef7c5ab9b 100644 (file)
@@ -842,6 +842,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case FS_IOC32_GETVERSION:
                cmd = FS_IOC_GETVERSION;
                break;
+       case NILFS_IOCTL_CHANGE_CPMODE:
+       case NILFS_IOCTL_DELETE_CHECKPOINT:
+       case NILFS_IOCTL_GET_CPINFO:
+       case NILFS_IOCTL_GET_CPSTAT:
+       case NILFS_IOCTL_GET_SUINFO:
+       case NILFS_IOCTL_GET_SUSTAT:
+       case NILFS_IOCTL_GET_VINFO:
+       case NILFS_IOCTL_GET_BDESCS:
+       case NILFS_IOCTL_CLEAN_SEGMENTS:
+       case NILFS_IOCTL_SYNC:
+       case NILFS_IOCTL_RESIZE:
+       case NILFS_IOCTL_SET_ALLOC_RANGE:
+               break;
        default:
                return -ENOIOCTLCMD;
        }
index d32714094375b72695bcacdfcf7cdd866be2ff05..35a89708b635333c5b690a3a9221ce8ab82e4d5c 100644 (file)
@@ -515,6 +515,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
                brelse(sbh[1]);
                sbh[1] = NULL;
                sbp[1] = NULL;
+               valid[1] = 0;
                swp = 0;
        }
        if (!valid[swp]) {
index 252ab1f6452b3344608839daa925f6c8382354b8..42ed195771f1a72322cf174798a0c2484c209cc0 100644 (file)
@@ -135,9 +135,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
 
        mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
 
-       /* 1 from caller and 1 for being on i_list/g_list */
-       BUG_ON(atomic_read(&mark->refcnt) < 2);
-
        spin_lock(&group->mark_lock);
 
        if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
@@ -181,6 +178,11 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
        if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
                iput(inode);
 
+       /*
+        * We don't necessarily have a ref on mark from caller so the above iput
+        * may have already destroyed it.  Don't touch from now on.
+        */
+
        /*
         * it's possible that this group tried to destroy itself, but
         * this mark was simultaneously being freed by inode.  If that's the
index ed553c60de827e0ebad24e3501e0e00d21c82cfc..76c8165deed95cd422a00fee2d0f0efb29c9763a 100644 (file)
@@ -1134,7 +1134,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle,
        }
 
        el = path_leaf_el(path);
-       rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1];
+       rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec) - 1];
 
        ocfs2_adjust_rightmost_records(handle, et, path, rec);
 
index ebfd3825f12a367b3c2786507146913a8191e56f..15d29ccefd4f766467429d5fd6f3a48091965fee 100644 (file)
@@ -1036,14 +1036,14 @@ static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
 
        tmp_el = left_path->p_node[subtree_root].el;
        blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
-       for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) {
+       for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
                if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
                        *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
                        break;
                }
        }
 
-       BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec));
+       BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
 
 out:
        ocfs2_free_path(left_path);
@@ -1468,7 +1468,7 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
 
        trace_ocfs2_divide_leaf_refcount_block(
                (unsigned long long)ref_leaf_bh->b_blocknr,
-               le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
+               le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
 
        /*
         * XXX: Improvement later.
@@ -2411,7 +2411,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
                                rb = (struct ocfs2_refcount_block *)
                                                        prev_bh->b_data;
 
-                               if (le64_to_cpu(rb->rf_records.rl_used) +
+                               if (le16_to_cpu(rb->rf_records.rl_used) +
                                    recs_add >
                                    le16_to_cpu(rb->rf_records.rl_count))
                                        ref_blocks++;
@@ -2476,7 +2476,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
        if (prev_bh) {
                rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
 
-               if (le64_to_cpu(rb->rf_records.rl_used) + recs_add >
+               if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
                    le16_to_cpu(rb->rf_records.rl_count))
                        ref_blocks++;
 
@@ -3629,7 +3629,7 @@ int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
                         * one will split a refcount rec, so totally we need
                         * clusters * 2 new refcount rec.
                         */
-                       if (le64_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
+                       if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
                            le16_to_cpu(rb->rf_records.rl_count))
                                ref_blocks++;
 
index ba5d97e4a73e8a43e64fc3cfa55fb9b97d4ec6a5..f169da4624fd07c5964c3d24d3b65e8a0dcd5ed7 100644 (file)
@@ -600,7 +600,7 @@ static void ocfs2_bg_alloc_cleanup(handle_t *handle,
                ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode,
                                          cluster_ac->ac_bh,
                                          le64_to_cpu(rec->e_blkno),
-                                         le32_to_cpu(rec->e_leaf_clusters));
+                                         le16_to_cpu(rec->e_leaf_clusters));
                if (ret)
                        mlog_errno(ret);
                /* Try all the clusters to free */
@@ -1628,7 +1628,7 @@ static int ocfs2_bg_discontig_fix_by_rec(struct ocfs2_suballoc_result *res,
 {
        unsigned int bpc = le16_to_cpu(cl->cl_bpc);
        unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc;
-       unsigned int bitcount = le32_to_cpu(rec->e_leaf_clusters) * bpc;
+       unsigned int bitcount = le16_to_cpu(rec->e_leaf_clusters) * bpc;
 
        if (res->sr_bit_offset < bitoff)
                return 0;
index dc290f032250dfc19b343ad8b45fbb8aaeeafb73..1648912c1a87c6988db6a4cfcc89726212276656 100644 (file)
@@ -550,17 +550,11 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
        }
 }
 
-int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
 {
-       struct parsed_partitions *state = NULL;
        struct disk_part_iter piter;
        struct hd_struct *part;
-       int p, highest, res;
-rescan:
-       if (state && !IS_ERR(state)) {
-               kfree(state);
-               state = NULL;
-       }
+       int res;
 
        if (bdev->bd_part_count)
        {
@@ -582,6 +576,24 @@ rescan:
                delete_partition(disk, part->partno);
        disk_part_iter_exit(&piter);
 
+       return 0;
+}
+
+int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+       struct parsed_partitions *state = NULL;
+       struct hd_struct *part;
+       int p, highest, res;
+rescan:
+       if (state && !IS_ERR(state)) {
+               kfree(state);
+               state = NULL;
+       }
+
+       res = drop_partitions(disk, bdev);
+       if (res)
+               return res;
+
        if (disk->fops->revalidate_disk)
                disk->fops->revalidate_disk(disk);
        check_disk_size_change(disk, bdev);
@@ -693,6 +705,26 @@ rescan:
        return 0;
 }
 
+int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+       int res;
+
+       if (!bdev->bd_invalidated)
+               return 0;
+
+       res = drop_partitions(disk, bdev);
+       if (res)
+               return res;
+
+       set_capacity(disk, 0);
+       check_disk_size_change(disk, bdev);
+       bdev->bd_invalidated = 0;
+       /* tell userspace that the media / partition table may have changed */
+       kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+
+       return 0;
+}
+
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 {
        struct address_space *mapping = bdev->bd_inode->i_mapping;
index da42f7db50de42640a7fa56df21e21bbb48cf588..0499a96287ad29713ab4635982c676ea0974233a 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -345,6 +345,16 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .get = generic_pipe_buf_get,
 };
 
+static const struct pipe_buf_operations packet_pipe_buf_ops = {
+       .can_merge = 0,
+       .map = generic_pipe_buf_map,
+       .unmap = generic_pipe_buf_unmap,
+       .confirm = generic_pipe_buf_confirm,
+       .release = anon_pipe_buf_release,
+       .steal = generic_pipe_buf_steal,
+       .get = generic_pipe_buf_get,
+};
+
 static ssize_t
 pipe_read(struct kiocb *iocb, const struct iovec *_iov,
           unsigned long nr_segs, loff_t pos)
@@ -406,6 +416,13 @@ redo:
                        ret += chars;
                        buf->offset += chars;
                        buf->len -= chars;
+
+                       /* Was it a packet buffer? Clean up and exit */
+                       if (buf->flags & PIPE_BUF_FLAG_PACKET) {
+                               total_len = chars;
+                               buf->len = 0;
+                       }
+
                        if (!buf->len) {
                                buf->ops = NULL;
                                ops->release(pipe, buf);
@@ -458,6 +475,11 @@ redo:
        return ret;
 }
 
+static inline int is_packetized(struct file *file)
+{
+       return (file->f_flags & O_DIRECT) != 0;
+}
+
 static ssize_t
 pipe_write(struct kiocb *iocb, const struct iovec *_iov,
            unsigned long nr_segs, loff_t ppos)
@@ -592,6 +614,11 @@ redo2:
                        buf->ops = &anon_pipe_buf_ops;
                        buf->offset = 0;
                        buf->len = chars;
+                       buf->flags = 0;
+                       if (is_packetized(filp)) {
+                               buf->ops = &packet_pipe_buf_ops;
+                               buf->flags = PIPE_BUF_FLAG_PACKET;
+                       }
                        pipe->nrbufs = ++bufs;
                        pipe->tmp_page = NULL;
 
@@ -1012,7 +1039,7 @@ struct file *create_write_pipe(int flags)
                goto err_dentry;
        f->f_mapping = inode->i_mapping;
 
-       f->f_flags = O_WRONLY | (flags & O_NONBLOCK);
+       f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
        f->f_version = 0;
 
        return f;
@@ -1056,7 +1083,7 @@ int do_pipe_flags(int *fd, int flags)
        int error;
        int fdw, fdr;
 
-       if (flags & ~(O_CLOEXEC | O_NONBLOCK))
+       if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
                return -EINVAL;
 
        fw = create_write_pipe(flags);
index efb304854b742e3bfeff3a6f5104b6130311089d..6a938aa9e29429179ccfef84f76ff881a2a80314 100644 (file)
@@ -200,65 +200,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
        return result;
 }
 
-static struct mm_struct *__check_mem_permission(struct task_struct *task)
-{
-       struct mm_struct *mm;
-
-       mm = get_task_mm(task);
-       if (!mm)
-               return ERR_PTR(-EINVAL);
-
-       /*
-        * A task can always look at itself, in case it chooses
-        * to use system calls instead of load instructions.
-        */
-       if (task == current)
-               return mm;
-
-       /*
-        * If current is actively ptrace'ing, and would also be
-        * permitted to freshly attach with ptrace now, permit it.
-        */
-       if (task_is_stopped_or_traced(task)) {
-               int match;
-               rcu_read_lock();
-               match = (tracehook_tracer_task(task) == current);
-               rcu_read_unlock();
-               if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
-                       return mm;
-       }
-
-       /*
-        * No one else is allowed.
-        */
-       mmput(mm);
-       return ERR_PTR(-EPERM);
-}
-
-/*
- * If current may access user memory in @task return a reference to the
- * corresponding mm, otherwise ERR_PTR.
- */
-static struct mm_struct *check_mem_permission(struct task_struct *task)
-{
-       struct mm_struct *mm;
-       int err;
-
-       /*
-        * Avoid racing if task exec's as we might get a new mm but validate
-        * against old credentials.
-        */
-       err = mutex_lock_killable(&task->signal->cred_guard_mutex);
-       if (err)
-               return ERR_PTR(err);
-
-       mm = __check_mem_permission(task);
-       mutex_unlock(&task->signal->cred_guard_mutex);
-
-       return mm;
-}
-
-struct mm_struct *mm_for_maps(struct task_struct *task)
+static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 {
        struct mm_struct *mm;
        int err;
@@ -269,7 +211,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
 
        mm = get_task_mm(task);
        if (mm && mm != current->mm &&
-                       !ptrace_may_access(task, PTRACE_MODE_READ) &&
+                       !ptrace_may_access(task, mode) &&
                        !capable(CAP_SYS_RESOURCE)) {
                mmput(mm);
                mm = ERR_PTR(-EACCES);
@@ -279,6 +221,11 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
        return mm;
 }
 
+struct mm_struct *mm_for_maps(struct task_struct *task)
+{
+       return mm_access(task, PTRACE_MODE_READ);
+}
+
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
 {
        int res = 0;
@@ -822,134 +769,102 @@ static const struct file_operations proc_single_file_operations = {
 };
 
 static int mem_open(struct inode* inode, struct file* file)
-{
-       file->private_data = (void*)((long)current->self_exec_id);
-       /* OK to pass negative loff_t, we can catch out-of-range */
-       file->f_mode |= FMODE_UNSIGNED_OFFSET;
-       return 0;
-}
-
-static ssize_t mem_read(struct file * file, char __user * buf,
-                       size_t count, loff_t *ppos)
 {
        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-       char *page;
-       unsigned long src = *ppos;
-       int ret = -ESRCH;
        struct mm_struct *mm;
 
        if (!task)
-               goto out_no_task;
+               return -ESRCH;
 
-       ret = -ENOMEM;
-       page = (char *)__get_free_page(GFP_TEMPORARY);
-       if (!page)
-               goto out;
+       mm = mm_access(task, PTRACE_MODE_ATTACH);
+       put_task_struct(task);
 
-       mm = check_mem_permission(task);
-       ret = PTR_ERR(mm);
        if (IS_ERR(mm))
-               goto out_free;
-
-       ret = -EIO;
-       if (file->private_data != (void*)((long)current->self_exec_id))
-               goto out_put;
-
-       ret = 0;
-       while (count > 0) {
-               int this_len, retval;
+               return PTR_ERR(mm);
 
-               this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
-               retval = access_remote_vm(mm, src, page, this_len, 0);
-               if (!retval) {
-                       if (!ret)
-                               ret = -EIO;
-                       break;
-               }
-
-               if (copy_to_user(buf, page, retval)) {
-                       ret = -EFAULT;
-                       break;
-               }
-               ret += retval;
-               src += retval;
-               buf += retval;
-               count -= retval;
+       if (mm) {
+               /* ensure this mm_struct can't be freed */
+               atomic_inc(&mm->mm_count);
+               /* but do not pin its memory */
+               mmput(mm);
        }
-       *ppos = src;
 
-out_put:
-       mmput(mm);
-out_free:
-       free_page((unsigned long) page);
-out:
-       put_task_struct(task);
-out_no_task:
-       return ret;
+       /* OK to pass negative loff_t, we can catch out-of-range */
+       file->f_mode |= FMODE_UNSIGNED_OFFSET;
+       file->private_data = mm;
+
+       return 0;
 }
 
-static ssize_t mem_write(struct file * file, const char __user *buf,
-                        size_t count, loff_t *ppos)
+static ssize_t mem_rw(struct file *file, char __user *buf,
+                       size_t count, loff_t *ppos, int write)
 {
-       int copied;
+       struct mm_struct *mm = file->private_data;
+       unsigned long addr = *ppos;
+       ssize_t copied;
        char *page;
-       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-       unsigned long dst = *ppos;
-       struct mm_struct *mm;
 
-       copied = -ESRCH;
-       if (!task)
-               goto out_no_task;
+       if (!mm)
+               return 0;
 
-       copied = -ENOMEM;
        page = (char *)__get_free_page(GFP_TEMPORARY);
        if (!page)
-               goto out_task;
-
-       mm = check_mem_permission(task);
-       copied = PTR_ERR(mm);
-       if (IS_ERR(mm))
-               goto out_free;
-
-       copied = -EIO;
-       if (file->private_data != (void *)((long)current->self_exec_id))
-               goto out_mm;
+               return -ENOMEM;
 
        copied = 0;
+       if (!atomic_inc_not_zero(&mm->mm_users))
+               goto free;
+
        while (count > 0) {
-               int this_len, retval;
+               int this_len = min_t(int, count, PAGE_SIZE);
 
-               this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
-               if (copy_from_user(page, buf, this_len)) {
+               if (write && copy_from_user(page, buf, this_len)) {
                        copied = -EFAULT;
                        break;
                }
-               retval = access_remote_vm(mm, dst, page, this_len, 1);
-               if (!retval) {
+
+               this_len = access_remote_vm(mm, addr, page, this_len, write);
+               if (!this_len) {
                        if (!copied)
                                copied = -EIO;
                        break;
                }
-               copied += retval;
-               buf += retval;
-               dst += retval;
-               count -= retval;                        
+
+               if (!write && copy_to_user(buf, page, this_len)) {
+                       copied = -EFAULT;
+                       break;
+               }
+
+               buf += this_len;
+               addr += this_len;
+               copied += this_len;
+               count -= this_len;
        }
-       *ppos = dst;
+       *ppos = addr;
 
-out_mm:
        mmput(mm);
-out_free:
+free:
        free_page((unsigned long) page);
-out_task:
-       put_task_struct(task);
-out_no_task:
        return copied;
 }
 
+static ssize_t mem_read(struct file *file, char __user *buf,
+                       size_t count, loff_t *ppos)
+{
+       return mem_rw(file, buf, count, ppos, 0);
+}
+
+#define mem_write NULL
+
+#ifndef mem_write
+/* This is a security hazard */
+static ssize_t mem_write(struct file *file, const char __user *buf,
+                        size_t count, loff_t *ppos)
+{
+       return mem_rw(file, (char __user*)buf, count, ppos, 1);
+}
+#endif
+
 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
 {
        switch (orig) {
@@ -966,11 +881,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
        return file->f_pos;
 }
 
+static int mem_release(struct inode *inode, struct file *file)
+{
+       struct mm_struct *mm = file->private_data;
+       if (mm)
+               mmdrop(mm);
+       return 0;
+}
+
 static const struct file_operations proc_mem_operations = {
        .llseek         = mem_lseek,
        .read           = mem_read,
        .write          = mem_write,
        .open           = mem_open,
+       .release        = mem_release,
 };
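
The mem_open()/mem_rw()/mem_release() rework above is the kernel side of
/proc/<pid>/mem. A minimal userspace sketch of the interface it serves is
below (the target PID and hex address come from the command line and are
hypothetical; the usual ptrace-style permission check still applies, and on
32-bit builds large addresses need a 64-bit off_t):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            if (argc != 3) {
                    fprintf(stderr, "usage: %s <pid> <hex-addr>\n", argv[0]);
                    return 1;
            }

            char path[64];
            snprintf(path, sizeof(path), "/proc/%s/mem", argv[1]);

            int fd = open(path, O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* mem_lseek()/pread() take the raw virtual address as the offset */
            off_t addr = (off_t)strtoull(argv[2], NULL, 16);
            unsigned char buf[64];
            ssize_t n = pread(fd, buf, sizeof(buf), addr);
            if (n < 0) {
                    perror("pread");
            } else {
                    for (ssize_t i = 0; i < n; i++)
                            printf("%02x ", buf[i]);
                    printf("\n");
            }
            close(fd);
            return 0;
    }

Compared with the removed code, the reader no longer has to match the
target's self_exec_id: mem_open() pins the mm_struct via mm_count, and
mem_rw() only bumps mm_users for the duration of each transfer.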
 
 static ssize_t environ_read(struct file *file, char __user *buf,
@@ -1960,6 +1884,14 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
                spin_lock(&files->file_lock);
                file = fcheck_files(files, fd);
                if (file) {
+                       unsigned int f_flags;
+                       struct fdtable *fdt;
+
+                       fdt = files_fdtable(files);
+                       f_flags = file->f_flags & ~O_CLOEXEC;
+                       if (FD_ISSET(fd, fdt->close_on_exec))
+                               f_flags |= O_CLOEXEC;
+
                        if (path) {
                                *path = file->f_path;
                                path_get(&file->f_path);
@@ -1969,7 +1901,7 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
                                         "pos:\t%lli\n"
                                         "flags:\t0%o\n",
                                         (long long) file->f_pos,
-                                        file->f_flags);
+                                        f_flags);
                        spin_unlock(&files->file_lock);
                        put_files_struct(files);
                        return 0;
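
With the change above, the flags field in /proc/<pid>/fdinfo/<fd> reflects
the descriptor's close-on-exec bit from the fdtable rather than only
file->f_flags. A small way to observe it from userspace (O_CLOEXEC shows up
as 02000000 in the octal flags value on most architectures):

    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            /* open a descriptor with close-on-exec set */
            int fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            char path[64], line[128];
            snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);

            FILE *f = fopen(path, "r");
            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* prints the pos: and flags: lines; flags now includes O_CLOEXEC */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }
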
index ed257d1415687f50dc84abad0089be090a2d3f8c..a96282781f94e0e3d24376ed5583df72eca21caf 100644 (file)
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(i.freeswap),
                K(global_page_state(NR_FILE_DIRTY)),
                K(global_page_state(NR_WRITEBACK)),
-               K(global_page_state(NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               K(global_page_state(NR_ANON_PAGES)
                  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
-                 HPAGE_PMD_NR
+                 HPAGE_PMD_NR),
+#else
+               K(global_page_state(NR_ANON_PAGES)),
 #endif
-                 ),
                K(global_page_state(NR_FILE_MAPPED)),
                K(global_page_state(NR_SHMEM)),
                K(global_page_state(NR_SLAB_RECLAIMABLE) +
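
The meminfo hunk restructures the AnonPages line so the expression is
well-formed both with and without CONFIG_TRANSPARENT_HUGEPAGE; with THP,
transparent hugepages are folded into the same counter. A quick userspace
check that just echoes the relevant lines (field names assumed to match the
stock /proc/meminfo layout):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            FILE *f = fopen("/proc/meminfo", "r");
            char line[128];

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            while (fgets(line, sizeof(line), f)) {
                    if (!strncmp(line, "AnonPages:", 10) ||
                        !strncmp(line, "AnonHugePages:", 14))
                            fputs(line, stdout);
            }
            fclose(f);
            return 0;
    }
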
index be177f702acbc9bc352c0e13f5b37aeedbf6dd25..d6c078ea148985512160eba57b873c509552359f 100644 (file)
@@ -54,7 +54,7 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
        ei->ns_ops    = ns_ops;
        ei->ns        = ns;
 
-       dentry->d_op = &pid_dentry_operations;
+       d_set_d_op(dentry, &pid_dentry_operations);
        d_add(dentry, inode);
        /* Close the race of the process dying before we return the dentry */
        if (pid_revalidate(dentry, NULL))
index 5afaa58a863012d83a69763b2e65c9db67fe2ada..55a1f494711ccdea764ad2661ce33ee9b34c636e 100644 (file)
@@ -407,6 +407,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        } else {
                spin_unlock(&walk->mm->page_table_lock);
        }
+
+       if (pmd_trans_unstable(pmd))
+               return 0;
        /*
         * The mmap_sem held all the way back in m_start() is what
         * keeps khugepaged out of here and from collapsing things
@@ -505,6 +508,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
        struct page *page;
 
        split_huge_page_pmd(walk->mm, pmd);
+       if (pmd_trans_unstable(pmd))
+               return 0;
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -516,6 +521,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                if (!page)
                        continue;
 
+               if (PageReserved(page))
+                       continue;
+
                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                ClearPageReferenced(page);
@@ -665,6 +673,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        int err = 0;
 
        split_huge_page_pmd(walk->mm, pmd);
+       if (pmd_trans_unstable(pmd))
+               return 0;
 
        /* find the first VMA at or above 'addr' */
        vma = find_vma(walk->mm, addr);
@@ -956,6 +966,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
                spin_unlock(&walk->mm->page_table_lock);
        }
 
+       if (pmd_trans_unstable(pmd))
+               return 0;
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        do {
                struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
@@ -1039,6 +1051,9 @@ static int show_numa_map(struct seq_file *m, void *v)
                seq_printf(m, " stack");
        }
 
+       if (is_vm_hugetlb_page(vma))
+               seq_printf(m, " huge");
+
        walk_page_range(vma->vm_start, vma->vm_end, &walk);
 
        if (!md->pages)
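
The pmd_trans_unstable() checks added to the walkers above guard the files
behind /proc/<pid>/smaps, clear_refs, pagemap and numa_maps against pmds
that a concurrent MADV_DONTNEED or THP fault may still be changing. For
context, a hedged sketch of the pagemap consumer side, looking up the entry
for one address in the calling process (entry format: bit 63 = present, low
55 bits = PFN; newer kernels may hide the PFN from unprivileged readers):

    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <unistd.h>

    static int probe;   /* any mapped address will do */

    int main(void)
    {
            long page_size = sysconf(_SC_PAGESIZE);
            uintptr_t vaddr = (uintptr_t)&probe;

            int fd = open("/proc/self/pagemap", O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            uint64_t entry;
            off_t off = (off_t)(vaddr / page_size) * sizeof(entry);
            if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
                    perror("pread");
                    close(fd);
                    return 1;
            }
            close(fd);

            int present = (entry >> 63) & 1;
            uint64_t pfn = entry & ((1ULL << 55) - 1);
            printf("vaddr %#" PRIxPTR ": present=%d pfn=%#" PRIx64 "\n",
                   vaddr, present, pfn);
            return 0;
    }
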
index 766b1d456050ff98d2603f3af027b7d61b98174d..29166ecd03aea1426486a4fd92122a75250511b6 100644 (file)
@@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
 {
        struct timespec uptime;
        struct timespec idle;
+       cputime64_t idletime;
+       u64 nsec;
+       u32 rem;
        int i;
-       cputime_t idletime = cputime_zero;
 
+       idletime = 0;
        for_each_possible_cpu(i)
                idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
 
        do_posix_clock_monotonic_gettime(&uptime);
        monotonic_to_bootbased(&uptime);
-       cputime_to_timespec(idletime, &idle);
+       nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
+       idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+       idle.tv_nsec = rem;
        seq_printf(m, "%lu.%02lu %lu.%02lu\n",
                        (unsigned long) uptime.tv_sec,
                        (uptime.tv_nsec / (NSEC_PER_SEC / 100)),
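
The uptime hunk replaces cputime_to_timespec() with an explicit
jiffies-to-nanoseconds conversion split by div_u64_rem(). The same split,
written as plain userspace C to show the arithmetic (the 10 ms tick below is
an assumption standing in for TICK_NSEC, which the kernel derives from HZ):

    #include <inttypes.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            uint64_t tick_nsec = 10000000ULL;   /* assumed 100 Hz tick */
            uint64_t idle_jiffies = 123456;     /* sample accumulated idle time */

            uint64_t nsec = idle_jiffies * tick_nsec;
            uint64_t sec = nsec / NSEC_PER_SEC;             /* div_u64_rem() quotient */
            uint32_t rem = (uint32_t)(nsec % NSEC_PER_SEC); /* remainder written back */

            /* /proc/uptime prints whole seconds plus hundredths */
            printf("%" PRIu64 ".%02u\n", sec, rem / (uint32_t)(NSEC_PER_SEC / 100));
            return 0;
    }
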
index b34bdb25490c8956b4733c29771bea3109ec2fcf..10b6be3ca280963e90e5de2b11f5556e576cea4a 100644 (file)
@@ -355,7 +355,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
         * resolution (think about autofs) and thus deadlocks could arise.
         */
        if (cmds == Q_QUOTAON) {
-               ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path);
+               ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
                if (ret)
                        pathp = ERR_PTR(ret);
                else
index aa91089162cb568ce4e5426c4b9db2e942240ce7..f19dfbf60002efa4c84ecabd09e3c9cb10ce23a7 100644 (file)
@@ -453,16 +453,20 @@ int remove_save_link(struct inode *inode, int truncate)
 static void reiserfs_kill_sb(struct super_block *s)
 {
        if (REISERFS_SB(s)) {
-               if (REISERFS_SB(s)->xattr_root) {
-                       d_invalidate(REISERFS_SB(s)->xattr_root);
-                       dput(REISERFS_SB(s)->xattr_root);
-                       REISERFS_SB(s)->xattr_root = NULL;
-               }
-               if (REISERFS_SB(s)->priv_root) {
-                       d_invalidate(REISERFS_SB(s)->priv_root);
-                       dput(REISERFS_SB(s)->priv_root);
-                       REISERFS_SB(s)->priv_root = NULL;
-               }
+               /*
+                * Force any pending inode evictions to occur now. Any
+                * inodes to be removed that have extended attributes
+                * associated with them need to clean them up before
+                * we can release the extended attribute root dentries.
+                * shrink_dcache_for_umount will BUG if we don't release
+                * those before it's called so ->put_super is too late.
+                */
+               shrink_dcache_sb(s);
+
+               dput(REISERFS_SB(s)->xattr_root);
+               REISERFS_SB(s)->xattr_root = NULL;
+               dput(REISERFS_SB(s)->priv_root);
+               REISERFS_SB(s)->priv_root = NULL;
        }
 
        kill_block_super(s);
@@ -1164,7 +1168,8 @@ static void handle_quota_files(struct super_block *s, char **qf_names,
                        kfree(REISERFS_SB(s)->s_qf_names[i]);
                REISERFS_SB(s)->s_qf_names[i] = qf_names[i];
        }
-       REISERFS_SB(s)->s_jquota_fmt = *qfmt;
+       if (*qfmt)
+               REISERFS_SB(s)->s_jquota_fmt = *qfmt;
 }
 #endif
 
index 05d6b0e78c959a341137c97fbb2ea2fa89b25197..dba43c3ea3afb6605972d3a0e3eca3ac5248876e 100644 (file)
@@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path);
 
 /*
  * Same as seq_path, but relative to supplied root.
- *
- * root may be changed, see __d_path().
  */
 int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                  char *esc)
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                char *p;
 
                p = __d_path(path, root, buf, size);
+               if (!p)
+                       return SEQ_SKIP;
                res = PTR_ERR(p);
                if (!IS_ERR(p)) {
                        char *end = mangle_path(buf, p, esc);
@@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
        }
        seq_commit(m, res);
 
-       return res < 0 ? res : 0;
+       return res < 0 && res != -ENAMETOOLONG ? res : 0;
 }
 
 /*
index 492465b451ddd34f6f60214fb48224687649faa9..7ae2a574cb25a64902128f53832b317202dbee8f 100644 (file)
 #include <linux/signalfd.h>
 #include <linux/syscalls.h>
 
+void signalfd_cleanup(struct sighand_struct *sighand)
+{
+       wait_queue_head_t *wqh = &sighand->signalfd_wqh;
+       /*
+        * The lockless check can race with remove_wait_queue() in progress,
+        * but in this case its caller should run under rcu_read_lock(), and
+        * since sighand_cachep is SLAB_DESTROY_BY_RCU we can safely return.
+        */
+       if (likely(!waitqueue_active(wqh)))
+               return;
+
+       /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
+       wake_up_poll(wqh, POLLHUP | POLLFREE);
+}
+
 struct signalfd_ctx {
        sigset_t sigmask;
 };
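
signalfd_cleanup() above lets sighand teardown kick any waiter off the
signalfd wait queue via POLLFREE before the structure goes away. For
context, a minimal userspace signalfd consumer of the kind that would be
sitting on that wait queue (standard signalfd(2) usage, not specific to this
patch):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/signalfd.h>
    #include <unistd.h>

    int main(void)
    {
            sigset_t mask;
            sigemptyset(&mask);
            sigaddset(&mask, SIGINT);

            /* block normal delivery so the signal is only reported via the fd */
            if (sigprocmask(SIG_BLOCK, &mask, NULL) < 0) {
                    perror("sigprocmask");
                    return 1;
            }

            int fd = signalfd(-1, &mask, 0);
            if (fd < 0) {
                    perror("signalfd");
                    return 1;
            }

            printf("press Ctrl-C...\n");
            struct signalfd_siginfo si;
            if (read(fd, &si, sizeof(si)) == sizeof(si))
                    printf("got signal %u from pid %u\n", si.ssi_signo, si.ssi_pid);

            close(fd);
            return 0;
    }
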
index aa866d309695497c1c8925cb30c8af6884f20aaf..9d890085f9ba116b991b43780ba1ba28f9b61337 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/uio.h>
 #include <linux/security.h>
 #include <linux/gfp.h>
+#include <linux/socket.h>
 
 /*
  * Attempt to steal a page from a pipe buffer. This should perhaps go into
@@ -691,7 +692,9 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
        if (!likely(file->f_op && file->f_op->sendpage))
                return -EINVAL;
 
-       more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
+       more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
+       if (sd->len < sd->total_len)
+               more |= MSG_SENDPAGE_NOTLAST;
        return file->f_op->sendpage(file, buf->page, buf->offset,
                                    sd->len, &pos, more);
 }
index 961039121cb8cbde185bf1b8399c6e4ccc71cdf5..02a606141b808a302863607f5bdb74bf2cface37 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -296,15 +296,16 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
 {
        struct path path;
        int error;
+       int empty = 0;
 
        if (bufsiz <= 0)
                return -EINVAL;
 
-       error = user_path_at(dfd, pathname, LOOKUP_EMPTY, &path);
+       error = user_path_at_empty(dfd, pathname, LOOKUP_EMPTY, &path, &empty);
        if (!error) {
                struct inode *inode = path.dentry->d_inode;
 
-               error = -EINVAL;
+               error = empty ? -ENOENT : -EINVAL;
                if (inode->i_op->readlink) {
                        error = security_inode_readlink(path.dentry);
                        if (!error) {
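
The readlinkat change distinguishes an empty pathname (now -ENOENT when the
object is not a symlink) from a plain -EINVAL, which is what makes the
O_PATH-plus-empty-path idiom usable. A hedged sketch of that idiom; it
creates and removes a throwaway symlink in the current directory:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *linkname = "tmp-demo-link";

            unlink(linkname);
            if (symlink("/etc/hostname", linkname) < 0) {
                    perror("symlink");
                    return 1;
            }

            /* open the symlink itself, not its target */
            int fd = open(linkname, O_PATH | O_NOFOLLOW);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* empty path: operate on the object the descriptor refers to */
            char buf[256];
            ssize_t n = readlinkat(fd, "", buf, sizeof(buf) - 1);
            if (n < 0) {
                    perror("readlinkat");
            } else {
                    buf[n] = '\0';
                    printf("%s -> %s\n", linkname, buf);
            }

            close(fd);
            unlink(linkname);
            return 0;
    }
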
index 8244924dec55fd863bc7e0649e1686f78b336c8d..9cf04a1189659f07944545097ef806249f40803d 100644 (file)
@@ -76,7 +76,7 @@ EXPORT_SYMBOL(vfs_statfs);
 int user_statfs(const char __user *pathname, struct kstatfs *st)
 {
        struct path path;
-       int error = user_path(pathname, &path);
+       int error = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
        if (!error) {
                error = vfs_statfs(&path, st);
                path_put(&path);
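
user_statfs() now resolves its path with LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, so
a statfs() on an automount point triggers the automount instead of reporting
the covering directory. Basic usage for reference (the path is arbitrary):

    #include <stdio.h>
    #include <sys/statfs.h>

    int main(void)
    {
            struct statfs st;

            if (statfs("/", &st) < 0) {
                    perror("statfs");
                    return 1;
            }
            printf("type=%#lx bsize=%ld blocks=%llu bfree=%llu\n",
                   (unsigned long)st.f_type, (long)st.f_bsize,
                   (unsigned long long)st.f_blocks,
                   (unsigned long long)st.f_bfree);
            return 0;
    }
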
index ab3d672db0deae0a84a01bc1938cd967cc27e79f..caf4dfa28eed4a292d41a6e888f77685362b4e2f 100644 (file)
@@ -1009,6 +1009,8 @@ int freeze_super(struct super_block *sb)
                        printk(KERN_ERR
                                "VFS:Filesystem freeze failed\n");
                        sb->s_frozen = SB_UNFROZEN;
+                       smp_wmb();
+                       wake_up(&sb->s_wait_unfrozen);
                        deactivate_locked_super(sb);
                        return ret;
                }
index 0a12eb89cd32b544aa720804d2836c4c6d2c8a3c..a494413e4865a1726e81a6caa4e8087f0c7be048 100644 (file)
@@ -136,12 +136,13 @@ static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata, u32 *sec
        void *old_secdata;
        size_t old_secdata_len;
 
-       iattrs = sd->s_iattr;
-       if (!iattrs)
-               iattrs = sysfs_init_inode_attrs(sd);
-       if (!iattrs)
-               return -ENOMEM;
+       if (!sd->s_iattr) {
+               sd->s_iattr = sysfs_init_inode_attrs(sd);
+               if (!sd->s_iattr)
+                       return -ENOMEM;
+       }
 
+       iattrs = sd->s_iattr;
        old_secdata = iattrs->ia_secdata;
        old_secdata_len = iattrs->ia_secdata_len;
 
index a811ac4a26bb68de36c14756446d1c0de9bcbc7b..fd75b635daebf83d0c7445ba810c7ef937c65bbc 100644 (file)
@@ -121,20 +121,21 @@ const char *dbg_key_str1(const struct ubifs_info *c,
                         const union ubifs_key *key);
 
 /*
- * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
- * macros.
+ * TODO: these macros are now broken because there is no locking around them
+ * and we use a global buffer for the key string. This means that in case of
+ * concurrent execution we will end up with incorrect and messy key strings.
  */
 #define DBGKEY(key) dbg_key_str0(c, (key))
 #define DBGKEY1(key) dbg_key_str1(c, (key))
 
-#define ubifs_dbg_msg(type, fmt, ...) do {                        \
-       spin_lock(&dbg_lock);                                     \
-       pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
-       spin_unlock(&dbg_lock);                                   \
-} while (0)
+#define ubifs_dbg_msg(type, fmt, ...) \
+       pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
 
 /* Just debugging messages not related to any specific UBIFS subsystem */
-#define dbg_msg(fmt, ...)   ubifs_dbg_msg("msg", fmt, ##__VA_ARGS__)
+#define dbg_msg(fmt, ...)                                                     \
+       printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid,  \
+              __func__, ##__VA_ARGS__)
+
 /* General messages */
 #define dbg_gen(fmt, ...)   ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
 /* Additional journal messages */
index 2a346bb1d9f5f082338555fb50e754e19eb4aa67..3438b0000413af78285419a6a0cb6236788e0220 100644 (file)
@@ -125,7 +125,6 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                        err = udf_expand_file_adinicb(inode);
                        if (err) {
                                udf_debug("udf_expand_adinicb: err=%d\n", err);
-                               up_write(&iinfo->i_data_sem);
                                return err;
                        }
                } else {
@@ -133,9 +132,10 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                iinfo->i_lenAlloc = pos + count;
                        else
                                iinfo->i_lenAlloc = inode->i_size;
+                       up_write(&iinfo->i_data_sem);
                }
-       }
-       up_write(&iinfo->i_data_sem);
+       } else
+               up_write(&iinfo->i_data_sem);
 
        retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
        if (retval > 0)
@@ -201,12 +201,10 @@ out:
 static int udf_release_file(struct inode *inode, struct file *filp)
 {
        if (filp->f_mode & FMODE_WRITE) {
-               mutex_lock(&inode->i_mutex);
                down_write(&UDF_I(inode)->i_data_sem);
                udf_discard_prealloc(inode);
                udf_truncate_tail_extent(inode);
                up_write(&UDF_I(inode)->i_data_sem);
-               mutex_unlock(&inode->i_mutex);
        }
        return 0;
 }
index 1d1358ed80c13e5da17849c7773bc8a6d1861e7c..262050f2eb6c4ec5ff33f8b3c0d9038aac2fbbde 100644 (file)
@@ -145,6 +145,12 @@ const struct address_space_operations udf_aops = {
        .bmap           = udf_bmap,
 };
 
+/*
+ * Expand a file stored in an ICB to a normal one-block file.
+ *
+ * This function requires i_data_sem held for writing and releases it.
+ * It also requires i_mutex to be held.
+ */
 int udf_expand_file_adinicb(struct inode *inode)
 {
        struct page *page;
@@ -163,9 +169,15 @@ int udf_expand_file_adinicb(struct inode *inode)
                        iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
                /* from now on we have normal address_space methods */
                inode->i_data.a_ops = &udf_aops;
+               up_write(&iinfo->i_data_sem);
                mark_inode_dirty(inode);
                return 0;
        }
+       /*
+        * Release i_data_sem so that we can lock a page - page lock ranks
+        * above i_data_sem. i_mutex still protects us against file changes.
+        */
+       up_write(&iinfo->i_data_sem);
 
        page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
        if (!page)
@@ -181,6 +193,7 @@ int udf_expand_file_adinicb(struct inode *inode)
                SetPageUptodate(page);
                kunmap(page);
        }
+       down_write(&iinfo->i_data_sem);
        memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
               iinfo->i_lenAlloc);
        iinfo->i_lenAlloc = 0;
@@ -190,17 +203,20 @@ int udf_expand_file_adinicb(struct inode *inode)
                iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
        /* from now on we have normal address_space methods */
        inode->i_data.a_ops = &udf_aops;
+       up_write(&iinfo->i_data_sem);
        err = inode->i_data.a_ops->writepage(page, &udf_wbc);
        if (err) {
                /* Restore everything back so that we don't lose data... */
                lock_page(page);
                kaddr = kmap(page);
+               down_write(&iinfo->i_data_sem);
                memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
                       inode->i_size);
                kunmap(page);
                unlock_page(page);
                iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
                inode->i_data.a_ops = &udf_adinicb_aops;
+               up_write(&iinfo->i_data_sem);
        }
        page_cache_release(page);
        mark_inode_dirty(inode);
@@ -1105,10 +1121,9 @@ int udf_setsize(struct inode *inode, loff_t newsize)
                        if (bsize <
                            (udf_file_entry_alloc_offset(inode) + newsize)) {
                                err = udf_expand_file_adinicb(inode);
-                               if (err) {
-                                       up_write(&iinfo->i_data_sem);
+                               if (err)
                                        return err;
-                               }
+                               down_write(&iinfo->i_data_sem);
                        } else
                                iinfo->i_lenAlloc = newsize;
                }
index 7b27b063ff6d673423ecd0902d0076f1c0ea3ccf..7f0e18aa25d4484fd42c78f4edef001056e1c74f 100644 (file)
@@ -1830,6 +1830,12 @@ static void udf_close_lvid(struct super_block *sb)
                                le16_to_cpu(lvid->descTag.descCRCLength)));
 
        lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+       /*
+        * We set buffer uptodate unconditionally here to avoid spurious
+        * warnings from mark_buffer_dirty() when a previous EIO has marked
+        * the buffer as !uptodate.
+        */
+       set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        sbi->s_lvid_dirty = 0;
        mutex_unlock(&sbi->s_alloc_mutex);
index 39f4f809bb6807fa6f19b3b3e9f94b0189db60d0..f86e03487866b767b4b74687eec5b843f6699bee 100644 (file)
@@ -39,9 +39,11 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
        struct posix_acl_entry *acl_e;
        struct posix_acl *acl;
        struct xfs_acl_entry *ace;
-       int count, i;
+       unsigned int count, i;
 
        count = be32_to_cpu(aclp->acl_cnt);
+       if (count > XFS_ACL_MAX_ENTRIES)
+               return ERR_PTR(-EFSCORRUPTED);
 
        acl = posix_acl_alloc(count, GFP_KERNEL);
        if (!acl)
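
xfs_acl_from_disk() now treats the on-disk acl_cnt as untrusted input: make
it unsigned, bound it, and fail with EFSCORRUPTED before it can drive an
allocation. The same pattern in a generic, hedged form (MAX_ENTRIES and the
EUCLEAN errno are userspace stand-ins for XFS_ACL_MAX_ENTRIES and
EFSCORRUPTED):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ENTRIES 25  /* stand-in for the format's real limit */

    struct entry { uint32_t tag, id, perm; };

    /* returns a calloc'd array of count entries, or NULL with errno set */
    static struct entry *entries_from_disk(uint32_t count)
    {
            /* bound the untrusted count before it sizes an allocation */
            if (count > MAX_ENTRIES) {
                    errno = EUCLEAN;
                    return NULL;
            }
            return calloc(count, sizeof(struct entry));
    }

    int main(void)
    {
            struct entry *e = entries_from_disk(1000);  /* bogus on-disk count */
            if (!e)
                    perror("entries_from_disk");
            free(e);
            return 0;
    }
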
index 50a7d5fb3b73dca40017a9b55252444f81c84a7b..36d6ee44386702f6aaeeafa05e5f44daa9907fa7 100644 (file)
@@ -346,7 +346,6 @@ extern struct list_head *xfs_get_buftarg_list(void);
 #define xfs_getsize_buftarg(buftarg)   block_size((buftarg)->bt_bdev)
 #define xfs_readonly_buftarg(buftarg)  bdev_read_only((buftarg)->bt_bdev)
 
-#define xfs_binval(buftarg)            xfs_flush_buftarg(buftarg, 1)
 #define XFS_bflush(buftarg)            xfs_flush_buftarg(buftarg, 1)
 
 #endif /* __XFS_BUF_H__ */
index 244e797dae327a7da95992e23697489397c4bbc1..572494faf26212b9cdc76745ca4ea8314853935a 100644 (file)
@@ -68,7 +68,7 @@ xfs_trim_extents(
         * Look up the longest btree in the AGF and start with it.
         */
        error = xfs_alloc_lookup_le(cur, 0,
-                                   XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
+                           be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
        if (error)
                goto out_del_cursor;
 
@@ -84,7 +84,7 @@ xfs_trim_extents(
                if (error)
                        goto out_del_cursor;
                XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
-               ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
+               ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
 
                /*
                 * Too small?  Give up.
index f4f878fc008316e816c24de3cdb1b86908ce9b52..fed3f3c878c6fda9749f277f04d76b8cc12cd0de 100644 (file)
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
        switch (fileid_type) {
        case FILEID_INO32_GEN_PARENT:
                spin_lock(&dentry->d_lock);
-               fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN:
-               fid->i32.ino = inode->i_ino;
+               fid->i32.ino = XFS_I(inode)->i_ino;
                fid->i32.gen = inode->i_generation;
                break;
        case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
                spin_lock(&dentry->d_lock);
-               fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
-               fid64->ino = inode->i_ino;
+               fid64->ino = XFS_I(inode)->i_ino;
                fid64->gen = inode->i_generation;
                break;
        }
index 7f782af286bfa0edd73a125cdcb892339025913a..b679198dcc01c7e279fe426b0f2c1f153e03c14d 100644 (file)
@@ -309,7 +309,19 @@ xfs_file_aio_read(
        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;
 
-       if (unlikely(ioflags & IO_ISDIRECT)) {
+       /*
+        * Locking is a bit tricky here. If we take an exclusive lock
+        * for direct IO, we effectively serialise all new concurrent
+        * read IO to this file and block it behind IO that is currently in
+        * progress because IO in progress holds the IO lock shared. We only
+        * need to hold the lock exclusive to blow away the page cache, so
+        * only take lock exclusively if the page cache needs invalidation.
+        * This allows the normal direct IO case of no page cache pages to
+        * proceed concurrently without serialisation.
+        */
+       xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+       if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
+               xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
                xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 
                if (inode->i_mapping->nrpages) {
@@ -322,8 +334,7 @@ xfs_file_aio_read(
                        }
                }
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
-       } else
-               xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+       }
 
        trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
 
@@ -658,6 +669,7 @@ xfs_file_aio_write_checks(
        xfs_fsize_t             new_size;
        int                     error = 0;
 
+       xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
        error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
        if (error) {
                xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
@@ -749,14 +761,24 @@ xfs_file_dio_aio_write(
                *iolock = XFS_IOLOCK_EXCL;
        else
                *iolock = XFS_IOLOCK_SHARED;
-       xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+       xfs_rw_ilock(ip, *iolock);
 
        ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
        if (ret)
                return ret;
 
+       /*
+        * Recheck if there are cached pages that need to be invalidated after
+        * we got the iolock, to protect against other threads adding new pages
+        * while we were waiting for the iolock.
+        */
+       if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) {
+               xfs_rw_iunlock(ip, *iolock);
+               *iolock = XFS_IOLOCK_EXCL;
+               xfs_rw_ilock(ip, *iolock);
+       }
+
        if (mapping->nrpages) {
-               WARN_ON(*iolock != XFS_IOLOCK_EXCL);
                ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
                                                        FI_REMAPF_LOCKED);
                if (ret)
@@ -801,7 +823,7 @@ xfs_file_buffered_aio_write(
        size_t                  count = ocount;
 
        *iolock = XFS_IOLOCK_EXCL;
-       xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+       xfs_rw_ilock(ip, *iolock);
 
        ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
        if (ret)
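
Both the read and the direct-write paths above do the same dance on the
iolock: take it shared, and only if the page cache needs invalidating drop
it, retake it exclusive, and recheck, because pages may have appeared while
the lock was briefly dropped. A hedged pthreads analogue of that
upgrade-and-recheck pattern (cache_dirty stands in for mapping->nrpages):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t iolock = PTHREAD_RWLOCK_INITIALIZER;
    static int cache_dirty = 1;    /* stand-in for mapping->nrpages != 0 */

    static void do_direct_io(void)
    {
            pthread_rwlock_rdlock(&iolock);

            if (cache_dirty) {
                    /* cannot upgrade in place: drop shared, take exclusive... */
                    pthread_rwlock_unlock(&iolock);
                    pthread_rwlock_wrlock(&iolock);

                    /* ...and recheck: the state may have changed while unlocked */
                    if (cache_dirty) {
                            printf("invalidating cache under exclusive lock\n");
                            cache_dirty = 0;
                    }
            }

            printf("doing I/O\n");
            pthread_rwlock_unlock(&iolock);
    }

    int main(void)
    {
            do_direct_io();
            return 0;
    }
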
index d44d92cd12b17c7645156b4754c39ea29b5b10e5..f5b697bf39f26f51b3d0a2d767eb9d90fd9a8ca6 100644 (file)
@@ -69,9 +69,8 @@ xfs_synchronize_times(
 }
 
 /*
- * If the linux inode is valid, mark it dirty.
- * Used when committing a dirty inode into a transaction so that
- * the inode will get written back by the linux code
+ * If the linux inode is valid, mark it dirty, else mark the dirty state
+ * in the XFS inode to make sure we pick it up when reclaiming the inode.
  */
 void
 xfs_mark_inode_dirty_sync(
@@ -81,6 +80,10 @@ xfs_mark_inode_dirty_sync(
 
        if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
                mark_inode_dirty_sync(inode);
+       else {
+               barrier();
+               ip->i_update_core = 1;
+       }
 }
 
 void
@@ -91,6 +94,11 @@ xfs_mark_inode_dirty(
 
        if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
                mark_inode_dirty(inode);
+       else {
+               barrier();
+               ip->i_update_core = 1;
+       }
+
 }
 
 /*
@@ -456,7 +464,7 @@ xfs_vn_getattr(
        trace_xfs_getattr(ip);
 
        if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
+               return -XFS_ERROR(EIO);
 
        stat->size = XFS_ISIZE(ip);
        stat->dev = inode->i_sb->s_dev;
index 347cae965e85c4260a872b4d0d2b2157b80935c8..e6ac98c112e308fefe0905270e34cfc401aff325 100644 (file)
@@ -870,43 +870,6 @@ xfs_fs_dirty_inode(
        XFS_I(inode)->i_update_core = 1;
 }
 
-STATIC int
-xfs_log_inode(
-       struct xfs_inode        *ip)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       struct xfs_trans        *tp;
-       int                     error;
-
-       xfs_iunlock(ip, XFS_ILOCK_SHARED);
-       tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
-
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               /* we need to return with the lock hold shared */
-               xfs_ilock(ip, XFS_ILOCK_SHARED);
-               return error;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-       /*
-        * Note - it's possible that we might have pushed ourselves out of the
-        * way during trans_reserve which would flush the inode.  But there's
-        * no guarantee that the inode buffer has actually gone out yet (it's
-        * delwri).  Plus the buffer could be pinned anyway if it's part of
-        * an inode in another recent transaction.  So we play it safe and
-        * fire off the transaction anyway.
-        */
-       xfs_trans_ijoin(tp, ip);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       error = xfs_trans_commit(tp, 0);
-       xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
-
-       return error;
-}
-
 STATIC int
 xfs_fs_write_inode(
        struct inode            *inode,
@@ -919,9 +882,9 @@ xfs_fs_write_inode(
        trace_xfs_write_inode(ip);
 
        if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
+               return -XFS_ERROR(EIO);
 
-       if (wbc->sync_mode == WB_SYNC_ALL) {
+       if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
                /*
                 * Make sure the inode has made it into the log.  Instead
                 * of forcing it all the way to stable storage using a
@@ -930,13 +893,14 @@ xfs_fs_write_inode(
                 * of synchronous log forces dramatically.
                 */
                xfs_ioend_wait(ip);
-               xfs_ilock(ip, XFS_ILOCK_SHARED);
-               if (ip->i_update_core) {
-                       error = xfs_log_inode(ip);
-                       if (error)
-                               goto out_unlock;
-               }
+               error = xfs_log_dirty_inode(ip, NULL, 0);
+               if (error)
+                       goto out;
+               return 0;
        } else {
+               if (!ip->i_update_core)
+                       return 0;
+
                /*
                 * We make this non-blocking if the inode is contended, return
                 * EAGAIN to indicate to the caller that they did not succeed.
index 8ecad5ff9f9b0e0bdb5a4f76dd97b694147a5a55..2f277a04d67d9f84a35a4cb3f27cc69b1e418488 100644 (file)
@@ -336,6 +336,32 @@ xfs_sync_fsdata(
        return xfs_bwrite(mp, bp);
 }
 
+int
+xfs_log_dirty_inode(
+       struct xfs_inode        *ip,
+       struct xfs_perag        *pag,
+       int                     flags)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       if (!ip->i_update_core)
+               return 0;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       return xfs_trans_commit(tp, 0);
+}
+
 /*
  * When remounting a filesystem read-only or freezing the filesystem, we have
  * two phases to execute. This first phase is syncing the data before we
@@ -365,6 +391,17 @@ xfs_quiesce_data(
 
        /* push and block till complete */
        xfs_sync_data(mp, SYNC_WAIT);
+
+       /*
+        * Log all pending size and timestamp updates.  The vfs writeback
+        * code is supposed to do this, but due to its overaggressive
+        * livelock detection it will skip inodes where appending writes
+        * were written out in the first non-blocking sync phase if their
+        * completion took long enough that it happened after taking the
+        * timestamp for the cut-off in the blocking phase.
+        */
+       xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
+
        xfs_qm_sync(mp, SYNC_WAIT);
 
        /* write superblock and hoover up shutdown errors */
@@ -772,6 +809,17 @@ restart:
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
+
+               /*
+                * If we only have a single dirty inode in a cluster there is
+                * a fair chance that the AIL push may have pushed it into
+                * the buffer, but xfsbufd won't touch it until 30 seconds
+                * from now, and thus we will lock up here.
+                *
+                * Promote the inode buffer to the front of the delwri list
+                * and wake up xfsbufd now.
+                */
+               xfs_promote_inode(ip);
                xfs_iflock(ip);
        }
 
index e3a6ad27415f77c07484941e1b6a79b07d2a3a34..ef5b2ce42982db7dd2591e5b14dbdd28af60846d 100644 (file)
@@ -42,6 +42,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
 
 void xfs_flush_inodes(struct xfs_inode *ip);
 
+int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
+
 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
 
 void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
index b94dace4e7852333274cd3ca42f633aca98d33f9..e70c7fc95e2f1ae31aa21bf713c496b988434e57 100644 (file)
@@ -714,7 +714,8 @@ xfs_qm_dqattach_one(
         * disk and we didn't ask it to allocate;
         * ESRCH if quotas got turned off suddenly.
         */
-       error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp);
+       error = xfs_qm_dqget(ip->i_mount, ip, id, type,
+                            doalloc | XFS_QMOPT_DOWARN, &dqp);
        if (error)
                return error;
 
index 01d2072fb6d4580ca6ceb7f77a20eed67c94cf1c..99d4011602e06f697bacad9f24bd6848a5db3a5a 100644 (file)
@@ -822,17 +822,9 @@ xfs_attr_inactive(xfs_inode_t *dp)
        error = xfs_attr_root_inactive(&trans, dp);
        if (error)
                goto out;
-       /*
-        * signal synchronous inactive transactions unless this
-        * is a synchronous mount filesystem in which case we
-        * know that we're here because we've been called out of
-        * xfs_inactive which means that the last reference is gone
-        * and the unlink transaction has already hit the disk so
-        * async inactive transactions are safe.
-        */
-       if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK,
-                               (!(mp->m_flags & XFS_MOUNT_WSYNC)
-                                ? 1 : 0))))
+
+       error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK, 0);
+       if (error)
                goto out;
 
        /*
index 71e90dc2aeb18b82189d84d754afdf2b8ab39b3d..f49ecf2e7d36943fce53811268ab80d11e33c27b 100644 (file)
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
 /*
  * Query whether the requested number of additional bytes of extended
  * attribute space will be able to fit inline.
+ *
  * Returns zero if not, else the di_forkoff fork offset to be used in the
  * literal area for attribute data once the new bytes have been added.
  *
@@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        int offset;
        int minforkoff; /* lower limit on valid forkoff locations */
        int maxforkoff; /* upper limit on valid forkoff locations */
-       int dsize;      
+       int dsize;
        xfs_mount_t *mp = dp->i_mount;
 
        offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
@@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
                return (offset >= minforkoff) ? minforkoff : 0;
        }
 
-       if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
-               if (bytes <= XFS_IFORK_ASIZE(dp))
-                       return dp->i_d.di_forkoff;
+       /*
+        * If the requested numbers of bytes is smaller or equal to the
+        * current attribute fork size we can always proceed.
+        *
+        * Note that if_bytes in the data fork might actually be larger than
+        * the current data fork size due to delalloc extents. In that
+        * case either the extent count will go down when they are converted
+        * to real extents, or the delalloc conversion will take care of the
+        * literal area rebalancing.
+        */
+       if (bytes <= XFS_IFORK_ASIZE(dp))
+               return dp->i_d.di_forkoff;
+
+       /*
+        * For attr2 we can try to move the forkoff if there is space in the
+        * literal area, but for the old format we are done if there is no
+        * space in the fixed attribute fork.
+        */
+       if (!(mp->m_flags & XFS_MOUNT_ATTR2))
                return 0;
-       }
 
        dsize = dp->i_df.if_bytes;
-       
+
        switch (dp->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
-               /* 
+               /*
                 * If there is no attr fork and the data fork is extents, 
-                * determine if creating the default attr fork will result 
-                * in the extents form migrating to btree. If so, the 
-                * minimum offset only needs to be the space required for 
+                * determine if creating the default attr fork will result
+                * in the extents form migrating to btree. If so, the
+                * minimum offset only needs to be the space required for
                 * the btree root.
-                */ 
+                */
                if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
                    xfs_default_attroffset(dp))
                        dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
                break;
-               
        case XFS_DINODE_FMT_BTREE:
                /*
-                * If have data btree then keep forkoff if we have one,
-                * otherwise we are adding a new attr, so then we set 
-                * minforkoff to where the btree root can finish so we have 
+                * If we have a data btree then keep forkoff if we have one,
+                * otherwise we are adding a new attr, so then we set
+                * minforkoff to where the btree root can finish so we have
                 * plenty of room for attrs
                 */
                if (dp->i_d.di_forkoff) {
-                       if (offset < dp->i_d.di_forkoff) 
+                       if (offset < dp->i_d.di_forkoff)
                                return 0;
-                       else 
-                               return dp->i_d.di_forkoff;
-               } else
-                       dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+                       return dp->i_d.di_forkoff;
+               }
+               dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
                break;
        }
-       
-       /* 
-        * A data fork btree root must have space for at least 
+
+       /*
+        * A data fork btree root must have space for at least
         * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
         */
        minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
        maxforkoff = maxforkoff >> 3;   /* rounded down */
 
-       if (offset >= minforkoff && offset < maxforkoff)
-               return offset;
        if (offset >= maxforkoff)
                return maxforkoff;
+       if (offset >= minforkoff)
+               return offset;
        return 0;
 }
 
index e546a33214c93b9d6d4e9e75450451c8592b40aa..a175933a7f48ddb46cde2011793af073284148c0 100644 (file)
@@ -3785,19 +3785,11 @@ xfs_bmap_compute_maxlevels(
  * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
  * caller.  Frees all the extents that need freeing, which must be done
  * last due to locking considerations.  We never free any extents in
- * the first transaction.  This is to allow the caller to make the first
- * transaction a synchronous one so that the pointers to the data being
- * broken in this transaction will be permanent before the data is actually
- * freed.  This is necessary to prevent blocks from being reallocated
- * and written to before the free and reallocation are actually permanent.
- * We do not just make the first transaction synchronous here, because
- * there are more efficient ways to gain the same protection in some cases
- * (see the file truncation code).
+ * the first transaction.
  *
  * Return 1 if the given transaction was committed and a new one
  * started, and 0 otherwise in the committed parameter.
  */
-/*ARGSUSED*/
 int                                            /* error */
 xfs_bmap_finish(
        xfs_trans_t             **tp,           /* transaction pointer addr */
index a7342e840d77608fa92adf92c71647f45aac4b94..7888a75630788e3bec732c1fcf00d5b974bca800 100644 (file)
@@ -1023,7 +1023,6 @@ xfs_buf_iodone_callbacks(
        XFS_BUF_UNDELAYWRITE(bp);
 
        trace_xfs_buf_error_relse(bp, _RET_IP_);
-       xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
 
 do_callbacks:
        xfs_buf_do_callbacks(bp);
index 3631783b2b5385ee939a61e5cafd5b06ef6b0a45..ca752f05c31c498b8ef6b1fc29c6e4ebe2ea6a33 100644 (file)
@@ -356,9 +356,20 @@ xfs_iget_cache_miss(
                        BUG();
        }
 
-       spin_lock(&pag->pag_ici_lock);
+       /*
+        * These values must be set before inserting the inode into the radix
+        * tree as the moment it is inserted a concurrent lookup (allowed by the
+        * RCU locking mechanism) can find it and that lookup must see that this
+        * is an inode currently under construction (i.e. that XFS_INEW is set).
+        * The ip->i_flags_lock that protects the XFS_INEW flag forms the
+        * memory barrier that ensures this detection works correctly at lookup
+        * time.
+        */
+       ip->i_udquot = ip->i_gdquot = NULL;
+       xfs_iflags_set(ip, XFS_INEW);
 
        /* insert the new inode */
+       spin_lock(&pag->pag_ici_lock);
        error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
        if (unlikely(error)) {
                WARN_ON(error != -EEXIST);
@@ -366,11 +377,6 @@ xfs_iget_cache_miss(
                error = EAGAIN;
                goto out_preload_end;
        }
-
-       /* These values _must_ be set before releasing the radix tree lock! */
-       ip->i_udquot = ip->i_gdquot = NULL;
-       xfs_iflags_set(ip, XFS_INEW);
-
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();
 
index a098a20ca63e29bbd021a266e287d87ca796fd8c..5715279975c951ff027266bb31ea006a26a72a06 100644 (file)
@@ -1528,15 +1528,7 @@ xfs_itruncate_finish(
                                xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
                        }
                }
-       } else if (sync) {
-               ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
-               if (ip->i_d.di_anextents > 0)
-                       xfs_trans_set_sync(ntp);
        }
-       ASSERT(fork == XFS_DATA_FORK ||
-               (fork == XFS_ATTR_FORK &&
-                       ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
-                        (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
 
        /*
         * Since it is possible for space to become allocated beyond
@@ -3099,6 +3091,27 @@ corrupt_out:
        return XFS_ERROR(EFSCORRUPTED);
 }
 
+void
+xfs_promote_inode(
+       struct xfs_inode        *ip)
+{
+       struct xfs_buf          *bp;
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+
+       bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
+                       ip->i_imap.im_len, XBF_TRYLOCK);
+       if (!bp)
+               return;
+
+       if (XFS_BUF_ISDELAYWRITE(bp)) {
+               xfs_buf_delwri_promote(bp);
+               wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
+       }
+
+       xfs_buf_relse(bp);
+}
+
 /*
  * Return a pointer to the extent record at file index idx.
  */
index 964cfea776868684afb26f818b8a761ce652b1b5..28b3596453e031e189f72125a5be5cb03adea8a4 100644 (file)
@@ -509,6 +509,7 @@ int         xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
 void           xfs_iunpin_wait(xfs_inode_t *);
 int            xfs_iflush(xfs_inode_t *, uint);
+void           xfs_promote_inode(struct xfs_inode *);
 void           xfs_lock_inodes(xfs_inode_t **, int, uint);
 void           xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
 
index 04142caedb2bad62c158b08e6799e5f75bce5b4e..b75fd67ca376ce0923cddbf10b318aadb25ea9da 100644 (file)
@@ -3159,37 +3159,26 @@ xlog_recover_process_iunlinks(
                         */
                        continue;
                }
+               /*
+                * Unlock the buffer so that it can be acquired in the normal
+                * course of the transaction to truncate and free each inode.
+                * Because we are not racing with anyone else here for the AGI
+                * buffer, we don't even need to hold it locked to read the
+                * initial unlinked bucket entries out of the buffer. We keep
+                * buffer reference though, so that it stays pinned in memory
+                * while we need the buffer.
+                */
                agi = XFS_BUF_TO_AGI(agibp);
+               xfs_buf_unlock(agibp);
 
                for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
                        agino = be32_to_cpu(agi->agi_unlinked[bucket]);
                        while (agino != NULLAGINO) {
-                               /*
-                                * Release the agi buffer so that it can
-                                * be acquired in the normal course of the
-                                * transaction to truncate and free the inode.
-                                */
-                               xfs_buf_relse(agibp);
-
                                agino = xlog_recover_process_one_iunlink(mp,
                                                        agno, agino, bucket);
-
-                               /*
-                                * Reacquire the agibuffer and continue around
-                                * the loop. This should never fail as we know
-                                * the buffer was good earlier on.
-                                */
-                               error = xfs_read_agi(mp, NULL, agno, &agibp);
-                               ASSERT(error == 0);
-                               agi = XFS_BUF_TO_AGI(agibp);
                        }
                }
-
-               /*
-                * Release the buffer for the current agi so we can
-                * go on to the next one.
-                */
-               xfs_buf_relse(agibp);
+               xfs_buf_rele(agibp);
        }
 
        mp->m_dmevmask = mp_dmevmask;
index b49b82363d203dfa8d2c19abe9e08993b34c27e9..9afdd497369c13e8dffea7f71f8f057c833038ba 100644 (file)
@@ -44,9 +44,6 @@
 #include "xfs_trace.h"
 
 
-STATIC void    xfs_unmountfs_wait(xfs_mount_t *);
-
-
 #ifdef HAVE_PERCPU_SB
 STATIC void    xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
                                                int);
@@ -1507,11 +1504,6 @@ xfs_unmountfs(
         */
        xfs_log_force(mp, XFS_LOG_SYNC);
 
-       xfs_binval(mp->m_ddev_targp);
-       if (mp->m_rtdev_targp) {
-               xfs_binval(mp->m_rtdev_targp);
-       }
-
        /*
         * Unreserve any blocks we have so that when we unmount we don't account
         * the reserved free space as used. This is really only necessary for
@@ -1537,7 +1529,16 @@ xfs_unmountfs(
                xfs_warn(mp, "Unable to update superblock counters. "
                                "Freespace may not be correct on next mount.");
        xfs_unmountfs_writesb(mp);
-       xfs_unmountfs_wait(mp);                 /* wait for async bufs */
+
+       /*
+        * Make sure all buffers have been flushed and completed before
+        * unmounting the log.
+        */
+       error = xfs_flush_buftarg(mp->m_ddev_targp, 1);
+       if (error)
+               xfs_warn(mp, "%d busy buffers during unmount.", error);
+       xfs_wait_buftarg(mp->m_ddev_targp);
+
        xfs_log_unmount_write(mp);
        xfs_log_unmount(mp);
        xfs_uuid_unmount(mp);
@@ -1548,16 +1549,6 @@ xfs_unmountfs(
        xfs_free_perag(mp);
 }
 
-STATIC void
-xfs_unmountfs_wait(xfs_mount_t *mp)
-{
-       if (mp->m_logdev_targp != mp->m_ddev_targp)
-               xfs_wait_buftarg(mp->m_logdev_targp);
-       if (mp->m_rtdev_targp)
-               xfs_wait_buftarg(mp->m_rtdev_targp);
-       xfs_wait_buftarg(mp->m_ddev_targp);
-}
-
 int
 xfs_fs_writable(xfs_mount_t *mp)
 {
index 619720705bc6843e4624ce02c27829fa32256ba9..59509ae0b2734be2fce52994d1ea75a5afce7013 100644 (file)
@@ -535,7 +535,7 @@ xfs_readlink(
        char            *link)
 {
        xfs_mount_t     *mp = ip->i_mount;
-       int             pathlen;
+       xfs_fsize_t     pathlen;
        int             error = 0;
 
        trace_xfs_readlink(ip);
@@ -545,13 +545,20 @@ xfs_readlink(
 
        xfs_ilock(ip, XFS_ILOCK_SHARED);
 
-       ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
-       ASSERT(ip->i_d.di_size <= MAXPATHLEN);
-
        pathlen = ip->i_d.di_size;
        if (!pathlen)
                goto out;
 
+       if (pathlen < 0 || pathlen > MAXPATHLEN) {
+               xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)",
+                        __func__, (unsigned long long) ip->i_ino,
+                        (long long) pathlen);
+               ASSERT(0);
+               error = XFS_ERROR(EFSCORRUPTED);
+               goto out;
+       }
+
+
        if (ip->i_df.if_flags & XFS_IFINLINE) {
                memcpy(link, ip->i_df.if_u1.if_data, pathlen);
                link[pathlen] = '\0';
index 173972672175588c181718aef6cf7f1537c37758..451823cb88372b2130793abbbb4e17869af34faf 100644 (file)
@@ -15,6 +15,7 @@ extern int pxm_to_node(int);
 extern int node_to_pxm(int);
 extern void __acpi_map_pxm_to_node(int, int);
 extern int acpi_map_pxm_to_node(int);
+extern unsigned char acpi_srat_revision;
 
 #endif                         /* CONFIG_ACPI_NUMA */
 #endif                         /* __ACP_NUMA_H */
index 76bff2bff15e346532be60dc1ce1a13aff070471..831924a1bbd89b1cc5bb80c09069edf767bdffcc 100644 (file)
@@ -425,6 +425,8 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
                                unsigned long size);
 #endif
 
+#ifdef CONFIG_MMU
+
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_trans_huge(pmd_t pmd)
 {
@@ -441,7 +443,84 @@ static inline int pmd_write(pmd_t pmd)
        return 0;
 }
 #endif /* __HAVE_ARCH_PMD_WRITE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+       /*
+        * Depend on compiler for an atomic pmd read. NOTE: this is
+        * only going to work, if the pmdval_t isn't larger than
+        * an unsigned long.
+        */
+       return *pmdp;
+}
+#endif
+
+/*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_sem held in read mode to protect against MADV_DONTNEED and
+ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
+ * into a null pmd and the transhuge page fault can convert a null pmd
+ * into a huge pmd or into a regular pmd (if the hugepage allocation
+ * fails). While holding the mmap_sem in read mode the pmd becomes
+ * stable and stops changing under us only if it's not null and not a
+ * transhuge pmd. When those races occur and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined, so behaving as if the pmd were none is safe (because it
+ * can return none anyway). The compiler-level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_sem is held for reading by the
+ * caller (a special atomic read, not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+       pmd_t pmdval = pmd_read_atomic(pmd);
+       /*
+        * The barrier will stabilize the pmdval in a register or on
+        * the stack so that it will stop changing under the code.
+        */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       barrier();
 #endif
+       if (pmd_none(pmdval))
+               return 1;
+       if (unlikely(pmd_bad(pmdval))) {
+               if (!pmd_trans_huge(pmdval))
+                       pmd_clear_bad(pmd);
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that already verified the pmd is not none and want to
+ * walk ptes while holding the mmap_sem in read mode (write mode doesn't
+ * need this). If THP is not enabled, the pmd can't go away under the
+ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
+ * run a pmd_trans_unstable check before walking the ptes after
+ * split_huge_page_pmd returns (because it may have run while the pmd
+ * was null, but then a page fault can map in a THP and not a
+ * regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+       return 0;
+#endif
+}
+
+#endif /* CONFIG_MMU */
 
 #endif /* !__ASSEMBLY__ */
 
index 44bce836d350a651284b325adc220d0ead84da58..9ce7f44aebd2ab64a91b2f21b436a3377e54f045 100644 (file)
@@ -28,6 +28,8 @@
 #define POLLRDHUP       0x2000
 #endif
 
+#define POLLFREE       0x4000  /* currently only for epoll */
+
 struct pollfd {
        int fd;
        short events;
index 0fd28e028de1d83130aeb751294159dffc39fa6a..c749af9c0983022876bffacf1aea628c270d4a34 100644 (file)
@@ -15,7 +15,7 @@ typedef __kernel_fsid_t       fsid_t;
  * with a 10' pole.
  */
 #ifndef __statfs_word
-#if BITS_PER_LONG == 64
+#if __BITS_PER_LONG == 64
 #define __statfs_word long
 #else
 #define __statfs_word __u32
index 4f76959397fa88a869dff276ba5ab0ccc9494071..5518963e38b0653185a0d204a75b015937184ded 100644 (file)
@@ -218,7 +218,7 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
 
 /* fs/sendfile.c */
 #define __NR3264_sendfile 71
-__SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
+__SYSCALL(__NR3264_sendfile, sys_sendfile64)
 
 /* fs/select.c */
 #define __NR_pselect6 72
index 738b3a5faa1294215479ddf88e99c2c523dfa72c..40aaebf50af77a2ee6ade1722b0f8661b6227516 100644 (file)
@@ -1323,6 +1323,7 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 extern int drm_authmagic(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
+extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
 
 /* Cache management (drm_cache.c) */
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
index 91567bbdb027c5f36a09923a2d9852c10fe1f07d..03eb1d68d50406f47c17a9c8e09c5ec9e0e15bb0 100644 (file)
@@ -72,6 +72,7 @@
 
 #define DP_MAIN_LINK_CHANNEL_CODING         0x006
 
+#define DP_EDP_CONFIGURATION_CAP            0x00d
 #define DP_TRAINING_AUX_RD_INTERVAL         0x00e
 
 /* link configuration */
 #define DP_MAIN_LINK_CHANNEL_CODING_SET            0x108
 # define DP_SET_ANSI_8B10B                 (1 << 0)
 
+#define DP_EDP_CONFIGURATION_SET            0x10a
+
 #define DP_LANE0_1_STATUS                  0x202
 #define DP_LANE2_3_STATUS                  0x203
 # define DP_LANE_CR_DONE                   (1 << 0)
index c4961ea50a494b69d996873e09e8e509956660c0..53dfa1098b986cf17b2b9c4d0609462fd9c514ea 100644 (file)
@@ -233,6 +233,8 @@ struct drm_mode_fb_cmd {
 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
 #define DRM_MODE_FB_DIRTY_FLAGS         0x03
 
+#define DRM_MODE_FB_DIRTY_MAX_CLIPS     256
+
 /*
  * Mark a region of a framebuffer as dirty.
  *
index 3d53efd25ab906889e081acb8ae10f1065ef180a..4306811f8a14356282d685a1894215e91e78d542 100644 (file)
@@ -4,6 +4,7 @@
 */
 #define radeon_PCI_IDS \
        {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -55,6 +56,7 @@
        {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+       {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
        {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
        {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
        {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
        {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
index a3ef66a2a08303bb8cc7492a70bda66c22c2e473..fc8a3ffce3204b7f887c401bb2e14bfc229cc5fd 100644 (file)
@@ -49,6 +49,26 @@ static inline unsigned long hweight_long(unsigned long w)
        return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+       return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+       return (word >> shift) | (word << (64 - shift));
+}
+
 /**
  * rol32 - rotate a 32-bit value left
  * @word: value to rotate
index 1a23722e8878619aec4017cbbd31b951439fdb2b..1b130216ccd4e893b2d29159c72dcaca5cd1be0b 100644 (file)
@@ -670,6 +670,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
+extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
+extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
+                             unsigned int, void __user *);
 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                          unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
@@ -798,9 +801,6 @@ extern void blk_unprep_request(struct request *);
  */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                        spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
-                                                          request_fn_proc *,
-                                                          spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
                                                      request_fn_proc *, spinlock_t *);
index 846bb1792572e3cb5cd88eae67e0150a4423d8c2..edaf3900f6bc77bca7712636cef0f9b564c265fe 100644 (file)
@@ -561,5 +561,9 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
 
 extern void __user *compat_alloc_user_space(unsigned long len);
 
+#else
+
+#define is_compat_task() (0)
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
index 19d90a55541d99c37ea6d6c78040b9979dc38116..f13bb6dd156ff25af6e8133de14123fe9ba3507f 100644 (file)
@@ -207,6 +207,7 @@ struct dentry_operations {
 
 #define DCACHE_CANT_MOUNT      0x0100
 #define DCACHE_GENOCIDE                0x0200
+#define DCACHE_SHRINK_LIST     0x0400
 
 #define DCACHE_OP_HASH         0x1000
 #define DCACHE_OP_COMPARE      0x2000
@@ -340,7 +341,8 @@ extern int d_validate(struct dentry *, struct dentry *);
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
 
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
 extern char *d_path(const struct path *, char *, int);
 extern char *d_path_with_unreachable(const struct path *, char *, int);
 extern char *dentry_path_raw(struct dentry *, char *, int);
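A hedged sketch of how the new d_absolute_path() is intended to be called (editorial illustration; it assumes the usual dcache convention that failures come back as an ERR_PTR(), and log_mount_point() is a made-up name):

static int log_mount_point(const struct path *path)
{
	char *buf, *p;
	int err = 0;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* fills the tail of buf and returns a pointer into it */
	p = d_absolute_path(path, buf, PATH_MAX);
	if (IS_ERR(p))
		err = PTR_ERR(p);	/* e.g. the name did not fit */
	else
		pr_info("mounted at %s\n", p);

	kfree(buf);
	return err;
}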
index e376270cd26e648d9a2fdecec685e0d5ae4474c3..e0ce165aa59ad195aecec385e58bce0c6e1740b9 100644 (file)
@@ -347,7 +347,18 @@ extern int __init efi_setup_pcdp_console(char *);
 #define EFI_VARIABLE_NON_VOLATILE       0x0000000000000001
 #define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
 #define EFI_VARIABLE_RUNTIME_ACCESS     0x0000000000000004
-
+#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008
+#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010
+#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
+#define EFI_VARIABLE_APPEND_WRITE      0x0000000000000040
+
+#define EFI_VARIABLE_MASK      (EFI_VARIABLE_NON_VOLATILE | \
+                               EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+                               EFI_VARIABLE_RUNTIME_ACCESS | \
+                               EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
+                               EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \
+                               EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \
+                               EFI_VARIABLE_APPEND_WRITE)
 /*
  * EFI Device Path information
  */
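A minimal sketch (an assumption about typical use, not something this patch adds) of how EFI_VARIABLE_MASK is meant to be consumed: reject attribute words carrying bits the kernel does not know about before they are handed to SetVariable.

static bool efi_variable_attrs_valid(u64 attributes)
{
	/* any bit outside EFI_VARIABLE_MASK is unknown to this kernel */
	return (attributes & ~EFI_VARIABLE_MASK) == 0;
}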
index f362733186a5a6f4a1657b0a85ee32e158095ed2..657ab55beda014c15b33833e84dc7e1496d06abf 100644 (file)
@@ -61,6 +61,7 @@ struct file;
 static inline void eventpoll_init_file(struct file *file)
 {
        INIT_LIST_HEAD(&file->f_ep_links);
+       INIT_LIST_HEAD(&file->f_tfile_llink);
 }
 
 
index 2dfa7076e8b601f5197420c6bdbab1780ca77d7f..0bfcb76bf958c55f9300a13ac568069c8de142e4 100644 (file)
@@ -196,8 +196,8 @@ struct ext2_group_desc
 
 /* Flags that should be inherited by new inodes from their parent. */
 #define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
-                          EXT2_SYNC_FL | EXT2_IMMUTABLE_FL | EXT2_APPEND_FL |\
-                          EXT2_NODUMP_FL | EXT2_NOATIME_FL | EXT2_COMPRBLK_FL|\
+                          EXT2_SYNC_FL | EXT2_NODUMP_FL |\
+                          EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
                           EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
                           EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
 
index 5e06acf95d0f84271b637f41711749c6968a32b4..7b14d251c78a794f150d07863895353f5faa3b8b 100644 (file)
@@ -180,8 +180,8 @@ struct ext3_group_desc
 
 /* Flags that should be inherited by new inodes from their parent. */
 #define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
-                          EXT3_SYNC_FL | EXT3_IMMUTABLE_FL | EXT3_APPEND_FL |\
-                          EXT3_NODUMP_FL | EXT3_NOATIME_FL | EXT3_COMPRBLK_FL|\
+                          EXT3_SYNC_FL | EXT3_NODUMP_FL |\
+                          EXT3_NOATIME_FL | EXT3_COMPRBLK_FL |\
                           EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
                           EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
 
index 34fcd09307e02ace4d8f3e9066c89be8dff1ce66..5a24a1470b0149b0fc1a50f55e7645f085120f81 100755 (executable)
@@ -1013,6 +1013,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
 /* drivers/video/fbmem.c */
 extern int register_framebuffer(struct fb_info *fb_info);
 extern int unregister_framebuffer(struct fb_info *fb_info);
+extern int unlink_framebuffer(struct fb_info *fb_info);
 extern void remove_conflicting_framebuffers(struct apertures_struct *a,
                                const char *name, bool primary);
 extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
index b5b979247863718f10da2764b3eab280a3005e9f..96b10354c741c01306ba608ee0cdfb4e1d71ee98 100644 (file)
@@ -969,6 +969,7 @@ struct file {
 #ifdef CONFIG_EPOLL
        /* Used by fs/eventpoll.c to link all the hooks to this file */
        struct list_head        f_ep_links;
+       struct list_head        f_tfile_llink;
 #endif /* #ifdef CONFIG_EPOLL */
        struct address_space    *f_mapping;
 #ifdef CONFIG_DEBUG_WRITECOUNT
@@ -1882,6 +1883,7 @@ extern int fd_statfs(int, struct kstatfs *);
 extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
 
 extern int current_umask(void);
 
@@ -2027,6 +2029,7 @@ extern void unregister_blkdev(unsigned int, const char *);
 extern struct block_device *bdget(dev_t);
 extern struct block_device *bdgrab(struct block_device *bdev);
 extern void bd_set_size(struct block_device *, loff_t size);
+extern sector_t blkdev_max_block(struct block_device *bdev);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern void invalidate_bdev(struct block_device *);
index 300d7582006e49f6ea08a3df09fc9070748c5256..7a1ce1816441420a53541c2ec62b0ddf8c1f05c7 100644 (file)
@@ -221,12 +221,6 @@ static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
        }
 }
 
-static inline char *part_unpack_uuid(const u8 *uuid, char *out)
-{
-       sprintf(out, "%pU", uuid);
-       return out;
-}
-
 static inline int disk_max_parts(struct gendisk *disk)
 {
        if (disk->flags & GENHD_FL_EXT_DEVT)
@@ -594,6 +588,7 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
 
 extern int disk_expand_part_tbl(struct gendisk *disk, int target);
 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
+extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
 extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
                                                     int partno, sector_t start,
                                                     sector_t len, int flags,
index 6427d298fbfc7d319321d8b9c287fc5761f9a2a4..530e11ba07387bfa7da5c32c800e341a9f1c4d7d 100644 (file)
@@ -129,6 +129,10 @@ enum sample_type {
 #define REG_BCICTL2             0x024
 #define TWL4030_BCI_ITHSENS    0x007
 
+/* Register and bits for GPBR1 register */
+#define TWL4030_REG_GPBR1              0x0c
+#define TWL4030_GPBR1_MADC_HFCLK_EN    (1 << 7)
+
 struct twl4030_madc_user_parms {
        int channel;
        int average;
index 771d6d85667d68a17c24c452979f8d37cc628082..a207923f3612d7444a516257dafffd3a6ede8110 100644 (file)
@@ -129,6 +129,9 @@ struct input_keymap_entry {
 
 #define EVIOCGRAB              _IOW('E', 0x90, int)                    /* Grab/Release device */
 
+#define EVIOCGSUSPENDBLOCK     _IOR('E', 0x91, int)                    /* get suspend block enable */
+#define EVIOCSSUSPENDBLOCK     _IOW('E', 0x91, int)                    /* set suspend block enable */
+
 /*
  * Device properties and quirks
  */
index f6efed0039edfdb06cbe1430588e51bf2db07aca..b9490bf393993a52d24228310eedf00192f7d460 100644 (file)
@@ -59,6 +59,8 @@
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  * IRQF_NO_THREAD - Interrupt cannot be threaded
+ * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
+ *                resume time.
  */
 #define IRQF_DISABLED          0x00000020
 #define IRQF_SAMPLE_RANDOM     0x00000040
@@ -72,6 +74,7 @@
 #define IRQF_NO_SUSPEND                0x00004000
 #define IRQF_FORCE_RESUME      0x00008000
 #define IRQF_NO_THREAD         0x00010000
+#define IRQF_EARLY_RESUME      0x00020000
 
 #define IRQF_TIMER             (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
index 8cdcc2a199ad1641c3ca652263922fbdcddf6f6a..1feeb526356527c1197a8856ce510649cbdd779b 100644 (file)
@@ -117,6 +117,8 @@ io_mapping_unmap(void __iomem *vaddr)
 
 #else
 
+#include <linux/uaccess.h>
+
 /* this struct isn't actually defined anywhere */
 struct io_mapping;
 
@@ -138,12 +140,14 @@ static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset)
 {
+       pagefault_disable();
        return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
 io_mapping_unmap_atomic(void __iomem *vaddr)
 {
+       pagefault_enable();
 }
 
 /* Non-atomic map/unmap */
index f97672a36fa8d740c16751a0dc2e0753b5092b30..265e2c3cbd1cd74d2b3efd416aa9301c90ae90ad 100644 (file)
@@ -303,7 +303,7 @@ extern void jiffies_to_timespec(const unsigned long jiffies,
 extern unsigned long timeval_to_jiffies(const struct timeval *value);
 extern void jiffies_to_timeval(const unsigned long jiffies,
                               struct timeval *value);
-extern clock_t jiffies_to_clock_t(long x);
+extern clock_t jiffies_to_clock_t(unsigned long x);
 extern unsigned long clock_t_to_jiffies(unsigned long x);
 extern u64 jiffies_64_to_clock_t(u64 x);
 extern u64 nsec_to_clock_t(u64 x);
index 47e8dbea85c893833d54069aab17e3af1021e6c7..da3e76d00105d5b8a129fec75f359c3397fe7142 100644 (file)
 }                                                      \
 )
 
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)(                    \
+{                                                      \
+       typeof(x) quot = (x) / (denom);                 \
+       typeof(x) rem  = (x) % (denom);                 \
+       (quot * (numer)) + ((rem * (numer)) / (denom)); \
+}                                                      \
+)
+
+
 #define _RET_IP_               (unsigned long)__builtin_return_address(0)
 #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
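A short illustration of why mult_frac() splits x into quotient and remainder: the naive x * numer can overflow even when the final x * numer / denom fits, whereas the split form only needs quot * numer (roughly the final result) and rem * numer (bounded by denom * numer) to fit. An illustrative helper, assuming the usual USEC_PER_SEC constant:

static u64 ticks_to_us(u64 ticks, u32 rate_hz)
{
	/* ticks * USEC_PER_SEC could overflow u64 for very large tick
	 * counts even though the scaled result still fits, so let
	 * mult_frac() keep both partial products bounded.
	 */
	return mult_frac(ticks, USEC_PER_SEC, rate_hz);
}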
 
index 10ca03d0a250e280682479c4228536120c2cee4f..f7192fb4237f6bbcf3f2347f1b964c122b54f047 100644 (file)
@@ -63,7 +63,8 @@ enum kgdb_bptype {
        BP_HARDWARE_BREAKPOINT,
        BP_WRITE_WATCHPOINT,
        BP_READ_WATCHPOINT,
-       BP_ACCESS_WATCHPOINT
+       BP_ACCESS_WATCHPOINT,
+       BP_POKE_BREAKPOINT,
 };
 
 enum kgdb_bpstate {
@@ -207,8 +208,8 @@ extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc);
 
 /* Optional functions. */
 extern int kgdb_validate_break_address(unsigned long addr);
-extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
-extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle);
+extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
+extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
 
 /**
  *     kgdb_arch_late - Perform any architecture specific initalization.
index 31ebb59cbd2f848e05c0b98b70caa6217085a9f2..82d5476e69cc723904e345d6faec7a8dc338b15f 100644 (file)
@@ -554,6 +554,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
 #ifdef CONFIG_IOMMU_API
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -567,6 +568,11 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
        return 0;
 }
 
+static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
+                                        struct kvm_memory_slot *slot)
+{
+}
+
 static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
        return -ENODEV;
index f549056fb20bd5533555918cc1b1f9805c2cdcc3..87f402ccec55567330943ab774ffb12ae21c7da8 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)     name##_lock_init()
 
 #define DEFINE_LGLOCK(name)                                            \
                                                                        \
+ DEFINE_SPINLOCK(name##_cpu_lock);                                     \
+ cpumask_t name##_cpus __read_mostly;                                  \
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
  DEFINE_LGLOCK_LOCKDEP(name);                                          \
                                                                        \
+ static int                                                            \
+ name##_lg_cpu_callback(struct notifier_block *nb,                     \
+                               unsigned long action, void *hcpu)       \
+ {                                                                     \
+       switch (action & ~CPU_TASKS_FROZEN) {                           \
+       case CPU_UP_PREPARE:                                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_set((unsigned long)hcpu, name##_cpus);              \
+               spin_unlock(&name##_cpu_lock);                          \
+               break;                                                  \
+       case CPU_UP_CANCELED: case CPU_DEAD:                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_clear((unsigned long)hcpu, name##_cpus);            \
+               spin_unlock(&name##_cpu_lock);                          \
+       }                                                               \
+       return NOTIFY_OK;                                               \
+ }                                                                     \
+ static struct notifier_block name##_lg_cpu_notifier = {               \
+       .notifier_call = name##_lg_cpu_callback,                        \
+ };                                                                    \
  void name##_lock_init(void) {                                         \
        int i;                                                          \
        LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
                lock = &per_cpu(name##_lock, i);                        \
                *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
        }                                                               \
+       register_hotcpu_notifier(&name##_lg_cpu_notifier);              \
+       get_online_cpus();                                              \
+       for_each_online_cpu(i)                                          \
+               cpu_set(i, name##_cpus);                                \
+       put_online_cpus();                                              \
  }                                                                     \
  EXPORT_SYMBOL(name##_lock_init);                                      \
                                                                        \
                                                                        \
  void name##_global_lock_online(void) {                                        \
        int i;                                                          \
-       preempt_disable();                                              \
+       spin_lock(&name##_cpu_lock);                                    \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock_online(void) {                              \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
        }                                                               \
-       preempt_enable();                                               \
+       spin_unlock(&name##_cpu_lock);                                  \
  }                                                                     \
  EXPORT_SYMBOL(name##_global_unlock_online);                           \
                                                                        \
index 25b808631cd92c50d10cf6a31b2d9b9942b62ac9..fd7ff3d91e6a920ff084beca09d10b5b9abba981 100644 (file)
@@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define rounddown_pow_of_two(n)                        \
 (                                              \
        __builtin_constant_p(n) ? (             \
-               (n == 1) ? 0 :                  \
                (1UL << ilog2(n))) :            \
        __rounddown_pow_of_two(n)               \
  )
index 23fcdfcba81b33ef0d5e2957c591f8baeb1f1836..b8ba85544721fd911ce52c61e08a8ba88328ecfe 100644 (file)
@@ -6,6 +6,8 @@
 
 #if BITS_PER_LONG == 64
 
+#define div64_long(x,y) div64_s64((x),(y))
+
 /**
  * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
  *
@@ -45,6 +47,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
 
 #elif BITS_PER_LONG == 32
 
+#define div64_long(x,y) div_s64((x),(y))
+
 #ifndef div_u64_rem
 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
index 50940da6adf36d7a544c32448b6873f83cef2224..313a00eca40ed578833b8a0950c3e407dfb0de5a 100644 (file)
@@ -119,6 +119,8 @@ struct zone_reclaim_stat*
 mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                        struct task_struct *p);
+extern void mem_cgroup_replace_page_cache(struct page *oldpage,
+                                       struct page *newpage);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
@@ -370,6 +372,10 @@ static inline
 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 {
 }
+static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
+                               struct page *newpage)
+{
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
index 07890ac03d817dfd662208c8af8ec71970e2cac1..f59179b078847e3cd657840302c934df55a1affb 100644 (file)
@@ -355,36 +355,50 @@ static inline struct page *compound_head(struct page *page)
        return page;
 }
 
+/*
+ * The atomic page->_mapcount starts from -1: so that transitions
+ * both from it and to it can be tracked, using atomic_inc_and_test
+ * and atomic_add_negative(-1).
+ */
+static inline void reset_page_mapcount(struct page *page)
+{
+       atomic_set(&(page)->_mapcount, -1);
+}
+
+static inline int page_mapcount(struct page *page)
+{
+       return atomic_read(&(page)->_mapcount) + 1;
+}
+
 static inline int page_count(struct page *page)
 {
        return atomic_read(&compound_head(page)->_count);
 }
 
+static inline void get_huge_page_tail(struct page *page)
+{
+       /*
+        * __split_huge_page_refcount() cannot run
+        * from under us.
+        */
+       VM_BUG_ON(page_mapcount(page) < 0);
+       VM_BUG_ON(atomic_read(&page->_count) != 0);
+       atomic_inc(&page->_mapcount);
+}
+
+extern bool __get_page_tail(struct page *page);
+
 static inline void get_page(struct page *page)
 {
+       if (unlikely(PageTail(page)))
+               if (likely(__get_page_tail(page)))
+                       return;
        /*
         * Getting a normal page or the head of a compound page
-        * requires to already have an elevated page->_count. Only if
-        * we're getting a tail page, the elevated page->_count is
-        * required only in the head page, so for tail pages the
-        * bugcheck only verifies that the page->_count isn't
-        * negative.
+        * requires an already elevated page->_count.
         */
-       VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
+       VM_BUG_ON(atomic_read(&page->_count) <= 0);
        atomic_inc(&page->_count);
-       /*
-        * Getting a tail page will elevate both the head and tail
-        * page->_count(s).
-        */
-       if (unlikely(PageTail(page))) {
-               /*
-                * This is safe only because
-                * __split_huge_page_refcount can't run under
-                * get_page().
-                */
-               VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
-               atomic_inc(&page->first_page->_count);
-       }
 }
 
 static inline struct page *virt_to_head_page(const void *x)
@@ -802,21 +816,6 @@ static inline pgoff_t page_index(struct page *page)
        return page->index;
 }
 
-/*
- * The atomic page->_mapcount, like _count, starts from -1:
- * so that transitions both from it and to it can be tracked,
- * using atomic_inc_and_test and atomic_add_negative(-1).
- */
-static inline void reset_page_mapcount(struct page *page)
-{
-       atomic_set(&(page)->_mapcount, -1);
-}
-
-static inline int page_mapcount(struct page *page)
-{
-       return atomic_read(&(page)->_mapcount) + 1;
-}
-
 /*
  * Return true if this page is mapped into pagetables.
  */
index 027935c86c688df2315d7ea4862d2c8e233ab8aa..059839c7000da9fa86556d11f4de21e10c7c0602 100644 (file)
@@ -36,10 +36,24 @@ struct page {
                                         * updated asynchronously */
        atomic_t _count;                /* Usage count, see below. */
        union {
-               atomic_t _mapcount;     /* Count of ptes mapped in mms,
-                                        * to show when page is mapped
-                                        * & limit reverse map searches.
-                                        */
+               /*
+                * Count of ptes mapped in
+                * mms, to show when page is
+                * mapped & limit reverse map
+                * searches.
+                *
+                * Used also for tail pages
+                * refcounting instead of
+                * _count. Tail pages cannot
+                * be mapped and keeping the
+                * tail page _count zero at
+                * all times guarantees
+                * get_page_unless_zero() will
+                * never succeed on tail
+                * pages.
+                */
+               atomic_t _mapcount;
+
                struct {                /* SLUB */
                        u16 inuse;
                        u16 objects;
index 397c8338f2abdca01e454e02866c6e34c1884aa8..3b7d4c5001e91c3358fb79c55bcc6682b857f432 100755 (executable)
@@ -279,6 +279,7 @@ struct mmc_host {
 
        unsigned int            sdio_irqs;
        struct task_struct      *sdio_irq_thread;
+       bool                    sdio_irq_pending;
        atomic_t                sdio_irq_thread_abort;
 
        mmc_pm_flag_t           pm_flags;       /* requested pm features */
@@ -353,6 +354,7 @@ extern void mmc_request_done(struct mmc_host *, struct mmc_request *);
 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
 {
        host->ops->enable_sdio_irq(host, 0);
+       host->sdio_irq_pending = true;
        wake_up_process(host->sdio_irq_thread);
 }
 
index eba45ea1029899aaa57da823db1fddacadae9944..82ab16b8cf9bdc38af20f9150d65cae34fffa0ba 100644 (file)
@@ -49,6 +49,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 #define LOOKUP_FOLLOW          0x0001
 #define LOOKUP_DIRECTORY       0x0002
 #define LOOKUP_CONTINUE                0x0004
+#define LOOKUP_AUTOMOUNT       0x0008
 
 #define LOOKUP_PARENT          0x0010
 #define LOOKUP_REVAL           0x0020
@@ -67,6 +68,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 #define LOOKUP_EMPTY           0x4000
 
 extern int user_path_at(int, const char __user *, unsigned, struct path *);
+extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
 
 #define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
 #define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
index 33b5968e738187fa9dda5d9dd9af75cb47e26ded..c6d6d486d314d394beff4ef9462ad9c3d0622d06 100644 (file)
@@ -1453,15 +1453,6 @@ static inline bool netdev_uses_dsa_tags(struct net_device *dev)
        return 0;
 }
 
-#ifndef CONFIG_NET_NS
-static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
-       skb->dev = dev;
-}
-#else /* CONFIG_NET_NS */
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
-#endif
-
 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
 {
 #ifdef CONFIG_NET_DSA_TAG_TRAILER
index 208ae938733143ce0ba2117378423d443a3d8312..faaa28b3d0613e9ff68618f0fa89db80158517e9 100644 (file)
@@ -4,6 +4,7 @@
  * Header file for Xtables timer target module.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and forward-ported to 2.6.34
 #include <linux/types.h>
 
 #define MAX_IDLETIMER_LABEL_SIZE 28
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
 
 struct idletimer_tg_info {
        __u32 timeout;
 
        char label[MAX_IDLETIMER_LABEL_SIZE];
 
+       /* Use netlink messages for notification in addition to sysfs */
+       __u8 send_nl_msg;
+
        /* for kernel module internal use only */
        struct idletimer_tg *timer __attribute__((aligned(8)));
 };
index a9dd89552f9c61e804249e5b21d2504251925f2f..e8e9cc3d42815e693d7edee81d32c274d29ed98f 100644 (file)
@@ -26,6 +26,7 @@
 #define NETLINK_ECRYPTFS       19
 #define NETLINK_RDMA           20
 
+
 #define MAX_LINKS 32           
 
 struct sockaddr_nl {
index b522370fcc228334636a13d7bd9b856a7050ba37..acdc370086a2ff28b4fa287739e414f056647d67 100644 (file)
@@ -410,6 +410,9 @@ extern const struct inode_operations nfs_file_inode_operations;
 extern const struct inode_operations nfs3_file_inode_operations;
 #endif /* CONFIG_NFS_V3 */
 extern const struct file_operations nfs_file_operations;
+#ifdef CONFIG_NFS_V4
+extern const struct file_operations nfs4_file_operations;
+#endif /* CONFIG_NFS_V4 */
 extern const struct address_space_operations nfs_file_aops;
 extern const struct address_space_operations nfs_dir_aops;
 
index be2eba7725a86aadf4c6a6783ecfe1c1cae00318..0012fc3d2c1bfe3e427ec63aaa5175a61ab0627f 100644 (file)
@@ -1149,6 +1149,7 @@ struct nfs_rpc_ops {
        const struct dentry_operations *dentry_ops;
        const struct inode_operations *dir_inode_ops;
        const struct inode_operations *file_inode_ops;
+       const struct file_operations *file_ops;
 
        int     (*getroot) (struct nfs_server *, struct nfs_fh *,
                            struct nfs_fsinfo *);
index c7ccaae15af60074d4e2da9d8f645b91608e3265..27d2dd9e671730ca8b7c5588b02a8c550229ed8e 100644 (file)
@@ -745,8 +745,14 @@ enum nl80211_commands {
  *
  * @NL80211_ATTR_MAX_NUM_SCAN_SSIDS: number of SSIDs you can scan with
  *     a single scan request, a wiphy attribute.
+ * @NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS: number of SSIDs you can
+ *     scan with a single scheduled scan request, a wiphy attribute.
  * @NL80211_ATTR_MAX_SCAN_IE_LEN: maximum length of information elements
  *     that can be added to a scan request
+ * @NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN: maximum length of information
+ *     elements that can be added to a scheduled scan request
+ * @NL80211_ATTR_MAX_MATCH_SETS: maximum number of sets that can be
+ *     used with @NL80211_ATTR_SCHED_SCAN_MATCH, a wiphy attribute.
  *
  * @NL80211_ATTR_SCAN_FREQUENCIES: nested attribute with frequencies (in MHz)
  * @NL80211_ATTR_SCAN_SSIDS: nested attribute with SSIDs, leave out for passive
@@ -987,6 +993,24 @@ enum nl80211_commands {
 
  * @NL80211_ATTR_SCHED_SCAN_INTERVAL: Interval between scheduled scan
  *     cycles, in msecs.
+
+ * @NL80211_ATTR_SCHED_SCAN_MATCH: Nested attribute with one or more
+ *     sets of attributes to match during scheduled scans.  Only BSSs
+ *     that match any of the sets will be reported.  These are
+ *     pass-thru filter rules.
+ *     For a match to succeed, the BSS must match all attributes of a
+ *     set.  Since not all hardware supports matching all types of
+ *     attributes, there is no guarantee that the reported BSSs are
+ *     fully complying with the match sets and userspace needs to be
+ *     able to ignore them by itself.
+ *     Thus, the implementation is somewhat hardware-dependent, but
+ *     this is only an optimization and the userspace application
+ *     needs to handle all the non-filtered results anyway.
+ *     If the match attributes don't make sense when combined with
+ *     the values passed in @NL80211_ATTR_SCAN_SSIDS (e.g. if an SSID
+ *     is included in the probe request, but the match attributes
+ *     will never let it go through), -EINVAL may be returned.
+ *     If omitted, no filtering is done.
  *
  * @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported
  *     interface combinations. In each nested item, it contains attributes
@@ -1194,6 +1218,26 @@ enum nl80211_attrs {
        NL80211_ATTR_INTERFACE_COMBINATIONS,
        NL80211_ATTR_SOFTWARE_IFTYPES,
 
+       NL80211_ATTR_REKEY_DATA,
+
+       NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
+       NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
+
+       NL80211_ATTR_SCAN_SUPP_RATES,
+
+       NL80211_ATTR_HIDDEN_SSID,
+
+       NL80211_ATTR_IE_PROBE_RESP,
+       NL80211_ATTR_IE_ASSOC_RESP,
+
+       NL80211_ATTR_STA_WME,
+       NL80211_ATTR_SUPPORT_AP_UAPSD,
+
+       NL80211_ATTR_ROAM_SUPPORT,
+
+       NL80211_ATTR_SCHED_SCAN_MATCH,
+       NL80211_ATTR_MAX_MATCH_SETS,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -1652,6 +1696,26 @@ enum nl80211_reg_rule_attr {
        NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
 };
 
+/**
+ * enum nl80211_sched_scan_match_attr - scheduled scan match attributes
+ * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
+ * only report BSS with matching SSID.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
+ *     attribute number currently defined
+ * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_sched_scan_match_attr {
+       __NL80211_SCHED_SCAN_MATCH_ATTR_INVALID,
+
+       NL80211_ATTR_SCHED_SCAN_MATCH_SSID,
+
+       /* keep last */
+       __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
+       NL80211_SCHED_SCAN_MATCH_ATTR_MAX =
+               __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST - 1
+};
+
 /**
  * enum nl80211_reg_rule_flags - regulatory rule flags
  *
index 7cea7b6c14133628c5f543e729852bd3db085b3c..c8320144fe790cc3fb8dc5fe593092412a838bae 100644 (file)
@@ -29,7 +29,7 @@ extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
 extern void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
 extern void pci_disable_link_state(struct pci_dev *pdev, int state);
 extern void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
-extern void pcie_clear_aspm(void);
+extern void pcie_clear_aspm(struct pci_bus *bus);
 extern void pcie_no_aspm(void);
 #else
 static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
@@ -47,7 +47,7 @@ static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
 static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
 {
 }
-static inline void pcie_clear_aspm(void)
+static inline void pcie_clear_aspm(struct pci_bus *bus)
 {
 }
 static inline void pcie_no_aspm(void)
index c446b5ca2d38e0e58b650b217bec0f0f90945dd7..ff5970b7a176f213a6564c25ae337c921037491c 100644 (file)
@@ -174,6 +174,8 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1,
        /* Device configuration is irrevocably lost if disabled into D3 */
        PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+       /* Device causes system crash if in D3 during S3 sleep */
+       PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
 };
 
 enum pci_irq_reroute_variant {
index e8840964aca135c023dd2780bd36703197ea936b..dad7d9a4abce46eccc51415a2a986930762af63f 100644 (file)
 #define  PCI_EXP_TYPE_DOWNSTREAM 0x6   /* Downstream Port */
 #define  PCI_EXP_TYPE_PCI_BRIDGE 0x7   /* PCI/PCI-X Bridge */
 #define  PCI_EXP_TYPE_RC_END   0x9     /* Root Complex Integrated Endpoint */
-#define  PCI_EXP_TYPE_RC_EC    0x10    /* Root Complex Event Collector */
+#define  PCI_EXP_TYPE_RC_EC    0xa     /* Root Complex Event Collector */
 #define PCI_EXP_FLAGS_SLOT     0x0100  /* Slot implemented */
 #define PCI_EXP_FLAGS_IRQ      0x3e00  /* Interrupt message number */
 #define PCI_EXP_DEVCAP         4       /* Device capabilities */
index 7da5fa845959ab3dc9c2a393d7336a318c113aeb..4d3f63ac242ad61f473db4a7d1150f6ac1755a9d 100644 (file)
@@ -418,7 +418,7 @@ struct phy_driver {
 
        /*
         * Requests a Tx timestamp for 'skb'. The phy driver promises
-        * to deliver it to the socket's error queue as soon as a
+        * to deliver it using skb_complete_tx_timestamp() as soon as a
         * timestamp becomes available. One of the PTP_CLASS_ values
         * is passed in 'type'.
         */
index 77257c92155aa4efb6ea68d9c99298426aedc60b..0072a5366e97f581fadb2d99840177534c5dfae8 100644 (file)
@@ -8,6 +8,7 @@
 #define PIPE_BUF_FLAG_LRU      0x01    /* page is on the LRU */
 #define PIPE_BUF_FLAG_ATOMIC   0x02    /* was atomically mapped */
 #define PIPE_BUF_FLAG_GIFT     0x04    /* page is a gift */
+#define PIPE_BUF_FLAG_PACKET   0x08    /* read() as a packet */
 
 /**
  *     struct pipe_buffer - a linux kernel pipe buffer
index cf793bbbd05e18e6d87e98dd1e3e4f9df767d8d6..22653d7c3f8de85cdf3b8041f038637b82c70986 100644 (file)
@@ -81,7 +81,11 @@ void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
  * Limit the time part in order to ensure there are some bits left for the
  * cycle counter and fraction multiply.
  */
+#if BITS_PER_LONG == 32
 #define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
+#else
+#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
+#endif
 
 #define PROP_FRAC_SHIFT                (BITS_PER_LONG - PROP_MAX_SHIFT - 1)
 #define PROP_FRAC_BASE         (1UL << PROP_FRAC_SHIFT)
index 8abee65562230fe05174d2ded6bc8ec50249348b..686f37327a4949f50ad3d02b888f8f445f291ece 100644 (file)
@@ -335,8 +335,11 @@ static inline int copy_regset_to_user(struct task_struct *target,
 {
        const struct user_regset *regset = &view->regsets[setno];
 
+       if (!regset->get)
+               return -EOPNOTSUPP;
+
        if (!access_ok(VERIFY_WRITE, data, size))
-               return -EIO;
+               return -EFAULT;
 
        return regset->get(target, regset, offset, size, NULL, data);
 }
@@ -358,8 +361,11 @@ static inline int copy_regset_from_user(struct task_struct *target,
 {
        const struct user_regset *regset = &view->regsets[setno];
 
+       if (!regset->set)
+               return -EOPNOTSUPP;
+
        if (!access_ok(VERIFY_READ, data, size))
-               return -EIO;
+               return -EFAULT;
 
        return regset->set(target, regset, offset, size, NULL, data);
 }
index c6db9fb33c448f28197ffb6d135689daf58625b6..bb1fac5b8ee87f99def8e3565c40ac18f4e930e9 100644 (file)
@@ -141,7 +141,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
        unsigned ret;
 
 repeat:
-       ret = s->sequence;
+       ret = ACCESS_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
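For context, a hedged reader-side sketch (illustrative names, not from this patch) of the loop this helper serves; the sequence word has to be re-read on every pass, which is what the ACCESS_ONCE() above enforces at the compiler level:

static seqcount_t my_seq;	/* initialised elsewhere with seqcount_init() */
static u64 my_a, my_b;

static u64 my_read_pair(void)
{
	unsigned seq;
	u64 a, b;

	do {
		seq = read_seqcount_begin(&my_seq);	/* waits out odd (in-progress) counts */
		a = my_a;
		b = my_b;
	} while (read_seqcount_retry(&my_seq, seq));	/* retry if a writer interleaved */

	return a + b;
}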
index e2accb3164d8d969245e4567b0f308d4a35cb152..d0de882c0d96d5277f23306bf1d2884c40807292 100644 (file)
@@ -24,7 +24,7 @@ struct sigma_firmware {
 struct sigma_firmware_header {
        unsigned char magic[7];
        u8 version;
-       u32 crc;
+       __le32 crc;
 };
 
 enum {
@@ -40,19 +40,14 @@ enum {
 struct sigma_action {
        u8 instr;
        u8 len_hi;
-       u16 len;
-       u16 addr;
+       __le16 len;
+       __be16 addr;
        unsigned char payload[];
 };
 
 static inline u32 sigma_action_len(struct sigma_action *sa)
 {
-       return (sa->len_hi << 16) | sa->len;
-}
-
-static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
-{
-       return sizeof(*sa) + payload_len + (payload_len % 2);
+       return (sa->len_hi << 16) | le16_to_cpu(sa->len);
 }
 
 extern int process_sigma_firmware(struct i2c_client *client, const char *name);
index 3ff4961da9b514992cf0edabf07c1b08d2121027..247399b2979a9a331c976e9f413e4dd122ef7711 100644 (file)
@@ -61,13 +61,16 @@ static inline void signalfd_notify(struct task_struct *tsk, int sig)
                wake_up(&tsk->sighand->signalfd_wqh);
 }
 
+extern void signalfd_cleanup(struct sighand_struct *sighand);
+
 #else /* CONFIG_SIGNALFD */
 
 static inline void signalfd_notify(struct task_struct *tsk, int sig) { }
 
+static inline void signalfd_cleanup(struct sighand_struct *sighand) { }
+
 #endif /* CONFIG_SIGNALFD */
 
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SIGNALFD_H */
-
index c0a4f3ab0cc047490eb9eaf20d9cf577cc5c2d65..37b643bd680e20b88445ea3e660d954709953d72 100644 (file)
@@ -1370,6 +1370,16 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
 }
 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
 
+static inline void skb_mac_header_rebuild(struct sk_buff *skb)
+{
+       if (skb_mac_header_was_set(skb)) {
+               const unsigned char *old_mac = skb_mac_header(skb);
+
+               skb_set_mac_header(skb, -skb->mac_len);
+               memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+       }
+}
+
 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
 {
        return skb->csum_start - skb_headroom(skb);
@@ -1623,8 +1633,6 @@ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
 {
        int delta = 0;
 
-       if (headroom < NET_SKB_PAD)
-               headroom = NET_SKB_PAD;
        if (headroom > skb_headroom(skb))
                delta = headroom - skb_headroom(skb);
 
@@ -1996,8 +2004,13 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
 /**
  * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
  *
+ * PHY drivers may accept clones of transmitted packets for
+ * timestamping via their phy_driver.txtstamp method. These drivers
+ * must call this function to return the skb back to the stack, with
+ * or without a timestamp.
+ *
 * @skb: clone of the original outgoing packet
- * @hwtstamps: hardware time stamps
+ * @hwtstamps: hardware time stamps, may be NULL if not available
  *
  */
 void skb_complete_tx_timestamp(struct sk_buff *skb,
index 4ef98e422fde2c13169b851670dbc9d511e25088..635c2136ce3ec2be06d6ef7a1e9e03ce58ce0791 100644 (file)
@@ -261,7 +261,7 @@ struct ucred {
 #define MSG_NOSIGNAL   0x4000  /* Do not generate SIGPIPE */
 #define MSG_MORE       0x8000  /* Sender will send more */
 #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
-
+#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
 #define MSG_EOF         MSG_FIN
 
 #define MSG_CMSG_CLOEXEC 0x40000000    /* Set close_on_exit for file
index 85c50b40759de9df4528077a64b6346a472f9dfb..c84e9741cb2a25471838c2c31503b8d550c8bfbf 100644 (file)
@@ -34,7 +34,7 @@ struct svc_sock {
 /*
  * Function prototypes.
  */
-void           svc_close_all(struct list_head *);
+void           svc_close_all(struct svc_serv *);
 int            svc_recv(struct svc_rqst *, long);
 int            svc_send(struct svc_rqst *);
 void           svc_drop(struct svc_rqst *);
diff --git a/include/linux/sw_sync.h b/include/linux/sw_sync.h
new file mode 100644 (file)
index 0000000..bd6f208
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * include/linux/sw_sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SW_SYNC_H
+#define _LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#include <linux/sync.h>
+
+struct sw_sync_timeline {
+       struct  sync_timeline   obj;
+
+       u32                     value;
+};
+
+struct sw_sync_pt {
+       struct sync_pt          pt;
+
+       u32                     value;
+};
+
+struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
+void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
+
+struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+
+#endif /* __KERNEL__ */
+
+struct sw_sync_create_fence_data {
+       __u32   value;
+       char    name[32];
+       __s32   fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC      'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE       _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+               struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC                        _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+
+#endif /* _LINUX_SW_SYNC_H */
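A hedged userspace sketch of the two ioctls above; the timeline fd is assumed to come from opening whatever device node the sw_sync driver registers (commonly /dev/sw_sync, but that is an assumption, not something this header states), and the include path is likewise assumed.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/sw_sync.h>		/* assumed install path for this header */

/* Create a fence that signals once the timeline reaches 'value'. */
static int example_create_fence(int timeline_fd, unsigned int value)
{
	struct sw_sync_create_fence_data data;

	memset(&data, 0, sizeof(data));
	data.value = value;
	strncpy(data.name, "example_fence", sizeof(data.name) - 1);
	if (ioctl(timeline_fd, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return -1;
	return data.fence;		/* fd of the new fence */
}

/* Advance the timeline, signalling fences at or below the new value. */
static int example_inc_timeline(int timeline_fd, unsigned int count)
{
	__u32 arg = count;

	return ioctl(timeline_fd, SW_SYNC_IOC_INC, &arg);
}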
diff --git a/include/linux/sync.h b/include/linux/sync.h
new file mode 100644 (file)
index 0000000..4f19938
--- /dev/null
@@ -0,0 +1,390 @@
+/*
+ * include/linux/sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/types.h>
+#ifdef __KERNEL__
+
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+struct sync_timeline;
+struct sync_pt;
+struct sync_fence;
+
+/**
+ * struct sync_timeline_ops - sync object implementation ops
+ * @driver_name:       name of the implementation
+ * @dup:               duplicate a sync_pt
+ * @has_signaled:      returns:
+ *                       1 if pt has signaled
+ *                       0 if pt has not signaled
+ *                      <0 on error
+ * @compare:           returns:
+ *                       1 if b will signal before a
+ *                       0 if a and b will signal at the same time
+ *                      -1 if a will signal before b
+ * @free_pt:           called before sync_pt is freed
+ * @release_obj:       called before sync_timeline is freed
+ * @print_obj:         print additional debug information about sync_timeline.
+ *                       should not print a newline
+ * @print_pt:          print additional debug information about sync_pt.
+ *                       should not print a newline
+ * @fill_driver_data:  write implementation-specific driver data to data.
+ *                       should return an error if there is not enough room
+ *                       as specified by size.  This information is returned
+ *                       to userspace by SYNC_IOC_FENCE_INFO.
+ */
+struct sync_timeline_ops {
+       const char *driver_name;
+
+       /* required */
+       struct sync_pt *(*dup)(struct sync_pt *pt);
+
+       /* required */
+       int (*has_signaled)(struct sync_pt *pt);
+
+       /* required */
+       int (*compare)(struct sync_pt *a, struct sync_pt *b);
+
+       /* optional */
+       void (*free_pt)(struct sync_pt *sync_pt);
+
+       /* optional */
+       void (*release_obj)(struct sync_timeline *sync_timeline);
+
+       /* optional */
+       void (*print_obj)(struct seq_file *s,
+                         struct sync_timeline *sync_timeline);
+
+       /* optional */
+       void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
+
+       /* optional */
+       int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
+};
+
+/**
+ * struct sync_timeline - sync object
+ * @ops:               ops that define the implementation of the sync_timeline
+ * @name:              name of the sync_timeline. Useful for debugging
+ * @destroyed:         set when sync_timeline is destroyed
+ * @child_list_head:   list of children sync_pts for this sync_timeline
+ * @child_list_lock:   lock protecting @child_list_head, destroyed, and
+ *                       sync_pt.status
+ * @active_list_head:  list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list:        membership in global sync_timeline_list
+ */
+struct sync_timeline {
+       const struct sync_timeline_ops  *ops;
+       char                    name[32];
+
+       /* protected by child_list_lock */
+       bool                    destroyed;
+
+       struct list_head        child_list_head;
+       spinlock_t              child_list_lock;
+
+       struct list_head        active_list_head;
+       spinlock_t              active_list_lock;
+
+       struct list_head        sync_timeline_list;
+};
+
+/**
+ * struct sync_pt - sync point
+ * @parent:            sync_timeline to which this sync_pt belongs
+ * @child_list:                membership in sync_timeline.child_list_head
+ * @active_list:       membership in sync_timeline.active_list_head
+ * @fence:             sync_fence to which the sync_pt belongs
+ * @pt_list:           membership in sync_fence.pt_list_head
+ * @status:            1: signaled, 0:active, <0: error
+ * @timestamp:         time at which sync_pt status transitioned from active to
+ *                       signaled or error.
+ */
+struct sync_pt {
+       struct sync_timeline            *parent;
+       struct list_head        child_list;
+
+       struct list_head        active_list;
+
+       struct sync_fence       *fence;
+       struct list_head        pt_list;
+
+       /* protected by parent->active_list_lock */
+       int                     status;
+
+       ktime_t                 timestamp;
+};
+
+/**
+ * struct sync_fence - sync fence
+ * @file:              file representing this fence
+ * @name:              name of sync_fence.  Useful for debugging
+ * @pt_list_head:      list of sync_pts in this fence.  Immutable once the
+ *                       fence is created
+ * @waiter_list_head:  list of asynchronous waiters on this fence
+ * @waiter_list_lock:  lock protecting @waiter_list_head and @status
+ * @status:            1: signaled, 0:active, <0: error
+ *
+ * @wq:                        wait queue for fence signaling
+ * @sync_fence_list:   membership in global fence list
+ */
+struct sync_fence {
+       struct file             *file;
+       char                    name[32];
+
+       /* this list is immutable once the fence is created */
+       struct list_head        pt_list_head;
+
+       struct list_head        waiter_list_head;
+       spinlock_t              waiter_list_lock; /* also protects status */
+       int                     status;
+
+       wait_queue_head_t       wq;
+
+       struct list_head        sync_fence_list;
+};
+
+/**
+ * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
+ * @waiter_list:       membership in sync_fence.waiter_list_head
+ * @callback:          function pointer to call when fence signals
+ * @callback_data:     pointer to pass to @callback
+ */
+struct sync_fence_waiter {
+       struct list_head        waiter_list;
+
+       void (*callback)(struct sync_fence *fence, void *data);
+       void *callback_data;
+};
+
+/*
+ * API for sync_timeline implementers
+ */
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @ops:       specifies the implementation ops for the object
+ * @size:      size to allocate for this obj
+ * @name:      sync_timeline name
+ *
+ * Creates a new sync_timeline which will use the implementation specified by
+ * @ops.  @size bytes will be allocated, allowing implementation-specific
+ * data to be kept after the generic sync_timeline struct.
+ */
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+                                          int size, const char *name);
+
+/**
+ * sync_timeline_destroy() - destroys a sync object
+ * @obj:       sync_timeline to destroy
+ *
+ * A sync implementation should call this when @obj is going away
+ * (i.e. module unload).  @obj won't actually be freed until all its children
+ * sync_pts are freed.
+ */
+void sync_timeline_destroy(struct sync_timeline *obj);
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj:       sync_timeline to signal
+ *
+ * A sync implementation should call this any time one of its sync_pts
+ * has signaled or has an error condition.
+ */
+void sync_timeline_signal(struct sync_timeline *obj);
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent:    sync_pt's parent sync_timeline
+ * @size:      size to allocate for this pt
+ *
+ * Creates a new sync_pt as a child of @parent.  @size bytes will be
+ * allocated, allowing implementation-specific data to be kept after
+ * the generic sync_pt struct.
+ */
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
+
+/**
+ * sync_pt_free() - frees a sync pt
+ * @pt:                sync_pt to free
+ *
+ * This should only be called on sync_pts which have been created but
+ * not added to a fence.
+ */
+void sync_pt_free(struct sync_pt *pt);
+
+/**
+ * sync_fence_create() - creates a sync fence
+ * @name:      name of fence to create
+ * @pt:                sync_pt to add to the fence
+ *
+ * Creates a fence containing @pt.  Once this is called, the fence takes
+ * ownership of @pt.
+ */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
+
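Tying the implementer API together, the following sketch shows how a driver might export a new fence fd for a point on its timeline; my_export_fence and the my_pt type reuse the hypothetical names from the sketch above, and error paths are kept minimal.

/* Hypothetical sketch: export a new fence fd for a point on timeline 'tl'. */
static int my_export_fence(struct sync_timeline *tl, u32 value)
{
        struct my_pt *mpt;
        struct sync_fence *fence;
        int fd = get_unused_fd();

        if (fd < 0)
                return fd;

        mpt = (struct my_pt *)sync_pt_create(tl, sizeof(*mpt));
        if (!mpt) {
                put_unused_fd(fd);
                return -ENOMEM;
        }
        mpt->value = value;

        /* the fence takes ownership of the sync_pt from here on */
        fence = sync_fence_create("my_fence", &mpt->pt);
        if (!fence) {
                sync_pt_free(&mpt->pt);         /* pt was never added to a fence */
                put_unused_fd(fd);
                return -ENOMEM;
        }

        sync_fence_install(fence, fd);          /* fd now references the fence */
        return fd;
}
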
+/*
+ * API for sync_fence consumers
+ */
+
+/**
+ * sync_fence_merge() - merge two fences
+ * @name:      name of new fence
+ * @a:         fence a
+ * @b:         fence b
+ *
+ * Creates a new fence which contains copies of all the sync_pts in both
+ * @a and @b.  @a and @b remain valid, independent fences.
+ */
+struct sync_fence *sync_fence_merge(const char *name,
+                                   struct sync_fence *a, struct sync_fence *b);
+
+/**
+ * sync_fence_fdget() - get a fence from an fd
+ * @fd:                fd referencing a fence
+ *
+ * Ensures @fd references a valid fence, increments the refcount of the backing
+ * file, and returns the fence.
+ */
+struct sync_fence *sync_fence_fdget(int fd);
+
+/**
+ * sync_fence_put() - puts a reference on a sync fence
+ * @fence:     fence to put
+ *
+ * Puts a reference on @fence.  If this is the last reference, the fence and
+ * all its sync_pts will be freed.
+ */
+void sync_fence_put(struct sync_fence *fence);
+
+/**
+ * sync_fence_install() - installs a fence into a file descriptor
+ * @fence:     fence to install
+ * @fd:                file descriptor in which to install the fence
+ *
+ * Installs @fence into @fd.  @fd should be acquired through get_unused_fd().
+ */
+void sync_fence_install(struct sync_fence *fence, int fd);
+
+/**
+ * sync_fence_wait_async() - registers an async wait on the fence
+ * @fence:             fence to wait on
+ * @callback:          function to call when @fence signals or has an error
+ * @callback_data:     data to pass to the callback
+ *
+ * Returns 1 if @fence has already signaled.
+ *
+ * Registers a callback to be called when @fence signals or has an error
+ */
+int sync_fence_wait_async(struct sync_fence *fence,
+                         void (*callback)(struct sync_fence *, void *data),
+                         void *callback_data);
+
+/**
+ * sync_fence_wait() - wait on fence
+ * @fence:     fence to wait on
+ * @timeout:   timeout in ms
+ *
+ * Wait for @fence to be signaled or have an error.  Waits indefinitely
+ * if @timeout = 0.
+ */
+int sync_fence_wait(struct sync_fence *fence, long timeout);
+
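On the consumer side, a driver handed a fence fd from userspace might block on it as in the sketch below; my_wait_on_fd and the 1000 ms timeout are illustrative assumptions.

/* Hypothetical sketch: block on a fence fd handed in by userspace. */
static int my_wait_on_fd(int fd)
{
        struct sync_fence *fence = sync_fence_fdget(fd);
        int err;

        if (!fence)
                return -EINVAL;

        err = sync_fence_wait(fence, 1000);     /* up to 1000 ms; 0 would wait forever */
        sync_fence_put(fence);                  /* drop the reference taken by fdget */
        return err;
}
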
+#endif /* __KERNEL__ */
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2:       file descriptor of second fence
+ * @name:      name of new fence
+ * @fence:     returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+       __s32   fd2; /* fd of second fence */
+       char    name[32]; /* name of new fence */
+       __s32   fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len:               length of sync_pt_info including any driver_data
+ * @obj_name:          name of parent sync_timeline
+ * @driver_name:       name of driver implementing the parent
+ * @status:            status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns:      timestamp of status change in nanoseconds
+ * @driver_data:       any driver-dependent data
+ */
+struct sync_pt_info {
+       __u32   len;
+       char    obj_name[32];
+       char    driver_name[32];
+       __s32   status;
+       __u64   timestamp_ns;
+
+       __u8    driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len:       ioctl caller writes the size of the buffer it's passing in.
+ *             ioctl returns the length of the sync_fence_info_data returned
+ *             to userspace, including pt_info.
+ * @name:      name of fence
+ * @status:    status of fence. 1: signaled 0:active <0:error
+ * @pt_info:   a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+       __u32   len;
+       char    name[32];
+       __s32   status;
+
+       __u8    pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC         '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * pass timeout in milliseconds.
+ */
+#define SYNC_IOC_WAIT          _IOW(SYNC_IOC_MAGIC, 0, __u32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data.  Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE         _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
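A userspace sketch of driving this ioctl follows; merge_fences, the "merged" name, and the includes are assumptions, and error handling is trimmed.

#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical userspace sketch: merge two fence fds into a single new fence. */
static int merge_fences(int fd1, int fd2)
{
        struct sync_merge_data data;

        memset(&data, 0, sizeof(data));
        data.fd2 = fd2;
        strncpy(data.name, "merged", sizeof(data.name) - 1);
        if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
                return -1;
        return data.fence;      /* fd of the new fence; fd1 and fd2 stay valid */
}
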
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len.  On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO    _IOWR(SYNC_IOC_MAGIC, 2,\
+       struct sync_fence_info_data)
+
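As a sketch of the iteration described above, a userspace caller might walk the returned pt_info array as below; dump_fence and the 4096-byte buffer are assumptions.

#include <stdio.h>
#include <sys/ioctl.h>

/* Hypothetical userspace sketch: print every sync_pt in a fence fd. */
static void dump_fence(int fd)
{
        char buf[4096];         /* assumed large enough for this fence */
        struct sync_fence_info_data *data = (struct sync_fence_info_data *)buf;
        __u32 off;

        data->len = sizeof(buf);        /* tell the kernel how big the buffer is */
        if (ioctl(fd, SYNC_IOC_FENCE_INFO, data) < 0)
                return;

        printf("fence %s: status %d\n", data->name, data->status);
        for (off = sizeof(*data); off < data->len; ) {
                struct sync_pt_info *pt = (struct sync_pt_info *)(buf + off);

                printf("  pt on %s (%s): status %d\n",
                       pt->obj_name, pt->driver_name, pt->status);
                off += pt->len;         /* pt->len covers any trailing driver_data */
        }
}
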
+#endif /* _LINUX_SYNC_H */
index 6660c41949ba7d0d120b4cf67928a713e914515d..1ff6b62fb697af52cb247ff5a7e6516f5d5b448e 100644 (file)
@@ -472,7 +472,9 @@ extern void proc_clear_tty(struct task_struct *p);
 extern struct tty_struct *get_current_tty(void);
 extern void tty_default_fops(struct file_operations *fops);
 extern struct tty_struct *alloc_tty_struct(void);
-extern int tty_add_file(struct tty_struct *tty, struct file *file);
+extern int tty_alloc_file(struct file *file);
+extern void tty_add_file(struct tty_struct *tty, struct file *file);
+extern void tty_free_file(struct file *file);
 extern void free_tty_struct(struct tty_struct *tty);
 extern void initialize_tty_struct(struct tty_struct *tty,
                struct tty_driver *driver, int idx);
index 73c7df4896074219ad76daee5e9eb0ba6e1a1976..b08e04cf202b44b23be1b21791b11a65169cb18c 100644 (file)
@@ -1202,6 +1202,7 @@ struct urb {
        void *transfer_buffer;          /* (in) associated data buffer */
        dma_addr_t transfer_dma;        /* (in) dma addr for transfer_buffer */
        struct scatterlist *sg;         /* (in) scatter gather buffer list */
+       int num_mapped_sgs;             /* (internal) mapped sg entries */
        int num_sgs;                    /* (in) number of entries in the sg list */
        u32 transfer_buffer_length;     /* (in) data buffer length */
        u32 actual_length;              /* (return) actual transfer length */
index 4ebaf0824179909bbff03ab5bae516c6f7e05b33..1eb735b53fc4f0e6410b33a5b65e38e61bb3e6a2 100644 (file)
 #define USB_PORT_FEAT_TEST              21
 #define USB_PORT_FEAT_INDICATOR         22
 #define USB_PORT_FEAT_C_PORT_L1         23
-#define USB_PORT_FEAT_C_PORT_LINK_STATE        25
-#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26
-#define USB_PORT_FEAT_PORT_REMOTE_WAKE_MASK 27
-#define USB_PORT_FEAT_BH_PORT_RESET     28
-#define USB_PORT_FEAT_C_BH_PORT_RESET   29
-#define USB_PORT_FEAT_FORCE_LINKPM_ACCEPT 30
 
 /*
  * Port feature selectors added by USB 3.0 spec.
@@ -76,8 +70,8 @@
 #define USB_PORT_FEAT_LINK_STATE               5
 #define USB_PORT_FEAT_U1_TIMEOUT               23
 #define USB_PORT_FEAT_U2_TIMEOUT               24
-#define USB_PORT_FEAT_C_LINK_STATE             25
-#define USB_PORT_FEAT_C_CONFIG_ERR             26
+#define USB_PORT_FEAT_C_PORT_LINK_STATE                25
+#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR      26
 #define USB_PORT_FEAT_REMOTE_WAKE_MASK         27
 #define USB_PORT_FEAT_BH_PORT_RESET            28
 #define USB_PORT_FEAT_C_BH_PORT_RESET          29
index 0fd3fbdd8283fe1004698ed22f8860eb3abecec9..cf65b5cff72268fe2820df6b1ed54264dbdf12fe 100644 (file)
@@ -583,8 +583,26 @@ struct usb_ss_ep_comp_descriptor {
 } __attribute__ ((packed));
 
 #define USB_DT_SS_EP_COMP_SIZE         6
+
 /* Bits 4:0 of bmAttributes if this is a bulk endpoint */
-#define USB_SS_MAX_STREAMS(p)          (1 << ((p) & 0x1f))
+static inline int
+usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp)
+{
+       int             max_streams;
+
+       if (!comp)
+               return 0;
+
+       max_streams = comp->bmAttributes & 0x1f;
+
+       if (!max_streams)
+               return 0;
+
+       max_streams = 1 << max_streams;
+
+       return max_streams;
+}
+
 /* Bits 1:0 of bmAttributes if this is an isoc endpoint */
 #define USB_SS_MULT(p)                 (1 + ((p) & 0x3))
 
index 5b2dcf9728e1065d3ef0eb1089a6794656286ec3..61ebe0aabc5ba44beca1eae91e9ac774c99ca77d 100644 (file)
 #define ACCESSORY_STRING_URI            4
 #define ACCESSORY_STRING_SERIAL         5
 
-/* Control request for retrieving device's protocol version (currently 1)
+/* Control request for retrieving device's protocol version
  *
  *     requestType:    USB_DIR_IN | USB_TYPE_VENDOR
  *     request:        ACCESSORY_GET_PROTOCOL
  *     value:          0
  *     index:          0
  *     data            version number (16 bits little endian)
+ *                     1 for original accessory support
+ *                     2 adds HID and device to host audio support
  */
 #define ACCESSORY_GET_PROTOCOL  51
 
  */
 #define ACCESSORY_START         53
 
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_REGISTER_HID
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          total length of the HID report descriptor
+ *     data            none
+ */
+#define ACCESSORY_REGISTER_HID         54
+
+/* Control request for unregistering a HID device.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_UNREGISTER_HID
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_UNREGISTER_HID         55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SET_HID_REPORT_DESC
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          offset of data in descriptor
+ *                      (needed when HID descriptor is too big for one packet)
+ *     data            the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC         56
+
+/* Control request for sending HID events.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SEND_HID_EVENT
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          0
+ *     data            the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT         57
+
+/* Control request for setting the audio mode.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SET_AUDIO_MODE
+ *     value:          0 - no audio
+ *                     1 - device to host, 44100 16-bit stereo PCM
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_SET_AUDIO_MODE         58
+
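To make the HID request sequence concrete, here is a hedged accessory-side sketch using libusb-1.0; the accessory would normally carry its own copies of the ACCESSORY_* values, and the HID ID of 1 plus the single-packet report descriptor are assumptions.

#include <libusb.h>

#define ACC_REQTYPE_OUT (LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR)

/* Hypothetical sketch: register a HID device with the Android device and send one report. */
static int register_hid_and_send(libusb_device_handle *h,
                                 unsigned char *desc, int desc_len,
                                 unsigned char *report, int report_len)
{
        const int hid_id = 1;   /* accessory-assigned ID, echoed in later requests */
        int r;

        /* value = assigned ID, index = total report descriptor length */
        r = libusb_control_transfer(h, ACC_REQTYPE_OUT, ACCESSORY_REGISTER_HID,
                                    hid_id, desc_len, NULL, 0, 1000);
        if (r < 0)
                return r;

        /* descriptor assumed to fit in one packet; otherwise split it and pass
         * each chunk's byte offset in the index field */
        r = libusb_control_transfer(h, ACC_REQTYPE_OUT, ACCESSORY_SET_HID_REPORT_DESC,
                                    hid_id, 0, desc, desc_len, 1000);
        if (r < 0)
                return r;

        return libusb_control_transfer(h, ACC_REQTYPE_OUT, ACCESSORY_SEND_HID_EVENT,
                                       hid_id, 0, report, report_len, 1000);
}
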
 /* ioctls for retrieving strings set by the host */
 #define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
 #define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
 #define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
 /* returns 1 if there is a start request pending */
 #define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
 
 #endif /* __LINUX_USB_F_ACCESSORY_H */
index 0097136ba45da9a5a27fffb89df66c2aa8743302..c0ecc5a2ef9e35f68bdb031f2949099325ea6491 100644 (file)
@@ -178,7 +178,7 @@ struct usb_hcd {
         * this structure.
         */
        unsigned long hcd_priv[0]
-                       __attribute__ ((aligned(sizeof(unsigned long))));
+                       __attribute__ ((aligned(sizeof(s64))));
 };
 
 /* 2.4 does this a bit differently ... */
index 5bfdd1b2f0f1f1aba841c6dcef1a71eb639a214b..11d198b4f3726099368108f8997ec0114c506aa8 100644 (file)
@@ -191,7 +191,8 @@ extern void usbnet_cdc_status(struct usbnet *, struct urb *);
 enum skb_state {
        illegal = 0,
        tx_start, tx_done,
-       rx_start, rx_done, rx_cleanup
+       rx_start, rx_done, rx_cleanup,
+       unlink_start
 };
 
 struct skb_data {      /* skb->cb is one of these */
index bd8478566a705e413c2d488624bcf017115c7287..a1a3296a720a5cfa353ffb2848be74614669acff 100644 (file)
@@ -1075,6 +1075,7 @@ struct v4l2_querymenu {
 #define V4L2_CTRL_FLAG_NEXT_CTRL       0x80000000
 
 /*  User-class control IDs defined by V4L2 */
+#define V4L2_CID_MAX_CTRLS             1024
 #define V4L2_CID_BASE                  (V4L2_CTRL_CLASS_USER | 0x900)
 #define V4L2_CID_USER_BASE             V4L2_CID_BASE
 /*  IDs reserved for driver specific controls */
index 9332e52ea8c270aadacb79b61446336202e1fea6..687fb11e20107d9c22467e1923d5e736bde16d3e 100644 (file)
@@ -13,6 +13,7 @@ struct vm_area_struct;                /* vma defining user mapping in mm_types.h */
 #define VM_MAP         0x00000004      /* vmap()ed pages */
 #define VM_USERMAP     0x00000008      /* suitable for remap_vmalloc_range */
 #define VM_VPAGES      0x00000010      /* buffer for pages was vmalloc'ed */
+#define VM_UNLIST      0x00000020      /* vm_struct is not listed in vmlist */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
index f584aba78ca9a8d1add3b73797811b5a34071357..6c56a14a70d973002cc0b0449611014d9962936d 100644 (file)
@@ -289,12 +289,16 @@ enum {
  *
  * system_freezable_wq is equivalent to system_wq except that it's
  * freezable.
+ *
+ * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
+ * it's freezable.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
+extern struct workqueue_struct *system_nrt_freezable_wq;
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
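A minimal sketch of using the new queue follows; my_work_fn and my_kick are hypothetical, and the point is only that work queued here runs on a non-reentrant, freezable workqueue.

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        /* deferred work that must not run while tasks are frozen for suspend */
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_kick(void)
{
        queue_work(system_nrt_freezable_wq, &my_work);
}
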
index 91f0568a04ef2b85be4aa417abd7a1eb1ae1055d..fb0eb9048b192fc5539ab60c433c2c274f50ca4f 100644 (file)
@@ -16,6 +16,7 @@ extern void     arp_send(int type, int ptype, __be32 dest_ip,
                         const unsigned char *dest_hw,
                         const unsigned char *src_hw, const unsigned char *th);
 extern int     arp_bind_neighbour(struct dst_entry *dst);
+extern struct neighbour *__arp_bind_neighbour(struct dst_entry *dst, __be32 nexthop);
 extern int     arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
 extern void    arp_ifdown(struct net_device *dev);
 
index 0489b8b90fe7503ea5aeb3e79050d6a4d5fae27e..a2049c618022f32268af423821e5b3d4b28463f3 100644 (file)
@@ -84,6 +84,7 @@ enum {
        HCI_SERVICE_CACHE,
        HCI_LINK_KEYS,
        HCI_DEBUG_KEYS,
+       HCI_UNREGISTER,
 
        HCI_RESET,
 };
index 396e8fc8910e5901a8ae7e3b0c976fb3435b7e2b..a2a97f34b9cbcc5d112bd339766b54b8a690267f 100644 (file)
@@ -426,6 +426,7 @@ struct station_parameters {
  * @STATION_INFO_RX_BITRATE: @rxrate fields are filled
  * @STATION_INFO_BSS_PARAM: @bss_param filled
  * @STATION_INFO_CONNECTED_TIME: @connected_time filled
+ * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
  */
 enum station_info_flags {
        STATION_INFO_INACTIVE_TIME      = 1<<0,
@@ -444,7 +445,8 @@ enum station_info_flags {
        STATION_INFO_SIGNAL_AVG         = 1<<13,
        STATION_INFO_RX_BITRATE         = 1<<14,
        STATION_INFO_BSS_PARAM          = 1<<15,
-       STATION_INFO_CONNECTED_TIME     = 1<<16
+       STATION_INFO_CONNECTED_TIME     = 1<<16,
+       STATION_INFO_ASSOC_REQ_IES      = 1<<17
 };
 
 /**
@@ -536,6 +538,11 @@ struct sta_bss_parameters {
  *     This number should increase every time the list of stations
  *     changes, i.e. when a station is added or removed, so that
  *     userspace can tell whether it got a consistent snapshot.
+ * @assoc_req_ies: IEs from (Re)Association Request.
+ *     This is used only when in AP mode with drivers that do not use
+ *     user space MLME/SME implementation. The information is provided for
+ *     the cfg80211_new_sta() calls to notify user space of the IEs.
+ * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
  */
 struct station_info {
        u32 filled;
@@ -558,6 +565,14 @@ struct station_info {
        struct sta_bss_parameters bss_param;
 
        int generation;
+
+       const u8 *assoc_req_ies;
+       size_t assoc_req_ies_len;
+
+       /*
+        * Note: Add a new enum station_info_flags value for each new field and
+        * use it to check which fields are initialized.
+        */
 };
 
 /**
@@ -797,6 +812,15 @@ struct cfg80211_scan_request {
        struct ieee80211_channel *channels[0];
 };
 
+/**
+ * struct cfg80211_match_set - sets of attributes to match
+ *
+ * @ssid: SSID to be matched
+ */
+struct cfg80211_match_set {
+       struct cfg80211_ssid ssid;
+};
+
 /**
  * struct cfg80211_sched_scan_request - scheduled scan request description
  *
@@ -806,6 +830,11 @@ struct cfg80211_scan_request {
  * @interval: interval between each scheduled scan cycle
  * @ie: optional information element(s) to add into Probe Request or %NULL
  * @ie_len: length of ie in octets
+ * @match_sets: sets of parameters to be matched for a scan result
+ *     entry to be considered valid and to be passed to the host
+ *     (others are filtered out).
+ *     If omitted, all results are passed.
+ * @n_match_sets: number of match sets
  * @wiphy: the wiphy this was for
  * @dev: the interface
  * @channels: channels to scan
@@ -817,6 +846,8 @@ struct cfg80211_sched_scan_request {
        u32 interval;
        const u8 *ie;
        size_t ie_len;
+       struct cfg80211_match_set *match_sets;
+       int n_match_sets;
 
        /* internal */
        struct wiphy *wiphy;
@@ -1716,9 +1747,16 @@ struct wiphy_wowlan_support {
  *     this variable determines its size
  * @max_scan_ssids: maximum number of SSIDs the device can scan for in
  *     any given scan
+ * @max_sched_scan_ssids: maximum number of SSIDs the device can scan
+ *     for in any given scheduled scan
+ * @max_match_sets: maximum number of match sets the device can handle
+ *     when performing a scheduled scan, 0 if filtering is not
+ *     supported.
  * @max_scan_ie_len: maximum length of user-controlled IEs device can
  *     add to probe request frames transmitted during a scan, must not
  *     include fixed IEs like supported rates
+ * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled
+ *     scans
  * @coverage_class: current coverage class
  * @fw_version: firmware version for ethtool reporting
  * @hw_version: hardware version for ethtool reporting
@@ -1770,7 +1808,10 @@ struct wiphy {
 
        int bss_priv_size;
        u8 max_scan_ssids;
+       u8 max_sched_scan_ssids;
+       u8 max_match_sets;
        u16 max_scan_ie_len;
+       u16 max_sched_scan_ie_len;
 
        int n_cipher_suites;
        const u32 *cipher_suites;
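A sketch of how a driver might advertise the new scheduled-scan limits at wiphy setup; my_wiphy_setup and the specific numbers are illustrative assumptions only.

/* Hypothetical driver sketch: advertise scheduled-scan capabilities. */
static void my_wiphy_setup(struct wiphy *wiphy)
{
        wiphy->max_scan_ssids = 4;
        wiphy->max_sched_scan_ssids = 16;       /* SSIDs per scheduled scan */
        wiphy->max_match_sets = 11;             /* 0 would mean filtering is unsupported */
        wiphy->max_scan_ie_len = 2048;
        wiphy->max_sched_scan_ie_len = 2048;
}
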
index e12ddfb9eb1652626b9d8cc1d0c0f62f177895d2..7907ff15e0fdaf80fcd4ee4fad377b2136495d79 100644 (file)
@@ -37,7 +37,7 @@ struct dst_entry {
        unsigned long           _metrics;
        unsigned long           expires;
        struct dst_entry        *path;
-       struct neighbour        *neighbour;
+       struct neighbour __rcu  *_neighbour;
        struct hh_cache         *hh;
 #ifdef CONFIG_XFRM
        struct xfrm_state       *xfrm;
@@ -78,6 +78,7 @@ struct dst_entry {
 #define DST_NOHASH             0x0008
 #define DST_NOCACHE            0x0010
 #define DST_NOCOUNT            0x0020
+#define DST_XFRM_TUNNEL                0x0100
        union {
                struct dst_entry        *next;
                struct rtable __rcu     *rt_next;
@@ -86,6 +87,21 @@ struct dst_entry {
        };
 };
 
+static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
+{
+       return rcu_dereference(dst->_neighbour);
+}
+
+static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst)
+{
+       return rcu_dereference_raw(dst->_neighbour);
+}
+
+static inline void dst_set_neighbour(struct dst_entry *dst, struct neighbour *neigh)
+{
+       rcu_assign_pointer(dst->_neighbour, neigh);
+}
+
 extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
 extern const u32 dst_default_metrics[RTAX_MAX];
 
@@ -371,8 +387,14 @@ static inline void dst_rcu_free(struct rcu_head *head)
 
 static inline void dst_confirm(struct dst_entry *dst)
 {
-       if (dst)
-               neigh_confirm(dst->neighbour);
+       if (dst) {
+               struct neighbour *n;
+
+               rcu_read_lock();
+               n = dst_get_neighbour(dst);
+               neigh_confirm(n);
+               rcu_read_unlock();
+       }
 }
 
 static inline void dst_link_failure(struct sk_buff *skb)
index c6d5fe5ec1bf4d788155e85b73d47e0e662dcf27..e37cfda9c0ff5deda0298d0cf696ffa389a68fb1 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef _NET_FLOW_H
 #define _NET_FLOW_H
 
+#include <linux/socket.h>
 #include <linux/in6.h>
 #include <asm/atomic.h>
 
@@ -68,7 +69,7 @@ struct flowi4 {
 #define fl4_ipsec_spi          uli.spi
 #define fl4_mh_type            uli.mht.type
 #define fl4_gre_key            uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
                                      __u32 mark, __u8 tos, __u8 scope,
@@ -89,6 +90,16 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
        fl4->fl4_dport = dport;
        fl4->fl4_sport = sport;
 }
+
+/* Reset some input parameters after previous lookup */
+static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
+                                       __be32 daddr, __be32 saddr)
+{
+       fl4->flowi4_oif = oif;
+       fl4->flowi4_tos = tos;
+       fl4->daddr = daddr;
+       fl4->saddr = saddr;
+}
                                      
 
 struct flowi6 {
@@ -112,7 +123,7 @@ struct flowi6 {
 #define fl6_ipsec_spi          uli.spi
 #define fl6_mh_type            uli.mht.type
 #define fl6_gre_key            uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 struct flowidn {
        struct flowi_common     __fl_common;
@@ -127,7 +138,7 @@ struct flowidn {
        union flowi_uli         uli;
 #define fld_sport              uli.ports.sport
 #define fld_dport              uli.ports.dport
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 struct flowi {
        union {
@@ -161,6 +172,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
        return container_of(fldn, struct flowi, u.dn);
 }
 
+typedef unsigned long flow_compare_t;
+
+static inline size_t flow_key_size(u16 family)
+{
+       switch (family) {
+       case AF_INET:
+               BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
+               return sizeof(struct flowi4) / sizeof(flow_compare_t);
+       case AF_INET6:
+               BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
+               return sizeof(struct flowi6) / sizeof(flow_compare_t);
+       case AF_DECnet:
+               BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
+               return sizeof(struct flowidn) / sizeof(flow_compare_t);
+       }
+       return 0;
+}
+
 #define FLOW_DIR_IN    0
 #define FLOW_DIR_OUT   1
 #define FLOW_DIR_FWD   2
index caaff5f5f39f503a750ad16ea942fd12adb761ed..14dd9c7899249bd1352fba0e6b9aa78244f8f3d5 100644 (file)
@@ -31,6 +31,7 @@
 /** struct ip_options - IP Options
  *
  * @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
  * @is_data - Options in __data, rather than skb
  * @is_strictroute - Strict source route
  * @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
  */
 struct ip_options {
        __be32          faddr;
+       __be32          nexthop;
        unsigned char   optlen;
        unsigned char   srr;
        unsigned char   rr;
index 3419bf5cd15401d611373f961edb7dcb344315d1..d55f4344333514f43f26d4da76d53574b6a9bd48 100644 (file)
@@ -41,6 +41,7 @@ static inline void *net_generic(const struct net *net, int id)
        ptr = ng->ptr[id - 1];
        rcu_read_unlock();
 
+       BUG_ON(!ptr);
        return ptr;
 }
 #endif
index db7b3432f07c41ce124c9d2a792035cead8b048d..5d7aae4ab2eec93ee8f00accf97a16f51b6be12b 100644 (file)
@@ -270,6 +270,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
                if (IS_ERR(rt))
                        return rt;
                ip_rt_put(rt);
+               flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
        }
        security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
        return ip_route_output_flow(net, fl4, sk);
@@ -284,6 +285,9 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
                fl4->fl4_dport = dport;
                fl4->fl4_sport = sport;
                ip_rt_put(rt);
+               flowi4_update_output(fl4, sk->sk_bound_dev_if,
+                                    RT_CONN_FLAGS(sk), fl4->daddr,
+                                    fl4->saddr);
                security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
                return ip_route_output_flow(sock_net(sk), fl4, sk);
        }
index b931f021d7ab4712f3fa553bef09229f5bf5aeb4..f1fbe2d5e0559e580bb5e01a893106dbcffd4eca 100644 (file)
@@ -219,9 +219,16 @@ struct tcf_proto {
 
 struct qdisc_skb_cb {
        unsigned int            pkt_len;
-       long                    data[];
+       unsigned char           data[24];
 };
 
+static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+{
+       struct qdisc_skb_cb *qcb;
+       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
+       BUILD_BUG_ON(sizeof(qcb->data) < sz);
+}
+
 static inline int qdisc_qlen(struct Qdisc *q)
 {
        return q->q.qlen;
index b2c2366676a706335ed3a7b4e486d6cff3d69bc5..f686066087d51fd1e9f1dd22c24079f1510de4b4 100644 (file)
@@ -700,4 +700,17 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
        addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
 }
 
+/* The cookie is always 0 since this is how it's used in the
+ * pmtu code.
+ */
+static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
+{
+       if (t->dst && !dst_check(t->dst, 0)) {
+               dst_release(t->dst);
+               t->dst = NULL;
+       }
+
+       return t->dst;
+}
+
 #endif /* __net_sctp_h__ */
index 7df327a6d564e8b2b57609ff714d4eb9a0e8b872..c3884214c600f203ea9f357f748f09f16bc9b4e9 100644 (file)
@@ -236,6 +236,9 @@ extern struct sctp_globals {
         * bits is an indicator of when to send and window update SACK.
         */
        int rwnd_update_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
 } sctp_globals;
 
 #define sctp_rto_initial               (sctp_globals.rto_initial)
@@ -271,6 +274,7 @@ extern struct sctp_globals {
 #define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
index 561ac99def5af8b51d9db422625dcf1712907267..0fe667901ed3de96add20d93d16da30a2d21bdd2 100644 (file)
@@ -36,6 +36,7 @@
 #define TRANSPORT_SENSE_BUFFER                 SCSI_SENSE_BUFFERSIZE
 /* Used by transport_send_check_condition_and_sense() */
 #define SPC_SENSE_KEY_OFFSET                   2
+#define SPC_ADD_SENSE_LEN_OFFSET               7
 #define SPC_ASC_KEY_OFFSET                     12
 #define SPC_ASCQ_KEY_OFFSET                    13
 #define TRANSPORT_IQN_LEN                      224
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644 (file)
index 0000000..ea83664
--- /dev/null
@@ -0,0 +1,112 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+       TP_PROTO(u32 cpu_id, unsigned long targfreq,
+                unsigned long actualfreq),
+       TP_ARGS(cpu_id, targfreq, actualfreq),
+
+       TP_STRUCT__entry(
+           __field(          u32, cpu_id    )
+           __field(unsigned long, targfreq   )
+           __field(unsigned long, actualfreq )
+          ),
+
+       TP_fast_assign(
+           __entry->cpu_id = (u32) cpu_id;
+           __entry->targfreq = targfreq;
+           __entry->actualfreq = actualfreq;
+       ),
+
+       TP_printk("cpu=%u targ=%lu actual=%lu",
+             __entry->cpu_id, __entry->targfreq,
+             __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_up,
+       TP_PROTO(u32 cpu_id, unsigned long targfreq,
+            unsigned long actualfreq),
+       TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_down,
+       TP_PROTO(u32 cpu_id, unsigned long targfreq,
+            unsigned long actualfreq),
+       TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curfreq, unsigned long targfreq),
+           TP_ARGS(cpu_id, load, curfreq, targfreq),
+
+           TP_STRUCT__entry(
+                   __field(unsigned long, cpu_id    )
+                   __field(unsigned long, load      )
+                   __field(unsigned long, curfreq   )
+                   __field(unsigned long, targfreq  )
+           ),
+
+           TP_fast_assign(
+                   __entry->cpu_id = cpu_id;
+                   __entry->load = load;
+                   __entry->curfreq = curfreq;
+                   __entry->targfreq = targfreq;
+           ),
+
+           TP_printk("cpu=%lu load=%lu cur=%lu targ=%lu",
+                     __entry->cpu_id, __entry->load, __entry->curfreq,
+                     __entry->targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curfreq, unsigned long targfreq),
+           TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curfreq, unsigned long targfreq),
+           TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curfreq, unsigned long targfreq),
+           TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+           TP_PROTO(const char *s),
+           TP_ARGS(s),
+           TP_STRUCT__entry(
+                   __string(s, s)
+           ),
+           TP_fast_assign(
+                   __assign_str(s, s);
+           ),
+           TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+           TP_PROTO(const char *s),
+           TP_ARGS(s),
+           TP_STRUCT__entry(
+                   __string(s, s)
+           ),
+           TP_fast_assign(
+                   __assign_str(s, s);
+           ),
+           TP_printk("%s", __get_str(s))
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 4e249b927eaa8092f15f452cc9f14eaa909aa164..9b60c6fc6df73ac1f88c7eed475f210ce8717f87 100644 (file)
@@ -23,7 +23,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
                __field(int, for_background)
        ),
        TP_fast_assign(
-               strncpy(__entry->name, dev_name(bdi->dev), 32);
+               struct device *dev = bdi->dev;
+               if (!dev)
+                       dev = default_backing_dev_info.dev;
+               strncpy(__entry->name, dev_name(dev), 32);
                __entry->nr_pages = work->nr_pages;
                __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
                __entry->sync_mode = work->sync_mode;
index 892b97f8e1576f969821688820350a0020762641..c0d8014f5ccc7f9c1fdf70da60256c42d2d8ca10 100644 (file)
@@ -514,6 +514,11 @@ struct omap_dss_device {
        int (*get_backlight)(struct omap_dss_device *dssdev);
 };
 
+struct omap_dss_hdmi_data
+{
+       int hpd_gpio;
+};
+
 struct omap_dss_driver {
        struct device_driver driver;
 
index 99fcffb372d15fb2580adfc4759ff48ee9b570c9..454ee262923897a0f678fb37abd95f96b342b7b3 100644 (file)
@@ -84,4 +84,7 @@ struct xenstore_domain_interface {
     XENSTORE_RING_IDX rsp_cons, rsp_prod;
 };
 
+/* Violating this is very bad.  See docs/misc/xenstore.txt. */
+#define XENSTORE_PAYLOAD_MAX 4096
+
 #endif /* _XS_WIRE_H */
index c0851a8e030cbcf38701f77a5d79e253d81e0327..8959eb32e0959946c857d3b7cbf4a95dd3b6c851 100644 (file)
@@ -360,15 +360,42 @@ out:
 }
  
 #ifdef CONFIG_ROOT_NFS
+
+#define NFSROOT_TIMEOUT_MIN    5
+#define NFSROOT_TIMEOUT_MAX    30
+#define NFSROOT_RETRY_MAX      5
+
 static int __init mount_nfs_root(void)
 {
        char *root_dev, *root_data;
+       unsigned int timeout;
+       int try, err;
 
-       if (nfs_root_data(&root_dev, &root_data) != 0)
-               return 0;
-       if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
+       err = nfs_root_data(&root_dev, &root_data);
+       if (err != 0)
                return 0;
-       return 1;
+
+       /*
+        * The server or network may not be ready, so try several
+        * times.  Stop after a few tries in case the client wants
+        * to fall back to other boot methods.
+        */
+       timeout = NFSROOT_TIMEOUT_MIN;
+       for (try = 1; ; try++) {
+               err = do_mount_root(root_dev, "nfs",
+                                       root_mountflags, root_data);
+               if (err == 0)
+                       return 1;
+               if (try > NFSROOT_RETRY_MAX)
+                       break;
+
+               /* Wait, in case the server refused us immediately */
+               ssleep(timeout);
+               timeout <<= 1;
+               if (timeout > NFSROOT_TIMEOUT_MAX)
+                       timeout = NFSROOT_TIMEOUT_MAX;
+       }
+       return 0;
 }
 #endif
 
@@ -405,7 +432,7 @@ void __init change_floppy(char *fmt, ...)
 void __init mount_root(void)
 {
 #ifdef CONFIG_ROOT_NFS
-       if (MAJOR(ROOT_DEV) == UNNAMED_MAJOR) {
+       if (ROOT_DEV == Root_NFS) {
                if (mount_nfs_root())
                        return;
 
index d7211faed2adfb295caf46bbb9c70835f622eabe..841e344d366ee9ad3c32e486fdbb300e1cc81c9c 100644 (file)
@@ -549,9 +549,6 @@ asmlinkage void __init start_kernel(void)
        early_boot_irqs_disabled = false;
        local_irq_enable();
 
-       /* Interrupts are enabled now so all GFP allocations are safe. */
-       gfp_allowed_mask = __GFP_BITS_MASK;
-
        kmem_cache_init_late();
 
        /*
@@ -783,6 +780,10 @@ static int __init kernel_init(void * unused)
         * Wait until kthreadd is all set-up.
         */
        wait_for_completion(&kthreadd_done);
+
+       /* Now the scheduler is fully set up and can do blocking allocations */
+       gfp_allowed_mask = __GFP_BITS_MASK;
+
        /*
         * init can allocate pages on any node
         */
index 241b74a307de64972f139592777d10942cd14e2d..5083a09a9b6d3370a3d47558718ec2ab63e81611 100644 (file)
@@ -1176,10 +1176,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
 
        /*
         * If the 'all' option was specified select all the subsystems,
-        * otherwise 'all, 'none' and a subsystem name options were not
-        * specified, let's default to 'all'
+        * otherwise if 'none', 'name=' and a subsystem name options
+        * were not specified, let's default to 'all'
         */
-       if (all_ss || (!all_ss && !one_ss && !opts->none)) {
+       if (all_ss || (!one_ss && !opts->none && !opts->name)) {
                for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];
                        if (ss == NULL)
@@ -2105,11 +2105,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                        continue;
                /* get old css_set pointer */
                task_lock(tsk);
-               if (tsk->flags & PF_EXITING) {
-                       /* ignore this task if it's going away */
-                       task_unlock(tsk);
-                       continue;
-               }
                oldcg = tsk->cgroups;
                get_css_set(oldcg);
                task_unlock(tsk);
index e691818d7e450f5f8785b8b97a08cdbb2f4e1493..a3f638ac3de142c550132b5ecdca879ed8c590e6 100644 (file)
@@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
        kfree(cgroup_freezer(cgroup));
 }
 
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+       return frozen(task) ||
+               (task_is_stopped_or_traced(task) && freezing(task));
+}
+
 /*
  * The call to cgroup_lock() in the freezer.state write method prevents
  * a write to that file racing against an attach, and hence the
@@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                ntotal++;
-               if (frozen(task))
+               if (is_task_frozen_enough(task))
                        nfrozen++;
        }
 
@@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
        while ((task = cgroup_iter_next(cgroup, &it))) {
                if (!freeze_task(task, true))
                        continue;
-               if (frozen(task))
+               if (is_task_frozen_enough(task))
                        continue;
                if (!freezing(task) && !freezer_should_skip(task))
                        num_cant_freeze_now++;
index fc9eb093acd5fc006e9f296b1a6c92b7a39d53ff..3507c936e5f9d86d50e00b7588e42ad4fa98a252 100644 (file)
@@ -318,25 +318,54 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
 
 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
 
-asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
-               compat_old_sigset_t __user *oset)
+/*
+ * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
+ * blocked set of signals to the supplied signal set
+ */
+static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
 {
-       old_sigset_t s;
-       long ret;
-       mm_segment_t old_fs;
+       memcpy(blocked->sig, &set, sizeof(set));
+}
 
-       if (set && get_user(s, set))
-               return -EFAULT;
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       ret = sys_sigprocmask(how,
-                             set ? (old_sigset_t __user *) &s : NULL,
-                             oset ? (old_sigset_t __user *) &s : NULL);
-       set_fs(old_fs);
-       if (ret == 0)
-               if (oset)
-                       ret = put_user(s, oset);
-       return ret;
+asmlinkage long compat_sys_sigprocmask(int how,
+                                      compat_old_sigset_t __user *nset,
+                                      compat_old_sigset_t __user *oset)
+{
+       old_sigset_t old_set, new_set;
+       sigset_t new_blocked;
+
+       old_set = current->blocked.sig[0];
+
+       if (nset) {
+               if (get_user(new_set, nset))
+                       return -EFAULT;
+               new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
+
+               new_blocked = current->blocked;
+
+               switch (how) {
+               case SIG_BLOCK:
+                       sigaddsetmask(&new_blocked, new_set);
+                       break;
+               case SIG_UNBLOCK:
+                       sigdelsetmask(&new_blocked, new_set);
+                       break;
+               case SIG_SETMASK:
+                       compat_sig_setmask(&new_blocked, new_set);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               set_current_blocked(&new_blocked);
+       }
+
+       if (oset) {
+               if (put_user(old_set, oset))
+                       return -EFAULT;
+       }
+
+       return 0;
 }
 
 #endif
index 404770761a4e9f99ea74057a64faaa31cac4709f..eae3d9b3957464f58c430d0dee2450aba093a5d9 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 #include <linux/gfp.h>
+#include <linux/suspend.h>
 
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
@@ -476,6 +477,79 @@ static int alloc_frozen_cpus(void)
        return 0;
 }
 core_initcall(alloc_frozen_cpus);
+
+/*
+ * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
+ * hotplug when tasks are about to be frozen. Also, don't allow the freezer
+ * to continue until any currently running CPU hotplug operation gets
+ * completed.
+ * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
+ * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
+ * CPU hotplug path and released only after it is complete. Thus, we
+ * (and hence the freezer) will block here until any currently running CPU
+ * hotplug operation gets completed.
+ */
+void cpu_hotplug_disable_before_freeze(void)
+{
+       cpu_maps_update_begin();
+       cpu_hotplug_disabled = 1;
+       cpu_maps_update_done();
+}
+
+
+/*
+ * When tasks have been thawed, re-enable regular CPU hotplug (which had been
+ * disabled while beginning to freeze tasks).
+ */
+void cpu_hotplug_enable_after_thaw(void)
+{
+       cpu_maps_update_begin();
+       cpu_hotplug_disabled = 0;
+       cpu_maps_update_done();
+}
+
+/*
+ * When callbacks for CPU hotplug notifications are being executed, we must
+ * ensure that the state of the system with respect to the tasks being frozen
+ * or not, as reported by the notification, remains unchanged *throughout the
+ * duration* of the execution of the callbacks.
+ * Hence we need to prevent the freezer from racing with regular CPU hotplug.
+ *
+ * This synchronization is implemented by mutually excluding regular CPU
+ * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
+ * Hibernate notifications.
+ */
+static int
+cpu_hotplug_pm_callback(struct notifier_block *nb,
+                       unsigned long action, void *ptr)
+{
+       switch (action) {
+
+       case PM_SUSPEND_PREPARE:
+       case PM_HIBERNATION_PREPARE:
+               cpu_hotplug_disable_before_freeze();
+               break;
+
+       case PM_POST_SUSPEND:
+       case PM_POST_HIBERNATION:
+               cpu_hotplug_enable_after_thaw();
+               break;
+
+       default:
+               return NOTIFY_DONE;
+       }
+
+       return NOTIFY_OK;
+}
+
+
+int cpu_hotplug_pm_sync_init(void)
+{
+       pm_notifier(cpu_hotplug_pm_callback, 0);
+       return 0;
+}
+core_initcall(cpu_hotplug_pm_sync_init);
+
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 /**
index 174fa84eca303ced39cc03910114d495f7b777b0..3a55ea4fa983ebf7259e55ae659dc583b247343a 100644 (file)
@@ -385,6 +385,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
        struct cred *new;
        int ret;
 
+       p->replacement_session_keyring = NULL;
+
        if (
 #ifdef CONFIG_KEYS
                !p->cred->thread_keyring &&
index bad6786dee88844d61dae27f83d82dbf1c40840f..5ee24d106320f69671849e76d8649a1e34194c98 100644 (file)
@@ -157,37 +157,39 @@ early_param("nokgdbroundup", opt_nokgdbroundup);
  * Weak aliases for breakpoint management,
  * can be overriden by architectures when needed:
  */
-int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
+int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
        int err;
 
-       err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
+       err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+                               BREAK_INSTR_SIZE);
        if (err)
                return err;
-
-       return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
-                                 BREAK_INSTR_SIZE);
+       err = probe_kernel_write((char *)bpt->bpt_addr,
+                                arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+       return err;
 }
 
-int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
-       return probe_kernel_write((char *)addr,
-                                 (char *)bundle, BREAK_INSTR_SIZE);
+       return probe_kernel_write((char *)bpt->bpt_addr,
+                                 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
 
 int __weak kgdb_validate_break_address(unsigned long addr)
 {
-       char tmp_variable[BREAK_INSTR_SIZE];
+       struct kgdb_bkpt tmp;
        int err;
-       /* Validate setting the breakpoint and then removing it.  In the
+       /* Validate setting the breakpoint and then removing it.  If the
         * remove fails, the kernel needs to emit a bad message because we
         * are deep trouble not being able to put things back the way we
         * found them.
         */
-       err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+       tmp.bpt_addr = addr;
+       err = kgdb_arch_set_breakpoint(&tmp);
        if (err)
                return err;
-       err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+       err = kgdb_arch_remove_breakpoint(&tmp);
        if (err)
                printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
                   "memory destroyed at: %lx", addr);
@@ -231,7 +233,6 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
  */
 int dbg_activate_sw_breakpoints(void)
 {
-       unsigned long addr;
        int error;
        int ret = 0;
        int i;
@@ -240,16 +241,15 @@ int dbg_activate_sw_breakpoints(void)
                if (kgdb_break[i].state != BP_SET)
                        continue;
 
-               addr = kgdb_break[i].bpt_addr;
-               error = kgdb_arch_set_breakpoint(addr,
-                               kgdb_break[i].saved_instr);
+               error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
                if (error) {
                        ret = error;
-                       printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+                       printk(KERN_INFO "KGDB: BP install failed: %lx",
+                              kgdb_break[i].bpt_addr);
                        continue;
                }
 
-               kgdb_flush_swbreak_addr(addr);
+               kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
                kgdb_break[i].state = BP_ACTIVE;
        }
        return ret;
@@ -298,7 +298,6 @@ int dbg_set_sw_break(unsigned long addr)
 
 int dbg_deactivate_sw_breakpoints(void)
 {
-       unsigned long addr;
        int error;
        int ret = 0;
        int i;
@@ -306,15 +305,14 @@ int dbg_deactivate_sw_breakpoints(void)
        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state != BP_ACTIVE)
                        continue;
-               addr = kgdb_break[i].bpt_addr;
-               error = kgdb_arch_remove_breakpoint(addr,
-                                       kgdb_break[i].saved_instr);
+               error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error) {
-                       printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+                       printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
+                              kgdb_break[i].bpt_addr);
                        ret = error;
                }
 
-               kgdb_flush_swbreak_addr(addr);
+               kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
                kgdb_break[i].state = BP_SET;
        }
        return ret;
@@ -348,7 +346,6 @@ int kgdb_isremovedbreak(unsigned long addr)
 
 int dbg_remove_all_break(void)
 {
-       unsigned long addr;
        int error;
        int i;
 
@@ -356,12 +353,10 @@ int dbg_remove_all_break(void)
        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state != BP_ACTIVE)
                        goto setundefined;
-               addr = kgdb_break[i].bpt_addr;
-               error = kgdb_arch_remove_breakpoint(addr,
-                               kgdb_break[i].saved_instr);
+               error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error)
                        printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
-                          addr);
+                              kgdb_break[i].bpt_addr);
 setundefined:
                kgdb_break[i].state = BP_UNDEFINED;
        }
index f2b321bae44037c08d4b09b90b8610a80b979eb7..303bed2966b6e100afebbdc83c41084cfc9dcc0f 100644 (file)
@@ -1553,8 +1553,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
        }
 
        /* dead body doesn't have much to contribute */
-       if (p->exit_state == EXIT_DEAD)
+       if (unlikely(p->exit_state == EXIT_DEAD)) {
+               /*
+                * But do not ignore this task until the tracer does
+                * wait_task_zombie()->do_notify_parent().
+                */
+               if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+                       wo->notask_error = 0;
                return 0;
+       }
 
        /* slay zombie? */
        if (p->exit_state == EXIT_ZOMBIE) {
index 4e4b3446511861fc2f4b17a344f23a7733e49652..fa6030d9c59b46a3dc1f7261204f6e77d23db95a 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/audit.h>
 #include <linux/memcontrol.h>
 #include <linux/ftrace.h>
+#include <linux/proc_fs.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/ksm.h>
@@ -67,6 +68,7 @@
 #include <linux/user-return-notifier.h>
 #include <linux/oom.h>
 #include <linux/khugepaged.h>
+#include <linux/signalfd.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -933,8 +935,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 
 void __cleanup_sighand(struct sighand_struct *sighand)
 {
-       if (atomic_dec_and_test(&sighand->count))
+       if (atomic_dec_and_test(&sighand->count)) {
+               signalfd_cleanup(sighand);
                kmem_cache_free(sighand_cachep, sighand);
+       }
 }
 
 
@@ -1391,6 +1395,8 @@ bad_fork_cleanup_io:
        if (p->io_context)
                exit_io_context(p);
 bad_fork_cleanup_namespaces:
+       if (unlikely(clone_flags & CLONE_NEWPID))
+               pid_ns_release_proc(p->nsproxy->pid_ns);
        exit_task_namespaces(p);
 bad_fork_cleanup_mm:
        if (p->mm) {
index 11cbe052b2e8bb571c49fc7ebcf1268866c0a8b4..b2d51a7ede3b30c8f4f5bfbcb544ba1085d32534 100644 (file)
@@ -59,6 +59,7 @@
 #include <linux/magic.h>
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
+#include <linux/ptrace.h>
 
 #include <asm/futex.h>
 
@@ -314,17 +315,29 @@ again:
 #endif
 
        lock_page(page_head);
+
+       /*
+        * If page_head->mapping is NULL, then it cannot be a PageAnon
+        * page; but it might be the ZERO_PAGE or in the gate area or
+        * in a special mapping (all cases which we are happy to fail);
+        * or it may have been a good file page when get_user_pages_fast
+        * found it, but truncated or holepunched or subjected to
+        * invalidate_complete_page2 before we got the page lock (also
+        * cases which we are happy to fail).  And we hold a reference,
+        * so refcount care in invalidate_complete_page's remove_mapping
+        * prevents drop_caches from setting mapping to NULL beneath us.
+        *
+        * The case we do have to guard against is when memory pressure made
+        * shmem_writepage move it from filecache to swapcache beneath us:
+        * an unlikely race, but we do need to retry for page_head->mapping.
+        */
        if (!page_head->mapping) {
+               int shmem_swizzled = PageSwapCache(page_head);
                unlock_page(page_head);
                put_page(page_head);
-               /*
-               * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
-               * trying to find one. RW mapping would have COW'd (and thus
-               * have a mapping) so this page is RO and won't ever change.
-               */
-               if ((page_head == ZERO_PAGE(address)))
-                       return -EFAULT;
-               goto again;
+               if (shmem_swizzled)
+                       goto again;
+               return -EFAULT;
        }
 
        /*
@@ -2431,40 +2444,29 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
 {
        struct robust_list_head __user *head;
        unsigned long ret;
-       const struct cred *cred = current_cred(), *pcred;
+       struct task_struct *p;
 
        if (!futex_cmpxchg_enabled)
                return -ENOSYS;
 
+       rcu_read_lock();
+
+       ret = -ESRCH;
        if (!pid)
-               head = current->robust_list;
+               p = current;
        else {
-               struct task_struct *p;
-
-               ret = -ESRCH;
-               rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (!p)
                        goto err_unlock;
-               ret = -EPERM;
-               pcred = __task_cred(p);
-               /* If victim is in different user_ns, then uids are not
-                  comparable, so we must have CAP_SYS_PTRACE */
-               if (cred->user->user_ns != pcred->user->user_ns) {
-                       if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
-                               goto err_unlock;
-                       goto ok;
-               }
-               /* If victim is in same user_ns, then uids are comparable */
-               if (cred->euid != pcred->euid &&
-                   cred->euid != pcred->uid &&
-                   !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
-                       goto err_unlock;
-ok:
-               head = p->robust_list;
-               rcu_read_unlock();
        }
 
+       ret = -EPERM;
+       if (!ptrace_may_access(p, PTRACE_MODE_READ))
+               goto err_unlock;
+
+       head = p->robust_list;
+       rcu_read_unlock();
+
        if (put_user(sizeof(*head), len_ptr))
                return -EFAULT;
        return put_user(head, head_ptr);
@@ -2628,6 +2630,16 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                        return -ENOSYS;
        }
 
+       switch (cmd) {
+       case FUTEX_LOCK_PI:
+       case FUTEX_UNLOCK_PI:
+       case FUTEX_TRYLOCK_PI:
+       case FUTEX_WAIT_REQUEUE_PI:
+       case FUTEX_CMP_REQUEUE_PI:
+               if (!futex_cmpxchg_enabled)
+                       return -ENOSYS;
+       }
+
        switch (cmd) {
        case FUTEX_WAIT:
                val3 = FUTEX_BITSET_MATCH_ANY;
@@ -2649,16 +2661,13 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
                break;
        case FUTEX_LOCK_PI:
-               if (futex_cmpxchg_enabled)
-                       ret = futex_lock_pi(uaddr, flags, val, timeout, 0);
+               ret = futex_lock_pi(uaddr, flags, val, timeout, 0);
                break;
        case FUTEX_UNLOCK_PI:
-               if (futex_cmpxchg_enabled)
-                       ret = futex_unlock_pi(uaddr, flags);
+               ret = futex_unlock_pi(uaddr, flags);
                break;
        case FUTEX_TRYLOCK_PI:
-               if (futex_cmpxchg_enabled)
-                       ret = futex_lock_pi(uaddr, flags, 0, timeout, 1);
+               ret = futex_lock_pi(uaddr, flags, 0, timeout, 1);
                break;
        case FUTEX_WAIT_REQUEUE_PI:
                val3 = FUTEX_BITSET_MATCH_ANY;
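
The get_robust_list() rewrite above drops the open-coded uid/user_ns comparison and defers to ptrace_may_access(p, PTRACE_MODE_READ). A small userspace sketch of the syscall whose permission check changed, assuming glibc exposes SYS_get_robust_list and the uapi header provides struct robust_list_head; querying pid 0 (the caller itself) always passes the new check, and the call may still fail with ENOSYS when futex_cmpxchg is unavailable, matching the early return in the patch:

#include <linux/futex.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct robust_list_head *head = NULL;
	size_t len = 0;

	/* pid 0 means "the calling task"; querying another pid is where
	 * the kernel now applies ptrace_may_access(PTRACE_MODE_READ). */
	if (syscall(SYS_get_robust_list, 0, &head, &len) == 0)
		printf("robust list head=%p len=%zu\n", (void *)head, len);
	else
		perror("get_robust_list");

	return 0;
}
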
index 5f9e689dc8f0f7d52824108c56712e45e2e81560..a9642d528630260749f760f3c02bc8164f51af2b 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/compat.h>
 #include <linux/nsproxy.h>
 #include <linux/futex.h>
+#include <linux/ptrace.h>
 
 #include <asm/uaccess.h>
 
@@ -136,40 +137,29 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
 {
        struct compat_robust_list_head __user *head;
        unsigned long ret;
-       const struct cred *cred = current_cred(), *pcred;
+       struct task_struct *p;
 
        if (!futex_cmpxchg_enabled)
                return -ENOSYS;
 
+       rcu_read_lock();
+
+       ret = -ESRCH;
        if (!pid)
-               head = current->compat_robust_list;
+               p = current;
        else {
-               struct task_struct *p;
-
-               ret = -ESRCH;
-               rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (!p)
                        goto err_unlock;
-               ret = -EPERM;
-               pcred = __task_cred(p);
-               /* If victim is in different user_ns, then uids are not
-                  comparable, so we must have CAP_SYS_PTRACE */
-               if (cred->user->user_ns != pcred->user->user_ns) {
-                       if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
-                               goto err_unlock;
-                       goto ok;
-               }
-               /* If victim is in same user_ns, then uids are comparable */
-               if (cred->euid != pcred->euid &&
-                   cred->euid != pcred->uid &&
-                   !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
-                       goto err_unlock;
-ok:
-               head = p->compat_robust_list;
-               rcu_read_unlock();
        }
 
+       ret = -EPERM;
+       if (!ptrace_may_access(p, PTRACE_MODE_READ))
+               goto err_unlock;
+
+       head = p->compat_robust_list;
+       rcu_read_unlock();
+
        if (put_user(sizeof(*head), len_ptr))
                return -EFAULT;
        return put_user(ptr_to_compat(head), head_ptr);
index a9205e32a059cde761f924c498d2984b0aacb36c..2043c08d36c89d44731fb70ff01679816704f3a0 100644 (file)
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
 {
+       struct timerqueue_node *next_timer;
        if (!(timer->state & HRTIMER_STATE_ENQUEUED))
                goto out;
 
-       if (&timer->node == timerqueue_getnext(&base->active)) {
+       next_timer = timerqueue_getnext(&base->active);
+       timerqueue_del(&base->active, &timer->node);
+       if (&timer->node == next_timer) {
 #ifdef CONFIG_HIGH_RES_TIMERS
                /* Reprogram the clock event device. if enabled */
                if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
                }
 #endif
        }
-       timerqueue_del(&base->active, &timer->node);
        if (!timerqueue_getnext(&base->active))
                base->cpu_base->active_bases &= ~(1 << base->index);
 out:
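
The __remove_hrtimer() hunk snapshots timerqueue_getnext() before timerqueue_del() so the reprogram decision is made against the pre-removal head of the queue. A generic sketch of the same take-the-snapshot-then-delete ordering on a toy singly linked queue; the types and helper here are hypothetical, not the kernel timerqueue API:

#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; };
struct queue { struct node *head; };

/* Remove @n from @q and report whether it was the first element.
 * The head is snapshotted before unlinking, mirroring the fix:
 * decide "was this the next-expiring entry?" first, then delete. */
static bool remove_and_was_first(struct queue *q, struct node *n)
{
	struct node *first = q->head;	/* snapshot before deletion */
	struct node **pp;

	for (pp = &q->head; *pp; pp = &(*pp)->next) {
		if (*pp == n) {
			*pp = n->next;	/* unlink */
			n->next = NULL;
			break;
		}
	}
	return n == first;
}

int main(void)
{
	struct node a = { NULL }, b = { NULL };
	struct queue q = { &a };

	a.next = &b;
	return remove_and_was_first(&q, &b) ? 1 : 0;	/* b is not first */
}
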
index ea640120ab8606195e8fcddabe5c81180598b4cb..e972276f12ff7bd6726f654373938643fcdda60c 100644 (file)
@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
        /*
         * Ensure the task is not frozen.
-        * Also, when a freshly created task is scheduled once, changes
-        * its state to TASK_UNINTERRUPTIBLE without having ever been
-        * switched out once, it musn't be checked.
+        * Also, skip vfork and any other user process that freezer should skip.
         */
-       if (unlikely(t->flags & PF_FROZEN || !switch_count))
+       if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+           return;
+
+       /*
+        * When a freshly created task is scheduled once and changes its state
+        * to TASK_UNINTERRUPTIBLE without ever having been switched out, it
+        * mustn't be checked.
+        */
+       if (unlikely(!switch_count))
                return;
 
        if (switch_count != t->last_switch_count) {
index 342d8f44e4010d13cb03f21b89a57d9dc955497e..0119b9d467ae6dd1d53b9f38bcf9c95d63f7ae9d 100644 (file)
@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
                        if (desc->irq_data.chip->irq_set_type)
                                desc->irq_data.chip->irq_set_type(&desc->irq_data,
                                                         IRQ_TYPE_PROBE);
-                       irq_startup(desc);
+                       irq_startup(desc, false);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
@@ -70,7 +70,7 @@ unsigned long probe_irq_on(void)
                raw_spin_lock_irq(&desc->lock);
                if (!desc->action && irq_settings_can_probe(desc)) {
                        desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
-                       if (irq_startup(desc))
+                       if (irq_startup(desc, false))
                                desc->istate |= IRQS_PENDING;
                }
                raw_spin_unlock_irq(&desc->lock);
index dc5114b4c16cc6cd656290cdb03596a2ffd3f3b8..990965ec5d0b3372bfbeb07624bebd7822312526 100644 (file)
@@ -61,8 +61,7 @@ int irq_set_irq_type(unsigned int irq, unsigned int type)
                return -EINVAL;
 
        type &= IRQ_TYPE_SENSE_MASK;
-       if (type != IRQ_TYPE_NONE)
-               ret = __irq_set_trigger(desc, irq, type);
+       ret = __irq_set_trigger(desc, irq, type);
        irq_put_desc_busunlock(desc, flags);
        return ret;
 }
@@ -157,19 +156,22 @@ static void irq_state_set_masked(struct irq_desc *desc)
        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 }
 
-int irq_startup(struct irq_desc *desc)
+int irq_startup(struct irq_desc *desc, bool resend)
 {
+       int ret = 0;
+
        irq_state_clr_disabled(desc);
        desc->depth = 0;
 
        if (desc->irq_data.chip->irq_startup) {
-               int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+               ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
                irq_state_clr_masked(desc);
-               return ret;
+       } else {
+               irq_enable(desc);
        }
-
-       irq_enable(desc);
-       return 0;
+       if (resend)
+               check_irq_resend(desc, desc->irq_data.irq);
+       return ret;
 }
 
 void irq_shutdown(struct irq_desc *desc)
@@ -312,6 +314,24 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(handle_simple_irq);
 
+/*
+ * Called unconditionally from handle_level_irq() and only for oneshot
+ * interrupts from handle_fasteoi_irq()
+ */
+static void cond_unmask_irq(struct irq_desc *desc)
+{
+       /*
+        * We need to unmask in the following cases:
+        * - Standard level irq (IRQF_ONESHOT is not set)
+        * - Oneshot irq which did not wake the thread (caused by a
+        *   spurious interrupt or a primary handler handling it
+        *   completely).
+        */
+       if (!irqd_irq_disabled(&desc->irq_data) &&
+           irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
+               unmask_irq(desc);
+}
+
 /**
  *     handle_level_irq - Level type irq handler
  *     @irq:   the interrupt number
@@ -344,8 +364,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
        handle_irq_event(desc);
 
-       if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
-               unmask_irq(desc);
+       cond_unmask_irq(desc);
+
 out_unlock:
        raw_spin_unlock(&desc->lock);
 }
@@ -399,6 +419,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
        preflow_handler(desc);
        handle_irq_event(desc);
 
+       if (desc->istate & IRQS_ONESHOT)
+               cond_unmask_irq(desc);
+
 out_eoi:
        desc->irq_data.chip->irq_eoi(&desc->irq_data);
 out_unlock:
@@ -575,7 +598,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
                irq_settings_set_nothread(desc);
-               irq_startup(desc);
+               irq_startup(desc, true);
        }
 out:
        irq_put_desc_busunlock(desc, flags);
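
cond_unmask_irq() above encodes one decision: a level or oneshot line is unmasked only when it is not disabled, is currently masked, and no oneshot thread handler is still outstanding. A toy model of that predicate as a pure function, purely for illustration; the names are made up and nothing here is the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the cond_unmask_irq() decision: unmask only if the
 * line is not disabled, is currently masked, and no oneshot thread
 * handler is still running. */
static bool should_unmask(bool disabled, bool masked,
			  unsigned long threads_oneshot)
{
	return !disabled && masked && !threads_oneshot;
}

int main(void)
{
	/* Spurious oneshot interrupt: no thread was woken, so unmask. */
	printf("%d\n", should_unmask(false, true, 0));   /* 1 */
	/* A thread is still running: keep the line masked for now. */
	printf("%d\n", should_unmask(false, true, 0x4)); /* 0 */
	return 0;
}
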
index 6546431447d766396b371595f6d46df41c883c9a..62efdc44b64309f3665f85117f30d247de61f716 100644 (file)
@@ -67,7 +67,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
-extern int irq_startup(struct irq_desc *desc);
+extern int irq_startup(struct irq_desc *desc, bool resend);
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
index 0a7840aeb0fb9efbc18a6e6e8e6f01de17ed91cd..df8136fff8cc8407d309b0aaef4cf558a51e7251 100644 (file)
@@ -620,8 +620,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
+       set_current_state(TASK_INTERRUPTIBLE);
+
        while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
 
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
@@ -629,7 +630,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
                        return 0;
                }
                schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
+       __set_current_state(TASK_RUNNING);
        return -1;
 }
 
@@ -767,7 +770,7 @@ static int irq_thread(void *data)
                        struct irqaction *action);
        int wake;
 
-       if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
+       if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                                        &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
@@ -973,6 +976,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
                /* add new interrupt at end of irq queue */
                do {
+                       /*
+                        * Or all existing action->thread_mask bits,
+                        * so we can find the next zero bit for this
+                        * new action.
+                        */
                        thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
@@ -981,14 +989,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        }
 
        /*
-        * Setup the thread mask for this irqaction. Unlikely to have
-        * 32 resp 64 irqs sharing one line, but who knows.
+        * Setup the thread mask for this irqaction for ONESHOT. For
+        * !ONESHOT irqs the thread mask is 0 so we can avoid a
+        * conditional in irq_wake_thread().
         */
-       if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
-               ret = -EBUSY;
-               goto out_mask;
+       if (new->flags & IRQF_ONESHOT) {
+               /*
+                * Unlikely to have 32 resp 64 irqs sharing one line,
+                * but who knows.
+                */
+               if (thread_mask == ~0UL) {
+                       ret = -EBUSY;
+                       goto out_mask;
+               }
+               /*
+                * The thread_mask for the action is or'ed to
+                * desc->thread_active to indicate that the
+                * IRQF_ONESHOT thread handler has been woken, but not
+                * yet finished. The bit is cleared when a thread
+                * completes. When all threads of a shared interrupt
+                * line have completed desc->threads_active becomes
+                * zero and the interrupt line is unmasked. See
+                * handle.c:irq_wake_thread() for further information.
+                *
+                * If no thread is woken by primary (hard irq context)
+                * interrupt handlers, then desc->threads_active is
+                * also checked for zero to unmask the irq line in the
+                * affected hard irq flow handlers
+                * (handle_[fasteoi|level]_irq).
+                *
+                * The new action gets the first zero bit of
+                * thread_mask assigned. See the loop above which or's
+                * all existing action->thread_mask bits.
+                */
+               new->thread_mask = 1 << ffz(thread_mask);
        }
-       new->thread_mask = 1 << ffz(thread_mask);
 
        if (!shared) {
                init_waitqueue_head(&desc->wait_for_threads);
@@ -1015,7 +1050,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                        desc->istate |= IRQS_ONESHOT;
 
                if (irq_settings_can_autoenable(desc))
-                       irq_startup(desc);
+                       irq_startup(desc, true);
                else
                        /* Undo nested disables: */
                        desc->depth = 1;
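
The IRQF_ONESHOT block above ORs together every existing action->thread_mask and hands the new action the first zero bit via ffz(). The same first-free-bit allocation in userspace C, assuming a GCC-style __builtin_ctzl() as a stand-in for ffz():

#include <stdio.h>

/* Userspace stand-in for the kernel's ffz(): index of the first zero
 * bit in @mask. Assumes mask != ~0UL, which __setup_irq() checks
 * before calling ffz(). */
static unsigned int first_zero_bit(unsigned long mask)
{
	return __builtin_ctzl(~mask);
}

int main(void)
{
	unsigned long thread_mask = 0;
	int i;

	/* Mimic adding four IRQF_ONESHOT actions to one shared line:
	 * each new action claims the lowest free bit. */
	for (i = 0; i < 4; i++) {
		unsigned long bit = 1UL << first_zero_bit(thread_mask);

		thread_mask |= bit;
		printf("action %d -> thread_mask bit 0x%lx\n", i, bit);
	}
	return 0;
}
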
index 47420908fba0a97df65676862df84fd63fdeb839..c3c89751b327c9cf257c870d046973107be810da 100644 (file)
@@ -43,12 +43,16 @@ void irq_move_masked_irq(struct irq_data *idata)
         * masking the irqs.
         */
        if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-                  < nr_cpu_ids))
-               if (!chip->irq_set_affinity(&desc->irq_data,
-                                           desc->pending_mask, false)) {
+                  < nr_cpu_ids)) {
+               int ret = chip->irq_set_affinity(&desc->irq_data,
+                                                desc->pending_mask, false);
+               switch (ret) {
+               case IRQ_SET_MASK_OK:
                        cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
+               case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
                }
+       }
 
        cpumask_clear(desc->pending_mask);
 }
index f323a4cd58ef6812cb9283db806445ba72b26f04..fe4b09cf829ca2625559e972d6eb8671e3182cdd 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/syscore_ops.h>
 
 #include "internals.h"
 
@@ -39,25 +40,58 @@ void suspend_device_irqs(void)
 }
 EXPORT_SYMBOL_GPL(suspend_device_irqs);
 
-/**
- * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
- *
- * Enable all interrupt lines previously disabled by suspend_device_irqs() that
- * have the IRQS_SUSPENDED flag set.
- */
-void resume_device_irqs(void)
+static void resume_irqs(bool want_early)
 {
        struct irq_desc *desc;
        int irq;
 
        for_each_irq_desc(irq, desc) {
                unsigned long flags;
+               bool is_early = desc->action &&
+                       desc->action->flags & IRQF_EARLY_RESUME;
+
+               if (is_early != want_early)
+                       continue;
 
                raw_spin_lock_irqsave(&desc->lock, flags);
                __enable_irq(desc, irq, true);
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
 }
+
+/**
+ * irq_pm_syscore_ops - enable interrupt lines early
+ *
+ * Enable all interrupt lines with %IRQF_EARLY_RESUME set.
+ */
+static void irq_pm_syscore_resume(void)
+{
+       resume_irqs(true);
+}
+
+static struct syscore_ops irq_pm_syscore_ops = {
+       .resume         = irq_pm_syscore_resume,
+};
+
+static int __init irq_pm_init_ops(void)
+{
+       register_syscore_ops(&irq_pm_syscore_ops);
+       return 0;
+}
+
+device_initcall(irq_pm_init_ops);
+
+/**
+ * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
+ *
+ * Enable all non-%IRQF_EARLY_RESUME interrupt lines previously
+ * disabled by suspend_device_irqs() that have the IRQS_SUSPENDED flag
+ * set as well as those with %IRQF_FORCE_RESUME.
+ */
+void resume_device_irqs(void)
+{
+       resume_irqs(false);
+}
 EXPORT_SYMBOL_GPL(resume_device_irqs);
 
 /**
index aa57d5da18c1de65e807098702ad9b841fa3788d..dc813a948be2379fe20a0218e1d270212b8408d3 100644 (file)
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
-           (action->flags & __IRQF_TIMER) || !action->next)
+           (action->flags & __IRQF_TIMER) ||
+           (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+           !action->next)
                goto out;
 
        /* Already running on another processor */
@@ -115,7 +117,7 @@ static int misrouted_irq(int irq)
        struct irq_desc *desc;
        int i, ok = 0;
 
-       if (atomic_inc_return(&irq_poll_active) == 1)
+       if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;
 
        irq_poll_cpu = smp_processor_id();
index a8ce45097f3d21354c7ed8c18e5fcbc4e11792d1..e6f1f24ad57787665d21cfaa125a078ddc9de4cd 100644 (file)
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
                return;
 
        jump_label_lock();
-       if (atomic_add_return(1, &key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0)
                jump_label_update(key, JUMP_LABEL_ENABLE);
+       atomic_inc(&key->enabled);
        jump_label_unlock();
 }
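
jump_label_inc() now reads the count, does the enable work, and only then increments, all under jump_label_lock(), so a second caller can never observe a nonzero count before that work is visible. A userspace sketch of the corrected ordering with C11 atomics and a pthread mutex; do_enable_work() and the lock name are placeholders for the kernel's code patching and jump_label_lock():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int enabled;
static pthread_mutex_t patch_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the expensive one-time work (code patching in the
 * kernel); hypothetical for this sketch. */
static void do_enable_work(void)
{
	puts("patching call sites");
}

/* Do the one-time work while the count is still zero, under the lock,
 * and only then publish the increment. A concurrent caller serializes
 * on the lock and sees either count==0 (and waits) or count>0 with the
 * work already done. */
static void key_inc(void)
{
	pthread_mutex_lock(&patch_lock);
	if (atomic_load(&enabled) == 0)
		do_enable_work();
	atomic_fetch_add(&enabled, 1);
	pthread_mutex_unlock(&patch_lock);
}

int main(void)
{
	key_inc();
	key_inc();	/* second caller skips the work */
	printf("enabled=%d\n", atomic_load(&enabled));
	return 0;
}
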
 
index 47613dfb7b28c340825493ed36a4c3347a39290d..fabfe541b1d46a48c299681a73967185f4c87743 100644 (file)
@@ -114,10 +114,12 @@ int __request_module(bool wait, const char *fmt, ...)
        atomic_inc(&kmod_concurrent);
        if (atomic_read(&kmod_concurrent) > max_modprobes) {
                /* We may be blaming an innocent here, but unlikely */
-               if (kmod_loop_msg++ < 5)
+               if (kmod_loop_msg < 5) {
                        printk(KERN_ERR
                               "request_module: runaway loop modprobe %s\n",
                               module_name);
+                       kmod_loop_msg++;
+               }
                atomic_dec(&kmod_concurrent);
                return -ENOMEM;
        }
index 77981813a1e75d6c3c830dac5084bc37e47c1080..f1dcde499f69b9afdf146f019942a755c22cefad 100644 (file)
@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;
 
+       INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
        kretprobe_table_lock(hash, &flags);
@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        kretprobe_table_unlock(hash, &flags);
-       INIT_HLIST_HEAD(&empty_rp);
        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
@@ -1660,8 +1660,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                ri->rp = rp;
                ri->task = current;
 
-               if (rp->entry_handler && rp->entry_handler(ri, regs))
+               if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+                       spin_lock_irqsave(&rp->lock, flags);
+                       hlist_add_head(&ri->hlist, &rp->free_instances);
+                       spin_unlock_irqrestore(&rp->lock, flags);
                        return 0;
+               }
 
                arch_prepare_kretprobe(ri, regs);
 
index 242a688757d4adc1c1d1b057c61a51a9e88d40ef..a0dbb8cc07056d297d15165f7e3537c12c034886 100644 (file)
@@ -2294,8 +2294,7 @@ static int copy_and_check(struct load_info *info,
                return -ENOEXEC;
 
        /* Suck in entire file: we'll want most of it. */
-       /* vmalloc barfs on "unusual" numbers.  Check here */
-       if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
+       if ((hdr = vmalloc(len)) == NULL)
                return -ENOMEM;
 
        if (copy_from_user(hdr, umod, len) != 0) {
index a136da2f3968939da7f44482b1691ecaa138c247..564c7bc6ecbd6358de1457fae8687c52720b4a3d 100644 (file)
@@ -242,8 +242,16 @@ void add_taint(unsigned flag)
         * Also we want to keep up lockdep for staging development and
         * post-warning case.
         */
-       if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-               printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+       switch (flag) {
+       case TAINT_CRAP:
+       case TAINT_WARN:
+       case TAINT_FIRMWARE_WORKAROUND:
+               break;
+
+       default:
+               if (__debug_locks_off())
+                       printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+       }
 
        set_bit(flag, &tainted_mask);
 }
index 8f7b1db1ece1b3273f9eebbc128d7c884390c638..8884c27682f5c020db704d1ecd287b8854abf3d7 100644 (file)
@@ -623,7 +623,7 @@ int hibernate(void)
        /* Allocate memory management structures */
        error = create_basic_memory_bitmaps();
        if (error)
-               goto Exit;
+               goto Enable_umh;
 
        printk(KERN_INFO "PM: Syncing filesystems ... ");
        sys_sync();
@@ -631,7 +631,7 @@ int hibernate(void)
 
        error = prepare_processes();
        if (error)
-               goto Finish;
+               goto Free_bitmaps;
 
        if (hibernation_test(TEST_FREEZER))
                goto Thaw;
@@ -663,8 +663,9 @@ int hibernate(void)
 
  Thaw:
        thaw_processes();
- Finish:
+ Free_bitmaps:
        free_basic_memory_bitmaps();
+ Enable_umh:
        usermodehelper_enable();
  Exit:
        pm_notifier_call_chain(PM_POST_HIBERNATION);
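
The hibernate() hunk is purely about unwinding in reverse order: a failure after create_basic_memory_bitmaps() must free the bitmaps and re-enable the usermode helper, nothing more and nothing less. A generic sketch of that goto-label cleanup idiom; step_a/step_b/step_c are hypothetical stand-ins for the real setup calls:

#include <stdio.h>
#include <stdlib.h>

static int step_a(void) { puts("A up");    return 0; }
static int step_b(void) { puts("B up");    return 0; }
static int step_c(void) { puts("C fails"); return -1; }

/* Each failure jumps to the label that tears down exactly what was
 * already set up, in reverse order of acquisition. */
int do_sequence(void)
{
	int error;

	error = step_a();
	if (error)
		goto out;

	error = step_b();
	if (error)
		goto undo_a;

	error = step_c();
	if (error)
		goto undo_b;

	return 0;

 undo_b:
	puts("B down");
 undo_a:
	puts("A down");
 out:
	return error;
}

int main(void)
{
	return do_sequence() ? EXIT_FAILURE : EXIT_SUCCESS;
}
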
index 16e28b1e2a39343ce24ed9ed598f64149a1a24b9..23ac123d03d2b6e9de593651ef8d3c570ad85713 100644 (file)
@@ -314,7 +314,7 @@ int enter_state(suspend_state_t state)
  */
 int pm_suspend(suspend_state_t state)
 {
-       if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
+       if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX)
                return enter_state(state);
        return -EINVAL;
 }
index a2407a5fb2ea519a9a028b3dd17383d860c2cf42..c6b2f794a21045fe43835c26bd99e60b3d11fc4f 100644 (file)
@@ -369,8 +369,10 @@ static int check_syslog_permissions(int type, bool from_file)
                        return 0;
                /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
                if (capable(CAP_SYS_ADMIN)) {
-                       WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
-                                "but no CAP_SYSLOG (deprecated).\n");
+                       printk_once(KERN_WARNING "%s (%d): "
+                                "Attempt to access syslog with CAP_SYS_ADMIN "
+                                "but no CAP_SYSLOG (deprecated).\n",
+                                current->comm, task_pid_nr(current));
                        return 0;
                }
                return -EPERM;
index 859ea5a9605fa40fe47aedcb865ab08c66a43797..2c242fb2368b9e66b20196a2244cfc2e65748715 100644 (file)
@@ -164,10 +164,14 @@ depopulate:
  */
 static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
-       struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
-       if (!buf)
+       struct rchan_buf *buf;
+
+       if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
                return NULL;
 
+       buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+       if (!buf)
+               return NULL;
        buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
        if (!buf->padding)
                goto free_buf;
@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename,
 
        if (!(subbuf_size && n_subbufs))
                return NULL;
+       if (subbuf_size > UINT_MAX / n_subbufs)
+               return NULL;
 
        chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
        if (!chan)
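
Both relay checks above bound a multiplication before it reaches an allocator: n_subbufs * sizeof(size_t *) and subbuf_size * n_subbufs must not wrap. A userspace version of the same divide-before-multiply guard, using SIZE_MAX in place of the kernel's UINT_MAX bound:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate count * size bytes, refusing requests whose product would
 * overflow size_t. */
static void *alloc_array(size_t count, size_t size)
{
	if (count && size > SIZE_MAX / count)
		return NULL;
	return malloc(count * size);
}

int main(void)
{
	void *ok  = alloc_array(4096, sizeof(size_t *));
	void *bad = alloc_array(SIZE_MAX / 2, 16);	/* would overflow */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}
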
index 62948763bc8b8417165870bdb3ca7b6665a0b3ce..2da88acde9248d431517b14864ee01cebcce9569 100644 (file)
@@ -3393,13 +3393,10 @@ calc_load_n(unsigned long load, unsigned long exp,
  * Once we've updated the global active value, we need to apply the exponential
  * weights adjusted to the number of cycles missed.
  */
-static void calc_global_nohz(unsigned long ticks)
+static void calc_global_nohz(void)
 {
        long delta, active, n;
 
-       if (time_before(jiffies, calc_load_update))
-               return;
-
        /*
         * If we crossed a calc_load_update boundary, make sure to fold
         * any pending idle changes, the respective CPUs might have
@@ -3411,31 +3408,25 @@ static void calc_global_nohz(unsigned long ticks)
                atomic_long_add(delta, &calc_load_tasks);
 
        /*
-        * If we were idle for multiple load cycles, apply them.
+        * It could be that one fold was all it took and we are done.
         */
-       if (ticks >= LOAD_FREQ) {
-               n = ticks / LOAD_FREQ;
+       if (time_before(jiffies, calc_load_update + 10))
+               return;
 
-               active = atomic_long_read(&calc_load_tasks);
-               active = active > 0 ? active * FIXED_1 : 0;
+       /*
+        * Catch-up, fold however many we are behind still
+        */
+       delta = jiffies - calc_load_update - 10;
+       n = 1 + (delta / LOAD_FREQ);
 
-               avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-               avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-               avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+       active = atomic_long_read(&calc_load_tasks);
+       active = active > 0 ? active * FIXED_1 : 0;
 
-               calc_load_update += n * LOAD_FREQ;
-       }
+       avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+       avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+       avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-       /*
-        * Its possible the remainder of the above division also crosses
-        * a LOAD_FREQ period, the regular check in calc_global_load()
-        * which comes after this will take care of that.
-        *
-        * Consider us being 11 ticks before a cycle completion, and us
-        * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
-        * age us 4 cycles, and the test in calc_global_load() will
-        * pick up the final one.
-        */
+       calc_load_update += n * LOAD_FREQ;
 }
 #else
 static void calc_load_account_idle(struct rq *this_rq)
@@ -3447,7 +3438,7 @@ static inline long calc_load_fold_idle(void)
        return 0;
 }
 
-static void calc_global_nohz(unsigned long ticks)
+static void calc_global_nohz(void)
 {
 }
 #endif
@@ -3475,8 +3466,6 @@ void calc_global_load(unsigned long ticks)
 {
        long active;
 
-       calc_global_nohz(ticks);
-
        if (time_before(jiffies, calc_load_update + 10))
                return;
 
@@ -3488,6 +3477,16 @@ void calc_global_load(unsigned long ticks)
        avenrun[2] = calc_load(avenrun[2], EXP_15, active);
 
        calc_load_update += LOAD_FREQ;
+
+       /*
+        * Account one period with whatever state we found before
+        * folding in the nohz state and ageing the entire idle period.
+        *
+        * This avoids losing a sample when we go idle between
+        * calc_load_account_active() (10 ticks ago) and now and thus
+        * under-accounting.
+        */
+       calc_global_nohz();
 }
 
 /*
@@ -7222,11 +7221,8 @@ int sched_domain_level_max;
 
 static int __init setup_relax_domain_level(char *str)
 {
-       unsigned long val;
-
-       val = simple_strtoul(str, NULL, 0);
-       if (val < sched_domain_level_max)
-               default_relax_domain_level = val;
+       if (kstrtoint(str, 0, &default_relax_domain_level))
+               pr_warn("Unable to set relax_domain_level\n");
 
        return 1;
 }
@@ -7419,7 +7415,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
        if (!sd)
                return child;
 
-       set_domain_attribute(sd, attr);
        cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
        if (child) {
                sd->level = child->level + 1;
@@ -7427,6 +7422,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
                child->parent = sd;
        }
        sd->child = child;
+       set_domain_attribute(sd, attr);
 
        return sd;
 }
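
calc_global_nohz() above catches up the load average after an idle stretch by applying the per-period decay n = 1 + delta/LOAD_FREQ times. A self-contained sketch of that fixed-point filter, using the FSHIFT/FIXED_1/EXP_1 constants as defined in include/linux/sched.h of this era and omitting the rounding term, so the printed number is approximate:

#include <stdio.h>

#define FSHIFT   11
#define FIXED_1  (1UL << FSHIFT)	/* 1.0 in fixed point */
#define EXP_1    1884			/* 1/exp(5sec/1min), fixed point */

/* One LOAD_FREQ (5 s) step of the filter:
 * load = load * e + active * (1 - e), all in 11-bit fixed point.
 * calc_load_n() in the patch applies the same decay n times when the
 * system slept through n periods. */
static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
}

int main(void)
{
	unsigned long avenrun = 0;
	unsigned long active = 2 * FIXED_1;	/* two runnable tasks */
	int i;

	for (i = 0; i < 12; i++)		/* one simulated minute */
		avenrun = calc_load(avenrun, EXP_1, active);

	printf("1-min load ~ %lu.%02lu\n", avenrun >> FSHIFT,
	       (avenrun & (FIXED_1 - 1)) * 100 / FIXED_1);
	return 0;
}
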
index 17f2319d5e4f601f3b1432910bf942fe42548592..ac79f9e34fd2a877c26aad4cc9268dbd2a6259a0 100644 (file)
@@ -1390,6 +1390,11 @@ static int push_rt_task(struct rq *rq)
        if (!next_task)
                return 0;
 
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+       if (unlikely(task_running(rq, next_task)))
+               return 0;
+#endif
+
 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
index 415d85d6f6c637b099826d012e46f70832b1d557..43fee1cf50d01efe3e58a578906b16b6ae92ba07 100644 (file)
@@ -1894,21 +1894,19 @@ static int do_signal_stop(int signr)
                 */
                if (!(sig->flags & SIGNAL_STOP_STOPPED))
                        sig->group_exit_code = signr;
-               else
-                       WARN_ON_ONCE(!task_ptrace(current));
 
                current->group_stop &= ~GROUP_STOP_SIGMASK;
                current->group_stop |= signr | gstop;
                sig->group_stop_count = 1;
                for (t = next_thread(current); t != current;
                     t = next_thread(t)) {
-                       t->group_stop &= ~GROUP_STOP_SIGMASK;
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
                        if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
+                               t->group_stop &= ~GROUP_STOP_SIGMASK;
                                t->group_stop |= signr | gstop;
                                sig->group_stop_count++;
                                signal_wake_up(t, 0);
index ea468b1232ac39385d46e59e7200d6a32409a4e0..5b6afb27e8b26149959db96ce1e85232343fe42a 100644 (file)
@@ -173,7 +173,7 @@ static int proc_taint(struct ctl_table *table, int write,
 #endif
 
 #ifdef CONFIG_PRINTK
-static int proc_dmesg_restrict(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
@@ -710,7 +710,7 @@ static struct ctl_table kern_table[] = {
                .data           = &dmesg_restrict,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
+               .proc_handler   = proc_dointvec_minmax_sysadmin,
                .extra1         = &zero,
                .extra2         = &one,
        },
@@ -719,7 +719,7 @@ static struct ctl_table kern_table[] = {
                .data           = &kptr_restrict,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dmesg_restrict,
+               .proc_handler   = proc_dointvec_minmax_sysadmin,
                .extra1         = &zero,
                .extra2         = &two,
        },
@@ -2424,7 +2424,7 @@ static int proc_taint(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_PRINTK
-static int proc_dmesg_restrict(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        if (write && !capable(CAP_SYS_ADMIN))
index 3b8e028b96014a088b6227859b9163e8bceddb5a..e055e8b533ce2fee3f3fd94485998cf23ffb2f16 100644 (file)
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
 
        fput(file);
 out_putname:
-       putname(pathname);
+       __putname(pathname);
 out:
        return result;
 }
index fc0f220054172bb4a0f8b1edcd0f3cf9b50202f2..8d597b19f13c38c8ddbf42931684b99f178e6cb4 100644 (file)
@@ -657,6 +657,7 @@ static struct genl_ops taskstats_ops = {
        .cmd            = TASKSTATS_CMD_GET,
        .doit           = taskstats_user_cmd,
        .policy         = taskstats_cmd_get_policy,
+       .flags          = GENL_ADMIN_PERM,
 };
 
 static struct genl_ops cgroupstats_ops = {
index 8e8dc6d705c93126c8b971e6eb88529b3924f678..d77606214529a9a5c75f549eb999bbdf99342a76 100644 (file)
@@ -575,7 +575,7 @@ EXPORT_SYMBOL(jiffies_to_timeval);
 /*
  * Convert jiffies/jiffies_64 to clock_t and back.
  */
-clock_t jiffies_to_clock_t(long x)
+clock_t jiffies_to_clock_t(unsigned long x)
 {
 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
 # if HZ < USER_HZ
index ea5e1a928d5b08c04321ab1b39a87c486118b1d2..8b70c76910aa76d1b350a31f2687abf87361776e 100644 (file)
@@ -181,7 +181,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
                struct alarm *alarm;
                ktime_t expired = next->expires;
 
-               if (expired.tv64 >= now.tv64)
+               if (expired.tv64 > now.tv64)
                        break;
 
                alarm = container_of(next, struct alarm, node);
index e0980f0d9a0ad2d559b98c12f26317b55044cad1..8b270063b5152452d093428ee0044229a5c3e89f 100644 (file)
@@ -531,7 +531,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
         * note a margin of 12.5% is used because this can be computed with
         * a shift, versus say 10% which would require division.
         */
-       return max_nsecs - (max_nsecs >> 5);
+       return max_nsecs - (max_nsecs >> 3);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -653,7 +653,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
         * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
         * margin as we do in clocksource_max_deferment()
         */
-       sec = (cs->mask - (cs->mask >> 5));
+       sec = (cs->mask - (cs->mask >> 3));
        do_div(sec, freq);
        do_div(sec, scale);
        if (!sec)
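
The clocksource hunks change the safety margin from max >> 5 (about 3%) to max >> 3, which is the 12.5% the comment promises, and a shift is all it costs. A short illustration of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Keep 87.5% of the theoretical maximum: subtracting a one-eighth
 * share (>> 3) is the "12.5% margin computed with a shift" that the
 * clocksource comment describes; the old >> 5 reserved only ~3%. */
static uint64_t apply_margin(uint64_t max_nsecs)
{
	return max_nsecs - (max_nsecs >> 3);
}

int main(void)
{
	uint64_t max = 1000000000ULL;	/* pretend 1 s of headroom */

	printf("usable deferment: %llu ns\n",
	       (unsigned long long)apply_margin(max));
	return 0;
}
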
index f6117a4c7cb8e97eab31019a23122429a800dbf7..4b85a7a72526e0e3a863e41a8b61b568754d83d0 100644 (file)
@@ -275,7 +275,7 @@ static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
 
        time_status |= STA_MODE;
 
-       return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
+       return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
 }
 
 static void ntp_update_offset(long offset)
index c7218d13273861dd65f5a484852001f88d8ce819..7a90d021b79ae0e145273585a1381ed1924269f6 100644 (file)
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;
 
-       clockevents_exchange_device(NULL, dev);
+       clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
index d5097c44b407e25a1acae0f5d85c520eda555ae5..c473ce246cb7d375ccfe99dbbea66bba63e63fe2 100644 (file)
@@ -484,9 +484,9 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
                                hrtimer_get_expires(&ts->sched_timer), 0))
                                break;
                }
-               /* Update jiffies and reread time */
-               tick_do_update_jiffies64(now);
+               /* Reread time and update jiffies */
                now = ktime_get();
+               tick_do_update_jiffies64(now);
        }
 }
 
index fa58ec6d3bc322042af7454a8dd5eb1da8fedea0..9b28d0407715088b86dea6926290dda269ba24b4 100644 (file)
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
                secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
                nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
                nsecs += timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
 
        } while (read_seqretry(&xtime_lock, seq));
        /*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
                *ts = xtime;
                tomono = wall_to_monotonic;
                nsecs = timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
 
        } while (read_seqretry(&xtime_lock, seq));
 
index ef9271b69b4f1759fa6dff8b0f8effaa19a13726..9f8e2e11020a7f34aa4e93e69daea83121a7963f 100644 (file)
@@ -952,7 +952,7 @@ struct ftrace_func_probe {
 };
 
 enum {
-       FTRACE_ENABLE_CALLS             = (1 << 0),
+       FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
@@ -1182,8 +1182,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
        return NULL;
 }
 
+static void
+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+static void
+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+
 static int
-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+                struct ftrace_hash **dst, struct ftrace_hash *src)
 {
        struct ftrace_func_entry *entry;
        struct hlist_node *tp, *tn;
@@ -1193,8 +1199,15 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
        unsigned long key;
        int size = src->count;
        int bits = 0;
+       int ret;
        int i;
 
+       /*
+        * Remove the current set, update the hash and add
+        * them back.
+        */
+       ftrace_hash_rec_disable(ops, enable);
+
        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
@@ -1215,9 +1228,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;
 
+       ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
-               return -ENOMEM;
+               goto out;
 
        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
@@ -1236,7 +1250,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);
 
-       return 0;
+       ret = 0;
+ out:
+       /*
+        * Enable regardless of ret:
+        *  On success, we enable the new hash.
+        *  On failure, we re-enable the original hash.
+        */
+       ftrace_hash_rec_enable(ops, enable);
+
+       return ret;
 }
 
 /*
@@ -1498,7 +1521,7 @@ int ftrace_text_reserved(void *start, void *end)
 
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, int update)
 {
        unsigned long ftrace_addr;
        unsigned long flag = 0UL;
@@ -1506,17 +1529,17 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
        ftrace_addr = (unsigned long)FTRACE_ADDR;
 
        /*
-        * If we are enabling tracing:
+        * If we are updating calls:
         *
         *   If the record has a ref count, then we need to enable it
         *   because someone is using it.
         *
         *   Otherwise we make sure its disabled.
         *
-        * If we are disabling tracing, then disable all records that
+        * If we are disabling calls, then disable all records that
         * are enabled.
         */
-       if (enable && (rec->flags & ~FTRACE_FL_MASK))
+       if (update && (rec->flags & ~FTRACE_FL_MASK))
                flag = FTRACE_FL_ENABLED;
 
        /* If the state of this record hasn't changed, then do nothing */
@@ -1532,7 +1555,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
        return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
-static void ftrace_replace_code(int enable)
+static void ftrace_replace_code(int update)
 {
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
@@ -1546,7 +1569,7 @@ static void ftrace_replace_code(int enable)
                if (rec->flags & FTRACE_FL_FREE)
                        continue;
 
-               failed = __ftrace_replace_code(rec, enable);
+               failed = __ftrace_replace_code(rec, update);
                if (failed) {
                        ftrace_bug(failed, rec->ip);
                        /* Stop processing */
@@ -1596,7 +1619,7 @@ static int __ftrace_modify_code(void *data)
 {
        int *command = data;
 
-       if (*command & FTRACE_ENABLE_CALLS)
+       if (*command & FTRACE_UPDATE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);
@@ -1652,7 +1675,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
                return -ENODEV;
 
        ftrace_start_up++;
-       command |= FTRACE_ENABLE_CALLS;
+       command |= FTRACE_UPDATE_CALLS;
 
        /* ops marked global share the filter hashes */
        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
@@ -1704,8 +1727,7 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
        if (ops != &global_ops || !global_start_up)
                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
-       if (!ftrace_start_up)
-               command |= FTRACE_DISABLE_CALLS;
+       command |= FTRACE_UPDATE_CALLS;
 
        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
@@ -1727,7 +1749,7 @@ static void ftrace_startup_sysctl(void)
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
-               ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+               ftrace_run_update_code(FTRACE_UPDATE_CALLS);
 }
 
 static void ftrace_shutdown_sysctl(void)
@@ -2877,7 +2899,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
                ftrace_match_records(hash, buf, len);
 
        mutex_lock(&ftrace_lock);
-       ret = ftrace_hash_move(orig_hash, hash);
+       ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+       if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+           && ftrace_enabled)
+               ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+
        mutex_unlock(&ftrace_lock);
 
        mutex_unlock(&ftrace_regex_lock);
@@ -3060,18 +3086,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
                        orig_hash = &iter->ops->notrace_hash;
 
                mutex_lock(&ftrace_lock);
-               /*
-                * Remove the current set, update the hash and add
-                * them back.
-                */
-               ftrace_hash_rec_disable(iter->ops, filter_hash);
-               ret = ftrace_hash_move(orig_hash, iter->hash);
-               if (!ret) {
-                       ftrace_hash_rec_enable(iter->ops, filter_hash);
-                       if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
-                           && ftrace_enabled)
-                               ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-               }
+               ret = ftrace_hash_move(iter->ops, filter_hash,
+                                      orig_hash, iter->hash);
+               if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+                   && ftrace_enabled)
+                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+
                mutex_unlock(&ftrace_lock);
        }
        free_ftrace_hash(iter->hash);
index ee9c921d7f21e4d0578e35032607e9bc542e219f..0731e81a5bbdbfbb7c918040b0497383acf5d753 100644 (file)
@@ -3704,8 +3704,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
        if (info->read < PAGE_SIZE)
                goto read;
 
-       info->read = 0;
-
        trace_access_lock(info->cpu);
        ret = ring_buffer_read_page(info->tr->buffer,
                                    &info->spare,
@@ -3715,6 +3713,8 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
        if (ret < 0)
                return 0;
 
+       info->read = 0;
+
 read:
        size = PAGE_SIZE - info->read;
        if (size > count)
index e32744c84d9497bd041a2e612d82f1f859406af9..1fe81eef1ce4ec8a9a19da634dc3853893959969 100644 (file)
@@ -156,6 +156,12 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
 
 #define FTRACE_STACK_ENTRIES   8
 
+#ifndef CONFIG_64BIT
+# define IP_FMT "%08lx"
+#else
+# define IP_FMT "%016lx"
+#endif
+
 FTRACE_ENTRY(kernel_stack, stack_entry,
 
        TRACE_STACK,
@@ -164,8 +170,9 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
                __array(        unsigned long,  caller, FTRACE_STACK_ENTRIES    )
        ),
 
-       F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
-                "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
+       F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+                "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+                "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
                 __entry->caller[0], __entry->caller[1], __entry->caller[2],
                 __entry->caller[3], __entry->caller[4], __entry->caller[5],
                 __entry->caller[6], __entry->caller[7])
@@ -180,8 +187,9 @@ FTRACE_ENTRY(user_stack, userstack_entry,
                __array(        unsigned long,  caller, FTRACE_STACK_ENTRIES    )
        ),
 
-       F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
-                "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
+       F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+                "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+                "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
                 __entry->caller[0], __entry->caller[1], __entry->caller[2],
                 __entry->caller[3], __entry->caller[4], __entry->caller[5],
                 __entry->caller[6], __entry->caller[7])
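
The trace_entries.h change picks an address format at preprocessing time and splices it into the larger F_printk() format by string-literal pasting. A userspace analogue, using __SIZEOF_LONG__ in place of the kernel's CONFIG_64BIT; the sample addresses are arbitrary:

#include <stdio.h>

/* Choose the address width once, then let adjacent string literals
 * fold IP_FMT into the larger printf format at compile time. */
#if __SIZEOF_LONG__ == 8
# define IP_FMT "%016lx"
#else
# define IP_FMT "%08lx"
#endif

int main(void)
{
	unsigned long caller[2] = { 0x81000000UL, 0xc0008000UL };

	printf("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
	       caller[0], caller[1]);
	return 0;
}
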
index 3e2a7c91c548d2d1a16b0c89f4e62d686fb245b1..2d049368242053ecca48019994522ca193e2504a 100644 (file)
@@ -1096,7 +1096,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
-                       __get_system(system);
                        system->nr_events++;
                        return system->entry;
                }
index 256764ecccd66fca5045cf0a53bb9d04d8e7f106..bd3c6369f80d97c471818bac8349d389691b25f7 100644 (file)
@@ -1766,7 +1766,7 @@ static int replace_system_preds(struct event_subsystem *system,
                 * replace the filter for the call.
                 */
                filter = call->filter;
-               call->filter = filter_item->filter;
+               rcu_assign_pointer(call->filter, filter_item->filter);
                filter_item->filter = filter;
 
                fail = false;
@@ -1821,7 +1821,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
                filter = call->filter;
                if (!filter)
                        goto out_unlock;
-               call->filter = NULL;
+               RCU_INIT_POINTER(call->filter, NULL);
                /* Make sure the filter is not being used */
                synchronize_sched();
                __free_filter(filter);
@@ -1862,7 +1862,7 @@ out:
         * string
         */
        tmp = call->filter;
-       call->filter = filter;
+       rcu_assign_pointer(call->filter, filter);
        if (tmp) {
                /* Make sure the call is done with the filter */
                synchronize_sched();
index bbeec31e0ae3f03144684cfd5039a138497328e0..ad4000c71be093ee66d90dab6756777134c868c5 100644 (file)
@@ -150,7 +150,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call)   \
 #define __dynamic_array(type, item)
 
 #undef F_printk
-#define F_printk(fmt, args...) #fmt ", "  __stringify(args)
+#define F_printk(fmt, args...) __stringify(fmt) ", "  __stringify(args)
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, etype, tstruct, print)         \
index aec02b6a1c4a4b70ae0eb1c15fc40b93b726c849..ee1845b8d6904153b88ff14de468b2aa45d16bb2 100644 (file)
@@ -252,11 +252,13 @@ struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
 struct workqueue_struct *system_unbound_wq __read_mostly;
 struct workqueue_struct *system_freezable_wq __read_mostly;
+struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
 EXPORT_SYMBOL_GPL(system_freezable_wq);
+EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -1212,8 +1214,13 @@ static void worker_enter_idle(struct worker *worker)
        } else
                wake_up_all(&gcwq->trustee_wait);
 
-       /* sanity check nr_running */
-       WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
+       /*
+        * Sanity check nr_running.  Because trustee releases gcwq->lock
+        * between setting %WORKER_ROGUE and zapping nr_running, the
+        * warning may trigger spuriously.  Check iff trustee is idle.
+        */
+       WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
+                    gcwq->nr_workers == gcwq->nr_idle &&
                     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
 }
 
@@ -3796,8 +3803,11 @@ static int __init init_workqueues(void)
                                            WQ_UNBOUND_MAX_ACTIVE);
        system_freezable_wq = alloc_workqueue("events_freezable",
                                              WQ_FREEZABLE, 0);
+       system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
+                       WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
        BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
-              !system_unbound_wq || !system_freezable_wq);
+              !system_unbound_wq || !system_freezable_wq ||
+               !system_nrt_freezable_wq);
        return 0;
 }
 early_initcall(init_workqueues);
index 2a34392bcecc3680dfb10fbbc7ae126792aa6713..297124d4d8da081785e9f5a4f4ce11d3dd706da7 100644 (file)
@@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
 
        if (head->height == 0)
                return NULL;
-retry:
        longcpy(key, __key, geo->keylen);
+retry:
        dec_key(geo, key);
 
        node = head->node;
@@ -351,7 +351,7 @@ retry:
        }
 miss:
        if (retry_key) {
-               __key = retry_key;
+               longcpy(key, retry_key, geo->keylen);
                retry_key = NULL;
                goto retry;
        }
index 70af0a7f97c0eb4801e177458d182ab6baad2767..6d40244e8010f05f2fcb734086be6864927eeef4 100644 (file)
 
 u64 uevent_seqnum;
 char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
-static DEFINE_SPINLOCK(sequence_lock);
 #ifdef CONFIG_NET
 struct uevent_sock {
        struct list_head list;
        struct sock *sk;
 };
 static LIST_HEAD(uevent_sock_list);
-static DEFINE_MUTEX(uevent_sock_mutex);
 #endif
 
+/* This lock protects uevent_seqnum and uevent_sock_list */
+static DEFINE_MUTEX(uevent_sock_mutex);
+
 /* the strings here must match the enum in include/linux/kobject.h */
 static const char *kobject_actions[] = {
        [KOBJ_ADD] =            "add",
@@ -136,7 +137,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
        struct kobject *top_kobj;
        struct kset *kset;
        const struct kset_uevent_ops *uevent_ops;
-       u64 seq;
        int i = 0;
        int retval = 0;
 #ifdef CONFIG_NET
@@ -243,17 +243,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
        else if (action == KOBJ_REMOVE)
                kobj->state_remove_uevent_sent = 1;
 
+       mutex_lock(&uevent_sock_mutex);
        /* we will send an event, so request a new sequence number */
-       spin_lock(&sequence_lock);
-       seq = ++uevent_seqnum;
-       spin_unlock(&sequence_lock);
-       retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
-       if (retval)
+       retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
+       if (retval) {
+               mutex_unlock(&uevent_sock_mutex);
                goto exit;
+       }
 
 #if defined(CONFIG_NET)
        /* send netlink message */
-       mutex_lock(&uevent_sock_mutex);
        list_for_each_entry(ue_sk, &uevent_sock_list, list) {
                struct sock *uevent_sock = ue_sk->sk;
                struct sk_buff *skb;
@@ -282,13 +281,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
                                                            kobj_bcast_filter,
                                                            kobj);
                        /* ENOBUFS should be handled in userspace */
-                       if (retval == -ENOBUFS)
+                       if (retval == -ENOBUFS || retval == -ESRCH)
                                retval = 0;
                } else
                        retval = -ENOMEM;
        }
-       mutex_unlock(&uevent_sock_mutex);
 #endif
+       mutex_unlock(&uevent_sock_mutex);
 
        /* call uevent_helper, usually only enabled during early boot */
        if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
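
The rewrite above retires the sequence_lock spinlock and takes uevent_sock_mutex around both the SEQNUM bump and the netlink broadcasts, so receivers see sequence numbers in the same order as the messages. A minimal sketch of that locking pattern, with hypothetical names and a stub transport:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* one mutex covers both the counter and the send path */
static DEFINE_MUTEX(event_mutex);
static u64 event_seqnum;

static int send_event(u64 seq)
{
	pr_info("event %llu\n", (unsigned long long)seq);	/* stub transport */
	return 0;
}

static int emit_event(void)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = send_event(++event_seqnum);	/* numbering and sending stay in step */
	mutex_unlock(&event_mutex);
	return ret;
}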
index ac09f2226dc748132cd869bdf0933a5daf7509c4..a8408b6cacdf7ff8e73fcd3bf7db8a850afb56a4 100644 (file)
@@ -20,6 +20,7 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
        [NLA_U16]       = sizeof(u16),
        [NLA_U32]       = sizeof(u32),
        [NLA_U64]       = sizeof(u64),
+       [NLA_MSECS]     = sizeof(u64),
        [NLA_NESTED]    = NLA_HDRLEN,
 };
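
The added entry gives NLA_MSECS a minimum payload of sizeof(u64), so attribute validation rejects messages whose millisecond field is too short. A hedged sketch of a policy that depends on it (the MYATTR_* enum and helper are invented for illustration):

#include <net/netlink.h>

enum {
	MYATTR_UNSPEC,
	MYATTR_TIMEOUT,			/* 64-bit milliseconds on the wire */
	__MYATTR_MAX,
};
#define MYATTR_MAX (__MYATTR_MAX - 1)

static const struct nla_policy myattr_policy[MYATTR_MAX + 1] = {
	[MYATTR_TIMEOUT] = { .type = NLA_MSECS },
};

static int parse_timeout(struct nlattr *head, int len, unsigned long *jiffies_out)
{
	struct nlattr *tb[MYATTR_MAX + 1];
	int err;

	err = nla_parse(tb, MYATTR_MAX, head, len, myattr_policy);
	if (err)
		return err;
	if (tb[MYATTR_TIMEOUT])
		*jiffies_out = nla_get_msecs(tb[MYATTR_TIMEOUT]);
	return 0;
}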
 
index e56fe35cef0547032faab7ccd095833abd5fbd2f..b3b122f4630dcc199f3096456df5787e79d5b7e9 100644 (file)
@@ -686,6 +686,14 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
        bdi_unregister(bdi);
 
+       /*
+        * If bdi_unregister() had already been called earlier, the
+        * wakeup_timer could still be armed because bdi_prune_sb()
+        * can race with the bdi_wakeup_thread_delayed() calls from
+        * __mark_inode_dirty().
+        */
+       del_timer_sync(&bdi->wb.wakeup_timer);
+
        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);
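
The comment added above describes an armed timer racing with object teardown; del_timer_sync() guarantees the handler is neither pending nor still running before the structure goes away. The generic shape of that teardown order (hypothetical names, and assuming nothing can re-arm the timer afterwards):

#include <linux/slab.h>
#include <linux/timer.h>

struct demo_dev {
	struct timer_list wakeup_timer;
	/* ... other state ... */
};

static void demo_dev_destroy(struct demo_dev *dev)
{
	/* Wait for a running handler to finish and make sure the timer
	 * cannot fire again before the memory is released. */
	del_timer_sync(&dev->wakeup_timer);
	kfree(dev);
}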
 
index 01d5a4b3dd0c1dd857f05f474ce096a9a2938001..9686c4e3f80d4a3905b17f3389f7fd14cc2f69cf 100644 (file)
@@ -768,14 +768,13 @@ void * __init alloc_bootmem_section(unsigned long size,
                                    unsigned long section_nr)
 {
        bootmem_data_t *bdata;
-       unsigned long pfn, goal, limit;
+       unsigned long pfn, goal;
 
        pfn = section_nr_to_pfn(section_nr);
        goal = pfn << PAGE_SHIFT;
-       limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
        bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
 
-       return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
+       return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
 }
 #endif
 
index 6cc604bd56496e2742e371d63234af08f9f0859b..c4bc5acf865d7124cd8597b94663425525ad63d9 100644 (file)
@@ -320,12 +320,34 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                } else if (!locked)
                        spin_lock_irq(&zone->lru_lock);
 
+               /*
+                * migrate_pfn does not necessarily start aligned to a
+                * pageblock. Ensure that pfn_valid is called when moving
+                * into a new MAX_ORDER_NR_PAGES range in case of large
+                * memory holes within the zone
+                */
+               if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+                       if (!pfn_valid(low_pfn)) {
+                               low_pfn += MAX_ORDER_NR_PAGES - 1;
+                               continue;
+                       }
+               }
+
                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;
 
-               /* Get the page and skip if free */
+               /*
+                * Get the page and ensure the page is within the same zone.
+                * See the comment in isolate_freepages about overlapping
+                * nodes. It is deliberate that the new zone lock is not taken
+                * as memory compaction should not move pages between nodes.
+                */
                page = pfn_to_page(low_pfn);
+               if (page_zone(page) != zone)
+                       continue;
+
+               /* Skip if free */
                if (PageBuddy(page))
                        continue;
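
The new block only re-checks pfn_valid() when low_pfn crosses a MAX_ORDER_NR_PAGES boundary, because the memory map is only guaranteed valid in whole MAX_ORDER-sized blocks and testing every pfn would be wasteful. The mask test works because MAX_ORDER_NR_PAGES is a power of two; a tiny userspace sketch (assuming 2048-page blocks) shows where it fires:

#include <stdio.h>

#define MAX_ORDER_NR_PAGES 2048UL	/* assumed: order-11 blocks of 4 KiB pages */

int main(void)
{
	unsigned long pfn;

	for (pfn = 2046; pfn <= 2050; pfn++)
		printf("pfn %lu: %s\n", pfn,
		       (pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 ?
		       "block boundary, re-check pfn_valid()" : "inside block");
	return 0;
}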
 
index a8251a8d3457d28802164d6538c9226e98416f73..b7d860390f34779836aaec1182fdd60c7a71a82f 100644 (file)
@@ -396,24 +396,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
        int error;
-       struct mem_cgroup *memcg = NULL;
 
        VM_BUG_ON(!PageLocked(old));
        VM_BUG_ON(!PageLocked(new));
        VM_BUG_ON(new->mapping);
 
-       /*
-        * This is not page migration, but prepare_migration and
-        * end_migration does enough work for charge replacement.
-        *
-        * In the longer term we probably want a specialized function
-        * for moving the charge from old to new in a more efficient
-        * manner.
-        */
-       error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
-       if (error)
-               return error;
-
        error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (!error) {
                struct address_space *mapping = old->mapping;
@@ -435,13 +422,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                if (PageSwapBacked(new))
                        __inc_zone_page_state(new, NR_SHMEM);
                spin_unlock_irq(&mapping->tree_lock);
+               /* mem_cgroup code must not be called under tree_lock */
+               mem_cgroup_replace_page_cache(old, new);
                radix_tree_preload_end();
                if (freepage)
                        freepage(old);
                page_cache_release(old);
-               mem_cgroup_end_migration(memcg, old, new, true);
-       } else {
-               mem_cgroup_end_migration(memcg, old, new, false);
        }
 
        return error;
@@ -1393,15 +1379,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
        unsigned long seg = 0;
        size_t count;
        loff_t *ppos = &iocb->ki_pos;
-       struct blk_plug plug;
 
        count = 0;
        retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
        if (retval)
                return retval;
 
-       blk_start_plug(&plug);
-
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
                loff_t size;
@@ -1417,8 +1400,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                        retval = filemap_write_and_wait_range(mapping, pos,
                                        pos + iov_length(iov, nr_segs) - 1);
                        if (!retval) {
+                               struct blk_plug plug;
+
+                               blk_start_plug(&plug);
                                retval = mapping->a_ops->direct_IO(READ, iocb,
                                                        iov, pos, nr_segs);
+                               blk_finish_plug(&plug);
                        }
                        if (retval > 0) {
                                *ppos = pos + retval;
@@ -1474,7 +1461,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                        break;
        }
 out:
-       blk_finish_plug(&plug);
        return retval;
 }
 EXPORT_SYMBOL(generic_file_aio_read);
@@ -1807,7 +1793,7 @@ repeat:
                page = __page_cache_alloc(gfp | __GFP_COLD);
                if (!page)
                        return ERR_PTR(-ENOMEM);
-               err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+               err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (unlikely(err)) {
                        page_cache_release(page);
                        if (err == -EEXIST)
@@ -1904,10 +1890,7 @@ static struct page *wait_on_page_read(struct page *page)
  * @gfp:       the page allocator flags to use if allocating
  *
  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
  *
  * If the page does not get brought uptodate, return -EIO.
  */
index 93356cd12828a40eb4d635a9e0e0bea3d6ba0790..dee94297f3924f63808c2f20dca42c6d99ba0f32 100644 (file)
@@ -263,7 +263,12 @@ found:
                                                        xip_pfn);
                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
-               BUG_ON(err);
+               /*
+                * err == -EBUSY is fine, we've raced against another thread
+                * that faulted-in the same page
+                */
+               if (err != -EBUSY)
+                       BUG_ON(err);
                return VM_FAULT_NOPAGE;
        } else {
                int err, ret = VM_FAULT_OOM;
index 81532f297fd22cd11e1c7a4161c8e320a7015ec2..8cc11dda6a747e4e9e823c8f988fd441a4b43ad9 100644 (file)
@@ -641,6 +641,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                set_pmd_at(mm, haddr, pmd, entry);
                prepare_pmd_huge_pte(pgtable, mm);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+               mm->nr_ptes++;
                spin_unlock(&mm->page_table_lock);
        }
 
@@ -759,6 +760,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
        prepare_pmd_huge_pte(pgtable, dst_mm);
+       dst_mm->nr_ptes++;
 
        ret = 0;
 out_unlock:
@@ -857,7 +859,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        }
        kfree(pages);
 
-       mm->nr_ptes++;
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        page_remove_rmap(page);
@@ -989,7 +990,7 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON(!PageCompound(page));
        if (flags & FOLL_GET)
-               get_page(page);
+               get_page_foll(page);
 
 out:
        return page;
@@ -1016,6 +1017,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        VM_BUG_ON(page_mapcount(page) < 0);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                        VM_BUG_ON(!PageHead(page));
+                       tlb->mm->nr_ptes--;
                        spin_unlock(&tlb->mm->page_table_lock);
                        tlb_remove_page(tlb, page);
                        pte_free(tlb->mm, pgtable);
@@ -1156,6 +1158,7 @@ static void __split_huge_page_refcount(struct page *page)
        unsigned long head_index = page->index;
        struct zone *zone = page_zone(page);
        int zonestat;
+       int tail_count = 0;
 
        /* prevent PageLRU to go away from under us, and freeze lru stats */
        spin_lock_irq(&zone->lru_lock);
@@ -1164,11 +1167,27 @@ static void __split_huge_page_refcount(struct page *page)
        for (i = 1; i < HPAGE_PMD_NR; i++) {
                struct page *page_tail = page + i;
 
-               /* tail_page->_count cannot change */
-               atomic_sub(atomic_read(&page_tail->_count), &page->_count);
-               BUG_ON(page_count(page) <= 0);
-               atomic_add(page_mapcount(page) + 1, &page_tail->_count);
-               BUG_ON(atomic_read(&page_tail->_count) <= 0);
+               /* tail_page->_mapcount cannot change */
+               BUG_ON(page_mapcount(page_tail) < 0);
+               tail_count += page_mapcount(page_tail);
+               /* check for overflow */
+               BUG_ON(tail_count < 0);
+               BUG_ON(atomic_read(&page_tail->_count) != 0);
+               /*
+                * tail_page->_count is zero and not changing from
+                * under us. But get_page_unless_zero() may be running
+                * from under us on the tail_page. If we used
+                * atomic_set() below instead of atomic_add(), we
+                * would then run atomic_set() concurrently with
+                * get_page_unless_zero(), and atomic_set() is
+                * implemented in C not using locked ops. spin_unlock
+                * on x86 sometimes uses locked ops because of PPro
+                * errata 66, 92, so unless somebody can guarantee
+                * atomic_set() here would be safe on all archs (and
+                * not only on x86), it's safer to use atomic_add().
+                */
+               atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
+                          &page_tail->_count);
 
                /* after clearing PageTail the gup refcount can be released */
                smp_mb();
@@ -1186,10 +1205,7 @@ static void __split_huge_page_refcount(struct page *page)
                                      (1L << PG_uptodate)));
                page_tail->flags |= (1L << PG_dirty);
 
-               /*
-                * 1) clear PageTail before overwriting first_page
-                * 2) clear PageTail before clearing PageHead for VM_BUG_ON
-                */
+               /* clear PageTail before overwriting first_page */
                smp_wmb();
 
                /*
@@ -1206,7 +1222,6 @@ static void __split_huge_page_refcount(struct page *page)
                 * status is achieved setting a reserved bit in the
                 * pmd, not by clearing the present bit.
                */
-               BUG_ON(page_mapcount(page_tail));
                page_tail->_mapcount = page->_mapcount;
 
                BUG_ON(page_tail->mapping);
@@ -1223,6 +1238,8 @@ static void __split_huge_page_refcount(struct page *page)
 
                lru_add_page_tail(zone, page, page_tail);
        }
+       atomic_sub(tail_count, &page->_count);
+       BUG_ON(atomic_read(&page->_count) <= 0);
 
        __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
        __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
@@ -1295,7 +1312,6 @@ static int __split_huge_page_map(struct page *page,
                        pte_unmap(pte);
                }
 
-               mm->nr_ptes++;
                smp_wmb(); /* make pte visible before pmd */
                /*
                 * Up to this point the pmd is present and huge and
@@ -1910,7 +1926,6 @@ static void collapse_huge_page(struct mm_struct *mm,
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache(vma, address, entry);
        prepare_pmd_huge_pte(pgtable, mm);
-       mm->nr_ptes--;
        spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
@@ -2005,7 +2020,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
        struct mm_struct *mm = mm_slot->mm;
 
-       VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
        if (khugepaged_test_exit(mm)) {
                /* free mm_slot */
@@ -2033,7 +2048,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
        int progress = 0;
 
        VM_BUG_ON(!pages);
-       VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
        if (khugepaged_scan.mm_slot)
                mm_slot = khugepaged_scan.mm_slot;
index bfcf153bc82907f31ccc1921256622a82bf20d2f..05f8fd425f69e3a8de95e13e75444c20a66aa46a 100644 (file)
@@ -575,6 +575,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -900,7 +901,6 @@ retry:
        h->resv_huge_pages += delta;
        ret = 0;
 
-       spin_unlock(&hugetlb_lock);
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
@@ -914,6 +914,7 @@ retry:
                VM_BUG_ON(page_count(page));
                enqueue_huge_page(h, page);
        }
+       spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
 free:
@@ -2059,6 +2060,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
                kref_get(&reservations->refs);
 }
 
+static void resv_map_put(struct vm_area_struct *vma)
+{
+       struct resv_map *reservations = vma_resv_map(vma);
+
+       if (!reservations)
+               return;
+       kref_put(&reservations->refs, resv_map_release);
+}
+
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
        struct hstate *h = hstate_vma(vma);
@@ -2074,7 +2084,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
                reserve = (end - start) -
                        region_count(&reservations->regions, start, end);
 
-               kref_put(&reservations->refs, resv_map_release);
+               resv_map_put(vma);
 
                if (reserve) {
                        hugetlb_acct_memory(h, -reserve);
@@ -2397,7 +2407,6 @@ retry_avoidcopy:
                if (outside_reserve) {
                        BUG_ON(huge_pte_none(pte));
                        if (unmap_ref_private(mm, vma, old_page, address)) {
-                               BUG_ON(page_count(old_page) != 1);
                                BUG_ON(huge_pte_none(pte));
                                spin_lock(&mm->page_table_lock);
                                goto retry_avoidcopy;
@@ -2415,6 +2424,8 @@ retry_avoidcopy:
         * anon_vma prepared.
         */
        if (unlikely(anon_vma_prepare(vma))) {
+               page_cache_release(new_page);
+               page_cache_release(old_page);
                /* Caller expects lock to be held */
                spin_lock(&mm->page_table_lock);
                return VM_FAULT_OOM;
@@ -2676,6 +2687,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * so no worry about deadlock.
         */
        page = pte_page(entry);
+       get_page(page);
        if (page != pagecache_page)
                lock_page(page);
 
@@ -2707,6 +2719,7 @@ out_page_table_lock:
        }
        if (page != pagecache_page)
                unlock_page(page);
+       put_page(page);
 
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
@@ -2873,12 +2886,16 @@ int hugetlb_reserve_pages(struct inode *inode,
                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
        }
 
-       if (chg < 0)
-               return chg;
+       if (chg < 0) {
+               ret = chg;
+               goto out_err;
+       }
 
        /* There must be enough filesystem quota for the mapping */
-       if (hugetlb_get_quota(inode->i_mapping, chg))
-               return -ENOSPC;
+       if (hugetlb_get_quota(inode->i_mapping, chg)) {
+               ret = -ENOSPC;
+               goto out_err;
+       }
 
        /*
         * Check enough hugepages are available for the reservation.
@@ -2887,7 +2904,7 @@ int hugetlb_reserve_pages(struct inode *inode,
        ret = hugetlb_acct_memory(h, chg);
        if (ret < 0) {
                hugetlb_put_quota(inode->i_mapping, chg);
-               return ret;
+               goto out_err;
        }
 
        /*
@@ -2904,6 +2921,10 @@ int hugetlb_reserve_pages(struct inode *inode,
        if (!vma || vma->vm_flags & VM_MAYSHARE)
                region_add(&inode->i_mapping->private_list, from, to);
        return 0;
+out_err:
+       if (vma)
+               resv_map_put(vma);
+       return ret;
 }
 
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
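
Several of the hugetlb fixes above are reference-count hygiene: the new resv_map_put() helper wraps kref_put() so that both the normal close path and the new out_err path drop the reservation map exactly once. The underlying kref pattern, sketched with an illustrative structure (not the kernel's resv_map):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_resv {
	struct kref refs;
	/* ... reservation data ... */
};

static void demo_resv_release(struct kref *ref)
{
	struct demo_resv *r = container_of(ref, struct demo_resv, refs);

	kfree(r);
}

static struct demo_resv *demo_resv_alloc(void)
{
	struct demo_resv *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (r)
		kref_init(&r->refs);		/* reference count starts at 1 */
	return r;
}

static void demo_resv_put(struct demo_resv *r)
{
	if (r)				/* tolerate NULL, like resv_map_put() */
		kref_put(&r->refs, demo_resv_release);
}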
index d071d380fb498ab36ac6700343bf86485463bdbe..2189af491783f958c1337ebf1f1bb14dc5cd8b5d 100644 (file)
@@ -37,6 +37,52 @@ static inline void __put_page(struct page *page)
        atomic_dec(&page->_count);
 }
 
+static inline void __get_page_tail_foll(struct page *page,
+                                       bool get_page_head)
+{
+       /*
+        * If we're getting a tail page, the elevated page->_count is
+        * required only in the head page and we will elevate the head
+        * page->_count and tail page->_mapcount.
+        *
+        * We elevate page_tail->_mapcount for tail pages to force
+        * page_tail->_count to be zero at all times to avoid getting
+        * false positives from get_page_unless_zero() with
+        * speculative page access (like in
+        * page_cache_get_speculative()) on tail pages.
+        */
+       VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
+       VM_BUG_ON(atomic_read(&page->_count) != 0);
+       VM_BUG_ON(page_mapcount(page) < 0);
+       if (get_page_head)
+               atomic_inc(&page->first_page->_count);
+       atomic_inc(&page->_mapcount);
+}
+
+/*
+ * This is meant to be called as the FOLL_GET operation of
+ * follow_page() and it must be called while holding the proper PT
+ * lock while the pte (or pmd_trans_huge) is still mapping the page.
+ */
+static inline void get_page_foll(struct page *page)
+{
+       if (unlikely(PageTail(page)))
+               /*
+                * This is safe only because
+                * __split_huge_page_refcount() can't run under
+                * get_page_foll() because we hold the proper PT lock.
+                */
+               __get_page_tail_foll(page, true);
+       else {
+               /*
+                * Getting a normal page or the head of a compound page
+                * requires an already elevated page->_count.
+                */
+               VM_BUG_ON(atomic_read(&page->_count) <= 0);
+               atomic_inc(&page->_count);
+       }
+}
+
 extern unsigned long highest_memmap_pfn;
 
 /*
index 59ac5d6de478701f6a778a34321eb5b54eb74b02..ffb99b4e7527fb21af1264ad0cc4a1ef25b5b7e9 100644 (file)
@@ -3422,6 +3422,50 @@ int mem_cgroup_shmem_charge_fallback(struct page *page,
        return ret;
 }
 
+/*
+ * At page cache replacement, newpage is not under any memcg but it is
+ * on the LRU. So, this function doesn't touch res_counter but handles
+ * the LRU in the correct way. Both pages are locked, so we cannot race
+ * with uncharge.
+ */
+void mem_cgroup_replace_page_cache(struct page *oldpage,
+                                 struct page *newpage)
+{
+       struct mem_cgroup *memcg;
+       struct page_cgroup *pc;
+       struct zone *zone;
+       enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
+       unsigned long flags;
+
+       if (mem_cgroup_disabled())
+               return;
+
+       pc = lookup_page_cgroup(oldpage);
+       /* fix accounting on old pages */
+       lock_page_cgroup(pc);
+       memcg = pc->mem_cgroup;
+       mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
+       ClearPageCgroupUsed(pc);
+       unlock_page_cgroup(pc);
+
+       if (PageSwapBacked(oldpage))
+               type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+
+       zone = page_zone(newpage);
+       pc = lookup_page_cgroup(newpage);
+       /*
+        * Even if newpage->mapping was NULL before starting replacement,
+        * the newpage may be on the LRU (or on a pagevec for the LRU)
+        * already. We lock the LRU while we overwrite pc->mem_cgroup.
+        */
+       spin_lock_irqsave(&zone->lru_lock, flags);
+       if (PageLRU(newpage))
+               del_page_from_lru_list(zone, newpage, page_lru(newpage));
+       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
+       if (PageLRU(newpage))
+               add_page_to_lru_list(zone, newpage, page_lru(newpage));
+       spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
 #ifdef CONFIG_DEBUG_VM
 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
 {
@@ -4514,6 +4558,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
         */
        BUG_ON(!thresholds);
 
+       if (!thresholds->primary)
+               goto unlock;
+
        usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
        /* Check if a threshold crossed before removing */
@@ -4558,11 +4605,17 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 swap_buffers:
        /* Swap primary and spare array */
        thresholds->spare = thresholds->primary;
+       /* If all events are unregistered, free the spare array */
+       if (!new) {
+               kfree(thresholds->spare);
+               thresholds->spare = NULL;
+       }
+
        rcu_assign_pointer(thresholds->primary, new);
 
        /* To be sure that nobody uses thresholds */
        synchronize_rcu();
-
+unlock:
        mutex_unlock(&memcg->thresholds_lock);
 }
 
@@ -4963,9 +5016,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                int cpu;
                enable_swap_cgroup();
                parent = NULL;
-               root_mem_cgroup = mem;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;
+               root_mem_cgroup = mem;
                for_each_possible_cpu(cpu) {
                        struct memcg_stock_pcp *stock =
                                                &per_cpu(memcg_stock, cpu);
@@ -5004,7 +5057,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        return &mem->css;
 free_out:
        __mem_cgroup_free(mem);
-       root_mem_cgroup = NULL;
        return ERR_PTR(error);
 }
 
@@ -5244,6 +5296,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
        spinlock_t *ptl;
 
        split_huge_page_pmd(walk->mm, pmd);
+       if (pmd_trans_unstable(pmd))
+               return 0;
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
@@ -5405,6 +5459,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
        spinlock_t *ptl;
 
        split_huge_page_pmd(walk->mm, pmd);
+       if (pmd_trans_unstable(pmd))
+               return 0;
 retry:
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; addr += PAGE_SIZE) {
index d961e1914d17a7fb74cb7c907ddee44dcd5aae76..d49b58aba4aeae5f2805055017a45829c67fcf9e 100644 (file)
@@ -1228,16 +1228,24 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd)) {
-                       if (next-addr != HPAGE_PMD_SIZE) {
+                       if (next - addr != HPAGE_PMD_SIZE) {
                                VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
                                split_huge_page_pmd(vma->vm_mm, pmd);
                        } else if (zap_huge_pmd(tlb, vma, pmd))
-                               continue;
+                               goto next;
                        /* fall through */
                }
-               if (pmd_none_or_clear_bad(pmd))
-                       continue;
+               /*
+                * Here there can be other concurrent MADV_DONTNEED or
+                * trans huge page faults running, and if the pmd is
+                * none or trans huge it can change under us. This is
+                * because MADV_DONTNEED holds the mmap_sem in read
+                * mode.
+                */
+               if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+                       goto next;
                next = zap_pte_range(tlb, vma, pmd, addr, next, details);
+next:
                cond_resched();
        } while (pmd++, addr = next, addr != end);
 
@@ -1514,7 +1522,7 @@ split_fallthrough:
        }
 
        if (flags & FOLL_GET)
-               get_page(page);
+               get_page_foll(page);
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
index e7fb9d25c54eb80a62a987fbfd5cb08c4ae8e055..3dac2d168e47e7f501c42b478fae7a8807a4d2e4 100644 (file)
@@ -511,7 +511,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        do {
                next = pmd_addr_end(addr, end);
                split_huge_page_pmd(vma->vm_mm, pmd);
-               if (pmd_none_or_clear_bad(pmd))
+               if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        continue;
                if (check_pte_range(vma, pmd, addr, next, nodes,
                                    flags, private))
@@ -606,27 +606,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
        return first;
 }
 
-/* Apply policy to a single VMA */
-static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
-{
-       int err = 0;
-       struct mempolicy *old = vma->vm_policy;
-
-       pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
-                vma->vm_start, vma->vm_end, vma->vm_pgoff,
-                vma->vm_ops, vma->vm_file,
-                vma->vm_ops ? vma->vm_ops->set_policy : NULL);
-
-       if (vma->vm_ops && vma->vm_ops->set_policy)
-               err = vma->vm_ops->set_policy(vma, new);
-       if (!err) {
-               mpol_get(new);
-               vma->vm_policy = new;
-               mpol_put(old);
-       }
-       return err;
-}
-
 /* Step 2: apply policy to a range and do splits. */
 static int mbind_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end, struct mempolicy *new_pol)
@@ -666,9 +645,23 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                        if (err)
                                goto out;
                }
-               err = policy_vma(vma, new_pol);
-               if (err)
-                       goto out;
+
+               /*
+                * Apply policy to a single VMA. The reference counting of
+                * policy for vma_policy linkages has already been handled by
+                * vma_merge and split_vma as necessary. If this is a shared
+                * policy then ->set_policy will increment the reference count
+                * for an sp node.
+                */
+               pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
+                       vma->vm_start, vma->vm_end, vma->vm_pgoff,
+                       vma->vm_ops, vma->vm_file,
+                       vma->vm_ops ? vma->vm_ops->set_policy : NULL);
+               if (vma->vm_ops && vma->vm_ops->set_policy) {
+                       err = vma->vm_ops->set_policy(vma, new_pol);
+                       if (err)
+                               goto out;
+               }
        }
 
  out:
index a4e6b9d75c76198be4f04d41e996f2067a97db27..117ff5492795d6962d651b86799070e3fc18c75e 100644 (file)
@@ -161,7 +161,7 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                        }
                        /* fall through */
                }
-               if (pmd_none_or_clear_bad(pmd))
+               if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        mincore_unmapped_range(vma, addr, next, vec);
                else
                        mincore_pte_range(vma, pmd, addr, next, vec);
index 6e93dc7f25863628b576539648dfe0c7ba8d3f10..e39e3efe4a434218ec3470a164606cc1dc964313 100644 (file)
@@ -83,8 +83,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 
 static void __init __free_pages_memory(unsigned long start, unsigned long end)
 {
-       int i;
-       unsigned long start_aligned, end_aligned;
+       unsigned long i, start_aligned, end_aligned;
        int order = ilog2(BITS_PER_LONG);
 
        start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
index 9edc897a3970e3a22753e6bbed4ce0bdde9d0f19..5ff9b35883ee0a3a07310dda39dad20fc758dc8a 100644 (file)
@@ -697,9 +697,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
 
+               mutex_lock(&mapping->i_mmap_mutex);
                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 
        /* add the VMA to the tree */
@@ -761,9 +763,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
 
+               mutex_lock(&mapping->i_mmap_mutex);
                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_remove(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 
        /* remove from the MM's tree and list */
@@ -776,8 +780,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
        if (vma->vm_next)
                vma->vm_next->vm_prev = vma->vm_prev;
-
-       vma->vm_mm = NULL;
 }
 
 /*
@@ -2061,6 +2063,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        down_write(&nommu_region_sem);
+       mutex_lock(&inode->i_mapping->i_mmap_mutex);
 
        /* search for VMAs that fall within the dead zone */
        vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2068,6 +2071,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                /* found one - only interested if it's shared out of the page
                 * cache */
                if (vma->vm_flags & VM_SHARED) {
+                       mutex_unlock(&inode->i_mapping->i_mmap_mutex);
                        up_write(&nommu_region_sem);
                        return -ETXTBSY; /* not quite true, but near enough */
                }
@@ -2095,6 +2099,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                }
        }
 
+       mutex_unlock(&inode->i_mapping->i_mmap_mutex);
        up_write(&nommu_region_sem);
        return 0;
 }
index 8093fc766d16fda592f42074fbe7293cc7f6892a..7c72487ca459495026387de3c0c0c0302a44dec7 100644 (file)
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p,
 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
                      const nodemask_t *nodemask, unsigned long totalpages)
 {
-       int points;
+       long points;
 
        if (oom_unkillable_task(p, mem, nodemask))
                return 0;
index 03d8c484d0bc8238bd9c0dc087103c2cb87da48c..e2f474da7ee2bcae724d8cd7b66bc594c4ed602f 100644 (file)
@@ -370,8 +370,8 @@ void prep_compound_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
-
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -3411,9 +3411,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        unsigned long block_migratetype;
        int reserve;
 
-       /* Get the start pfn, end pfn and the number of blocks to reserve */
+       /*
+        * Get the start pfn, end pfn and the number of blocks to reserve.
+        * We have to be careful to be aligned to pageblock_nr_pages to
+        * make sure that we always check pfn_valid for the first page in
+        * the block.
+        */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
+       start_pfn = roundup(start_pfn, pageblock_nr_pages);
        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
@@ -5582,6 +5588,17 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
 bool is_pageblock_removable_nolock(struct page *page)
 {
        struct zone *zone = page_zone(page);
+       unsigned long pfn = page_to_pfn(page);
+
+       /*
+        * We have to be careful here because we are iterating over memory
+        * sections which are not zone aware so we might end up outside of
+        * the zone but still within the section.
+        */
+       if (!zone || zone->zone_start_pfn > pfn ||
+                       zone->zone_start_pfn + zone->spanned_pages <= pfn)
+               return false;
+
        return __count_immobile_pages(zone, page, 0);
 }
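
The guard added to is_pageblock_removable_nolock() treats the zone as a half-open pfn range [zone_start_pfn, zone_start_pfn + spanned_pages) and refuses pages whose pfn lies outside it, since memory sections are not zone aware. The same containment test written as a stand-alone helper purely for clarity (this exact function is not part of the patch):

#include <stdbool.h>

/* half-open range test: start <= pfn < start + spanned */
static bool pfn_in_zone_span(unsigned long pfn, unsigned long start,
			     unsigned long spanned)
{
	return pfn >= start && pfn < start + spanned;
}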
 
index c3450d5336111830ad0b572efc08000006de13bd..87eac0ea2bf1f5cbb2fa2391902f6efaf0e1de10 100644 (file)
@@ -59,7 +59,7 @@ again:
                        continue;
 
                split_huge_page_pmd(walk->mm, pmd);
-               if (pmd_none_or_clear_bad(pmd))
+               if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
index ea534960a04bcda7e87a18cacf67bd2eb1fc5fe0..bfad724666532558ac85043daa5181e9a74b3b50 100644 (file)
@@ -143,8 +143,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
 {
        flush_cache_vunmap(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
@@ -206,8 +206,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
 {
        flush_tlb_kernel_range(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -284,8 +284,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                int page_start, int page_end)
 {
        flush_cache_vmap(
-               pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
-               pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+               pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+               pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
 }
 
 /**
index bf80e55dbed7e66fcd28f6dbc46c86fbd222a008..af0cc7a58f9f5aa2e38314dfc62b729f84e7f5a5 100644 (file)
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
 static int pcpu_nr_slots __read_mostly;
 static size_t pcpu_chunk_struct_size __read_mostly;
 
-/* cpus with the lowest and highest unit numbers */
-static unsigned int pcpu_first_unit_cpu __read_mostly;
-static unsigned int pcpu_last_unit_cpu __read_mostly;
+/* cpus with the lowest and highest unit addresses */
+static unsigned int pcpu_low_unit_cpu __read_mostly;
+static unsigned int pcpu_high_unit_cpu __read_mostly;
 
 /* the address of the first chunk which starts with the kernel static area */
 void *pcpu_base_addr __read_mostly;
@@ -984,19 +984,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
        bool in_first_chunk = false;
-       unsigned long first_start, first_end;
+       unsigned long first_low, first_high;
        unsigned int cpu;
 
        /*
-        * The following test on first_start/end isn't strictly
+        * The following test on unit_low/high isn't strictly
         * necessary but will speed up lookups of addresses which
         * aren't in the first chunk.
         */
-       first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
-       first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
-                                   pcpu_unit_pages);
-       if ((unsigned long)addr >= first_start &&
-           (unsigned long)addr < first_end) {
+       first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
+       first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
+                                    pcpu_unit_pages);
+       if ((unsigned long)addr >= first_low &&
+           (unsigned long)addr < first_high) {
                for_each_possible_cpu(cpu) {
                        void *start = per_cpu_ptr(base, cpu);
 
@@ -1011,9 +1011,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
                if (!is_vmalloc_addr(addr))
                        return __pa(addr);
                else
-                       return page_to_phys(vmalloc_to_page(addr));
+                       return page_to_phys(vmalloc_to_page(addr)) +
+                              offset_in_page(addr);
        } else
-               return page_to_phys(pcpu_addr_to_page(addr));
+               return page_to_phys(pcpu_addr_to_page(addr)) +
+                      offset_in_page(addr);
 }
 
 /**
@@ -1233,7 +1235,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                unit_map[cpu] = UINT_MAX;
-       pcpu_first_unit_cpu = NR_CPUS;
+
+       pcpu_low_unit_cpu = NR_CPUS;
+       pcpu_high_unit_cpu = NR_CPUS;
 
        for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
                const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1253,9 +1257,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                        unit_map[cpu] = unit + i;
                        unit_off[cpu] = gi->base_offset + i * ai->unit_size;
 
-                       if (pcpu_first_unit_cpu == NR_CPUS)
-                               pcpu_first_unit_cpu = cpu;
-                       pcpu_last_unit_cpu = cpu;
+                       /* determine low/high unit_cpu */
+                       if (pcpu_low_unit_cpu == NR_CPUS ||
+                           unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
+                               pcpu_low_unit_cpu = cpu;
+                       if (pcpu_high_unit_cpu == NR_CPUS ||
+                           unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
+                               pcpu_high_unit_cpu = cpu;
                }
        }
        pcpu_nr_units = unit;
@@ -1622,6 +1630,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                areas[group] = ptr;
 
                base = min(ptr, base);
+       }
+
+       /*
+        * Copy data and free unused parts.  This should happen after all
+        * allocations are complete; otherwise, we may end up with
+        * overlapping groups.
+        */
+       for (group = 0; group < ai->nr_groups; group++) {
+               struct pcpu_group_info *gi = &ai->groups[group];
+               void *ptr = areas[group];
 
                for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
                        if (gi->cpu_map[i] == NR_CPUS) {
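
The per_cpu_ptr_to_phys() change adds offset_in_page(addr) because page_to_phys() only yields the physical base of the page frame; the byte offset inside the page has to be carried over from the virtual address. A short userspace sketch of the arithmetic, assuming 4 KiB pages and made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT 12UL				/* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long vaddr = 0xffffc90000123abcUL;	/* example vmalloc address */
	unsigned long page_phys = 0x7f643000UL;		/* physical base of its page */
	unsigned long offset = vaddr & ~PAGE_MASK;	/* what offset_in_page() returns */

	printf("offset_in_page = %#lx\n", offset);		/* 0xabc */
	printf("physical addr  = %#lx\n", page_phys + offset);	/* 0x7f643abc */
	return 0;
}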
index 35f351f26193a47145cd8bcb7f55ae091331ca97..10ab2335e2eaff568eff6a38bb5a9349c591ebc3 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1818,6 +1818,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        if (unlikely(!node_match(c, node)))
                goto another_slab;
 
+       /* must check again c->freelist in case of cpu migration or IRQ */
+       object = c->freelist;
+       if (object)
+               goto update_freelist;
+
        stat(s, ALLOC_REFILL);
 
 load_freelist:
@@ -1827,6 +1832,7 @@ load_freelist:
        if (kmem_cache_debug(s))
                goto debug;
 
+update_freelist:
        c->freelist = get_freepointer(s, object);
        page->inuse = page->objects;
        page->freelist = NULL;
@@ -3433,13 +3439,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                if (kmem_cache_open(s, n,
                                size, align, flags, ctor)) {
                        list_add(&s->list, &slab_caches);
+                       up_write(&slub_lock);
                        if (sysfs_slab_add(s)) {
+                               down_write(&slub_lock);
                                list_del(&s->list);
                                kfree(n);
                                kfree(s);
                                goto err;
                        }
-                       up_write(&slub_lock);
                        return s;
                }
                kfree(n);
index aa64b12831a24b8c831878b1ea8b66c2553a619e..4cd05e5f2f438ae3512426748f3bb39df061bbe9 100644 (file)
@@ -353,29 +353,21 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
 
        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                                 usemap_count);
-       if (usemap) {
-               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-                       if (!present_section_nr(pnum))
-                               continue;
-                       usemap_map[pnum] = usemap;
-                       usemap += size;
+       if (!usemap) {
+               usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+               if (!usemap) {
+                       printk(KERN_WARNING "%s: allocation failed\n", __func__);
+                       return;
                }
-               return;
        }
 
-       usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
-       if (usemap) {
-               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-                       if (!present_section_nr(pnum))
-                               continue;
-                       usemap_map[pnum] = usemap;
-                       usemap += size;
-                       check_usemap_section_nr(nodeid, usemap_map[pnum]);
-               }
-               return;
+       for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+               if (!present_section_nr(pnum))
+                       continue;
+               usemap_map[pnum] = usemap;
+               usemap += size;
+               check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
-
-       printk(KERN_WARNING "%s: allocation failed\n", __func__);
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
index 3a442f18b0b3dab5acfd99b5fc4e85389b9b5439..4a1fc6db89e8a2153130f6f21eb9eda85937b6d7 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -78,39 +78,22 @@ static void put_compound_page(struct page *page)
 {
        if (unlikely(PageTail(page))) {
                /* __split_huge_page_refcount can run under us */
-               struct page *page_head = page->first_page;
-               smp_rmb();
-               /*
-                * If PageTail is still set after smp_rmb() we can be sure
-                * that the page->first_page we read wasn't a dangling pointer.
-                * See __split_huge_page_refcount() smp_wmb().
-                */
-               if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
+               struct page *page_head = compound_trans_head(page);
+
+               if (likely(page != page_head &&
+                          get_page_unless_zero(page_head))) {
                        unsigned long flags;
                        /*
-                        * Verify that our page_head wasn't converted
-                        * to a a regular page before we got a
-                        * reference on it.
+                        * page_head wasn't a dangling pointer but it
+                        * may not be a head page anymore by the time
+                        * we obtain the lock. That is ok as long as it
+                        * can't be freed from under us.
                         */
-                       if (unlikely(!PageHead(page_head))) {
-                               /* PageHead is cleared after PageTail */
-                               smp_rmb();
-                               VM_BUG_ON(PageTail(page));
-                               goto out_put_head;
-                       }
-                       /*
-                        * Only run compound_lock on a valid PageHead,
-                        * after having it pinned with
-                        * get_page_unless_zero() above.
-                        */
-                       smp_mb();
-                       /* page_head wasn't a dangling pointer */
                        flags = compound_lock_irqsave(page_head);
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount run before us */
                                compound_unlock_irqrestore(page_head, flags);
                                VM_BUG_ON(PageHead(page_head));
-                       out_put_head:
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
                        out_put_single:
@@ -121,16 +104,17 @@ static void put_compound_page(struct page *page)
                        VM_BUG_ON(page_head != page->first_page);
                        /*
                         * We can release the refcount taken by
-                        * get_page_unless_zero now that
-                        * split_huge_page_refcount is blocked on the
-                        * compound_lock.
+                        * get_page_unless_zero() now that
+                        * __split_huge_page_refcount() is blocked on
+                        * the compound_lock.
                         */
                        if (put_page_testzero(page_head))
                                VM_BUG_ON(1);
                        /* __split_huge_page_refcount will wait now */
-                       VM_BUG_ON(atomic_read(&page->_count) <= 0);
-                       atomic_dec(&page->_count);
+                       VM_BUG_ON(page_mapcount(page) <= 0);
+                       atomic_dec(&page->_mapcount);
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
+                       VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
@@ -160,6 +144,45 @@ void put_page(struct page *page)
 }
 EXPORT_SYMBOL(put_page);
 
+/*
+ * This function is exported but must not be called by anything other
+ * than get_page(). It implements the slow path of get_page().
+ */
+bool __get_page_tail(struct page *page)
+{
+       /*
+        * This takes care of get_page() if run on a tail page
+        * returned by one of the get_user_pages/follow_page variants.
+        * get_user_pages/follow_page itself doesn't need the compound
+        * lock because it runs __get_page_tail_foll() under the
+        * proper PT lock that already serializes against
+        * split_huge_page().
+        */
+       unsigned long flags;
+       bool got = false;
+       struct page *page_head = compound_trans_head(page);
+
+       if (likely(page != page_head && get_page_unless_zero(page_head))) {
+               /*
+                * page_head wasn't a dangling pointer but it
+                * may not be a head page anymore by the time
+                * we obtain the lock. That is ok as long as it
+                * can't be freed from under us.
+                */
+               flags = compound_lock_irqsave(page_head);
+               /* here __split_huge_page_refcount won't run anymore */
+               if (likely(PageTail(page))) {
+                       __get_page_tail_foll(page, false);
+                       got = true;
+               }
+               compound_unlock_irqrestore(page_head, flags);
+               if (unlikely(!got))
+                       put_page(page_head);
+       }
+       return got;
+}
+EXPORT_SYMBOL(__get_page_tail);
+
 /**
  * put_pages_list() - release a list of pages
  * @pages: list of pages threaded on page->lru
@@ -644,7 +667,7 @@ void lru_add_page_tail(struct zone* zone,
        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
-       VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
 
        SetPageLRU(page_tail);
 
index 46680461785bef647c3618bc7d493d1909aea80f..10e9198778cf1d2f6d4f255bffd68b548359ccc7 100644 (file)
@@ -28,7 +28,7 @@
  */
 static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
-       .set_page_dirty = __set_page_dirty_nobuffers,
+       .set_page_dirty = __set_page_dirty_no_writeback,
        .migratepage    = migrate_page,
 };
 
index ff8dc1a18cb4fb6e15dac52b3dff0c491337f9af..c8f4338848df2fc01b5c7062bff7b83249a01cff 100644 (file)
@@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (unlikely(pmd_trans_huge(*pmd)))
-                       continue;
-               if (pmd_none_or_clear_bad(pmd))
+               if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        continue;
                ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
                if (ret)
index 45ece8967e479b596a8e68290ce728fa7202297b..bdb70042c123f879830f17155146156ac46b971b 100644 (file)
@@ -256,7 +256,7 @@ struct vmap_area {
        struct rb_node rb_node;         /* address sorted rbtree */
        struct list_head list;          /* address sorted list */
        struct list_head purge_list;    /* "lazy purge" list */
-       void *private;
+       struct vm_struct *vm;
        struct rcu_head rcu_head;
 };
 
@@ -1174,9 +1174,10 @@ void __init vmalloc_init(void)
        /* Import existing vmlist entries. */
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
-               va->flags = tmp->flags | VM_VM_AREA;
+               va->flags = VM_VM_AREA;
                va->va_start = (unsigned long)tmp->addr;
                va->va_end = va->va_start + tmp->size;
+               va->vm = tmp;
                __insert_vmap_area(va);
        }
 
@@ -1267,18 +1268,22 @@ EXPORT_SYMBOL_GPL(map_vm_area);
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
-static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
                              unsigned long flags, void *caller)
 {
-       struct vm_struct *tmp, **p;
-
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
-       va->private = vm;
+       va->vm = vm;
        va->flags |= VM_VM_AREA;
+}
+
+static void insert_vmalloc_vmlist(struct vm_struct *vm)
+{
+       struct vm_struct *tmp, **p;
 
+       vm->flags &= ~VM_UNLIST;
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if (tmp->addr >= vm->addr)
@@ -1289,6 +1294,13 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
        write_unlock(&vmlist_lock);
 }
 
+static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+                             unsigned long flags, void *caller)
+{
+       setup_vmalloc_vm(vm, va, flags, caller);
+       insert_vmalloc_vmlist(vm);
+}
+
 static struct vm_struct *__get_vm_area_node(unsigned long size,
                unsigned long align, unsigned long flags, unsigned long start,
                unsigned long end, int node, gfp_t gfp_mask, void *caller)
@@ -1327,7 +1339,18 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                return NULL;
        }
 
-       insert_vmalloc_vm(area, va, flags, caller);
+       /*
+        * When this function is called from __vmalloc_node_range,
+        * we do not add vm_struct to vmlist here to avoid
+        * accessing uninitialized members of vm_struct such as
+        * pages and nr_pages fields. They will be set later.
+        * To distinguish it from others, we use a VM_UNLIST flag.
+        */
+       if (flags & VM_UNLIST)
+               setup_vmalloc_vm(area, va, flags, caller);
+       else
+               insert_vmalloc_vm(area, va, flags, caller);
+
        return area;
 }
 
@@ -1375,7 +1398,7 @@ static struct vm_struct *find_vm_area(const void *addr)
 
        va = find_vmap_area((unsigned long)addr);
        if (va && va->flags & VM_VM_AREA)
-               return va->private;
+               return va->vm;
 
        return NULL;
 }
@@ -1394,18 +1417,21 @@ struct vm_struct *remove_vm_area(const void *addr)
 
        va = find_vmap_area((unsigned long)addr);
        if (va && va->flags & VM_VM_AREA) {
-               struct vm_struct *vm = va->private;
-               struct vm_struct *tmp, **p;
-               /*
-                * remove from list and disallow access to this vm_struct
-                * before unmap. (address range confliction is maintained by
-                * vmap.)
-                */
-               write_lock(&vmlist_lock);
-               for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
-                       ;
-               *p = tmp->next;
-               write_unlock(&vmlist_lock);
+               struct vm_struct *vm = va->vm;
+
+               if (!(vm->flags & VM_UNLIST)) {
+                       struct vm_struct *tmp, **p;
+                       /*
+                        * remove from list and disallow access to
+                        * this vm_struct before unmap. (address range
+                        * confliction is maintained by vmap.)
+                        */
+                       write_lock(&vmlist_lock);
+                       for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
+                               ;
+                       *p = tmp->next;
+                       write_unlock(&vmlist_lock);
+               }
 
                vmap_debug_free_range(va->va_start, va->va_end);
                free_unmap_vmap_area(va);
@@ -1616,13 +1642,21 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        if (!size || (size >> PAGE_SHIFT) > totalram_pages)
                return NULL;
 
-       area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
-                                 gfp_mask, caller);
+       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
+                                 start, end, node, gfp_mask, caller);
 
        if (!area)
                return NULL;
 
        addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+       if (!addr)
+               return NULL;
+
+       /*
+        * In this function, newly allocated vm_struct is not added
+        * to vmlist at __get_vm_area_node(). so, it is added here.
+        */
+       insert_vmalloc_vmlist(area);
 
        /*
         * A ref_count = 3 is needed because the vm_struct and vmap_area
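
The hunks above split insert_vmalloc_vm() into setup_vmalloc_vm() plus insert_vmalloc_vmlist() so that __vmalloc_node_range() can delay publishing the area until its pages/nr_pages fields are filled in, using a VM_UNLIST flag to mark the not-yet-listed state. A minimal userspace sketch of the same publish-after-init pattern (structure, list and flag names here are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

#define AREA_UNLISTED 0x1       /* hypothetical stand-in for VM_UNLIST */

struct area {
        unsigned long flags;
        size_t size;
        void *pages;            /* filled in later by the allocator */
        struct area *next;
};

static struct area *area_list;  /* list that other code may walk */

static void setup_area(struct area *a, size_t size, unsigned long flags)
{
        a->flags = flags;
        a->size = size;
        a->pages = NULL;
        a->next = NULL;
}

static void insert_area(struct area *a)
{
        a->flags &= ~AREA_UNLISTED;
        a->next = area_list;    /* publish only once fully initialized */
        area_list = a;
}

int main(void)
{
        struct area *a = malloc(sizeof(*a));

        setup_area(a, 4096, AREA_UNLISTED);   /* not yet visible to readers */
        a->pages = calloc(1, a->size);        /* finish initialization */
        insert_area(a);                       /* now safe to publish */

        printf("first area size: %zu\n", area_list->size);
        free(a->pages);
        free(a);
        return 0;
}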
index 6072d74a16f5f38ed0ea9db1026d89e8c467b48f..769935d17c01de6262e705bd72d2d2da725583da 100644 (file)
@@ -665,7 +665,7 @@ static enum page_references page_check_references(struct page *page,
                return PAGEREF_RECLAIM;
 
        if (referenced_ptes) {
-               if (PageAnon(page))
+               if (PageSwapBacked(page))
                        return PAGEREF_ACTIVATE;
                /*
                 * All mapped pages start out with page table
index 5b4f51d440f46ac22df718f0fd0a6c3015d591f9..d54845618c2a10b296dea2e95be2caff816a8663 100644 (file)
@@ -154,7 +154,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
                skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
        }
 
-       skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
+       skb->dev = vlan_dev_info(dev)->real_dev;
        len = skb->len;
        ret = dev_queue_xmit(skb);
 
index 1d4be60e1390534ccafce68d2859404ce15fb0b9..5889074e97183dc7a89c44a0e473a0533d894143 100644 (file)
@@ -364,33 +364,37 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                                   struct net_device *dev)
 {
        struct clip_priv *clip_priv = PRIV(dev);
+       struct dst_entry *dst = skb_dst(skb);
        struct atmarp_entry *entry;
+       struct neighbour *n;
        struct atm_vcc *vcc;
        int old;
        unsigned long flags;
 
        pr_debug("(skb %p)\n", skb);
-       if (!skb_dst(skb)) {
+       if (!dst) {
                pr_err("skb_dst(skb) == NULL\n");
                dev_kfree_skb(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
-       if (!skb_dst(skb)->neighbour) {
+       n = dst_get_neighbour(dst);
+       if (!n) {
 #if 0
-               skb_dst(skb)->neighbour = clip_find_neighbour(skb_dst(skb), 1);
-               if (!skb_dst(skb)->neighbour) {
+               n = clip_find_neighbour(skb_dst(skb), 1);
+               if (!n) {
                        dev_kfree_skb(skb);     /* lost that one */
                        dev->stats.tx_dropped++;
                        return 0;
                }
+               dst_set_neighbour(dst, n);
 #endif
                pr_err("NO NEIGHBOUR !\n");
                dev_kfree_skb(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
-       entry = NEIGH2ENTRY(skb_dst(skb)->neighbour);
+       entry = NEIGH2ENTRY(n);
        if (!entry->vccs) {
                if (time_after(jiffies, entry->expires)) {
                        /* should be resolved */
@@ -407,7 +411,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
        }
        pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
        ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
-       pr_debug("using neighbour %p, vcc %p\n", skb_dst(skb)->neighbour, vcc);
+       pr_debug("using neighbour %p, vcc %p\n", n, vcc);
        if (entry->vccs->encap) {
                void *here;
 
index e7c69f4619ec0f7f90d41d9fbbe7f0caa2b7d52f..b04a6ef4da94875ee86a0aacdce0f082c583a4b5 100644 (file)
@@ -2006,16 +2006,17 @@ static void __exit ax25_exit(void)
        proc_net_remove(&init_net, "ax25_route");
        proc_net_remove(&init_net, "ax25");
        proc_net_remove(&init_net, "ax25_calls");
-       ax25_rt_free();
-       ax25_uid_free();
-       ax25_dev_free();
 
-       ax25_unregister_sysctl();
        unregister_netdevice_notifier(&ax25_dev_notifier);
+       ax25_unregister_sysctl();
 
        dev_remove_pack(&ax25_packet_type);
 
        sock_unregister(PF_AX25);
        proto_unregister(&ax25_proto);
+
+       ax25_rt_free();
+       ax25_uid_free();
+       ax25_dev_free();
 }
 module_exit(ax25_exit);
index 3b39198640788439f78f923bc4a6a8fe7f2ea146..f38e633c75466f54438c3bf6cb4550d7806b250f 100644 (file)
@@ -510,6 +510,11 @@ int hci_dev_open(__u16 dev)
 
        hci_req_lock(hdev);
 
+       if (test_bit(HCI_UNREGISTER, &hdev->flags)) {
+               ret = -ENODEV;
+               goto done;
+       }
+
        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
@@ -1563,6 +1568,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
 
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
+       set_bit(HCI_UNREGISTER, &hdev->flags);
+
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);
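
The two hunks above set an HCI_UNREGISTER bit before tearing the device down and make hci_dev_open() fail with -ENODEV once it is set. A small sketch of that "refuse new users while unregistering" pattern, assuming nothing about the real hci_dev layout:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Hypothetical device with a "going away" flag. */
struct dev {
        atomic_bool unregistering;
        int open_count;
};

static int dev_open(struct dev *d)
{
        if (atomic_load(&d->unregistering))
                return -ENODEV;         /* refuse new users during teardown */
        d->open_count++;
        return 0;
}

static void dev_unregister(struct dev *d)
{
        atomic_store(&d->unregistering, true);  /* set before tearing down */
        /* ... flush existing users, free resources ... */
}

int main(void)
{
        struct dev d = { .unregistering = false, .open_count = 0 };

        printf("open before unregister: %d\n", dev_open(&d));
        dev_unregister(&d);
        printf("open after unregister:  %d\n", dev_open(&d));
        return 0;
}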
index a9dd166aab78e9e2de7ab39bdd24499a4198b848..dac6a2147467814a417ca554ef5dc1efabfd51de 100644 (file)
@@ -92,7 +92,6 @@ static int br_dev_open(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
 
-       netif_carrier_off(dev);
        netdev_update_features(dev);
        netif_start_queue(dev);
        br_stp_enable_bridge(br);
@@ -109,8 +108,6 @@ static int br_dev_stop(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
 
-       netif_carrier_off(dev);
-
        br_stp_disable_bridge(br);
        br_multicast_stop(br);
 
index 6f156c199998b6d197ee9d92a46eb2a2c23ba94a..449087373d883114f4907e5df2e50f362a0e067c 100644 (file)
@@ -161,9 +161,10 @@ static void del_nbp(struct net_bridge_port *p)
        call_rcu(&p->rcu, destroy_nbp_rcu);
 }
 
-/* called with RTNL */
-static void del_br(struct net_bridge *br, struct list_head *head)
+/* Delete bridge device */
+void br_dev_delete(struct net_device *dev, struct list_head *head)
 {
+       struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p, *n;
 
        list_for_each_entry_safe(p, n, &br->port_list, list) {
@@ -268,7 +269,7 @@ int br_del_bridge(struct net *net, const char *name)
        }
 
        else
-               del_br(netdev_priv(dev), NULL);
+               br_dev_delete(dev, NULL);
 
        rtnl_unlock();
        return ret;
@@ -445,7 +446,7 @@ void __net_exit br_net_exit(struct net *net)
        rtnl_lock();
        for_each_netdev(net, dev)
                if (dev->priv_flags & IFF_EBRIDGE)
-                       del_br(netdev_priv(dev), &list);
+                       br_dev_delete(dev, &list);
 
        unregister_netdevice_many(&list);
        rtnl_unlock();
index 995cbe0ac0b2b1e74f2b4762d61a13102d9f66bb..e78269d798c05d596cf403cd8ff05bfbd8d1f801 100644 (file)
@@ -241,7 +241,6 @@ static void br_multicast_group_expired(unsigned long data)
        hlist_del_rcu(&mp->hlist[mdb->ver]);
        mdb->size--;
 
-       del_timer(&mp->query_timer);
        call_rcu_bh(&mp->rcu, br_multicast_free_group);
 
 out:
@@ -271,7 +270,6 @@ static void br_multicast_del_pg(struct net_bridge *br,
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
-               del_timer(&p->query_timer);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
                if (!mp->ports && !mp->mglist &&
@@ -446,8 +444,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        ip6h->nexthdr = IPPROTO_HOPOPTS;
        ip6h->hop_limit = 1;
        ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
-       ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
-                          &ip6h->saddr);
+       if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+                              &ip6h->saddr)) {
+               kfree_skb(skb);
+               return NULL;
+       }
        ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
        hopopt = (u8 *)(ip6h + 1);
@@ -504,74 +505,6 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
        return NULL;
 }
 
-static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
-{
-       struct net_bridge *br = mp->br;
-       struct sk_buff *skb;
-
-       skb = br_multicast_alloc_query(br, &mp->addr);
-       if (!skb)
-               goto timer;
-
-       netif_rx(skb);
-
-timer:
-       if (++mp->queries_sent < br->multicast_last_member_count)
-               mod_timer(&mp->query_timer,
-                         jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_group_query_expired(unsigned long data)
-{
-       struct net_bridge_mdb_entry *mp = (void *)data;
-       struct net_bridge *br = mp->br;
-
-       spin_lock(&br->multicast_lock);
-       if (!netif_running(br->dev) || !mp->mglist ||
-           mp->queries_sent >= br->multicast_last_member_count)
-               goto out;
-
-       br_multicast_send_group_query(mp);
-
-out:
-       spin_unlock(&br->multicast_lock);
-}
-
-static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
-{
-       struct net_bridge_port *port = pg->port;
-       struct net_bridge *br = port->br;
-       struct sk_buff *skb;
-
-       skb = br_multicast_alloc_query(br, &pg->addr);
-       if (!skb)
-               goto timer;
-
-       br_deliver(port, skb);
-
-timer:
-       if (++pg->queries_sent < br->multicast_last_member_count)
-               mod_timer(&pg->query_timer,
-                         jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_port_group_query_expired(unsigned long data)
-{
-       struct net_bridge_port_group *pg = (void *)data;
-       struct net_bridge_port *port = pg->port;
-       struct net_bridge *br = port->br;
-
-       spin_lock(&br->multicast_lock);
-       if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
-           pg->queries_sent >= br->multicast_last_member_count)
-               goto out;
-
-       br_multicast_send_port_group_query(pg);
-
-out:
-       spin_unlock(&br->multicast_lock);
-}
-
 static struct net_bridge_mdb_entry *br_multicast_get_group(
        struct net_bridge *br, struct net_bridge_port *port,
        struct br_ip *group, int hash)
@@ -687,8 +620,6 @@ rehash:
        mp->addr = *group;
        setup_timer(&mp->timer, br_multicast_group_expired,
                    (unsigned long)mp);
-       setup_timer(&mp->query_timer, br_multicast_group_query_expired,
-                   (unsigned long)mp);
 
        hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
        mdb->size++;
@@ -743,8 +674,6 @@ static int br_multicast_add_group(struct net_bridge *br,
        hlist_add_head(&p->mglist, &port->mglist);
        setup_timer(&p->timer, br_multicast_port_group_expired,
                    (unsigned long)p);
-       setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
-                   (unsigned long)p);
 
        rcu_assign_pointer(*pp, p);
 
@@ -1288,9 +1217,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
                     time_after(mp->timer.expires, time) :
                     try_to_del_timer_sync(&mp->timer) >= 0)) {
                        mod_timer(&mp->timer, time);
-
-                       mp->queries_sent = 0;
-                       mod_timer(&mp->query_timer, now);
                }
 
                goto out;
@@ -1307,9 +1233,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
                     time_after(p->timer.expires, time) :
                     try_to_del_timer_sync(&p->timer) >= 0)) {
                        mod_timer(&p->timer, time);
-
-                       p->queries_sent = 0;
-                       mod_timer(&p->query_timer, now);
                }
 
                break;
@@ -1675,7 +1598,6 @@ void br_multicast_stop(struct net_bridge *br)
                hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
                                          hlist[ver]) {
                        del_timer(&mp->timer);
-                       del_timer(&mp->query_timer);
                        call_rcu_bh(&mp->rcu, br_multicast_free_group);
                }
        }
index 56149ec36d7fd5d8a411b3996f5e7985a3792c77..3dc7f5446a9d29c8e6eafaa92aea2487eab8c4d3 100644 (file)
@@ -343,24 +343,26 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
 static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 {
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+       struct neighbour *neigh;
        struct dst_entry *dst;
 
        skb->dev = bridge_parent(skb->dev);
        if (!skb->dev)
                goto free_skb;
        dst = skb_dst(skb);
+       neigh = dst_get_neighbour(dst);
        if (dst->hh) {
                neigh_hh_bridge(dst->hh, skb);
                skb->dev = nf_bridge->physindev;
                return br_handle_frame_finish(skb);
-       } else if (dst->neighbour) {
+       } else if (neigh) {
                /* the neighbour function below overwrites the complete
                 * MAC header, so we save the Ethernet source address and
                 * protocol number. */
                skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
                /* tell br_dev_xmit to continue with forwarding */
                nf_bridge->mask |= BRNF_BRIDGED_DNAT;
-               return dst->neighbour->output(skb);
+               return neigh->output(skb);
        }
 free_skb:
        kfree_skb(skb);
index ffb0dc4cc0e80691c1264e2344064b89d01c2f2f..2c16055256802a053b311dac8f4c92e754b63c4c 100644 (file)
@@ -208,6 +208,7 @@ static struct rtnl_link_ops br_link_ops __read_mostly = {
        .priv_size      = sizeof(struct net_bridge),
        .setup          = br_dev_setup,
        .validate       = br_validate,
+       .dellink        = br_dev_delete,
 };
 
 int __init br_netlink_init(void)
index 78cc364997d9888bc4a1e9869c7c64edb6378ee9..1ca1b1c7560e9398208638541261de971f07b399 100644 (file)
@@ -77,9 +77,7 @@ struct net_bridge_port_group {
        struct hlist_node               mglist;
        struct rcu_head                 rcu;
        struct timer_list               timer;
-       struct timer_list               query_timer;
        struct br_ip                    addr;
-       u32                             queries_sent;
 };
 
 struct net_bridge_mdb_entry
@@ -89,10 +87,8 @@ struct net_bridge_mdb_entry
        struct net_bridge_port_group __rcu *ports;
        struct rcu_head                 rcu;
        struct timer_list               timer;
-       struct timer_list               query_timer;
        struct br_ip                    addr;
        bool                            mglist;
-       u32                             queries_sent;
 };
 
 struct net_bridge_mdb_htable
@@ -294,6 +290,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
 
 /* br_device.c */
 extern void br_dev_setup(struct net_device *dev);
+extern void br_dev_delete(struct net_device *dev, struct list_head *list);
 extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
                               struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
index 682c0fedf360c3f87177439f3a35e01e18daac18..5ba4366a220d0c1b4047d13e7bd00e3edab2dae3 100644 (file)
@@ -53,7 +53,6 @@ struct cfcnfg *get_cfcnfg(struct net *net)
        struct caif_net *caifn;
        BUG_ON(!net);
        caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
        return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -63,7 +62,6 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
        struct caif_net *caifn;
        BUG_ON(!net);
        caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
        return &caifn->caifdevs;
 }
 
@@ -92,7 +90,6 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
        struct caif_device_entry *caifd;
 
        caifdevs = caif_device_list(dev_net(dev));
-       BUG_ON(!caifdevs);
 
        caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
        if (!caifd)
@@ -108,7 +105,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
        struct caif_device_entry_list *caifdevs =
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;
-       BUG_ON(!caifdevs);
+
        list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                if (caifd->netdev == dev)
                        return caifd;
@@ -209,8 +206,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
        enum cfcnfg_phy_preference pref;
        enum cfcnfg_phy_type phy_type;
        struct cfcnfg *cfg;
-       struct caif_device_entry_list *caifdevs =
-           caif_device_list(dev_net(dev));
+       struct caif_device_entry_list *caifdevs;
 
        if (dev->type != ARPHRD_CAIF)
                return 0;
@@ -219,6 +215,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
        if (cfg == NULL)
                return 0;
 
+       caifdevs = caif_device_list(dev_net(dev));
+
        switch (what) {
        case NETDEV_REGISTER:
                caifd = caif_device_alloc(dev);
@@ -348,7 +346,7 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
        struct caif_net *caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
+
        INIT_LIST_HEAD(&caifn->caifdevs.list);
        mutex_init(&caifn->caifdevs.lock);
 
@@ -413,7 +411,7 @@ static int __init caif_device_init(void)
 {
        int result;
 
-       result = register_pernet_device(&caif_net_ops);
+       result = register_pernet_subsys(&caif_net_ops);
 
        if (result)
                return result;
@@ -426,7 +424,7 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-       unregister_pernet_device(&caif_net_ops);
+       unregister_pernet_subsys(&caif_net_ops);
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
 }
index 52fe33bee0298a8eac119faf3dbe07be0b0ebbea..bca32d7c15c952619ce764be2250789003877f1a 100644 (file)
@@ -313,7 +313,6 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
        int err;
        struct cfctrl_link_param param;
        struct cfcnfg *cfg = get_cfcnfg(net);
-       caif_assert(cfg != NULL);
 
        rcu_read_lock();
        err = caif_connect_req_to_link_param(cfg, conn_req, &param);
index 184a6572b67e6be824ba1ef30b122d065269a262..c6cc66f72861ba380aec4348a7635189a40a3fc8 100644 (file)
@@ -343,6 +343,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
        }
 }
 
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+       if (op->kt_ival1.tv64 && op->count)
+               hrtimer_start(&op->timer,
+                             ktime_add(ktime_get(), op->kt_ival1),
+                             HRTIMER_MODE_ABS);
+       else if (op->kt_ival2.tv64)
+               hrtimer_start(&op->timer,
+                             ktime_add(ktime_get(), op->kt_ival2),
+                             HRTIMER_MODE_ABS);
+}
+
 static void bcm_tx_timeout_tsklet(unsigned long data)
 {
        struct bcm_op *op = (struct bcm_op *)data;
@@ -364,26 +376,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 
                        bcm_send_to_user(op, &msg_head, NULL, 0);
                }
-       }
-
-       if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-               /* send (next) frame */
                bcm_can_tx(op);
-               hrtimer_start(&op->timer,
-                             ktime_add(ktime_get(), op->kt_ival1),
-                             HRTIMER_MODE_ABS);
 
-       } else {
-               if (op->kt_ival2.tv64) {
+       } else if (op->kt_ival2.tv64)
+               bcm_can_tx(op);
 
-                       /* send (next) frame */
-                       bcm_can_tx(op);
-                       hrtimer_start(&op->timer,
-                                     ktime_add(ktime_get(), op->kt_ival2),
-                                     HRTIMER_MODE_ABS);
-               }
-       }
+       bcm_tx_start_timer(op);
 }
 
 /*
@@ -963,23 +961,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                        hrtimer_cancel(&op->timer);
        }
 
-       if ((op->flags & STARTTIMER) &&
-           ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
-
+       if (op->flags & STARTTIMER) {
+               hrtimer_cancel(&op->timer);
                /* spec: send can_frame when starting timer */
                op->flags |= TX_ANNOUNCE;
-
-               if (op->kt_ival1.tv64 && (op->count > 0)) {
-                       /* op->count-- is done in bcm_tx_timeout_handler */
-                       hrtimer_start(&op->timer, op->kt_ival1,
-                                     HRTIMER_MODE_REL);
-               } else
-                       hrtimer_start(&op->timer, op->kt_ival2,
-                                     HRTIMER_MODE_REL);
        }
 
-       if (op->flags & TX_ANNOUNCE)
+       if (op->flags & TX_ANNOUNCE) {
                bcm_can_tx(op);
+               if (op->count)
+                       op->count--;
+       }
+
+       if (op->flags & STARTTIMER)
+               bcm_tx_start_timer(op);
 
        return msg_head->nframes * CFSIZ + MHSIZ;
 }
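
The refactoring above pulls the interval selection into bcm_tx_start_timer(): use kt_ival1 while frames remain to be counted down, otherwise fall back to kt_ival2, and arm nothing if both are zero. A simplified model of that selection (field names and units are illustrative, and a returned delay stands in for hrtimer_start()):

#include <stdio.h>

struct tx_op {
        unsigned long ival1_ns;  /* per-frame interval while count > 0 */
        unsigned long ival2_ns;  /* steady-state interval afterwards   */
        unsigned int count;      /* frames still to send at ival1      */
};

/* Return the delay to arm the timer with, or 0 if no timer is needed. */
static unsigned long tx_timer_delay(const struct tx_op *op)
{
        if (op->ival1_ns && op->count)
                return op->ival1_ns;
        if (op->ival2_ns)
                return op->ival2_ns;
        return 0;
}

int main(void)
{
        struct tx_op op = { .ival1_ns = 1000000, .ival2_ns = 5000000, .count = 3 };

        printf("delay with count=3: %lu\n", tx_timer_delay(&op));
        op.count = 0;
        printf("delay with count=0: %lu\n", tx_timer_delay(&op));
        return 0;
}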
index 9c58c1ec41a9dba24cca0318f723b01208c7b880..a71eafc392e71b2f134585edf6bd5fac4ef7791e 100644 (file)
@@ -1406,14 +1406,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
  *     register_netdevice_notifier(). The notifier is unlinked into the
  *     kernel structures and may then be reused. A negative errno code
  *     is returned on a failure.
+ *
+ *     After unregistering unregister and down device events are synthesized
+ *     for all devices on the device list to the removed notifier to remove
+ *     the need for special case cleanup code.
  */
 
 int unregister_netdevice_notifier(struct notifier_block *nb)
 {
+       struct net_device *dev;
+       struct net *net;
        int err;
 
        rtnl_lock();
        err = raw_notifier_chain_unregister(&netdev_chain, nb);
+       if (err)
+               goto unlock;
+
+       for_each_net(net) {
+               for_each_netdev(net, dev) {
+                       if (dev->flags & IFF_UP) {
+                               nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+                               nb->notifier_call(nb, NETDEV_DOWN, dev);
+                       }
+                       nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+                       nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
+               }
+       }
+unlock:
        rtnl_unlock();
        return err;
 }
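
The hunk above makes unregister_netdevice_notifier() replay GOING_DOWN/DOWN/UNREGISTER events to the departing notifier for every device that still exists, so callers no longer need bespoke per-device cleanup. A toy version of that replay loop, with invented event and device types:

#include <stdio.h>

enum event { EV_GOING_DOWN, EV_DOWN, EV_UNREGISTER };

struct device { const char *name; int up; };

typedef void (*notifier_fn)(enum event, struct device *);

/* Synthesize teardown events for every device toward the removed notifier. */
static void replay_teardown(notifier_fn nb, struct device *devs, int n)
{
        for (int i = 0; i < n; i++) {
                if (devs[i].up) {
                        nb(EV_GOING_DOWN, &devs[i]);
                        nb(EV_DOWN, &devs[i]);
                }
                nb(EV_UNREGISTER, &devs[i]);
        }
}

static void handler(enum event ev, struct device *dev)
{
        printf("event %d for %s\n", ev, dev->name);
}

int main(void)
{
        struct device devs[] = { { "eth0", 1 }, { "eth1", 0 } };

        replay_teardown(handler, devs, 2);
        return 0;
}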
@@ -1513,10 +1533,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
                kfree_skb(skb);
                return NET_RX_DROP;
        }
-       skb_set_dev(skb, dev);
+       skb->dev = dev;
+       skb_dst_drop(skb);
        skb->tstamp.tv64 = 0;
        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, dev);
+       skb->mark = 0;
+       secpath_reset(skb);
+       nf_reset(skb);
        return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1771,36 +1795,6 @@ void netif_device_attach(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-/**
- * skb_dev_set -- assign a new device to a buffer
- * @skb: buffer for the new device
- * @dev: network device
- *
- * If an skb is owned by a device already, we have to reset
- * all data private to the namespace a device belongs to
- * before assigning it a new device.
- */
-#ifdef CONFIG_NET_NS
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
-       skb_dst_drop(skb);
-       if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
-               secpath_reset(skb);
-               nf_reset(skb);
-               skb_init_secmark(skb);
-               skb->mark = 0;
-               skb->priority = 0;
-               skb->nf_trace = 0;
-               skb->ipvs_property = 0;
-#ifdef CONFIG_NET_SCHED
-               skb->tc_index = 0;
-#endif
-       }
-       skb->dev = dev;
-}
-EXPORT_SYMBOL(skb_set_dev);
-#endif /* CONFIG_NET_NS */
-
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
@@ -3434,14 +3428,20 @@ static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct sk_buff *p;
+       unsigned int maclen = skb->dev->hard_header_len;
 
        for (p = napi->gro_list; p; p = p->next) {
                unsigned long diffs;
 
                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= p->vlan_tci ^ skb->vlan_tci;
-               diffs |= compare_ether_header(skb_mac_header(p),
-                                             skb_gro_mac_header(skb));
+               if (maclen == ETH_HLEN)
+                       diffs |= compare_ether_header(skb_mac_header(p),
+                                                     skb_gro_mac_header(skb));
+               else if (!diffs)
+                       diffs = memcmp(skb_mac_header(p),
+                                      skb_gro_mac_header(skb),
+                                      maclen);
                NAPI_GRO_CB(p)->same_flow = !diffs;
                NAPI_GRO_CB(p)->flush = 0;
        }
@@ -3498,7 +3498,8 @@ EXPORT_SYMBOL(napi_gro_receive);
 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
        __skb_pull(skb, skb_headlen(skb));
-       skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+       /* restore the reserve we had after netdev_alloc_skb_ip_align() */
+       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
        skb->vlan_tci = 0;
        skb->dev = napi->dev;
        skb->skb_iif = 0;
@@ -6105,6 +6106,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
+       rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
 
        /*
         *      Flush the unicast and multicast chains
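
Earlier in this section, dev_forward_skb() is changed to reset dst, mark, secpath and netfilter state inline now that skb_set_dev() is gone, i.e. every piece of namespace-local metadata is dropped before the skb is handed to the new device. A stripped-down sketch of that scrubbing step (the struct below is a stand-in, not sk_buff):

#include <stdio.h>

struct pkt {
        void *dev;          /* owning device                 */
        void *dst;          /* cached route                  */
        unsigned int mark;  /* namespace-local firewall mark */
        long tstamp;
        int pkt_type;
};

/* Drop every piece of state that only made sense in the old context
 * before handing the packet to another device. */
static void forward_pkt(struct pkt *p, void *new_dev)
{
        p->dev = new_dev;
        p->dst = NULL;      /* analogous to skb_dst_drop() */
        p->mark = 0;
        p->tstamp = 0;
        p->pkt_type = 0;
        /* secpath_reset() / nf_reset() would go here in the kernel */
}

int main(void)
{
        int old_dev, new_dev, route;
        struct pkt p = { .dev = &old_dev, .dst = &route, .mark = 7 };

        forward_pkt(&p, &new_dev);
        printf("mark after forward: %u\n", p.mark);
        return 0;
}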
index 6135f3671692689b773c2289d940ecabb29c6a26..8246d47a21842fa6d60823c979166526f1b8b04c 100644 (file)
@@ -171,7 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
        dst_init_metrics(dst, dst_default_metrics, true);
        dst->expires = 0UL;
        dst->path = dst;
-       dst->neighbour = NULL;
+       RCU_INIT_POINTER(dst->_neighbour, NULL);
        dst->hh = NULL;
 #ifdef CONFIG_XFRM
        dst->xfrm = NULL;
@@ -231,7 +231,7 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
        smp_rmb();
 
 again:
-       neigh = dst->neighbour;
+       neigh = rcu_dereference_protected(dst->_neighbour, 1);
        hh = dst->hh;
        child = dst->child;
 
@@ -240,7 +240,7 @@ again:
                hh_cache_put(hh);
 
        if (neigh) {
-               dst->neighbour = NULL;
+               RCU_INIT_POINTER(dst->_neighbour, NULL);
                neigh_release(neigh);
        }
 
@@ -367,14 +367,19 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
        if (!unregister) {
                dst->input = dst->output = dst_discard;
        } else {
+               struct neighbour *neigh;
+
                dst->dev = dev_net(dst->dev)->loopback_dev;
                dev_hold(dst->dev);
                dev_put(dev);
-               if (dst->neighbour && dst->neighbour->dev == dev) {
-                       dst->neighbour->dev = dst->dev;
+               rcu_read_lock();
+               neigh = dst_get_neighbour(dst);
+               if (neigh && neigh->dev == dev) {
+                       neigh->dev = dst->dev;
                        dev_hold(dst->dev);
                        dev_put(dev);
                }
+               rcu_read_unlock();
        }
 }
 
index 990703b8863b4d0bdb29619350e7d4aec01bde17..a6bda2a514f729dce14ae2d72b079dd6970483c8 100644 (file)
@@ -172,29 +172,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 
 static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
-                         const struct flowi *key)
+                         const struct flowi *key,
+                         size_t keysize)
 {
        const u32 *k = (const u32 *) key;
+       const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
 
-       return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+       return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
 }
 
-typedef unsigned long flow_compare_t;
-
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
  */
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+                           size_t keysize)
 {
        const flow_compare_t *k1, *k1_lim, *k2;
-       const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
-       BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
        k1 = (const flow_compare_t *) key1;
-       k1_lim = k1 + n_elem;
+       k1_lim = k1 + keysize;
 
        k2 = (const flow_compare_t *) key2;
 
@@ -215,6 +212,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
        struct flow_cache_entry *fle, *tfle;
        struct hlist_node *entry;
        struct flow_cache_object *flo;
+       size_t keysize;
        unsigned int hash;
 
        local_bh_disable();
@@ -222,6 +220,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
        fle = NULL;
        flo = NULL;
+
+       keysize = flow_key_size(family);
+       if (!keysize)
+               goto nocache;
+
        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!fcp->hash_table)
@@ -230,11 +233,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);
 
-       hash = flow_hash_code(fc, fcp, key);
+       hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
                if (tfle->family == family &&
                    tfle->dir == dir &&
-                   flow_key_compare(key, &tfle->key) == 0) {
+                   flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
@@ -248,7 +251,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                if (fle) {
                        fle->family = family;
                        fle->dir = dir;
-                       memcpy(&fle->key, key, sizeof(*key));
+                       memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
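
The flow-cache hunks above hash and compare only flow_key_size(family) words of the key instead of the whole struct flowi, and skip the cache entirely when the family reports a zero size. A toy illustration of sizing the hash and compare by family (word counts and the hash function are placeholders, not jhash2()):

#include <stdio.h>
#include <stdint.h>

typedef unsigned long flow_word_t;

/* Words of the key a given family actually uses; values are made up. */
static size_t key_words(int family)
{
        switch (family) {
        case 4:  return 2;
        case 6:  return 5;
        default: return 0;   /* unknown family: do not cache */
        }
}

/* Placeholder hash over only the used words. */
static uint32_t key_hash(const flow_word_t *k, size_t nwords)
{
        uint32_t h = 2166136261u;

        for (size_t i = 0; i < nwords; i++)
                h = (h ^ (uint32_t)k[i]) * 16777619u;   /* FNV-1a style */
        return h;
}

/* Word-by-word compare limited to the used part of the key. */
static int key_equal(const flow_word_t *a, const flow_word_t *b, size_t nwords)
{
        for (size_t i = 0; i < nwords; i++)
                if (a[i] != b[i])
                        return 0;
        return 1;
}

int main(void)
{
        flow_word_t a[5] = { 1, 2, 3, 4, 5 };
        flow_word_t b[5] = { 1, 2, 9, 9, 9 };
        size_t n = key_words(4);    /* only the first two words matter */

        printf("hash=%08x equal=%d\n", key_hash(a, n), key_equal(a, b, n));
        return 0;
}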
index 16db887078047570afe7f3f7a1b0405bc0181aa2..96bb0a33f86133b29c3f93082bae3987bc6be4bd 100644 (file)
@@ -823,6 +823,8 @@ next_elt:
                write_unlock_bh(&tbl->lock);
                cond_resched();
                write_lock_bh(&tbl->lock);
+               nht = rcu_dereference_protected(tbl->nht,
+                                               lockdep_is_held(&tbl->lock));
        }
        /* Cycle through all hash buckets every base_reachable_time/2 ticks.
         * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
@@ -1173,12 +1175,17 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 
                while (neigh->nud_state & NUD_VALID &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
-                       struct neighbour *n1 = neigh;
+                       struct dst_entry *dst = skb_dst(skb);
+                       struct neighbour *n2, *n1 = neigh;
                        write_unlock_bh(&neigh->lock);
+
+                       rcu_read_lock();
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
-                       if (skb_dst(skb) && skb_dst(skb)->neighbour)
-                               n1 = skb_dst(skb)->neighbour;
+                       if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
+                               n1 = n2;
                        n1->output(skb);
+                       rcu_read_unlock();
+
                        write_lock_bh(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
@@ -1300,10 +1307,10 @@ EXPORT_SYMBOL(neigh_compat_output);
 int neigh_resolve_output(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *neigh;
+       struct neighbour *neigh = dst_get_neighbour(dst);
        int rc = 0;
 
-       if (!dst || !(neigh = dst->neighbour))
+       if (!dst)
                goto discard;
 
        __skb_pull(skb, skb_network_offset(skb));
@@ -1333,7 +1340,7 @@ out:
        return rc;
 discard:
        NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
-                     dst, dst ? dst->neighbour : NULL);
+                     dst, neigh);
 out_kfree_skb:
        rc = -EINVAL;
        kfree_skb(skb);
@@ -1347,7 +1354,7 @@ int neigh_connected_output(struct sk_buff *skb)
 {
        int err;
        struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *neigh = dst->neighbour;
+       struct neighbour *neigh = dst_get_neighbour(dst);
        struct net_device *dev = neigh->dev;
        unsigned int seq;
 
index ea489db1bc2361c20001576a5e909aa22cb689d2..2772ed11bec911d13d8279fa085a66e4969db3d1 100644 (file)
@@ -29,6 +29,20 @@ EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS   13 /* +1 for len +2 for rcu_head */
 
+static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
+
+static struct net_generic *net_alloc_generic(void)
+{
+       struct net_generic *ng;
+       size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
+
+       ng = kzalloc(generic_size, GFP_KERNEL);
+       if (ng)
+               ng->len = max_gen_ptrs;
+
+       return ng;
+}
+
 static int net_assign_generic(struct net *net, int id, void *data)
 {
        struct net_generic *ng, *old_ng;
@@ -42,8 +56,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
        if (old_ng->len >= id)
                goto assign;
 
-       ng = kzalloc(sizeof(struct net_generic) +
-                       id * sizeof(void *), GFP_KERNEL);
+       ng = net_alloc_generic();
        if (ng == NULL)
                return -ENOMEM;
 
@@ -58,7 +71,6 @@ static int net_assign_generic(struct net *net, int id, void *data)
         * the old copy for kfree after a grace period.
         */
 
-       ng->len = id;
        memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
        rcu_assign_pointer(net->gen, ng);
@@ -70,21 +82,29 @@ assign:
 
 static int ops_init(const struct pernet_operations *ops, struct net *net)
 {
-       int err;
+       int err = -ENOMEM;
+       void *data = NULL;
+
        if (ops->id && ops->size) {
-               void *data = kzalloc(ops->size, GFP_KERNEL);
+               data = kzalloc(ops->size, GFP_KERNEL);
                if (!data)
-                       return -ENOMEM;
+                       goto out;
 
                err = net_assign_generic(net, *ops->id, data);
-               if (err) {
-                       kfree(data);
-                       return err;
-               }
+               if (err)
+                       goto cleanup;
        }
+       err = 0;
        if (ops->init)
-               return ops->init(net);
-       return 0;
+               err = ops->init(net);
+       if (!err)
+               return 0;
+
+cleanup:
+       kfree(data);
+
+out:
+       return err;
 }
 
 static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -159,18 +179,6 @@ out_undo:
        goto out;
 }
 
-static struct net_generic *net_alloc_generic(void)
-{
-       struct net_generic *ng;
-       size_t generic_size = sizeof(struct net_generic) +
-               INITIAL_NET_GEN_PTRS * sizeof(void *);
-
-       ng = kzalloc(generic_size, GFP_KERNEL);
-       if (ng)
-               ng->len = INITIAL_NET_GEN_PTRS;
-
-       return ng;
-}
 
 #ifdef CONFIG_NET_NS
 static struct kmem_cache *net_cachep;
@@ -446,12 +454,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
 static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
 {
-       int err = 0;
-       err = ops_init(ops, &init_net);
-       if (err)
-               ops_free(ops, &init_net);
-       return err;
-       
+       return ops_init(ops, &init_net);
 }
 
 static void __unregister_pernet_operations(struct pernet_operations *ops)
@@ -481,6 +484,7 @@ again:
                        }
                        return error;
                }
+               max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
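
ops_init() above is restructured around a single error path: allocate optional per-net data, attach it, run the init hook, and free the allocation via goto cleanup on any failure. A self-contained sketch of that shape with invented helper names:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct ops {
        size_t size;                /* optional per-instance allocation */
        int (*init)(void *data);
};

static int fake_register(void *data) { return data ? 0 : -EINVAL; }

static int ops_init(const struct ops *ops, void **out)
{
        int err = -ENOMEM;
        void *data = NULL;

        if (ops->size) {
                data = calloc(1, ops->size);
                if (!data)
                        goto out;

                err = fake_register(data);
                if (err)
                        goto cleanup;
        }

        err = 0;
        if (ops->init)
                err = ops->init(data);
        if (!err) {
                *out = data;
                return 0;
        }

cleanup:
        free(data);            /* single unwind point for every failure */
out:
        return err;
}

static int init_ok(void *data) { (void)data; return 0; }

int main(void)
{
        struct ops ops = { .size = 64, .init = init_ok };
        void *data = NULL;

        printf("ops_init: %d\n", ops_init(&ops, &data));
        free(data);
        return 0;
}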
index 18d9cbda3a39c0b19d0751350489c08249cba635..05db410fd1354bd23d1fdfa25fe1c87a6c81e7ae 100644 (file)
@@ -193,7 +193,7 @@ void netpoll_poll_dev(struct net_device *dev)
 
        poll_napi(dev);
 
-       if (dev->priv_flags & IFF_SLAVE) {
+       if (dev->flags & IFF_SLAVE) {
                if (dev->npinfo) {
                        struct net_device *bond_dev = dev->master;
                        struct sk_buff *skb;
index e35a6fbb81107229d05610ae89161905c927d6a0..c0e0f7679e75838833b1ea9be807600ae4d3cbbc 100644 (file)
@@ -1932,7 +1932,7 @@ static int pktgen_device_event(struct notifier_block *unused,
 {
        struct net_device *dev = ptr;
 
-       if (!net_eq(dev_net(dev), &init_net))
+       if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
                return NOTIFY_DONE;
 
        /* It is OK that we do not hold the group lock right now,
@@ -3755,12 +3755,18 @@ static void __exit pg_cleanup(void)
 {
        struct pktgen_thread *t;
        struct list_head *q, *n;
+       LIST_HEAD(list);
 
        /* Stop all interfaces & threads */
        pktgen_exiting = true;
 
-       list_for_each_safe(q, n, &pktgen_threads) {
+       mutex_lock(&pktgen_thread_lock);
+       list_splice_init(&pktgen_threads, &list);
+       mutex_unlock(&pktgen_thread_lock);
+
+       list_for_each_safe(q, n, &list) {
                t = list_entry(q, struct pktgen_thread, th_list);
+               list_del(&t->th_list);
                kthread_stop(t->tsk);
                kfree(t);
        }
index 46cbd28f40f9698bfbfad886047223c4bba16df2..4821df84eba39f5adc2b04d129562dda4ab8e1da 100644 (file)
@@ -2985,6 +2985,8 @@ static void sock_rmem_free(struct sk_buff *skb)
  */
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
+       int len = skb->len;
+
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned)sk->sk_rcvbuf)
                return -ENOMEM;
@@ -2999,7 +3001,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 
        skb_queue_tail(&sk->sk_error_queue, skb);
        if (!sock_flag(sk, SOCK_DEAD))
-               sk->sk_data_ready(sk, skb->len);
+               sk->sk_data_ready(sk, len);
        return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
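
The sock_queue_err_skb() fix above saves skb->len before queueing, because the skb may be consumed by the reader as soon as it sits on the error queue. The same capture-before-handoff idea in miniature (the buffer type is invented):

#include <stdio.h>
#include <stdlib.h>

struct buffer { size_t len; };

/* Stand-in for handing the buffer to another owner; after this call the
 * caller must not dereference it (freeing it makes that concrete). */
static void enqueue(struct buffer *b)
{
        free(b);
}

/* Read the length before queueing, then use the saved copy. */
static size_t queue_and_report(struct buffer *b)
{
        size_t len = b->len;    /* capture before losing ownership */

        enqueue(b);
        return len;             /* safe: no access to *b after enqueue() */
}

int main(void)
{
        struct buffer *b = malloc(sizeof(*b));

        b->len = 42;
        printf("queued %zu bytes\n", queue_and_report(b));
        return 0;
}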
index 6e819780c23252b3387c1b2daec25a261fa1f2fb..aebb419519b38163f1b60ef613ce6388692ac3d4 100644 (file)
@@ -1257,6 +1257,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                        /* It is still raw copy of parent, so invalidate
                         * destructor and make plain sk_free() */
                        newsk->sk_destruct = NULL;
+                       bh_unlock_sock(newsk);
                        sk_free(newsk);
                        newsk = NULL;
                        goto out;
index 7e7ca375d4316e93d85482dacf0b5da951b5b12e..97d036a6b899d77e829e1a8871fdba3832b5804f 100644 (file)
@@ -57,9 +57,13 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
        case PTP_CLASS_V2_VLAN:
                phydev = skb->dev->phydev;
                if (likely(phydev->drv->txtstamp)) {
+                       if (!atomic_inc_not_zero(&sk->sk_refcnt))
+                               return;
                        clone = skb_clone(skb, GFP_ATOMIC);
-                       if (!clone)
+                       if (!clone) {
+                               sock_put(sk);
                                return;
+                       }
                        clone->sk = sk;
                        phydev->drv->txtstamp(phydev, clone, type);
                }
@@ -76,8 +80,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
        struct sock_exterr_skb *serr;
        int err;
 
-       if (!hwtstamps)
+       if (!hwtstamps) {
+               sock_put(sk);
+               kfree_skb(skb);
                return;
+       }
 
        *skb_hwtstamps(skb) = *hwtstamps;
        serr = SKB_EXT_ERR(skb);
@@ -86,6 +93,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
        skb->sk = NULL;
        err = sock_queue_err_skb(sk, skb);
+       sock_put(sk);
        if (err)
                kfree_skb(skb);
 }
index 602dade7e9a3576905ae6f1d1dc927df7c8f8b63..9810610d26c6c36af1df40306b44c7fa746d5f02 100644 (file)
@@ -208,7 +208,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *)dst;
-       struct neighbour *neigh = dst->neighbour;
+       struct neighbour *neigh = dst_get_neighbour(dst);
        struct net_device *dev = neigh->dev;
        char mac_addr[ETH_ALEN];
 
@@ -227,7 +227,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 static int dn_long_output(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *neigh = dst->neighbour;
+       struct neighbour *neigh = dst_get_neighbour(dst);
        struct net_device *dev = neigh->dev;
        int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
        unsigned char *data;
@@ -274,7 +274,7 @@ static int dn_long_output(struct sk_buff *skb)
 static int dn_short_output(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *neigh = dst->neighbour;
+       struct neighbour *neigh = dst_get_neighbour(dst);
        struct net_device *dev = neigh->dev;
        int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
        struct dn_short_packet *sp;
@@ -318,7 +318,7 @@ static int dn_short_output(struct sk_buff *skb)
 static int dn_phase3_output(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *neigh = dst->neighbour;
+       struct neighbour *neigh = dst_get_neighbour(dst);
        struct net_device *dev = neigh->dev;
        int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
        struct dn_short_packet *sp;
index 74544bc6fdecf8251f8377cf967a0c37992352ed..b91b60363c3926ddf7ac3d4626b5f52ce77d0802 100644 (file)
@@ -241,9 +241,11 @@ static int dn_dst_gc(struct dst_ops *ops)
  */
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
+       struct neighbour *n = dst_get_neighbour(dst);
        u32 min_mtu = 230;
-       struct dn_dev *dn = dst->neighbour ?
-                           rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;
+       struct dn_dev *dn;
+
+       dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;
 
        if (dn && dn->use_long == 0)
                min_mtu -= 6;
@@ -715,7 +717,7 @@ static int dn_output(struct sk_buff *skb)
 
        int err = -EINVAL;
 
-       if ((neigh = dst->neighbour) == NULL)
+       if ((neigh = dst_get_neighbour(dst)) == NULL)
                goto error;
 
        skb->dev = dev;
@@ -750,7 +752,7 @@ static int dn_forward(struct sk_buff *skb)
        struct dst_entry *dst = skb_dst(skb);
        struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
        struct dn_route *rt;
-       struct neighbour *neigh = dst->neighbour;
+       struct neighbour *neigh = dst_get_neighbour(dst);
        int header_len;
 #ifdef CONFIG_NETFILTER
        struct net_device *dev = skb->dev;
@@ -833,11 +835,11 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
        }
        rt->rt_type = res->type;
 
-       if (dev != NULL && rt->dst.neighbour == NULL) {
+       if (dev != NULL && dst_get_neighbour(&rt->dst) == NULL) {
                n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
                if (IS_ERR(n))
                        return PTR_ERR(n);
-               rt->dst.neighbour = n;
+               dst_set_neighbour(&rt->dst, n);
        }
 
        if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
@@ -1144,7 +1146,7 @@ make_route:
        rt->rt_dst_map    = fld.daddr;
        rt->rt_src_map    = fld.saddr;
 
-       rt->dst.neighbour = neigh;
+       dst_set_neighbour(&rt->dst, neigh);
        neigh = NULL;
 
        rt->dst.lastuse = jiffies;
@@ -1416,7 +1418,7 @@ make_route:
        rt->fld.flowidn_iif  = in_dev->ifindex;
        rt->fld.flowidn_mark = fld.flowidn_mark;
 
-       rt->dst.neighbour = neigh;
+       dst_set_neighbour(&rt->dst, neigh);
        rt->dst.lastuse = jiffies;
        rt->dst.output = dn_rt_bug;
        switch(res.type) {
index c1f4154552fc582320e276e6768831ed9ec32548..36d14406261e8c9ad486103127ae1106baca4678 100644 (file)
@@ -136,8 +136,6 @@ static void ah_output_done(struct crypto_async_request *base, int err)
                memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
        }
 
-       err = ah->nexthdr;
-
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
 }
@@ -264,12 +262,12 @@ static void ah_input_done(struct crypto_async_request *base, int err)
        if (err)
                goto out;
 
+       err = ah->nexthdr;
+
        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, ihl);
        __skb_pull(skb, ah_hlen + ihl);
        skb_set_transport_header(skb, -ihl);
-
-       err = ah->nexthdr;
 out:
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_input_resume(skb, err);
@@ -371,8 +369,6 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
                if (err == -EINPROGRESS)
                        goto out;
 
-               if (err == -EBUSY)
-                       err = NET_XMIT_DROP;
                goto out_free;
        }
 
index 1b74d3b643712ad94ed3bd2fd6d2aab6d6cd1d96..d8f852dbf6600f4dab50893153bebdbc1d927f2a 100644 (file)
@@ -518,26 +518,32 @@ EXPORT_SYMBOL(arp_find);
 
 /* END OF OBSOLETE FUNCTIONS */
 
+struct neighbour *__arp_bind_neighbour(struct dst_entry *dst, __be32 nexthop)
+{
+       struct net_device *dev = dst->dev;
+
+       if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+               nexthop = 0;
+       return __neigh_lookup_errno(
+#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
+               dev->type == ARPHRD_ATM ?
+               clip_tbl_hook :
+#endif
+               &arp_tbl, &nexthop, dev);
+}
+
 int arp_bind_neighbour(struct dst_entry *dst)
 {
        struct net_device *dev = dst->dev;
-       struct neighbour *n = dst->neighbour;
+       struct neighbour *n = dst_get_neighbour(dst);
 
        if (dev == NULL)
                return -EINVAL;
        if (n == NULL) {
-               __be32 nexthop = ((struct rtable *)dst)->rt_gateway;
-               if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
-                       nexthop = 0;
-               n = __neigh_lookup_errno(
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-                                        dev->type == ARPHRD_ATM ?
-                                        clip_tbl_hook :
-#endif
-                                        &arp_tbl, &nexthop, dev);
+               n = __arp_bind_neighbour(dst, ((struct rtable *)dst)->rt_gateway);
                if (IS_ERR(n))
                        return PTR_ERR(n);
-               dst->neighbour = n;
+               dst_set_neighbour(dst, n);
        }
        return 0;
 }
@@ -900,7 +906,8 @@ static int arp_process(struct sk_buff *skb)
                        if (addr_type == RTN_UNICAST  &&
                            (arp_fwd_proxy(in_dev, dev, rt) ||
                             arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
-                            pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+                            (rt->dst.dev != dev &&
+                             pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
                                n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
                                if (n)
                                        neigh_release(n);
index 66439a7c6d3123309786a50bff4daff090c2f96c..c48323ad268bd6b080f90a0e265a39f065b30874 100644 (file)
@@ -1496,7 +1496,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
                             void __user *buffer,
                             size_t *lenp, loff_t *ppos)
 {
+       int old_value = *(int *)ctl->data;
        int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+       int new_value = *(int *)ctl->data;
 
        if (write) {
                struct ipv4_devconf *cnf = ctl->extra1;
@@ -1507,6 +1509,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
 
                if (cnf == net->ipv4.devconf_dflt)
                        devinet_copy_dflt_conf(net, i);
+               if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+                       if ((new_value == 0) && (old_value != 0))
+                               rt_cache_flush(net, 0);
        }
 
        return ret;
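
devinet_conf_proc() above snapshots the value before proc_dointvec() runs and flushes the route cache only when ACCEPT_LOCAL actually transitions from non-zero to zero. A tiny model of acting on that old-to-new transition (globals stand in for the ctl_table plumbing):

#include <stdio.h>

static int config_value;                 /* the knob being written    */
static int cache_flushes;                /* side effect we care about */

static void write_handler(int new_value)
{
        int old_value = config_value;    /* snapshot before the write */

        config_value = new_value;        /* the generic update        */

        if (new_value == 0 && old_value != 0)
                cache_flushes++;         /* act only on a 1 -> 0 flip */
}

int main(void)
{
        write_handler(1);
        write_handler(0);   /* triggers a flush */
        write_handler(0);   /* no change, no flush */
        printf("flushes: %d\n", cache_flushes);
        return 0;
}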
index a5b413416da33305bdf83ed2fd9e6104031e3687..530787bc19902a05ce1934c7f9d6b656e0aed46e 100644 (file)
@@ -457,28 +457,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
-       u32 rem;
-
-       mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-       rem = mtu & (align - 1);
-       mtu &= ~(align - 1);
+       unsigned int net_adj;
 
        switch (x->props.mode) {
-       case XFRM_MODE_TUNNEL:
-               break;
-       default:
        case XFRM_MODE_TRANSPORT:
-               /* The worst case */
-               mtu -= blksize - 4;
-               mtu += min_t(u32, blksize - 4, rem);
-               break;
        case XFRM_MODE_BEET:
-               /* The worst case. */
-               mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
+               net_adj = sizeof(struct iphdr);
                break;
+       case XFRM_MODE_TUNNEL:
+               net_adj = 0;
+               break;
+       default:
+               BUG();
        }
 
-       return mtu - 2;
+       return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+                net_adj) & ~(align - 1)) + (net_adj - 2);
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
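
esp4_get_mtu() above is rewritten as a single expression: subtract the ESP header, the ICV and a per-mode network-header adjustment, round down to the cipher alignment, then add back net_adj - 2. A worked example of just that arithmetic, with the sizes below picked only for illustration:

#include <stdio.h>

static unsigned int esp_payload_mtu(unsigned int mtu,
                                    unsigned int header_len,
                                    unsigned int auth_len,
                                    unsigned int align,
                                    unsigned int net_adj)
{
        /* Round the space left for the encapsulated payload down to the
         * cipher alignment, then apply the net_adj - 2 correction, exactly
         * as in the expression in the hunk above. */
        return ((mtu - header_len - auth_len - net_adj) & ~(align - 1))
               + (net_adj - 2);
}

int main(void)
{
        /* e.g. 1500-byte link MTU, 24-byte ESP header, 12-byte ICV,
         * 16-byte cipher blocks, transport mode with a 20-byte IPv4 header:
         * (1500 - 24 - 12 - 20) = 1444 -> 1440 aligned -> 1440 + 18 = 1458 */
        printf("payload MTU: %u\n", esp_payload_mtu(1500, 24, 12, 16, 20));
        return 0;
}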
index 33e2c35b74b770f526045776e355e01114477fd9..7e454ba8e850e4f6d92975ff420a1dd21091f6bc 100644 (file)
@@ -142,6 +142,18 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
 };
 
 /* Release a nexthop info record */
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+       struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+       change_nexthops(fi) {
+               if (nexthop_nh->nh_dev)
+                       dev_put(nexthop_nh->nh_dev);
+       } endfor_nexthops(fi);
+
+       release_net(fi->fib_net);
+       kfree(fi);
+}
 
 void free_fib_info(struct fib_info *fi)
 {
@@ -149,14 +161,8 @@ void free_fib_info(struct fib_info *fi)
                pr_warning("Freeing alive fib_info %p\n", fi);
                return;
        }
-       change_nexthops(fi) {
-               if (nexthop_nh->nh_dev)
-                       dev_put(nexthop_nh->nh_dev);
-               nexthop_nh->nh_dev = NULL;
-       } endfor_nexthops(fi);
        fib_info_cnt--;
-       release_net(fi->fib_net);
-       kfree_rcu(fi, rcu);
+       call_rcu(&fi->rcu, free_fib_info_rcu);
 }
 
 void fib_release_info(struct fib_info *fi)
index 58c25ea5a5c19ef58ec204d79c209744bfc6a59a..0d884eb2b14f639bb70684b024d0811e990921c1 100644 (file)
@@ -1371,6 +1371,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 
                        if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
                                continue;
+                       if (fi->fib_dead)
+                               continue;
                        if (fa->fa_info->fib_scope < flp->flowi4_scope)
                                continue;
                        fib_alias_accessed(fa);
index d577199eabd5c3f179acd8e3d7f6602837fb8b5b..e0d42dbb33feb9a040cdf2a467028afbd20da2e7 100644 (file)
@@ -875,6 +875,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                 * to be intended in a v3 query.
                 */
                max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
+               if (!max_delay)
+                       max_delay = 1;  /* can't mod w/ 0 */
        } else { /* v3 */
                if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
                        return;
index 3b34d1c862709e7bde3cd3665fad1e191692506d..29a07b6c7168f7369b13e25d8c96011c6118ec56 100644 (file)
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
        rt = skb_rtable(skb);
 
-       if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+       if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
                goto sr_failed;
 
        if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
index 8871067560dba3ee7686fd2add36bc50f5f2587c..d7bb94c48345b90e4a80c27e8338da4dbe28dec0 100644 (file)
@@ -731,9 +731,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                }
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
+                       struct neighbour *neigh = dst_get_neighbour(skb_dst(skb));
                        const struct in6_addr *addr6;
                        int addr_type;
-                       struct neighbour *neigh = skb_dst(skb)->neighbour;
 
                        if (neigh == NULL)
                                goto tx_error;
index ec93335901ddc45d09f7c85c53270ea9a6772207..42dd1a90edea0916379f41d9722d5b574f7b69e2 100644 (file)
@@ -568,11 +568,12 @@ void ip_forward_options(struct sk_buff *skb)
                     ) {
                        if (srrptr + 3 > srrspace)
                                break;
-                       if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+                       if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
                                break;
                }
                if (srrptr + 3 <= srrspace) {
                        opt->is_changed = 1;
+                       ip_hdr(skb)->daddr = opt->nexthop;
                        ip_rt_get_source(&optptr[srrptr-1], skb, rt);
                        optptr[2] = srrptr+4;
                } else if (net_ratelimit())
@@ -640,6 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
        }
        if (srrptr <= srrspace) {
                opt->srr_is_hit = 1;
+               opt->nexthop = nexthop;
                opt->is_changed = 1;
        }
        return 0;
index 0c99db4c80b1fb5054d4d09018288775755aee16..51a3eec2c7069fa5c866854e2fb25178c41a92fe 100644 (file)
@@ -182,6 +182,8 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);
+       struct neighbour *neigh;
+       int res;
 
        if (rt->rt_type == RTN_MULTICAST) {
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
@@ -203,10 +205,22 @@ static inline int ip_finish_output2(struct sk_buff *skb)
                skb = skb2;
        }
 
-       if (dst->hh)
-               return neigh_hh_output(dst->hh, skb);
-       else if (dst->neighbour)
-               return dst->neighbour->output(skb);
+       rcu_read_lock();
+       if (dst->hh) {
+               int res = neigh_hh_output(dst->hh, skb);
+
+               rcu_read_unlock();
+               return res;
+       } else {
+               neigh = dst_get_neighbour(dst);
+               if (neigh) {
+                       res = neigh->output(skb);
+
+                       rcu_read_unlock();
+                       return res;
+               }
+               rcu_read_unlock();
+       }
 
        if (net_ratelimit())
                printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
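
The ip_finish_output2() hunk wraps the neighbour lookup and the output call in an RCU read-side section, because the pointer returned by dst_get_neighbour() is only guaranteed to stay valid while rcu_read_lock() is held. A minimal sketch of that reader-side pattern, using a hypothetical demo_neigh type rather than the real dst/neighbour structures:

#include <linux/errno.h>
#include <linux/rcupdate.h>

/* Hypothetical object published through an RCU-protected pointer. */
struct demo_neigh {
        int (*output)(struct demo_neigh *n);
};

struct demo_neigh __rcu *demo_neigh_ptr;

/* Reader side: the pointer is only valid inside the read-side critical
 * section, so the output call must complete before rcu_read_unlock().
 */
int demo_output(void)
{
        struct demo_neigh *n;
        int res = -ENODEV;

        rcu_read_lock();
        n = rcu_dereference(demo_neigh_ptr);
        if (n)
                res = n->output(n);
        rcu_read_unlock();

        return res;
}
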
index ab7e5542c1cff9203b8c5e21108eb83cb5a27515..7fbcabafa29b43ab0cff0cd32bdfba616724e8fa 100644 (file)
@@ -252,6 +252,10 @@ static int __init ic_open_devs(void)
                }
        }
 
+       /* no point in waiting if we could not bring up at least one device */
+       if (!ic_first_dev)
+               goto have_carrier;
+
        /* wait for a carrier on at least one device */
        start = jiffies;
        while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
index 378b20b7ca6e7401c9c4041635606b5a2526ccb6..6f06f7f39ea2b6e9deda77103495656a544fa2d3 100644 (file)
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       strcpy(nt->parms.name, dev->name);
+
        dev_hold(dev);
        ipip_tunnel_link(ipn, nt);
        return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
-       strcpy(tunnel->parms.name, dev->name);
 
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 static int __net_init ipip_init_net(struct net *net)
 {
        struct ipip_net *ipn = net_generic(net, ipip_net_id);
+       struct ip_tunnel *t;
        int err;
 
        ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
        if ((err = register_netdev(ipn->fb_tunnel_dev)))
                goto err_reg_dev;
 
+       t = netdev_priv(ipn->fb_tunnel_dev);
+
+       strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
        return 0;
 
 err_reg_dev:
index 75ef66f31832fe96ecfe019dd36e4351efb9d740..6b95f74a91d3f87c5d6b4f2bcef7dc35b671882d 100644 (file)
@@ -91,6 +91,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
@@ -132,6 +133,9 @@ static int ip_rt_min_pmtu __read_mostly             = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly      = 256;
 static int rt_chain_length_max __read_mostly   = 20;
 
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
 /*
  *     Interface to generic destination cache.
  */
@@ -412,7 +416,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                           "HHUptod\tSpecDst");
        else {
                struct rtable *r = v;
-               int len;
+               struct neighbour *n;
+               int len, HHUptod;
+
+               rcu_read_lock();
+               n = dst_get_neighbour(&r->dst);
+               HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+               rcu_read_unlock();
 
                seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
                              "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
@@ -427,8 +437,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                              dst_metric(&r->dst, RTAX_RTTVAR)),
                        r->rt_key_tos,
                        r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
-                       r->dst.hh ? (r->dst.hh->hh_output ==
-                                      dev_queue_xmit) : 0,
+                       HHUptod,
                        r->rt_spec_dst, &len);
 
                seq_printf(seq, "%*s\n", 127 - len, "");
@@ -821,6 +830,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        return ONE;
 }
 
+static void rt_check_expire(void)
+{
+       static unsigned int rover;
+       unsigned int i = rover, goal;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
+       unsigned long samples = 0;
+       unsigned long sum = 0, sum2 = 0;
+       unsigned long delta;
+       u64 mult;
+
+       delta = jiffies - expires_ljiffies;
+       expires_ljiffies = jiffies;
+       mult = ((u64)delta) << rt_hash_log;
+       if (ip_rt_gc_timeout > 1)
+               do_div(mult, ip_rt_gc_timeout);
+       goal = (unsigned int)mult;
+       if (goal > rt_hash_mask)
+               goal = rt_hash_mask + 1;
+       for (; goal > 0; goal--) {
+               unsigned long tmo = ip_rt_gc_timeout;
+               unsigned long length;
+
+               i = (i + 1) & rt_hash_mask;
+               rthp = &rt_hash_table[i].chain;
+
+               if (need_resched())
+                       cond_resched();
+
+               samples++;
+
+               if (rcu_dereference_raw(*rthp) == NULL)
+                       continue;
+               length = 0;
+               spin_lock_bh(rt_hash_lock_addr(i));
+               while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+                       prefetch(rth->dst.rt_next);
+                       if (rt_is_expired(rth)) {
+                               *rthp = rth->dst.rt_next;
+                               rt_free(rth);
+                               continue;
+                       }
+                       if (rth->dst.expires) {
+                               /* Entry is expired even if it is in use */
+                               if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+                                       tmo >>= 1;
+                                       rthp = &rth->dst.rt_next;
+                                       /*
+                                        * We only count entries on
+                                        * a chain with equal hash inputs once,
+                                        * so that entries for different QoS
+                                        * levels and other non-hash input
+                                        * attributes don't unfairly skew
+                                        * the length computation.
+                                        */
+                                       length += has_noalias(rt_hash_table[i].chain, rth);
+                                       continue;
+                               }
+                       } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                               goto nofree;
+
+                       /* Cleanup aged off entries. */
+                       *rthp = rth->dst.rt_next;
+                       rt_free(rth);
+               }
+               spin_unlock_bh(rt_hash_lock_addr(i));
+               sum += length;
+               sum2 += length*length;
+       }
+       if (samples) {
+               unsigned long avg = sum / samples;
+               unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+               rt_chain_length_max = max_t(unsigned long,
+                                       ip_rt_gc_elasticity,
+                                       (avg + 4*sd) >> FRACT_BITS);
+       }
+       rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * We call rt_check_expire() to scan part of the hash table.
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+       rt_check_expire();
+       schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -1269,11 +1369,41 @@ static void rt_del(unsigned hash, struct rtable *rt)
        spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
+static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+{
+       struct rtable *rt = (struct rtable *) dst;
+       __be32 orig_gw = rt->rt_gateway;
+       struct neighbour *n, *old_n;
+
+       dst_confirm(&rt->dst);
+
+       rt->rt_gateway = peer->redirect_learned.a4;
+       n = __arp_bind_neighbour(&rt->dst, rt->rt_gateway);
+       if (IS_ERR(n))
+               return PTR_ERR(n);
+       old_n = xchg(&rt->dst._neighbour, n);
+       if (old_n)
+               neigh_release(old_n);
+       if (!n || !(n->nud_state & NUD_VALID)) {
+               if (n)
+                       neigh_event_send(n, NULL);
+               rt->rt_gateway = orig_gw;
+               return -EAGAIN;
+       } else {
+               rt->rt_flags |= RTCF_REDIRECTED;
+               call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
+       }
+       return 0;
+}
+
 /* called in rcu_read_lock() section */
 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                    __be32 saddr, struct net_device *dev)
 {
+       int s, i;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
+       __be32 skeys[2] = { saddr, 0 };
+       int    ikeys[2] = { dev->ifindex, 0 };
        struct inet_peer *peer;
        struct net *net;
 
@@ -1296,13 +1426,43 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                        goto reject_redirect;
        }
 
-       peer = inet_getpeer_v4(daddr, 1);
-       if (peer) {
-               peer->redirect_learned.a4 = new_gw;
+       for (s = 0; s < 2; s++) {
+               for (i = 0; i < 2; i++) {
+                       unsigned int hash;
+                       struct rtable __rcu **rthp;
+                       struct rtable *rt;
 
-               inet_putpeer(peer);
+                       hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
 
-               atomic_inc(&__rt_peer_genid);
+                       rthp = &rt_hash_table[hash].chain;
+
+                       while ((rt = rcu_dereference(*rthp)) != NULL) {
+                               rthp = &rt->dst.rt_next;
+
+                               if (rt->rt_key_dst != daddr ||
+                                   rt->rt_key_src != skeys[s] ||
+                                   rt->rt_oif != ikeys[i] ||
+                                   rt_is_input_route(rt) ||
+                                   rt_is_expired(rt) ||
+                                   !net_eq(dev_net(rt->dst.dev), net) ||
+                                   rt->dst.error ||
+                                   rt->dst.dev != dev ||
+                                   rt->rt_gateway != old_gw)
+                                       continue;
+
+                               if (!rt->peer)
+                                       rt_bind_peer(rt, rt->rt_dst, 1);
+
+                               peer = rt->peer;
+                               if (peer) {
+                                       if (peer->redirect_learned.a4 != new_gw) {
+                                               peer->redirect_learned.a4 = new_gw;
+                                               atomic_inc(&__rt_peer_genid);
+                                       }
+                                       check_peer_redir(&rt->dst, peer);
+                               }
+                       }
+               }
        }
        return;
 
@@ -1589,31 +1749,6 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
        }
 }
 
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
-{
-       struct rtable *rt = (struct rtable *) dst;
-       __be32 orig_gw = rt->rt_gateway;
-
-       dst_confirm(&rt->dst);
-
-       neigh_release(rt->dst.neighbour);
-       rt->dst.neighbour = NULL;
-
-       rt->rt_gateway = peer->redirect_learned.a4;
-       if (arp_bind_neighbour(&rt->dst) ||
-           !(rt->dst.neighbour->nud_state & NUD_VALID)) {
-               if (rt->dst.neighbour)
-                       neigh_event_send(rt->dst.neighbour, NULL);
-               rt->rt_gateway = orig_gw;
-               return -EAGAIN;
-       } else {
-               rt->rt_flags |= RTCF_REDIRECTED;
-               call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
-                                       rt->dst.neighbour);
-       }
-       return 0;
-}
-
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 {
        struct rtable *rt = (struct rtable *) dst;
@@ -3087,6 +3222,13 @@ static ctl_table ipv4_route_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "gc_interval",
+               .data           = &ip_rt_gc_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
        {
                .procname       = "redirect_load",
                .data           = &ip_rt_redirect_load,
@@ -3297,6 +3439,11 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
+       INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+       expires_ljiffies = jiffies;
+       schedule_delayed_work(&expires_work,
+               net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
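
rt_check_expire(), reinstated above, also rebuilds the per-chain length statistics: it keeps running sums of the sampled chain lengths and of their squares, then sets rt_chain_length_max to roughly avg + 4*sd in FRACT_BITS fixed point. A small userspace sketch of that computation; the FRACT_BITS and elasticity values are assumed, not read from the kernel.

#include <math.h>
#include <stdio.h>

#define FRACT_BITS      3       /* illustrative fixed-point shift */
#define ELASTICITY      8       /* illustrative lower bound */

/* Derive a "max chain length" threshold from running sums of the observed
 * lengths, mirroring the avg + 4*sd heuristic used above.
 */
static unsigned long chain_length_max(unsigned long sum, unsigned long sum2,
                                      unsigned long samples)
{
        unsigned long avg, sd, threshold;

        if (!samples)
                return ELASTICITY;

        avg = sum / samples;
        sd = (unsigned long)sqrt((double)(sum2 / samples - avg * avg));
        threshold = (avg + 4 * sd) >> FRACT_BITS;

        return threshold > ELASTICITY ? threshold : ELASTICITY;
}

int main(void)
{
        /* e.g. 1000 buckets whose lengths sum to 4000, sum of squares 32000 */
        printf("%lu\n", chain_length_max(4000, 32000, 1000));
        return 0;
}
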
index 438262977b0f377abeb67075d1ee2a159106e1a0..895f2157e136f503e73307e7a55ca102f02a28da 100644 (file)
@@ -277,6 +277,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        struct rtable *rt;
        __u8 rcv_wscale;
        bool ecn_ok = false;
+       struct flowi4 fl4;
 
        if (!sysctl_tcp_syncookies || !th->ack || th->rst)
                goto out;
@@ -344,20 +345,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
         * hasn't changed since we received the original syn, but I see
         * no easy way to do this.
         */
-       {
-               struct flowi4 fl4;
-
-               flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
-                                  RT_SCOPE_UNIVERSE, IPPROTO_TCP,
-                                  inet_sk_flowi_flags(sk),
-                                  (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
-                                  ireq->loc_addr, th->source, th->dest);
-               security_req_classify_flow(req, flowi4_to_flowi(&fl4));
-               rt = ip_route_output_key(sock_net(sk), &fl4);
-               if (IS_ERR(rt)) {
-                       reqsk_free(req);
-                       goto out;
-               }
+       flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
+                          RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+                          inet_sk_flowi_flags(sk),
+                          (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
+                          ireq->loc_addr, th->source, th->dest);
+       security_req_classify_flow(req, flowi4_to_flowi(&fl4));
+       rt = ip_route_output_key(sock_net(sk), &fl4);
+       if (IS_ERR(rt)) {
+               reqsk_free(req);
+               goto out;
        }
 
        /* Try to redo what tcp_v4_send_synack did. */
@@ -371,5 +368,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        ireq->rcv_wscale  = rcv_wscale;
 
        ret = get_cookie_sock(sk, skb, req, &rt->dst);
+       /* ip_queue_xmit() depends on our flow being set up.
+        * Normal sockets get it right from inet_csk_route_child_sock().
+        */
+       if (ret)
+               inet_sk(ret)->cork.fl.u.ip4 = fl4;
 out:   return ret;
 }
index 09ced58e6a5151f86bbd8550557758621878b90f..58a944f4f791ce54280250856b065384ad4062b2 100644 (file)
@@ -854,8 +854,7 @@ new_segment:
 wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
-               if (copied)
-                       tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+               tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
                if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                        goto do_error;
@@ -864,7 +863,7 @@ wait_for_memory:
        }
 
 out:
-       if (copied)
+       if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
                tcp_push(sk, flags, mss_now, tp->nonagle);
        return copied;
 
@@ -3236,7 +3235,7 @@ void __init tcp_init(void)
 {
        struct sk_buff *skb = NULL;
        unsigned long limit;
-       int i, max_share, cnt;
+       int i, max_rshare, max_wshare, cnt;
        unsigned long jiffy = jiffies;
 
        BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3300,15 +3299,16 @@ void __init tcp_init(void)
 
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
        limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
-       max_share = min(4UL*1024*1024, limit);
+       max_wshare = min(4UL*1024*1024, limit);
+       max_rshare = min(6UL*1024*1024, limit);
 
        sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
        sysctl_tcp_wmem[1] = 16*1024;
-       sysctl_tcp_wmem[2] = max(64*1024, max_share);
+       sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
 
        sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
        sysctl_tcp_rmem[1] = 87380;
-       sysctl_tcp_rmem[2] = max(87380, max_share);
+       sysctl_tcp_rmem[2] = max(87380, max_rshare);
 
        printk(KERN_INFO "TCP: Hash tables configured "
               "(established %u bind %u)\n",
index b6771f9eb9d40affb7ffbdca7fb70a6f5bc8979e..7410a8c28e14bdde88b4a7b49c78576ddeee2994 100644 (file)
@@ -83,7 +83,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
 EXPORT_SYMBOL(sysctl_tcp_ecn);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
-int sysctl_tcp_adv_win_scale __read_mostly = 2;
+int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 int sysctl_tcp_stdurg __read_mostly;
@@ -328,6 +328,7 @@ static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
                        incr = __tcp_grow_window(sk, skb);
 
                if (incr) {
+                       incr = max_t(int, incr, 2 * skb->len);
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
                                               tp->window_clamp);
                        inet_csk(sk)->icsk_ack.quick |= 1;
@@ -460,8 +461,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
                if (!win_dep) {
                        m -= (new_sample >> 3);
                        new_sample += m;
-               } else if (m < new_sample)
-                       new_sample = m << 3;
+               } else {
+                       m <<= 3;
+                       if (m < new_sample)
+                               new_sample = m;
+               }
        } else {
                /* No previous measure. */
                new_sample = m << 3;
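
The tcp_rcv_rtt_update() hunk just above fixes a units bug: the stored estimate keeps the RTT left-shifted by 3, so the minimum-tracking branch must scale the new sample before comparing, not after. A standalone sketch of the corrected comparison (names and values are illustrative):

#include <stdio.h>

/* The estimate is kept as 8 * rtt (left-shifted by 3), as in the hunk above. */
static unsigned int rcv_rtt_min(unsigned int estimate, unsigned int sample)
{
        unsigned int m = sample << 3;   /* scale first, then compare */

        if (!estimate || m < estimate)
                estimate = m;
        return estimate;
}

int main(void)
{
        unsigned int est = 0;

        est = rcv_rtt_min(est, 100);    /* -> 800 */
        est = rcv_rtt_min(est, 40);     /* -> 320: 40 is genuinely smaller */
        est = rcv_rtt_min(est, 90);     /* stays 320; the old code accepted 90
                                         * because it compared 90 < 320 unscaled */
        printf("%u\n", est >> 3);
        return 0;
}
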
@@ -1289,25 +1293,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
        return in_sack;
 }
 
-static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
-                         struct tcp_sacktag_state *state,
+/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
+static u8 tcp_sacktag_one(struct sock *sk,
+                         struct tcp_sacktag_state *state, u8 sacked,
+                         u32 start_seq, u32 end_seq,
                          int dup_sack, int pcount)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       u8 sacked = TCP_SKB_CB(skb)->sacked;
        int fack_count = state->fack_count;
 
        /* Account D-SACK for retransmitted packet. */
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
                if (tp->undo_marker && tp->undo_retrans &&
-                   after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+                   after(end_seq, tp->undo_marker))
                        tp->undo_retrans--;
                if (sacked & TCPCB_SACKED_ACKED)
                        state->reord = min(fack_count, state->reord);
        }
 
        /* Nothing to do; acked frame is about to be dropped (was ACKed). */
-       if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+       if (!after(end_seq, tp->snd_una))
                return sacked;
 
        if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1326,13 +1331,13 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
                                /* New sack for not retransmitted frame,
                                 * which was in hole. It is reordering.
                                 */
-                               if (before(TCP_SKB_CB(skb)->seq,
+                               if (before(start_seq,
                                           tcp_highest_sack_seq(tp)))
                                        state->reord = min(fack_count,
                                                           state->reord);
 
                                /* SACK enhanced F-RTO (RFC4138; Appendix B) */
-                               if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+                               if (!after(end_seq, tp->frto_highmark))
                                        state->flag |= FLAG_ONLY_ORIG_SACKED;
                        }
 
@@ -1350,8 +1355,7 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 
                /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
                if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
-                   before(TCP_SKB_CB(skb)->seq,
-                          TCP_SKB_CB(tp->lost_skb_hint)->seq))
+                   before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
                        tp->lost_cnt_hint += pcount;
 
                if (fack_count > tp->fackets_out)
@@ -1370,6 +1374,9 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
        return sacked;
 }
 
+/* Shift newly-SACKed bytes from this skb to the immediately previous
+ * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
+ */
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                           struct tcp_sacktag_state *state,
                           unsigned int pcount, int shifted, int mss,
@@ -1377,12 +1384,21 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
+       u32 start_seq = TCP_SKB_CB(skb)->seq;   /* start of newly-SACKed */
+       u32 end_seq = start_seq + shifted;      /* end of newly-SACKed */
 
        BUG_ON(!pcount);
 
-       /* Tweak before seqno plays */
-       if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
-           !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+       /* Adjust counters and hints for the newly sacked sequence
+        * range but discard the return value since prev is already
+        * marked. We must tag the range first because the seq
+        * advancement below implicitly advances
+        * tcp_highest_sack_seq() when skb is highest_sack.
+        */
+       tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+                       start_seq, end_seq, dup_sack, pcount);
+
+       if (skb == tp->lost_skb_hint)
                tp->lost_cnt_hint += pcount;
 
        TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1408,9 +1424,6 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                skb_shinfo(skb)->gso_type = 0;
        }
 
-       /* We discard results */
-       tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
-
        /* Difference in this won't matter, both ACKed by the same cumul. ACK */
        TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
 
@@ -1558,6 +1571,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
                }
        }
 
+       /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
+       if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
+               goto fallback;
+
        if (!skb_shift(prev, skb, len))
                goto fallback;
        if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
@@ -1648,10 +1665,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                        break;
 
                if (in_sack) {
-                       TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
-                                                                 state,
-                                                                 dup_sack,
-                                                                 tcp_skb_pcount(skb));
+                       TCP_SKB_CB(skb)->sacked =
+                               tcp_sacktag_one(sk,
+                                               state,
+                                               TCP_SKB_CB(skb)->sacked,
+                                               TCP_SKB_CB(skb)->seq,
+                                               TCP_SKB_CB(skb)->end_seq,
+                                               dup_sack,
+                                               tcp_skb_pcount(skb));
 
                        if (!before(TCP_SKB_CB(skb)->seq,
                                    tcp_highest_sack_seq(tp)))
@@ -2536,6 +2557,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 
                if (cnt > packets) {
                        if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+                           (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
                            (oldcnt >= packets))
                                break;
 
index b3e6956d7ba7da11f5342821e39541759eaee157..53a5af66c0bbd79ed5eb7c2a00fac5ecdce96ba4 100644 (file)
@@ -630,7 +630,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
        arg.iov[0].iov_len  = sizeof(rep.th);
 
 #ifdef CONFIG_TCP_MD5SIG
-       key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
+       key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
@@ -650,6 +650,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+       /* When the socket is gone, all binding information is lost.
+        * Routing might fail in this case, so use iif for oif to
+        * make sure we can deliver the reply.
+        */
+       arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
 
        net = dev_net(skb_dst(skb)->dev);
        ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
@@ -909,18 +914,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
                        }
                        sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                }
-               if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+               md5sig = tp->md5sig_info;
+               if (md5sig->entries4 == 0 &&
+                   tcp_alloc_md5sig_pool(sk) == NULL) {
                        kfree(newkey);
                        return -ENOMEM;
                }
-               md5sig = tp->md5sig_info;
 
                if (md5sig->alloced4 == md5sig->entries4) {
                        keys = kmalloc((sizeof(*keys) *
                                        (md5sig->entries4 + 1)), GFP_ATOMIC);
                        if (!keys) {
                                kfree(newkey);
-                               tcp_free_md5sig_pool();
+                               if (md5sig->entries4 == 0)
+                                       tcp_free_md5sig_pool();
                                return -ENOMEM;
                        }
 
@@ -964,6 +972,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
                                kfree(tp->md5sig_info->keys4);
                                tp->md5sig_info->keys4 = NULL;
                                tp->md5sig_info->alloced4 = 0;
+                               tcp_free_md5sig_pool();
                        } else if (tp->md5sig_info->entries4 != i) {
                                /* Need to do some manipulation */
                                memmove(&tp->md5sig_info->keys4[i],
@@ -971,7 +980,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
                                        (tp->md5sig_info->entries4 - i) *
                                         sizeof(struct tcp4_md5sig_key));
                        }
-                       tcp_free_md5sig_pool();
                        return 0;
                }
        }
@@ -1446,9 +1454,13 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;
 
-       if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
-               goto put_and_exit;
-
+       if (!dst) {
+               dst = inet_csk_route_child_sock(sk, newsk, req);
+               if (!dst)
+                       goto put_and_exit;
+       } else {
+               /* syncookie case : see end of cookie_v4_check() */
+       }
        sk_setup_caps(newsk, dst);
 
        tcp_mtup_init(newsk);
index 882e0b0964d045bba56b6fff551d9c0033fe8505..faf257b94154af56272650ff269de53c3eed2239 100644 (file)
@@ -1134,11 +1134,9 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
        sk_mem_uncharge(sk, len);
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 
-       /* Any change of skb->len requires recalculation of tso
-        * factor and mss.
-        */
+       /* Any change of skb->len requires recalculation of tso factor. */
        if (tcp_skb_pcount(skb) > 1)
-               tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
+               tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
 
        return 0;
 }
index 63418185f5249aaba6a8ae55be365536a56397a1..e3db3f9151146a6d393293e4e75daa63c988df1a 100644 (file)
@@ -110,10 +110,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 
        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
-
-       memmove(skb->data - skb->mac_len, skb_mac_header(skb),
-               skb->mac_len);
-       skb_set_mac_header(skb, -skb->mac_len);
+       skb_mac_header_rebuild(skb);
 
        xfrm4_beet_make_header(skb);
 
index 534972e114ac1f7cf259cb6b26413081cfdb6e9a..ed4bf11ef9f4709b9418ebeff06789c9b84b77ab 100644 (file)
@@ -66,7 +66,6 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       const unsigned char *old_mac;
        int err = -EINVAL;
 
        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -84,10 +83,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
        if (!(x->props.flags & XFRM_STATE_NOECN))
                ipip_ecn_decapsulate(skb);
 
-       old_mac = skb_mac_header(skb);
-       skb_set_mac_header(skb, -skb->mac_len);
-       memmove(skb_mac_header(skb), old_mac, skb->mac_len);
        skb_reset_network_header(skb);
+       skb_mac_header_rebuild(skb);
+
        err = 0;
 
 out:
index 981e43eaf7046c36bc5fc3961a5b564b875c0926..581fe0ab409f1f211ef87971ecf7602b4a5796bf 100644 (file)
@@ -79,13 +79,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
        struct rtable *rt = (struct rtable *)xdst->route;
        const struct flowi4 *fl4 = &fl->u.ip4;
 
-       rt->rt_key_dst = fl4->daddr;
-       rt->rt_key_src = fl4->saddr;
-       rt->rt_key_tos = fl4->flowi4_tos;
-       rt->rt_route_iif = fl4->flowi4_iif;
-       rt->rt_iif = fl4->flowi4_iif;
-       rt->rt_oif = fl4->flowi4_oif;
-       rt->rt_mark = fl4->flowi4_mark;
+       xdst->u.rt.rt_key_dst = fl4->daddr;
+       xdst->u.rt.rt_key_src = fl4->saddr;
+       xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
+       xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
+       xdst->u.rt.rt_iif = fl4->flowi4_iif;
+       xdst->u.rt.rt_oif = fl4->flowi4_oif;
+       xdst->u.rt.rt_mark = fl4->flowi4_mark;
 
        xdst->u.dst.dev = dev;
        dev_hold(dev);
index cf2cf62f33fcdb758397fd73c6af9499456370de..8a4bf719c253a52590f08c8e358c5db4dbd76348 100644 (file)
@@ -433,6 +433,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
        /* Join all-node multicast group */
        ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
 
+       /* Join all-router multicast group if forwarding is set */
+       if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST))
+               ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+
        return ndev;
 }
 
@@ -656,7 +660,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
          * layer address of our nexthop router
         */
 
-       if (rt->rt6i_nexthop == NULL)
+       if (dst_get_neighbour_raw(&rt->dst) == NULL)
                ifa->flags &= ~IFA_F_OPTIMISTIC;
 
        ifa->idev = idev;
index 2195ae651923e0e3242c3e61738fed91d19711ff..4c0f894d08432564899bb5611483f5dab3d2ae59 100644 (file)
@@ -324,8 +324,6 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
 #endif
        }
 
-       err = ah->nexthdr;
-
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
 }
@@ -466,12 +464,12 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
        if (err)
                goto out;
 
+       err = ah->nexthdr;
+
        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, hdr_len);
        __skb_pull(skb, ah_hlen + hdr_len);
        skb_set_transport_header(skb, -hdr_len);
-
-       err = ah->nexthdr;
 out:
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_input_resume(skb, err);
@@ -583,8 +581,6 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
                if (err == -EINPROGRESS)
                        goto out;
 
-               if (err == -EBUSY)
-                       err = NET_XMIT_DROP;
                goto out_free;
        }
 
index 1ac7938dd9ec38a300266be6eb64b69d5b98df1c..65dd5433f08b5e57095e47097920fc5c4ac02c35 100644 (file)
@@ -411,19 +411,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
-       u32 rem;
+       unsigned int net_adj;
 
-       mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
-       rem = mtu & (align - 1);
-       mtu &= ~(align - 1);
-
-       if (x->props.mode != XFRM_MODE_TUNNEL) {
-               u32 padsize = ((blksize - 1) & 7) + 1;
-               mtu -= blksize - padsize;
-               mtu += min_t(u32, blksize - padsize, rem);
-       }
+       if (x->props.mode != XFRM_MODE_TUNNEL)
+               net_adj = sizeof(struct ipv6hdr);
+       else
+               net_adj = 0;
 
-       return mtu - 2;
+       return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+                net_adj) & ~(align - 1)) + (net_adj - 2);
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
index 4076a0b14b2098545b795f2b106b46c81ddd0896..0f9b37a1c1d4025e2e7076a5bf2d12848dfec39d 100644 (file)
@@ -1455,7 +1455,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
                        RT6_TRACE("aging clone %p\n", rt);
                        return -1;
                } else if ((rt->rt6i_flags & RTF_GATEWAY) &&
-                          (!(rt->rt6i_nexthop->flags & NTF_ROUTER))) {
+                          (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
                        RT6_TRACE("purging route %p via non-router but gateway\n",
                                  rt);
                        return -1;
index e17596b8407aaacce155b12890d2bae4ae669b00..ae9f6d436171dd0211e955a1e5ed9f27efd133d7 100644 (file)
@@ -100,6 +100,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *dev = dst->dev;
+       struct neighbour *neigh;
+       int res;
 
        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;
@@ -134,10 +136,22 @@ static int ip6_finish_output2(struct sk_buff *skb)
                                skb->len);
        }
 
-       if (dst->hh)
-               return neigh_hh_output(dst->hh, skb);
-       else if (dst->neighbour)
-               return dst->neighbour->output(skb);
+       rcu_read_lock();
+       if (dst->hh) {
+               res = neigh_hh_output(dst->hh, skb);
+
+               rcu_read_unlock();
+               return res;
+       } else {
+               neigh = dst_get_neighbour(dst);
+               if (neigh) {
+                       res = neigh->output(skb);
+
+                       rcu_read_unlock();
+                       return res;
+               }
+               rcu_read_unlock();
+       }
 
        IP6_INC_STATS_BH(dev_net(dst->dev),
                         ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
@@ -385,6 +399,7 @@ int ip6_forward(struct sk_buff *skb)
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(dst->dev);
+       struct neighbour *n;
        u32 mtu;
 
        if (net->ipv6.devconf_all->forwarding == 0)
@@ -459,11 +474,10 @@ int ip6_forward(struct sk_buff *skb)
           send redirects to source routed frames.
           We don't send redirects to frames decapsulated from IPsec.
         */
-       if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 &&
-           !skb_sec_path(skb)) {
+       n = dst_get_neighbour(dst);
+       if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
                struct in6_addr *target = NULL;
                struct rt6_info *rt;
-               struct neighbour *n = dst->neighbour;
 
                /*
                 *      incoming and outgoing devices are the same
@@ -949,8 +963,11 @@ out:
 static int ip6_dst_lookup_tail(struct sock *sk,
                               struct dst_entry **dst, struct flowi6 *fl6)
 {
-       int err;
        struct net *net = sock_net(sk);
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+       struct neighbour *n;
+#endif
+       int err;
 
        if (*dst == NULL)
                *dst = ip6_route_output(net, sk, fl6);
@@ -976,11 +993,14 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         * dst entry and replace it instead with the
         * dst entry of the nexthop router
         */
-       if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) {
+       rcu_read_lock();
+       n = dst_get_neighbour(*dst);
+       if (n && !(n->nud_state & NUD_VALID)) {
                struct inet6_ifaddr *ifp;
                struct flowi6 fl_gw6;
                int redirect;
 
+               rcu_read_unlock();
                ifp = ipv6_get_ifaddr(net, &fl6->saddr,
                                      (*dst)->dev, 1);
 
@@ -1000,6 +1020,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
                        if ((err = (*dst)->error))
                                goto out_err_release;
                }
+       } else {
+               rcu_read_unlock();
        }
 #endif
 
@@ -1172,6 +1194,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
        return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
 }
 
+static void ip6_append_data_mtu(int *mtu,
+                               int *maxfraglen,
+                               unsigned int fragheaderlen,
+                               struct sk_buff *skb,
+                               struct rt6_info *rt)
+{
+       if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+               if (skb == NULL) {
+                       /* first fragment, reserve header_len */
+                       *mtu = *mtu - rt->dst.header_len;
+
+               } else {
+                       /*
+                        * This fragment is not the first; the header
+                        * space is regarded as data space.
+                        */
+                       *mtu = dst_mtu(rt->dst.path);
+               }
+               *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+                             + fragheaderlen - sizeof(struct frag_hdr);
+       }
+}
+
 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        int offset, int len, int odd, struct sk_buff *skb),
        void *from, int length, int transhdrlen,
@@ -1181,7 +1226,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet_cork *cork;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *skb_prev = NULL;
        unsigned int maxfraglen, fragheaderlen;
        int exthdrlen;
        int hh_len;
@@ -1238,8 +1283,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                inet->cork.fl.u.ip6 = *fl6;
                np->cork.hop_limit = hlimit;
                np->cork.tclass = tclass;
-               mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
-                     rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+               if (rt->dst.flags & DST_XFRM_TUNNEL)
+                       mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+                             rt->dst.dev->mtu : dst_mtu(&rt->dst);
+               else
+                       mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+                             rt->dst.dev->mtu : dst_mtu(rt->dst.path);
                if (np->frag_size < mtu) {
                        if (np->frag_size)
                                mtu = np->frag_size;
@@ -1334,38 +1383,43 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
-                       struct sk_buff *skb_prev;
 alloc_new_skb:
-                       skb_prev = skb;
-
                        /* There's no room in the current skb */
-                       if (skb_prev)
-                               fraggap = skb_prev->len - maxfraglen;
+                       if (skb)
+                               fraggap = skb->len - maxfraglen;
                        else
                                fraggap = 0;
+                       /* update mtu and maxfraglen if necessary */
+                       if (skb == NULL || skb_prev == NULL)
+                               ip6_append_data_mtu(&mtu, &maxfraglen,
+                                                   fragheaderlen, skb, rt);
+
+                       skb_prev = skb;
 
                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
-                       if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
-                               datalen = maxfraglen - fragheaderlen;
 
-                       fraglen = datalen + fragheaderlen;
+                       if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+                               datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = datalen + fragheaderlen;
 
-                       /*
-                        * The last fragment gets additional space at tail.
-                        * Note: we overallocate on fragments with MSG_MODE
-                        * because we have no idea if we're the last one.
-                        */
-                       if (datalen == length + fraggap)
-                               alloclen += rt->dst.trailer_len;
+                       if (datalen != length + fraggap) {
+                               /*
+                               /*
+                                * This is not the last fragment; the trailer
+                                * space is regarded as data space.
+                                */
+                       }
+
+                       alloclen += rt->dst.trailer_len;
+                       fraglen = datalen + fragheaderlen;
 
                        /*
                         * We just reserve space for fragment header.
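
The ip6_append_data() changes above recompute the MTU budget per fragment when the route carries tunnel-mode IPsec overhead; the key expression is the maxfraglen formula in ip6_append_data_mtu(). A small sketch of that formula with an assumed 8-byte fragment header; the names are illustrative.

#include <stdio.h>

#define FRAG_HDR_LEN    8       /* size of the IPv6 fragment header */

/* Largest fragment length: the fragmentable part rounded down to a multiple
 * of 8 bytes, plus the unfragmentable headers, minus room for the fragment
 * header itself -- mirroring the formula in the hunk above.
 */
static unsigned int max_frag_len(unsigned int mtu, unsigned int fragheaderlen)
{
        return ((mtu - fragheaderlen) & ~7) + fragheaderlen - FRAG_HDR_LEN;
}

int main(void)
{
        /* e.g. a 1500-byte MTU with 40 bytes of IPv6 header */
        printf("%u\n", max_frag_len(1500, 40));
        return 0;
}
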
index 36c2842a86b25c8714e166b4b01a8293ebb2fa1b..848e494fa3c59c032667c92ad9cdb051a6e01bbf 100644 (file)
@@ -289,6 +289,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
        if ((err = register_netdevice(dev)) < 0)
                goto failed_free;
 
+       strcpy(t->parms.name, dev->name);
+
        dev_hold(dev);
        ip6_tnl_link(ip6n, t);
        return t;
@@ -1397,7 +1399,6 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
 
        t->dev = dev;
-       strcpy(t->parms.name, dev->name);
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
@@ -1477,6 +1478,7 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
 static int __net_init ip6_tnl_init_net(struct net *net)
 {
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+       struct ip6_tnl *t = NULL;
        int err;
 
        ip6n->tnls[0] = ip6n->tnls_wc;
@@ -1497,6 +1499,10 @@ static int __net_init ip6_tnl_init_net(struct net *net)
        err = register_netdev(ip6n->fb_tnl_dev);
        if (err < 0)
                goto err_register;
+
+       t = netdev_priv(ip6n->fb_tnl_dev);
+
+       strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
        return 0;
 
 err_register:
index 82a809901f8e9d3522f562562edf1e0f5e05e74d..86e3cc10fc2e44b38250e2142f9cd3fd79928bf5 100644 (file)
@@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
        int err;
 
        err = ip6mr_fib_lookup(net, &fl6, &mrt);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return err;
+       }
 
        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
@@ -2051,8 +2053,10 @@ int ip6_mr_input(struct sk_buff *skb)
        int err;
 
        err = ip6mr_fib_lookup(net, &fl6, &mrt);
-       if (err < 0)
+       if (err < 0) {
+               kfree_skb(skb);
                return err;
+       }
 
        read_lock(&mrt_lock);
        cache = ip6mr_cache_find(mrt,
index ee7839f4d6e3450c222f2b2677d3b9cf0709d21e..f2d74ea19a761d9caa20cacda4af28d8bebe715e 100644 (file)
@@ -257,7 +257,6 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
 
                if (rt) {
                        dev = rt->rt6i_dev;
-                       dev_hold(dev);
                        dst_release(&rt->dst);
                }
        } else
@@ -2055,7 +2054,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                if (!delta)
                        pmc->mca_sfcount[sfmode]--;
                for (j=0; j<i; j++)
-                       (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
+                       ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
        } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
                struct ip6_sf_list *psf;
 
index 7596f071d3088cd86643ade87203b39eb42c6552..10a8d411707ed2dbc6a566baf6103c242bf4dbfe 100644 (file)
@@ -1244,7 +1244,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
 
        if (rt)
-               neigh = rt->rt6i_nexthop;
+               neigh = dst_get_neighbour(&rt->dst);
 
        if (rt && lifetime == 0) {
                neigh_clone(neigh);
@@ -1265,7 +1265,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                        return;
                }
 
-               neigh = rt->rt6i_nexthop;
+               neigh = dst_get_neighbour(&rt->dst);
                if (neigh == NULL) {
                        ND_PRINTK0(KERN_ERR
                                   "ICMPv6 RA: %s() got default router without neighbour.\n",
index 0ef1f086feb8ec294ed71fef231f0cab1642faa7..7ef5d08201859227451a857a1d480a69fcc34d27 100644 (file)
@@ -233,7 +233,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
 {
        struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
 
-       memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
+       if (rt != NULL)
+               memset(&rt->rt6i_table, 0,
+                       sizeof(*rt) - sizeof(struct dst_entry));
 
        return rt;
 }
@@ -356,7 +358,7 @@ out:
 #ifdef CONFIG_IPV6_ROUTER_PREF
 static void rt6_probe(struct rt6_info *rt)
 {
-       struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
+       struct neighbour *neigh;
        /*
         * Okay, this does not seem to be appropriate
         * for now, however, we need to check if it
@@ -365,8 +367,10 @@ static void rt6_probe(struct rt6_info *rt)
         * Router Reachability Probe MUST be rate-limited
         * to no more than one per minute.
         */
+       rcu_read_lock();
+       neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
        if (!neigh || (neigh->nud_state & NUD_VALID))
-               return;
+               goto out;
        read_lock_bh(&neigh->lock);
        if (!(neigh->nud_state & NUD_VALID) &&
            time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
@@ -379,8 +383,11 @@ static void rt6_probe(struct rt6_info *rt)
                target = (struct in6_addr *)&neigh->primary_key;
                addrconf_addr_solict_mult(target, &mcaddr);
                ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
-       } else
+       } else {
                read_unlock_bh(&neigh->lock);
+       }
+out:
+       rcu_read_unlock();
 }
 #else
 static inline void rt6_probe(struct rt6_info *rt)
@@ -404,8 +411,11 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
 
 static inline int rt6_check_neigh(struct rt6_info *rt)
 {
-       struct neighbour *neigh = rt->rt6i_nexthop;
+       struct neighbour *neigh;
        int m;
+
+       rcu_read_lock();
+       neigh = dst_get_neighbour(&rt->dst);
        if (rt->rt6i_flags & RTF_NONEXTHOP ||
            !(rt->rt6i_flags & RTF_GATEWAY))
                m = 1;
@@ -422,6 +432,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
                read_unlock_bh(&neigh->lock);
        } else
                m = 0;
+       rcu_read_unlock();
        return m;
 }
 
@@ -745,8 +756,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_add
                        dst_free(&rt->dst);
                        return NULL;
                }
-               rt->rt6i_nexthop = neigh;
-
+               dst_set_neighbour(&rt->dst, neigh);
        }
 
        return rt;
@@ -760,7 +770,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, const struct in6_a
                rt->rt6i_dst.plen = 128;
                rt->rt6i_flags |= RTF_CACHE;
                rt->dst.flags |= DST_HOST;
-               rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
+               dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
        }
        return rt;
 }
@@ -794,7 +804,7 @@ restart:
        dst_hold(&rt->dst);
        read_unlock_bh(&table->tb6_lock);
 
-       if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
        else if (!(rt->dst.flags & DST_HOST))
                nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -1058,7 +1068,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        }
 
        rt->rt6i_idev     = idev;
-       rt->rt6i_nexthop  = neigh;
+       dst_set_neighbour(&rt->dst, neigh);
        atomic_set(&rt->dst.__refcnt, 1);
        dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
        rt->dst.output  = ip6_output;
@@ -1338,12 +1348,12 @@ int ip6_route_add(struct fib6_config *cfg)
                rt->rt6i_prefsrc.plen = 0;
 
        if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
-               rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
-               if (IS_ERR(rt->rt6i_nexthop)) {
-                       err = PTR_ERR(rt->rt6i_nexthop);
-                       rt->rt6i_nexthop = NULL;
+               struct neighbour *neigh = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
+               if (IS_ERR(neigh)) {
+                       err = PTR_ERR(neigh);
                        goto out;
                }
+               dst_set_neighbour(&rt->dst, neigh);
        }
 
        rt->rt6i_flags = cfg->fc_flags;
@@ -1574,7 +1584,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
        dst_confirm(&rt->dst);
 
        /* Duplicate redirect: silently ignore. */
-       if (neigh == rt->dst.neighbour)
+       if (neigh == dst_get_neighbour_raw(&rt->dst))
                goto out;
 
        nrt = ip6_rt_copy(rt);
@@ -1590,7 +1600,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
        nrt->dst.flags |= DST_HOST;
 
        ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
-       nrt->rt6i_nexthop = neigh_clone(neigh);
+       dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
 
        if (ip6_ins_rt(nrt))
                goto out;
@@ -1670,7 +1680,7 @@ again:
           1. It is connected route. Action: COW
           2. It is gatewayed route or NONEXTHOP route. Action: clone it.
         */
-       if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, daddr, saddr);
        else
                nrt = rt6_alloc_clone(rt, daddr);
@@ -2035,7 +2045,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 
                return ERR_CAST(neigh);
        }
-       rt->rt6i_nexthop = neigh;
+       dst_set_neighbour(&rt->dst, neigh);
 
        ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
        rt->rt6i_dst.plen = 128;
@@ -2312,6 +2322,7 @@ static int rt6_fill_node(struct net *net,
        struct nlmsghdr *nlh;
        long expires;
        u32 table;
+       struct neighbour *n;
 
        if (prefix) {   /* user wants prefix routes only */
                if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2400,8 +2411,15 @@ static int rt6_fill_node(struct net *net,
        if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto nla_put_failure;
 
-       if (rt->dst.neighbour)
-               NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key);
+       rcu_read_lock();
+       n = dst_get_neighbour(&rt->dst);
+       if (n) {
+               if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
+                       rcu_read_unlock();
+                       goto nla_put_failure;
+               }
+       }
+       rcu_read_unlock();
 
        if (rt->dst.dev)
                NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
@@ -2585,6 +2603,7 @@ struct rt6_proc_arg
 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
 {
        struct seq_file *m = p_arg;
+       struct neighbour *n;
 
        seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
 
@@ -2593,12 +2612,14 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
 #else
        seq_puts(m, "00000000000000000000000000000000 00 ");
 #endif
-
-       if (rt->rt6i_nexthop) {
-               seq_printf(m, "%pi6", rt->rt6i_nexthop->primary_key);
+       rcu_read_lock();
+       n = dst_get_neighbour(&rt->dst);
+       if (n) {
+               seq_printf(m, "%pi6", n->primary_key);
        } else {
                seq_puts(m, "00000000000000000000000000000000");
        }
+       rcu_read_unlock();
        seq_printf(m, " %08x %08x %08x %08x %8s\n",
                   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
                   rt->dst.__use, rt->rt6i_flags,
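
The route.c hunks above all make the same substitution: direct use of rt->rt6i_nexthop and rt->dst.neighbour is replaced by the dst_set_neighbour()/dst_get_neighbour()/dst_get_neighbour_raw() accessors, with dst_get_neighbour() called only inside an RCU read-side section; the _raw variant appears where the result is only compared against NULL or an existing pointer. A minimal kernel-style sketch of the reader side (the function name is ours, not from the patch):

static bool rt6_neigh_present(struct rt6_info *rt)
{
	struct neighbour *neigh;
	bool present;

	rcu_read_lock();                      /* dst_get_neighbour() needs the RCU read side */
	neigh = dst_get_neighbour(&rt->dst);  /* replaces direct rt->rt6i_nexthop access */
	present = neigh != NULL;
	rcu_read_unlock();

	return present;
}
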
index 1cca5761aea9709e39302c82df57acd777676d7d..f56acd096598d50ea63dba0b05f0ac5f47b5a18d 100644 (file)
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       strcpy(nt->parms.name, dev->name);
+
        dev_hold(dev);
 
        ipip6_tunnel_link(sitn, nt);
@@ -677,7 +679,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                struct neighbour *neigh = NULL;
 
                if (skb_dst(skb))
-                       neigh = skb_dst(skb)->neighbour;
+                       neigh = dst_get_neighbour(skb_dst(skb));
 
                if (neigh == NULL) {
                        if (net_ratelimit())
@@ -702,7 +704,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                struct neighbour *neigh = NULL;
 
                if (skb_dst(skb))
-                       neigh = skb_dst(skb)->neighbour;
+                       neigh = dst_get_neighbour(skb_dst(skb));
 
                if (neigh == NULL) {
                        if (net_ratelimit())
@@ -1141,7 +1143,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
-       strcpy(tunnel->parms.name, dev->name);
 
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1204,6 +1205,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
 static int __net_init sit_init_net(struct net *net)
 {
        struct sit_net *sitn = net_generic(net, sit_net_id);
+       struct ip_tunnel *t;
        int err;
 
        sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1228,6 +1230,9 @@ static int __net_init sit_init_net(struct net *net)
        if ((err = register_netdev(sitn->fb_tunnel_dev)))
                goto err_reg_dev;
 
+       t = netdev_priv(sitn->fb_tunnel_dev);
+
+       strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
        return 0;
 
 err_reg_dev:
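
The sit.c change moves the strcpy() of parms.name out of ipip6_tunnel_init() to just after register_netdevice()/register_netdev(), presumably because registration is what resolves a template such as "sit%d" into the final interface name; copying earlier records the unresolved template. Illustrative fragment built from the identifiers in the hunk:

	if (register_netdevice(dev) < 0)     /* "%d"-style names are resolved here */
		goto failed_free;

	strcpy(nt->parms.name, dev->name);   /* record the name the device actually got */
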
index 7c43e861475fd68d8d37839832e4053bbfe10964..848f9634bbdfe968701540db7edd2f6ca7851258 100644 (file)
@@ -605,7 +605,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                        }
                        sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                }
-               if (tcp_alloc_md5sig_pool(sk) == NULL) {
+               if (tp->md5sig_info->entries6 == 0 &&
+                       tcp_alloc_md5sig_pool(sk) == NULL) {
                        kfree(newkey);
                        return -ENOMEM;
                }
@@ -614,8 +615,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                                       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
 
                        if (!keys) {
-                               tcp_free_md5sig_pool();
                                kfree(newkey);
+                               if (tp->md5sig_info->entries6 == 0)
+                                       tcp_free_md5sig_pool();
                                return -ENOMEM;
                        }
 
@@ -661,6 +663,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
                                kfree(tp->md5sig_info->keys6);
                                tp->md5sig_info->keys6 = NULL;
                                tp->md5sig_info->alloced6 = 0;
+                               tcp_free_md5sig_pool();
                        } else {
                                /* shrink the database */
                                if (tp->md5sig_info->entries6 != i)
@@ -669,7 +672,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
                                                (tp->md5sig_info->entries6 - i)
                                                * sizeof (tp->md5sig_info->keys6[0]));
                        }
-                       tcp_free_md5sig_pool();
                        return 0;
                }
        }
@@ -1094,7 +1096,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 
 #ifdef CONFIG_TCP_MD5SIG
        if (sk)
-               key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
+               key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
 #endif
 
        if (th->ack)
@@ -1407,6 +1409,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+               newnp->ipv6_ac_list = NULL;
+               newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = inet6_iif(skb);
@@ -1471,6 +1475,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
           First: no IPv4 options.
         */
        newinet->inet_opt = NULL;
+       newnp->ipv6_ac_list = NULL;
        newnp->ipv6_fl_list = NULL;
 
        /* Clone RX bits */
@@ -1509,6 +1514,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        tcp_mtup_init(newsk);
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
+       if (tcp_sk(sk)->rx_opt.user_mss &&
+           tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
+               newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+
        tcp_initialize_rcv_mss(newsk);
 
        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
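
Several independent fixes land in tcp_ipv6.c: the RST path now looks up the MD5 key by the packet's source address (the peer being reset), new child sockets initialise ipv6_ac_list, advmss is clamped to the listener's user_mss, and the MD5 pool refcount is only taken when the first IPv6 key is installed and only dropped when the last one is removed, so a failed expansion no longer releases a reference it never held. A sketch of that pairing rule, with hypothetical helper names around the real pool functions:

static int md5_keys6_grow(struct sock *sk, struct tcp_md5sig_info *md5)
{
	if (md5->entries6 == 0 && tcp_alloc_md5sig_pool(sk) == NULL)
		return -ENOMEM;            /* only the first key takes the shared pool */
	md5->entries6++;
	return 0;
}

static void md5_keys6_shrink(struct tcp_md5sig_info *md5)
{
	if (--md5->entries6 == 0)
		tcp_free_md5sig_pool();    /* only removing the last key releases it */
}
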
index 3437d7d4eed6dcff351adcd7645dffd919c3477e..f37cba9e6891f9b3c503a98856e51e8ef5847f29 100644 (file)
@@ -80,7 +80,6 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        struct ipv6hdr *ip6h;
-       const unsigned char *old_mac;
        int size = sizeof(struct ipv6hdr);
        int err;
 
@@ -90,10 +89,7 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 
        __skb_push(skb, size);
        skb_reset_network_header(skb);
-
-       old_mac = skb_mac_header(skb);
-       skb_set_mac_header(skb, -skb->mac_len);
-       memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+       skb_mac_header_rebuild(skb);
 
        xfrm6_beet_make_header(skb);
 
index 4d6edff0498f6a1e55e2301aa958d0fde5556baa..23ecd68a5e62be4c88d11f311accdda151e8bf89 100644 (file)
@@ -63,7 +63,6 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err = -EINVAL;
-       const unsigned char *old_mac;
 
        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
                goto out;
@@ -80,10 +79,9 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
        if (!(x->props.flags & XFRM_STATE_NOECN))
                ipip6_ecn_decapsulate(skb);
 
-       old_mac = skb_mac_header(skb);
-       skb_set_mac_header(skb, -skb->mac_len);
-       memmove(skb_mac_header(skb), old_mac, skb->mac_len);
        skb_reset_network_header(skb);
+       skb_mac_header_rebuild(skb);
+
        err = 0;
 
 out:
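
Both xfrm6 input hunks replace the same open-coded MAC-header fix-up with skb_mac_header_rebuild(). Judging from the lines being removed, the helper is presumably equivalent to the sequence below, likely guarded by skb_mac_header_was_set():

	const unsigned char *old_mac = skb_mac_header(skb);

	skb_set_mac_header(skb, -skb->mac_len);
	memmove(skb_mac_header(skb), old_mac, skb->mac_len);
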
index ed8a2335442ffa5299450b341bb20b70a586f18a..71c292e3e039d5b61c3fe42f108c951b4b3fe3dc 100644 (file)
@@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
        headroom = NET_SKB_PAD + sizeof(struct iphdr) +
                uhlen + hdr_len;
        old_headroom = skb_headroom(skb);
-       if (skb_cow_head(skb, headroom))
+       if (skb_cow_head(skb, headroom)) {
+               dev_kfree_skb(skb);
                goto abort;
+       }
 
        new_headroom = skb_headroom(skb);
        skb_orphan(skb);
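
The l2tp_core.c hunk plugs a leak rather than changing behaviour: on the xmit path an skb must be consumed exactly once, skb_cow_head() can fail without consuming it, and the abort path in l2tp_xmit_skb() does not free it, so the failure site has to:

	if (skb_cow_head(skb, headroom)) {
		dev_kfree_skb(skb);   /* nothing downstream will ever see this skb */
		goto abort;
	}
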
index b6466e71f5e1dc435006d370552f06557172c930..78bc442b2b6f547b594fa5b9a521218fd715d896 100644 (file)
@@ -251,9 +251,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
-       int ret = -EINVAL;
+       int ret;
        int chk_addr_ret;
 
+       if (!sock_flag(sk, SOCK_ZAPPED))
+               return -EINVAL;
+       if (addr_len < sizeof(struct sockaddr_l2tpip))
+               return -EINVAL;
+       if (addr->l2tp_family != AF_INET)
+               return -EINVAL;
+
        ret = -EADDRINUSE;
        read_lock_bh(&l2tp_ip_lock);
        if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -283,6 +290,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);
        ret = 0;
+       sock_reset_flag(sk, SOCK_ZAPPED);
+
 out:
        release_sock(sk);
 
@@ -303,13 +312,14 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        __be32 saddr;
        int oif, rc;
 
-       rc = -EINVAL;
+       if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+               return -EINVAL;
+
        if (addr_len < sizeof(*lsa))
-               goto out;
+               return -EINVAL;
 
-       rc = -EAFNOSUPPORT;
        if (lsa->l2tp_family != AF_INET)
-               goto out;
+               return -EAFNOSUPPORT;
 
        lock_sock(sk);
 
@@ -363,6 +373,14 @@ out:
        return rc;
 }
 
+static int l2tp_ip_disconnect(struct sock *sk, int flags)
+{
+       if (sock_flag(sk, SOCK_ZAPPED))
+               return 0;
+
+       return udp_disconnect(sk, flags);
+}
+
 static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
                           int *uaddr_len, int peer)
 {
@@ -393,11 +411,6 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
 {
        int rc;
 
-       if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
-               goto drop;
-
-       nf_reset(skb);
-
        /* Charge it to the socket, dropping if the queue is full. */
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0)
@@ -446,8 +459,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 
                daddr = lip->l2tp_addr.s_addr;
        } else {
+               rc = -EDESTADDRREQ;
                if (sk->sk_state != TCP_ESTABLISHED)
-                       return -EDESTADDRREQ;
+                       goto out;
 
                daddr = inet->inet_daddr;
                connected = 1;
@@ -595,7 +609,7 @@ static struct proto l2tp_ip_prot = {
        .close             = l2tp_ip_close,
        .bind              = l2tp_ip_bind,
        .connect           = l2tp_ip_connect,
-       .disconnect        = udp_disconnect,
+       .disconnect        = l2tp_ip_disconnect,
        .ioctl             = udp_ioctl,
        .destroy           = l2tp_ip_destroy_sock,
        .setsockopt        = ip_setsockopt,
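
The l2tp_ip changes gate the socket life cycle on SOCK_ZAPPED: a fresh socket is zapped, bind() refuses to run a second time and clears the flag once the address is installed, connect() refuses to run until bind() has cleared it (there is no autobind), and the new l2tp_ip_disconnect() turns disconnect into a no-op for a never-bound socket. Compressed sketch (helper names are ours):

static int l2tp_bind_gate(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED))     /* second bind() on the same socket */
		return -EINVAL;
	/* ... validate the address and insert into the bind hash ... */
	sock_reset_flag(sk, SOCK_ZAPPED);    /* connect()/sendmsg() may now proceed */
	return 0;
}

static int l2tp_connect_gate(struct sock *sk)
{
	if (sock_flag(sk, SOCK_ZAPPED))      /* must bind first - no autobind */
		return -EINVAL;
	/* ... */
	return 0;
}
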
index 39a21d0c61c48f2506bc7d24e7eeeee1610a11ae..13f9868e694927c2aad43da0d815c637648b885b 100644 (file)
@@ -908,7 +908,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
                goto end_put_sess;
        }
 
-       inet = inet_sk(sk);
+       inet = inet_sk(tunnel->sock);
        if (tunnel->version == 2) {
                struct sockaddr_pppol2tp sp;
                len = sizeof(sp);
index dfd3a648a55107bda2ff14adb6f9e91c06449240..a18e6c3d36e37e699089ed5e0910c857da073d1c 100644 (file)
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                copied += used;
                len -= used;
 
+               /* For non stream protcols we get one packet per recvmsg call */
+               if (sk->sk_type != SOCK_STREAM)
+                       goto copy_uaddr;
+
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, 0);
                        *seq = 0;
                }
 
-               /* For non stream protcols we get one packet per recvmsg call */
-               if (sk->sk_type != SOCK_STREAM)
-                       goto copy_uaddr;
-
                /* Partial read */
                if (used + offset < skb->len)
                        continue;
@@ -857,6 +857,12 @@ copy_uaddr:
        }
        if (llc_sk(sk)->cmsg_flags)
                llc_cmsg_rcv(msg, skb);
+
+       if (!(flags & MSG_PEEK)) {
+                       sk_eat_skb(sk, skb, 0);
+                       *seq = 0;
+       }
+
        goto out;
 }
 
index 9c0d76cdca920ed3b322c4b70ed86f044b4a404b..1a41b1423d2415ea1c8fdccf9da77d0beb7bdad0 100644 (file)
@@ -48,6 +48,8 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
                container_of(h, struct tid_ampdu_rx, rcu_head);
        int i;
 
+       del_timer_sync(&tid_rx->reorder_timer);
+
        for (i = 0; i < tid_rx->buf_size; i++)
                dev_kfree_skb(tid_rx->reorder_buf[i]);
        kfree(tid_rx->reorder_buf);
@@ -87,7 +89,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
                                     tid, 0, reason);
 
        del_timer_sync(&tid_rx->session_timer);
-       del_timer_sync(&tid_rx->reorder_timer);
 
        call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
 }
index c8be8eff70daa5595b823c8648a1261153fe71b1..b7f4f5c1f693bd93c9e644aa6c805a78866d838a 100644 (file)
@@ -162,6 +162,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                return -ENOENT;
        }
 
+       /* if we're already stopping ignore any new requests to stop */
+       if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+               spin_unlock_bh(&sta->lock);
+               return -EALREADY;
+       }
+
        if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
                /* not even started yet! */
                ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -170,6 +176,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                return 0;
        }
 
+       set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+
        spin_unlock_bh(&sta->lock);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -177,8 +185,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
               sta->sta.addr, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
-       set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
-
        del_timer_sync(&tid_tx->addba_resp_timer);
 
        /*
@@ -188,6 +194,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
         */
        clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
 
+       /*
+        * There might be a few packets being processed right now (on
+        * another CPU) that have already gotten past the aggregation
+        * check when it was still OPERATIONAL and consequently have
+        * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
+        * call into the driver at the same time or even before the
+        * TX paths calls into it, which could confuse the driver.
+        *
+        * Wait for all currently running TX paths to finish before
+        * telling the driver. New packets will not go through since
+        * the aggregation session is no longer OPERATIONAL.
+        */
+       synchronize_net();
+
        tid_tx->stop_initiator = initiator;
        tid_tx->tx_stop = tx;
 
@@ -284,6 +304,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
        __release(agg_queue);
 }
 
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+                            struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+       int queue = ieee80211_ac_from_tid(tid);
+       unsigned long flags;
+
+       ieee80211_stop_queue_agg(local, tid);
+
+       if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+                         " from the pending queue\n", tid))
+               return;
+
+       if (!skb_queue_empty(&tid_tx->pending)) {
+               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+               /* copy over remaining packets */
+               skb_queue_splice_tail_init(&tid_tx->pending,
+                                          &local->pending[queue]);
+               spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+       }
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+{
+       ieee80211_wake_queue_agg(local, tid);
+}
+
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
        struct tid_ampdu_tx *tid_tx;
@@ -295,19 +347,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
        /*
-        * While we're asking the driver about the aggregation,
-        * stop the AC queue so that we don't have to worry
-        * about frames that came in while we were doing that,
-        * which would require us to put them to the AC pending
-        * afterwards which just makes the code more complex.
+        * Start queuing up packets for this aggregation session.
+        * We're going to release them once the driver is OK with
+        * that.
         */
-       ieee80211_stop_queue_agg(local, tid);
-
        clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
 
        /*
-        * make sure no packets are being processed to get
-        * valid starting sequence number
+        * Make sure no packets are being processed. This ensures that
+        * we have a valid starting sequence number and that in-flight
+        * packets have been flushed out and no packets for this TID
+        * will go into the driver during the ampdu_action call.
         */
        synchronize_net();
 
@@ -321,17 +371,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
                                        " tid %d\n", tid);
 #endif
                spin_lock_bh(&sta->lock);
+               ieee80211_agg_splice_packets(local, tid_tx, tid);
                ieee80211_assign_tid_tx(sta, tid, NULL);
+               ieee80211_agg_splice_finish(local, tid);
                spin_unlock_bh(&sta->lock);
 
-               ieee80211_wake_queue_agg(local, tid);
                kfree_rcu(tid_tx, rcu_head);
                return;
        }
 
-       /* we can take packets again now */
-       ieee80211_wake_queue_agg(local, tid);
-
        /* activate the timer for the recipient's addBA response */
        mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -451,38 +499,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
 
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_local *local,
-                            struct tid_ampdu_tx *tid_tx, u16 tid)
-{
-       int queue = ieee80211_ac_from_tid(tid);
-       unsigned long flags;
-
-       ieee80211_stop_queue_agg(local, tid);
-
-       if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
-                         " from the pending queue\n", tid))
-               return;
-
-       if (!skb_queue_empty(&tid_tx->pending)) {
-               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-               /* copy over remaining packets */
-               skb_queue_splice_tail_init(&tid_tx->pending,
-                                          &local->pending[queue]);
-               spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-       }
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
-{
-       ieee80211_wake_queue_agg(local, tid);
-}
-
 static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
                                         struct sta_info *sta, u16 tid)
 {
@@ -772,12 +788,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
                goto out;
        }
 
-       del_timer(&tid_tx->addba_resp_timer);
+       del_timer_sync(&tid_tx->addba_resp_timer);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
 #endif
 
+       /*
+        * addba_resp_timer may have fired before we got here, and
+        * caused WANT_STOP to be set. If the stop then was already
+        * processed further, STOPPING might be set.
+        */
+       if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
+           test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+               printk(KERN_DEBUG
+                      "got addBA resp for tid %d but we already gave up\n",
+                      tid);
+#endif
+               goto out;
+       }
+
        if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
                        == WLAN_STATUS_SUCCESS) {
                /*
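
The agg-tx.c hunks tighten session teardown: a stop request for a session that is already stopping returns -EALREADY, HT_AGG_STATE_STOPPING is set before sta->lock is dropped, and a synchronize_net() after clearing OPERATIONAL lets TX paths that already passed the aggregation check drain before the driver is told to stop; the addba-response handler likewise switches to del_timer_sync() and bails out if the session was given up in the meantime. Condensed ordering sketch (the function shape and lock parameter are ours, the state bits are from the patch):

static int stop_tx_agg_sketch(struct tid_ampdu_tx *tid_tx, spinlock_t *sta_lock)
{
	spin_lock_bh(sta_lock);
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(sta_lock);
		return -EALREADY;                        /* duplicate stop request */
	}
	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);  /* before dropping the lock */
	spin_unlock_bh(sta_lock);

	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	synchronize_net();               /* drain TX paths that still saw OPERATIONAL */
	/* only now notify the driver */
	return 0;
}
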
index be70c70d3f5bfa6ecfb57e56d3b12eb06e6f78e9..143a0064348aaa489ac6ec17d6ceccfee4d38b0f 100644 (file)
@@ -1798,7 +1798,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
         * so in that case userspace will have to deal with it.
         */
 
-       if (wk->offchan_tx.wait && wk->offchan_tx.frame)
+       if (wk->offchan_tx.wait && !wk->offchan_tx.status)
                cfg80211_mgmt_tx_status(wk->sdata->dev,
                                        (unsigned long) wk->offchan_tx.frame,
                                        wk->ie, wk->ie_len, false, GFP_KERNEL);
index 090b0ec1e05653c246973a74de8fe4c6057f261e..3fdac77b9cc344669ec8584f8a833addd43b7d7a 100644 (file)
@@ -328,6 +328,7 @@ struct ieee80211_work {
                struct {
                        struct sk_buff *frame;
                        u32 wait;
+                       bool status;
                } offchan_tx;
        };
 
@@ -372,6 +373,7 @@ struct ieee80211_if_managed {
 
        unsigned long timers_running; /* used for quiesce/restart */
        bool powersave; /* powersave requested for this iface */
+       bool broken_ap; /* AP is broken -- turn off powersave */
        enum ieee80211_smps_mode req_smps, /* requested smps mode */
                                 ap_smps, /* smps mode AP thinks we're in */
                                 driver_smps_mode; /* smps mode request */
index 895eec19f2e218d9763b97add32ff3b20136a3b3..65f3764c5aa2c322dada680293f0605f8f856318 100644 (file)
@@ -498,6 +498,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                ieee80211_configure_filter(local);
                break;
        default:
+               mutex_lock(&local->mtx);
+               if (local->hw_roc_dev == sdata->dev &&
+                   local->hw_roc_channel) {
+                       /* ignore return value since this is racy */
+                       drv_cancel_remain_on_channel(local);
+                       ieee80211_queue_work(&local->hw, &local->hw_roc_done);
+               }
+               mutex_unlock(&local->mtx);
+
+               flush_work(&local->hw_roc_start);
+               flush_work(&local->hw_roc_done);
+
                flush_work(&sdata->work);
                /*
                 * When we get here, the interface is marked down.
index 866f269183cf9a1532f317e3b9f52bc437a98213..1e36fb3318cb6994a3298cd0024a253f18cb3cda 100644 (file)
@@ -910,6 +910,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
                            result);
 
+       ieee80211_led_init(local);
+
        rtnl_lock();
 
        result = ieee80211_init_rate_ctrl_alg(local,
@@ -931,8 +933,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
        rtnl_unlock();
 
-       ieee80211_led_init(local);
-
        local->network_latency_notifier.notifier_call =
                ieee80211_max_network_latency;
        result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
index 7a334fdd8d6cf250896fac701be844141ada0b1c..1563250a5579a1f605e812f7dcac739c47583f00 100644 (file)
@@ -613,6 +613,9 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
        if (!mgd->powersave)
                return false;
 
+       if (mgd->broken_ap)
+               return false;
+
        if (!mgd->associated)
                return false;
 
@@ -1450,10 +1453,21 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
        capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
 
        if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
-               printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
-                      "set\n", sdata->name, aid);
+               printk(KERN_DEBUG
+                      "%s: invalid AID value 0x%x; bits 15:14 not set\n",
+                      sdata->name, aid);
        aid &= ~(BIT(15) | BIT(14));
 
+       ifmgd->broken_ap = false;
+
+       if (aid == 0 || aid > IEEE80211_MAX_AID) {
+               printk(KERN_DEBUG
+                      "%s: invalid AID value %d (out of range), turn off PS\n",
+                      sdata->name, aid);
+               aid = 0;
+               ifmgd->broken_ap = true;
+       }
+
        pos = mgmt->u.assoc_resp.variable;
        ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
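
The mlme.c hunks treat an association ID of 0 or above the 802.11 maximum as evidence of a broken AP: the value is logged, the AID is zeroed, ifmgd->broken_ap is set, and ieee80211_powersave_allowed() then refuses to enter powersave against such an AP. For reference, valid AIDs are 1..IEEE80211_MAX_AID (2007 in the mainline headers); a standalone version of the check (helper name is ours):

static bool assoc_aid_is_sane(u16 aid)
{
	aid &= ~(BIT(15) | BIT(14));   /* bits 15:14 carry no AID information */
	return aid != 0 && aid <= IEEE80211_MAX_AID;
}
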
 
index 13427b194ced05d961dcf32fabf0e2c02cd073d5..c55eb9d8ea55437ab57a6d77ee65ff1e2af5839f 100644 (file)
@@ -251,6 +251,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
                return;
        }
 
+       /* was never transmitted */
+       if (local->hw_roc_skb) {
+               u64 cookie;
+
+               cookie = local->hw_roc_cookie ^ 2;
+
+               cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
+                                       local->hw_roc_skb->data,
+                                       local->hw_roc_skb->len, false,
+                                       GFP_KERNEL);
+
+               kfree_skb(local->hw_roc_skb);
+               local->hw_roc_skb = NULL;
+               local->hw_roc_skb_for_status = NULL;
+       }
+
        if (!local->hw_roc_for_tx)
                cfg80211_remain_on_channel_expired(local->hw_roc_dev,
                                                   local->hw_roc_cookie,
index 3d5a2cb835c4203bb8cc4e768df7f7705594ee21..816590b0d7f29bab3f63c8624ddca3924fd65688 100644 (file)
@@ -314,7 +314,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
        for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
                info->control.rates[i].idx = -1;
                info->control.rates[i].flags = 0;
-               info->control.rates[i].count = 1;
+               info->control.rates[i].count = 0;
        }
 
        if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
index 7fa8c6be7bf02564cd5714033fbdb8761b30b03d..41000650f4a0dcffd5e21f63586e1dad06cd7e65 100644 (file)
@@ -140,8 +140,9 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        pos++;
 
        /* IEEE80211_RADIOTAP_RATE */
-       if (status->flag & RX_FLAG_HT) {
+       if (!rate || status->flag & RX_FLAG_HT) {
                /*
+                * Without rate information don't add it. If we have,
                 * MCS information is a separate field in radiotap,
                 * added below. The byte here is needed as padding
                 * for the channel though, so initialise it to 0.
@@ -162,12 +163,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        else if (status->flag & RX_FLAG_HT)
                put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
                                   pos);
-       else if (rate->flags & IEEE80211_RATE_ERP_G)
+       else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
                put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
                                   pos);
-       else
+       else if (rate)
                put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
                                   pos);
+       else
+               put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
        pos += 2;
 
        /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
@@ -607,7 +610,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
        index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
                                                tid_agg_rx->buf_size;
        if (!tid_agg_rx->reorder_buf[index] &&
-           tid_agg_rx->stored_mpdu_num > 1) {
+           tid_agg_rx->stored_mpdu_num) {
                /*
                 * No buffers ready to be released, but check whether any
                 * frames in the reorder buffer have timed out.
index ca7bf1052eba2e27e9e9ffa7ade276e02646e513..3ff633e81b680dc477338f4c372b7241dce15ca9 100644 (file)
@@ -334,6 +334,7 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
        ieee80211_sta_debugfs_add(sta);
        rate_control_add_sta_debugfs(sta);
 
+       memset(&sinfo, 0, sizeof(sinfo));
        sinfo.filled = 0;
        sinfo.generation = local->sta_generation;
        cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
index 1658efaa2e8eef4b042ec05ffd875cbd0d3c6ec1..04cdbaf160ba7651a2a0b434fc0208c5aad587a3 100644 (file)
@@ -336,7 +336,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                                continue;
                        if (wk->offchan_tx.frame != skb)
                                continue;
-                       wk->offchan_tx.frame = NULL;
+                       wk->offchan_tx.status = true;
                        break;
                }
                rcu_read_unlock();
index 3104c844b544c5465ef5ba1e7efcff05ca920501..da878c14182c5b20c82677886fecfa47cca37ecf 100644 (file)
@@ -1222,7 +1222,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
                tx->sta = rcu_dereference(sdata->u.vlan.sta);
                if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
                        return TX_DROP;
-       } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+       } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+                  tx->sdata->control_port_protocol == tx->skb->protocol) {
                tx->sta = sta_info_get_bss(sdata, hdr->addr1);
        }
        if (!tx->sta)
index d3fe2d2374859566d0c968a949aff2536dadf60d..11d9d49f22dc9f69b3e643e78c2092cef94a4198 100644 (file)
@@ -1047,6 +1047,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
                                     ssid, ssid_len,
                                     buf, buf_len);
+       if (!skb)
+               goto out;
 
        if (dst) {
                mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -1055,6 +1057,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        }
 
        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+ out:
        kfree(buf);
 
        return skb;
@@ -1250,6 +1254,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                }
        }
 
+       /* add back keys */
+       list_for_each_entry(sdata, &local->interfaces, list)
+               if (ieee80211_sdata_running(sdata))
+                       ieee80211_enable_keys(sdata);
+
+ wake_up:
        /*
         * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
         * sessions can be established after a resume.
@@ -1271,12 +1281,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                mutex_unlock(&local->sta_mtx);
        }
 
-       /* add back keys */
-       list_for_each_entry(sdata, &local->interfaces, list)
-               if (ieee80211_sdata_running(sdata))
-                       ieee80211_enable_keys(sdata);
-
- wake_up:
        ieee80211_wake_queues_by_reason(hw,
                        IEEE80211_QUEUE_STOP_REASON_SUSPEND);
 
index d2e7f0e866772ed0fb3969837d9aab62d27bd755..52b758dbff5b99dd5919337a898b1007e6f12a2a 100644 (file)
@@ -553,7 +553,7 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
                /*
                 * After this, offchan_tx.frame remains but now is no
                 * longer a valid pointer -- we still need it as the
-                * cookie for canceling this work.
+                * cookie for canceling this work/status matching.
                 */
                ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
 
@@ -1060,14 +1060,13 @@ static void ieee80211_work_work(struct work_struct *work)
                        continue;
                if (wk->chan != local->tmp_channel)
                        continue;
-               if (ieee80211_work_ct_coexists(wk->chan_type,
-                                              local->tmp_channel_type))
+               if (!ieee80211_work_ct_coexists(wk->chan_type,
+                                               local->tmp_channel_type))
                        continue;
                remain_off_channel = true;
        }
 
        if (!remain_off_channel && local->tmp_channel) {
-               bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
                local->tmp_channel = NULL;
                /* If tmp_channel wasn't operating channel, then
                 * we need to go back on-channel.
@@ -1077,7 +1076,7 @@ static void ieee80211_work_work(struct work_struct *work)
                 * we still need to do a hardware config.  Currently,
                 * we cannot be here while scanning, however.
                 */
-               if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan)
+               if (!ieee80211_cfg_on_oper_channel(local))
                        ieee80211_hw_config(local, 0);
 
                /* At the least, we need to disable offchannel_ps,
index 8f6a302d2ac3b89d191708f6f7f8eea1409ced8b..aa1c40ab6a7c705a5e62aa414bfbec77a5017cf1 100644 (file)
@@ -109,7 +109,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
                if (status->flag & RX_FLAG_MMIC_ERROR)
                        goto mic_fail;
 
-               if (!(status->flag & RX_FLAG_IV_STRIPPED))
+               if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
                        goto update_iv;
 
                return RX_CONTINUE;
index 24c28d238dcb62f7dc148648a35e8a05b7278fbc..0787bed04180fb5f4c14d8f4e1ad4587b2644129 100644 (file)
@@ -233,6 +233,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
        __be16 dport = 0;               /* destination port to forward */
        unsigned int flags;
        struct ip_vs_conn_param param;
+       const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
        union nf_inet_addr snet;        /* source network of the client,
                                           after masking */
 
@@ -268,7 +269,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
        {
                int protocol = iph.protocol;
                const union nf_inet_addr *vaddr = &iph.daddr;
-               const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
                __be16 vport = 0;
 
                if (dst_port == svc->port) {
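
Hoisting fwmark to function scope in ip_vs_sched_persist() matters because its address is stored in the connection parameter that is still used after the inner block has ended; with the old placement that pointer referred to an object whose lifetime was already over. The hazard in isolation, reusing the patch's identifiers (this is not the ipvs code itself):

	const union nf_inet_addr *vaddr;
	{
		const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
		vaddr = &fwmark;        /* valid only inside this block */
	}
	/* any later use of vaddr reads an object that no longer exists */
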
index 3bdd443aaf154d7946abdd802bd080df4e2c8fad..43a16d0bb629b717191e6deb73827741049e1725 100644 (file)
@@ -5,6 +5,7 @@
  * After timer expires a kevent will be sent.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and reworked for upstream inclusion
 #include <linux/netfilter/xt_IDLETIMER.h>
 #include <linux/kdev_t.h>
 #include <linux/kobject.h>
+#include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/sysfs.h>
+#include <net/net_namespace.h>
 
 struct idletimer_tg_attr {
        struct attribute attr;
@@ -56,6 +59,8 @@ struct idletimer_tg {
        struct idletimer_tg_attr attr;
 
        unsigned int refcnt;
+       bool send_nl_msg;
+       bool active;
 };
 
 static LIST_HEAD(idletimer_tg_list);
@@ -63,6 +68,32 @@ static DEFINE_MUTEX(list_mutex);
 
 static struct kobject *idletimer_tg_kobj;
 
+static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
+{
+       char iface_msg[NLMSG_MAX_SIZE];
+       char state_msg[NLMSG_MAX_SIZE];
+       char *envp[] = { iface_msg, state_msg, NULL };
+       int res;
+
+       res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+                      iface);
+       if (NLMSG_MAX_SIZE <= res) {
+               pr_err("message too long (%d)", res);
+               return;
+       }
+       res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+                      timer->active ? "active" : "inactive");
+       if (NLMSG_MAX_SIZE <= res) {
+               pr_err("message too long (%d)", res);
+               return;
+       }
+       pr_debug("putting nlmsg: <%s> <%s>\n", iface_msg, state_msg);
+       kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
+       return;
+
+
+}
+
 static
 struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
 {
@@ -83,6 +114,7 @@ static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
 {
        struct idletimer_tg *timer;
        unsigned long expires = 0;
+       unsigned long now = jiffies;
 
        mutex_lock(&list_mutex);
 
@@ -92,11 +124,15 @@ static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
 
        mutex_unlock(&list_mutex);
 
-       if (time_after(expires, jiffies))
+       if (time_after(expires, now))
                return sprintf(buf, "%u\n",
-                              jiffies_to_msecs(expires - jiffies) / 1000);
+                              jiffies_to_msecs(expires - now) / 1000);
 
-       return sprintf(buf, "0\n");
+       if (timer->send_nl_msg)
+               return sprintf(buf, "0 %d\n",
+                       jiffies_to_msecs(now - expires) / 1000);
+       else
+               return sprintf(buf, "0\n");
 }
 
 static void idletimer_tg_work(struct work_struct *work)
@@ -105,6 +141,9 @@ static void idletimer_tg_work(struct work_struct *work)
                                                  work);
 
        sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+       if (timer->send_nl_msg)
+               notify_netlink_uevent(timer->attr.attr.name, timer);
 }
 
 static void idletimer_tg_expired(unsigned long data)
@@ -113,6 +152,7 @@ static void idletimer_tg_expired(unsigned long data)
 
        pr_debug("timer %s expired\n", timer->attr.attr.name);
 
+       timer->active = false;
        schedule_work(&timer->work);
 }
 
@@ -147,6 +187,8 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
        setup_timer(&info->timer->timer, idletimer_tg_expired,
                    (unsigned long) info->timer);
        info->timer->refcnt = 1;
+       info->timer->send_nl_msg = (info->send_nl_msg == 0) ? false : true;
+       info->timer->active = true;
 
        mod_timer(&info->timer->timer,
                  msecs_to_jiffies(info->timeout * 1000) + jiffies);
@@ -170,14 +212,24 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb,
                                         const struct xt_action_param *par)
 {
        const struct idletimer_tg_info *info = par->targinfo;
+       unsigned long now = jiffies;
 
        pr_debug("resetting timer %s, timeout period %u\n",
                 info->label, info->timeout);
 
        BUG_ON(!info->timer);
 
+       info->timer->active = true;
+
+       if (time_before(info->timer->timer.expires, now)) {
+               schedule_work(&info->timer->work);
+               pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+                        info->label, info->timer->timer.expires, now);
+       }
+
+       /* TODO: Avoid modifying timers on each packet */
        mod_timer(&info->timer->timer,
-                 msecs_to_jiffies(info->timeout * 1000) + jiffies);
+                 msecs_to_jiffies(info->timeout * 1000) + now);
 
        return XT_CONTINUE;
 }
@@ -186,8 +238,9 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
 {
        struct idletimer_tg_info *info = par->targinfo;
        int ret;
+       unsigned long now = jiffies;
 
-       pr_debug("checkentry targinfo%s\n", info->label);
+       pr_debug("checkentry targinfo %s\n", info->label);
 
        if (info->timeout == 0) {
                pr_debug("timeout value is zero\n");
@@ -206,8 +259,17 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
        info->timer = __idletimer_tg_find_by_label(info->label);
        if (info->timer) {
                info->timer->refcnt++;
+               info->timer->active = true;
+
+               if (time_before(info->timer->timer.expires, now)) {
+                       schedule_work(&info->timer->work);
+                       pr_debug("Starting Checkentry timer"
+                               "(Expired, Jiffies): %lu, %lu\n",
+                               info->timer->timer.expires, now);
+               }
+
                mod_timer(&info->timer->timer,
-                         msecs_to_jiffies(info->timeout * 1000) + jiffies);
+                         msecs_to_jiffies(info->timeout * 1000) + now);
 
                pr_debug("increased refcnt of timer %s to %u\n",
                         info->label, info->timer->refcnt);
@@ -221,6 +283,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
        }
 
        mutex_unlock(&list_mutex);
+
        return 0;
 }
 
@@ -242,7 +305,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
                kfree(info->timer);
        } else {
                pr_debug("decreased refcnt of timer %s to %u\n",
-                        info->label, info->timer->refcnt);
+               info->label, info->timer->refcnt);
        }
 
        mutex_unlock(&list_mutex);
@@ -250,6 +313,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
 
 static struct xt_target idletimer_tg __read_mostly = {
        .name           = "IDLETIMER",
+       .revision       = 1,
        .family         = NFPROTO_UNSPEC,
        .target         = idletimer_tg_target,
        .targetsize     = sizeof(struct idletimer_tg_info),
@@ -315,3 +379,4 @@ MODULE_DESCRIPTION("Xtables: idle time monitor");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("ipt_IDLETIMER");
 MODULE_ALIAS("ip6t_IDLETIMER");
+MODULE_ALIAS("arpt_IDLETIMER");
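
Revision 1 of the IDLETIMER target adds a send_nl_msg option and an active flag: expiry marks the timer inactive and, when enabled, notify_netlink_uevent() emits a KOBJ_CHANGE uevent carrying INTERFACE= and STATE= variables, while every matched packet re-arms the timer and, if it had already expired, schedules the work item again so the inactive-to-active edge is reported as well. Condensed per-packet path using the fields from the patch (function name is ours, not the module's):

static void idletimer_packet_seen(struct idletimer_tg *timer, unsigned int timeout_secs)
{
	unsigned long now = jiffies;

	timer->active = true;
	if (time_before(timer->timer.expires, now))
		schedule_work(&timer->work);   /* timer had lapsed: report "active" again */
	mod_timer(&timer->timer, msecs_to_jiffies(timeout_secs * 1000) + now);
}
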
index 08086d680c2c2b6c5ec668c704f500ae00b5ff9d..f6d4cfc05f3c2101e94f320d211b6f42546d5292 100644 (file)
 #include <net/tcp.h>
 #include <net/udp.h>
 
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
 #include <linux/netfilter/xt_socket.h>
 #include "xt_qtaguid_internal.h"
 #include "xt_qtaguid_print.h"
@@ -110,8 +114,15 @@ module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
 /*---------------------------------------------------------------------------*/
 static const char *iface_stat_procdirname = "iface_stat";
 static struct proc_dir_entry *iface_stat_procdir;
+/*
+ * The iface_stat_all* will go away once userspace gets use to the new fields
+ * that have a format line.
+ */
 static const char *iface_stat_all_procfilename = "iface_stat_all";
 static struct proc_dir_entry *iface_stat_all_procfile;
+static const char *iface_stat_fmt_procfilename = "iface_stat_fmt";
+static struct proc_dir_entry *iface_stat_fmt_procfile;
+
 
 /*
  * Ordering of locks:
@@ -124,9 +135,9 @@ static struct proc_dir_entry *iface_stat_all_procfile;
  * Notice how sock_tag_list_lock is held sometimes when uid_tag_data_tree_lock
  * is acquired.
  *
- * Call tree with all lock holders as of 2011-09-25:
+ * Call tree with all lock holders as of 2012-04-27:
  *
- * iface_stat_all_proc_read()
+ * iface_stat_fmt_proc_read()
  *   iface_stat_list_lock
  *     (struct iface_stat)
  *
@@ -777,13 +788,14 @@ done:
        return iface_entry;
 }
 
-static int iface_stat_all_proc_read(char *page, char **num_items_returned,
+static int iface_stat_fmt_proc_read(char *page, char **num_items_returned,
                                    off_t items_to_skip, int char_count,
                                    int *eof, void *data)
 {
        char *outp = page;
        int item_index = 0;
        int len;
+       int fmt = (int)data; /* The data is just 1 (old) or 2 (uses fmt) */
        struct iface_stat *iface_entry;
        struct rtnl_link_stats64 dev_stats, *stats;
        struct rtnl_link_stats64 no_dev_stats = {0};
@@ -793,14 +805,32 @@ static int iface_stat_all_proc_read(char *page, char **num_items_returned,
                return 0;
        }
 
-       CT_DEBUG("qtaguid:proc iface_stat_all "
+       CT_DEBUG("qtaguid:proc iface_stat_fmt "
+                "pid=%u tgid=%u uid=%u "
                 "page=%p *num_items_returned=%p off=%ld "
-                "char_count=%d *eof=%d\n", page, *num_items_returned,
+                "char_count=%d *eof=%d\n",
+                current->pid, current->tgid, current_fsuid(),
+                page, *num_items_returned,
                 items_to_skip, char_count, *eof);
 
        if (*eof)
                return 0;
 
+       if (fmt == 2 && item_index++ >= items_to_skip) {
+               len = snprintf(outp, char_count,
+                              "ifname "
+                              "total_skb_rx_bytes total_skb_rx_packets "
+                              "total_skb_tx_bytes total_skb_tx_packets\n"
+                       );
+               if (len >= char_count) {
+                       *outp = '\0';
+                       return outp - page;
+               }
+               outp += len;
+               char_count -= len;
+               (*num_items_returned)++;
+       }
+
        /*
         * This lock will prevent iface_stat_update() from changing active,
         * and in turn prevent an interface from unregistering itself.
@@ -816,18 +846,37 @@ static int iface_stat_all_proc_read(char *page, char **num_items_returned,
                } else {
                        stats = &no_dev_stats;
                }
-               len = snprintf(outp, char_count,
-                              "%s %d "
-                              "%llu %llu %llu %llu "
-                              "%llu %llu %llu %llu\n",
-                              iface_entry->ifname,
-                              iface_entry->active,
-                              iface_entry->totals[IFS_RX].bytes,
-                              iface_entry->totals[IFS_RX].packets,
-                              iface_entry->totals[IFS_TX].bytes,
-                              iface_entry->totals[IFS_TX].packets,
-                              stats->rx_bytes, stats->rx_packets,
-                              stats->tx_bytes, stats->tx_packets);
+               /*
+                * If the meaning of the data changes, then update the fmtX
+                * string.
+                */
+               if (fmt == 1) {
+                       len = snprintf(
+                               outp, char_count,
+                               "%s %d "
+                               "%llu %llu %llu %llu "
+                               "%llu %llu %llu %llu\n",
+                               iface_entry->ifname,
+                               iface_entry->active,
+                               iface_entry->totals_via_dev[IFS_RX].bytes,
+                               iface_entry->totals_via_dev[IFS_RX].packets,
+                               iface_entry->totals_via_dev[IFS_TX].bytes,
+                               iface_entry->totals_via_dev[IFS_TX].packets,
+                               stats->rx_bytes, stats->rx_packets,
+                               stats->tx_bytes, stats->tx_packets
+                               );
+               } else {
+                       len = snprintf(
+                               outp, char_count,
+                               "%s "
+                               "%llu %llu %llu %llu\n",
+                               iface_entry->ifname,
+                               iface_entry->totals_via_skb[IFS_RX].bytes,
+                               iface_entry->totals_via_skb[IFS_RX].packets,
+                               iface_entry->totals_via_skb[IFS_TX].bytes,
+                               iface_entry->totals_via_skb[IFS_TX].packets
+                               );
+               }
                if (len >= char_count) {
                        spin_unlock_bh(&iface_stat_list_lock);
                        *outp = '\0';
@@ -861,13 +910,17 @@ static void iface_create_proc_worker(struct work_struct *work)
        new_iface->proc_ptr = proc_entry;
 
        create_proc_read_entry("tx_bytes", proc_iface_perms, proc_entry,
-                       read_proc_u64, &new_iface->totals[IFS_TX].bytes);
+                              read_proc_u64,
+                              &new_iface->totals_via_dev[IFS_TX].bytes);
        create_proc_read_entry("rx_bytes", proc_iface_perms, proc_entry,
-                       read_proc_u64, &new_iface->totals[IFS_RX].bytes);
+                              read_proc_u64,
+                              &new_iface->totals_via_dev[IFS_RX].bytes);
        create_proc_read_entry("tx_packets", proc_iface_perms, proc_entry,
-                       read_proc_u64, &new_iface->totals[IFS_TX].packets);
+                              read_proc_u64,
+                              &new_iface->totals_via_dev[IFS_TX].packets);
        create_proc_read_entry("rx_packets", proc_iface_perms, proc_entry,
-                       read_proc_u64, &new_iface->totals[IFS_RX].packets);
+                              read_proc_u64,
+                              &new_iface->totals_via_dev[IFS_RX].packets);
        create_proc_read_entry("active", proc_iface_perms, proc_entry,
                        read_proc_bool, &new_iface->active);
 
@@ -971,11 +1024,13 @@ static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
                             "iface reset its stats unexpectedly\n", __func__,
                             net_dev->name);
 
-               iface->totals[IFS_TX].bytes += iface->last_known[IFS_TX].bytes;
-               iface->totals[IFS_TX].packets +=
+               iface->totals_via_dev[IFS_TX].bytes +=
+                       iface->last_known[IFS_TX].bytes;
+               iface->totals_via_dev[IFS_TX].packets +=
                        iface->last_known[IFS_TX].packets;
-               iface->totals[IFS_RX].bytes += iface->last_known[IFS_RX].bytes;
-               iface->totals[IFS_RX].packets +=
+               iface->totals_via_dev[IFS_RX].bytes +=
+                       iface->last_known[IFS_RX].bytes;
+               iface->totals_via_dev[IFS_RX].packets +=
                        iface->last_known[IFS_RX].packets;
                iface->last_known_valid = false;
                IF_DEBUG("qtaguid: %s(%s): iface=%p "
@@ -1143,6 +1198,27 @@ static struct sock_tag *get_sock_stat(const struct sock *sk)
        return sock_tag_entry;
 }
 
+static int ipx_proto(const struct sk_buff *skb,
+                    struct xt_action_param *par)
+{
+       int thoff, tproto;
+
+       switch (par->family) {
+       case NFPROTO_IPV6:
+               tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+               if (tproto < 0)
+                       MT_DEBUG("%s(): transport header not found in ipv6"
+                                " skb=%p\n", __func__, skb);
+               break;
+       case NFPROTO_IPV4:
+               tproto = ip_hdr(skb)->protocol;
+               break;
+       default:
+               tproto = IPPROTO_RAW;
+       }
+       return tproto;
+}
+
 static void
 data_counters_update(struct data_counters *dc, int set,
                     enum ifs_tx_rx direction, int proto, int bytes)
@@ -1203,10 +1279,10 @@ static void iface_stat_update(struct net_device *net_dev, bool stash_only)
                spin_unlock_bh(&iface_stat_list_lock);
                return;
        }
-       entry->totals[IFS_TX].bytes += stats->tx_bytes;
-       entry->totals[IFS_TX].packets += stats->tx_packets;
-       entry->totals[IFS_RX].bytes += stats->rx_bytes;
-       entry->totals[IFS_RX].packets += stats->rx_packets;
+       entry->totals_via_dev[IFS_TX].bytes += stats->tx_bytes;
+       entry->totals_via_dev[IFS_TX].packets += stats->tx_packets;
+       entry->totals_via_dev[IFS_RX].bytes += stats->rx_bytes;
+       entry->totals_via_dev[IFS_RX].packets += stats->rx_packets;
        /* We don't need the last_known[] anymore */
        entry->last_known_valid = false;
        _iface_stat_set_active(entry, net_dev, false);
@@ -1216,6 +1292,67 @@ static void iface_stat_update(struct net_device *net_dev, bool stash_only)
        spin_unlock_bh(&iface_stat_list_lock);
 }
 
+/*
+ * Update stats for the specified interface from the skb.
+ * Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called on each sk.
+ */
+static void iface_stat_update_from_skb(const struct sk_buff *skb,
+                                      struct xt_action_param *par)
+{
+       struct iface_stat *entry;
+       const struct net_device *el_dev;
+       enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
+       int bytes = skb->len;
+
+       if (!skb->dev) {
+               MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+               el_dev = par->in ? : par->out;
+       } else {
+               const struct net_device *other_dev;
+               el_dev = skb->dev;
+               other_dev = par->in ? : par->out;
+               if (el_dev != other_dev) {
+                       MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+                                "par->(in/out)=%p %s\n",
+                                par->hooknum, el_dev, el_dev->name, other_dev,
+                                other_dev->name);
+               }
+       }
+
+       if (unlikely(!el_dev)) {
+               pr_err("qtaguid[%d]: %s(): no par->in/out?!!\n",
+                      par->hooknum, __func__);
+               BUG();
+       } else if (unlikely(!el_dev->name)) {
+               pr_err("qtaguid[%d]: %s(): no dev->name?!!\n",
+                      par->hooknum, __func__);
+               BUG();
+       } else {
+               int proto = ipx_proto(skb, par);
+               MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+                        par->hooknum, el_dev->name, el_dev->type,
+                        par->family, proto);
+       }
+
+       spin_lock_bh(&iface_stat_list_lock);
+       entry = get_iface_entry(el_dev->name);
+       if (entry == NULL) {
+               IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
+                        __func__, el_dev->name);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+
+       IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+                el_dev->name, entry);
+
+       entry->totals_via_skb[direction].bytes += bytes;
+       entry->totals_via_skb[direction].packets++;
+       spin_unlock_bh(&iface_stat_list_lock);
+}
+
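As a rough model of the bookkeeping this function adds: each interface now carries two independent counter sets, one refreshed from the device's cumulative statistics (totals_via_dev) and one accumulated per packet from the netfilter hooks (totals_via_skb), so the two can legitimately diverge. A minimal sketch, with illustrative field names:

struct counters_sketch { unsigned long long bytes, packets; };

struct iface_totals_sketch {
        struct counters_sketch via_dev[2];      /* [0]=tx, [1]=rx, from dev stats */
        struct counters_sketch via_skb[2];      /* [0]=tx, [1]=rx, per-skb at hooks */
};

static void count_skb_sketch(struct iface_totals_sketch *t, int rx, unsigned int len)
{
        t->via_skb[rx].bytes += len;
        t->via_skb[rx].packets++;
}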
 static void tag_stat_update(struct tag_stat *tag_entry,
                        enum ifs_tx_rx direction, int proto, int bytes)
 {
@@ -1265,7 +1402,7 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
        struct data_counters *uid_tag_counters;
        struct sock_tag *sock_tag_entry;
        struct iface_stat *iface_entry;
-       struct tag_stat *new_tag_stat;
+       struct tag_stat *new_tag_stat = NULL;
        MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
                "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
                 ifname, uid, sk, direction, proto, bytes);
@@ -1330,8 +1467,19 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
        }
 
        if (acct_tag) {
+               /* Create the child {acct_tag, uid_tag} and hook up parent. */
                new_tag_stat = create_if_tag_stat(iface_entry, tag);
                new_tag_stat->parent_counters = uid_tag_counters;
+       } else {
+               /*
+                * For new_tag_stat to be still NULL here would require:
+                *  {0, uid_tag} exists
+                *  and {acct_tag, uid_tag} doesn't exist
+                *  AND acct_tag == 0.
+                * Impossible. This reassures us that new_tag_stat
+                * below will always be assigned.
+                */
+               BUG_ON(!new_tag_stat);
        }
        tag_stat_update(new_tag_stat, direction, proto, bytes);
        spin_unlock_bh(&iface_entry->tag_stat_list_lock);
@@ -1452,18 +1600,31 @@ static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
                                                    parent_procdir);
        if (!iface_stat_all_procfile) {
                pr_err("qtaguid: iface_stat: init "
-                      " failed to create stat_all proc entry\n");
+                      " failed to create stat_old proc entry\n");
                err = -1;
                goto err_zap_entry;
        }
-       iface_stat_all_procfile->read_proc = iface_stat_all_proc_read;
+       iface_stat_all_procfile->read_proc = iface_stat_fmt_proc_read;
+       iface_stat_all_procfile->data = (void *)1; /* fmt1 */
+
+       iface_stat_fmt_procfile = create_proc_entry(iface_stat_fmt_procfilename,
+                                                   proc_iface_perms,
+                                                   parent_procdir);
+       if (!iface_stat_fmt_procfile) {
+               pr_err("qtaguid: iface_stat: init "
+                      " failed to create stat_all proc entry\n");
+               err = -1;
+               goto err_zap_all_stats_entry;
+       }
+       iface_stat_fmt_procfile->read_proc = iface_stat_fmt_proc_read;
+       iface_stat_fmt_procfile->data = (void *)2; /* fmt2 */
 
 
        err = register_netdevice_notifier(&iface_netdev_notifier_blk);
        if (err) {
                pr_err("qtaguid: iface_stat: init "
                       "failed to register dev event handler\n");
-               goto err_zap_all_stats_entry;
+               goto err_zap_all_stats_entries;
        }
        err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
        if (err) {
@@ -1484,6 +1645,8 @@ err_unreg_ip4_addr:
        unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
 err_unreg_nd:
        unregister_netdevice_notifier(&iface_netdev_notifier_blk);
+err_zap_all_stats_entries:
+       remove_proc_entry(iface_stat_fmt_procfilename, parent_procdir);
 err_zap_all_stats_entry:
        remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
 err_zap_entry:
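The extra err_zap_all_stats_entries label above keeps the usual goto-unwind shape intact: each later failure jumps to a label that tears down exactly what the earlier steps created, in reverse order. A self-contained sketch of the idiom (all names hypothetical):

static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }
static int setup_c(void) { return -1; }
static void teardown_a(void) { }
static void teardown_b(void) { }

static int init_sketch(void)
{
        int err = -1;

        if (setup_a())
                goto fail;
        if (setup_b())
                goto undo_a;
        if (setup_c())
                goto undo_b;
        return 0;

undo_b:
        teardown_b();
undo_a:
        teardown_a();
fail:
        return err;
}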
@@ -1561,15 +1724,15 @@ static void account_for_uid(const struct sk_buff *skb,
        } else if (unlikely(!el_dev->name)) {
                pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
        } else {
-               MT_DEBUG("qtaguid[%d]: dev name=%s type=%d\n",
-                        par->hooknum,
-                        el_dev->name,
-                        el_dev->type);
+               int proto = ipx_proto(skb, par);
+               MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+                        par->hooknum, el_dev->name, el_dev->type,
+                        par->family, proto);
 
                if_tag_stat_update(el_dev->name, uid,
                                skb->sk ? skb->sk : alternate_sk,
                                par->in ? IFS_RX : IFS_TX,
-                               ip_hdr(skb)->protocol, skb->len);
+                               proto, skb->len);
        }
 }
 
@@ -1594,8 +1757,22 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
                goto ret_res;
        }
 
-       sk = skb->sk;
+       switch (par->hooknum) {
+       case NF_INET_PRE_ROUTING:
+       case NF_INET_POST_ROUTING:
+               atomic64_inc(&qtu_events.match_calls_prepost);
+               iface_stat_update_from_skb(skb, par);
+               /*
+                * We are done in pre/post. The skb will get processed
+                * further later on.
+                */
+               res = (info->match ^ info->invert);
+               goto ret_res;
+               break;
+       /* default: Fall through and do UID related work */
+       }
 
+       sk = skb->sk;
        if (sk == NULL) {
                /*
                 * A missing sk->sk_socket happens when packets are in-flight
@@ -1614,8 +1791,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
        } else {
                atomic64_inc(&qtu_events.match_found_sk);
        }
-       MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d proto=%d\n",
-               par->hooknum, sk, got_sock, ip_hdr(skb)->protocol);
+       MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
+                par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
        if (sk != NULL) {
                MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
                        par->hooknum, sk, sk->sk_socket,
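The routing hooks are handled separately in the switch above: as the in-line comment says, per-UID work is left to later hooks, so only the per-interface skb counters are updated before the rule is answered with its configured polarity. For reference, that polarity logic in isolation:

#include <stdbool.h>

/* match=0,invert=0 and match=1,invert=1 yield false; the other two
 * combinations yield true.
 */
static bool rule_result(bool match, bool invert)
{
        return match ^ invert;
}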
@@ -1770,8 +1947,10 @@ static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned,
        if (*eof)
                return 0;
 
-       CT_DEBUG("qtaguid: proc ctrl page=%p off=%ld char_count=%d *eof=%d\n",
-               page, items_to_skip, char_count, *eof);
+       CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u "
+                "page=%p off=%ld char_count=%d *eof=%d\n",
+                current->pid, current->tgid, current_fsuid(),
+                page, items_to_skip, char_count, *eof);
 
        spin_lock_bh(&sock_tag_list_lock);
        for (node = rb_first(&sock_tag_tree);
@@ -1815,6 +1994,7 @@ static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned,
                               "delete_cmds=%llu "
                               "iface_events=%llu "
                               "match_calls=%llu "
+                              "match_calls_prepost=%llu "
                               "match_found_sk=%llu "
                               "match_found_sk_in_ct=%llu "
                               "match_found_no_sk_in_ct=%llu "
@@ -1826,6 +2006,7 @@ static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned,
                               atomic64_read(&qtu_events.delete_cmds),
                               atomic64_read(&qtu_events.iface_events),
                               atomic64_read(&qtu_events.match_calls),
+                              atomic64_read(&qtu_events.match_calls_prepost),
                               atomic64_read(&qtu_events.match_found_sk),
                               atomic64_read(&qtu_events.match_found_sk_in_ct),
                               atomic64_read(
@@ -2099,7 +2280,9 @@ static int ctrl_cmd_tag(const char *input)
        el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
        if (!el_socket) {
                pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
-                       " sock_fd=%d err=%d\n", input, sock_fd, res);
+                       " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+                       input, sock_fd, res, current->pid, current->tgid,
+                       current_fsuid());
                goto err;
        }
        CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
@@ -2244,7 +2427,9 @@ static int ctrl_cmd_untag(const char *input)
        el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
        if (!el_socket) {
                pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
-                       " sock_fd=%d err=%d\n", input, sock_fd, res);
+                       " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+                       input, sock_fd, res, current->pid, current->tgid,
+                       current_fsuid());
                goto err;
        }
        CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
@@ -2320,6 +2505,9 @@ static int qtaguid_ctrl_parse(const char *input, int count)
        char cmd;
        int res;
 
+       CT_DEBUG("qtaguid: ctrl(%s): pid=%u tgid=%u uid=%u\n",
+                input, current->pid, current->tgid, current_fsuid());
+
        cmd = input[0];
        /* Collect params for commands */
        switch (cmd) {
@@ -2496,9 +2684,12 @@ static int qtaguid_stats_proc_read(char *page, char **num_items_returned,
                return len;
        }
 
-       CT_DEBUG("qtaguid:proc stats page=%p *num_items_returned=%p off=%ld "
-               "char_count=%d *eof=%d\n", page, *num_items_returned,
-               items_to_skip, char_count, *eof);
+       CT_DEBUG("qtaguid:proc stats pid=%u tgid=%u uid=%u "
+                "page=%p *num_items_returned=%p off=%ld "
+                "char_count=%d *eof=%d\n",
+                current->pid, current->tgid, current_fsuid(),
+                page, *num_items_returned,
+                items_to_skip, char_count, *eof);
 
        if (*eof)
                return 0;
index 02479d6d317d5167fa0ed33127ca6d4ff37aa696..d79f8383abf4b154896bbfb8f8c168fe872b9310 100644 (file)
@@ -202,7 +202,8 @@ struct iface_stat {
        /* net_dev is only valid for active iface_stat */
        struct net_device *net_dev;
 
-       struct byte_packet_counters totals[IFS_MAX_DIRECTIONS];
+       struct byte_packet_counters totals_via_dev[IFS_MAX_DIRECTIONS];
+       struct byte_packet_counters totals_via_skb[IFS_MAX_DIRECTIONS];
        /*
         * We keep the last_known, because some devices reset their counters
         * just before NETDEV_UP, while some will reset just before
@@ -254,6 +255,8 @@ struct qtaguid_event_counts {
        atomic64_t iface_events;  /* Number of NETDEV_* events handled */
 
        atomic64_t match_calls;   /* Number of times iptables called mt */
+       /* Number of times iptables called mt from pre or post routing hooks */
+       atomic64_t match_calls_prepost;
        /*
         * match_found_sk_*: numbers related to the netfilter matching
         * function finding a sock for the sk_buff.
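The new match_calls_prepost field joins the other qtu_events counters as an atomic64 because the events are bumped from packet-processing context and read without a lock by the proc handler; a counter that is only ever incremented and read needs no extra locking. The same pattern in plain C11 atomics, as an illustration only:

#include <stdatomic.h>

static atomic_ullong prepost_calls;

static void on_prepost_hook(void)
{
        atomic_fetch_add(&prepost_calls, 1);    /* hook path */
}

static unsigned long long prepost_snapshot(void)
{
        return atomic_load(&prepost_calls);     /* proc reader */
}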
index 39176785c91f811fdbd1f6b1559e6fc0d1f4d504..8cbd8e42bcc43e6e58c18aaccbd6cc1d4ea0f4d5 100644 (file)
@@ -183,7 +183,11 @@ char *pp_iface_stat(struct iface_stat *is)
                res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
                                "list=list_head{...}, "
                                "ifname=%s, "
-                               "total={rx={bytes=%llu, "
+                               "total_dev={rx={bytes=%llu, "
+                               "packets=%llu}, "
+                               "tx={bytes=%llu, "
+                               "packets=%llu}}, "
+                               "total_skb={rx={bytes=%llu, "
                                "packets=%llu}, "
                                "tx={bytes=%llu, "
                                "packets=%llu}}, "
@@ -198,10 +202,14 @@ char *pp_iface_stat(struct iface_stat *is)
                                "tag_stat_tree=rb_root{...}}",
                                is,
                                is->ifname,
-                               is->totals[IFS_RX].bytes,
-                               is->totals[IFS_RX].packets,
-                               is->totals[IFS_TX].bytes,
-                               is->totals[IFS_TX].packets,
+                               is->totals_via_dev[IFS_RX].bytes,
+                               is->totals_via_dev[IFS_RX].packets,
+                               is->totals_via_dev[IFS_TX].bytes,
+                               is->totals_via_dev[IFS_TX].packets,
+                               is->totals_via_skb[IFS_RX].bytes,
+                               is->totals_via_skb[IFS_RX].packets,
+                               is->totals_via_skb[IFS_TX].bytes,
+                               is->totals_via_skb[IFS_TX].packets,
                                is->last_known_valid,
                                is->last_known[IFS_RX].bytes,
                                is->last_known[IFS_RX].packets,
index 6ef64adf7362a421240c28e5196407ce5729f1dd..24bc620b539aa74f4af32c631c1a57066af8b5f6 100644 (file)
@@ -830,12 +830,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
        return 0;
 }
 
-int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 {
        int len = skb->len;
 
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);
+       return len;
+}
+
+int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+{
+       int len = __netlink_sendskb(sk, skb);
+
        sock_put(sk);
        return len;
 }
@@ -960,8 +967,7 @@ static inline int netlink_broadcast_deliver(struct sock *sk,
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
            !test_bit(0, &nlk->state)) {
                skb_set_owner_r(skb, sk);
-               skb_queue_tail(&sk->sk_receive_queue, skb);
-               sk->sk_data_ready(sk, skb->len);
+               __netlink_sendskb(sk, skb);
                return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
        }
        return -1;
@@ -1682,10 +1688,8 @@ static int netlink_dump(struct sock *sk)
 
                if (sk_filter(sk, skb))
                        kfree_skb(skb);
-               else {
-                       skb_queue_tail(&sk->sk_receive_queue, skb);
-                       sk->sk_data_ready(sk, skb->len);
-               }
+               else
+                       __netlink_sendskb(sk, skb);
                return 0;
        }
 
@@ -1697,10 +1701,8 @@ static int netlink_dump(struct sock *sk)
 
        if (sk_filter(sk, skb))
                kfree_skb(skb);
-       else {
-               skb_queue_tail(&sk->sk_receive_queue, skb);
-               sk->sk_data_ready(sk, skb->len);
-       }
+       else
+               __netlink_sendskb(sk, skb);
 
        if (cb->done)
                cb->done(cb);
index c0c3cda19712d3a5a76307f5c85aefc98dc94957..fafb96830e78b9d6fd9469a1a100a562bbb22961 100644 (file)
@@ -654,7 +654,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        return 0;
 
 drop_n_acct:
-       po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+       spin_lock(&sk->sk_receive_queue.lock);
+       po->stats.tp_drops++;
+       atomic_inc(&sk->sk_drops);
+       spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
        if (skb_head != skb->data && skb_shared(skb)) {
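As I read this hunk, tp_drops is the statistic that readers copy and clear under sk_receive_queue.lock elsewhere in this file, so the fix updates it under that same lock instead of assigning it from an unrelated atomic, while sk_drops keeps its own atomic increment. The general shape, sketched with a pthread mutex standing in for the queue lock:

#include <pthread.h>

struct rx_stats_sketch {
        pthread_mutex_t lock;           /* stands in for sk_receive_queue.lock */
        unsigned int drops;
};

static void account_drop(struct rx_stats_sketch *s)
{
        pthread_mutex_lock(&s->lock);
        s->drops++;
        pthread_mutex_unlock(&s->lock);
}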
index f17fd841f9487d944adcab970e3ff238ea2c9287..d29a7fb3f61db0302f8789f14b9d645fd8aaa10e 100644 (file)
@@ -1045,6 +1045,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
        int flags = msg->msg_flags;
        int err, done;
 
+       if (len > USHRT_MAX)
+               return -EMSGSIZE;
+
        if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
                                MSG_CMSG_COMPAT)) ||
                        !(msg->msg_flags & MSG_EOR))
index bb6ad81b671d055b89fbe74287ed328a834aac03..424ff622ab5f8e77dc69c7b54b98bdaafa350329 100644 (file)
@@ -68,7 +68,6 @@ static int rds_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct rds_sock *rs;
-       unsigned long flags;
 
        if (!sk)
                goto out;
@@ -94,10 +93,10 @@ static int rds_release(struct socket *sock)
        rds_rdma_drop_keys(rs);
        rds_notify_queue_get(rs, NULL);
 
-       spin_lock_irqsave(&rds_sock_lock, flags);
+       spin_lock_bh(&rds_sock_lock);
        list_del_init(&rs->rs_item);
        rds_sock_count--;
-       spin_unlock_irqrestore(&rds_sock_lock, flags);
+       spin_unlock_bh(&rds_sock_lock);
 
        rds_trans_put(rs->rs_transport);
 
@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = {
 
 static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 {
-       unsigned long flags;
        struct rds_sock *rs;
 
        sock_init_data(sock, sk);
@@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
        spin_lock_init(&rs->rs_rdma_lock);
        rs->rs_rdma_keys = RB_ROOT;
 
-       spin_lock_irqsave(&rds_sock_lock, flags);
+       spin_lock_bh(&rds_sock_lock);
        list_add_tail(&rs->rs_item, &rds_sock_list);
        rds_sock_count++;
-       spin_unlock_irqrestore(&rds_sock_lock, flags);
+       spin_unlock_bh(&rds_sock_lock);
 
        return 0;
 }
@@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 {
        struct rds_sock *rs;
        struct rds_incoming *inc;
-       unsigned long flags;
        unsigned int total = 0;
 
        len /= sizeof(struct rds_info_message);
 
-       spin_lock_irqsave(&rds_sock_lock, flags);
+       spin_lock_bh(&rds_sock_lock);
 
        list_for_each_entry(rs, &rds_sock_list, rs_item) {
                read_lock(&rs->rs_recv_lock);
@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
                read_unlock(&rs->rs_recv_lock);
        }
 
-       spin_unlock_irqrestore(&rds_sock_lock, flags);
+       spin_unlock_bh(&rds_sock_lock);
 
        lens->nr = total;
        lens->each = sizeof(struct rds_info_message);
@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
 {
        struct rds_info_socket sinfo;
        struct rds_sock *rs;
-       unsigned long flags;
 
        len /= sizeof(struct rds_info_socket);
 
-       spin_lock_irqsave(&rds_sock_lock, flags);
+       spin_lock_bh(&rds_sock_lock);
 
        if (len < rds_sock_count)
                goto out;
@@ -529,7 +525,7 @@ out:
        lens->nr = rds_sock_count;
        lens->each = sizeof(struct rds_info_socket);
 
-       spin_unlock_irqrestore(&rds_sock_lock, flags);
+       spin_unlock_bh(&rds_sock_lock);
 }
 
 static void rds_exit(void)
index d58ae5f9339ea6716f5c17973ed9a45d0e66f6a6..c803341f284439a2d2627bd4d0d8be03008203e2 100644 (file)
@@ -932,7 +932,6 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        /* Mirror Linux UDP mirror of BSD error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
        if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
-               printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
                ret = -EOPNOTSUPP;
                goto out;
        }
index 178ff4f73c85102518025fa3d4e20f647a389743..2679507ad33374dc01805dbfbb5421fee3916abe 100644 (file)
@@ -96,11 +96,11 @@ static int rose_set_mac_address(struct net_device *dev, void *addr)
        struct sockaddr *sa = addr;
        int err;
 
-       if (!memcpy(dev->dev_addr, sa->sa_data, dev->addr_len))
+       if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
                return 0;
 
        if (dev->flags & IFF_UP) {
-               err = rose_add_loopback_node((rose_address *)dev->dev_addr);
+               err = rose_add_loopback_node((rose_address *)sa->sa_data);
                if (err)
                        return err;
 
index 06afbaeb4c88aceb565af85dbab35baf3d4ae6d6..178ee83175a407984ca71de4eadd9833fb9eab73 100644 (file)
@@ -225,8 +225,7 @@ struct choke_skb_cb {
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
 {
-       BUILD_BUG_ON(sizeof(skb->cb) <
-               sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+       qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
        return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
index b9493a09a870343fe90444bea4b1fac547d42e46..e1afe0c205fa4c0321659c29cbe5b161672956f2 100644 (file)
@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
        struct gred_sched_data *q;
 
        if (table->tab[dp] == NULL) {
-               table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
+               table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
                if (table->tab[dp] == NULL)
                        return -ENOMEM;
        }
@@ -544,11 +544,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
                opt.packets     = q->packetsin;
                opt.bytesin     = q->bytesin;
 
-               if (gred_wred_mode(table)) {
-                       q->parms.qidlestart =
-                               table->tab[table->def]->parms.qidlestart;
-                       q->parms.qavg = table->tab[table->def]->parms.qavg;
-               }
+               if (gred_wred_mode(table))
+                       gred_load_wred_set(table, q);
 
                opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
 
index ea17cbed29eff7215c062efe04c32de6bf513596..59b26b8ff4b0bd8f89d87ce99f9a114b96240408 100644 (file)
@@ -106,7 +106,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;
 
-       if (nla_len(opt) < sizeof(*qopt))
+       if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
 
        qopt = nla_data(opt);
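The one-character fix above guards against a missing option: the attribute pointer can legitimately be NULL when no options were supplied, and its length must not be taken in that case. The validation pattern in isolation, with illustrative types:

struct attr_sketch { int len; };

static int check_opt(const struct attr_sketch *opt, int min_len)
{
        if (!opt || opt->len < min_len)
                return -1;      /* -EINVAL in the kernel code */
        return 0;
}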
index 69c35f6cd13f329cab2d7442c626a31919fc122e..945f3dd6c6f6bdf7a7fd2543ec88d874dcbd75f3 100644 (file)
@@ -117,8 +117,7 @@ struct netem_skb_cb {
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
-       BUILD_BUG_ON(sizeof(skb->cb) <
-               sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
+       qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
        return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
@@ -351,10 +350,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
                if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
                    (skb->ip_summed == CHECKSUM_PARTIAL &&
-                    skb_checksum_help(skb))) {
-                       sch->qstats.drops++;
-                       return NET_XMIT_DROP;
-               }
+                    skb_checksum_help(skb)))
+                       return qdisc_drop(skb, sch);
 
                skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
        }
@@ -382,8 +379,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                q->counter = 0;
 
                __skb_queue_head(&q->qdisc->q, skb);
-               q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
-               q->qdisc->qstats.requeues++;
+               sch->qstats.backlog += qdisc_pkt_len(skb);
+               sch->qstats.requeues++;
                ret = NET_XMIT_SUCCESS;
        }
 
index 0a833d0c1f6189eb16843a34d23d6fd157bfa03f..47ee29fad350df1ccb850be1858ccde53963c6cb 100644 (file)
@@ -93,8 +93,7 @@ struct sfb_skb_cb {
 
 static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
 {
-       BUILD_BUG_ON(sizeof(skb->cb) <
-               sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+       qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
        return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
index 45cd30098e34800ddb0ab5caf880f81340531636..4f4c52c0eeb3b28459d1abad498531a777c598bc 100644 (file)
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 
 
 static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+              struct net_device *dev, struct netdev_queue *txq,
+              struct neighbour *mn)
 {
-       struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
-       struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
-       struct neighbour *mn = skb_dst(skb)->neighbour;
+       struct teql_sched_data *q = qdisc_priv(txq->qdisc);
        struct neighbour *n = q->ncache;
 
        if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 }
 
 static inline int teql_resolve(struct sk_buff *skb,
-                              struct sk_buff *skb_res, struct net_device *dev)
+                              struct sk_buff *skb_res,
+                              struct net_device *dev,
+                              struct netdev_queue *txq)
 {
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+       struct dst_entry *dst = skb_dst(skb);
+       struct neighbour *mn;
+       int res;
+
        if (txq->qdisc == &noop_qdisc)
                return -ENODEV;
 
-       if (dev->header_ops == NULL ||
-           skb_dst(skb) == NULL ||
-           skb_dst(skb)->neighbour == NULL)
+       if (!dev->header_ops || !dst)
                return 0;
-       return __teql_resolve(skb, skb_res, dev);
+
+       rcu_read_lock();
+       mn = dst_get_neighbour(dst);
+       res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+       rcu_read_unlock();
+
+       return res;
 }
 
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -307,7 +316,7 @@ restart:
                        continue;
                }
 
-               switch (teql_resolve(skb, skb_res, slave)) {
+               switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
                case 0:
                        if (__netif_tx_trylock(slave_txq)) {
                                unsigned int length = qdisc_pkt_len(skb);
index 4a62888f2e43f43a7037f556257520a462d5ece2..17a6e658a4ca8a778a70da16916db91bbc6c381e 100644 (file)
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               (unsigned long)sp->autoclose * HZ;
+               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
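The min_t() above clamps the user's autoclose value (in seconds) to the new sctp_max_autoclose limit before converting it to jiffies, so the multiplication by HZ stays within the scheduler's maximum timeout. The arithmetic restated with illustrative constants:

#define HZ_SKETCH       250UL
#define MAX_JIFFIES     (~0UL >> 1)                     /* stand-in for MAX_SCHEDULE_TIMEOUT */
#define MAX_AUTOCLOSE   (MAX_JIFFIES / HZ_SKETCH)       /* upper bound in seconds */

static unsigned long autoclose_jiffies(unsigned long secs, unsigned long max_secs)
{
        if (secs > max_secs)
                secs = max_secs;
        return secs * HZ_SKETCH;
}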
index 08b3cead6503c62f91dc8e97d9b817de7a79ffb9..8fc4dcd294abdafbb669a18c190679ceccdd3015 100644 (file)
@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         */
        skb_set_owner_w(nskb, sk);
 
-       /* The 'obsolete' field of dst is set to 2 when a dst is freed. */
-       if (!dst || (dst->obsolete > 1)) {
-               dst_release(dst);
+       if (!sctp_transport_dst_check(tp)) {
                sctp_transport_route(tp, NULL, sctp_sk(sk));
                if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
                        sctp_assoc_sync_pmtu(asoc);
@@ -697,13 +695,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
        /* Keep track of how many bytes are in flight to the receiver. */
        asoc->outqueue.outstanding_bytes += datasize;
 
-       /* Update our view of the receiver's rwnd. Include sk_buff overhead
-        * while updating peer.rwnd so that it reduces the chances of a
-        * receiver running out of receive buffer space even when receive
-        * window is still open. This can happen when a sender is sending
-        * sending small messages.
-        */
-       datasize += sizeof(struct sk_buff);
+       /* Update our view of the receiver's rwnd. */
        if (datasize < rwnd)
                rwnd -= datasize;
        else
index d03682109b7a0417ea6dc1007a6277a0468f0b1b..1f2938fbf9b73c14c9e990a445af4470500444be 100644 (file)
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                                        chunk->transport->flight_size -=
                                                        sctp_data_size(chunk);
                                q->outstanding_bytes -= sctp_data_size(chunk);
-                               q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                                       sizeof(struct sk_buff));
+                               q->asoc->peer.rwnd += sctp_data_size(chunk);
                        }
                        continue;
                }
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                         * (Section 7.2.4)), add the data size of those
                         * chunks to the rwnd.
                         */
-                       q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                               sizeof(struct sk_buff));
+                       q->asoc->peer.rwnd += sctp_data_size(chunk);
                        q->outstanding_bytes -= sctp_data_size(chunk);
                        if (chunk->transport)
                                transport->flight_size -= sctp_data_size(chunk);
index 207175b2f40a9cdaaaba8fcc6e8ac8e02cc9769f..946afd6045c370ff67e50fc2592912e6dff476e6 100644 (file)
@@ -1144,6 +1144,9 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
+       /* Initialize maximum autoclose timeout. */
+       sctp_max_autoclose              = INT_MAX / HZ;
+
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
index d3ccf7973c597402ba6e0783ef583f40d87a39ca..4434853a9fe763994b32fc452511355e673cd7f8 100644 (file)
@@ -2129,8 +2129,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
                return -EINVAL;
        if (copy_from_user(&sp->autoclose, optval, optlen))
                return -EFAULT;
-       /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-       sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
 
        return 0;
 }
@@ -4011,9 +4009,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
                                  int __user *optlen)
 {
-       if (len < sizeof(struct sctp_event_subscribe))
+       if (len <= 0)
                return -EINVAL;
-       len = sizeof(struct sctp_event_subscribe);
+       if (len > sizeof(struct sctp_event_subscribe))
+               len = sizeof(struct sctp_event_subscribe);
        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
index 50cb57f0919e780bed02f6f963e62faf6d13144e..6752f489febfaecf1d927af64e18197320134fde 100644 (file)
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+       (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+       ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -251,6 +255,15 @@ static ctl_table sctp_table[] = {
                .extra1         = &one,
                .extra2         = &rwnd_scale_max,
        },
+       {
+               .procname       = "max_autoclose",
+               .data           = &sctp_max_autoclose,
+               .maxlen         = sizeof(unsigned long),
+               .mode           = 0644,
+               .proc_handler   = &proc_doulongvec_minmax,
+               .extra1         = &max_autoclose_min,
+               .extra2         = &max_autoclose_max,
+       },
 
        { /* sentinel */ }
 };
index 394c57ca2f54210e4060654fb72f1937aee3bc75..8da4481ed30ab66b6731c6c7117c1532290be534 100644 (file)
@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
 
-/* this is a complete rip-off from __sk_dst_check
- * the cookie is always 0 since this is how it's used in the
- * pmtu code
- */
-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
-{
-       struct dst_entry *dst = t->dst;
-
-       if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
-               dst_release(t->dst);
-               t->dst = NULL;
-               return NULL;
-       }
-
-       return dst;
-}
-
 void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
        struct dst_entry *dst;
index 1ad42d3604e45e3eb1965f097eb2250c0b2ffd0f..cf41afcc89bba4314411d9c392af1463bf39e099 100644 (file)
@@ -791,9 +791,9 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
 
        sock = file->private_data;
 
-       flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
-       if (more)
-               flags |= MSG_MORE;
+       flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+       /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
+       flags |= more;
 
        return kernel_sendpage(sock, page, offset, size, flags);
 }
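Two things change in sock_sendpage(): the non-blocking flag is derived with the more conventional test, and the "more" argument is OR-ed in whole because, per the comment in the patch, it can carry more than one bit (MSG_MORE and MSG_SENDPAGE_NOTLAST), so reducing it to a boolean would drop information. Restated on its own with illustrative constants:

#define X_O_NONBLOCK    0x0800
#define X_MSG_DONTWAIT  0x0040

static int sendpage_flags(int file_flags, int more_bits)
{
        int flags = (file_flags & X_O_NONBLOCK) ? X_MSG_DONTWAIT : 0;

        /* more_bits may carry several MSG_* bits; pass them through intact */
        return flags | more_bits;
}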
index 4cb70dc6e7ad26e3008ab9be4f617af79e3cc0d1..e50502d8ceb777a6c6795e22673753c4eea6e538 100644 (file)
@@ -129,6 +129,9 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
        for (i = 0; i < groups ; i++)
                if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i))
                        return 0;
+       if (groups < NFS_NGROUPS &&
+           cred->uc_gids[groups] != NOGROUP)
+               return 0;
        return 1;
 }
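The added check in unx_match() makes the comparison exact: matching the first "groups" gids is not enough, the cached credential must also end there, which is what the NOGROUP terminator test verifies. The same idea on a plain array, with illustrative constants:

#define MAX_GROUPS_SKETCH       16
#define GROUPS_END              (-1)

static int group_lists_equal(const int *cached, const int *wanted, int n_wanted)
{
        int i;

        for (i = 0; i < n_wanted; i++)
                if (cached[i] != wanted[i])
                        return 0;
        /* a full list has no terminator; otherwise the next slot must end it */
        return n_wanted == MAX_GROUPS_SKETCH || cached[n_wanted] == GROUPS_END;
}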
 
index 72ad836e4fe05d7e5d9153918e5d8c1dacecc4a5..4530a912b8b045b950151262804db79caef34fa8 100644 (file)
@@ -828,6 +828,8 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 {
        ssize_t ret;
 
+       if (count == 0)
+               return -EINVAL;
        if (copy_from_user(kaddr, buf, count))
                return -EFAULT;
        kaddr[count] = '\0';
index 4814e246a874ac1c19c51c5c36e7bbcd60b2d373..b6bb22571c5741b3b62ef8821627cbfe4c6c7d8c 100644 (file)
@@ -480,14 +480,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
  */
 void rpc_wake_up(struct rpc_wait_queue *queue)
 {
-       struct rpc_task *task, *next;
        struct list_head *head;
 
        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
-               list_for_each_entry_safe(task, next, head, u.tk_wait.list)
+               while (!list_empty(head)) {
+                       struct rpc_task *task;
+                       task = list_first_entry(head,
+                                       struct rpc_task,
+                                       u.tk_wait.list);
                        rpc_wake_up_task_queue_locked(queue, task);
+               }
                if (head == &queue->tasks[0])
                        break;
                head--;
@@ -505,13 +509,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
  */
 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
-       struct rpc_task *task, *next;
        struct list_head *head;
 
        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
-               list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
+               while (!list_empty(head)) {
+                       struct rpc_task *task;
+                       task = list_first_entry(head,
+                                       struct rpc_task,
+                                       u.tk_wait.list);
                        task->tk_status = status;
                        rpc_wake_up_task_queue_locked(queue, task);
                }
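Both wake-up loops are rewritten from list_for_each_entry_safe() to a re-read-the-head loop because waking a task removes it from the queue and, once woken, nothing keeps its pre-fetched "next" pointer valid. The pattern in isolation (the handler is expected to unlink the entry it is given):

struct node_sketch { struct node_sketch *next; };

static void drain_sketch(struct node_sketch **head,
                         void (*handle)(struct node_sketch *))
{
        while (*head) {
                struct node_sketch *n = *head;  /* re-read head each pass */
                handle(n);                      /* handle() unlinks n */
        }
}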
index 2b90292e95053acace4d02a3269b960591b3b426..54c59ab3b1075d74110465cdd2c6d636f1a223d1 100644 (file)
@@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
 
 fail_free:
        kfree(m->to_pool);
+       m->to_pool = NULL;
 fail:
        return -ENOMEM;
 }
@@ -287,7 +288,9 @@ svc_pool_map_put(void)
        if (!--m->count) {
                m->mode = SVC_POOL_DEFAULT;
                kfree(m->to_pool);
+               m->to_pool = NULL;
                kfree(m->pool_to);
+               m->pool_to = NULL;
                m->npools = 0;
        }
 
@@ -472,17 +475,20 @@ svc_destroy(struct svc_serv *serv)
                printk("svc_destroy: no threads for serv=%p!\n", serv);
 
        del_timer_sync(&serv->sv_temptimer);
-
-       svc_close_all(&serv->sv_tempsocks);
+       /*
+        * The set of xprts (contained in the sv_tempsocks and
+        * sv_permsocks lists) is now constant, since it is modified
+        * only by accepting new sockets (done by service threads in
+        * svc_recv) or aging old ones (done by sv_temptimer), or
+        * configuration changes (excluded by whatever locking the
+        * caller is using--nfsd_mutex in the case of nfsd).  So it's
+        * safe to traverse those lists and shut everything down:
+        */
+       svc_close_all(serv);
 
        if (serv->sv_shutdown)
                serv->sv_shutdown(serv);
 
-       svc_close_all(&serv->sv_permsocks);
-
-       BUG_ON(!list_empty(&serv->sv_permsocks));
-       BUG_ON(!list_empty(&serv->sv_tempsocks));
-
        cache_clean_deferred(serv);
 
        if (svc_serv_is_pooled(serv))
@@ -1296,7 +1302,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
                                                sizeof(req->rq_snd_buf));
                return bc_send(req);
        } else {
-               /* Nothing to do to drop request */
+               /* drop request */
+               xprt_free_bc_request(req);
                return 0;
        }
 }
index bd31208bbb61b877b39e276c9e627548b6f2bcb7..9d7ed0b48b515f8edfb84ecb6eadf2749cf86600 100644 (file)
@@ -901,14 +901,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
        spin_lock_bh(&serv->sv_lock);
        if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
                list_del_init(&xprt->xpt_list);
-       /*
-        * The only time we're called while xpt_ready is still on a list
-        * is while the list itself is about to be destroyed (in
-        * svc_destroy).  BUT svc_xprt_enqueue could still be attempting
-        * to add new entries to the sp_sockets list, so we can't leave
-        * a freed xprt on it.
-        */
-       list_del_init(&xprt->xpt_ready);
+       BUG_ON(!list_empty(&xprt->xpt_ready));
        if (test_bit(XPT_TEMP, &xprt->xpt_flags))
                serv->sv_tmpcnt--;
        spin_unlock_bh(&serv->sv_lock);
@@ -936,22 +929,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
 
-void svc_close_all(struct list_head *xprt_list)
+static void svc_close_list(struct list_head *xprt_list)
+{
+       struct svc_xprt *xprt;
+
+       list_for_each_entry(xprt, xprt_list, xpt_list) {
+               set_bit(XPT_CLOSE, &xprt->xpt_flags);
+               set_bit(XPT_BUSY, &xprt->xpt_flags);
+       }
+}
+
+void svc_close_all(struct svc_serv *serv)
 {
+       struct svc_pool *pool;
        struct svc_xprt *xprt;
        struct svc_xprt *tmp;
+       int i;
+
+       svc_close_list(&serv->sv_tempsocks);
+       svc_close_list(&serv->sv_permsocks);
 
+       for (i = 0; i < serv->sv_nrpools; i++) {
+               pool = &serv->sv_pools[i];
+
+               spin_lock_bh(&pool->sp_lock);
+               while (!list_empty(&pool->sp_sockets)) {
+                       xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
+                       list_del_init(&xprt->xpt_ready);
+               }
+               spin_unlock_bh(&pool->sp_lock);
+       }
        /*
-        * The server is shutting down, and no more threads are running.
-        * svc_xprt_enqueue() might still be running, but at worst it
-        * will re-add the xprt to sp_sockets, which will soon get
-        * freed.  So we don't bother with any more locking, and don't
-        * leave the close to the (nonexistent) server threads:
+        * At this point the sp_sockets lists will stay empty, since
+        * svc_enqueue will not add new entries without taking the
+        * sp_lock and checking XPT_BUSY.
         */
-       list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
-               set_bit(XPT_CLOSE, &xprt->xpt_flags);
+       list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
                svc_delete_xprt(xprt);
-       }
+       list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
+               svc_delete_xprt(xprt);
+
+       BUG_ON(!list_empty(&serv->sv_permsocks));
+       BUG_ON(!list_empty(&serv->sv_tempsocks));
 }
 
 /*
index 72abb735893321d4616d5d9b0801e5cb6f777cff..ea7507979b0b4827c1a4671c571bc9f28c1b9143 100644 (file)
@@ -485,7 +485,7 @@ static int xs_nospace(struct rpc_task *task)
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
-       int ret = 0;
+       int ret = -EAGAIN;
 
        dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
                        task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -497,7 +497,6 @@ static int xs_nospace(struct rpc_task *task)
        /* Don't race with disconnect */
        if (xprt_connected(xprt)) {
                if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
-                       ret = -EAGAIN;
                        /*
                         * Notify TCP that we're limited by the application
                         * window size
index 1ac9443b5265c11f1a22af4cac1e720688ba8d1a..4e84e222a490708d1403b2cf1782bce8bae44bb6 100644 (file)
@@ -83,8 +83,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
        [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
 
-       [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
-       [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+       [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
+       [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
 
        [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
        [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
@@ -126,8 +126,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
        [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG },
 
-       [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
-                                        .len = NL80211_HT_CAPABILITY_LEN },
+       [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN },
 
        [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 },
        [NL80211_ATTR_IE] = { .type = NLA_BINARY,
@@ -176,6 +175,15 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
        [NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 },
        [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
+       [NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
+       [NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED },
+       [NL80211_ATTR_HIDDEN_SSID] = { .type = NLA_U32 },
+       [NL80211_ATTR_IE_PROBE_RESP] = { .type = NLA_BINARY,
+                                        .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_IE_ASSOC_RESP] = { .type = NLA_BINARY,
+                                        .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG },
+       [NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED },
 };
 
 /* policy for the key attributes */
@@ -206,6 +214,12 @@ nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = {
        [NL80211_WOWLAN_TRIG_PKT_PATTERN] = { .type = NLA_NESTED },
 };
 
+static const struct nla_policy
+nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
+       [NL80211_ATTR_SCHED_SCAN_MATCH_SSID] = { .type = NLA_BINARY,
+                                                .len = IEEE80211_MAX_SSID_LEN },
+};
+
 /* ifidx get helper */
 static int nl80211_get_ifidx(struct netlink_callback *cb)
 {
@@ -683,8 +697,14 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                    dev->wiphy.coverage_class);
        NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
                   dev->wiphy.max_scan_ssids);
+       NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
+                  dev->wiphy.max_sched_scan_ssids);
        NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
                    dev->wiphy.max_scan_ie_len);
+       NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
+                   dev->wiphy.max_sched_scan_ie_len);
+       NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS,
+                  dev->wiphy.max_match_sets);
 
        if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
                NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
@@ -1182,6 +1202,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
                        goto bad_res;
                }
 
+               if (!netif_running(netdev)) {
+                       result = -ENETDOWN;
+                       goto bad_res;
+               }
+
                nla_for_each_nested(nl_txq_params,
                                    info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
                                    rem_txq_params) {
@@ -2209,6 +2234,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
        }
        nla_nest_end(msg, sinfoattr);
 
+       if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
+               NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+                       sinfo->assoc_req_ies);
+
        return genlmsg_end(msg, hdr);
 
  nla_put_failure:
@@ -2236,6 +2265,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
        }
 
        while (1) {
+               memset(&sinfo, 0, sizeof(sinfo));
                err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
                                             mac_addr, &sinfo);
                if (err == -ENOENT)
@@ -3449,10 +3479,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        struct net_device *dev = info->user_ptr[1];
        struct nlattr *attr;
        struct wiphy *wiphy;
-       int err, tmp, n_ssids = 0, n_channels, i;
+       int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
        u32 interval;
        enum ieee80211_band band;
        size_t ie_len;
+       struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
 
        if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
            !rdev->ops->sched_scan_start)
@@ -3488,7 +3519,16 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
                                    tmp)
                        n_ssids++;
 
-       if (n_ssids > wiphy->max_scan_ssids)
+       if (n_ssids > wiphy->max_sched_scan_ssids)
+               return -EINVAL;
+
+       if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH])
+               nla_for_each_nested(attr,
+                                   info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+                                   tmp)
+                       n_match_sets++;
+
+       if (n_match_sets > wiphy->max_match_sets)
                return -EINVAL;
 
        if (info->attrs[NL80211_ATTR_IE])
@@ -3496,7 +3536,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        else
                ie_len = 0;
 
-       if (ie_len > wiphy->max_scan_ie_len)
+       if (ie_len > wiphy->max_sched_scan_ie_len)
                return -EINVAL;
 
        mutex_lock(&rdev->sched_scan_mtx);
@@ -3508,6 +3548,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
+                       + sizeof(*request->match_sets) * n_match_sets
                        + sizeof(*request->channels) * n_channels
                        + ie_len, GFP_KERNEL);
        if (!request) {
@@ -3525,6 +3566,18 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
                        request->ie = (void *)(request->channels + n_channels);
        }
 
+       if (n_match_sets) {
+               if (request->ie)
+                       request->match_sets = (void *)(request->ie + ie_len);
+               else if (request->ssids)
+                       request->match_sets =
+                               (void *)(request->ssids + n_ssids);
+               else
+                       request->match_sets =
+                               (void *)(request->channels + n_channels);
+       }
+       request->n_match_sets = n_match_sets;
+
        i = 0;
        if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
                /* user specified, bail out if channel not found */
@@ -3589,6 +3642,31 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
                }
        }
 
+       i = 0;
+       if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
+               nla_for_each_nested(attr,
+                                   info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+                                   tmp) {
+                       struct nlattr *ssid;
+
+                       nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+                                 nla_data(attr), nla_len(attr),
+                                 nl80211_match_policy);
+                       ssid = tb[NL80211_ATTR_SCHED_SCAN_MATCH_SSID];
+                       if (ssid) {
+                               if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
+                                       err = -EINVAL;
+                                       goto out_free;
+                               }
+                               memcpy(request->match_sets[i].ssid.ssid,
+                                      nla_data(ssid), nla_len(ssid));
+                               request->match_sets[i].ssid.ssid_len =
+                                       nla_len(ssid);
+                       }
+                       i++;
+               }
+       }
+
        if (info->attrs[NL80211_ATTR_IE]) {
                request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
                memcpy((void *)request->ie,
@@ -5433,7 +5511,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_get_key,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5465,7 +5543,7 @@ static struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
                .doit = nl80211_addset_beacon,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5473,7 +5551,7 @@ static struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
                .doit = nl80211_addset_beacon,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5497,7 +5575,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_set_station,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5513,7 +5591,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_del_station,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5546,7 +5624,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_del_mpath,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5554,7 +5632,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_set_bss,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5580,7 +5658,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_get_mesh_config,
                .policy = nl80211_policy,
                /* can be retrieved by unprivileged users */
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5712,7 +5790,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_setdel_pmksa,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5720,7 +5798,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_setdel_pmksa,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5728,7 +5806,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_flush_pmksa,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
@@ -5816,7 +5894,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_set_wds_peer,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
        {
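The nl80211 hunks above tighten the precondition for several commands from "a netdev must exist" (NL80211_FLAG_NEED_NETDEV) to "the netdev must exist and be up" (NL80211_FLAG_NEED_NETDEV_UP), so each .doit handler no longer needs its own interface-up check. A rough user-space sketch of the same idea — a generic pre-handler that enforces per-command flags before dispatching — follows; the struct and flag names are invented for illustration and only mirror the kernel's pattern.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_NEED_NETDEV    (1 << 0)   /* device must exist */
#define FLAG_NEED_NETDEV_UP (1 << 1)   /* device must exist and be up */

struct netdev {
	const char *name;
	bool up;
};

struct op {
	const char *cmd;
	unsigned int internal_flags;
	int (*doit)(struct netdev *dev);
};

static int set_station(struct netdev *dev)
{
	printf("set_station on %s\n", dev->name);
	return 0;
}

/* Generic pre-handler: enforce the flags once, before calling doit(). */
static int dispatch(const struct op *op, struct netdev *dev)
{
	if ((op->internal_flags & (FLAG_NEED_NETDEV | FLAG_NEED_NETDEV_UP)) && !dev)
		return -1;            /* -ENODEV in the kernel */
	if ((op->internal_flags & FLAG_NEED_NETDEV_UP) && !dev->up)
		return -2;            /* -ENETDOWN in the kernel */
	return op->doit(dev);
}

int main(void)
{
	struct netdev wlan0 = { .name = "wlan0", .up = false };
	struct op op = {
		.cmd = "set_station",
		.internal_flags = FLAG_NEED_NETDEV_UP,
		.doit = set_station,
	};

	/* Fails while the interface is down, succeeds once it is up. */
	printf("down: %d\n", dispatch(&op, &wlan0));
	wlan0.up = true;
	printf("up:   %d\n", dispatch(&op, &wlan0));
	return 0;
}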
index 7b0add2badaa2edff8e744d63460a7871b1ddde3..9bcb6bc2ce0c841c229606b1605318ce6d7a750a 100644 (file)
 #define REG_DBG_PRINT(args...)
 #endif
 
+static struct regulatory_request core_request_world = {
+       .initiator = NL80211_REGDOM_SET_BY_CORE,
+       .alpha2[0] = '0',
+       .alpha2[1] = '0',
+       .intersect = false,
+       .processed = true,
+       .country_ie_env = ENVIRON_ANY,
+};
+
 /* Receipt of information from last regulatory request */
-static struct regulatory_request *last_request;
+static struct regulatory_request *last_request = &core_request_world;
 
 /* To trigger userspace events */
 static struct platform_device *reg_pdev;
@@ -150,7 +159,7 @@ static char user_alpha2[2];
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-static void reset_regdomains(void)
+static void reset_regdomains(bool full_reset)
 {
        /* avoid freeing static information or freeing something twice */
        if (cfg80211_regdomain == cfg80211_world_regdom)
@@ -165,6 +174,13 @@ static void reset_regdomains(void)
 
        cfg80211_world_regdom = &world_regdom;
        cfg80211_regdomain = NULL;
+
+       if (!full_reset)
+               return;
+
+       if (last_request != &core_request_world)
+               kfree(last_request);
+       last_request = &core_request_world;
 }
 
 /*
@@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
 {
        BUG_ON(!last_request);
 
-       reset_regdomains();
+       reset_regdomains(false);
 
        cfg80211_world_regdom = rd;
        cfg80211_regdomain = rd;
@@ -363,7 +379,15 @@ static void reg_regdb_query(const char *alpha2)
 
        schedule_work(&reg_regdb_work);
 }
+
+/* Feel free to add any other sanity checks here */
+static void reg_regdb_size_check(void)
+{
+       /* We should ideally BUILD_BUG_ON() but then random builds would fail */
+       WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
+}
 #else
+static inline void reg_regdb_size_check(void) {}
 static inline void reg_regdb_query(const char *alpha2) {}
 #endif /* CONFIG_CFG80211_INTERNAL_REGDB */
 
@@ -1396,7 +1420,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
        }
 
 new_request:
-       kfree(last_request);
+       if (last_request != &core_request_world)
+               kfree(last_request);
 
        last_request = pending_request;
        last_request->intersect = intersect;
@@ -1566,9 +1591,6 @@ static int regulatory_hint_core(const char *alpha2)
 {
        struct regulatory_request *request;
 
-       kfree(last_request);
-       last_request = NULL;
-
        request = kzalloc(sizeof(struct regulatory_request),
                          GFP_KERNEL);
        if (!request)
@@ -1767,7 +1789,7 @@ static void restore_regulatory_settings(bool reset_user)
        mutex_lock(&cfg80211_mutex);
        mutex_lock(&reg_mutex);
 
-       reset_regdomains();
+       reset_regdomains(true);
        restore_alpha2(alpha2, reset_user);
 
        /*
@@ -2029,12 +2051,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
        }
 
        request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+       if (!request_wiphy &&
+           (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
+            last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+               schedule_delayed_work(&reg_timeout, 0);
+               return -ENODEV;
+       }
 
        if (!last_request->intersect) {
                int r;
 
                if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
-                       reset_regdomains();
+                       reset_regdomains(false);
                        cfg80211_regdomain = rd;
                        return 0;
                }
@@ -2055,7 +2083,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
                if (r)
                        return r;
 
-               reset_regdomains();
+               reset_regdomains(false);
                cfg80211_regdomain = rd;
                return 0;
        }
@@ -2080,7 +2108,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
 
                rd = NULL;
 
-               reset_regdomains();
+               reset_regdomains(false);
                cfg80211_regdomain = intersected_rd;
 
                return 0;
@@ -2100,7 +2128,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
        kfree(rd);
        rd = NULL;
 
-       reset_regdomains();
+       reset_regdomains(false);
        cfg80211_regdomain = intersected_rd;
 
        return 0;
@@ -2208,6 +2236,8 @@ int __init regulatory_init(void)
        spin_lock_init(&reg_requests_lock);
        spin_lock_init(&reg_pending_beacons_lock);
 
+       reg_regdb_size_check();
+
        cfg80211_regdomain = cfg80211_world_regdom;
 
        user_alpha2[0] = '9';
@@ -2253,9 +2283,9 @@ void /* __init_or_exit */ regulatory_exit(void)
        mutex_lock(&cfg80211_mutex);
        mutex_lock(&reg_mutex);
 
-       reset_regdomains();
+       reset_regdomains(true);
 
-       kfree(last_request);
+       dev_set_uevent_suppress(&reg_pdev->dev, true);
 
        platform_device_unregister(reg_pdev);
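The regulatory changes above introduce a static sentinel object, core_request_world, so that last_request always points at something valid and kfree() is only ever applied to dynamically allocated requests; reset_regdomains(true) simply restores the sentinel. A minimal stand-alone sketch of that "static default plus guarded free" pattern, with invented struct and field names:

#include <stdio.h>
#include <stdlib.h>

struct request {
	char alpha2[3];
	int processed;
};

static struct request default_request = {
	.alpha2 = "00",
	.processed = 1,
};

static struct request *last_request = &default_request;

static void set_request(const char *alpha2)
{
	struct request *req = calloc(1, sizeof(*req));

	if (!req)
		return;
	snprintf(req->alpha2, sizeof(req->alpha2), "%s", alpha2);

	if (last_request != &default_request)   /* never free the sentinel */
		free(last_request);
	last_request = req;
}

static void reset_requests(void)
{
	if (last_request != &default_request)
		free(last_request);
	last_request = &default_request;        /* back to the static default */
}

int main(void)
{
	set_request("DE");
	printf("current: %s\n", last_request->alpha2);
	reset_requests();
	printf("after reset: %s\n", last_request->alpha2);  /* "00" */
	return 0;
}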
 
index 4d7b83fbc32f0eb042e3a70e23d01d626a5add04..30f68dc76ac04412520605cfb34a8608d5d2e204 100644 (file)
@@ -937,6 +937,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                                  enum nl80211_iftype iftype)
 {
        struct wireless_dev *wdev_iter;
+       u32 used_iftypes = BIT(iftype);
        int num[NUM_NL80211_IFTYPES];
        int total = 1;
        int i, j;
@@ -970,12 +971,14 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 
                num[wdev_iter->iftype]++;
                total++;
+               used_iftypes |= BIT(wdev_iter->iftype);
        }
        mutex_unlock(&rdev->devlist_mtx);
 
        for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
                const struct ieee80211_iface_combination *c;
                struct ieee80211_iface_limit *limits;
+               u32 all_iftypes = 0;
 
                c = &rdev->wiphy.iface_combinations[i];
 
@@ -990,14 +993,28 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
                        if (rdev->wiphy.software_iftypes & BIT(iftype))
                                continue;
                        for (j = 0; j < c->n_limits; j++) {
-                               if (!(limits[j].types & iftype))
+                               all_iftypes |= limits[j].types;
+                               if (!(limits[j].types & BIT(iftype)))
                                        continue;
                                if (limits[j].max < num[iftype])
                                        goto cont;
                                limits[j].max -= num[iftype];
                        }
                }
-               /* yay, it fits */
+
+               /*
+                * Finally check that all iftypes that we're currently
+                * using are actually part of this combination. If they
+                * aren't then we can't use this combination and have
+                * to continue to the next.
+                */
+               if ((all_iftypes & used_iftypes) != used_iftypes)
+                       goto cont;
+
+               /*
+                * This combination covered all interface types and
+                * supported the requested numbers, so we're good.
+                */
                kfree(limits);
                return 0;
  cont:
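The cfg80211_can_change_interface() hunk adds a second condition on top of the per-type limits: every interface type currently in use is collected into a bitmask, and a combination is only accepted if its limits cover all of those types. A small stand-alone sketch of that bitmask check (the iftype numbers and limits below are made up):

#include <stdio.h>

#define BIT(n) (1U << (n))

struct limit { unsigned int types; int max; };

/* A combination is usable only if the union of its limit masks covers
 * every interface type that is currently in use. */
static int combination_covers(const struct limit *limits, int n_limits,
			      unsigned int used_iftypes)
{
	unsigned int all_iftypes = 0;
	int i;

	for (i = 0; i < n_limits; i++)
		all_iftypes |= limits[i].types;

	return (all_iftypes & used_iftypes) == used_iftypes;
}

int main(void)
{
	enum { IFTYPE_STATION = 1, IFTYPE_AP = 2, IFTYPE_P2P_GO = 3 };
	struct limit limits[] = {
		{ .types = BIT(IFTYPE_STATION), .max = 1 },
		{ .types = BIT(IFTYPE_AP) | BIT(IFTYPE_P2P_GO), .max = 1 },
	};

	unsigned int used = BIT(IFTYPE_STATION) | BIT(IFTYPE_AP);
	printf("sta+ap covered: %d\n",
	       combination_covers(limits, 2, used));          /* 1 */
	printf("sta+other covered: %d\n",
	       combination_covers(limits, 2, used | BIT(6))); /* 0 */
	return 0;
}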
index 5ce74a385525c9a5ef036428670663d1d924e063..7372127bd61b53e41faaa895705b83b8ae04c769 100644 (file)
@@ -1349,14 +1349,16 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
                BUG();
        }
        xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
-       memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry));
-       xfrm_policy_put_afinfo(afinfo);
 
-       if (likely(xdst))
+       if (likely(xdst)) {
+               memset(&xdst->u.rt6.rt6i_table, 0,
+                       sizeof(*xdst) - sizeof(struct dst_entry));
                xdst->flo.ops = &xfrm_bundle_fc_ops;
-       else
+       } else
                xdst = ERR_PTR(-ENOBUFS);
 
+       xfrm_policy_put_afinfo(afinfo);
+
        return xdst;
 }
 
@@ -1497,7 +1499,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                goto free_dst;
 
        /* Copy neighbour for reachability confirmation */
-       dst0->neighbour = neigh_clone(dst->neighbour);
+       dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour(dst)));
 
        xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
        xfrm_init_pmtu(dst_prev);
@@ -1917,6 +1919,9 @@ no_transform:
        }
 ok:
        xfrm_pols_put(pols, drop_pols);
+       if (dst && dst->xfrm &&
+           dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
+               dst->flags |= DST_XFRM_TUNNEL;
        return dst;
 
 nopol:
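The first xfrm_policy hunk fixes a use-before-check: the old code memset() part of the bundle before testing whether dst_alloc() had succeeded, and also dropped the afinfo reference before the result was known to be valid. The safe ordering is allocate, check, then initialise, as in this stand-alone sketch (the types are placeholders, not the real dst structures):

#include <stdlib.h>
#include <string.h>

struct bundle {
	int header;      /* stands in for struct dst_entry */
	int private[8];  /* stands in for the xfrm-specific tail */
};

static struct bundle *alloc_bundle(void)
{
	struct bundle *b = malloc(sizeof(*b));

	if (!b)                    /* check first ... */
		return NULL;
	/* ... and only then touch the memory */
	memset(b->private, 0, sizeof(b->private));
	b->header = 0;
	return b;
}

int main(void)
{
	struct bundle *b = alloc_bundle();

	free(b);
	return 0;
}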
index b11ea692bd7d0870c8586e115bcfc6b6a69c2997..3235023eaf4eef04b8c0c003850ee0a30c5b6ccb 100644 (file)
@@ -166,7 +166,7 @@ static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
        }
 
        if (xfrm_aevent_is_on(xs_net(x)))
-               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+               x->repl->notify(x, XFRM_REPLAY_UPDATE);
 }
 
 static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
@@ -293,7 +293,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
        }
 
        if (xfrm_aevent_is_on(xs_net(x)))
-               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+               x->repl->notify(x, XFRM_REPLAY_UPDATE);
 }
 
 static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
@@ -502,7 +502,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
        }
 
        if (xfrm_aevent_is_on(xs_net(x)))
-               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
+               x->repl->notify(x, XFRM_REPLAY_UPDATE);
 }
 
 static struct xfrm_replay xfrm_replay_legacy = {
index 5fdf10dc1d8a45eb2ed06bffbb9425233b02c006..b1a3ceeb215877bc55df031a8c2ab08e7d7d9267 100644 (file)
@@ -596,11 +596,10 @@ void menu_get_ext_help(struct menu *menu, struct gstr *help)
        struct symbol *sym = menu->sym;
 
        if (menu_has_help(menu)) {
-               if (sym->name) {
+               if (sym->name)
                        str_printf(help, "%s%s:\n\n", CONFIG_, sym->name);
-                       str_append(help, _(menu_get_help(menu)));
-                       str_append(help, "\n");
-               }
+               str_append(help, _(menu_get_help(menu)));
+               str_append(help, "\n");
        } else {
                str_append(help, nohelp_text);
        }
index a4fe923c01315205e3038b3da5b3514c6d134291..25f1e71c9bb507d490cf5152b51de21c1cc86c47 100644 (file)
@@ -242,33 +242,61 @@ if ($kconfig) {
     read_kconfig($kconfig);
 }
 
+sub convert_vars {
+    my ($line, %vars) = @_;
+
+    my $process = "";
+
+    while ($line =~ s/^(.*?)(\$\((.*?)\))//) {
+       my $start = $1;
+       my $variable = $2;
+       my $var = $3;
+
+       if (defined($vars{$var})) {
+           $process .= $start . $vars{$var};
+       } else {
+           $process .= $start . $variable;
+       }
+    }
+
+    $process .= $line;
+
+    return $process;
+}
+
 # Read all Makefiles to map the configs to the objects
 foreach my $makefile (@makefiles) {
 
-    my $cont = 0;
+    my $line = "";
+    my %make_vars;
 
     open(MIN,$makefile) || die "Can't open $makefile";
     while (<MIN>) {
+       # if this line ends with a backslash, continue
+       chomp;
+       if (/^(.*)\\$/) {
+           $line .= $1;
+           next;
+       }
+
+       $line .= $_;
+       $_ = $line;
+       $line = "";
+
        my $objs;
 
-       # is this a line after a line with a backslash?
-       if ($cont && /(\S.*)$/) {
-           $objs = $1;
-       }
-       $cont = 0;
+       $_ = convert_vars($_, %make_vars);
 
        # collect objects after obj-$(CONFIG_FOO_BAR)
        if (/obj-\$\((CONFIG_[^\)]*)\)\s*[+:]?=\s*(.*)/) {
            $var = $1;
            $objs = $2;
+
+       # check if variables are set
+       } elsif (/^\s*(\S+)\s*[:]?=\s*(.*\S)/) {
+           $make_vars{$1} = $2;
        }
        if (defined($objs)) {
-           # test if the line ends with a backslash
-           if ($objs =~ m,(.*)\\$,) {
-               $objs = $1;
-               $cont = 1;
-           }
-
            foreach my $obj (split /\s+/,$objs) {
                $obj =~ s/-/_/g;
                if ($obj =~ /(.*)\.o$/) {
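The streamline_config.pl change teaches the Makefile scanner to join backslash-continued lines and to expand $(VAR) references from variables it has already seen, instead of passing them through verbatim. The script itself is Perl; purely to illustrate the substitution logic, here is a small C sketch of the $(VAR) expansion step (the variable table and names are hypothetical, and line joining is omitted):

#include <stdio.h>
#include <string.h>

struct var { const char *name, *value; };

static const char *lookup(const struct var *vars, size_t n,
			  const char *name, size_t len)
{
	for (size_t i = 0; i < n; i++)
		if (strlen(vars[i].name) == len && !strncmp(vars[i].name, name, len))
			return vars[i].value;
	return NULL;
}

/* bounded append helper so the expansion can never overrun 'out' */
static size_t append(char *out, size_t outsz, size_t used,
		     const char *s, size_t len)
{
	while (len-- && used + 1 < outsz)
		out[used++] = *s++;
	out[used] = '\0';
	return used;
}

static void expand(const char *line, const struct var *vars, size_t n,
		   char *out, size_t outsz)
{
	size_t used = 0;

	out[0] = '\0';
	while (*line) {
		const char *open = strstr(line, "$(");
		const char *close = open ? strchr(open, ')') : NULL;
		if (!open || !close) {
			append(out, outsz, used, line, strlen(line));
			return;
		}
		used = append(out, outsz, used, line, open - line);
		const char *val = lookup(vars, n, open + 2, close - open - 2);
		if (val)
			used = append(out, outsz, used, val, strlen(val));
		else	/* unknown variable: keep it verbatim */
			used = append(out, outsz, used, open, close - open + 1);
		line = close + 1;
	}
}

int main(void)
{
	const struct var vars[] = { { "obj", "drivers/net" } };
	char buf[256];

	expand("obj-$(CONFIG_FOO) += $(obj)/foo.o $(UNKNOWN)", vars, 1,
	       buf, sizeof(buf));
	puts(buf);   /* obj-$(CONFIG_FOO) += drivers/net/foo.o $(UNKNOWN) */
	return 0;
}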
index e26e2fb462d41808167ffa31cc4bb025fd583e2b..f210eae9bd7ecd9b3b22d4016c8e7011b019fef2 100644 (file)
@@ -905,6 +905,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
        if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections)
                return;
 
+       /* We're looking for an object */
+       if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
+               return;
+
        /* Handle all-NULL symbols allocated into .bss */
        if (info->sechdrs[get_secindex(info, sym)].sh_type & SHT_NOBITS) {
                zeros = calloc(1, sym->st_size);
index 413c53693e62d818544c4c931ccf2c6ef48fbb8b..e3cd34525894d1775af6b3d882a97bb9e92e228d 100644 (file)
@@ -132,8 +132,10 @@ static struct module *new_module(char *modname)
        /* strip trailing .o */
        s = strrchr(p, '.');
        if (s != NULL)
-               if (strcmp(s, ".o") == 0)
+               if (strcmp(s, ".o") == 0) {
                        *s = '\0';
+                       mod->is_dot_o = 1;
+               }
 
        /* add to list */
        mod->name = p;
@@ -254,6 +256,28 @@ static enum export export_no(const char *s)
        return export_unknown;
 }
 
+static const char *sec_name(struct elf_info *elf, int secindex);
+
+#define strstarts(str, prefix) (strncmp(str, prefix, strlen(prefix)) == 0)
+
+static enum export export_from_secname(struct elf_info *elf, unsigned int sec)
+{
+       const char *secname = sec_name(elf, sec);
+
+       if (strstarts(secname, "___ksymtab+"))
+               return export_plain;
+       else if (strstarts(secname, "___ksymtab_unused+"))
+               return export_unused;
+       else if (strstarts(secname, "___ksymtab_gpl+"))
+               return export_gpl;
+       else if (strstarts(secname, "___ksymtab_unused_gpl+"))
+               return export_unused_gpl;
+       else if (strstarts(secname, "___ksymtab_gpl_future+"))
+               return export_gpl_future;
+       else
+               return export_unknown;
+}
+
 static enum export export_from_sec(struct elf_info *elf, unsigned int sec)
 {
        if (sec == elf->export_sec)
@@ -563,7 +587,13 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                               Elf_Sym *sym, const char *symname)
 {
        unsigned int crc;
-       enum export export = export_from_sec(info, get_secindex(info, sym));
+       enum export export;
+
+       if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
+           strncmp(symname, "__ksymtab", 9) == 0)
+               export = export_from_secname(info, get_secindex(info, sym));
+       else
+               export = export_from_sec(info, get_secindex(info, sym));
 
        switch (sym->st_shndx) {
        case SHN_COMMON:
@@ -822,7 +852,7 @@ static void check_section(const char *modname, struct elf_info *elf,
 
 #define ALL_INIT_DATA_SECTIONS \
        ".init.setup$", ".init.rodata$", \
-       ".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$" \
+       ".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$", \
        ".init.data$", ".devinit.data$", ".cpuinit.data$", ".meminit.data$"
 #define ALL_EXIT_DATA_SECTIONS \
        ".exit.data$", ".devexit.data$", ".cpuexit.data$", ".memexit.data$"
index 2031119080dce2ac5d8ca63aeb967f0505cc5f74..51207e4d5f8bcae0add675e31cf579f28763d1d4 100644 (file)
@@ -113,6 +113,7 @@ struct module {
        int has_cleanup;
        struct buffer dev_table_buf;
        char         srcversion[25];
+       int is_dot_o;
 };
 
 struct elf_info {
index f6cbc3ddb68ba67bd5ecc0f34d77f2908d89a589..3c6c0b14c8073af178f83e238683b23f5e95e12b 100644 (file)
@@ -238,14 +238,14 @@ EOF
 fi
 
 # Build header package
-(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$)
-(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$)
-(cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$)
+(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
+(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
+(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
-(cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -)
-(cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -)
-rm -f /tmp/files$$ /tmp/objfiles$$
+(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
+(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
+rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
 arch=$(dpkg --print-architecture)
 
 cat <<EOF >> debian/control
index f40a6af6bf40068f2a074bf6bff86d68b2a05440..54e35c1e5948c521f9fdebf57c27072b6d844f34 100644 (file)
@@ -462,7 +462,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr,  /* is SHT_REL or SHT_RELA */
                succeed_file();
        }
        if (w(txthdr->sh_type) != SHT_PROGBITS ||
-           !(w(txthdr->sh_flags) & SHF_EXECINSTR))
+           !(_w(txthdr->sh_flags) & SHF_EXECINSTR))
                return NULL;
        return txtname;
 }
index 36cc0cc39e78e135630384e6500f4a789fe7aa2a..b566eba4a65cc16128b34152154c3ba26505b692 100644 (file)
@@ -57,23 +57,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
 static int d_namespace_path(struct path *path, char *buf, int buflen,
                            char **name, int flags)
 {
-       struct path root, tmp;
        char *res;
-       int connected, error = 0;
+       int error = 0;
+       int connected = 1;
+
+       if (path->mnt->mnt_flags & MNT_INTERNAL) {
+               /* it's not mounted anywhere */
+               res = dentry_path(path->dentry, buf, buflen);
+               *name = res;
+               if (IS_ERR(res)) {
+                       *name = buf;
+                       return PTR_ERR(res);
+               }
+               if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+                   strncmp(*name, "/sys/", 5) == 0) {
+                       /* TODO: convert over to using a per namespace
+                        * control instead of hard coded /proc
+                        */
+                       return prepend(name, *name - buf, "/proc", 5);
+               }
+               return 0;
+       }
 
-       /* Get the root we want to resolve too, released below */
+       /* resolve paths relative to chroot?*/
        if (flags & PATH_CHROOT_REL) {
-               /* resolve paths relative to chroot */
+               struct path root;
                get_fs_root(current->fs, &root);
-       } else {
-               /* resolve paths relative to namespace */
-               root.mnt = current->nsproxy->mnt_ns->root;
-               root.dentry = root.mnt->mnt_root;
-               path_get(&root);
+               res = __d_path(path, &root, buf, buflen);
+               if (res && !IS_ERR(res)) {
+                       /* everything's fine */
+                       *name = res;
+                       path_put(&root);
+                       goto ok;
+               }
+               path_put(&root);
+               connected = 0;
        }
 
-       tmp = root;
-       res = __d_path(path, &tmp, buf, buflen);
+       res = d_absolute_path(path, buf, buflen);
 
        *name = res;
        /* handle error conditions - and still allow a partial path to
@@ -84,7 +105,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
+       if (!our_mnt(path->mnt))
+               connected = 0;
 
+ok:
        /* Handle two cases:
         * 1. A deleted dentry && profile is not allowing mediation of deleted
         * 2. On some filesystems, newly allocated dentries appear to the
@@ -97,10 +121,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                        goto out;
        }
 
-       /* Determine if the path is connected to the expected root */
-       connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
-
-       /* If the path is not connected,
+       /* If the path is not connected to the expected root,
         * check if it is a sysctl and handle specially else remove any
         * leading / that __d_path may have returned.
         * Unless
@@ -112,17 +133,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
         *     namespace root.
         */
        if (!connected) {
-               /* is the disconnect path a sysctl? */
-               if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
-                   strncmp(*name, "/sys/", 5) == 0) {
-                       /* TODO: convert over to using a per namespace
-                        * control instead of hard coded /proc
-                        */
-                       error = prepend(name, *name - buf, "/proc", 5);
-               } else if (!(flags & PATH_CONNECT_PATH) &&
+               if (!(flags & PATH_CONNECT_PATH) &&
                           !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
-                            (tmp.mnt == current->nsproxy->mnt_ns->root &&
-                             tmp.dentry == tmp.mnt->mnt_root))) {
+                            our_mnt(path->mnt))) {
                        /* disconnected path, don't return pathname starting
                         * with '/'
                         */
@@ -133,8 +146,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
 out:
-       path_put(&root);
-
        return error;
 }
 
index da21e7c93e451a36a0f9458fa9a292da9fd9ee8b..8bfbd13497467522462738226117d54c06044c68 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/prctl.h>
 #include <linux/securebits.h>
 #include <linux/user_namespace.h>
+#include <linux/personality.h>
 
 #ifdef CONFIG_ANDROID_PARANOID_NETWORK
 #include <linux/android_aid.h>
@@ -521,6 +522,11 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
        }
 skip:
 
+       /* if we have fs caps, clear dangerous personality flags */
+       if (!cap_issubset(new->cap_permitted, old->cap_permitted))
+               bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+
        /* Don't let someone trace a set[ug]id/setpcap binary with the revised
         * credentials unless they have the appropriate permit
         */
index da36d2c085a4ff732967a273fe41423da0ae1b46..5335605571fe41c98014b9de7eb6791eecdc5a2f 100644 (file)
@@ -177,8 +177,8 @@ void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
        strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
 
        result = ima_store_template(entry, violation, inode);
-       if (!result)
+       if (!result || result == -EEXIST)
                iint->flags |= IMA_MEASURED;
-       else
+       if (result < 0)
                kfree(entry);
 }
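The ima_main.c hunk distinguishes "already in the measurement list" (-EEXIST) from a real failure: a duplicate still marks the inode as measured, but because the freshly built entry was never linked into the list it is freed like any other non-zero result. A small sketch of that return-code handling, with invented names standing in for the iint flags and template entry:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_MEASURED 0x01

static int store(int result, unsigned int *flags, void **entry)
{
	if (!result || result == -EEXIST)   /* success or duplicate: measured */
		*flags |= FLAG_MEASURED;
	if (result < 0) {                   /* entry was not added to the list */
		free(*entry);
		*entry = NULL;
	}
	return result;
}

int main(void)
{
	unsigned int flags = 0;
	void *entry = malloc(16);

	store(-EEXIST, &flags, &entry);     /* duplicate measurement */
	printf("measured=%d entry kept=%d\n",
	       !!(flags & FLAG_MEASURED), entry != NULL);
	free(entry);                        /* free(NULL) is a no-op */
	return 0;
}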
index 8e28f04a5e2e8282d2bdd940393fda3a889e77cc..55a6271bce7ab7e0b87cd0ac250dd47ea68f9e23 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/slab.h>
 #include "ima.h"
 
+#define AUDIT_CAUSE_LEN_MAX 32
+
 LIST_HEAD(ima_measurements);   /* list of all measurements */
 
 /* key: inode (before secure-hashing a file) */
@@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
 
        result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
        if (result != 0)
-               pr_err("IMA: Error Communicating to TPM chip\n");
+               pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
+                      result);
        return result;
 }
 
@@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
 {
        u8 digest[IMA_DIGEST_SIZE];
        const char *audit_cause = "hash_added";
+       char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
        int audit_info = 1;
-       int result = 0;
+       int result = 0, tpmresult = 0;
 
        mutex_lock(&ima_extend_list_mutex);
        if (!violation) {
                memcpy(digest, entry->digest, sizeof digest);
                if (ima_lookup_digest_entry(digest)) {
                        audit_cause = "hash_exists";
+                       result = -EEXIST;
                        goto out;
                }
        }
@@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
        if (violation)          /* invalidate pcr */
                memset(digest, 0xff, sizeof digest);
 
-       result = ima_pcr_extend(digest);
-       if (result != 0) {
-               audit_cause = "TPM error";
+       tpmresult = ima_pcr_extend(digest);
+       if (tpmresult != 0) {
+               snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
+                        tpmresult);
+               audit_cause = tpm_audit_cause;
                audit_info = 0;
        }
 out:
index 5b366d7af3c4dc17b595c67af3dbe99d5fd69df1..69ff52c08e97bb0eab715b1ea61c709e4f80faad 100644 (file)
@@ -102,7 +102,8 @@ int user_update(struct key *key, const void *data, size_t datalen)
                key->expiry = 0;
        }
 
-       kfree_rcu(zap, rcu);
+       if (zap)
+               kfree_rcu(zap, rcu);
 
 error:
        return ret;
index cfe2d72d3fb76b0f36a61471244dc1c8ec84b729..e2b74ebdc383d69025bc6573baf271a451ba1c56 100644 (file)
@@ -139,7 +139,9 @@ static void sel_netport_insert(struct sel_netport *port)
        if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
                struct sel_netport *tail;
                tail = list_entry(
-                       rcu_dereference(sel_netport_hash[idx].list.prev),
+                       rcu_dereference_protected(
+                               sel_netport_hash[idx].list.prev,
+                               lockdep_is_held(&sel_netport_lock)),
                        struct sel_netport, list);
                list_del_rcu(&tail->list);
                call_rcu(&tail->rcu, sel_netport_free);
index 35459340019e44399775c2f96aa9b5871f8d566c..27a96732b872bcdbf12d3d686dc674a9d8e7fd99 100644 (file)
@@ -1241,6 +1241,7 @@ static int sel_make_bools(void)
                kfree(bool_pending_names[i]);
        kfree(bool_pending_names);
        kfree(bool_pending_values);
+       bool_num = 0;
        bool_pending_names = NULL;
        bool_pending_values = NULL;
 
index 9fc2e15841c96f54edbefb24a96c40405d06bedd..892494ac58e2250c7a7af100cb4113d30d82d3dc 100644 (file)
@@ -205,30 +205,32 @@ int tomoyo_mount_permission(char *dev_name, struct path *path, char *type,
        if (flags & MS_REMOUNT) {
                type = TOMOYO_MOUNT_REMOUNT_KEYWORD;
                flags &= ~MS_REMOUNT;
-       }
-       if (flags & MS_MOVE) {
-               type = TOMOYO_MOUNT_MOVE_KEYWORD;
-               flags &= ~MS_MOVE;
-       }
-       if (flags & MS_BIND) {
+       } else if (flags & MS_BIND) {
                type = TOMOYO_MOUNT_BIND_KEYWORD;
                flags &= ~MS_BIND;
-       }
-       if (flags & MS_UNBINDABLE) {
-               type = TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD;
-               flags &= ~MS_UNBINDABLE;
-       }
-       if (flags & MS_PRIVATE) {
+       } else if (flags & MS_SHARED) {
+               if (flags & (MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+                       return -EINVAL;
+               type = TOMOYO_MOUNT_MAKE_SHARED_KEYWORD;
+               flags &= ~MS_SHARED;
+       } else if (flags & MS_PRIVATE) {
+               if (flags & (MS_SHARED | MS_SLAVE | MS_UNBINDABLE))
+                       return -EINVAL;
                type = TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD;
                flags &= ~MS_PRIVATE;
-       }
-       if (flags & MS_SLAVE) {
+       } else if (flags & MS_SLAVE) {
+               if (flags & (MS_SHARED | MS_PRIVATE | MS_UNBINDABLE))
+                       return -EINVAL;
                type = TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD;
                flags &= ~MS_SLAVE;
-       }
-       if (flags & MS_SHARED) {
-               type = TOMOYO_MOUNT_MAKE_SHARED_KEYWORD;
-               flags &= ~MS_SHARED;
+       } else if (flags & MS_UNBINDABLE) {
+               if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE))
+                       return -EINVAL;
+               type = TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD;
+               flags &= ~MS_UNBINDABLE;
+       } else if (flags & MS_MOVE) {
+               type = TOMOYO_MOUNT_MOVE_KEYWORD;
+               flags &= ~MS_MOVE;
        }
        if (!type)
                type = "<NULL>";
index d1e05b0477154447ce7cb75dcdb3cb8e2ed94259..a339187cad750ef375bbec610510e59b5e767a4e 100644 (file)
@@ -95,7 +95,6 @@ char *tomoyo_realpath_from_path(struct path *path)
                return NULL;
        is_dir = dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode);
        while (1) {
-               struct path ns_root = { .mnt = NULL, .dentry = NULL };
                char *pos;
                buf_len <<= 1;
                kfree(buf);
@@ -128,8 +127,12 @@ char *tomoyo_realpath_from_path(struct path *path)
                /* If we don't have a vfsmount, we can't calculate. */
                if (!path->mnt)
                        break;
-               /* go to whatever namespace root we are under */
-               pos = __d_path(path, &ns_root, buf, buf_len);
+               pos = d_absolute_path(path, buf, buf_len - 1);
+               /* If path is disconnected, use "[unknown]" instead. */
+               if (pos == ERR_PTR(-EINVAL)) {
+                       name = tomoyo_encode("[unknown]");
+                       break;
+               }
                /* Prepend "/proc" prefix if using internal proc vfs mount. */
                if (!IS_ERR(pos) && (path->mnt->mnt_flags & MNT_INTERNAL) &&
                    (path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) {
index 64417a7332200a8dda0acf4adf7ec36877d0719e..d8c670c9d62c4e35cab25ac6e36d09466e2716d9 100644 (file)
@@ -475,7 +475,7 @@ static int load_firmware(struct echoaudio *chip)
        const struct firmware *fw;
        int box_type, err;
 
-       if (snd_BUG_ON(!chip->dsp_code_to_load || !chip->comm_page))
+       if (snd_BUG_ON(!chip->comm_page))
                return -EPERM;
 
        /* See if the ASIC is present and working - only if the DSP is already loaded */
index 45b4a8d70e085a36849f32d971e0d987b6ded638..39e1a6a3ede8d85dba5a0fc0bfac78ab30b0df49 100644 (file)
@@ -1328,7 +1328,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                for (i = 0; i < c->cvt_setups.used; i++) {
                        p = snd_array_elem(&c->cvt_setups, i);
                        if (!p->active && p->stream_tag == stream_tag &&
-                           get_wcaps_type(get_wcaps(codec, p->nid)) == type)
+                           get_wcaps_type(get_wcaps(c, p->nid)) == type)
                                p->dirty = 1;
                }
        }
@@ -1651,7 +1651,11 @@ static void put_vol_mute(struct hda_codec *codec, struct hda_amp_info *info,
        parm = ch ? AC_AMP_SET_RIGHT : AC_AMP_SET_LEFT;
        parm |= direction == HDA_OUTPUT ? AC_AMP_SET_OUTPUT : AC_AMP_SET_INPUT;
        parm |= index << AC_AMP_SET_INDEX_SHIFT;
-       parm |= val;
+       if ((val & HDA_AMP_MUTE) && !(info->amp_caps & AC_AMPCAP_MUTE) &&
+           (info->amp_caps & AC_AMPCAP_MIN_MUTE))
+               ; /* set the zero value as a fake mute */
+       else
+               parm |= val;
        snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, parm);
        info->vol[ch] = val;
 }
@@ -1918,7 +1922,7 @@ int snd_hda_mixer_amp_tlv(struct snd_kcontrol *kcontrol, int op_flag,
        val1 = -((caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT);
        val1 += ofs;
        val1 = ((int)val1) * ((int)val2);
-       if (min_mute)
+       if (min_mute || (caps & AC_AMPCAP_MIN_MUTE))
                val2 |= TLV_DB_SCALE_MUTE;
        if (put_user(SNDRV_CTL_TLVT_DB_SCALE, _tlv))
                return -EFAULT;
@@ -2187,6 +2191,39 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        return 0;
 }
 
+typedef int (*map_slave_func_t)(void *, struct snd_kcontrol *);
+
+/* apply the function to all matching slave ctls in the mixer list */
+static int map_slaves(struct hda_codec *codec, const char * const *slaves,
+                     map_slave_func_t func, void *data)
+{
+       struct hda_nid_item *items;
+       const char * const *s;
+       int i, err;
+
+       items = codec->mixers.list;
+       for (i = 0; i < codec->mixers.used; i++) {
+               struct snd_kcontrol *sctl = items[i].kctl;
+               if (!sctl || !sctl->id.name ||
+                   sctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER)
+                       continue;
+               for (s = slaves; *s; s++) {
+                       if (!strcmp(sctl->id.name, *s)) {
+                               err = func(data, sctl);
+                               if (err)
+                                       return err;
+                               break;
+                       }
+               }
+       }
+       return 0;
+}
+
+static int check_slave_present(void *data, struct snd_kcontrol *sctl)
+{
+       return 1;
+}
+
 /**
  * snd_hda_add_vmaster - create a virtual master control and add slaves
  * @codec: HD-audio codec
@@ -2207,12 +2244,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
                        unsigned int *tlv, const char * const *slaves)
 {
        struct snd_kcontrol *kctl;
-       const char * const *s;
        int err;
 
-       for (s = slaves; *s && !snd_hda_find_mixer_ctl(codec, *s); s++)
-               ;
-       if (!*s) {
+       err = map_slaves(codec, slaves, check_slave_present, NULL);
+       if (err != 1) {
                snd_printdd("No slave found for %s\n", name);
                return 0;
        }
@@ -2223,23 +2258,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
        if (err < 0)
                return err;
 
-       for (s = slaves; *s; s++) {
-               struct snd_kcontrol *sctl;
-               int i = 0;
-               for (;;) {
-                       sctl = _snd_hda_find_mixer_ctl(codec, *s, i);
-                       if (!sctl) {
-                               if (!i)
-                                       snd_printdd("Cannot find slave %s, "
-                                                   "skipped\n", *s);
-                               break;
-                       }
-                       err = snd_ctl_add_slave(kctl, sctl);
-                       if (err < 0)
-                               return err;
-                       i++;
-               }
-       }
+       err = map_slaves(codec, slaves, (map_slave_func_t)snd_ctl_add_slave,
+                        kctl);
+       if (err < 0)
+               return err;
        return 0;
 }
 EXPORT_SYMBOL_HDA(snd_hda_add_vmaster);
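snd_hda_add_vmaster() above is reworked around a small helper, map_slaves(), that walks the codec's mixer-control list and applies a callback to every control whose name matches one of the slave names — once to check that at least one slave exists, once to attach the slaves to the virtual master. The same "apply a function to matching items" shape, outside the kernel and with invented control names:

#include <stdio.h>
#include <string.h>

struct ctl { const char *name; int value; };

typedef int (*map_func_t)(void *data, struct ctl *ctl);

static int map_matching(struct ctl *ctls, int n, const char *const *names,
			map_func_t func, void *data)
{
	int i, err;
	const char *const *s;

	for (i = 0; i < n; i++)
		for (s = names; *s; s++)
			if (!strcmp(ctls[i].name, *s)) {
				err = func(data, &ctls[i]);
				if (err)
					return err;
				break;
			}
	return 0;
}

/* any match is enough; mirrors check_slave_present() returning 1 */
static int count_present(void *data, struct ctl *ctl)
{
	(void)data;
	(void)ctl;
	return 1;
}

static int attach(void *data, struct ctl *ctl)
{
	printf("attaching %s to %s\n", ctl->name, (const char *)data);
	return 0;
}

int main(void)
{
	struct ctl ctls[] = {
		{ "Front Playback Volume", 0 },
		{ "PCM Playback Volume", 0 },
	};
	const char *const slaves[] = {
		"Front Playback Volume", "Surround Playback Volume", NULL,
	};

	if (map_matching(ctls, 2, slaves, count_present, NULL) != 1)
		return 0;        /* no slave found, nothing to do */
	map_matching(ctls, 2, slaves, attach, "Master Playback Volume");
	return 0;
}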
index 59c97306c1decee089817fbd80f4ea79f39cacd9..eff1fc54e56c60897d1b236841662b765196d88a 100644 (file)
@@ -302,6 +302,9 @@ enum {
 #define AC_AMPCAP_MUTE                 (1<<31)    /* mute capable */
 #define AC_AMPCAP_MUTE_SHIFT           31
 
+/* driver-specific amp-caps: using bits 24-30 */
+#define AC_AMPCAP_MIN_MUTE             (1 << 30) /* min-volume = mute */
+
 /* Connection list */
 #define AC_CLIST_LENGTH                        (0x7f<<0)
 #define AC_CLIST_LONG                  (1<<7)
index 981b6fd1ae7cbc2adb8725662b4e2d33d035c0e0..c5c97880f505f936d6287364d809d5e372a8c586 100644 (file)
@@ -702,11 +702,13 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 {
        struct azx *chip = bus->private_data;
        unsigned long timeout;
+       unsigned long loopcounter;
        int do_poll = 0;
 
  again:
        timeout = jiffies + msecs_to_jiffies(1000);
-       for (;;) {
+
+       for (loopcounter = 0;; loopcounter++) {
                if (chip->polling_mode || do_poll) {
                        spin_lock_irq(&chip->reg_lock);
                        azx_update_rirb(chip);
@@ -722,7 +724,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
                }
                if (time_after(jiffies, timeout))
                        break;
-               if (bus->needs_damn_long_delay)
+               if (bus->needs_damn_long_delay || loopcounter > 3000)
                        msleep(2); /* temporary workaround */
                else {
                        udelay(10);
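The azx_rirb_get_response() change keeps the short 10us busy-wait for the first iterations but backs off to a 2ms sleep once the loop counter passes a threshold, so a codec that answers slowly no longer burns CPU for the whole 1-second window. A user-space sketch of that escalating-poll idea follows; poll_hw() is a stand-in for reading the RIRB, the thresholds are arbitrary, and the iteration cap replaces the kernel's jiffies-based timeout.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool poll_hw(unsigned long iteration)
{
	return iteration > 400;           /* pretend the reply arrives late */
}

static int wait_for_response(void)
{
	struct timespec short_wait = { 0, 10 * 1000 };        /* 10 us */
	struct timespec long_wait  = { 0, 2 * 1000 * 1000 };  /* 2 ms  */
	unsigned long loopcounter;

	for (loopcounter = 0; loopcounter < 100000; loopcounter++) {
		if (poll_hw(loopcounter))
			return 0;                             /* got the answer */
		/* busy-ish wait at first, back off to longer sleeps later */
		nanosleep(loopcounter > 300 ? &long_wait : &short_wait, NULL);
	}
	return -1;                                            /* timed out */
}

int main(void)
{
	printf("%s\n", wait_for_response() == 0 ? "response" : "timeout");
	return 0;
}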
index 08ec073444e269a770d094e21f2076faa3581d9d..e289a13c4889a41468f32fe7570975149a28fb4f 100644 (file)
@@ -474,7 +474,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
 }
 
 /* get the widget type from widget capability bits */
-#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
+static inline int get_wcaps_type(unsigned int wcaps)
+{
+       if (!wcaps)
+               return -1; /* invalid type */
+       return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+}
 
 static inline unsigned int get_wcaps_channels(u32 wcaps)
 {
index bfe74c2fb0795b0da254d7c3bf1670a802148b91..6fe944a386c216199744135b7b9f5cb1e59a766e 100644 (file)
@@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
                [AC_WID_BEEP] = "Beep Generator Widget",
                [AC_WID_VENDOR] = "Vendor Defined Widget",
        };
+       if (wid_value == -1)
+               return "UNKNOWN Widget";
        wid_value &= 0xf;
        if (names[wid_value])
                return names[wid_value];
index cf1fa36728b9acf0cc911b25f8d7cf067c49b648..4cf3266c94248eaee552a9d1d0a809a48f325f45 100644 (file)
@@ -136,6 +136,7 @@ struct conexant_spec {
        unsigned int thinkpad:1;
        unsigned int hp_laptop:1;
        unsigned int asus:1;
+       unsigned int single_adc_amp:1;
 
        unsigned int adc_switching:1;
 
@@ -1916,6 +1917,10 @@ static void cxt5051_init_mic_port(struct hda_codec *codec, hda_nid_t nid,
        snd_hda_codec_write(codec, nid, 0,
                            AC_VERB_SET_UNSOLICITED_ENABLE,
                            AC_USRSP_EN | event);
+}
+
+static void cxt5051_init_mic_jack(struct hda_codec *codec, hda_nid_t nid)
+{
        snd_hda_input_jack_add(codec, nid, SND_JACK_MICROPHONE, NULL);
        snd_hda_input_jack_report(codec, nid);
 }
@@ -1933,7 +1938,6 @@ static int cxt5051_init(struct hda_codec *codec)
        struct conexant_spec *spec = codec->spec;
 
        conexant_init(codec);
-       conexant_init_jacks(codec);
 
        if (spec->auto_mic & AUTO_MIC_PORTB)
                cxt5051_init_mic_port(codec, 0x17, CXT5051_PORTB_EVENT);
@@ -2066,6 +2070,12 @@ static int patch_cxt5051(struct hda_codec *codec)
        if (spec->beep_amp)
                snd_hda_attach_beep_device(codec, spec->beep_amp);
 
+       conexant_init_jacks(codec);
+       if (spec->auto_mic & AUTO_MIC_PORTB)
+               cxt5051_init_mic_jack(codec, 0x17);
+       if (spec->auto_mic & AUTO_MIC_PORTC)
+               cxt5051_init_mic_jack(codec, 0x18);
+
        return 0;
 }
 
@@ -3993,9 +4003,14 @@ static void cx_auto_init_output(struct hda_codec *codec)
        int i;
 
        mute_outputs(codec, spec->multiout.num_dacs, spec->multiout.dac_nids);
-       for (i = 0; i < cfg->hp_outs; i++)
+       for (i = 0; i < cfg->hp_outs; i++) {
+               unsigned int val = PIN_OUT;
+               if (snd_hda_query_pin_caps(codec, cfg->hp_pins[i]) &
+                   AC_PINCAP_HP_DRV)
+                       val |= AC_PINCTL_HP_EN;
                snd_hda_codec_write(codec, cfg->hp_pins[i], 0,
-                                   AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
+                                   AC_VERB_SET_PIN_WIDGET_CONTROL, val);
+       }
        mute_outputs(codec, cfg->hp_outs, cfg->hp_pins);
        mute_outputs(codec, cfg->line_outs, cfg->line_out_pins);
        mute_outputs(codec, cfg->speaker_outs, cfg->speaker_pins);
@@ -4117,7 +4132,8 @@ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
                err = snd_hda_ctl_add(codec, nid, kctl);
                if (err < 0)
                        return err;
-               if (!(query_amp_caps(codec, nid, hda_dir) & AC_AMPCAP_MUTE))
+               if (!(query_amp_caps(codec, nid, hda_dir) &
+                     (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)))
                        break;
        }
        return 0;
@@ -4205,6 +4221,8 @@ static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
                int idx = get_input_connection(codec, adc_nid, nid);
                if (idx < 0)
                        continue;
+               if (spec->single_adc_amp)
+                       idx = 0;
                return cx_auto_add_volume_idx(codec, label, pfx,
                                              cidx, adc_nid, HDA_INPUT, idx);
        }
@@ -4245,14 +4263,21 @@ static int cx_auto_build_input_controls(struct hda_codec *codec)
        struct hda_input_mux *imux = &spec->private_imux;
        const char *prev_label;
        int input_conn[HDA_MAX_NUM_INPUTS];
-       int i, err, cidx;
+       int i, j, err, cidx;
        int multi_connection;
 
+       if (!imux->num_items)
+               return 0;
+
        multi_connection = 0;
        for (i = 0; i < imux->num_items; i++) {
                cidx = get_input_connection(codec, spec->imux_info[i].adc,
                                            spec->imux_info[i].pin);
-               input_conn[i] = (spec->imux_info[i].adc << 8) | cidx;
+               if (cidx < 0)
+                       continue;
+               input_conn[i] = spec->imux_info[i].adc;
+               if (!spec->single_adc_amp)
+                       input_conn[i] |= cidx << 8;
                if (i > 0 && input_conn[i] != input_conn[0])
                        multi_connection = 1;
        }
@@ -4281,6 +4306,15 @@ static int cx_auto_build_input_controls(struct hda_codec *codec)
                        err = cx_auto_add_capture_volume(codec, nid,
                                                         "Capture", "", cidx);
                } else {
+                       bool dup_found = false;
+                       for (j = 0; j < i; j++) {
+                               if (input_conn[j] == input_conn[i]) {
+                                       dup_found = true;
+                                       break;
+                               }
+                       }
+                       if (dup_found)
+                               continue;
                        err = cx_auto_add_capture_volume(codec, nid,
                                                         label, " Capture", cidx);
                }
@@ -4344,6 +4378,22 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
        .reboot_notify = snd_hda_shutup_pins,
 };
 
+/* add "fake" mute amp-caps to DACs on cx5051 so that mixer mute switches
+ * can be created (bko#42825)
+ */
+static void add_cx5051_fake_mutes(struct hda_codec *codec)
+{
+       static hda_nid_t out_nids[] = {
+               0x10, 0x11, 0
+       };
+       hda_nid_t *p;
+
+       for (p = out_nids; *p; p++)
+               snd_hda_override_amp_caps(codec, *p, HDA_OUTPUT,
+                                         AC_AMPCAP_MIN_MUTE |
+                                         query_amp_caps(codec, *p, HDA_OUTPUT));
+}
+
 static int patch_conexant_auto(struct hda_codec *codec)
 {
        struct conexant_spec *spec;
@@ -4357,6 +4407,16 @@ static int patch_conexant_auto(struct hda_codec *codec)
                return -ENOMEM;
        codec->spec = spec;
        codec->pin_amp_workaround = 1;
+
+       switch (codec->vendor_id) {
+       case 0x14f15045:
+               spec->single_adc_amp = 1;
+               break;
+       case 0x14f15051:
+               add_cx5051_fake_mutes(codec);
+               break;
+       }
+
        err = cx_auto_search_adcs(codec);
        if (err < 0)
                return err;
index 4c7cd6b58b2d164f97317114b2d2f8b8321f883d..51412e1296f7209743e1706f45e6955829a98ca0 100644 (file)
@@ -509,6 +509,8 @@ static int alc_mux_enum_put(struct snd_kcontrol *kcontrol,
        imux = &spec->input_mux[mux_idx];
        if (!imux->num_items && mux_idx > 0)
                imux = &spec->input_mux[0];
+       if (!imux->num_items)
+               return 0;
 
        type = get_wcaps_type(get_wcaps(codec, nid));
        if (type == AC_WID_AUD_MIX) {
@@ -2088,25 +2090,27 @@ static void alc_auto_init_digital(struct hda_codec *codec)
 static void alc_auto_parse_digital(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       int i, err;
+       int i, err, nums;
        hda_nid_t dig_nid;
 
        /* support multiple SPDIFs; the secondary is set up as a slave */
+       nums = 0;
        for (i = 0; i < spec->autocfg.dig_outs; i++) {
                err = snd_hda_get_connections(codec,
                                              spec->autocfg.dig_out_pins[i],
                                              &dig_nid, 1);
-               if (err < 0)
+               if (err <= 0)
                        continue;
-               if (!i) {
+               if (!nums) {
                        spec->multiout.dig_out_nid = dig_nid;
                        spec->dig_out_type = spec->autocfg.dig_out_type[0];
                } else {
                        spec->multiout.slave_dig_outs = spec->slave_dig_outs;
-                       if (i >= ARRAY_SIZE(spec->slave_dig_outs) - 1)
+                       if (nums >= ARRAY_SIZE(spec->slave_dig_outs) - 1)
                                break;
-                       spec->slave_dig_outs[i - 1] = dig_nid;
+                       spec->slave_dig_outs[nums - 1] = dig_nid;
                }
+               nums++;
        }
 
        if (spec->autocfg.dig_in_pin) {
@@ -16415,6 +16419,7 @@ static const struct alc_config_preset alc861_presets[] = {
 /* Pin config fixes */
 enum {
        PINFIX_FSC_AMILO_PI1505,
+       PINFIX_ASUS_A6RP,
 };
 
 static const struct alc_fixup alc861_fixups[] = {
@@ -16426,9 +16431,19 @@ static const struct alc_fixup alc861_fixups[] = {
                        { }
                }
        },
+       [PINFIX_ASUS_A6RP] = {
+               .type = ALC_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* node 0x0f VREF seems controlling the master output */
+                       { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50 },
+                       { }
+               },
+       },
 };
 
 static const struct snd_pci_quirk alc861_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", PINFIX_ASUS_A6RP),
+       SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP),
        SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505),
        {}
 };
@@ -20126,6 +20141,8 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
          .patch = patch_alc882 },
        { .id = 0x10ec0662, .rev = 0x100101, .name = "ALC662 rev1",
          .patch = patch_alc662 },
+       { .id = 0x10ec0662, .rev = 0x100300, .name = "ALC662 rev3",
+         .patch = patch_alc662 },
        { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
        { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
        { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
index 5c42f3e0b16ed10ae5ac58e8407e7e5dedf794aa..86706821a236bec6cfade794ff5002da8425b728 100644 (file)
@@ -1602,7 +1602,7 @@ static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
                                "Dell Studio 1557", STAC_DELL_M6_DMIC),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
-                               "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+                               "Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
                                "Dell Studio 1558", STAC_DELL_M6_DMIC),
        {} /* terminator */
@@ -4162,13 +4162,15 @@ static int enable_pin_detect(struct hda_codec *codec, hda_nid_t nid,
        return 1;
 }
 
-static int is_nid_hp_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
+static int is_nid_out_jack_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
 {
        int i;
        for (i = 0; i < cfg->hp_outs; i++)
                if (cfg->hp_pins[i] == nid)
                        return 1; /* nid is a HP-Out */
-
+       for (i = 0; i < cfg->line_outs; i++)
+               if (cfg->line_out_pins[i] == nid)
+                       return 1; /* nid is a line-Out */
        return 0; /* nid is not a HP-Out */
 };
 
@@ -4354,7 +4356,7 @@ static int stac92xx_init(struct hda_codec *codec)
                        continue;
                }
 
-               if (is_nid_hp_pin(cfg, nid))
+               if (is_nid_out_jack_pin(cfg, nid))
                        continue; /* already has an unsol event */
 
                pinctl = snd_hda_codec_read(codec, nid, 0,
@@ -4587,7 +4589,7 @@ static void stac92xx_hp_detect(struct hda_codec *codec)
                unsigned int val = AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN;
                if (no_hp_sensing(spec, i))
                        continue;
-               if (presence)
+               if (1 /*presence*/)
                        stac92xx_set_pinctl(codec, cfg->hp_pins[i], val);
 #if 0 /* FIXME */
 /* Resetting the pinctl like below may lead to (a sort of) regressions
@@ -5425,9 +5427,7 @@ static void stac92hd8x_fill_auto_spec(struct hda_codec *codec)
 static int patch_stac92hd83xxx(struct hda_codec *codec)
 {
        struct sigmatel_spec *spec;
-       hda_nid_t conn[STAC92HD83_DAC_COUNT + 1];
        int err;
-       int num_dacs;
 
        spec  = kzalloc(sizeof(*spec), GFP_KERNEL);
        if (spec == NULL)
@@ -5467,26 +5467,8 @@ again:
                stac92xx_set_config_regs(codec,
                                stac92hd83xxx_brd_tbl[spec->board_config]);
 
-       switch (codec->vendor_id) {
-       case 0x111d76d1:
-       case 0x111d76d9:
-       case 0x111d76df:
-       case 0x111d76e5:
-       case 0x111d7666:
-       case 0x111d7667:
-       case 0x111d7668:
-       case 0x111d7669:
-       case 0x111d76e3:
-       case 0x111d7604:
-       case 0x111d76d4:
-       case 0x111d7605:
-       case 0x111d76d5:
-       case 0x111d76e7:
-               if (spec->board_config == STAC_92HD83XXX_PWR_REF)
-                       break;
+       if (spec->board_config != STAC_92HD83XXX_PWR_REF)
                spec->num_pwrs = 0;
-               break;
-       }
 
        codec->patch_ops = stac92xx_patch_ops;
 
@@ -5506,7 +5488,11 @@ again:
        }
 #endif 
 
-       err = stac92xx_parse_auto_config(codec, 0x1d, 0);
+       /* 92HD65/66 series has S/PDIF-IN */
+       if (codec->vendor_id >= 0x111d76e8 && codec->vendor_id <= 0x111d76f3)
+               err = stac92xx_parse_auto_config(codec, 0x1d, 0x22);
+       else
+               err = stac92xx_parse_auto_config(codec, 0x1d, 0);
        if (!err) {
                if (spec->board_config < 0) {
                        printk(KERN_WARNING "hda_codec: No auto-config is "
@@ -5522,22 +5508,6 @@ again:
                return err;
        }
 
-       /* docking output support */
-       num_dacs = snd_hda_get_connections(codec, 0xF,
-                               conn, STAC92HD83_DAC_COUNT + 1) - 1;
-       /* skip non-DAC connections */
-       while (num_dacs >= 0 &&
-                       (get_wcaps_type(get_wcaps(codec, conn[num_dacs]))
-                                       != AC_WID_AUD_OUT))
-               num_dacs--;
-       /* set port E and F to select the last DAC */
-       if (num_dacs >= 0) {
-               snd_hda_codec_write_cache(codec, 0xE, 0,
-                       AC_VERB_SET_CONNECT_SEL, num_dacs);
-               snd_hda_codec_write_cache(codec, 0xF, 0,
-                       AC_VERB_SET_CONNECT_SEL, num_dacs);
-       }
-
        codec->proc_widget_hook = stac92hd_proc_hook;
 
        return 0;
@@ -6405,6 +6375,18 @@ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = {
        { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
        { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76e8, .name = "92HD66B1X5", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76e9, .name = "92HD66B2X5", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76ea, .name = "92HD66B3X5", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76eb, .name = "92HD66C1X5", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76ec, .name = "92HD66C2X5", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76ed, .name = "92HD66C3X5", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76ee, .name = "92HD66B1X3", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76ef, .name = "92HD66B2X3", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76f0, .name = "92HD66B3X3", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76f1, .name = "92HD66C1X3", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76f2, .name = "92HD66C2X3", .patch = patch_stac92hd83xxx},
+       { .id = 0x111d76f3, .name = "92HD66C3/65", .patch = patch_stac92hd83xxx},
        {} /* terminator */
 };
 
index e328cfb7620c77bd80d1c3788c373f6ababfc520..e525da2673be3c5a4b12de44eb7c81e6ccadfcdd 100644 (file)
@@ -68,8 +68,11 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
 
 static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
 {
-       /* we use pins 39 and 41 of the VT1616 for left and right read outputs */
-       snd_ac97_write_cache(ice->ac97, 0x5a, snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
+       if (ice->ac97)
+               /* we use pins 39 and 41 of the VT1616 for left and right
+               read outputs */
+               snd_ac97_write_cache(ice->ac97, 0x5a,
+                       snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
        return 0;
 }
 
index 6c896dbfd796b39a65596783f036f245078f45c4..2e799a9a494d78b32d49c05d7bdd281f7bd8d334 100644 (file)
@@ -2074,6 +2074,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
                .name = "MSI P4 ATX 645 Ultra",
                .type = AC97_TUNE_HP_ONLY
        },
+       {
+               .subvendor = 0x161f,
+               .subdevice = 0x202f,
+               .name = "Gateway M520",
+               .type = AC97_TUNE_INV_EAPD
+       },
        {
                .subvendor = 0x161f,
                .subdevice = 0x203a,
index 617f98b0cbae47ce9c76265f50590f88a83c187d..713f7986e626700256a406155e3bcbde7a018fda 100644 (file)
@@ -80,8 +80,12 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
 
 void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
 {
-       void __iomem *address = lx_dsp_register(chip, port);
-       memcpy_fromio(data, address, len*sizeof(u32));
+       u32 __iomem *address = lx_dsp_register(chip, port);
+       int i;
+
+       /* we cannot use memcpy_fromio */
+       for (i = 0; i != len; ++i)
+               data[i] = ioread32(address + i);
 }
 
 
@@ -94,8 +98,12 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
 void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
                         u32 len)
 {
-       void __iomem *address = lx_dsp_register(chip, port);
-       memcpy_toio(address, data, len*sizeof(u32));
+       u32 __iomem *address = lx_dsp_register(chip, port);
+       int i;
+
+       /* we cannot use memcpy_to */
+       for (i = 0; i != len; ++i)
+               iowrite32(data[i], address + i);
 }
 
 
index 42d1ab136217dd1e001d7a1d9621d26cd741285c..915546a79545fff25d5591ebc462768abaadb8ee 100644 (file)
@@ -177,6 +177,7 @@ static void wm8776_registers_init(struct oxygen *chip)
        struct xonar_wm87x6 *data = chip->model_data;
 
        wm8776_write(chip, WM8776_RESET, 0);
+       wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK);
        wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN |
                     WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT);
        wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0);
index 2b5c7a95ae1fa36cd18366f6c93edcb941ea775d..5fe840b3666ddf2966e23073f7b8ad4ffa219fed 100644 (file)
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
 static int enable = 1;
+static int codecs = 1;
 
 module_param(index, int, 0444);
 MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
 MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
 module_param(enable, bool, 0444);
 MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)");
 
 static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
        dma_addr_t silence_dma_addr;
 };
 
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
 #define SIS_PRIMARY_CODEC_PRESENT      0x0001
 #define SIS_SECONDARY_CODEC_PRESENT    0x0002
 #define SIS_TERTIARY_CODEC_PRESENT     0x0004
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
 {
        unsigned long io = sis->ioport;
        void __iomem *ioaddr = sis->ioaddr;
+       unsigned long timeout;
        u16 status;
        int count;
        int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
        while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
                udelay(1);
 
+       /* Command complete, we can let go of the semaphore now.
+        */
+       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+       if (!count)
+               return -EIO;
+
        /* Now that we've finished the reset, find out what's attached.
+        * There are some codec/board combinations that take an extremely
+        * long time to come up. 350+ ms has been observed in the field,
+        * so we'll give them up to 500ms.
         */
-       status = inl(io + SIS_AC97_STATUS);
-       if (status & SIS_AC97_STATUS_CODEC_READY)
-               sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC2_READY)
-               sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC3_READY)
-               sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
-       /* All done, let go of the semaphore, and check for errors
+       sis->codecs_present = 0;
+       timeout = msecs_to_jiffies(500) + jiffies;
+       while (time_before_eq(jiffies, timeout)) {
+               status = inl(io + SIS_AC97_STATUS);
+               if (status & SIS_AC97_STATUS_CODEC_READY)
+                       sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC2_READY)
+                       sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC3_READY)
+                       sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+               if (sis->codecs_present == codecs)
+                       break;
+
+               msleep(1);
+       }
+
+       /* All done, check for errors.
         */
-       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
-       if (!sis->codecs_present || !count)
+       if (!sis->codecs_present) {
+               printk(KERN_ERR "sis7019: could not find any codecs\n");
                return -EIO;
+       }
+
+       if (sis->codecs_present != codecs) {
+               printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+                      sis->codecs_present, codecs);
+       }
 
        /* Let the hardware know that the audio driver is alive,
         * and enable PCM slots on the AC-link for L/R playback (3 & 4) and
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
        if (!enable)
                goto error_out;
 
+       /* The user can specify which codecs should be present so that we
+        * can wait for them to show up if they are slow to recover from
+        * the AC97 cold reset. We default to a single codec, the primary.
+        *
+        * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+        */
+       codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+                 SIS_TERTIARY_CODEC_PRESENT;
+       if (!codecs)
+               codecs = SIS_PRIMARY_CODEC_PRESENT;
+
        rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
        if (rc < 0)
                goto error_out;
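
The probe hunk above treats the new 'codecs' option as a bitmask built from the SIS_*_CODEC_PRESENT values (0x1 primary, 0x2 secondary, 0x4 tertiary), masking out unknown bits and falling back to the primary codec when the result is empty. A minimal stand-alone sketch of that decoding, using local macro names that only mirror the patch's constants:

#include <stdio.h>

/* Mirrors SIS_PRIMARY/SECONDARY/TERTIARY_CODEC_PRESENT from the patch above. */
#define PRIMARY_PRESENT    0x0001
#define SECONDARY_PRESENT  0x0002
#define TERTIARY_PRESENT   0x0004

int main(void)
{
	unsigned int codecs = 0x3;	/* e.g. "modprobe snd-sis7019 codecs=3" */

	/* Drop unknown bits; default to the primary codec if nothing is left. */
	codecs &= PRIMARY_PRESENT | SECONDARY_PRESENT | TERTIARY_PRESENT;
	if (!codecs)
		codecs = PRIMARY_PRESENT;

	printf("wait for primary:   %s\n", codecs & PRIMARY_PRESENT ? "yes" : "no");
	printf("wait for secondary: %s\n", codecs & SECONDARY_PRESENT ? "yes" : "no");
	printf("wait for tertiary:  %s\n", codecs & TERTIARY_PRESENT ? "yes" : "no");
	return 0;
}

With codecs=3, the chip-init loop in the earlier hunk would keep polling until both the primary and secondary codecs report ready, or the 500 ms timeout expires.
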
index e1a214ee757ffaa2621572ebb084fb0033bd621c..65abd09e1ca55b372f115b792ec624268ab12773 100644 (file)
@@ -40,11 +40,11 @@ struct ak4535_priv {
 /*
  * ak4535 register cache
  */
-static const u16 ak4535_reg[AK4535_CACHEREGNUM] = {
-    0x0000, 0x0080, 0x0000, 0x0003,
-    0x0002, 0x0000, 0x0011, 0x0001,
-    0x0000, 0x0040, 0x0036, 0x0010,
-    0x0000, 0x0000, 0x0057, 0x0000,
+static const u8 ak4535_reg[AK4535_CACHEREGNUM] = {
+       0x00, 0x80, 0x00, 0x03,
+       0x02, 0x00, 0x11, 0x01,
+       0x00, 0x40, 0x36, 0x10,
+       0x00, 0x00, 0x57, 0x00,
 };
 
 /*
index 65f46047b1cbd21d0d9441966ae6ceb6c5c6120e..7d45197f5cfd5a8f43a321698d7f12eb4b2ca754 100644 (file)
  * min : 0xFE : -115.0 dB
  * mute: 0xFF
  */
-static const DECLARE_TLV_DB_SCALE(out_tlv, -11500, 50, 1);
+static const DECLARE_TLV_DB_SCALE(out_tlv, -11550, 50, 1);
 
 static const struct snd_kcontrol_new ak4642_snd_controls[] = {
 
@@ -162,17 +162,17 @@ struct ak4642_priv {
 /*
  * ak4642 register cache
  */
-static const u16 ak4642_reg[AK4642_CACHEREGNUM] = {
-       0x0000, 0x0000, 0x0001, 0x0000,
-       0x0002, 0x0000, 0x0000, 0x0000,
-       0x00e1, 0x00e1, 0x0018, 0x0000,
-       0x00e1, 0x0018, 0x0011, 0x0008,
-       0x0000, 0x0000, 0x0000, 0x0000,
-       0x0000, 0x0000, 0x0000, 0x0000,
-       0x0000, 0x0000, 0x0000, 0x0000,
-       0x0000, 0x0000, 0x0000, 0x0000,
-       0x0000, 0x0000, 0x0000, 0x0000,
-       0x0000,
+static const u8 ak4642_reg[AK4642_CACHEREGNUM] = {
+       0x00, 0x00, 0x01, 0x00,
+       0x02, 0x00, 0x00, 0x00,
+       0xe1, 0xe1, 0x18, 0x00,
+       0xe1, 0x18, 0x11, 0x08,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00,
+       0x00,
 };
 
 /*
index a537e4af6ae74efe7950d6a1806ee3942118a834..1dae5c4d99315159ba808704023ffdb38366795c 100644 (file)
@@ -150,7 +150,7 @@ static int wm8711_hw_params(struct snd_pcm_substream *substream,
 {
        struct snd_soc_codec *codec = dai->codec;
        struct wm8711_priv *wm8711 =  snd_soc_codec_get_drvdata(codec);
-       u16 iface = snd_soc_read(codec, WM8711_IFACE) & 0xfffc;
+       u16 iface = snd_soc_read(codec, WM8711_IFACE) & 0xfff3;
        int i = get_coeff(wm8711->sysclk, params_rate(params));
        u16 srate = (coeff_div[i].sr << 2) |
                (coeff_div[i].bosr << 1) | coeff_div[i].usb;
@@ -231,7 +231,7 @@ static int wm8711_set_dai_fmt(struct snd_soc_dai *codec_dai,
                unsigned int fmt)
 {
        struct snd_soc_codec *codec = codec_dai->codec;
-       u16 iface = 0;
+       u16 iface = snd_soc_read(codec, WM8711_IFACE) & 0x000c;
 
        /* set master/slave audio interface */
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
index 76b4361e9b8042c112a32b14b290304e17761604..f5a0ec4ade5256d3f7bec64d4eb0ac1b22fa7ca0 100644 (file)
@@ -463,6 +463,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
                snd_soc_write(codec, WM8731_PWR, 0xffff);
                regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
                                       wm8731->supplies);
+               codec->cache_sync = 1;
                break;
        }
        codec->dapm.bias_level = level;
index 25af901fe8133029e7efe35914c07e02105e4085..c173aee339090f0aa7b156e65f44275fd4103674 100644 (file)
@@ -337,10 +337,10 @@ static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
                iface |= 0x0004;
                break;
        case SND_SOC_DAIFMT_DSP_A:
-               iface |= 0x0003;
+               iface |= 0x000C;
                break;
        case SND_SOC_DAIFMT_DSP_B:
-               iface |= 0x0013;
+               iface |= 0x001C;
                break;
        default:
                return -EINVAL;
index aa091a0d81873b9f09ceeb30bf7dcd46f6d05cd1..66d18a3e57f6f6b7dc07f6e469314131fffb2af1 100644 (file)
@@ -189,6 +189,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
        struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
        u16 ioctl;
 
+       if (wm8753->dai_func == ucontrol->value.integer.value[0])
+               return 0;
+
        if (codec->active)
                return -EBUSY;
 
index 9b3bba4df5b363fdeec2cb23a69af3fe38594477..0fce199a727ef826a2646fe095478a53293cf06f 100644 (file)
@@ -868,7 +868,7 @@ SOC_ENUM("Right Capture Mode", rin_mode),
 SOC_DOUBLE_R("Capture Volume", WM8904_ANALOGUE_LEFT_INPUT_0,
             WM8904_ANALOGUE_RIGHT_INPUT_0, 0, 31, 0),
 SOC_DOUBLE_R("Capture Switch", WM8904_ANALOGUE_LEFT_INPUT_0,
-            WM8904_ANALOGUE_RIGHT_INPUT_0, 7, 1, 0),
+            WM8904_ANALOGUE_RIGHT_INPUT_0, 7, 1, 1),
 
 SOC_SINGLE("High Pass Filter Switch", WM8904_ADC_DIGITAL_0, 4, 1, 0),
 SOC_ENUM("High Pass Filter Mode", hpf_mode),
index 25580e3ee7c4d18f5941edce1ace022774b6e3c5..d4ecb3f2d8d8579ecaae60830d545ac17f63fdf2 100644 (file)
@@ -472,6 +472,8 @@ static int wm8940_set_bias_level(struct snd_soc_codec *codec,
                break;
        }
 
+       codec->dapm.bias_level = level;
+
        return ret;
 }
 
index 5e05eed96c381c9730b31d65ea1b786b459ec41e..c850e3d84ed00b300f67c9b17d1006ed0903d9e2 100644 (file)
@@ -1957,7 +1957,13 @@ static int wm8962_readable_register(struct snd_soc_codec *codec, unsigned int re
 
 static int wm8962_reset(struct snd_soc_codec *codec)
 {
-       return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0x6243);
+       int ret;
+
+       ret = snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0x6243);
+       if (ret != 0)
+               return ret;
+
+       return snd_soc_write(codec, WM8962_PLL_SOFTWARE_RESET, 0);
 }
 
 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
@@ -2018,7 +2024,6 @@ static int wm8962_put_spk_sw(struct snd_kcontrol *kcontrol,
                            struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-       u16 *reg_cache = codec->reg_cache;
        int ret;
 
        /* Apply the update (if any) */
@@ -2027,16 +2032,19 @@ static int wm8962_put_spk_sw(struct snd_kcontrol *kcontrol,
                return 0;
 
        /* If the left PGA is enabled hit that VU bit... */
-       if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_SPKOUTL_PGA_ENA)
-               return snd_soc_write(codec, WM8962_SPKOUTL_VOLUME,
-                                    reg_cache[WM8962_SPKOUTL_VOLUME]);
+       ret = snd_soc_read(codec, WM8962_PWR_MGMT_2);
+       if (ret & WM8962_SPKOUTL_PGA_ENA) {
+               snd_soc_write(codec, WM8962_SPKOUTL_VOLUME,
+                             snd_soc_read(codec, WM8962_SPKOUTL_VOLUME));
+               return 1;
+       }
 
        /* ...otherwise the right.  The VU is stereo. */
-       if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_SPKOUTR_PGA_ENA)
-               return snd_soc_write(codec, WM8962_SPKOUTR_VOLUME,
-                                    reg_cache[WM8962_SPKOUTR_VOLUME]);
+       if (ret & WM8962_SPKOUTR_PGA_ENA)
+               snd_soc_write(codec, WM8962_SPKOUTR_VOLUME,
+                             snd_soc_read(codec, WM8962_SPKOUTR_VOLUME));
 
-       return 0;
+       return 1;
 }
 
 static const char *cap_hpf_mode_text[] = {
@@ -2336,7 +2344,6 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                         struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = w->codec;
-       u16 *reg_cache = codec->reg_cache;
        int reg;
 
        switch (w->shift) {
@@ -2359,14 +2366,14 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
 
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
-               return snd_soc_write(codec, reg, reg_cache[reg]);
+               return snd_soc_write(codec, reg, snd_soc_read(codec, reg));
        default:
                BUG();
                return -EINVAL;
        }
 }
 
-static const char *st_text[] = { "None", "Right", "Left" };
+static const char *st_text[] = { "None", "Left", "Right" };
 
 static const struct soc_enum str_enum =
        SOC_ENUM_SINGLE(WM8962_DAC_DSP_MIXING_1, 2, 3, st_text);
@@ -2968,13 +2975,13 @@ static int wm8962_hw_params(struct snd_pcm_substream *substream,
        case SNDRV_PCM_FORMAT_S16_LE:
                break;
        case SNDRV_PCM_FORMAT_S20_3LE:
-               aif0 |= 0x40;
+               aif0 |= 0x4;
                break;
        case SNDRV_PCM_FORMAT_S24_LE:
-               aif0 |= 0x80;
+               aif0 |= 0x8;
                break;
        case SNDRV_PCM_FORMAT_S32_LE:
-               aif0 |= 0xc0;
+               aif0 |= 0xc;
                break;
        default:
                return -EINVAL;
@@ -3027,9 +3034,9 @@ static int wm8962_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        int aif0 = 0;
 
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
-       case SND_SOC_DAIFMT_DSP_A:
-               aif0 |= WM8962_LRCLK_INV;
        case SND_SOC_DAIFMT_DSP_B:
+               aif0 |= WM8962_LRCLK_INV | 3;
+       case SND_SOC_DAIFMT_DSP_A:
                aif0 |= 3;
 
                switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -3822,6 +3829,11 @@ static int wm8962_probe(struct snd_soc_codec *codec)
         */
        snd_soc_update_bits(codec, WM8962_CLOCKING2, WM8962_SYSCLK_ENA, 0);
 
+       /* Ensure that the oscillator and PLLs are disabled */
+       snd_soc_update_bits(codec, WM8962_PLL2,
+                           WM8962_OSC_ENA | WM8962_PLL2_ENA | WM8962_PLL3_ENA,
+                           0);
+
        regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
 
        if (pdata) {
index 5121b4341c313a353efd23ac7e3fec40ea5cf9dc..0c877ffeddccd3dbe4f2e93b8d11bb859405f465 100755 (executable)
@@ -87,7 +87,7 @@ static int wm8994_retune_mobile_base[] = {
 static int wm8994_readable(struct snd_soc_codec *codec, unsigned int reg)
 {
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-       struct wm8994 *control = wm8994->control_data;
+       struct wm8994 *control = codec->control_data;
 
        switch (reg) {
        case WM8994_GPIO_1:
@@ -1402,7 +1402,7 @@ SND_SOC_DAPM_MUX("AIF2DAC Mux", SND_SOC_NOPM, 0, 0, &aif2dac_mux),
 SND_SOC_DAPM_MUX("AIF2ADC Mux", SND_SOC_NOPM, 0, 0, &aif2adc_mux),
 
 SND_SOC_DAPM_AIF_IN("AIF3DACDAT", "AIF3 Playback", 0, SND_SOC_NOPM, 0, 0),
-SND_SOC_DAPM_AIF_IN("AIF3ADCDAT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIF3ADCDAT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0),
 
 SND_SOC_DAPM_SUPPLY("TOCLK", WM8994_CLOCKING_1, 4, 0, NULL, 0),
 
index e57d1d5c1dbc890e1d204ea22f32839cfd854f19..7fdb014ec2a575c1b2c6d1aa833548c44f13f74c 100755 (executable)
@@ -565,14 +565,14 @@ SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER1, 0, 1, 0),
 };
 
 static const struct snd_kcontrol_new line2_mix[] = {
-SOC_DAPM_SINGLE("IN2R Switch", WM8993_LINE_MIXER2, 2, 1, 0),
-SOC_DAPM_SINGLE("IN2L Switch", WM8993_LINE_MIXER2, 1, 1, 0),
+SOC_DAPM_SINGLE("IN1L Switch", WM8993_LINE_MIXER2, 2, 1, 0),
+SOC_DAPM_SINGLE("IN1R Switch", WM8993_LINE_MIXER2, 1, 1, 0),
 SOC_DAPM_SINGLE("Output Switch", WM8993_LINE_MIXER2, 0, 1, 0),
 };
 
 static const struct snd_kcontrol_new line2n_mix[] = {
-SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 6, 1, 0),
-SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 5, 1, 0),
+SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 5, 1, 0),
+SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 6, 1, 0),
 };
 
 static const struct snd_kcontrol_new line2p_mix[] = {
@@ -592,6 +592,8 @@ SND_SOC_DAPM_INPUT("IN2RP:VXRP"),
 SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8993_POWER_MANAGEMENT_1, 5, 0),
 SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8993_POWER_MANAGEMENT_1, 4, 0),
 
+SND_SOC_DAPM_SUPPLY("LINEOUT_VMID_BUF", WM8993_ANTIPOP1, 7, 0, NULL, 0),
+
 SND_SOC_DAPM_MIXER("IN1L PGA", WM8993_POWER_MANAGEMENT_2, 6, 0,
                   in1l_pga, ARRAY_SIZE(in1l_pga)),
 SND_SOC_DAPM_MIXER("IN1R PGA", WM8993_POWER_MANAGEMENT_2, 4, 0,
@@ -804,9 +806,11 @@ static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
 };
 
 static const struct snd_soc_dapm_route lineout1_se_routes[] = {
+       { "LINEOUT1N Mixer", NULL, "LINEOUT_VMID_BUF" },
        { "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" },
        { "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" },
 
+       { "LINEOUT1P Mixer", NULL, "LINEOUT_VMID_BUF" },
        { "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" },
 
        { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
@@ -814,8 +818,8 @@ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
 };
 
 static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
-       { "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
-       { "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
+       { "LINEOUT2 Mixer", "IN1L Switch", "IN1L PGA" },
+       { "LINEOUT2 Mixer", "IN1R Switch", "IN1R PGA" },
        { "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" },
 
        { "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
@@ -823,9 +827,11 @@ static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
 };
 
 static const struct snd_soc_dapm_route lineout2_se_routes[] = {
+       { "LINEOUT2N Mixer", NULL, "LINEOUT_VMID_BUF" },
        { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" },
        { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" },
 
+       { "LINEOUT2P Mixer", NULL, "LINEOUT_VMID_BUF" },
        { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" },
 
        { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
index 313e0ccedd5b6ad64d270367a4e40ac78f80adf8..bd811a04f42aab2ce23be8ddc578667a2e1a20d5 100644 (file)
@@ -698,6 +698,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
 
        /* Initialize the device_attribute structure */
        dev_attr = &ssi_private->dev_attr;
+       sysfs_attr_init(&dev_attr->attr);
        dev_attr->attr.name = "statistics";
        dev_attr->attr.mode = S_IRUGO;
        dev_attr->show = fsl_sysfs_ssi_show;
index 61fceb09cdb5bbbf426270696bbf48b058f4d658..3b56254006aa8eb1159da328d8aac05a29207847 100644 (file)
@@ -112,7 +112,7 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
                break;
        case SND_SOC_DAIFMT_DSP_A:
                /* data on rising edge of bclk, frame high 1clk before data */
-               strcr |= SSI_STCR_TFSL | SSI_STCR_TEFS;
+               strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
                break;
        }
 
index 8ad93ee2e92bfe312157b46eabbe251b9db35f9d..b583e604a1eac1562d258a9e4beda39e83edc38b 100644 (file)
@@ -668,6 +668,38 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
+static void pxa_ssp_set_running_bit(struct snd_pcm_substream *substream,
+                                   struct ssp_device *ssp, int value)
+{
+       uint32_t sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
+       uint32_t sscr1 = pxa_ssp_read_reg(ssp, SSCR1);
+       uint32_t sspsp = pxa_ssp_read_reg(ssp, SSPSP);
+       uint32_t sssr = pxa_ssp_read_reg(ssp, SSSR);
+
+       if (value && (sscr0 & SSCR0_SSE))
+               pxa_ssp_write_reg(ssp, SSCR0, sscr0 & ~SSCR0_SSE);
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               if (value)
+                       sscr1 |= SSCR1_TSRE;
+               else
+                       sscr1 &= ~SSCR1_TSRE;
+       } else {
+               if (value)
+                       sscr1 |= SSCR1_RSRE;
+               else
+                       sscr1 &= ~SSCR1_RSRE;
+       }
+
+       pxa_ssp_write_reg(ssp, SSCR1, sscr1);
+
+       if (value) {
+               pxa_ssp_write_reg(ssp, SSSR, sssr);
+               pxa_ssp_write_reg(ssp, SSPSP, sspsp);
+               pxa_ssp_write_reg(ssp, SSCR0, sscr0 | SSCR0_SSE);
+       }
+}
+
 static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
                           struct snd_soc_dai *cpu_dai)
 {
@@ -681,42 +713,21 @@ static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
                pxa_ssp_enable(ssp);
                break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               val = pxa_ssp_read_reg(ssp, SSCR1);
-               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       val |= SSCR1_TSRE;
-               else
-                       val |= SSCR1_RSRE;
-               pxa_ssp_write_reg(ssp, SSCR1, val);
+               pxa_ssp_set_running_bit(substream, ssp, 1);
                val = pxa_ssp_read_reg(ssp, SSSR);
                pxa_ssp_write_reg(ssp, SSSR, val);
                break;
        case SNDRV_PCM_TRIGGER_START:
-               val = pxa_ssp_read_reg(ssp, SSCR1);
-               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       val |= SSCR1_TSRE;
-               else
-                       val |= SSCR1_RSRE;
-               pxa_ssp_write_reg(ssp, SSCR1, val);
-               pxa_ssp_enable(ssp);
+               pxa_ssp_set_running_bit(substream, ssp, 1);
                break;
        case SNDRV_PCM_TRIGGER_STOP:
-               val = pxa_ssp_read_reg(ssp, SSCR1);
-               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       val &= ~SSCR1_TSRE;
-               else
-                       val &= ~SSCR1_RSRE;
-               pxa_ssp_write_reg(ssp, SSCR1, val);
+               pxa_ssp_set_running_bit(substream, ssp, 0);
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
                pxa_ssp_disable(ssp);
                break;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               val = pxa_ssp_read_reg(ssp, SSCR1);
-               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       val &= ~SSCR1_TSRE;
-               else
-                       val &= ~SSCR1_RSRE;
-               pxa_ssp_write_reg(ssp, SSCR1, val);
+               pxa_ssp_set_running_bit(substream, ssp, 0);
                break;
 
        default:
index 16152ed086488fcc7ab15061b7a831adee42b741..c1290da71c627039c5059357faf5eec22f1699d6 100644 (file)
@@ -425,7 +425,7 @@ static struct snd_soc_dai_link neo1973_dai[] = {
        .platform_name = "samsung-audio",
        .cpu_dai_name = "s3c24xx-iis",
        .codec_dai_name = "wm8753-hifi",
-       .codec_name = "wm8753-codec.0-001a",
+       .codec_name = "wm8753.0-001a",
        .init = neo1973_wm8753_init,
        .ops = &neo1973_hifi_ops,
 },
@@ -434,7 +434,7 @@ static struct snd_soc_dai_link neo1973_dai[] = {
        .stream_name = "Voice",
        .cpu_dai_name = "dfbmcs320-pcm",
        .codec_dai_name = "wm8753-voice",
-       .codec_name = "wm8753-codec.0-001a",
+       .codec_name = "wm8753.0-001a",
        .ops = &neo1973_voice_ops,
 },
 };
index 668897314ba54348fd8e377c6e49c770d39448b4..e20e8cc645aa39e55fae174c443b5169b883d141 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
 #include <linux/platform_device.h>
+#include <linux/ctype.h>
 #include <linux/slab.h>
 #include <sound/ac97_codec.h>
 #include <sound/core.h>
@@ -1961,9 +1962,20 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
                 "%s", card->name);
        snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
                 "%s", card->long_name ? card->long_name : card->name);
-       if (card->driver_name)
-               strlcpy(card->snd_card->driver, card->driver_name,
-                       sizeof(card->snd_card->driver));
+       snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
+                "%s", card->driver_name ? card->driver_name : card->name);
+       for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) {
+               switch (card->snd_card->driver[i]) {
+               case '_':
+               case '-':
+               case '\0':
+                       break;
+               default:
+                       if (!isalnum(card->snd_card->driver[i]))
+                               card->snd_card->driver[i] = '_';
+                       break;
+               }
+       }
 
        if (card->late_probe) {
                ret = card->late_probe(card);
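
The sanitisation loop added to snd_soc_instantiate_card() keeps alphanumerics, '_' and '-' in the ALSA driver string and maps every other character to '_'. A small stand-alone sketch of the same character rule, with a purely illustrative card name:

#include <ctype.h>
#include <stdio.h>

/* Same character rule as the loop in the hunk above. */
static void sanitize_driver_name(char *s)
{
	for (; *s; s++)
		if (*s != '_' && *s != '-' && !isalnum((unsigned char)*s))
			*s = '_';
}

int main(void)
{
	char name[] = "My Card (rev.2)";	/* hypothetical card name */

	sanitize_driver_name(name);
	printf("%s\n", name);	/* prints "My_Card__rev_2_" */
	return 0;
}
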
index 32ab7fc4579ac18ee7155aa4891e709c76173bf1..d5ec2060d1211351532cbd5bbadd54db40cfc28e 100644 (file)
@@ -67,6 +67,7 @@ static int dapm_up_seq[] = {
        [snd_soc_dapm_out_drv] = 10,
        [snd_soc_dapm_hp] = 10,
        [snd_soc_dapm_spk] = 10,
+       [snd_soc_dapm_line] = 10,
        [snd_soc_dapm_post] = 11,
 };
 
@@ -75,6 +76,7 @@ static int dapm_down_seq[] = {
        [snd_soc_dapm_adc] = 1,
        [snd_soc_dapm_hp] = 2,
        [snd_soc_dapm_spk] = 2,
+       [snd_soc_dapm_line] = 2,
        [snd_soc_dapm_out_drv] = 2,
        [snd_soc_dapm_pga] = 4,
        [snd_soc_dapm_mixer_named_ctl] = 5,
@@ -2615,9 +2617,13 @@ static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
         * standby.
         */
        if (powerdown) {
-               snd_soc_dapm_set_bias_level(dapm, SND_SOC_BIAS_PREPARE);
+               if (dapm->bias_level == SND_SOC_BIAS_ON)
+                       snd_soc_dapm_set_bias_level(dapm,
+                                                   SND_SOC_BIAS_PREPARE);
                dapm_seq_run(dapm, &down_list, 0, false);
-               snd_soc_dapm_set_bias_level(dapm, SND_SOC_BIAS_STANDBY);
+               if (dapm->bias_level == SND_SOC_BIAS_PREPARE)
+                       snd_soc_dapm_set_bias_level(dapm,
+                                                   SND_SOC_BIAS_STANDBY);
        }
 }
 
@@ -2630,7 +2636,9 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card)
 
        list_for_each_entry(codec, &card->codec_dev_list, list) {
                soc_dapm_shutdown_codec(&codec->dapm);
-               snd_soc_dapm_set_bias_level(&codec->dapm, SND_SOC_BIAS_OFF);
+               if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY)
+                       snd_soc_dapm_set_bias_level(&codec->dapm,
+                                                   SND_SOC_BIAS_OFF);
        }
 }
 
index ec921ec99c2621eaa58b7d0c5e4eb97b861971de..cd987de341f53e72d8933798849de2eea1b95e80 100644 (file)
@@ -57,7 +57,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
 }
 EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
 
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+       .formats                = 0xffffffff,
+       .channels_min           = 1,
+       .channels_max           = UINT_MAX,
+
+       /* Random values to keep userspace happy when checking constraints */
+       .info                   = SNDRV_PCM_INFO_INTERLEAVED |
+                                 SNDRV_PCM_INFO_BLOCK_TRANSFER,
+       .buffer_bytes_max       = 128*1024,
+       .period_bytes_min       = PAGE_SIZE,
+       .period_bytes_max       = PAGE_SIZE*2,
+       .periods_min            = 2,
+       .periods_max            = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+       snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+       return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+       .open           = dummy_dma_open,
+       .ioctl          = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+       .ops = &dummy_dma_ops,
+};
 
 static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
 {
index fb5d68fa7ff46fdf17e189fc740edb5ddb77a9ea..96c381e16bc72c5d5067046184b70da98476b027 100644 (file)
@@ -459,7 +459,8 @@ static void kill_stream_urbs(struct ua101_stream *stream)
        unsigned int i;
 
        for (i = 0; i < stream->queue_length; ++i)
-               usb_kill_urb(&stream->urbs[i]->urb);
+               if (stream->urbs[i])
+                       usb_kill_urb(&stream->urbs[i]->urb);
 }
 
 static int enable_iso_interface(struct ua101 *ua, unsigned int intf_index)
@@ -484,6 +485,9 @@ static void disable_iso_interface(struct ua101 *ua, unsigned int intf_index)
 {
        struct usb_host_interface *alts;
 
+       if (!ua->intf[intf_index])
+               return;
+
        alts = ua->intf[intf_index]->cur_altsetting;
        if (alts->desc.bAlternateSetting != 0) {
                int err = usb_set_interface(ua->dev,
@@ -1144,27 +1148,37 @@ static void free_stream_urbs(struct ua101_stream *stream)
 {
        unsigned int i;
 
-       for (i = 0; i < stream->queue_length; ++i)
+       for (i = 0; i < stream->queue_length; ++i) {
                kfree(stream->urbs[i]);
+               stream->urbs[i] = NULL;
+       }
 }
 
 static void free_usb_related_resources(struct ua101 *ua,
                                       struct usb_interface *interface)
 {
        unsigned int i;
+       struct usb_interface *intf;
 
+       mutex_lock(&ua->mutex);
        free_stream_urbs(&ua->capture);
        free_stream_urbs(&ua->playback);
+       mutex_unlock(&ua->mutex);
        free_stream_buffers(ua, &ua->capture);
        free_stream_buffers(ua, &ua->playback);
 
-       for (i = 0; i < ARRAY_SIZE(ua->intf); ++i)
-               if (ua->intf[i]) {
-                       usb_set_intfdata(ua->intf[i], NULL);
-                       if (ua->intf[i] != interface)
+       for (i = 0; i < ARRAY_SIZE(ua->intf); ++i) {
+               mutex_lock(&ua->mutex);
+               intf = ua->intf[i];
+               ua->intf[i] = NULL;
+               mutex_unlock(&ua->mutex);
+               if (intf) {
+                       usb_set_intfdata(intf, NULL);
+                       if (intf != interface)
                                usb_driver_release_interface(&ua101_driver,
-                                                            ua->intf[i]);
+                                                            intf);
                }
+       }
 }
 
 static void ua101_card_free(struct snd_card *card)
index cdd19d7fe500b2a6315b3023212f1313032c5999..0de7cbd99ea0440b63632ea97d163d78cae54f43 100644 (file)
@@ -765,10 +765,60 @@ static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
  * interface to ALSA control for feature/mixer units
  */
 
+/* volume control quirks */
+static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+                                 struct snd_kcontrol *kctl)
+{
+       switch (cval->mixer->chip->usb_id) {
+       case USB_ID(0x0471, 0x0101):
+       case USB_ID(0x0471, 0x0104):
+       case USB_ID(0x0471, 0x0105):
+       case USB_ID(0x0672, 0x1041):
+       /* quirk for UDA1321/N101.
+        * note that detection between firmware 2.1.1.7 (N101)
+        * and later 2.1.1.21 is not very clear from datasheets.
+        * I hope that the min value is -15360 for newer firmware --jk
+        */
+               if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
+                   cval->min == -15616) {
+                       snd_printk(KERN_INFO
+                                "set volume quirk for UDA1321/N101 chip\n");
+                       cval->max = -256;
+               }
+               break;
+
+       case USB_ID(0x046d, 0x09a4):
+               if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+                       snd_printk(KERN_INFO
+                               "set volume quirk for QuickCam E3500\n");
+                       cval->min = 6080;
+                       cval->max = 8768;
+                       cval->res = 192;
+               }
+               break;
+
+       case USB_ID(0x046d, 0x0808):
+       case USB_ID(0x046d, 0x0809):
+       case USB_ID(0x046d, 0x0991):
+       /* Most audio usb devices lie about volume resolution.
+        * Most Logitech webcams have res = 384.
+        * Probably there is some logitech magic behind this number --fishor
+        */
+               if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+                       snd_printk(KERN_INFO
+                               "set resolution quirk: cval->res = 384\n");
+                       cval->res = 384;
+               }
+               break;
+
+       }
+}
+
 /*
  * retrieve the minimum and maximum values for the specified control
  */
-static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
+static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
+                                  int default_min, struct snd_kcontrol *kctl)
 {
        /* for failsafe */
        cval->min = default_min;
@@ -844,6 +894,9 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
                cval->initialized = 1;
        }
 
+       if (kctl)
+               volume_control_quirks(cval, kctl);
+
        /* USB descriptions contain the dB scale in 1/256 dB unit
         * while ALSA TLV contains in 1/100 dB unit
         */
@@ -864,6 +917,7 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
        return 0;
 }
 
+#define get_min_max(cval, def) get_min_max_with_quirks(cval, def, NULL)
 
 /* get a feature/mixer unit info */
 static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
@@ -881,8 +935,17 @@ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_
                uinfo->value.integer.min = 0;
                uinfo->value.integer.max = 1;
        } else {
-               if (! cval->initialized)
-                       get_min_max(cval,  0);
+               if (!cval->initialized) {
+                       get_min_max_with_quirks(cval, 0, kcontrol);
+                       if (cval->initialized && cval->dBmin >= cval->dBmax) {
+                               kcontrol->vd[0].access &=
+                                       ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+                                         SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
+                               snd_ctl_notify(cval->mixer->chip->card,
+                                              SNDRV_CTL_EVENT_MASK_INFO,
+                                              &kcontrol->id);
+                       }
+               }
                uinfo->value.integer.min = 0;
                uinfo->value.integer.max =
                        (cval->max - cval->min + cval->res - 1) / cval->res;
@@ -1036,9 +1099,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
                cval->ch_readonly = readonly_mask;
        }
 
-       /* get min/max values */
-       get_min_max(cval, 0);
-
        /* if all channels in the mask are marked read-only, make the control
         * read-only. set_cur_mix_value() will check the mask again and won't
         * issue write commands to read-only channels. */
@@ -1060,6 +1120,9 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
                len = snd_usb_copy_string_desc(state, nameid,
                                kctl->id.name, sizeof(kctl->id.name));
 
+       /* get min/max values */
+       get_min_max_with_quirks(cval, 0, kctl);
+
        switch (control) {
        case UAC_FU_MUTE:
        case UAC_FU_VOLUME:
@@ -1109,51 +1172,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
                break;
        }
 
-       /* volume control quirks */
-       switch (state->chip->usb_id) {
-       case USB_ID(0x0471, 0x0101):
-       case USB_ID(0x0471, 0x0104):
-       case USB_ID(0x0471, 0x0105):
-       case USB_ID(0x0672, 0x1041):
-       /* quirk for UDA1321/N101.
-        * note that detection between firmware 2.1.1.7 (N101)
-        * and later 2.1.1.21 is not very clear from datasheets.
-        * I hope that the min value is -15360 for newer firmware --jk
-        */
-               if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
-                   cval->min == -15616) {
-                       snd_printk(KERN_INFO
-                                "set volume quirk for UDA1321/N101 chip\n");
-                       cval->max = -256;
-               }
-               break;
-
-       case USB_ID(0x046d, 0x09a4):
-               if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
-                       snd_printk(KERN_INFO
-                               "set volume quirk for QuickCam E3500\n");
-                       cval->min = 6080;
-                       cval->max = 8768;
-                       cval->res = 192;
-               }
-               break;
-
-       case USB_ID(0x046d, 0x0808):
-       case USB_ID(0x046d, 0x0809):
-       case USB_ID(0x046d, 0x0991):
-       /* Most audio usb devices lie about volume resolution.
-        * Most Logitech webcams have res = 384.
-        * Proboly there is some logitech magic behind this number --fishor
-        */
-               if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
-                       snd_printk(KERN_INFO
-                               "set resolution quirk: cval->res = 384\n");
-                       cval->res = 384;
-               }
-               break;
-
-       }
-
        range = (cval->max - cval->min) / cval->res;
        /* Are there devices with volume range more than 255? I use a bit more
         * to be sure. 384 is a resolution magic number found on Logitech
index b8dcbf407bbbbe10bf4daf39b65d4f6e06ec65b7..506c0fa679f6f4048c0a89f0e7ea1083ec4e1a3a 100644 (file)
@@ -670,6 +670,9 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
        int count = 0, needs_knot = 0;
        int err;
 
+       kfree(subs->rate_list.list);
+       subs->rate_list.list = NULL;
+
        list_for_each_entry(fp, &subs->fmt_list, list) {
                if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
                        return 0;
index c400ade3ff08b8c18d8225a9688b38f9e1c487cc..1e7a47a86605078d2934720f168da85cbf8145ce 100644 (file)
@@ -674,7 +674,7 @@ dotry:
                inurb->transfer_buffer_length =
                        inurb->number_of_packets *
                        inurb->iso_frame_desc[0].length;
-               preempt_disable();
+
                if (u == 0) {
                        int now;
                        struct usb_device *dev = inurb->dev;
@@ -686,19 +686,17 @@ dotry:
                }
                err = usb_submit_urb(inurb, GFP_ATOMIC);
                if (err < 0) {
-                       preempt_enable();
                        snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
                                   " returned %i\n", u, err);
                        return err;
                }
                err = usb_submit_urb(outurb, GFP_ATOMIC);
                if (err < 0) {
-                       preempt_enable();
                        snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
                                   " returned %i\n", u, err);
                        return err;
                }
-               preempt_enable();
+
                if (inurb->start_frame != outurb->start_frame) {
                        snd_printd(KERN_DEBUG
                                   "u[%i] start_frames differ in:%u out:%u\n",
index a57b66e853c24e3d68b73d80ce1831494320db4f..185a96d66dd18ce21b210f9747d7ab61b03b1fda 100644 (file)
@@ -1,2 +1,8 @@
 
 #include "../../../arch/x86/lib/memcpy_64.S"
+/*
+ * We need to provide note.GNU-stack section, saying that we want
+ * NOT executable stack. Otherwise the final linking will assume that
+ * the ELF stack should not be restricted at all and set it RWX.
+ */
+.section .note.GNU-stack,"",@progbits
index 0239eb87b2321ea9e4fe208605020e83b1d7c718..ad2183c98acb3f5531d9cd3ef0530c123bb667e6 100644 (file)
@@ -348,6 +348,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
 
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
+       data->period = 1;
 
        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!sample_id_all)
index 627a02e03c57ab381d97cf2be9756997851d752d..fb695991935bfdadf2b734e6440c3739043f5dfc 100644 (file)
@@ -158,6 +158,18 @@ struct hist_entry *__hists__add_entry(struct hists *self,
                if (!cmp) {
                        he->period += period;
                        ++he->nr_events;
+
+                       /* If the map of an existing hist_entry has
+                        * become out-of-date due to an exec() or
+                        * similar, update it.  Otherwise we will
+                        * mis-adjust symbol addresses when computing
+                        * the history counter to increment.
+                        */
+                       if (he->ms.map != entry.ms.map) {
+                               he->ms.map = entry.ms.map;
+                               if (he->ms.map)
+                                       he->ms.map->referenced = true;
+                       }
                        goto out;
                }
 
index f0223166e76165cd1e25d86cade2d7bb8337c9c6..8c50da86885b8f63706ab34059f2298b5e3ae3db 100644 (file)
@@ -1869,8 +1869,10 @@ static int __del_trace_probe_event(int fd, struct str_node *ent)
 
        pr_debug("Writing event: %s\n", buf);
        ret = write(fd, buf, strlen(buf));
-       if (ret < 0)
+       if (ret < 0) {
+               ret = -errno;
                goto error;
+       }
 
        printf("Remove event: %s\n", ent->s);
        return 0;
index f44fa541d56e67c6bb6c976e78123e99657ffbee..0ffbc8e5b4b5ef5a054ca3eab4a331c2c6c9f7be 100644 (file)
@@ -122,6 +122,9 @@ static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
                }
        }
        va_end(ap);
+
+       if (n >= (int)size)
+               return size - 1;
        return n;
 }
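
The clamp added to repsep_snprintf() accounts for the C99 snprintf()/vsnprintf() contract: when output is truncated, the return value is the length the full string would have needed, not the number of bytes actually stored. A minimal illustration of that behaviour, independent of the perf code:

#include <stdio.h>

int main(void)
{
	char buf[8];
	int n = snprintf(buf, sizeof(buf), "%s", "a rather long string");

	/* n is 20 (the untruncated length) while buf only holds "a rathe",
	 * so callers using n as an offset must clamp it to size - 1 first.
	 */
	printf("n=%d buf=\"%s\"\n", n, buf);
	return 0;
}
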
 
index 0a7ed5b5e281c88b321de87ced66a3d29ebb003d..bf54c48871dc1b65afa02102c506ee3aa5a1353b 100644 (file)
@@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
        field = malloc_or_die(sizeof(*field));
 
        type = process_arg(event, field, &token);
+       while (type == EVENT_OP)
+               type = process_op(event, field, &token);
        if (test_type_token(type, token, EVENT_DELIM, ","))
                goto out_free;
 
@@ -1580,6 +1582,8 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok)
        field = malloc_or_die(sizeof(*field));
 
        type = process_arg(event, field, &token);
+       while (type == EVENT_OP)
+               type = process_op(event, field, &token);
        if (test_type_token(type, token, EVENT_DELIM, ","))
                goto out_free;
 
index b9c7986316997b4f820ed1567a71ff911ae3d107..f17dfee1e21b2db8040427f0a747d10f4041a49c 100644 (file)
@@ -324,7 +324,7 @@ static void *start_thread_helper(void *arg)
 
                ret = t->in(t, t->buf, t->buf_size);
                if (ret > 0) {
-                       ret = t->out(t, t->buf, t->buf_size);
+                       ret = t->out(t, t->buf, ret);
                        name = out_name;
                        op = "write";
                } else {
index 62a9caf0563c2205327245b2a1103f1bbff94e70..fb0f6e469bb4b042850b48db6a0445ad2a8b004d 100644 (file)
@@ -285,6 +285,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
        }
 }
 
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+       kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
+}
+
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
        int i, idx;
@@ -293,10 +298,9 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
 
-       for (i = 0; i < slots->nmemslots; i++) {
-               kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
-                                   slots->memslots[i].npages);
-       }
+       for (i = 0; i < slots->nmemslots; i++)
+               kvm_iommu_unmap_pages(kvm, &slots->memslots[i]);
+
        srcu_read_unlock(&kvm->srcu, idx);
 
        return 0;
index 96ebc0679415baeff88d7764b1c0e956820adad2..6b39ba9540e8cd417eba5c1004bd57b782cbf7c7 100644 (file)
@@ -796,12 +796,13 @@ skip_lpage:
        if (r)
                goto out_free;
 
-       /* map the pages in iommu page table */
+       /* map/unmap the pages in iommu page table */
        if (npages) {
                r = kvm_iommu_map_pages(kvm, &new);
                if (r)
                        goto out_free;
-       }
+       } else
+               kvm_iommu_unmap_pages(kvm, &old);
 
        r = -ENOMEM;
        slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);