Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 9 Feb 2015 23:24:03 +0000 (15:24 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 9 Feb 2015 23:24:03 +0000 (15:24 -0800)
Pull core locking updates from Ingo Molnar:
 "The main changes are:

   - mutex, completions and rtmutex micro-optimizations
   - lock debugging fix
   - various cleanups in the MCS and the futex code"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rtmutex: Optimize setting task running after being blocked
  locking/rwsem: Use task->state helpers
  sched/completion: Add lock-free checking of the blocking case
  sched/completion: Remove unnecessary ->wait.lock serialization when reading completion state
  locking/mutex: Explicitly mark task as running after wakeup
  futex: Fix argument handling in futex_lock_pi() calls
  doc: Fix misnamed FUTEX_CMP_REQUEUE_PI op constants
  locking/Documentation: Update code path
  softirq/preempt: Add missing current->preempt_disable_ip update
  locking/osq: No need for load/acquire when acquire-polling
  locking/mcs: Better differentiate between MCS variants
  locking/mutex: Introduce ww_mutex_set_context_slowpath()
  locking/mutex: Move MCS related comments to proper location
  locking/mutex: Checking the stamp is WW only

343 files changed:
.mailmap
Documentation/RCU/stallwarn.txt
Documentation/RCU/trace.txt
Documentation/devicetree/bindings/mfd/max77686.txt
Documentation/devicetree/bindings/regulator/da9211.txt
Documentation/devicetree/bindings/regulator/isl9305.txt
Documentation/devicetree/bindings/regulator/mt6397-regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/pfuze100.txt
Documentation/devicetree/bindings/spi/sh-msiof.txt
Documentation/devicetree/bindings/spi/spi-sirf.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi-st-ssc.txt [new file with mode: 0644]
Documentation/hwmon/ina2xx
Documentation/memory-barriers.txt
Documentation/networking/netlink_mmap.txt
MAINTAINERS
Makefile
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/exynos4.dtsi
arch/arm/kernel/entry-v7m.S
arch/arm/kvm/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/context.c
arch/arm/mm/dma-mapping.c
arch/arm64/kvm/Kconfig
arch/mips/Kconfig
arch/mips/boot/elf2ecoff.c
arch/mips/cavium-octeon/smp.c
arch/mips/configs/malta_defconfig
arch/mips/include/asm/fpu.h
arch/mips/include/asm/fw/arc/hinv.h
arch/mips/include/asm/mips-cm.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/syscall.h
arch/mips/include/asm/thread_info.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/jz4740/irq.c
arch/mips/kernel/elf.c
arch/mips/kernel/irq_cpu.c
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/smp-cmp.c
arch/mips/kernel/smp-mt.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/kvm/Kconfig
arch/mips/mm/tlb-r4k.c
arch/mn10300/include/asm/cacheflush.h
arch/nios2/mm/fault.c
arch/powerpc/include/asm/cacheflush.h
arch/powerpc/kvm/Kconfig
arch/s390/include/asm/cacheflush.h
arch/s390/kvm/Kconfig
arch/sparc/include/asm/cacheflush_64.h
arch/tile/kvm/Kconfig
arch/x86/Kconfig
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kvm/Kconfig
arch/x86/pci/common.c
arch/x86/pci/intel_mid_pci.c
block/blk-mq-sysfs.c
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
drivers/acpi/acpi_lpss.c
drivers/base/regmap/internal.h
drivers/base/regmap/regmap-ac97.c
drivers/base/regmap/regmap-i2c.c
drivers/base/regmap/regmap.c
drivers/char/random.c
drivers/clk/Kconfig
drivers/cpufreq/Kconfig
drivers/devfreq/Kconfig
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/radeon/radeon_benchmark.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/hwmon/Kconfig
drivers/hwmon/abx500.c
drivers/hwmon/ad7314.c
drivers/hwmon/adc128d818.c
drivers/hwmon/ads7828.c
drivers/hwmon/ina2xx.c
drivers/hwmon/jc42.c
drivers/hwmon/nct7802.c
drivers/hwmon/tmp102.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/irqchip/irq-mips-gic.c
drivers/isdn/hardware/eicon/message.c
drivers/md/Kconfig
drivers/md/bitmap.c
drivers/md/raid5.c
drivers/net/Kconfig
drivers/net/caif/caif_hsi.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/nmclan_cs.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/cirrus/Kconfig
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/hyperv/netvsc.c
drivers/net/macvtap.c
drivers/net/ppp/ppp_deflate.c
drivers/net/tun.c
drivers/net/usb/sr9700.c
drivers/net/usb/sr9700.h
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/Kconfig
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/pci/host/pcie-designware.c
drivers/pci/quirks.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/axp20x-regulator.c
drivers/regulator/core.c
drivers/regulator/da9211-regulator.c
drivers/regulator/fan53555.c
drivers/regulator/internal.h
drivers/regulator/isl9305.c
drivers/regulator/lp872x.c
drivers/regulator/max14577.c
drivers/regulator/max77686.c
drivers/regulator/max77843.c [new file with mode: 0644]
drivers/regulator/max8649.c
drivers/regulator/mt6397-regulator.c [new file with mode: 0644]
drivers/regulator/of_regulator.c
drivers/regulator/pfuze100-regulator.c
drivers/regulator/qcom_rpm-regulator.c
drivers/regulator/rk808-regulator.c
drivers/regulator/rt5033-regulator.c
drivers/regulator/tps65023-regulator.c
drivers/scsi/device_handler/scsi_dh.c
drivers/scsi/sd.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-atmel.c
drivers/spi/spi-au1550.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bcm63xx.c
drivers/spi/spi-bitbang.c
drivers/spi/spi-butterfly.c
drivers/spi/spi-coldfire-qspi.c
drivers/spi/spi-davinci.c
drivers/spi/spi-dln2.c [new file with mode: 0644]
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw-pci.c
drivers/spi/spi-dw.c
drivers/spi/spi-falcon.c
drivers/spi/spi-fsl-cpm.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-fsl-lib.c
drivers/spi/spi-fsl-lib.h
drivers/spi/spi-gpio.c
drivers/spi/spi-img-spfi.c
drivers/spi/spi-imx.c
drivers/spi/spi-lm70llp.c
drivers/spi/spi-meson-spifc.c
drivers/spi/spi-mxs.c
drivers/spi/spi-omap-100k.c
drivers/spi/spi-omap-uwire.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-orion.c
drivers/spi/spi-pxa2xx-dma.c
drivers/spi/spi-pxa2xx-pxadma.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-pxa2xx.h
drivers/spi/spi-qup.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-rspi.c
drivers/spi/spi-s3c64xx.c
drivers/spi/spi-sc18is602.c
drivers/spi/spi-sh-hspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-sh.c
drivers/spi/spi-sirf.c
drivers/spi/spi-st-ssc4.c [new file with mode: 0644]
drivers/spi/spi-ti-qspi.c
drivers/spi/spi-topcliff-pch.c
drivers/spi/spi-xilinx.c
drivers/spi/spi.c
drivers/spi/spidev.c
drivers/vhost/net.c
fs/aio.c
fs/btrfs/Kconfig
fs/btrfs/tree-log.c
fs/cifs/cifs_debug.c
fs/cifs/file.c
fs/cifs/smbencrypt.c
fs/nilfs2/nilfs.h
fs/nilfs2/segment.c
fs/nilfs2/segment.h
fs/notify/Kconfig
fs/quota/Kconfig
include/linux/compiler.h
include/linux/if_vlan.h
include/linux/mlx4/device.h
include/linux/pxa2xx_ssp.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/regmap.h
include/linux/regulator/da9211.h
include/linux/regulator/driver.h
include/linux/regulator/machine.h
include/linux/regulator/mt6397-regulator.h [new file with mode: 0644]
include/linux/regulator/pfuze100.h
include/linux/spi/at86rf230.h
include/linux/spi/l4f00242t03.h
include/linux/spi/lms283gf05.h
include/linux/spi/mxs-spi.h
include/linux/spi/pxa2xx_spi.h
include/linux/spi/rspi.h
include/linux/spi/sh_hspi.h
include/linux/spi/sh_msiof.h
include/linux/spi/spi.h
include/linux/spi/tle62x0.h
include/linux/spi/tsc2005.h
include/linux/srcu.h
include/linux/tracepoint.h
include/linux/wait.h
include/net/flow_keys.h
include/net/ip.h
include/net/ipv6.h
include/net/netfilter/nf_tables.h
include/net/netns/ipv4.h
include/net/sch_generic.h
include/net/tcp.h
include/rdma/ib_verbs.h
include/sound/ak4113.h
include/sound/ak4114.h
include/sound/soc.h
include/trace/events/tlb.h
include/uapi/rdma/ib_user_verbs.h
init/Kconfig
kernel/cpu.c
kernel/notifier.c
kernel/power/Kconfig
kernel/rcu/Makefile
kernel/rcu/rcu.h
kernel/rcu/rcutorture.c
kernel/rcu/srcu.c
kernel/rcu/tiny.c
kernel/rcu/tiny_plugin.h
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/smpboot.c
kernel/softirq.c
kernel/time/hrtimer.c
lib/Kconfig.debug
lib/checksum.c
mm/Kconfig
mm/memcontrol.c
mm/nommu.c
mm/pagewalk.c
mm/shmem.c
net/bridge/netfilter/nft_reject_bridge.c
net/caif/chnl_net.c
net/core/dev.c
net/core/rtnetlink.c
net/ipv4/ip_output.c
net/ipv4/route.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_veno.c
net/ipv4/tcp_yeah.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/output_core.c
net/ipv6/sit.c
net/ipv6/udp_offload.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netfilter/nft_redir.c
net/netlink/af_netlink.c
net/rds/sysctl.c
net/sched/cls_api.c
net/sched/sch_fq.c
net/sctp/sm_make_chunk.c
security/tomoyo/Kconfig
sound/i2c/other/ak4113.c
sound/i2c/other/ak4114.c
sound/soc/atmel/atmel_ssc_dai.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm8731.c
sound/soc/codecs/wm9705.c
sound/soc/codecs/wm9712.c
sound/soc/codecs/wm9713.c
sound/soc/intel/sst-haswell-ipc.c
sound/soc/intel/sst/sst_acpi.c
sound/soc/soc-ac97.c
tools/lib/lockdep/.gitignore [new file with mode: 0644]
tools/lib/lockdep/Makefile
tools/testing/selftests/rcutorture/bin/cpus2use.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/parse-build.sh
tools/testing/selftests/rcutorture/bin/parse-console.sh

index d357e1bd2a434665ae545d9ed970edd77f15f7d9..0d971cfb07724828cde3e2fdb7bb184b649054fd 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -73,6 +73,7 @@ Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
 Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
+Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
index ed186a902d312a23913bb3e4ba0f272306fd1cb3..b57c0c1cdac609ca008001b9490b42c58b043574 100644 (file)
@@ -15,7 +15,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
        21 seconds.
 
        This configuration parameter may be changed at runtime via the
-       /sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however
+       /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout, however
        this parameter is checked only at the beginning of a cycle.
        So if you are 10 seconds into a 40-second stall, setting this
        sysfs parameter to (say) five will shorten the timeout for the
@@ -152,6 +152,15 @@ no non-lazy callbacks ("." is printed otherwise, as shown above) and
 "D" indicates that dyntick-idle processing is enabled ("." is printed
 otherwise, for example, if disabled via the "nohz=" kernel boot parameter).
 
+If the relevant grace-period kthread has been unable to run prior to
+the stall warning, the following additional line is printed:
+
+       rcu_preempt kthread starved for 2023 jiffies!
+
+Starving the grace-period kthreads of CPU time can of course result in
+RCU CPU stall warnings even when all CPUs and tasks have passed through
+the required quiescent states.
+
 
 Multiple Warnings From One Stall
 
@@ -187,6 +196,11 @@ o  For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
        behavior, you might need to replace some of the cond_resched()
        calls with calls to cond_resched_rcu_qs().
 
+o      Anything that prevents RCU's grace-period kthreads from running.
+       This can result in the "All QSes seen" console-log message.
+       This message will include information on when the kthread last
+       ran and how often it should be expected to run.
+
 o      A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
        happen to preempt a low-priority task in the middle of an RCU
        read-side critical section.   This is especially damaging if
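As a rough illustration of the runtime tuning described in the stallwarn.txt hunk above (this sketch is not part of the merge), the relocated module parameter can be read and, with sufficient privileges, rewritten from user space; per the documentation, a new value only takes effect once the next stall-detection cycle begins.

/*
 * Minimal sketch: read and optionally update the RCU stall timeout via the
 * sysfs path quoted in the documentation above.  Error handling kept short.
 */
#include <stdio.h>

#define STALL_PARAM "/sys/module/rcupdate/parameters/rcu_cpu_stall_timeout"

int main(int argc, char **argv)
{
	FILE *f = fopen(STALL_PARAM, "r");
	int timeout;

	if (!f || fscanf(f, "%d", &timeout) != 1) {
		perror(STALL_PARAM);
		return 1;
	}
	fclose(f);
	printf("current rcu_cpu_stall_timeout: %d seconds\n", timeout);

	if (argc > 1) {			/* e.g. "./stall-timeout 60", as root */
		f = fopen(STALL_PARAM, "w");
		if (!f || fprintf(f, "%s\n", argv[1]) < 0) {
			perror(STALL_PARAM);
			return 1;
		}
		fclose(f);
	}
	return 0;
}
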
index b63b9bb3bc0c4dc24064599055f976ac00e0ee85..08651da15448e27f8f2fb574c7cd51cde67815c6 100644 (file)
@@ -56,14 +56,14 @@ rcuboost:
 
 The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
 
-  0!c=30455 g=30456 pq=1 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
-  1!c=30719 g=30720 pq=1 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
-  2!c=30150 g=30151 pq=1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
-  3 c=31249 g=31250 pq=1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
-  4!c=29502 g=29503 pq=1 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
-  5 c=31201 g=31202 pq=1 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
-  6!c=30253 g=30254 pq=1 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
-  7 c=31178 g=31178 pq=1 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
+  0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
+  1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
+  2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
+  3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
+  4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
+  5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
+  6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
+  7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
 
 This file has one line per CPU, or eight for this 8-CPU system.
 The fields are as follows:
@@ -188,14 +188,14 @@ o "ca" is the number of RCU callbacks that have been adopted by this
 Kernels compiled with CONFIG_RCU_BOOST=y display the following from
 /debug/rcu/rcu_preempt/rcudata:
 
-  0!c=12865 g=12866 pq=1 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
-  1 c=14407 g=14408 pq=1 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
-  2 c=14407 g=14408 pq=1 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
-  3 c=14407 g=14408 pq=1 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
-  4 c=14405 g=14406 pq=1 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
-  5!c=14168 g=14169 pq=1 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
-  6 c=14404 g=14405 pq=1 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
-  7 c=14407 g=14408 pq=1 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
+  0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
+  1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
+  2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
+  3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
+  4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
+  5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
+  6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
+  7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
 
 This is similar to the output discussed above, but contains the following
 additional fields:
index 75fdfaf41831d9fcd76be1b5706dadf1e2a2cb7d..e39f0bc1f55e02ccd705b85ae96714fd0a7e5263 100644 (file)
@@ -39,6 +39,12 @@ to get matched with their hardware counterparts as follow:
        -BUCKn  :       1-4.
   Use standard regulator bindings for it ('regulator-off-in-suspend').
 
+  LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable
+  control. To turn this feature on this property must be added to the regulator
+  sub-node:
+       - maxim,ena-gpios :     one GPIO specifier enable control (the gpio
+                               flags are actually ignored and always
+                               ACTIVE_HIGH is used)
 
 Example:
 
@@ -65,4 +71,12 @@ Example:
                                regulator-always-on;
                                regulator-boot-on;
                        };
+
+                       buck9_reg {
+                               regulator-compatible = "BUCK9";
+                               regulator-name = "CAM_ISP_CORE_1.2V";
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <1200000>;
+                               maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
+                       };
        }
index 240019a82f9a57083f4f06402250f2fbf41ec4ce..eb618907c7dee35446ae8b2153cf566e91cdb689 100644 (file)
@@ -11,6 +11,7 @@ Required properties:
   BUCKA and BUCKB.
 
 Optional properties:
+- enable-gpios: platform gpio for control of BUCKA/BUCKB.
 - Any optional property defined in regulator.txt
 
 Example 1) DA9211
@@ -27,6 +28,7 @@ Example 1) DA9211
                                regulator-max-microvolt = <1570000>;
                                regulator-min-microamp  = <2000000>;
                                regulator-max-microamp  = <5000000>;
+                               enable-gpios = <&gpio 27 0>;
                        };
                        BUCKB {
                                regulator-name = "VBUCKB";
@@ -34,11 +36,12 @@ Example 1) DA9211
                                regulator-max-microvolt = <1570000>;
                                regulator-min-microamp  = <2000000>;
                                regulator-max-microamp  = <5000000>;
+                               enable-gpios = <&gpio 17 0>;
                        };
                };
        };
 
-Example 2) DA92113
+Example 2) DA9213
        pmic: da9213@68 {
                compatible = "dlg,da9213";
                reg = <0x68>;
@@ -51,6 +54,7 @@ Example 2) DA92113
                                regulator-max-microvolt = <1570000>;
                                regulator-min-microamp  = <3000000>;
                                regulator-max-microamp  = <6000000>;
+                               enable-gpios = <&gpio 27 0>;
                        };
                        BUCKB {
                                regulator-name = "VBUCKB";
@@ -58,6 +62,7 @@ Example 2) DA92113
                                regulator-max-microvolt = <1570000>;
                                regulator-min-microamp  = <3000000>;
                                regulator-max-microamp  = <6000000>;
+                               enable-gpios = <&gpio 17 0>;
                        };
                };
        };
index a626fc1bbf0d0bd1797191e91e31959e09c338a3..d6e7c9ec9413c0ddb0d6c66ee25812cce1683702 100644 (file)
@@ -2,7 +2,7 @@ Intersil ISL9305/ISL9305H voltage regulator
 
 Required properties:
 
-- compatible: "isl,isl9305" or "isl,isl9305h"
+- compatible: "isil,isl9305" or "isil,isl9305h"
 - reg: I2C slave address, usually 0x68.
 - regulators: A node that houses a sub-node for each regulator within the
   device. Each sub-node is identified using the node's name, with valid
@@ -19,7 +19,7 @@ Optional properties:
 Example
 
        pmic: isl9305@68 {
-               compatible = "isl,isl9305";
+               compatible = "isil,isl9305";
                reg = <0x68>;
 
                VINDCD1-supply = <&system_power>;
diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
new file mode 100644 (file)
index 0000000..a42b1d6
--- /dev/null
@@ -0,0 +1,217 @@
+Mediatek MT6397 Regulator Driver
+
+Required properties:
+- compatible: "mediatek,mt6397-regulator"
+- mt6397regulator: List of regulators provided by this controller. It is named
+  according to its regulator type, buck_<name> and ldo_<name>.
+  The definition for each of these nodes is defined using the standard binding
+  for regulators at Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are::
+BUCK:
+  buck_vpca15, buck_vpca7, buck_vsramca15, buck_vsramca7, buck_vcore, buck_vgpu,
+  buck_vdrm, buck_vio18
+LDO:
+  ldo_vtcxo, ldo_va28, ldo_vcama, ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch,
+  ldo_vemc3v3, ldo_vgp1, ldo_vgp2, ldo_vgp3, ldo_vgp4, ldo_vgp5, ldo_vgp6,
+  ldo_vibr
+
+Example:
+       pmic {
+               compatible = "mediatek,mt6397";
+
+               mt6397regulator: mt6397regulator {
+                       compatible = "mediatek,mt6397-regulator";
+
+                       mt6397_vpca15_reg: buck_vpca15 {
+                               regulator-compatible = "buck_vpca15";
+                               regulator-name = "vpca15";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <200>;
+                       };
+
+                       mt6397_vpca7_reg: buck_vpca7 {
+                               regulator-compatible = "buck_vpca7";
+                               regulator-name = "vpca7";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+                       };
+
+                       mt6397_vsramca15_reg: buck_vsramca15 {
+                               regulator-compatible = "buck_vsramca15";
+                               regulator-name = "vsramca15";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+
+                       };
+
+                       mt6397_vsramca7_reg: buck_vsramca7 {
+                               regulator-compatible = "buck_vsramca7";
+                               regulator-name = "vsramca7";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+
+                       };
+
+                       mt6397_vcore_reg: buck_vcore {
+                               regulator-compatible = "buck_vcore";
+                               regulator-name = "vcore";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+                       };
+
+                       mt6397_vgpu_reg: buck_vgpu {
+                               regulator-compatible = "buck_vgpu";
+                               regulator-name = "vgpu";
+                               regulator-min-microvolt = < 700000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+                       };
+
+                       mt6397_vdrm_reg: buck_vdrm {
+                               regulator-compatible = "buck_vdrm";
+                               regulator-name = "vdrm";
+                               regulator-min-microvolt = < 800000>;
+                               regulator-max-microvolt = <1400000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <500>;
+                       };
+
+                       mt6397_vio18_reg: buck_vio18 {
+                               regulator-compatible = "buck_vio18";
+                               regulator-name = "vio18";
+                               regulator-min-microvolt = <1500000>;
+                               regulator-max-microvolt = <2120000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <500>;
+                       };
+
+                       mt6397_vtcxo_reg: ldo_vtcxo {
+                               regulator-compatible = "ldo_vtcxo";
+                               regulator-name = "vtcxo";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-enable-ramp-delay = <90>;
+                       };
+
+                       mt6397_va28_reg: ldo_va28 {
+                               regulator-compatible = "ldo_va28";
+                               regulator-name = "va28";
+                               /* fixed output 2.8 V */
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vcama_reg: ldo_vcama {
+                               regulator-compatible = "ldo_vcama";
+                               regulator-name = "vcama";
+                               regulator-min-microvolt = <1500000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vio28_reg: ldo_vio28 {
+                               regulator-compatible = "ldo_vio28";
+                               regulator-name = "vio28";
+                               /* fixed output 2.8 V */
+                               regulator-enable-ramp-delay = <240>;
+                       };
+
+                       mt6397_usb_reg: ldo_vusb {
+                               regulator-compatible = "ldo_vusb";
+                               regulator-name = "vusb";
+                               /* fixed output 3.3 V */
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vmc_reg: ldo_vmc {
+                               regulator-compatible = "ldo_vmc";
+                               regulator-name = "vmc";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vmch_reg: ldo_vmch {
+                               regulator-compatible = "ldo_vmch";
+                               regulator-name = "vmch";
+                               regulator-min-microvolt = <3000000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+                               regulator-compatible = "ldo_vemc3v3";
+                               regulator-name = "vemc_3v3";
+                               regulator-min-microvolt = <3000000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp1_reg: ldo_vgp1 {
+                               regulator-compatible = "ldo_vgp1";
+                               regulator-name = "vcamd";
+                               regulator-min-microvolt = <1220000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <240>;
+                       };
+
+                       mt6397_vgp2_reg: ldo_vgp2 {
+                               regulator-compatible = "ldo_vgp2";
+                               regulator-name = "vcamio";
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp3_reg: ldo_vgp3 {
+                               regulator-compatible = "ldo_vgp3";
+                               regulator-name = "vcamaf";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp4_reg: ldo_vgp4 {
+                               regulator-compatible = "ldo_vgp4";
+                               regulator-name = "vgp4";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp5_reg: ldo_vgp5 {
+                               regulator-compatible = "ldo_vgp5";
+                               regulator-name = "vgp5";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3000000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp6_reg: ldo_vgp6 {
+                               regulator-compatible = "ldo_vgp6";
+                               regulator-name = "vgp6";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vibr_reg: ldo_vibr {
+                               regulator-compatible = "ldo_vibr";
+                               regulator-name = "vibr";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+               };
+       };
index 34ef5d16d0f1697c51b6759b8a572f33d6b85894..9b40db88f637bd9ed7f06c5eb809e7b16ddd660d 100644 (file)
@@ -1,7 +1,7 @@
 PFUZE100 family of regulators
 
 Required properties:
-- compatible: "fsl,pfuze100" or "fsl,pfuze200"
+- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000"
 - reg: I2C slave address
 
 Required child node:
@@ -14,6 +14,8 @@ Required child node:
   sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6
   --PFUZE200
   sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6
+  --PFUZE3000
+  sw1a,sw1b,sw2,sw3,swbst,vsnvs,vrefddr,vldo1,vldo2,vccsd,v33,vldo3,vldo4
 
 Each regulator is defined using the standard binding for regulators.
 
@@ -205,3 +207,93 @@ Example 2: PFUZE200
                        };
                };
        };
+
+Example 3: PFUZE3000
+
+       pmic: pfuze3000@08 {
+               compatible = "fsl,pfuze3000";
+               reg = <0x08>;
+
+               regulators {
+                       sw1a_reg: sw1a {
+                               regulator-min-microvolt = <700000>;
+                               regulator-max-microvolt = <1475000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                               regulator-ramp-delay = <6250>;
+                       };
+                       /* use sw1c_reg to align with pfuze100/pfuze200 */
+                       sw1c_reg: sw1b {
+                               regulator-min-microvolt = <700000>;
+                               regulator-max-microvolt = <1475000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                               regulator-ramp-delay = <6250>;
+                       };
+
+                       sw2_reg: sw2 {
+                               regulator-min-microvolt = <2500000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw3a_reg: sw3 {
+                               regulator-min-microvolt = <900000>;
+                               regulator-max-microvolt = <1650000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       swbst_reg: swbst {
+                               regulator-min-microvolt = <5000000>;
+                               regulator-max-microvolt = <5150000>;
+                       };
+
+                       snvs_reg: vsnvs {
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3000000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vref_reg: vrefddr {
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vgen1_reg: vldo1 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen2_reg: vldo2 {
+                               regulator-min-microvolt = <800000>;
+                               regulator-max-microvolt = <1550000>;
+                       };
+
+                       vgen3_reg: vccsd {
+                               regulator-min-microvolt = <2850000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen4_reg: v33 {
+                               regulator-min-microvolt = <2850000>;
+                               regulator-max-microvolt = <3300000>;
+                       };
+
+                       vgen5_reg: vldo3 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen6_reg: vldo4 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+               };
+       };
index d11c3721e7cd144995a717b6f29d804a018a0299..4c388bb2f0a224b5247bb03b9c52172b4a48de8c 100644 (file)
@@ -30,6 +30,22 @@ Optional properties:
                         specifiers, one for transmission, and one for
                         reception.
 - dma-names            : Must contain a list of two DMA names, "tx" and "rx".
+- renesas,dtdl         : delay sync signal (setup) in transmit mode.
+                        Must contain one of the following values:
+                        0   (no bit delay)
+                        50  (0.5-clock-cycle delay)
+                        100 (1-clock-cycle delay)
+                        150 (1.5-clock-cycle delay)
+                        200 (2-clock-cycle delay)
+
+- renesas,syncdl       : delay sync signal (hold) in transmit mode.
+                        Must contain one of the following values:
+                        0   (no bit delay)
+                        50  (0.5-clock-cycle delay)
+                        100 (1-clock-cycle delay)
+                        150 (1.5-clock-cycle delay)
+                        200 (2-clock-cycle delay)
+                        300 (3-clock-cycle delay)
 
 Optional properties, deprecated for soctype-specific bindings:
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
diff --git a/Documentation/devicetree/bindings/spi/spi-sirf.txt b/Documentation/devicetree/bindings/spi/spi-sirf.txt
new file mode 100644 (file)
index 0000000..4c7adb8
--- /dev/null
@@ -0,0 +1,41 @@
+* CSR SiRFprimaII Serial Peripheral Interface
+
+Required properties:
+- compatible : Should be "sirf,prima2-spi"
+- reg : Offset and length of the register set for the device
+- interrupts : Should contain SPI interrupt
+- resets: phandle to the reset controller asserting this device in
+          reset
+  See ../reset/reset.txt for details.
+- dmas : Must contain an entry for each entry in clock-names.
+  See ../dma/dma.txt for details.
+- dma-names : Must include the following entries:
+  - rx
+  - tx
+- clocks : Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+
+- #address-cells: Number of cells required to define a chip select
+                  address on the SPI bus. Should be set to 1.
+- #size-cells:    Should be zero.
+
+Optional properties:
+- spi-max-frequency: Specifies maximum SPI clock frequency,
+                     Units - Hz. Definition as per
+                     Documentation/devicetree/bindings/spi/spi-bus.txt
+- cs-gpios:     should specify GPIOs used for chipselects.
+
+Example:
+
+spi0: spi@b00d0000 {
+       compatible = "sirf,prima2-spi";
+       reg = <0xb00d0000 0x10000>;
+       interrupts = <15>;
+       dmas = <&dmac1 9>,
+               <&dmac1 4>;
+       dma-names = "rx", "tx";
+       #address-cells = <1>;
+       #size-cells = <0>;
+       clocks = <&clks 19>;
+       resets = <&rstc 26>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-st-ssc.txt b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
new file mode 100644 (file)
index 0000000..fe54959
--- /dev/null
@@ -0,0 +1,40 @@
+STMicroelectronics SSC (SPI) Controller
+---------------------------------------
+
+Required properties:
+- compatible   : "st,comms-ssc4-spi"
+- reg          : Offset and length of the device's register set
+- interrupts   : The interrupt specifier
+- clock-names  : Must contain "ssc"
+- clocks       : Must contain an entry for each name in clock-names
+                   See ../clk/*
+- pinctrl-names        : Uses "default", can use "sleep" if provided
+                   See ../pinctrl/pinctrl-binding.txt
+
+Optional properties:
+- cs-gpios     : List of GPIO chip selects
+                   See ../spi/spi-bus.txt
+
+Child nodes represent devices on the SPI bus
+  See ../spi/spi-bus.txt
+
+Example:
+       spi@9840000 {
+               compatible      = "st,comms-ssc4-spi";
+               reg             = <0x9840000 0x110>;
+               interrupts      = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+               clocks          = <&clk_s_c0_flexgen CLK_EXT2F_A9>;
+               clock-names     = "ssc";
+               pinctrl-0       = <&pinctrl_spi0_default>;
+               pinctrl-names   = "default";
+               cs-gpios        = <&pio17 5 0>;
+               #address-cells  = <1>;
+               #size-cells     = <0>;
+
+               st95hf@0{
+                       compatible              = "st,st95hf";
+                       reg                     = <0>;
+                       spi-max-frequency       = <1000000>;
+                       interrupts              = <2 IRQ_TYPE_EDGE_FALLING>;
+               };
+       };
index 4223c2d3b508be75e5764e67fe979b23aec90cc7..cfd31d94c8727251179a0c2afcc4f9b2a4580156 100644 (file)
@@ -26,6 +26,12 @@ Supported chips:
     Datasheet: Publicly available at the Texas Instruments website
                http://www.ti.com/
 
+  * Texas Instruments INA231
+    Prefix: 'ina231'
+    Addresses: I2C 0x40 - 0x4f
+    Datasheet: Publicly available at the Texas Instruments website
+               http://www.ti.com/
+
 Author: Lothar Felten <l-felten@ti.com>
 
 Description
@@ -41,9 +47,18 @@ interface. The INA220 monitors both shunt drop and supply voltage.
 The INA226 is a current shunt and power monitor with an I2C interface.
 The INA226 monitors both a shunt voltage drop and bus supply voltage.
 
-The INA230 is a high or low side current shunt and power monitor with an I2C
-interface. The INA230 monitors both a shunt voltage drop and bus supply voltage.
+INA230 and INA231 are high or low side current shunt and power monitors
+with an I2C interface. The chips monitor both a shunt voltage drop and
+bus supply voltage.
 
-The shunt value in micro-ohms can be set via platform data or device tree.
-Please refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
+The shunt value in micro-ohms can be set via platform data or device tree at
+compile-time or via the shunt_resistor attribute in sysfs at run-time. Please
+refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
 if the device tree is used.
+
+Additionally ina226 supports update_interval attribute as described in
+Documentation/hwmon/sysfs-interface. Internally the interval is the sum of
+bus and shunt voltage conversion times multiplied by the averaging rate. We
+don't touch the conversion times and only modify the number of averages. The
+lower limit of the update_interval is 2 ms, the upper limit is 2253 ms.
+The actual programmed interval may vary from the desired value.
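The update_interval arithmetic described above (bus plus shunt conversion time, multiplied by the averaging rate, bounded to roughly 2..2253 ms) can be checked with a small stand-alone sketch; the 1.1 ms conversion time and the averaging table below are assumptions modelled on typical INA226 settings, not values taken from this patch, so treat the numbers as illustrative only.

/*
 * Illustrative only: reproduce the update_interval arithmetic stated in the
 * documentation above.  conv_us and avg[] are assumed datasheet-style
 * values, not values read from the driver.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int conv_us = 1100;	/* assumed per-conversion time (us) */
	const unsigned int avg[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
	unsigned int requested_ms = 500, best = avg[0];
	unsigned int i;

	for (i = 0; i < sizeof(avg) / sizeof(avg[0]); i++) {
		/* bus + shunt conversion, multiplied by the averaging rate */
		unsigned int interval_ms = 2 * conv_us * avg[i] / 1000;

		printf("avg %4u -> update_interval %4u ms\n", avg[i], interval_ms);
		if (interval_ms <= requested_ms)
			best = avg[i];	/* keep the largest rate that still fits */
	}
	printf("requested %u ms -> averaging rate %u\n", requested_ms, best);
	return 0;
}
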
index 70a09f8a0383b2cdc25ab35f6aef43e8ac2905b4..ca2387ef27ab0c38cdb4b5e74ffbcde9509d61d8 100644 (file)
@@ -269,6 +269,50 @@ And there are a number of things that _must_ or _must_not_ be assumed:
        STORE *(A + 4) = Y; STORE *A = X;
        STORE {*A, *(A + 4) } = {X, Y};
 
+And there are anti-guarantees:
+
+ (*) These guarantees do not apply to bitfields, because compilers often
+     generate code to modify these using non-atomic read-modify-write
+     sequences.  Do not attempt to use bitfields to synchronize parallel
+     algorithms.
+
+ (*) Even in cases where bitfields are protected by locks, all fields
+     in a given bitfield must be protected by one lock.  If two fields
+     in a given bitfield are protected by different locks, the compiler's
+     non-atomic read-modify-write sequences can cause an update to one
+     field to corrupt the value of an adjacent field.
+
+ (*) These guarantees apply only to properly aligned and sized scalar
+     variables.  "Properly sized" currently means variables that are
+     the same size as "char", "short", "int" and "long".  "Properly
+     aligned" means the natural alignment, thus no constraints for
+     "char", two-byte alignment for "short", four-byte alignment for
+     "int", and either four-byte or eight-byte alignment for "long",
+     on 32-bit and 64-bit systems, respectively.  Note that these
+     guarantees were introduced into the C11 standard, so beware when
+     using older pre-C11 compilers (for example, gcc 4.6).  The portion
+     of the standard containing this guarantee is Section 3.14, which
+     defines "memory location" as follows:
+
+       memory location
+               either an object of scalar type, or a maximal sequence
+               of adjacent bit-fields all having nonzero width
+
+               NOTE 1: Two threads of execution can update and access
+               separate memory locations without interfering with
+               each other.
+
+               NOTE 2: A bit-field and an adjacent non-bit-field member
+               are in separate memory locations. The same applies
+               to two bit-fields, if one is declared inside a nested
+               structure declaration and the other is not, or if the two
+               are separated by a zero-length bit-field declaration,
+               or if they are separated by a non-bit-field member
+               declaration. It is not safe to concurrently update two
+               bit-fields in the same structure if all members declared
+               between them are also bit-fields, no matter what the
+               sizes of those intervening bit-fields happen to be.
+
 
 =========================
 WHAT ARE MEMORY BARRIERS?
@@ -750,7 +794,7 @@ In summary:
       However, they do -not- guarantee any other sort of ordering:
       Not prior loads against later loads, nor prior stores against
       later anything.  If you need these other forms of ordering,
-      use smb_rmb(), smp_wmb(), or, in the case of prior stores and
+      use smp_rmb(), smp_wmb(), or, in the case of prior stores and
       later loads, smp_mb().
 
   (*) If both legs of the "if" statement begin with identical stores
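The new memory-barriers.txt text above leans on the C11 notion of a "memory location"; as a small stand-alone example (not part of the patch), here is how that rule plays out in a structure declaration.

/*
 * Illustration of the "memory location" rule quoted above: a and b share a
 * memory location, so concurrent updates to them must be covered by one
 * lock; the zero-width bit-field starts a new memory location, and a plain
 * member such as d is always a memory location of its own.
 */
#include <stdio.h>

struct flags {
	unsigned int a : 1;	/* a and b: one memory location, one lock    */
	unsigned int b : 1;
	unsigned int   : 0;	/* zero-width bit-field: new memory location */
	unsigned int c : 1;	/* may be updated under a different lock     */
	int d;			/* separate memory location                  */
};

int main(void)
{
	struct flags f = { .a = 1, .b = 0, .c = 1, .d = 42 };

	printf("sizeof(struct flags) = %zu\n", sizeof(f));
	printf("a=%u b=%u c=%u d=%d\n", f.a, f.b, f.c, f.d);
	return 0;
}
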
index c6af4bac5aa8f914a83305831e10f285c1699fb2..54f10478e8e30ccda77da9d345c01b9ace781e07 100644 (file)
@@ -199,16 +199,9 @@ frame header.
 TX limitations
 --------------
 
-Kernel processing usually involves validation of the message received by
-user-space, then processing its contents. The kernel must assure that
-userspace is not able to modify the message contents after they have been
-validated. In order to do so, the message is copied from the ring frame
-to an allocated buffer if either of these conditions is false:
-
-- only a single mapping of the ring exists
-- the file descriptor is not shared between processes
-
-This means that for threaded programs, the kernel will fall back to copying.
+As of Jan 2015 the message is always copied from the ring frame to an
+allocated buffer due to unresolved security concerns.
+See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX.").
 
 Example
 -------
index aaa039dee99947e455db6f0499ca191572b552bf..d66a97dd3a12548e0f79c59154260724da11f282 100644 (file)
@@ -4953,6 +4953,16 @@ F:       Documentation/input/multi-touch-protocol.txt
 F:     drivers/input/input-mt.c
 K:     \b(ABS|SYN)_MT_
 
+INTEL ASoC BDW/HSW DRIVERS
+M:     Jie Yang <yang.jie@linux.intel.com>
+L:     alsa-devel@alsa-project.org
+S:     Supported
+F:     sound/soc/intel/sst-haswell*
+F:     sound/soc/intel/sst-dsp*
+F:     sound/soc/intel/sst-firmware.c
+F:     sound/soc/intel/broadwell.c
+F:     sound/soc/intel/haswell.c
+
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:     Intel SCU Linux support <intel-linux-scu@intel.com>
 M:     Artur Paszkiewicz <artur.paszkiewicz@intel.com>
@@ -9241,7 +9251,6 @@ F:        drivers/net/ethernet/dlink/sundance.c
 
 SUPERH
 L:     linux-sh@vger.kernel.org
-W:     http://www.linux-sh.org
 Q:     http://patchwork.kernel.org/project/linux-sh/list/
 S:     Orphan
 F:     Documentation/sh/
index c8e17c05f9163ae74dd9434e4f68b15188190558..b15036b1890cae7031b56c4718edbd5baddfacbb 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
index 68be9017593df10f235ca228a6837aaa11f5c818..132c70e2d2f11cfb4e655ddcb05931be5bd03498 100644 (file)
@@ -263,16 +263,37 @@ restart:  adr     r0, LC0
                 * OK... Let's do some funky business here.
                 * If we do have a DTB appended to zImage, and we do have
                 * an ATAG list around, we want the later to be translated
-                * and folded into the former here.  To be on the safe side,
-                * let's temporarily move  the stack away into the malloc
-                * area.  No GOT fixup has occurred yet, but none of the
-                * code we're about to call uses any global variable.
+                * and folded into the former here. No GOT fixup has occurred
+                * yet, but none of the code we're about to call uses any
+                * global variable.
                */
-               add     sp, sp, #0x10000
+
+               /* Get the initial DTB size */
+               ldr     r5, [r6, #4]
+#ifndef __ARMEB__
+               /* convert to little endian */
+               eor     r1, r5, r5, ror #16
+               bic     r1, r1, #0x00ff0000
+               mov     r5, r5, ror #8
+               eor     r5, r5, r1, lsr #8
+#endif
+               /* 50% DTB growth should be good enough */
+               add     r5, r5, r5, lsr #1
+               /* preserve 64-bit alignment */
+               add     r5, r5, #7
+               bic     r5, r5, #7
+               /* clamp to 32KB min and 1MB max */
+               cmp     r5, #(1 << 15)
+               movlo   r5, #(1 << 15)
+               cmp     r5, #(1 << 20)
+               movhi   r5, #(1 << 20)
+               /* temporarily relocate the stack past the DTB work space */
+               add     sp, sp, r5
+
                stmfd   sp!, {r0-r3, ip, lr}
                mov     r0, r8
                mov     r1, r6
-               sub     r2, sp, r6
+               mov     r2, r5
                bl      atags_to_fdt
 
                /*
@@ -285,11 +306,11 @@ restart:  adr     r0, LC0
                bic     r0, r0, #1
                add     r0, r0, #0x100
                mov     r1, r6
-               sub     r2, sp, r6
+               mov     r2, r5
                bleq    atags_to_fdt
 
                ldmfd   sp!, {r0-r3, ip, lr}
-               sub     sp, sp, #0x10000
+               sub     sp, sp, r5
 #endif
 
                mov     r8, r6                  @ use the appended device tree
@@ -306,7 +327,7 @@ restart:    adr     r0, LC0
                subs    r1, r5, r1
                addhi   r9, r9, r1
 
-               /* Get the dtb's size */
+               /* Get the current DTB size */
                ldr     r5, [r6, #4]
 #ifndef __ARMEB__
                /* convert r5 (dtb size) to little endian */
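For readers skimming the head.S hunks above, the workspace sizing done in assembly before calling atags_to_fdt() can be restated in C; this is an editorial sketch of the same arithmetic (50% growth, 64-bit alignment, 32KB..1MB clamp), not code from the patch, and it byte-swaps unconditionally as a little-endian host would.

/*
 * Sketch of the DTB workspace calculation performed by the new assembly:
 * read the FDT totalsize field (stored big-endian), allow 50% growth,
 * round up for 64-bit alignment, and clamp to the 32KB..1MB window used
 * for the temporary stack relocation.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t dtb_workspace(uint32_t totalsize_be)
{
	uint32_t size = __builtin_bswap32(totalsize_be);	/* to host order */

	size += size >> 1;		/* 50% DTB growth should be enough */
	size = (size + 7) & ~7u;	/* preserve 64-bit alignment       */
	if (size < (1u << 15))		/* clamp to 32KB minimum ...       */
		size = 1u << 15;
	if (size > (1u << 20))		/* ... and 1MB maximum             */
		size = 1u << 20;
	return size;
}

int main(void)
{
	uint32_t totalsize = 40000;	/* e.g. a 40000-byte DTB */

	printf("workspace = %u bytes\n",
	       (unsigned int)dtb_workspace(__builtin_bswap32(totalsize)));
	return 0;
}
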
index b8168f1f8139baf3a1420ba17061ad18dfa41151..24ff27049ce015bf6c1203b73d97bfaa12ff0e37 100644 (file)
        };
 
        i2s1: i2s@13960000 {
-               compatible = "samsung,s5pv210-i2s";
+               compatible = "samsung,s3c6410-i2s";
                reg = <0x13960000 0x100>;
                clocks = <&clock CLK_I2S1>;
                clock-names = "iis";
        };
 
        i2s2: i2s@13970000 {
-               compatible = "samsung,s5pv210-i2s";
+               compatible = "samsung,s3c6410-i2s";
                reg = <0x13970000 0x100>;
                clocks = <&clock CLK_I2S2>;
                clock-names = "iis";
index 2260f1855820fa2d2961025b2683c2e30982a8e0..8944f4991c3cfd4e76585cdc11b78e3c3b2257d2 100644 (file)
 
 __invalid_entry:
        v7m_exception_entry
+#ifdef CONFIG_PRINTK
        adr     r0, strerr
        mrs     r1, ipsr
        mov     r2, lr
        bl      printk
+#endif
        mov     r0, sp
        bl      show_regs
 1:     b       1b
index 466bd299b1a8aad54949364d976d9c5430c2375e..3afee5f40f4f1d7ff6d10b6a95b1b6434415c63e 100644 (file)
@@ -23,6 +23,7 @@ config KVM
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_MMIO
        select KVM_ARM_HOST
+       select SRCU
        depends on ARM_VIRT_EXT && ARM_LPAE
        ---help---
          Support hosting virtualized guest machines. You will also
index 03823e784f63e7acf91fdda4d5f58927ccf527d9..c43c714555661337048b72a5a21a6b5357659567 100644 (file)
@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
 
 config ARM_KERNMEM_PERMS
        bool "Restrict kernel memory permissions"
+       depends on MMU
        help
          If this is set, kernel memory other than kernel text (and rodata)
          will be made non-executable. The tradeoff is that each region is
index 91892569710f5ab79127218e808e84ef14ef33eb..845769e413323120b6d7b4afed7640746413bc98 100644 (file)
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
        for_each_possible_cpu(i) {
-               if (i == cpu) {
-                       asid = 0;
-               } else {
-                       asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-                       /*
-                        * If this CPU has already been through a
-                        * rollover, but hasn't run another task in
-                        * the meantime, we must preserve its reserved
-                        * ASID, as this is the only trace we have of
-                        * the process it is still running.
-                        */
-                       if (asid == 0)
-                               asid = per_cpu(reserved_asids, i);
-                       __set_bit(asid & ~ASID_MASK, asid_map);
-               }
+               asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+               /*
+                * If this CPU has already been through a
+                * rollover, but hasn't run another task in
+                * the meantime, we must preserve its reserved
+                * ASID, as this is the only trace we have of
+                * the process it is still running.
+                */
+               if (asid == 0)
+                       asid = per_cpu(reserved_asids, i);
+               __set_bit(asid & ~ASID_MASK, asid_map);
                per_cpu(reserved_asids, i) = asid;
        }
 
index a673c7f7e208efe1bc9b2e2d4ff0cb8ab6a1c5da..903dba064a034c7e5d9fff950d3fa334301130d9 100644 (file)
@@ -2048,6 +2048,9 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 
+       if (!mapping)
+               return;
+
        __arm_iommu_detach_device(dev);
        arm_iommu_release_mapping(mapping);
 }
index 8ba85e9ea388d1778c54eabdd5ca34afad114faf..b334084d3675e33761ec618b9f776650c02f2251 100644 (file)
@@ -26,6 +26,7 @@ config KVM
        select KVM_ARM_HOST
        select KVM_ARM_VGIC
        select KVM_ARM_TIMER
+       select SRCU
        ---help---
          Support hosting virtualized guest machines.
 
index 3289969ee423a9edbe7fb12359f56cea2bd2be2a..843713c05b79fe69f8bbc057a17a5e200dfc54da 100644 (file)
@@ -2656,27 +2656,21 @@ config TRAD_SIGNALS
        bool
 
 config MIPS32_COMPAT
-       bool "Kernel support for Linux/MIPS 32-bit binary compatibility"
-       depends on 64BIT
-       help
-         Select this option if you want Linux/MIPS 32-bit binary
-         compatibility. Since all software available for Linux/MIPS is
-         currently 32-bit you should say Y here.
+       bool
 
 config COMPAT
        bool
-       depends on MIPS32_COMPAT
-       select ARCH_WANT_OLD_COMPAT_IPC
-       default y
 
 config SYSVIPC_COMPAT
        bool
-       depends on COMPAT && SYSVIPC
-       default y
 
 config MIPS32_O32
        bool "Kernel support for o32 binaries"
-       depends on MIPS32_COMPAT
+       depends on 64BIT
+       select ARCH_WANT_OLD_COMPAT_IPC
+       select COMPAT
+       select MIPS32_COMPAT
+       select SYSVIPC_COMPAT if SYSVIPC
        help
          Select this option if you want to run o32 binaries.  These are pure
          32-bit binaries as used by the 32-bit Linux/MIPS port.  Most of
@@ -2686,7 +2680,10 @@ config MIPS32_O32
 
 config MIPS32_N32
        bool "Kernel support for n32 binaries"
-       depends on MIPS32_COMPAT
+       depends on 64BIT
+       select COMPAT
+       select MIPS32_COMPAT
+       select SYSVIPC_COMPAT if SYSVIPC
        help
          Select this option if you want to run n32 binaries.  These are
          64-bit binaries using 32-bit quantities for addressing and certain
index 8585078ae50e90d0cf3bc87217b68cf55512dd23..2a4c52e27f416e146e5c268edad9fd867e79c5fe 100644 (file)
@@ -49,7 +49,8 @@
 /*
  * Some extra ELF definitions
  */
-#define PT_MIPS_REGINFO 0x70000000     /* Register usage information */
+#define PT_MIPS_REGINFO        0x70000000      /* Register usage information */
+#define PT_MIPS_ABIFLAGS       0x70000003      /* Records ABI related flags  */
 
 /* -------------------------------------------------------------------- */
 
@@ -349,39 +350,46 @@ int main(int argc, char *argv[])
 
        for (i = 0; i < ex.e_phnum; i++) {
                /* Section types we can ignore... */
-               if (ph[i].p_type == PT_NULL || ph[i].p_type == PT_NOTE ||
-                   ph[i].p_type == PT_PHDR
-                   || ph[i].p_type == PT_MIPS_REGINFO)
+               switch (ph[i].p_type) {
+               case PT_NULL:
+               case PT_NOTE:
+               case PT_PHDR:
+               case PT_MIPS_REGINFO:
+               case PT_MIPS_ABIFLAGS:
                        continue;
-               /* Section types we can't handle... */
-               else if (ph[i].p_type != PT_LOAD) {
-                       fprintf(stderr,
-                               "Program header %d type %d can't be converted.\n",
-                               ex.e_phnum, ph[i].p_type);
-                       exit(1);
-               }
-               /* Writable (data) segment? */
-               if (ph[i].p_flags & PF_W) {
-                       struct sect ndata, nbss;
 
-                       ndata.vaddr = ph[i].p_vaddr;
-                       ndata.len = ph[i].p_filesz;
-                       nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
-                       nbss.len = ph[i].p_memsz - ph[i].p_filesz;
+               case PT_LOAD:
+                       /* Writable (data) segment? */
+                       if (ph[i].p_flags & PF_W) {
+                               struct sect ndata, nbss;
+
+                               ndata.vaddr = ph[i].p_vaddr;
+                               ndata.len = ph[i].p_filesz;
+                               nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
+                               nbss.len = ph[i].p_memsz - ph[i].p_filesz;
 
-                       combine(&data, &ndata, 0);
-                       combine(&bss, &nbss, 1);
-               } else {
-                       struct sect ntxt;
+                               combine(&data, &ndata, 0);
+                               combine(&bss, &nbss, 1);
+                       } else {
+                               struct sect ntxt;
 
-                       ntxt.vaddr = ph[i].p_vaddr;
-                       ntxt.len = ph[i].p_filesz;
+                               ntxt.vaddr = ph[i].p_vaddr;
+                               ntxt.len = ph[i].p_filesz;
 
-                       combine(&text, &ntxt, 0);
+                               combine(&text, &ntxt, 0);
+                       }
+                       /* Remember the lowest segment start address. */
+                       if (ph[i].p_vaddr < cur_vma)
+                               cur_vma = ph[i].p_vaddr;
+                       break;
+
+               default:
+                       /* Section types we can't handle... */
+                       fprintf(stderr,
+                               "Program header %d type %d can't be converted.\n",
+                               ex.e_phnum, ph[i].p_type);
+                       exit(1);
                }
-               /* Remember the lowest segment start address. */
-               if (ph[i].p_vaddr < cur_vma)
-                       cur_vma = ph[i].p_vaddr;
        }
 
        /* Sections must be in order to be converted... */
index ecd903dd1c456788db99adf1be0c6536a2a41bf4..8b1eeffa12edf0fbc2446ba81ab19750e5d49891 100644 (file)
@@ -240,9 +240,7 @@ static int octeon_cpu_disable(void)
 
        set_cpu_online(cpu, false);
        cpu_clear(cpu, cpu_callin_map);
-       local_irq_disable();
        octeon_fixup_irqs();
-       local_irq_enable();
 
        flush_cache_all();
        local_flush_tlb_all();
index f57b96dcf7df57894e4fcc6d582a2f8036670d33..61a4460d67d32b2c7d8d266f40c316c70aef5e7c 100644 (file)
@@ -132,7 +132,6 @@ CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -175,7 +174,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
 CONFIG_BRIDGE_EBT_REDIRECT=m
 CONFIG_BRIDGE_EBT_SNAT=m
 CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
 CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_SCTP=m
 CONFIG_BRIDGE=m
@@ -220,8 +218,6 @@ CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_CLS_IND=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -248,19 +244,13 @@ CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_PIIX=y
-CONFIG_BLK_DEV_IT8213=m
-CONFIG_BLK_DEV_TC86C001=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
+CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
@@ -273,6 +263,8 @@ CONFIG_SCSI_AACRAID=m
 CONFIG_SCSI_AIC7XXX=m
 CONFIG_AIC7XXX_RESET_DELAY_MS=15000
 # CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
@@ -340,6 +332,7 @@ CONFIG_UIO=m
 CONFIG_UIO_CIF=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_REISERFS_PROC_INFO=y
 CONFIG_REISERFS_FS_XATTR=y
@@ -441,4 +434,3 @@ CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
index 994d219396761317d8fe626ec01d671c595d2b00..affebb78f5d6573dbf97f62630ad1e6a35026602 100644 (file)
@@ -64,7 +64,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
                        return SIGFPE;
 
                /* set FRE */
-               write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE);
+               set_c0_config5(MIPS_CONF5_FRE);
                goto fr_common;
 
        case FPU_64BIT:
@@ -74,8 +74,10 @@ static inline int __enable_fpu(enum fpu_mode mode)
 #endif
                /* fall through */
        case FPU_32BIT:
-               /* clear FRE */
-               write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE);
+               if (cpu_has_fre) {
+                       /* clear FRE */
+                       clear_c0_config5(MIPS_CONF5_FRE);
+               }
 fr_common:
                /* set CU1 & change FR appropriately */
                fr = (int)mode & FPU_FR_MASK;
@@ -182,25 +184,32 @@ static inline int init_fpu(void)
        int ret = 0;
 
        if (cpu_has_fpu) {
+               unsigned int config5;
+
                ret = __own_fpu();
-               if (!ret) {
-                       unsigned int config5 = read_c0_config5();
-
-                       /*
-                        * Ensure FRE is clear whilst running _init_fpu, since
-                        * single precision FP instructions are used. If FRE
-                        * was set then we'll just end up initialising all 32
-                        * 64b registers.
-                        */
-                       write_c0_config5(config5 & ~MIPS_CONF5_FRE);
-                       enable_fpu_hazard();
+               if (ret)
+                       return ret;
 
+               if (!cpu_has_fre) {
                        _init_fpu();
 
-                       /* Restore FRE */
-                       write_c0_config5(config5);
-                       enable_fpu_hazard();
+                       return 0;
                }
+
+               /*
+                * Ensure FRE is clear whilst running _init_fpu, since
+                * single precision FP instructions are used. If FRE
+                * was set then we'll just end up initialising all 32
+                * 64b registers.
+                */
+               config5 = clear_c0_config5(MIPS_CONF5_FRE);
+               enable_fpu_hazard();
+
+               _init_fpu();
+
+               /* Restore FRE */
+               write_c0_config5(config5);
+               enable_fpu_hazard();
        } else
                fpu_emulator_init_fpu();
 
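The init_fpu() rewrite above leans on set_c0_config5()/clear_c0_config5() doing the read-modify-write in one helper and handing back the previous register value, which is what lets the saved config5 be written back after _init_fpu(). A user-space analogue of that clear-then-restore pattern, with an ordinary variable standing in for the CP0 Config5 register and an illustrative (not authoritative) bit value:

#include <stdio.h>

#define CONF5_FRE	(1u << 8)	/* illustrative bit, not the real encoding */

static unsigned int config5 = CONF5_FRE | 0x3;

/* models clear_c0_config5(): clear the bits, return the old value */
static unsigned int clear_config5(unsigned int mask)
{
	unsigned int old = config5;

	config5 = old & ~mask;
	return old;
}

int main(void)
{
	unsigned int saved = clear_config5(CONF5_FRE);

	/* ... _init_fpu() would run here with FRE guaranteed clear ... */

	config5 = saved;	/* write_c0_config5(saved) restores FRE */
	printf("config5 restored to %#x\n", config5);
	return 0;
}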
index f8d37d1df5de53cd091b696e1c5372878e38d000..9fac64a2635307846c8ddcc2aaf775390de56387 100644 (file)
@@ -119,7 +119,7 @@ union key_u {
 #define SGI_ARCS_REV   10                      /* rev .10, 3/04/92 */
 #endif
 
-typedef struct component {
+typedef struct {
        CONFIGCLASS     Class;
        CONFIGTYPE      Type;
        IDENTIFIERFLAG  Flags;
@@ -140,7 +140,7 @@ struct cfgdata {
 };
 
 /* System ID */
-typedef struct systemid {
+typedef struct {
        CHAR VendorId[8];
        CHAR ProductId[8];
 } SYSTEMID;
@@ -166,7 +166,7 @@ typedef enum memorytype {
 #endif /* _NT_PROM */
 } MEMORYTYPE;
 
-typedef struct memorydescriptor {
+typedef struct {
        MEMORYTYPE      Type;
        LONG            BasePage;
        LONG            PageCount;
index b95a827d763ee22427a5f87050598eaf8e1d9da6..59c0901bdd847c6c51450eb92f67839f5380d44a 100644 (file)
@@ -89,9 +89,9 @@ static inline bool mips_cm_has_l2sync(void)
 
 /* Macros to ease the creation of register access functions */
 #define BUILD_CM_R_(name, off)                                 \
-static inline u32 *addr_gcr_##name(void)                       \
+static inline u32 __iomem *addr_gcr_##name(void)               \
 {                                                              \
-       return (u32 *)(mips_cm_base + (off));                   \
+       return (u32 __iomem *)(mips_cm_base + (off));           \
 }                                                              \
                                                                \
 static inline u32 read_gcr_##name(void)                                \
index 5e4aef304b0217c239dc015dda4b9c8c348c7d89..5b720d8c2745b2e8b891f38c5256db82a1232bc5 100644 (file)
@@ -1386,12 +1386,27 @@ do {                                                                    \
        __res;                                                          \
 })
 
+#define _write_32bit_cp1_register(dest, val, gas_hardfloat)            \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       "       .set    push                                    \n"     \
+       "       .set    reorder                                 \n"     \
+       "       "STR(gas_hardfloat)"                            \n"     \
+       "       ctc1    %0,"STR(dest)"                          \n"     \
+       "       .set    pop                                     \n"     \
+       : : "r" (val));                                                 \
+} while (0)
+
 #ifdef GAS_HAS_SET_HARDFLOAT
 #define read_32bit_cp1_register(source)                                        \
        _read_32bit_cp1_register(source, .set hardfloat)
+#define write_32bit_cp1_register(dest, val)                            \
+       _write_32bit_cp1_register(dest, val, .set hardfloat)
 #else
 #define read_32bit_cp1_register(source)                                        \
        _read_32bit_cp1_register(source, )
+#define write_32bit_cp1_register(dest, val)                            \
+       _write_32bit_cp1_register(dest, val, )
 #endif
 
 #ifdef HAVE_AS_DSP
index bb7963753730d817117f209e6bac10f4943a0b56..6499d93ae68d7096d63416a349d9afcdcc0cb3ae 100644 (file)
 static inline long syscall_get_nr(struct task_struct *task,
                                  struct pt_regs *regs)
 {
-       /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
-       if ((config_enabled(CONFIG_32BIT) ||
-           test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
-           (regs->regs[2] == __NR_syscall))
-               return regs->regs[4];
-       else
-               return regs->regs[2];
+       return current_thread_info()->syscall;
 }
 
 static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
index 99eea59604e984b61907a5a6a82feeca9660a609..e4440f92b366f7a1d90e4af10dd2477acdf0223a 100644 (file)
@@ -36,6 +36,7 @@ struct thread_info {
                                                 */
        struct restart_block    restart_block;
        struct pt_regs          *regs;
+       long                    syscall;        /* syscall number */
 };
 
 /*
index d001bb1ad177e7b6e2df2fbb7e42e894784b2e91..c03088f9f514e7c21f7ae0e185f8be0456af372b 100644 (file)
 #define __NR_getrandom                 (__NR_Linux + 353)
 #define __NR_memfd_create              (__NR_Linux + 354)
 #define __NR_bpf                       (__NR_Linux + 355)
+#define __NR_execveat                  (__NR_Linux + 356)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            355
+#define __NR_Linux_syscalls            356
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                355
+#define __NR_O32_Linux_syscalls                356
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_getrandom                 (__NR_Linux + 313)
 #define __NR_memfd_create              (__NR_Linux + 314)
 #define __NR_bpf                       (__NR_Linux + 315)
+#define __NR_execveat                  (__NR_Linux + 316)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            315
+#define __NR_Linux_syscalls            316
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         315
+#define __NR_64_Linux_syscalls         316
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_getrandom                 (__NR_Linux + 317)
 #define __NR_memfd_create              (__NR_Linux + 318)
 #define __NR_bpf                       (__NR_Linux + 319)
+#define __NR_execveat                  (__NR_Linux + 320)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            319
+#define __NR_Linux_syscalls            320
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                319
+#define __NR_N32_Linux_syscalls                320
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 2531da1d3add4d0af7dcc3a3d1cfb82ae4c61216..97206b3deb9777b963bf5f47cbb0b0de6d1e5bb4 100644 (file)
@@ -30,6 +30,9 @@
 #include <asm/irq_cpu.h>
 
 #include <asm/mach-jz4740/base.h>
+#include <asm/mach-jz4740/irq.h>
+
+#include "irq.h"
 
 static void __iomem *jz_intc_base;
 
index c92b15df6893f555549bf3cc51bf6c39f78e1875..a5b5b56485c1618c34af3daea2b67e795a0f8c2b 100644 (file)
@@ -19,8 +19,8 @@ enum {
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
                     bool is_interp, struct arch_elf_state *state)
 {
-       struct elfhdr *ehdr = _ehdr;
-       struct elf_phdr *phdr = _phdr;
+       struct elf32_hdr *ehdr = _ehdr;
+       struct elf32_phdr *phdr = _phdr;
        struct mips_elf_abiflags_v0 abiflags;
        int ret;
 
@@ -48,7 +48,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
        return 0;
 }
 
-static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
+static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
 {
        /* If the ABI requirement is provided, simply return that */
        if (in_abi != -1)
@@ -65,7 +65,7 @@ static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
 int arch_check_elf(void *_ehdr, bool has_interpreter,
                   struct arch_elf_state *state)
 {
-       struct elfhdr *ehdr = _ehdr;
+       struct elf32_hdr *ehdr = _ehdr;
        unsigned fp_abi, interp_fp_abi, abi0, abi1;
 
        /* Ignore non-O32 binaries */
index 590c2c980fd38b6b3d3b3740d3987cadbfc78b76..6eb7a3f515fc82d97cb4cbbc1f1160dfb5d345d8 100644 (file)
@@ -57,6 +57,8 @@ static struct irq_chip mips_cpu_irq_controller = {
        .irq_mask_ack   = mask_mips_irq,
        .irq_unmask     = unmask_mips_irq,
        .irq_eoi        = unmask_mips_irq,
+       .irq_disable    = mask_mips_irq,
+       .irq_enable     = unmask_mips_irq,
 };
 
 /*
@@ -93,6 +95,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
        .irq_mask_ack   = mips_mt_cpu_irq_ack,
        .irq_unmask     = unmask_mips_irq,
        .irq_eoi        = unmask_mips_irq,
+       .irq_disable    = mask_mips_irq,
+       .irq_enable     = unmask_mips_irq,
 };
 
 asmlinkage void __weak plat_irq_dispatch(void)
index eb76434828e8b403e536ca6d496eeb00baf5a368..85bff5d513e5b42ae483e414c14a4a844793b9a1 100644 (file)
@@ -82,6 +82,30 @@ void flush_thread(void)
 {
 }
 
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+       /*
+        * Save any process state which is live in hardware registers to the
+        * parent context prior to duplication. This prevents the new child
+        * state becoming stale if the parent is preempted before copy_thread()
+        * gets a chance to save the parent's live hardware registers to the
+        * child context.
+        */
+       preempt_disable();
+
+       if (is_msa_enabled())
+               save_msa(current);
+       else if (is_fpu_owner())
+               _save_fp(current);
+
+       save_dsp(current);
+
+       preempt_enable();
+
+       *dst = *src;
+       return 0;
+}
+
 int copy_thread(unsigned long clone_flags, unsigned long usp,
        unsigned long arg, struct task_struct *p)
 {
@@ -92,18 +116,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 
        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
-       preempt_disable();
-
-       if (is_msa_enabled())
-               save_msa(p);
-       else if (is_fpu_owner())
-               save_fp(p);
-
-       if (cpu_has_dsp)
-               save_dsp(p);
-
-       preempt_enable();
-
        /* set up new TSS. */
        childregs = (struct pt_regs *) childksp - 1;
        /*  Put the stack after the struct pt_regs.  */
index 9d1487d832932a0b3dcb4d2c694e773e753a8767..51045281259403c55fcefac09d510f874a3047bb 100644 (file)
@@ -770,6 +770,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
        long ret = 0;
        user_exit();
 
+       current_thread_info()->syscall = syscall;
+
        if (secure_computing() == -1)
                return -1;
 
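With the new thread_info->syscall field (above) and this assignment at trace entry, the traced syscall number is latched once instead of being re-derived from regs->regs[2], which the o32 indirect syscall(2) path could make ambiguous; the scall32-o32.S and scall64-o32.S hunks below likewise store a0 into PT_R2 so a restarted indirect syscall re-enters the target call directly. A condensed view of the latch, pulled together from the hunks (kernel context assumed; nothing here beyond the names already shown):

/* entry side (ptrace.c): record the number before seccomp/tracers run */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	current_thread_info()->syscall = syscall;	/* latch it once */
	/* ... secure_computing(), tracehook, audit as before ... */
	return syscall;
}

/* consumer side (asm/syscall.h): everyone reads the latched copy */
static inline long syscall_get_nr(struct task_struct *task,
				  struct pt_regs *regs)
{
	return current_thread_info()->syscall;
}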
index 00cad1005a16d1fc1925166ec5746e5ac9890154..6e8de80bb4468c82378d3c4af8ef3d08cefd5cf1 100644 (file)
@@ -181,6 +181,7 @@ illegal_syscall:
        sll     t1, t0, 2
        beqz    v0, einval
        lw      t2, sys_call_table(t1)          # syscall routine
+       sw      a0, PT_R2(sp)                   # call routine directly on restart
 
        /* Some syscalls like execve get their arguments from struct pt_regs
           and claim zero arguments in the syscall table. Thus we have to
@@ -580,3 +581,4 @@ EXPORT(sys_call_table)
        PTR     sys_getrandom
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
+       PTR     sys_execveat
index 5251565e344b48f1931f500f52494eadd4e51a04..ad4d44635c7601162ca0dd8f1b626df28eeeafb2 100644 (file)
@@ -435,4 +435,5 @@ EXPORT(sys_call_table)
        PTR     sys_getrandom
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 5315 */
+       PTR     sys_execveat
        .size   sys_call_table,.-sys_call_table
index 77e74398b828770fa8814ed831c2efd6cb412b40..446cc654da56c5f5fcaad749242dd98d593776e1 100644 (file)
@@ -428,4 +428,5 @@ EXPORT(sysn32_call_table)
        PTR     sys_getrandom
        PTR     sys_memfd_create
        PTR     sys_bpf
+       PTR     compat_sys_execveat             /* 6320 */
        .size   sysn32_call_table,.-sysn32_call_table
index 6f8db9f728e8d7d3f22c0518434ebb844e544953..d07b210fbeff3667f49737dbec9d9d30d3f119ed 100644 (file)
@@ -186,6 +186,7 @@ LEAF(sys32_syscall)
        dsll    t1, t0, 3
        beqz    v0, einval
        ld      t2, sys32_call_table(t1)                # syscall routine
+       sd      a0, PT_R2(sp)           # call routine directly on restart
 
        move    a0, a1                  # shift argument registers
        move    a1, a2
@@ -565,4 +566,5 @@ EXPORT(sys32_call_table)
        PTR     sys_getrandom
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
+       PTR     compat_sys_execveat
        .size   sys32_call_table,.-sys32_call_table
index 1e0a93c5a3e7d2f6a4970dc8694538e3e1b3dd3a..e36a859af66677034a8a4203714a306ec2c0ea51 100644 (file)
@@ -44,8 +44,8 @@ static void cmp_init_secondary(void)
        struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
 
        /* Assume GIC is present */
-       change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
-                                STATUSF_IP7);
+       change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
+                                STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
 
        /* Enable per-cpu interrupts: platform specific */
 
index ad86951b73bdcd8dca4f81e760a77f064ffadd4a..17ea705f6c405081d89a1b5dba30916c1832d73c 100644 (file)
@@ -161,7 +161,8 @@ static void vsmp_init_secondary(void)
 #ifdef CONFIG_MIPS_GIC
        /* This is Malta specific: IPI,performance and timer interrupts */
        if (gic_present)
-               change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+               change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+                                        STATUSF_IP4 | STATUSF_IP5 |
                                         STATUSF_IP6 | STATUSF_IP7);
        else
 #endif
index c94c4e92e17d4bb964a5ca8e7ae77523c4fa44a4..1c0d8c50b7e120482e7891c82653e49a5b63a953 100644 (file)
@@ -123,10 +123,10 @@ asmlinkage void start_secondary(void)
        unsigned int cpu;
 
        cpu_probe();
-       cpu_report();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
+       cpu_report();
 
        /*
         * XXX parity protection should be folded in here when it's converted
index ad3d2031c327737f64c53a43ab6264a20ca5a354..c3b41e24c05a47337509b9579d5b1302ba6f6e80 100644 (file)
@@ -1231,7 +1231,8 @@ static int enable_restore_fp_context(int msa)
 
                /* Restore the scalar FP control & status register */
                if (!was_fpu_owner)
-                       asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+                       write_32bit_cp1_register(CP1_STATUS,
+                                                current->thread.fpu.fcr31);
        }
 
 out:
index 30e334e823bd60822285efa9da808a991e6e19bb..2ae12825529f8fe37e6e6b13c79eee39d4c4753c 100644 (file)
@@ -20,6 +20,7 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select KVM_MMIO
+       select SRCU
        ---help---
          Support for hosting Guest kernels.
          Currently supported on MIPS32 processors.
index e90b2e899291620ee8d1b89f8674bf93b5450274..30639a6e9b8ca3ad3677afb0cbd553b9c9472c18 100644 (file)
@@ -489,6 +489,8 @@ static void r4k_tlb_configure(void)
 #ifdef CONFIG_64BIT
                pg |= PG_ELPA;
 #endif
+               if (cpu_has_rixiex)
+                       pg |= PG_IEC;
                write_c0_pagegrain(pg);
        }
 
index faed90240dedd1f3f88ba34105a268efa1dd6fd2..6d6df839948f6640244e6b472287f68f569c2f63 100644 (file)
@@ -159,13 +159,6 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy(dst, src, len)
 
-/*
- * Internal debugging function
- */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_CACHEFLUSH_H */
index 34429d5a0ccde65e0bd18a88dbc47a2968d6c793..d194c0427b26ee8de18ca5bf1f301df681a6ab70 100644 (file)
@@ -159,9 +159,11 @@ bad_area:
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
-               pr_alert("%s: unhandled page fault (%d) at 0x%08lx, "
-                       "cause %ld\n", current->comm, SIGSEGV, address, cause);
-               show_regs(regs);
+               if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
+                       pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
+                               "cause %ld\n", current->comm, SIGSEGV, address, cause);
+                       show_regs(regs);
+               }
                _exception(SIGSEGV, regs, code, address);
                return;
        }
index 5b9312220e849e40dad8d7516b303b619431d08b..30b35fff2deaba9e01da24b3d019df4db1c3e66a 100644 (file)
@@ -60,13 +60,6 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy(dst, src, len)
 
-
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-/* internal debugging function */
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_POWERPC_CACHEFLUSH_H */
index f5769f19ae256906dd750dd6fcd4ad5448e55bbd..11850f310fb41fb50c639e558da1921a167af7ac 100644 (file)
@@ -21,6 +21,7 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select HAVE_KVM_EVENTFD
+       select SRCU
 
 config KVM_BOOK3S_HANDLER
        bool
index 3e20383d09219f8bbbe26ddd731e362870e9676a..58fae7d098cf0993f7484b3f2df7191fe1afe14d 100644 (file)
@@ -4,10 +4,6 @@
 /* Caches aren't brain-dead on the s390. */
 #include <asm-generic/cacheflush.h>
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
index 646db9c467d136d211650b0d6cdaea63ff85d2a1..5fce52cf0e57dc4831d0737ddd3c3a2cfbc386d7 100644 (file)
@@ -28,6 +28,7 @@ config KVM
        select HAVE_KVM_IRQCHIP
        select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
+       select SRCU
        ---help---
          Support hosting paravirtualized guest machines using the SIE
          virtualization capability on the mainframe. This should work
index 38965379e350ffe7144087e83a3b4afe20228962..68513c41e10def4a09cb1d842fcc64cb2c34a122 100644 (file)
@@ -74,11 +74,6 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *,
 #define flush_cache_vmap(start, end)           do { } while (0)
 #define flush_cache_vunmap(start, end)         do { } while (0)
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-/* internal debugging function */
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _SPARC64_CACHEFLUSH_H */
index 2298cb1daff74e411ac0a616252e84988bfdd06f..1e968f7550dc0363c0ddff9d68626cd59afb119c 100644 (file)
@@ -21,6 +21,7 @@ config KVM
        depends on HAVE_KVM && MODULES
        select PREEMPT_NOTIFIERS
        select ANON_INODES
+       select SRCU
        ---help---
          Support hosting paravirtualized guest machines.
 
index 0dc9d0144a27957d2bd2cdadf3b141a3195ccab0..85588a891c063a8910778c3deff07edfb97f6714 100644 (file)
@@ -138,6 +138,7 @@ config X86
        select HAVE_ACPI_APEI_NMI if ACPI
        select ACPI_LEGACY_TABLES_LOOKUP if ACPI
        select X86_FEATURE_NAMES if PROC_FS
+       select SRCU
 
 config INSTRUCTION_DECODER
        def_bool y
index 15c29096136ba5e071c213d1412004740aa1f459..36a83617eb21cc19245794a89c986eba45179d3a 100644 (file)
@@ -552,7 +552,7 @@ static int __init microcode_init(void)
        int error;
 
        if (paravirt_enabled() || dis_ucode_ldr)
-               return 0;
+               return -EINVAL;
 
        if (c->x86_vendor == X86_VENDOR_INTEL)
                microcode_ops = init_intel_microcode();
index f9d16ff56c6b18df942da02b08aa6fc20fbc6068..7dc7ba577ecded7fcc2b8f77b480200f68b6433a 100644 (file)
@@ -40,6 +40,7 @@ config KVM
        select HAVE_KVM_MSI
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_VFIO
+       select SRCU
        ---help---
          Support hosting fully virtualized guest machines using hardware
          virtualization extensions.  You will need a fairly recent
index 7b20bccf3648dfb0fcb534f0290c023e12a44f9a..2fb384724ebb52d1cf0ba6131b418e81609ca55c 100644 (file)
@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
                },
        },
+        {
+                .callback = set_scan_all,
+                .ident = "Stratus/NEC ftServer",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
+                },
+        },
+        {
+                .callback = set_scan_all,
+                .ident = "Stratus/NEC ftServer",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
+                },
+        },
        {}
 };
 
index 44b9271580b5b0532bddf121af554cc0ec951779..852aa4c92da027cb07fb64c77c855aaf0877a1da 100644 (file)
@@ -293,7 +293,6 @@ static void mrst_power_off_unused_dev(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0812, mrst_power_off_unused_dev);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
 
 /*
index 6774a0e698675927be5c78dc34b0087d873b5ebe..1630a20d5dcfa550ebe9c8815927d51b70bd9d56 100644 (file)
 
 static void blk_mq_sysfs_release(struct kobject *kobj)
 {
-       struct request_queue *q;
-
-       q = container_of(kobj, struct request_queue, mq_kobj);
-       free_percpu(q->queue_ctx);
-}
-
-static void blk_mq_ctx_release(struct kobject *kobj)
-{
-       struct blk_mq_ctx *ctx;
-
-       ctx = container_of(kobj, struct blk_mq_ctx, kobj);
-       kobject_put(&ctx->queue->mq_kobj);
-}
-
-static void blk_mq_hctx_release(struct kobject *kobj)
-{
-       struct blk_mq_hw_ctx *hctx;
-
-       hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
-       kfree(hctx);
 }
 
 struct blk_mq_ctx_sysfs_entry {
@@ -338,13 +318,13 @@ static struct kobj_type blk_mq_ktype = {
 static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
-       .release        = blk_mq_ctx_release,
+       .release        = blk_mq_sysfs_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
-       .release        = blk_mq_hctx_release,
+       .release        = blk_mq_sysfs_release,
 };
 
 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -375,7 +355,6 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
                return ret;
 
        hctx_for_each_ctx(hctx, ctx, i) {
-               kobject_get(&q->mq_kobj);
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
index 9ee3b87c44984d336dbd4c82572fd3a4c3d35e90..2390c5541e71fb09c3353004224d76b4c81bddc8 100644 (file)
@@ -1867,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
        mutex_unlock(&set->tag_list_lock);
 }
 
+/*
+ * It is the actual release handler for mq, but we do it from
+ * request queue's release handler for avoiding use-after-free
+ * and headache because q->mq_kobj shouldn't have been introduced,
+ * but we can't group ctx/kctx kobj without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+
+       /* hctx kobj stays in hctx */
+       queue_for_each_hw_ctx(q, hctx, i)
+               kfree(hctx);
+
+       kfree(q->queue_hw_ctx);
+
+       /* ctx kobj stays in queue_ctx */
+       free_percpu(q->queue_ctx);
+}
+
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
        struct blk_mq_hw_ctx **hctxs;
@@ -2000,10 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
 
        percpu_ref_exit(&q->mq_usage_counter);
 
-       kfree(q->queue_hw_ctx);
        kfree(q->mq_map);
 
-       q->queue_hw_ctx = NULL;
        q->mq_map = NULL;
 
        mutex_lock(&all_q_mutex);
index 4f4f943c22c3d1e907ef2c18224b8635f0057e82..6a48c4c0d8a2a6efb881ea29b772df3bba9d5540 100644 (file)
@@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
 
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
+void blk_mq_release(struct request_queue *q);
+
 /*
  * Basic implementation of sparser bitmap, allowing the user to spread
  * the bits over more cachelines.
index 935ea2aa0730289a6de653aa28a53a4132ef5368..faaf36ade7ebdc2fdd363f174978bfb5683a4f9a 100644 (file)
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
 
        if (!q->mq_ops)
                blk_free_flush_queue(q->fq);
+       else
+               blk_mq_release(q);
 
        blk_trace_shutdown(q);
 
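Taken together, the three block-layer hunks give the ctx and hctx kobjects empty release callbacks and defer the actual freeing to blk_mq_release(), which blk_release_queue() now calls for mq queues; the kobjects no longer try to free memory they merely live inside, which is the use-after-free the blk-mq.c comment describes. A small user-space model of that ownership choice (none of these names are kernel APIs):

#include <stdio.h>
#include <stdlib.h>

struct hw_ctx { int nr; };			/* stands in for blk_mq_hw_ctx */

struct queue {					/* stands in for request_queue */
	struct hw_ctx **hw_ctx;
	int nr_hw;
};

/* like blk_mq_sysfs_release(): the per-object callback frees nothing */
static void hw_ctx_release(struct hw_ctx *hctx)
{
	(void)hctx;
}

/* like blk_mq_release() run from the queue's own release handler */
static void queue_release(struct queue *q)
{
	int i;

	for (i = 0; i < q->nr_hw; i++) {
		hw_ctx_release(q->hw_ctx[i]);
		free(q->hw_ctx[i]);		/* the owner frees, exactly once */
	}
	free(q->hw_ctx);
	free(q);
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));
	int i;

	q->nr_hw = 2;
	q->hw_ctx = calloc(q->nr_hw, sizeof(*q->hw_ctx));
	for (i = 0; i < q->nr_hw; i++)
		q->hw_ctx[i] = calloc(1, sizeof(struct hw_ctx));

	queue_release(q);			/* single teardown point */
	printf("queue released\n");
	return 0;
}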
index 4f3febf8a58954b2ca8ced8c057106529ab1f30b..e75737fd7eefbc80de3dc8731fcbec94ab321f89 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * ACPI support for Intel Lynxpoint LPSS.
  *
- * Copyright (C) 2013, 2014, Intel Corporation
+ * Copyright (C) 2013, Intel Corporation
  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
  *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_CLK_DIVIDER               BIT(2)
 #define LPSS_LTR                       BIT(3)
 #define LPSS_SAVE_CTX                  BIT(4)
-#define LPSS_DEV_PROXY                 BIT(5)
-#define LPSS_PROXY_REQ                 BIT(6)
 
 struct lpss_private_data;
 
@@ -72,10 +70,8 @@ struct lpss_device_desc {
        void (*setup)(struct lpss_private_data *pdata);
 };
 
-static struct device *proxy_device;
-
 static struct lpss_device_desc lpss_dma_desc = {
-       .flags = LPSS_CLK | LPSS_PROXY_REQ,
+       .flags = LPSS_CLK,
 };
 
 struct lpss_private_data {
@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 };
 
 static struct lpss_device_desc byt_uart_dev_desc = {
-       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-                LPSS_DEV_PROXY,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
 };
 
 static struct lpss_device_desc byt_spi_dev_desc = {
-       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-                LPSS_DEV_PROXY,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .prv_offset = 0x400,
 };
 
 static struct lpss_device_desc byt_sdio_dev_desc = {
-       .flags = LPSS_CLK | LPSS_DEV_PROXY,
+       .flags = LPSS_CLK,
 };
 
 static struct lpss_device_desc byt_i2c_dev_desc = {
-       .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY,
+       .flags = LPSS_CLK | LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = byt_i2c_setup,
 };
@@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        adev->driver_data = pdata;
        pdev = acpi_create_platform_device(adev);
        if (!IS_ERR_OR_NULL(pdev)) {
-               if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
-                       proxy_device = &pdev->dev;
                return 1;
        }
 
@@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                acpi_lpss_save_ctx(dev, pdata);
 
-       ret = acpi_dev_runtime_suspend(dev);
-       if (ret)
-               return ret;
-
-       if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
-               return pm_runtime_put_sync_suspend(proxy_device);
-
-       return 0;
+       return acpi_dev_runtime_suspend(dev);
 }
 
 static int acpi_lpss_runtime_resume(struct device *dev)
@@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev)
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;
 
-       if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
-               ret = pm_runtime_get_sync(proxy_device);
-               if (ret)
-                       return ret;
-       }
-
        ret = acpi_dev_runtime_resume(dev);
        if (ret)
                return ret;
index 0da5865df5b1b4f092fef5652814e7cc96b8993e..beb8b27d4621a6d9f839065c1296fa8ab67f3032 100644 (file)
@@ -51,9 +51,11 @@ struct regmap_async {
 struct regmap {
        union {
                struct mutex mutex;
-               spinlock_t spinlock;
+               struct {
+                       spinlock_t spinlock;
+                       unsigned long spinlock_flags;
+               };
        };
-       unsigned long spinlock_flags;
        regmap_lock lock;
        regmap_unlock unlock;
        void *lock_arg; /* This is passed to lock/unlock functions */
@@ -233,6 +235,10 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
 
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+                                        const struct regmap_bus *bus,
+                                        const struct regmap_config *config);
+
 extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
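The internal.h hunk tucks spinlock_flags next to the spinlock inside the existing mutex/spinlock union, since the flags only matter when the spinlock variant of the lock is in use; the anonymous struct keeps map->spinlock_flags spelled exactly as before. A stand-alone illustration of that layout trick, with made-up types in place of the kernel's:

#include <stdio.h>
#include <stddef.h>

struct fake_mutex { long owner; long waiters[4]; };	/* larger member */
typedef int fake_spinlock_t;				/* smaller member */

struct map {
	union {
		struct fake_mutex mutex;
		struct {				/* anonymous struct */
			fake_spinlock_t spinlock;
			unsigned long spinlock_flags;
		};
	};
};

int main(void)
{
	struct map m;

	/* inner members are reachable without an extra field name */
	m.spinlock_flags = 0x2a;

	printf("sizeof(struct map) = %zu, flags at offset %zu\n",
	       sizeof(struct map), offsetof(struct map, spinlock_flags));
	return 0;
}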
index e4c45d2299c167c65d9542f395f6f8be50b240d6..8d304e2a943d3c62776267534a13483375d3a200 100644 (file)
@@ -74,8 +74,8 @@ static int regmap_ac97_reg_write(void *context, unsigned int reg,
 }
 
 static const struct regmap_bus ac97_regmap_bus = {
-               .reg_write = regmap_ac97_reg_write,
-               .reg_read = regmap_ac97_reg_read,
+       .reg_write = regmap_ac97_reg_write,
+       .reg_read = regmap_ac97_reg_read,
 };
 
 /**
index 053150a7f9f27ca5dd70e1e341e95b3a4d7d338f..4b76e33110a2d1adb14e661e290f2fddccfe6b42 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 
+#include "internal.h"
 
 static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
                                      unsigned int *val)
@@ -87,6 +88,42 @@ static struct regmap_bus regmap_smbus_word = {
        .reg_read = regmap_smbus_word_reg_read,
 };
 
+static int regmap_smbus_word_read_swapped(void *context, unsigned int reg,
+                                         unsigned int *val)
+{
+       struct device *dev = context;
+       struct i2c_client *i2c = to_i2c_client(dev);
+       int ret;
+
+       if (reg > 0xff)
+               return -EINVAL;
+
+       ret = i2c_smbus_read_word_swapped(i2c, reg);
+       if (ret < 0)
+               return ret;
+
+       *val = ret;
+
+       return 0;
+}
+
+static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
+                                          unsigned int val)
+{
+       struct device *dev = context;
+       struct i2c_client *i2c = to_i2c_client(dev);
+
+       if (val > 0xffff || reg > 0xff)
+               return -EINVAL;
+
+       return i2c_smbus_write_word_swapped(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_word_swapped = {
+       .reg_write = regmap_smbus_word_write_swapped,
+       .reg_read = regmap_smbus_word_read_swapped,
+};
+
 static int regmap_i2c_write(void *context, const void *data, size_t count)
 {
        struct device *dev = context;
@@ -180,7 +217,14 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
        else if (config->val_bits == 16 && config->reg_bits == 8 &&
                 i2c_check_functionality(i2c->adapter,
                                         I2C_FUNC_SMBUS_WORD_DATA))
-               return &regmap_smbus_word;
+               switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
+               case REGMAP_ENDIAN_LITTLE:
+                       return &regmap_smbus_word;
+               case REGMAP_ENDIAN_BIG:
+                       return &regmap_smbus_word_swapped;
+               default:                /* everything else is not supported */
+                       break;
+               }
        else if (config->val_bits == 8 && config->reg_bits == 8 &&
                 i2c_check_functionality(i2c->adapter,
                                         I2C_FUNC_SMBUS_BYTE_DATA))
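The new regmap_smbus_word_swapped bus lets a big-endian 16-bit device sit behind an adapter that only offers SMBus word transfers; which bus gets used now follows regmap_get_val_endian(), exported from the regmap core in the hunk below. A hedged usage sketch follows: the driver and register address are hypothetical, while the regmap calls and the val_format_endian field are the existing API.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

/* hypothetical big-endian device with 8-bit registers, 16-bit values */
static const struct regmap_config demo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 16,
	.val_format_endian = REGMAP_ENDIAN_BIG,	/* selects the swapped bus */
};

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	map = devm_regmap_init_i2c(client, &demo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* values come back byte-swapped transparently on word-only adapters */
	ret = regmap_read(map, 0x00, &val);
	if (ret)
		return ret;

	dev_info(&client->dev, "register 0x00 reads %#x\n", val);
	return 0;
}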
index d2f8a818d20068af51a2d30d74d37e7ebea77db2..f99b098ddabfbd23dae3ab3ee7733b9ff24e28ef 100644 (file)
@@ -473,9 +473,9 @@ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
        return REGMAP_ENDIAN_BIG;
 }
 
-static enum regmap_endian regmap_get_val_endian(struct device *dev,
-                                       const struct regmap_bus *bus,
-                                       const struct regmap_config *config)
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+                                        const struct regmap_bus *bus,
+                                        const struct regmap_config *config)
 {
        struct device_node *np;
        enum regmap_endian endian;
@@ -513,6 +513,7 @@ static enum regmap_endian regmap_get_val_endian(struct device *dev,
        /* Use this if no other value was found */
        return REGMAP_ENDIAN_BIG;
 }
+EXPORT_SYMBOL_GPL(regmap_get_val_endian);
 
 /**
  * regmap_init(): Initialise register map
index 04645c09fe5e5eee7f6699c451e9e4f6f8368958..9cd6968e2f924bf7eb5c545c4298445651d5665d 100644 (file)
@@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f)
        __u32 c = f->pool[2],   d = f->pool[3];
 
        a += b;                 c += d;
-       b = rol32(a, 6);        d = rol32(c, 27);
+       b = rol32(b, 6);        d = rol32(d, 27);
        d ^= a;                 b ^= c;
 
        a += b;                 c += d;
-       b = rol32(a, 16);       d = rol32(c, 14);
+       b = rol32(b, 16);       d = rol32(d, 14);
        d ^= a;                 b ^= c;
 
        a += b;                 c += d;
-       b = rol32(a, 6);        d = rol32(c, 27);
+       b = rol32(b, 6);        d = rol32(d, 27);
        d ^= a;                 b ^= c;
 
        a += b;                 c += d;
-       b = rol32(a, 16);       d = rol32(c, 14);
+       b = rol32(b, 16);       d = rol32(d, 14);
        d ^= a;                 b ^= c;
 
        f->pool[0] = a;  f->pool[1] = b;
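The random.c hunk fixes fast_mix(): the rotations were being applied to a and c, the accumulators just updated by the additions, so b and d were overwritten with rotated copies of them instead of being rotated and folded back in as the mix intends. For inspection, here is the corrected round sequence as a stand-alone user-space function; rol32 is spelled out, and the pool values in main() are arbitrary test input, not anything the kernel uses.

#include <stdio.h>
#include <stdint.h>

static inline uint32_t rol32(uint32_t w, unsigned int s)
{
	return (w << s) | (w >> (32 - s));
}

/* the corrected mixing rounds from the hunk above, on a caller's pool */
static void fast_mix_sketch(uint32_t pool[4])
{
	uint32_t a = pool[0], b = pool[1];
	uint32_t c = pool[2], d = pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	pool[0] = a;  pool[1] = b;
	pool[2] = c;  pool[3] = d;
}

int main(void)
{
	uint32_t pool[4] = { 1, 2, 3, 4 };	/* arbitrary test values */

	fast_mix_sketch(pool);
	printf("%08x %08x %08x %08x\n", pool[0], pool[1], pool[2], pool[3]);
	return 0;
}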
index 3f44f292d066f03c2bd3029f4631d3a5183a70de..91f86131bb7aa62b0c4632e2defa1f8b2f4e6abc 100644 (file)
@@ -13,6 +13,7 @@ config COMMON_CLK
        bool
        select HAVE_CLK_PREPARE
        select CLKDEV_LOOKUP
+       select SRCU
        ---help---
          The common clock framework is a single definition of struct
          clk, useful across many platforms, as well as an
index 29b2ef5a68b9318b3791c37e8e1b94c12b6553d6..a171fef2c2b66d0732e01f349f9fa13b01f8af2e 100644 (file)
@@ -2,6 +2,7 @@ menu "CPU Frequency scaling"
 
 config CPU_FREQ
        bool "CPU Frequency scaling"
+       select SRCU
        help
          CPU Frequency scaling allows you to change the clock speed of 
          CPUs on the fly. This is a nice method to save power, because 
index faf4e70c42e0467f072cde73d6cc972ff27509ae..3891f6781298c39aee61e68010bf6f42396def1f 100644 (file)
@@ -1,5 +1,6 @@
 menuconfig PM_DEVFREQ
        bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+       select SRCU
        help
          A device may have a list of frequencies and voltages available.
          devfreq, a generic DVFS framework can be registered for a device
index da9c316059bc876ba459832ca7ce01513c389897..eea5d7e578c994bd28b04271837d06fe3fee3d69 100644 (file)
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client,
                client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
        } else {
                pdata = dev_get_platdata(&client->dev);
-               if (!pdata || !gpio_is_valid(pdata->base)) {
-                       dev_dbg(&client->dev, "invalid platform data\n");
-                       return -EINVAL;
+               if (!pdata) {
+                       pdata = devm_kzalloc(&client->dev,
+                                       sizeof(struct mcp23s08_platform_data),
+                                       GFP_KERNEL);
+                       pdata->base = -1;
                }
        }
 
@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi)
        } else {
                type = spi_get_device_id(spi)->driver_data;
                pdata = dev_get_platdata(&spi->dev);
-               if (!pdata || !gpio_is_valid(pdata->base)) {
-                       dev_dbg(&spi->dev,
-                                       "invalid or missing platform data\n");
-                       return -EINVAL;
+               if (!pdata) {
+                       pdata = devm_kzalloc(&spi->dev,
+                                       sizeof(struct mcp23s08_platform_data),
+                                       GFP_KERNEL);
+                       pdata->base = -1;
                }
 
                for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
index 30646cfe0efa91e2378fa0e5871dec14cd7ece7d..f476ae2eb0b3c8610e54377cf7e3010079e916bf 100644 (file)
@@ -88,6 +88,8 @@ struct gpio_bank {
 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
 #define LINE_USED(line, offset) (line & (BIT(offset)))
 
+static void omap_gpio_unmask_irq(struct irq_data *d);
+
 static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
 {
        return bank->chip.base + gpio_irq;
@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
        return readl_relaxed(reg) & mask;
 }
 
+static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
+                              unsigned offset)
+{
+       if (!LINE_USED(bank->mod_usage, offset)) {
+               omap_enable_gpio_module(bank, offset);
+               omap_set_gpio_direction(bank, offset, 1);
+       }
+       bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+}
+
 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 {
        struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
        spin_lock_irqsave(&bank->lock, flags);
        offset = GPIO_INDEX(bank, gpio);
        retval = omap_set_gpio_triggering(bank, offset, type);
-       if (!LINE_USED(bank->mod_usage, offset)) {
-               omap_enable_gpio_module(bank, offset);
-               omap_set_gpio_direction(bank, offset, 1);
-       } else if (!omap_gpio_is_input(bank, BIT(offset))) {
+       omap_gpio_init_irq(bank, gpio, offset);
+       if (!omap_gpio_is_input(bank, BIT(offset))) {
                spin_unlock_irqrestore(&bank->lock, flags);
                return -EINVAL;
        }
-
-       bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
        spin_unlock_irqrestore(&bank->lock, flags);
 
        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -792,6 +800,24 @@ exit:
        pm_runtime_put(bank->dev);
 }
 
+static unsigned int omap_gpio_irq_startup(struct irq_data *d)
+{
+       struct gpio_bank *bank = omap_irq_data_get_bank(d);
+       unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+       unsigned long flags;
+       unsigned offset = GPIO_INDEX(bank, gpio);
+
+       if (!BANK_USED(bank))
+               pm_runtime_get_sync(bank->dev);
+
+       spin_lock_irqsave(&bank->lock, flags);
+       omap_gpio_init_irq(bank, gpio, offset);
+       spin_unlock_irqrestore(&bank->lock, flags);
+       omap_gpio_unmask_irq(d);
+
+       return 0;
+}
+
 static void omap_gpio_irq_shutdown(struct irq_data *d)
 {
        struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
        if (!irqc)
                return -ENOMEM;
 
+       irqc->irq_startup = omap_gpio_irq_startup,
        irqc->irq_shutdown = omap_gpio_irq_shutdown,
        irqc->irq_ack = omap_gpio_ack_irq,
        irqc->irq_mask = omap_gpio_mask_irq,
index f62aa115d79ab4f9fe7e249dbb146dfdde44153e..7722ed53bd651faae15692621d099551ef9bf308 100644 (file)
@@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name,
                if (tdev != NULL) {
                        status = sysfs_create_link(&dev->kobj, &tdev->kobj,
                                                name);
+                       put_device(tdev);
                } else {
                        status = -ENODEV;
                }
@@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
        }
 
        status = sysfs_set_active_low(desc, dev, value);
-
+       put_device(dev);
 unlock:
        mutex_unlock(&sysfs_lock);
 
index 0d8694f015c1a58a829da1fef863c99a3fba47f5..0fd592799d58dc6fdbd6c37bb5e826ed0f89cf9a 100644 (file)
@@ -822,7 +822,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
         * Unconditionally decrement this counter, regardless of the queue's
         * type.
         */
-       dqm->total_queue_count++;
+       dqm->total_queue_count--;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);
        mutex_unlock(&dqm->lock);
index a8be6df8534753fbaed2103a57a3f054838bc29a..1c385c23dd0b8e2ad4155972d53bc9fe8195d03a 100644 (file)
@@ -95,10 +95,10 @@ static int __init kfd_module_init(void)
        }
 
        /* Verify module parameters */
-       if ((max_num_of_queues_per_device < 0) ||
+       if ((max_num_of_queues_per_device < 1) ||
                (max_num_of_queues_per_device >
                        KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
-               pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
+               pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
                return -1;
        }
 
index f37cf5efe642ca23b42fc45de8ca0c69617d9937..2fda1927bff794e7ff626169277d28aff8d1acba 100644 (file)
@@ -315,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
        BUG_ON(!pqm);
 
        pqn = get_queue_by_qid(pqm, qid);
-       BUG_ON(!pqn);
+       if (!pqn) {
+               pr_debug("amdkfd: No queue %d exists for update operation\n",
+                               qid);
+               return -EFAULT;
+       }
 
        pqn->q->properties.queue_address = p->queue_address;
        pqn->q->properties.queue_size = p->queue_size;
index c2a1cba1e984546d63f033a4cf4b07ff3279bb16..b9140032962d943e658a9bc18af8be8418bc479f 100644 (file)
 #include "cirrus_drv.h"
 
 int cirrus_modeset = -1;
+int cirrus_bpp = 24;
 
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, cirrus_modeset, int, 0400);
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+module_param_named(bpp, cirrus_bpp, int, 0400);
 
 /*
  * This is the generic driver code. This binds the driver to the drm core,
index 693a4565c4ffb2a0629d48ec206ba3bb4accf830..705061537a27694c3834374a22bb297510a041be 100644 (file)
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
 
 int cirrus_bo_push_sysram(struct cirrus_bo *bo);
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+
+extern int cirrus_bpp;
+
 #endif                         /* __CIRRUS_DRV_H__ */
index 4c2d68e9102d6304b8fd5cc655266300706f32da..e4b976658087100304cd76f66330c16b9dcb7271 100644 (file)
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
        const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
        const int max_size = cdev->mc.vram_size;
 
+       if (bpp > cirrus_bpp)
+               return false;
        if (bpp > 32)
                return false;
 
index 99d4a74ffeaffd2582ca78353ae2156a90c796f5..61385f2298bf752eb87b1645582d62d47719f3be 100644 (file)
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
        int count;
 
        /* Just add a static list of modes */
-       count = drm_add_modes_noedid(connector, 1280, 1024);
-       drm_set_preferred_mode(connector, 1024, 768);
+       if (cirrus_bpp <= 24) {
+               count = drm_add_modes_noedid(connector, 1280, 1024);
+               drm_set_preferred_mode(connector, 1024, 768);
+       } else {
+               count = drm_add_modes_noedid(connector, 800, 600);
+               drm_set_preferred_mode(connector, 800, 600);
+       }
        return count;
 }
 
index 9e7f23dd14bd5992d73b72ddec32d084ee906aed..87d5fb21cb61cc8709e4f685fa6c3a24ecbb9e9e 100644 (file)
@@ -34,7 +34,8 @@
 
 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
                                    uint64_t saddr, uint64_t daddr,
-                                   int flag, int n)
+                                   int flag, int n,
+                                   struct reservation_object *resv)
 {
        unsigned long start_jiffies;
        unsigned long end_jiffies;
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
                case RADEON_BENCHMARK_COPY_DMA:
                        fence = radeon_copy_dma(rdev, saddr, daddr,
                                                size / RADEON_GPU_PAGE_SIZE,
-                                               NULL);
+                                               resv);
                        break;
                case RADEON_BENCHMARK_COPY_BLIT:
                        fence = radeon_copy_blit(rdev, saddr, daddr,
                                                 size / RADEON_GPU_PAGE_SIZE,
-                                                NULL);
+                                                resv);
                        break;
                default:
                        DRM_ERROR("Unknown copy method\n");
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
        if (rdev->asic->copy.dma) {
                time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-                                               RADEON_BENCHMARK_COPY_DMA, n);
+                                               RADEON_BENCHMARK_COPY_DMA, n,
+                                               dobj->tbo.resv);
                if (time < 0)
                        goto out_cleanup;
                if (time > 0)
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
        if (rdev->asic->copy.blit) {
                time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-                                               RADEON_BENCHMARK_COPY_BLIT, n);
+                                               RADEON_BENCHMARK_COPY_BLIT, n,
+                                               dobj->tbo.resv);
                if (time < 0)
                        goto out_cleanup;
                if (time > 0)
index 102116902a070f728c434a8ee67215ede0cffb70..913fafa597ad210180c03e03618002a702cda441 100644 (file)
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
            pll->flags & RADEON_PLL_USE_REF_DIV)
                ref_div_max = pll->reference_div;
+       else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+               /* fix for problems on RS880 */
+               ref_div_max = min(pll->max_ref_div, 7u);
        else
                ref_div_max = pll->max_ref_div;
 
index d0b4f7d1140d6a391a9f32c7ec466f099b99b5fe..ac3c1310b953182acb0db6db41add071fd88e737 100644 (file)
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
        struct radeon_bo_va *bo_va;
        int r;
 
-       if (rdev->family < CHIP_CAYMAN) {
+       if ((rdev->family < CHIP_CAYMAN) ||
+           (!rdev->accel_working)) {
                return 0;
        }
 
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
        struct radeon_bo_va *bo_va;
        int r;
 
-       if (rdev->family < CHIP_CAYMAN) {
+       if ((rdev->family < CHIP_CAYMAN) ||
+           (!rdev->accel_working)) {
                return;
        }
 
index 3cf9c1fa64756fb6b4430d5e351f21aee874ad1d..686411e4e4f6a3620289be34106ae5d38c9f6b93 100644 (file)
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return -ENOMEM;
                }
 
-               vm = &fpriv->vm;
-               r = radeon_vm_init(rdev, vm);
-               if (r) {
-                       kfree(fpriv);
-                       return r;
-               }
-
                if (rdev->accel_working) {
+                       vm = &fpriv->vm;
+                       r = radeon_vm_init(rdev, vm);
+                       if (r) {
+                               kfree(fpriv);
+                               return r;
+                       }
+
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
                                radeon_vm_fini(rdev, vm);
@@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                                        radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
                                radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        }
+                       radeon_vm_fini(rdev, vm);
                }
 
-               radeon_vm_fini(rdev, vm);
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
index 07b506b410080f482b4d41948c3bd627ae4bb39e..791818165c761f7fdc1807b8f0b7e22de0aed24f 100644 (file)
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
-                                               NULL);
+                                               vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
-                                                NULL);
+                                                vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        r = PTR_ERR(fence);
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
-                                               NULL);
+                                               vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
-                                                NULL);
+                                                vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        r = PTR_ERR(fence);
index 06d2246d07f19a086cddced0d1c9ffd32a2f86fd..2a5a4a9e772d6668ee844b94c61219a0c3100340 100644 (file)
@@ -743,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
         */
 
        /* NI is optimized for 256KB fragments, SI and newer for 64KB */
-       uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+       uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
+                              (rdev->family == CHIP_ARUBA)) ?
                        R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
-       uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+       uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
+                              (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
 
        uint64_t frag_start = ALIGN(pe_start, frag_align);
        uint64_t frag_end = pe_end & ~(frag_align - 1);
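
With ARUBA now taking the same branch as CAYMAN, frag_align is 0x200 rather than 0x80. The alignment arithmetic above is easiest to see with concrete numbers; the PTE offsets in this standalone C sketch are invented:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t frag_align = 0x200;			/* CAYMAN/ARUBA case */
	uint64_t pe_start = 0x1234, pe_end = 0x5678;	/* made-up PTE range */
	uint64_t frag_start = ALIGN_UP(pe_start, frag_align);	/* 0x1400 */
	uint64_t frag_end = pe_end & ~(frag_align - 1);		/* 0x5600 */

	printf("frag_start=%#llx frag_end=%#llx\n",
	       (unsigned long long)frag_start, (unsigned long long)frag_end);
	return 0;
}
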
index a7de26d1ac801383e2ecc57acad47f082c016ce4..d931cbbed24069a072385725f5c1fd454e04acdb 100644 (file)
@@ -1389,6 +1389,7 @@ config SENSORS_ADS1015
 config SENSORS_ADS7828
        tristate "Texas Instruments ADS7828 and compatibles"
        depends on I2C
+       select REGMAP_I2C
        help
          If you say yes here you get support for Texas Instruments ADS7828 and
          ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while
@@ -1430,8 +1431,8 @@ config SENSORS_INA2XX
        tristate "Texas Instruments INA219 and compatibles"
        depends on I2C
        help
-         If you say yes here you get support for INA219, INA220, INA226, and
-         INA230 power monitor chips.
+         If you say yes here you get support for INA219, INA220, INA226,
+         INA230, and INA231 power monitor chips.
 
          The INA2xx driver is configured for the default configuration of
          the part as described in the datasheet.
index 13875968c844f03e00791b2d1340e62a547a9905..6cb89c0ebab6df03f7e8b38fc81cecd3136e57de 100644 (file)
@@ -221,7 +221,7 @@ static ssize_t show_min(struct device *dev,
        struct abx500_temp *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 
-       return sprintf(buf, "%ld\n", data->min[attr->index]);
+       return sprintf(buf, "%lu\n", data->min[attr->index]);
 }
 
 static ssize_t show_max(struct device *dev,
@@ -230,7 +230,7 @@ static ssize_t show_max(struct device *dev,
        struct abx500_temp *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 
-       return sprintf(buf, "%ld\n", data->max[attr->index]);
+       return sprintf(buf, "%lu\n", data->max[attr->index]);
 }
 
 static ssize_t show_max_hyst(struct device *dev,
@@ -239,7 +239,7 @@ static ssize_t show_max_hyst(struct device *dev,
        struct abx500_temp *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 
-       return sprintf(buf, "%ld\n", data->max_hyst[attr->index]);
+       return sprintf(buf, "%lu\n", data->max_hyst[attr->index]);
 }
 
 static ssize_t show_min_alarm(struct device *dev,
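
The %ld to %lu changes above matter because the min/max/max_hyst fields are evidently unsigned long; printing them with %ld misrepresents large values as negatives. A trivial standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long v = (unsigned long)-1;	/* largest unsigned long */

	printf("%ld\n", (long)v);	/* typically prints -1: sign-flipped */
	printf("%lu\n", v);		/* prints the real value */
	return 0;
}
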
index f4f9b219bf1619392e203dfb95f897c742e17357..11955467fc0f48a53f4cf776802cd3494820ccca 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
+#include <linux/bitops.h>
 
 /*
  * AD7314 temperature masks
@@ -67,7 +68,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
        switch (spi_get_device_id(chip->spi_dev)->driver_data) {
        case ad7314:
                data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
-               data = (data << 6) >> 6;
+               data = sign_extend32(data, 9);
 
                return sprintf(buf, "%d\n", 250 * data);
        case adt7301:
@@ -78,7 +79,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
                 * register.  1lsb - 31.25 milli degrees centigrade
                 */
                data = ret & ADT7301_TEMP_MASK;
-               data = (data << 2) >> 2;
+               data = sign_extend32(data, 13);
 
                return sprintf(buf, "%d\n",
                               DIV_ROUND_CLOSEST(data * 3125, 100));
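
sign_extend32(value, index) treats the bit at position index as the sign bit, which is what the removed shift-up/shift-down trick achieved. A standalone re-implementation checking the 10-bit ad7314 case; the raw reading is invented:

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's sign_extend32(): bit 'index' is the sign. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t raw = 0x3E0;			/* made-up 10-bit reading */
	int32_t data = sign_extend32(raw, 9);	/* -> -32 LSB, 0.25 degC each */

	printf("%d\n", 250 * data);		/* -8000 millidegrees C */
	return 0;
}
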
index 0625e50d7a6e524b49cbc8eb9374264dbaf4ff15..ad2b47e403452a230c9f3e57454517a25e63289f 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/regulator/consumer.h>
 #include <linux/mutex.h>
+#include <linux/bitops.h>
 
 /* Addresses to scan
  * The chip also supports addresses 0x35..0x37. Don't scan those addresses
@@ -189,7 +190,7 @@ static ssize_t adc128_show_temp(struct device *dev,
        if (IS_ERR(data))
                return PTR_ERR(data);
 
-       temp = (data->temp[index] << 7) >> 7;   /* sign extend */
+       temp = sign_extend32(data->temp[index], 8);
        return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */
 }
 
index a622d40eec1788ca73e138e41a7518aa98049cd5..bce4e9ff21bff76606484f0a04ad1bf52b1ffee2 100644 (file)
 #include <linux/hwmon-sysfs.h>
 #include <linux/i2c.h>
 #include <linux/init.h>
-#include <linux/jiffies.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/platform_data/ads7828.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 
 /* The ADS7828 registers */
-#define ADS7828_NCH            8       /* 8 channels supported */
 #define ADS7828_CMD_SD_SE      0x80    /* Single ended inputs */
 #define ADS7828_CMD_PD1                0x04    /* Internal vref OFF && A/D ON */
 #define ADS7828_CMD_PD3                0x0C    /* Internal vref ON && A/D ON */
@@ -50,17 +48,9 @@ enum ads7828_chips { ads7828, ads7830 };
 
 /* Client specific data */
 struct ads7828_data {
-       struct i2c_client *client;
-       struct mutex update_lock;       /* Mutex protecting updates */
-       unsigned long last_updated;     /* Last updated time (in jiffies) */
-       u16 adc_input[ADS7828_NCH];     /* ADS7828_NCH samples */
-       bool valid;                     /* Validity flag */
-       bool diff_input;                /* Differential input */
-       bool ext_vref;                  /* External voltage reference */
-       unsigned int vref_mv;           /* voltage reference value */
+       struct regmap *regmap;
        u8 cmd_byte;                    /* Command byte without channel bits */
        unsigned int lsb_resol;         /* Resolution of the ADC sample LSB */
-       s32 (*read_channel)(const struct i2c_client *client, u8 command);
 };
 
 /* Command byte C2,C1,C0 - see datasheet */
@@ -69,42 +59,22 @@ static inline u8 ads7828_cmd_byte(u8 cmd, int ch)
        return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4);
 }
 
-/* Update data for the device (all 8 channels) */
-static struct ads7828_data *ads7828_update_device(struct device *dev)
-{
-       struct ads7828_data *data = dev_get_drvdata(dev);
-       struct i2c_client *client = data->client;
-
-       mutex_lock(&data->update_lock);
-
-       if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
-                       || !data->valid) {
-               unsigned int ch;
-               dev_dbg(&client->dev, "Starting ads7828 update\n");
-
-               for (ch = 0; ch < ADS7828_NCH; ch++) {
-                       u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch);
-                       data->adc_input[ch] = data->read_channel(client, cmd);
-               }
-               data->last_updated = jiffies;
-               data->valid = true;
-       }
-
-       mutex_unlock(&data->update_lock);
-
-       return data;
-}
-
 /* sysfs callback function */
 static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da,
                               char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct ads7828_data *data = ads7828_update_device(dev);
-       unsigned int value = DIV_ROUND_CLOSEST(data->adc_input[attr->index] *
-                                              data->lsb_resol, 1000);
+       struct ads7828_data *data = dev_get_drvdata(dev);
+       u8 cmd = ads7828_cmd_byte(data->cmd_byte, attr->index);
+       unsigned int regval;
+       int err;
 
-       return sprintf(buf, "%d\n", value);
+       err = regmap_read(data->regmap, cmd, &regval);
+       if (err < 0)
+               return err;
+
+       return sprintf(buf, "%d\n",
+                      DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000));
 }
 
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0);
@@ -130,6 +100,16 @@ static struct attribute *ads7828_attrs[] = {
 
 ATTRIBUTE_GROUPS(ads7828);
 
+static const struct regmap_config ads2828_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+};
+
+static const struct regmap_config ads2830_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+};
+
 static int ads7828_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
@@ -137,42 +117,40 @@ static int ads7828_probe(struct i2c_client *client,
        struct ads7828_platform_data *pdata = dev_get_platdata(dev);
        struct ads7828_data *data;
        struct device *hwmon_dev;
+       unsigned int vref_mv = ADS7828_INT_VREF_MV;
+       bool diff_input = false;
+       bool ext_vref = false;
 
        data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
        if (pdata) {
-               data->diff_input = pdata->diff_input;
-               data->ext_vref = pdata->ext_vref;
-               if (data->ext_vref)
-                       data->vref_mv = pdata->vref_mv;
+               diff_input = pdata->diff_input;
+               ext_vref = pdata->ext_vref;
+               if (ext_vref && pdata->vref_mv)
+                       vref_mv = pdata->vref_mv;
        }
 
-       /* Bound Vref with min/max values if it was provided */
-       if (data->vref_mv)
-               data->vref_mv = clamp_val(data->vref_mv,
-                                         ADS7828_EXT_VREF_MV_MIN,
-                                         ADS7828_EXT_VREF_MV_MAX);
-       else
-               data->vref_mv = ADS7828_INT_VREF_MV;
+       /* Bound Vref with min/max values */
+       vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN,
+                           ADS7828_EXT_VREF_MV_MAX);
 
        /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */
        if (id->driver_data == ads7828) {
-               data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 4096);
-               data->read_channel = i2c_smbus_read_word_swapped;
+               data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 4096);
+               data->regmap = devm_regmap_init_i2c(client,
+                                                   &ads2828_regmap_config);
        } else {
-               data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 256);
-               data->read_channel = i2c_smbus_read_byte_data;
+               data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 256);
+               data->regmap = devm_regmap_init_i2c(client,
+                                                   &ads2830_regmap_config);
        }
 
-       data->cmd_byte = data->ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
-       if (!data->diff_input)
+       data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
+       if (!diff_input)
                data->cmd_byte |= ADS7828_CMD_SD_SE;
 
-       data->client = client;
-       mutex_init(&data->update_lock);
-
        hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
                                                           data,
                                                           ads7828_groups);
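
After this conversion each sysfs read issues one regmap transaction for the requested channel instead of refreshing a mutex-protected cache of all eight inputs; the two regmap configs match the word-sized ADS7828 and byte-sized ADS7830 reads. The channel-to-command-byte shuffle kept from the old driver is easiest to follow with a concrete value (standalone C, channel number picked arbitrarily):

#include <stdio.h>

#define ADS7828_CMD_SD_SE	0x80	/* single-ended inputs */
#define ADS7828_CMD_PD3		0x0C	/* internal vref ON, A/D ON */

/* Same bit shuffle as ads7828_cmd_byte() in the driver. */
static unsigned char ads7828_cmd_byte(unsigned char cmd, int ch)
{
	return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4);
}

int main(void)
{
	unsigned char base = ADS7828_CMD_SD_SE | ADS7828_CMD_PD3;	/* 0x8C */

	/* ch = 3: C2..C0 become 101b, so the command byte is 0xDC. */
	printf("0x%02X\n", ads7828_cmd_byte(base, 3));
	return 0;
}
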
index e01feba909c3688ea152b75d6c23b645948632ec..d1542b7d4bc3c3840a19d2480aafce78720cc03a 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/hwmon-sysfs.h>
 #include <linux/jiffies.h>
 #include <linux/of.h>
+#include <linux/delay.h>
 
 #include <linux/platform_data/ina2xx.h>
 
@@ -51,7 +52,6 @@
 #define INA226_ALERT_LIMIT             0x07
 #define INA226_DIE_ID                  0xFF
 
-
 /* register count */
 #define INA219_REGISTERS               6
 #define INA226_REGISTERS               8
 
 /* worst case is 68.10 ms (~14.6Hz, ina219) */
 #define INA2XX_CONVERSION_RATE         15
+#define INA2XX_MAX_DELAY               69 /* worst case delay in ms */
+
+#define INA2XX_RSHUNT_DEFAULT          10000
+
+/* bit mask for reading the averaging setting in the configuration register */
+#define INA226_AVG_RD_MASK             0x0E00
+
+#define INA226_READ_AVG(reg)           (((reg) & INA226_AVG_RD_MASK) >> 9)
+#define INA226_SHIFT_AVG(val)          ((val) << 9)
+
+/* common attrs, ina226 attrs and NULL */
+#define INA2XX_MAX_ATTRIBUTE_GROUPS    3
+
+/*
+ * Both bus voltage and shunt voltage conversion times for ina226 are set
+ * to 0b0100 on POR, which translates to 2200 microseconds in total.
+ */
+#define INA226_TOTAL_CONV_TIME_DEFAULT 2200
 
 enum ina2xx_ids { ina219, ina226 };
 
@@ -81,11 +99,16 @@ struct ina2xx_data {
        struct i2c_client *client;
        const struct ina2xx_config *config;
 
+       long rshunt;
+       u16 curr_config;
+
        struct mutex update_lock;
        bool valid;
        unsigned long last_updated;
+       int update_interval; /* in jiffies */
 
        int kind;
+       const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS];
        u16 regs[INA2XX_MAX_REGISTERS];
 };
 
@@ -110,34 +133,156 @@ static const struct ina2xx_config ina2xx_config[] = {
        },
 };
 
-static struct ina2xx_data *ina2xx_update_device(struct device *dev)
+/*
+ * Available averaging rates for ina226. The indices correspond to
+ * the bit values expected by the chip (according to the ina226 datasheet,
+ * table 3, AVG bit settings, found at
+ * http://www.ti.com/lit/ds/symlink/ina226.pdf).
+ */
+static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
+
+static int ina226_avg_bits(int avg)
+{
+       int i;
+
+       /* Get the closest average from the tab. */
+       for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) {
+               if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
+                       break;
+       }
+
+       return i; /* Return 0b0111 for values greater than 1024. */
+}
+
+static int ina226_reg_to_interval(u16 config)
+{
+       int avg = ina226_avg_tab[INA226_READ_AVG(config)];
+
+       /*
+        * Multiply the total conversion time by the number of averages.
+        * Return the result in milliseconds.
+        */
+       return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
+}
+
+static u16 ina226_interval_to_reg(int interval, u16 config)
+{
+       int avg, avg_bits;
+
+       avg = DIV_ROUND_CLOSEST(interval * 1000,
+                               INA226_TOTAL_CONV_TIME_DEFAULT);
+       avg_bits = ina226_avg_bits(avg);
+
+       return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits);
+}
+
+static void ina226_set_update_interval(struct ina2xx_data *data)
+{
+       int ms;
+
+       ms = ina226_reg_to_interval(data->curr_config);
+       data->update_interval = msecs_to_jiffies(ms);
+}
+
+static int ina2xx_calibrate(struct ina2xx_data *data)
+{
+       u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+                                   data->rshunt);
+
+       return i2c_smbus_write_word_swapped(data->client,
+                                           INA2XX_CALIBRATION, val);
+}
+
+/*
+ * Initialize the configuration and calibration registers.
+ */
+static int ina2xx_init(struct ina2xx_data *data)
 {
-       struct ina2xx_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
-       struct ina2xx_data *ret = data;
+       int ret;
 
-       mutex_lock(&data->update_lock);
+       /* device configuration */
+       ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
+                                          data->curr_config);
+       if (ret < 0)
+               return ret;
 
-       if (time_after(jiffies, data->last_updated +
-                      HZ / INA2XX_CONVERSION_RATE) || !data->valid) {
+       /*
+        * Set current LSB to 1mA, shunt is in uOhms
+        * (equation 13 in datasheet).
+        */
+       return ina2xx_calibrate(data);
+}
 
-               int i;
+static int ina2xx_do_update(struct device *dev)
+{
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       int i, rv, retry;
 
-               dev_dbg(&client->dev, "Starting ina2xx update\n");
+       dev_dbg(&client->dev, "Starting ina2xx update\n");
 
+       for (retry = 5; retry; retry--) {
                /* Read all registers */
                for (i = 0; i < data->config->registers; i++) {
-                       int rv = i2c_smbus_read_word_swapped(client, i);
-                       if (rv < 0) {
-                               ret = ERR_PTR(rv);
-                               goto abort;
-                       }
+                       rv = i2c_smbus_read_word_swapped(client, i);
+                       if (rv < 0)
+                               return rv;
                        data->regs[i] = rv;
                }
+
+               /*
+                * If the current value in the calibration register is 0, the
+                * power and current registers will also remain at 0. In case
+                * the chip has been reset let's check the calibration
+                * register and reinitialize if needed.
+                */
+               if (data->regs[INA2XX_CALIBRATION] == 0) {
+                       dev_warn(dev, "chip not calibrated, reinitializing\n");
+
+                       rv = ina2xx_init(data);
+                       if (rv < 0)
+                               return rv;
+
+                       /*
+                        * Let's make sure the power and current registers
+                        * have been updated before trying again.
+                        */
+                       msleep(INA2XX_MAX_DELAY);
+                       continue;
+               }
+
                data->last_updated = jiffies;
                data->valid = 1;
+
+               return 0;
        }
-abort:
+
+       /*
+        * If we're here then although all write operations succeeded, the
+        * chip still returns 0 in the calibration register. Nothing more we
+        * can do here.
+        */
+       dev_err(dev, "unable to reinitialize the chip\n");
+       return -ENODEV;
+}
+
+static struct ina2xx_data *ina2xx_update_device(struct device *dev)
+{
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       struct ina2xx_data *ret = data;
+       unsigned long after;
+       int rv;
+
+       mutex_lock(&data->update_lock);
+
+       after = data->last_updated + data->update_interval;
+       if (time_after(jiffies, after) || !data->valid) {
+               rv = ina2xx_do_update(dev);
+               if (rv < 0)
+                       ret = ERR_PTR(rv);
+       }
+
        mutex_unlock(&data->update_lock);
        return ret;
 }
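
The averaging helpers added above map a requested update_interval (in milliseconds) onto the nearest AVG field value and back, with every averaged sample costing INA226_TOTAL_CONV_TIME_DEFAULT (2200 us) at the power-on conversion times. A rough worked round trip, reusing the table from the patch (standalone C):

#include <stdio.h>

#define INA226_TOTAL_CONV_TIME_DEFAULT	2200	/* us, POR conversion times */

static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };

int main(void)
{
	int interval_ms = 100;	/* value a user might write to update_interval */
	int avg = (interval_ms * 1000 + INA226_TOTAL_CONV_TIME_DEFAULT / 2) /
		  INA226_TOTAL_CONV_TIME_DEFAULT;	/* DIV_ROUND_CLOSEST -> 45 */
	int i;

	/* Pick the closest table entry, as ina226_avg_bits() does. */
	for (i = 0; i < 7; i++)
		if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
			break;

	/* i == 3 (AVG = 64), so the chip actually updates every ~141 ms. */
	printf("avg bits %d, effective interval %d ms\n", i,
	       (ina226_avg_tab[i] * INA226_TOTAL_CONV_TIME_DEFAULT + 500) / 1000);
	return 0;
}
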
@@ -164,6 +309,10 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
                /* signed register, LSB=1mA (selected), in mA */
                val = (s16)data->regs[reg];
                break;
+       case INA2XX_CALIBRATION:
+               val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+                                       data->regs[reg]);
+               break;
        default:
                /* programmer goofed */
                WARN_ON_ONCE(1);
@@ -187,6 +336,85 @@ static ssize_t ina2xx_show_value(struct device *dev,
                        ina2xx_get_value(data, attr->index));
 }
 
+static ssize_t ina2xx_set_shunt(struct device *dev,
+                               struct device_attribute *da,
+                               const char *buf, size_t count)
+{
+       struct ina2xx_data *data = ina2xx_update_device(dev);
+       unsigned long val;
+       int status;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       status = kstrtoul(buf, 10, &val);
+       if (status < 0)
+               return status;
+
+       if (val == 0 ||
+           /* Values greater than the calibration factor make no sense. */
+           val > data->config->calibration_factor)
+               return -EINVAL;
+
+       mutex_lock(&data->update_lock);
+       data->rshunt = val;
+       status = ina2xx_calibrate(data);
+       mutex_unlock(&data->update_lock);
+       if (status < 0)
+               return status;
+
+       return count;
+}
+
+static ssize_t ina226_set_interval(struct device *dev,
+                                  struct device_attribute *da,
+                                  const char *buf, size_t count)
+{
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       unsigned long val;
+       int status;
+
+       status = kstrtoul(buf, 10, &val);
+       if (status < 0)
+               return status;
+
+       if (val > INT_MAX || val == 0)
+               return -EINVAL;
+
+       mutex_lock(&data->update_lock);
+       data->curr_config = ina226_interval_to_reg(val,
+                                                  data->regs[INA2XX_CONFIG]);
+       status = i2c_smbus_write_word_swapped(data->client,
+                                             INA2XX_CONFIG,
+                                             data->curr_config);
+
+       ina226_set_update_interval(data);
+       /* Make sure the next access re-reads all registers. */
+       data->valid = 0;
+       mutex_unlock(&data->update_lock);
+       if (status < 0)
+               return status;
+
+       return count;
+}
+
+static ssize_t ina226_show_interval(struct device *dev,
+                                   struct device_attribute *da, char *buf)
+{
+       struct ina2xx_data *data = ina2xx_update_device(dev);
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       /*
+        * We don't use data->update_interval here as we want to display
+        * the actual interval used by the chip and jiffies_to_msecs()
+        * doesn't seem to be accurate enough.
+        */
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       ina226_reg_to_interval(data->regs[INA2XX_CONFIG]));
+}
+
 /* shunt voltage */
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL,
                          INA2XX_SHUNT_VOLTAGE);
@@ -203,15 +431,37 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina2xx_show_value, NULL,
 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
                          INA2XX_POWER);
 
+/* shunt resistance */
+static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
+                         ina2xx_show_value, ina2xx_set_shunt,
+                         INA2XX_CALIBRATION);
+
+/* update interval (ina226 only) */
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+                         ina226_show_interval, ina226_set_interval, 0);
+
 /* pointers to created device attributes */
 static struct attribute *ina2xx_attrs[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in1_input.dev_attr.attr,
        &sensor_dev_attr_curr1_input.dev_attr.attr,
        &sensor_dev_attr_power1_input.dev_attr.attr,
+       &sensor_dev_attr_shunt_resistor.dev_attr.attr,
        NULL,
 };
-ATTRIBUTE_GROUPS(ina2xx);
+
+static const struct attribute_group ina2xx_group = {
+       .attrs = ina2xx_attrs,
+};
+
+static struct attribute *ina226_attrs[] = {
+       &sensor_dev_attr_update_interval.dev_attr.attr,
+       NULL,
+};
+
+static const struct attribute_group ina226_group = {
+       .attrs = ina226_attrs,
+};
 
 static int ina2xx_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
@@ -221,9 +471,8 @@ static int ina2xx_probe(struct i2c_client *client,
        struct device *dev = &client->dev;
        struct ina2xx_data *data;
        struct device *hwmon_dev;
-       long shunt = 10000; /* default shunt value 10mOhms */
        u32 val;
-       int ret;
+       int ret, group = 0;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
                return -ENODEV;
@@ -234,50 +483,52 @@ static int ina2xx_probe(struct i2c_client *client,
 
        if (dev_get_platdata(dev)) {
                pdata = dev_get_platdata(dev);
-               shunt = pdata->shunt_uohms;
+               data->rshunt = pdata->shunt_uohms;
        } else if (!of_property_read_u32(dev->of_node,
                                         "shunt-resistor", &val)) {
-               shunt = val;
+               data->rshunt = val;
+       } else {
+               data->rshunt = INA2XX_RSHUNT_DEFAULT;
        }
 
-       if (shunt <= 0)
-               return -ENODEV;
-
        /* set the device type */
        data->kind = id->driver_data;
        data->config = &ina2xx_config[data->kind];
-
-       /* device configuration */
-       ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
-                                          data->config->config_default);
-       if (ret < 0) {
-               dev_err(dev,
-                       "error writing to the config register: %d", ret);
-               return -ENODEV;
-       }
+       data->curr_config = data->config->config_default;
+       data->client = client;
 
        /*
-        * Set current LSB to 1mA, shunt is in uOhms
-        * (equation 13 in datasheet).
+        * Ina226 has a variable update_interval. For ina219 we
+        * use a constant value.
         */
-       ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
-                               data->config->calibration_factor / shunt);
+       if (data->kind == ina226)
+               ina226_set_update_interval(data);
+       else
+               data->update_interval = HZ / INA2XX_CONVERSION_RATE;
+
+       if (data->rshunt <= 0 ||
+           data->rshunt > data->config->calibration_factor)
+               return -ENODEV;
+
+       ret = ina2xx_init(data);
        if (ret < 0) {
-               dev_err(dev,
-                       "error writing to the calibration register: %d", ret);
+               dev_err(dev, "error configuring the device: %d\n", ret);
                return -ENODEV;
        }
 
-       data->client = client;
        mutex_init(&data->update_lock);
 
+       data->groups[group++] = &ina2xx_group;
+       if (data->kind == ina226)
+               data->groups[group++] = &ina226_group;
+
        hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
-                                                          data, ina2xx_groups);
+                                                          data, data->groups);
        if (IS_ERR(hwmon_dev))
                return PTR_ERR(hwmon_dev);
 
        dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
-                id->name, shunt);
+                id->name, data->rshunt);
 
        return 0;
 }
@@ -287,6 +538,7 @@ static const struct i2c_device_id ina2xx_id[] = {
        { "ina220", ina219 },
        { "ina226", ina226 },
        { "ina230", ina226 },
+       { "ina231", ina226 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, ina2xx_id);
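
The calibration register now round-trips the shunt value: ina2xx_calibrate() writes calibration_factor / rshunt, and the new INA2XX_CALIBRATION case in ina2xx_get_value() recovers rshunt by dividing the factor by the register again, which is what the writable shunt_resistor attribute relies on. A rough sketch of that round trip; the calibration_factor below is an assumed value for illustration, not necessarily the driver's constant:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	long calibration_factor = 40960000;	/* assumed value for illustration */
	long rshunt = 10000;			/* INA2XX_RSHUNT_DEFAULT, 10 mOhm */
	long cal_reg, rshunt_back;

	cal_reg = DIV_ROUND_CLOSEST(calibration_factor, rshunt);	/* written to INA2XX_CALIBRATION */
	rshunt_back = DIV_ROUND_CLOSEST(calibration_factor, cal_reg);	/* reported via shunt_resistor */

	printf("cal=%ld rshunt=%ld uOhm\n", cal_reg, rshunt_back);	/* cal=4096 rshunt=10000 */
	return 0;
}
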
index 388f8bcd898e879c84dd2497f4efee307456bfb0..996bdfd5cf25f93679857407951253d1f1e5b5ca 100644 (file)
@@ -201,7 +201,7 @@ struct jc42_data {
 #define JC42_TEMP_MIN          0
 #define JC42_TEMP_MAX          125000
 
-static u16 jc42_temp_to_reg(int temp, bool extended)
+static u16 jc42_temp_to_reg(long temp, bool extended)
 {
        int ntemp = clamp_val(temp,
                              extended ? JC42_TEMP_MIN_EXTENDED :
@@ -213,11 +213,7 @@ static u16 jc42_temp_to_reg(int temp, bool extended)
 
 static int jc42_temp_from_reg(s16 reg)
 {
-       reg &= 0x1fff;
-
-       /* sign extend register */
-       if (reg & 0x1000)
-               reg |= 0xf000;
+       reg = sign_extend32(reg, 12);
 
        /* convert from 0.0625 to 0.001 resolution */
        return reg * 125 / 2;
@@ -308,15 +304,18 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
                                  const char *buf, size_t count)
 {
        struct jc42_data *data = dev_get_drvdata(dev);
-       unsigned long val;
+       long val;
        int diff, hyst;
        int err;
        int ret = count;
 
-       if (kstrtoul(buf, 10, &val) < 0)
+       if (kstrtol(buf, 10, &val) < 0)
                return -EINVAL;
 
+       val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED :
+                             JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX);
        diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
+
        hyst = 0;
        if (diff > 0) {
                if (diff < 2250)
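
jc42_temp_from_reg() above now relies on sign_extend32() with bit 12 as the sign bit, then scales the 0.0625 degC register LSB to millidegrees via reg * 125 / 2. A quick standalone check with an invented negative reading:

#include <stdio.h>

int main(void)
{
	int reg = 0x1F00 & 0x1fff;	/* made-up raw temperature field */

	if (reg & 0x1000)		/* bit 12 set: negative reading */
		reg -= 0x2000;		/* same result as sign_extend32(reg, 12): -256 */

	printf("%d\n", reg * 125 / 2);	/* -256 * 0.0625 degC = -16000 mC */
	return 0;
}
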
index ec5678289e4a5828e6d2248ac29e092240b3fc6f..55765790907b3768eb1c4b23e2e3bd77d4eaf294 100644 (file)
@@ -779,7 +779,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
        return reg != REG_BANK && reg <= 0x20;
 }
 
-static struct regmap_config nct7802_regmap_config = {
+static const struct regmap_config nct7802_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .cache_type = REGCACHE_RBTREE,
index ba9f478f64ee68e7092d9fa546663cd02a7cf2cb..9da2735f14243ed30c1365124195d6dcc9e0baf1 100644 (file)
@@ -253,7 +253,7 @@ static int tmp102_remove(struct i2c_client *client)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int tmp102_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
@@ -279,17 +279,10 @@ static int tmp102_resume(struct device *dev)
        config &= ~TMP102_CONF_SD;
        return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
 }
-
-static const struct dev_pm_ops tmp102_dev_pm_ops = {
-       .suspend        = tmp102_suspend,
-       .resume         = tmp102_resume,
-};
-
-#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
-#else
-#define        TMP102_DEV_PM_OPS NULL
 #endif /* CONFIG_PM */
 
+static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
+
 static const struct i2c_device_id tmp102_id[] = {
        { "tmp102", 0 },
        { }
@@ -298,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, tmp102_id);
 
 static struct i2c_driver tmp102_driver = {
        .driver.name    = DRIVER_NAME,
-       .driver.pm      = TMP102_DEV_PM_OPS,
+       .driver.pm      = &tmp102_dev_pm_ops,
        .probe          = tmp102_probe,
        .remove         = tmp102_remove,
        .id_table       = tmp102_id,
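
SIMPLE_DEV_PM_OPS always emits the dev_pm_ops structure and only fills in the system-sleep callbacks when CONFIG_PM_SLEEP is set, which is why the #else/TMP102_DEV_PM_OPS fallback could be dropped and the driver can point at &tmp102_dev_pm_ops unconditionally. A simplified, self-contained sketch of that behaviour; the _SKETCH names and reduced field set are illustrative, not the kernel's actual definitions:

/* The ops struct always exists; callbacks vanish without CONFIG_PM_SLEEP. */
struct pm_ops_sketch {
	int (*suspend)(void *dev);
	int (*resume)(void *dev);
};

#ifdef CONFIG_PM_SLEEP
#define SET_SLEEP_OPS_SKETCH(s, r)	.suspend = (s), .resume = (r),
#else
#define SET_SLEEP_OPS_SKETCH(s, r)	/* callbacks compiled out */
#endif

#define SIMPLE_PM_OPS_SKETCH(name, s, r) \
	const struct pm_ops_sketch name = { SET_SLEEP_OPS_SKETCH(s, r) }
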
index b716b08156446e186c9ae608f3f4e6343c6f200f..643c08a025a52d015431b8a27be1ddcacbd36845 100644 (file)
@@ -258,6 +258,5 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
-IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
index 532d8eba8b0203ab65a2a8ed1f096389253b1dca..b7943ff16ed3f2edece8ec4cc3c7931594bd943a 100644 (file)
@@ -400,52 +400,6 @@ err:
        return ret;
 }
 
-static void copy_query_dev_fields(struct ib_uverbs_file *file,
-                                 struct ib_uverbs_query_device_resp *resp,
-                                 struct ib_device_attr *attr)
-{
-       resp->fw_ver            = attr->fw_ver;
-       resp->node_guid         = file->device->ib_dev->node_guid;
-       resp->sys_image_guid    = attr->sys_image_guid;
-       resp->max_mr_size       = attr->max_mr_size;
-       resp->page_size_cap     = attr->page_size_cap;
-       resp->vendor_id         = attr->vendor_id;
-       resp->vendor_part_id    = attr->vendor_part_id;
-       resp->hw_ver            = attr->hw_ver;
-       resp->max_qp            = attr->max_qp;
-       resp->max_qp_wr         = attr->max_qp_wr;
-       resp->device_cap_flags  = attr->device_cap_flags;
-       resp->max_sge           = attr->max_sge;
-       resp->max_sge_rd        = attr->max_sge_rd;
-       resp->max_cq            = attr->max_cq;
-       resp->max_cqe           = attr->max_cqe;
-       resp->max_mr            = attr->max_mr;
-       resp->max_pd            = attr->max_pd;
-       resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
-       resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
-       resp->max_res_rd_atom   = attr->max_res_rd_atom;
-       resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
-       resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
-       resp->atomic_cap                = attr->atomic_cap;
-       resp->max_ee                    = attr->max_ee;
-       resp->max_rdd                   = attr->max_rdd;
-       resp->max_mw                    = attr->max_mw;
-       resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
-       resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
-       resp->max_mcast_grp             = attr->max_mcast_grp;
-       resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
-       resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
-       resp->max_ah                    = attr->max_ah;
-       resp->max_fmr                   = attr->max_fmr;
-       resp->max_map_per_fmr           = attr->max_map_per_fmr;
-       resp->max_srq                   = attr->max_srq;
-       resp->max_srq_wr                = attr->max_srq_wr;
-       resp->max_srq_sge               = attr->max_srq_sge;
-       resp->max_pkeys                 = attr->max_pkeys;
-       resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
-       resp->phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
-}
-
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
@@ -466,7 +420,47 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                return ret;
 
        memset(&resp, 0, sizeof resp);
-       copy_query_dev_fields(file, &resp, &attr);
+
+       resp.fw_ver                    = attr.fw_ver;
+       resp.node_guid                 = file->device->ib_dev->node_guid;
+       resp.sys_image_guid            = attr.sys_image_guid;
+       resp.max_mr_size               = attr.max_mr_size;
+       resp.page_size_cap             = attr.page_size_cap;
+       resp.vendor_id                 = attr.vendor_id;
+       resp.vendor_part_id            = attr.vendor_part_id;
+       resp.hw_ver                    = attr.hw_ver;
+       resp.max_qp                    = attr.max_qp;
+       resp.max_qp_wr                 = attr.max_qp_wr;
+       resp.device_cap_flags          = attr.device_cap_flags;
+       resp.max_sge                   = attr.max_sge;
+       resp.max_sge_rd                = attr.max_sge_rd;
+       resp.max_cq                    = attr.max_cq;
+       resp.max_cqe                   = attr.max_cqe;
+       resp.max_mr                    = attr.max_mr;
+       resp.max_pd                    = attr.max_pd;
+       resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
+       resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
+       resp.max_res_rd_atom           = attr.max_res_rd_atom;
+       resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
+       resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
+       resp.atomic_cap                = attr.atomic_cap;
+       resp.max_ee                    = attr.max_ee;
+       resp.max_rdd                   = attr.max_rdd;
+       resp.max_mw                    = attr.max_mw;
+       resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
+       resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
+       resp.max_mcast_grp             = attr.max_mcast_grp;
+       resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
+       resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
+       resp.max_ah                    = attr.max_ah;
+       resp.max_fmr                   = attr.max_fmr;
+       resp.max_map_per_fmr           = attr.max_map_per_fmr;
+       resp.max_srq                   = attr.max_srq;
+       resp.max_srq_wr                = attr.max_srq_wr;
+       resp.max_srq_sge               = attr.max_srq_sge;
+       resp.max_pkeys                 = attr.max_pkeys;
+       resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
+       resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
 
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
@@ -3293,52 +3287,3 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
        return ret ? ret : in_len;
 }
-
-int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
-                             struct ib_udata *ucore,
-                             struct ib_udata *uhw)
-{
-       struct ib_uverbs_ex_query_device_resp resp;
-       struct ib_uverbs_ex_query_device  cmd;
-       struct ib_device_attr attr;
-       struct ib_device *device;
-       int err;
-
-       device = file->device->ib_dev;
-       if (ucore->inlen < sizeof(cmd))
-               return -EINVAL;
-
-       err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
-       if (err)
-               return err;
-
-       if (cmd.reserved)
-               return -EINVAL;
-
-       err = device->query_device(device, &attr);
-       if (err)
-               return err;
-
-       memset(&resp, 0, sizeof(resp));
-       copy_query_dev_fields(file, &resp.base, &attr);
-       resp.comp_mask = 0;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
-               resp.odp_caps.general_caps = attr.odp_caps.general_caps;
-               resp.odp_caps.per_transport_caps.rc_odp_caps =
-                       attr.odp_caps.per_transport_caps.rc_odp_caps;
-               resp.odp_caps.per_transport_caps.uc_odp_caps =
-                       attr.odp_caps.per_transport_caps.uc_odp_caps;
-               resp.odp_caps.per_transport_caps.ud_odp_caps =
-                       attr.odp_caps.per_transport_caps.ud_odp_caps;
-               resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
-       }
-#endif
-
-       err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
-       if (err)
-               return err;
-
-       return 0;
-}
index e6c23b9eab336818fa785bae49f5c78c47221fbb..5db1a8cc388da0c5de517bf69b3d8136b94a1bbf 100644 (file)
@@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
                                    struct ib_udata *uhw) = {
        [IB_USER_VERBS_EX_CMD_CREATE_FLOW]      = ib_uverbs_ex_create_flow,
        [IB_USER_VERBS_EX_CMD_DESTROY_FLOW]     = ib_uverbs_ex_destroy_flow,
-       [IB_USER_VERBS_EX_CMD_QUERY_DEVICE]     = ib_uverbs_ex_query_device
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
index 8a87404e9c76e763709e478b17b00485f38e678f..03bf81211a5401c366522c68213ecf27cdf4b326 100644 (file)
@@ -1331,8 +1331,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
-       dev->ib_dev.uverbs_ex_cmd_mask =
-               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
index 8ba80a6d3a46d17daf233945e76d8760c107a752..d7562beb542367faf1b93d7ba66e8ef879c73bf4 100644 (file)
@@ -98,15 +98,9 @@ enum {
 
        IPOIB_MCAST_FLAG_FOUND    = 0,  /* used in set_multicast_list */
        IPOIB_MCAST_FLAG_SENDONLY = 1,
-       /*
-        * For IPOIB_MCAST_FLAG_BUSY
-        * When set, in flight join and mcast->mc is unreliable
-        * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or
-        *   haven't started yet
-        * When clear and mcast->mc is valid pointer, join was successful
-        */
-       IPOIB_MCAST_FLAG_BUSY     = 2,
+       IPOIB_MCAST_FLAG_BUSY     = 2,  /* joining or already joined */
        IPOIB_MCAST_FLAG_ATTACHED = 3,
+       IPOIB_MCAST_JOIN_STARTED  = 4,
 
        MAX_SEND_CQE              = 16,
        IPOIB_CM_COPYBREAK        = 256,
@@ -323,7 +317,6 @@ struct ipoib_dev_priv {
        struct list_head multicast_list;
        struct rb_root multicast_tree;
 
-       struct workqueue_struct *wq;
        struct delayed_work mcast_task;
        struct work_struct carrier_on_task;
        struct work_struct flush_light;
@@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
 void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
-int ipoib_ib_dev_open(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev, int flush);
 int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_down(struct net_device *dev, int flush);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
 void ipoib_pkey_dev_check_presence(struct net_device *dev);
 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
 
 void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
index 56959adb6c7da51ccbb6d20307247b7cb69ad55a..933efcea0d03f11b4da3967b8eedc137da21e08a 100644 (file)
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
        }
 
        spin_lock_irq(&priv->lock);
-       queue_delayed_work(priv->wq,
+       queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        /* Add this entry to passive ids list head, but do not re-add it
         * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                        spin_lock_irqsave(&priv->lock, flags);
                        list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                        ipoib_cm_start_rx_drain(priv);
-                       queue_work(priv->wq, &priv->cm.rx_reap_task);
+                       queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else
                        ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                                spin_lock_irqsave(&priv->lock, flags);
                                list_move(&p->list, &priv->cm.rx_reap_list);
                                spin_unlock_irqrestore(&priv->lock, flags);
-                               queue_work(priv->wq, &priv->cm.rx_reap_task);
+                               queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        }
                        return;
                }
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
-                       queue_work(priv->wq, &priv->cm.reap_task);
+                       queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }
 
                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 
                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
-                       queue_work(priv->wq, &priv->cm.reap_task);
+                       queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }
 
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
        tx->dev = dev;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
-       queue_work(priv->wq, &priv->cm.start_task);
+       queue_work(ipoib_workqueue, &priv->cm.start_task);
        return tx;
 }
 
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                spin_lock_irqsave(&priv->lock, flags);
                list_move(&tx->list, &priv->cm.reap_list);
-               queue_work(priv->wq, &priv->cm.reap_task);
+               queue_work(ipoib_workqueue, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid %pI6\n",
                          tx->neigh->daddr + 4);
                tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
 
        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
-               queue_work(priv->wq, &priv->cm.skb_task);
+               queue_work(ipoib_workqueue, &priv->cm.skb_task);
 }
 
 static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
        }
 
        if (!list_empty(&priv->cm.passive_ids))
-               queue_delayed_work(priv->wq,
+               queue_delayed_work(ipoib_workqueue,
                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        spin_unlock_irq(&priv->lock);
 }
index fe65abb5150c76b2eb941b3b2331930bc5b2b81e..72626c3481749b962fe96b79722d7c8e9c99c585 100644 (file)
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
        __ipoib_reap_ah(dev);
 
        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->ah_reap_task,
+               queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
 }
 
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
        drain_tx_cq((struct net_device *)ctx);
 }
 
-int ipoib_ib_dev_open(struct net_device *dev)
+int ipoib_ib_dev_open(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
        }
 
        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
-       queue_delayed_work(priv->wq, &priv->ah_reap_task,
+       queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));
 
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
 dev_stop:
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);
-       ipoib_ib_dev_stop(dev);
+       ipoib_ib_dev_stop(dev, flush);
        return -1;
 }
 
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
        return ipoib_mcast_start_thread(dev);
 }
 
-int ipoib_ib_dev_down(struct net_device *dev)
+int ipoib_ib_dev_down(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);
 
-       ipoib_mcast_stop_thread(dev);
+       ipoib_mcast_stop_thread(dev, flush);
        ipoib_mcast_dev_flush(dev);
 
        ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
        local_bh_enable();
 }
 
-int ipoib_ib_dev_stop(struct net_device *dev)
+int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
@@ -880,7 +880,8 @@ timeout:
        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
-       flush_workqueue(priv->wq);
+       if (flush)
+               flush_workqueue(ipoib_workqueue);
 
        begin = jiffies;
 
@@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
                    (unsigned long) dev);
 
        if (dev->flags & IFF_UP) {
-               if (ipoib_ib_dev_open(dev)) {
+               if (ipoib_ib_dev_open(dev, 1)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
@@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
        }
 
        if (level >= IPOIB_FLUSH_NORMAL)
-               ipoib_ib_dev_down(dev);
+               ipoib_ib_dev_down(dev, 0);
 
        if (level == IPOIB_FLUSH_HEAVY) {
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
-                       ipoib_ib_dev_stop(dev);
-               if (ipoib_ib_dev_open(dev) != 0)
+                       ipoib_ib_dev_stop(dev, 0);
+               if (ipoib_ib_dev_open(dev, 0) != 0)
                        return;
                if (netif_queue_stopped(dev))
                        netif_start_queue(dev);
@@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
         */
        ipoib_flush_paths(dev);
 
-       ipoib_mcast_stop_thread(dev);
+       ipoib_mcast_stop_thread(dev, 1);
        ipoib_mcast_dev_flush(dev);
 
        ipoib_transport_dev_cleanup(dev);
index 6bad17d4d5880886f88ef48d8424abe4347cdc50..58b5aa3b6f2dded5d2e6d15aff080551aa9eddd9 100644 (file)
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
 
        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
-       if (ipoib_ib_dev_open(dev)) {
+       if (ipoib_ib_dev_open(dev, 1)) {
                if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                        return 0;
                goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
        return 0;
 
 err_stop:
-       ipoib_ib_dev_stop(dev);
+       ipoib_ib_dev_stop(dev, 1);
 
 err_disable:
        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
 
        netif_stop_queue(dev);
 
-       ipoib_ib_dev_down(dev);
-       ipoib_ib_dev_stop(dev);
+       ipoib_ib_dev_down(dev, 1);
+       ipoib_ib_dev_stop(dev, 0);
 
        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
                return;
        }
 
-       queue_work(priv->wq, &priv->restart_task);
+       queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
        __ipoib_reap_neigh(priv);
 
        if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+               queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
                                   arp_tbl.gc_interval);
 }
 
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 
        /* start garbage collection */
        clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
-       queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+       queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
                           arp_tbl.gc_interval);
 
        return 0;
@@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+       if (ipoib_neigh_hash_init(priv) < 0)
+               goto out;
        /* Allocate RX/TX "rings" to hold queued skbs */
        priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, ipoib_recvq_size);
-               goto out;
+               goto out_neigh_hash_cleanup;
        }
 
        priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;
 
-       /*
-        * Must be after ipoib_ib_dev_init so we can allocate a per
-        * device wq there and use it here
-        */
-       if (ipoib_neigh_hash_init(priv) < 0)
-               goto out_dev_uninit;
-
        return 0;
 
-out_dev_uninit:
-       ipoib_ib_dev_cleanup(dev);
-
 out_tx_ring_cleanup:
        vfree(priv->tx_ring);
 
 out_rx_ring_cleanup:
        kfree(priv->rx_ring);
 
+out_neigh_hash_cleanup:
+       ipoib_neigh_hash_uninit(dev);
 out:
        return -ENOMEM;
 }
@@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
        }
        unregister_netdevice_many(&head);
 
-       /*
-        * Must be before ipoib_ib_dev_cleanup or we delete an in use
-        * work queue
-        */
-       ipoib_neigh_hash_uninit(dev);
-
        ipoib_ib_dev_cleanup(dev);
 
        kfree(priv->rx_ring);
@@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
+
+       ipoib_neigh_hash_uninit(dev);
 }
 
 static const struct header_ops ipoib_header_ops = {
@@ -1646,7 +1636,7 @@ register_failed:
        /* Stop GC if started before flush */
        set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
        cancel_delayed_work(&priv->neigh_reap_task);
-       flush_workqueue(priv->wq);
+       flush_workqueue(ipoib_workqueue);
 
 event_failed:
        ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device)
                /* Stop GC */
                set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
                cancel_delayed_work(&priv->neigh_reap_task);
-               flush_workqueue(priv->wq);
+               flush_workqueue(ipoib_workqueue);
 
                unregister_netdev(priv->dev);
                free_netdev(priv->dev);
@@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void)
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
-        *
-        * In addition, bringing one device up and another down at the
-        * same time can deadlock a single workqueue, so we have this
-        * global fallback workqueue, but we also attempt to open a
-        * per device workqueue each time we bring an interface up
         */
-       ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+       ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
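
The hunk above puts the IPoIB flush/multicast work back on a single global singlethread workqueue created at module init. As a rough, hedged sketch of that lifecycle (the names example_wq, example_task and example_fn are made up for illustration, not taken from the driver):

        #include <linux/module.h>
        #include <linux/errno.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *example_wq;     /* hypothetical */
        static struct delayed_work example_task;        /* hypothetical */

        static void example_fn(struct work_struct *work)
        {
                /* periodic maintenance would run here */
        }

        static int __init example_init(void)
        {
                example_wq = create_singlethread_workqueue("example");
                if (!example_wq)
                        return -ENOMEM;

                INIT_DELAYED_WORK(&example_task, example_fn);
                queue_delayed_work(example_wq, &example_task, HZ);
                return 0;
        }

        static void __exit example_exit(void)
        {
                cancel_delayed_work_sync(&example_task);
                flush_workqueue(example_wq);
                destroy_workqueue(example_wq);
        }

        module_init(example_init);
        module_exit(example_exit);
        MODULE_LICENSE("GPL");

A singlethread workqueue executes its items one at a time, which is what lets tasks queued on it assume they never race against themselves.
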
index bc50dd0d0e4dad7790725b0414d807d42fe82493..ffb83b5f7e805e411f1506d66a53f8465b90c439 100644 (file)
@@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                spin_unlock_irq(&priv->lock);
                priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
                set_qkey = 1;
+
+               if (!ipoib_cm_admin_enabled(dev)) {
+                       rtnl_lock();
+                       dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+                       rtnl_unlock();
+               }
        }
 
        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status,
        struct ipoib_mcast *mcast = multicast->context;
        struct net_device *dev = mcast->dev;
 
-       /*
-        * We have to take the mutex to force mcast_sendonly_join to
-        * return from ib_sa_multicast_join and set mcast->mc to a
-        * valid value.  Otherwise we were racing with ourselves in
-        * that we might fail here, but get a valid return from
-        * ib_sa_multicast_join after we had cleared mcast->mc here,
-        * resulting in mis-matched joins and leaves and a deadlock
-        */
-       mutex_lock(&mcast_mutex);
-
        /* We trap for port events ourselves. */
        if (status == -ENETRESET)
-               goto out;
+               return 0;
 
        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
        if (status) {
                if (mcast->logcount++ < 20)
-                       ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
-                                       "join failed for %pI6, status %d\n",
+                       ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
                                        mcast->mcmember.mgid.raw, status);
 
                /* Flush out any queued packets */
@@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status,
                        dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
                }
                netif_tx_unlock_bh(dev);
+
+               /* Clear the busy flag so we try again */
+               status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
+                                           &mcast->flags);
        }
-out:
-       clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-       if (status)
-               mcast->mc = NULL;
-       complete(&mcast->done);
-       if (status == -ENETRESET)
-               status = 0;
-       mutex_unlock(&mcast_mutex);
        return status;
 }
 
@@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        int ret = 0;
 
        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
-               ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
-                               "multicast joins\n");
+               ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
                return -ENODEV;
        }
 
-       if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
-               ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
-                               "sendonly join\n");
+       if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+               ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
                return -EBUSY;
        }
 
@@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        rec.port_gid = priv->local_gid;
        rec.pkey     = cpu_to_be16(priv->pkey);
 
-       mutex_lock(&mcast_mutex);
-       init_completion(&mcast->done);
-       set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
                                         priv->port, &rec,
                                         IB_SA_MCMEMBER_REC_MGID        |
@@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        if (IS_ERR(mcast->mc)) {
                ret = PTR_ERR(mcast->mc);
                clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-               complete(&mcast->done);
-               ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
-                          "failed (ret = %d)\n", ret);
+               ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
+                          ret);
        } else {
-               ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
-                               "sendonly join\n", mcast->mcmember.mgid.raw);
+               ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
+                               mcast->mcmember.mgid.raw);
        }
-       mutex_unlock(&mcast_mutex);
 
        return ret;
 }
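
The two hunks above rely on an atomic busy bit to keep at most one join attempt in flight per group: test_and_set_bit() claims the attempt up front, and the completion path clears the bit on failure so a later pass may retry. A minimal sketch of that pattern, using hypothetical names (EXAMPLE_FLAG_BUSY, struct example_state) rather than the driver's own:

        #include <linux/bitops.h>
        #include <linux/errno.h>

        #define EXAMPLE_FLAG_BUSY 0             /* hypothetical flag bit */

        struct example_state {
                unsigned long flags;
        };

        /* Start work only if no attempt is already in flight. */
        static int example_try_start(struct example_state *st)
        {
                if (test_and_set_bit(EXAMPLE_FLAG_BUSY, &st->flags))
                        return -EBUSY;          /* another attempt owns the flag */

                /* ... kick off the asynchronous join/request here ... */
                return 0;
        }

        /* Completion path: clear the flag on failure so a later attempt can retry. */
        static void example_complete(struct example_state *st, int status)
        {
                if (status)
                        clear_bit(EXAMPLE_FLAG_BUSY, &st->flags);
        }
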
@@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
                                                   carrier_on_task);
        struct ib_port_attr attr;
 
+       /*
+        * Take rtnl_lock to avoid racing with ipoib_stop() and
+        * turning the carrier back on while a device is being
+        * removed.
+        */
        if (ib_query_port(priv->ca, priv->port, &attr) ||
            attr.state != IB_PORT_ACTIVE) {
                ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
                return;
        }
 
-       /*
-        * Take rtnl_lock to avoid racing with ipoib_stop() and
-        * turning the carrier back on while a device is being
-        * removed.  However, ipoib_stop() will attempt to flush
-        * the workqueue while holding the rtnl lock, so loop
-        * on trylock until either we get the lock or we see
-        * FLAG_ADMIN_UP go away as that signals that we are bailing
-        * and can safely ignore the carrier on work.
-        */
-       while (!rtnl_trylock()) {
-               if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-                       return;
-               else
-                       msleep(20);
-       }
-       if (!ipoib_cm_admin_enabled(priv->dev))
-               dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
+       rtnl_lock();
        netif_carrier_on(priv->dev);
        rtnl_unlock();
 }
@@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status,
        ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
                        mcast->mcmember.mgid.raw, status);
 
-       /*
-        * We have to take the mutex to force mcast_join to
-        * return from ib_sa_multicast_join and set mcast->mc to a
-        * valid value.  Otherwise we were racing with ourselves in
-        * that we might fail here, but get a valid return from
-        * ib_sa_multicast_join after we had cleared mcast->mc here,
-        * resulting in mis-matched joins and leaves and a deadlock
-        */
-       mutex_lock(&mcast_mutex);
-
        /* We trap for port events ourselves. */
-       if (status == -ENETRESET)
+       if (status == -ENETRESET) {
+               status = 0;
                goto out;
+       }
 
        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
        if (!status) {
                mcast->backoff = 1;
+               mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+                       queue_delayed_work(ipoib_workqueue,
+                                          &priv->mcast_task, 0);
+               mutex_unlock(&mcast_mutex);
 
                /*
-                * Defer carrier on work to priv->wq to avoid a
+                * Defer carrier on work to ipoib_workqueue to avoid a
                 * deadlock on rtnl_lock here.
                 */
                if (mcast == priv->broadcast)
-                       queue_work(priv->wq, &priv->carrier_on_task);
-       } else {
-               if (mcast->logcount++ < 20) {
-                       if (status == -ETIMEDOUT || status == -EAGAIN) {
-                               ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
-                                               mcast->mcmember.mgid.raw, status);
-                       } else {
-                               ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
-                                          mcast->mcmember.mgid.raw, status);
-                       }
-               }
+                       queue_work(ipoib_workqueue, &priv->carrier_on_task);
 
-               mcast->backoff *= 2;
-               if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
-                       mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+               status = 0;
+               goto out;
        }
-out:
+
+       if (mcast->logcount++ < 20) {
+               if (status == -ETIMEDOUT || status == -EAGAIN) {
+                       ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+                                       mcast->mcmember.mgid.raw, status);
+               } else {
+                       ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+                                  mcast->mcmember.mgid.raw, status);
+               }
+       }
+
+       mcast->backoff *= 2;
+       if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+               mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+
+       /* Clear the busy flag so we try again */
+       status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
+       mutex_lock(&mcast_mutex);
        spin_lock_irq(&priv->lock);
-       clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-       if (status)
-               mcast->mc = NULL;
-       complete(&mcast->done);
-       if (status == -ENETRESET)
-               status = 0;
-       if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->mcast_task,
+       if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
+               queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                   mcast->backoff * HZ);
        spin_unlock_irq(&priv->lock);
        mutex_unlock(&mcast_mutex);
-
+out:
+       complete(&mcast->done);
        return status;
 }
 
@@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                rec.hop_limit     = priv->broadcast->mcmember.hop_limit;
        }
 
-       mutex_lock(&mcast_mutex);
-       init_completion(&mcast->done);
        set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+       init_completion(&mcast->done);
+       set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
+
        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
                                         &rec, comp_mask, GFP_KERNEL,
                                         ipoib_mcast_join_complete, mcast);
@@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
                        mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
 
+               mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_delayed_work(priv->wq, &priv->mcast_task,
+                       queue_delayed_work(ipoib_workqueue,
+                                          &priv->mcast_task,
                                           mcast->backoff * HZ);
+               mutex_unlock(&mcast_mutex);
        }
-       mutex_unlock(&mcast_mutex);
 }
 
 void ipoib_mcast_join_task(struct work_struct *work)
@@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
                        ipoib_warn(priv, "failed to allocate broadcast group\n");
                        mutex_lock(&mcast_mutex);
                        if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                               queue_delayed_work(priv->wq, &priv->mcast_task,
-                                                  HZ);
+                               queue_delayed_work(ipoib_workqueue,
+                                                  &priv->mcast_task, HZ);
                        mutex_unlock(&mcast_mutex);
                        return;
                }
@@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
        }
 
        if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-               if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
-                   !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+               if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
                        ipoib_mcast_join(dev, priv->broadcast, 0);
                return;
        }
@@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work)
        while (1) {
                struct ipoib_mcast *mcast = NULL;
 
-               /*
-                * Need the mutex so our flags are consistent, need the
-                * priv->lock so we don't race with list removals in either
-                * mcast_dev_flush or mcast_restart_task
-                */
-               mutex_lock(&mcast_mutex);
                spin_lock_irq(&priv->lock);
                list_for_each_entry(mcast, &priv->multicast_list, list) {
-                       if (IS_ERR_OR_NULL(mcast->mc) &&
-                           !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
-                           !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+                       if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
+                           && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
+                           && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                                /* Found the next unjoined group */
                                break;
                        }
                }
                spin_unlock_irq(&priv->lock);
-               mutex_unlock(&mcast_mutex);
 
                if (&mcast->list == &priv->multicast_list) {
                        /* All done */
                        break;
                }
 
-               if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-                       ipoib_mcast_sendonly_join(mcast);
-               else
-                       ipoib_mcast_join(dev, mcast, 1);
+               ipoib_mcast_join(dev, mcast, 1);
                return;
        }
 
@@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
        mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+               queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);
 
        return 0;
 }
 
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
        cancel_delayed_work(&priv->mcast_task);
        mutex_unlock(&mcast_mutex);
 
-       flush_workqueue(priv->wq);
+       if (flush)
+               flush_workqueue(ipoib_workqueue);
 
        return 0;
 }
@@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
        int ret = 0;
 
        if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-               ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
-
-       if (!IS_ERR_OR_NULL(mcast->mc))
                ib_sa_free_multicast(mcast->mc);
 
        if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -725,8 +685,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
                memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
                __ipoib_mcast_add(dev, mcast);
                list_add_tail(&mcast->list, &priv->multicast_list);
-               if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_delayed_work(priv->wq, &priv->mcast_task, 0);
        }
 
        if (!mcast->ah) {
@@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
                if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
                        ipoib_dbg_mcast(priv, "no address vector, "
                                        "but multicast join already started\n");
+               else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+                       ipoib_mcast_sendonly_join(mcast);
 
                /*
                 * If lookup completes between here and out:, don't
@@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
+       /* separate the wait for join completion from the leave */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+               if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
                        wait_for_completion(&mcast->done);
 
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 
        ipoib_dbg_mcast(priv, "restarting multicast task\n");
 
+       ipoib_mcast_stop_thread(dev, 0);
+
        local_irq_save(flags);
        netif_addr_lock(dev);
        spin_lock(&priv->lock);
@@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
        netif_addr_unlock(dev);
        local_irq_restore(flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
-       list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-                       wait_for_completion(&mcast->done);
-
-       /*
-        * We have to cancel outside of the spinlock, but we have to
-        * take the rtnl lock or else we race with the removal of
-        * entries from the remove list in mcast_dev_flush as part
-        * of ipoib_stop().  We detect the drop of the ADMIN_UP flag
-        * to signal that we have hit this particular race, and we
-        * return since we know we don't need to do anything else
-        * anyway.
-        */
-       while (!rtnl_trylock()) {
-               if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-                       return;
-               else
-                       msleep(20);
-       }
+       /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(mcast->dev, mcast);
                ipoib_mcast_free(mcast);
        }
-       /*
-        * Restart our join task if needed
-        */
-       ipoib_mcast_start_thread(dev);
-       rtnl_unlock();
+
+       if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+               ipoib_mcast_start_thread(dev);
 }
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
index b72a753eb41dc3031608269c56434ed507b96f5f..c56d5d44c53b3f11725b6d6da220ea2c440fe496 100644 (file)
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        int ret, size;
        int i;
 
-       /*
-        * the various IPoIB tasks assume they will never race against
-        * themselves, so always use a single thread workqueue
-        */
-       priv->wq = create_singlethread_workqueue("ipoib_wq");
-       if (!priv->wq) {
-               printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
-               return -ENODEV;
-       }
-
        priv->pd = ib_alloc_pd(priv->ca);
        if (IS_ERR(priv->pd)) {
                printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
-               goto out_free_wq;
+               return -ENODEV;
        }
 
        priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ out_free_mr:
 
 out_free_pd:
        ib_dealloc_pd(priv->pd);
-
-out_free_wq:
-       destroy_workqueue(priv->wq);
-       priv->wq = NULL;
        return -ENODEV;
 }
 
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
 
        if (ib_dealloc_pd(priv->pd))
                ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
-       if (priv->wq) {
-               flush_workqueue(priv->wq);
-               destroy_workqueue(priv->wq);
-               priv->wq = NULL;
-       }
 }
 
 void ipoib_event(struct ib_event_handler *handler,
index 2b0468e3df6a6a727ca7fda39de7f920fbb3c40e..56b96c63dc4bbc5479a3f51ef712fcbb70b4f5b6 100644 (file)
@@ -37,6 +37,7 @@ static struct irq_domain *gic_irq_domain;
 static int gic_shared_intrs;
 static int gic_vpes;
 static unsigned int gic_cpu_pin;
+static unsigned int timer_cpu_pin;
 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
 
 static void __gic_irq_dispatch(void);
@@ -616,6 +617,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
                        break;
                case GIC_LOCAL_INT_TIMER:
+                       /* CONFIG_MIPS_CMP workaround (see __gic_init) */
+                       val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
                        break;
                case GIC_LOCAL_INT_PERFCTR:
@@ -713,12 +716,36 @@ static void __init __gic_init(unsigned long gic_base_addr,
        if (cpu_has_veic) {
                /* Always use vector 1 in EIC mode */
                gic_cpu_pin = 0;
+               timer_cpu_pin = gic_cpu_pin;
                set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
                               __gic_irq_dispatch);
        } else {
                gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
                irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
                                        gic_irq_dispatch);
+               /*
+                * With the CMP implementation of SMP (deprecated), other CPUs
+                * are started by the bootloader and put into a timer based
+                * waiting poll loop. We must not re-route those CPU's local
+                * timer interrupts as the wait instruction will never finish,
+                * so just handle whatever CPU interrupt it is routed to by
+                * default.
+                *
+                * This workaround should be removed when CMP support is
+                * dropped.
+                */
+               if (IS_ENABLED(CONFIG_MIPS_CMP) &&
+                   gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
+                       timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
+                                                        GIC_VPE_TIMER_MAP)) &
+                                       GIC_MAP_MSK;
+                       irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
+                                               GIC_CPU_PIN_OFFSET +
+                                               timer_cpu_pin,
+                                               gic_irq_dispatch);
+               } else {
+                       timer_cpu_pin = gic_cpu_pin;
+               }
        }
 
        gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
index 0b380603a578543bcd275d248ade94f3f07b24a8..d7c286656a25721ec58fa16a5f3fdb277a0747a5 100644 (file)
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                                        add_ai(plci, &parms[5]);
                                        sig_req(plci, REJECT, 0);
                                }
-                               else if (Reject == 1 || Reject > 9)
+                               else if (Reject == 1 || Reject >= 9)
                                {
                                        add_ai(plci, &parms[5]);
                                        sig_req(plci, HANGUP, 0);
index 5bdedf6df153cf25d54e91a2cca5d9e3b6b8b1cf..c355a226a0247c824770457179731bc05d3a0667 100644 (file)
@@ -5,6 +5,7 @@
 menuconfig MD
        bool "Multiple devices driver support (RAID and LVM)"
        depends on BLOCK
+       select SRCU
        help
          Support multiple physical spindles through a single logical device.
          Required for RAID and logical volume management.
index da3604e73e8abafbd8e127dbfc659360d23310e5..1695ee5f3ffc30b883c83c1926745ba22e48a7e1 100644 (file)
@@ -72,6 +72,19 @@ __acquires(bitmap->lock)
        /* this page has not been allocated yet */
 
        spin_unlock_irq(&bitmap->lock);
+       /* It is possible that this is being called inside a
+        * prepare_to_wait/finish_wait loop from raid5.c:make_request().
+        * In general it is not permitted to sleep in that context as it
+        * can cause the loop to spin freely.
+        * That doesn't apply here as we can only reach this point
+        * once within any loop.
+        * When this function completes, either bp[page].map or
+        * bp[page].hijacked will be set.  In either case, this function
+        * will abort before getting to this point again.  So there is
+        * no risk of a free-spin, and so it is safe to assert
+        * that sleeping here is allowed.
+        */
+       sched_annotate_sleep();
        mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
        spin_lock_irq(&bitmap->lock);
 
index c1b0d52bfcb0f7b014bbc48f4823e3b9b028dcce..b98765f6f77fd9f1e11ecdcd8809928e7b821716 100644 (file)
@@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf,
                                          (unsigned long long)sh->sector,
                                          rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
        }
+
+       if (rcw > disks && rmw > disks &&
+           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+               set_bit(STRIPE_DELAYED, &sh->state);
+
        /* now if nothing is locked, and if we have enough data,
         * we can start a write request
         */
index d6607ee9c85506bb4fcc5d2077b8dc4e582d4b4b..84673ebcf428846fadf26ae881c39172fd45d612 100644 (file)
@@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC
 
 config NETPOLL
        def_bool NETCONSOLE
+       select SRCU
 
 config NET_POLL_CONTROLLER
        def_bool NETPOLL
index 5e40a8b68cbe1964a242868ee978551442a3bfd0..b3b922adc0e4f68ed15ff34537c21e1bd7e5e81f 100644 (file)
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
 
        cfhsi = netdev_priv(dev);
        cfhsi_netlink_parms(data, cfhsi);
-       dev_net_set(cfhsi->ndev, src_net);
 
        get_ops = symbol_get(cfhsi_get_ops);
        if (!get_ops) {
index 7a5e4aa5415e2652a13d9624a70003a5e2f6ad3d..77f1f6048dddf0bdf94d9b771f252ef70147be78 100644 (file)
@@ -45,7 +45,7 @@ config AMD8111_ETH
 
 config LANCE
        tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-       depends on ISA && ISA_DMA_API
+       depends on ISA && ISA_DMA_API && !ARM
        ---help---
          If you have a network (Ethernet) card of this type, say Y and read
          the Ethernet-HOWTO, available from
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN
 
 config NI65
        tristate "NI6510 support"
-       depends on ISA && ISA_DMA_API
+       depends on ISA && ISA_DMA_API && !ARM
        ---help---
          If you have a network (Ethernet) card of this type, say Y and read
          the Ethernet-HOWTO, available from
index 5b22764ba88d2304c3502cd987420e7f955fd793..27245efe9f50098eee594cb844e5e1647978bf3d 100644 (file)
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
   do {
     /* WARNING: MACE_IR is a READ/CLEAR port! */
     status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+    if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
+      return IRQ_NONE;
 
     pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
 
index 7bb5f07dbeef3e806174047cc883e3fb6ca149c3..e5ffb2ccb67d1d053d47f0f6cc219e86fb6b086b 100644 (file)
@@ -523,6 +523,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+       hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
        hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
@@ -552,13 +553,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
                break;
        }
 
-       /* The Queue and Channel counts are zero based so increment them
+       /* The Queue, Channel and TC counts are zero based so increment them
         * to get the actual number
         */
        hw_feat->rx_q_cnt++;
        hw_feat->tx_q_cnt++;
        hw_feat->rx_ch_cnt++;
        hw_feat->tx_ch_cnt++;
+       hw_feat->tc_cnt++;
 
        DBGPR("<--xgbe_get_all_hw_features\n");
 }
index 83a50280bb7098149624f4bfe341822845e2148f..793f3b73eeff61216b2deaed21da60330f9ae898 100644 (file)
@@ -369,6 +369,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
                if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
                        break;
 
+               /* read fpqnum field after dataaddr field */
+               dma_rmb();
                if (is_rx_desc(raw_desc))
                        ret = xgene_enet_rx_frame(ring, raw_desc);
                else
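
The dma_rmb() added above orders the read of the descriptor word the device writes last against the reads of the remaining fields it DMA-writes earlier. A rough sketch of that pattern, with a hypothetical descriptor layout (struct example_desc, example_desc_ready are illustrative names only):

        #include <linux/types.h>
        #include <linux/compiler.h>
        #include <asm/barrier.h>

        struct example_desc {                   /* hypothetical layout */
                u64 addr_and_valid;             /* device writes this word last */
                u64 fpqnum;
        };

        static bool example_desc_ready(const struct example_desc *desc, u64 *fpq)
        {
                if (!READ_ONCE(desc->addr_and_valid))
                        return false;           /* slot still empty */

                dma_rmb();                      /* order later field reads after the valid check */
                *fpq = READ_ONCE(desc->fpqnum);
                return true;
        }
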
index 7403dff8f14a4cf38090f8b32c57645d91b1770e..905ac5f5d9a60037746998d2f038ee738c04ce4f 100644 (file)
@@ -32,7 +32,8 @@ config CS89x0
          will be called cs89x0.
 
 config CS89x0_PLATFORM
-       bool "CS89x0 platform driver support"
+       bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
+       default !HAS_IOPORT_MAP
        depends on CS89x0
        help
          Say Y to compile the cs89x0 driver as a platform driver. This
index 3e1a9c1a67a95ffdaddbcca9739a9e37dee66eac..fda12fb32ec77a8538a0f1d1370d2e653c91856c 100644 (file)
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv,
                return -EBUSY;
 
        /* Fill regular entries */
-       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
             i++)
                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
        /* Fill the rest with fall-throughs */
index 63c807c9b21c0f7d68f330bab3be65f2d806f173..edea13b0ee85277a2ca6b66ddba50f928efc6afe 100644 (file)
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
 
 static int igbvf_tso(struct igbvf_adapter *adapter,
                      struct igbvf_ring *tx_ring,
-                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+                    struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
+                    __be16 protocol)
 {
        struct e1000_adv_tx_context_desc *context_desc;
        struct igbvf_buffer *buffer_info;
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
        l4len = tcp_hdrlen(skb);
        *hdr_len += l4len;
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
 
-       if (skb->protocol == htons(ETH_P_IP))
+       if (protocol == htons(ETH_P_IP))
                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
        tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
 
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
 
 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                                  struct igbvf_ring *tx_ring,
-                                 struct sk_buff *skb, u32 tx_flags)
+                                struct sk_buff *skb, u32 tx_flags,
+                                __be16 protocol)
 {
        struct e1000_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       switch (skb->protocol) {
+                       switch (protocol) {
                        case htons(ETH_P_IP):
                                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
        u8 hdr_len = 0;
        int count = 0;
        int tso = 0;
+       __be16 protocol = vlan_get_protocol(skb);
 
        if (test_bit(__IGBVF_DOWN, &adapter->state)) {
                dev_kfree_skb_any(skb);
@@ -2239,13 +2242,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
        }
 
-       if (skb->protocol == htons(ETH_P_IP))
+       if (protocol == htons(ETH_P_IP))
                tx_flags |= IGBVF_TX_FLAGS_IPV4;
 
        first = tx_ring->next_to_use;
 
        tso = skb_is_gso(skb) ?
-               igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
+               igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
        if (unlikely(tso < 0)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -2253,7 +2256,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
 
        if (tso)
                tx_flags |= IGBVF_TX_FLAGS_TSO;
-       else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+       else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGBVF_TX_FLAGS_CSUM;
 
index 2ed2c7de230444f88c3f06451d7cc8a7167f5f05..67b02bde179e9df1472f5b751b30e11b8f3ba34f 100644 (file)
@@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                if (!vhdr)
                        goto out_drop;
 
-               protocol = vhdr->h_vlan_encapsulated_proto;
                tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
                                  IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
        }
+       protocol = vlan_get_protocol(skb);
 
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
            adapter->ptp_clock &&
index 62a0d8e0f17da5ce75d48f9ff39fca5354c4447f..38c7a0be81977e91a901ea0d93b11db7c6652f51 100644 (file)
@@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (first->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 l4_hdr = 0;
-               switch (skb->protocol) {
+               switch (first->protocol) {
                case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
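
The Intel driver changes above switch from skb->protocol to the protocol returned by vlan_get_protocol() (cached in first->protocol for ixgbevf) because, for a frame carrying a software-inserted 802.1Q tag, skb->protocol holds the VLAN ethertype rather than the encapsulated L3 protocol. A small illustration; the helper name example_is_ipv4 is made up:

        #include <linux/if_ether.h>
        #include <linux/if_vlan.h>
        #include <linux/skbuff.h>

        /*
         * vlan_get_protocol() looks past a VLAN tag and returns the
         * encapsulated protocol, so checksum/TSO setup keys off the
         * real L3 header instead of ETH_P_8021Q.
         */
        static bool example_is_ipv4(struct sk_buff *skb)        /* hypothetical helper */
        {
                __be16 protocol = vlan_get_protocol(skb);

                return protocol == htons(ETH_P_IP);
        }
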
index bdd4eea2247cc1b09c19551a21d1c08f2c65e9d5..210691c89b6cbfdb3b7ac000cb75013e1890545f 100644 (file)
@@ -235,7 +235,8 @@ do {                                                                        \
 extern int mlx4_log_num_mgm_entry_size;
 extern int log_mtts_per_seg;
 
-#define MLX4_MAX_NUM_SLAVES    (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define MLX4_MAX_NUM_SLAVES    (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
+                                    MLX4_MFUNC_MAX))
 #define ALL_SLAVES 0xff
 
 struct mlx4_bitmap {
index 18e5de72e9b4c2c9b95848bf444597251942039e..4e1f58cf19ce4013deb51010fd130cc6f2f0860c 100644 (file)
@@ -967,7 +967,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
                                              budget);
        work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                        qlcnic_enable_sds_intr(adapter, sds_ring);
@@ -992,6 +997,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
                napi_complete(&tx_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_enable_tx_intr(adapter, tx_ring);
+       } else {
+               /* As qlcnic_process_cmd_ring() returned 0, we need a repoll */
+               work_done = budget;
        }
 
        return work_done;
@@ -1950,7 +1958,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
 
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
@@ -1973,7 +1986,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
 
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
@@ -1995,6 +2013,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
                napi_complete(&tx_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
                        qlcnic_enable_tx_intr(adapter, tx_ring);
+       } else {
+               /* need a repoll */
+               work_done = budget;
        }
 
        return work_done;
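
The qlcnic poll handlers above are reworked so that unfinished TX completion work forces work_done = budget, which tells the NAPI core to reschedule the poll rather than complete NAPI and re-enable interrupts. A condensed sketch of that shape, with hypothetical ring type and helpers (example_ring, example_clean_tx, example_clean_rx, example_enable_irqs):

        #include <linux/netdevice.h>

        struct example_ring {                           /* hypothetical */
                struct napi_struct napi;
        };

        static bool example_clean_tx(struct example_ring *r, int budget) { return true; }
        static int  example_clean_rx(struct example_ring *r, int budget) { return 0; }
        static void example_enable_irqs(struct example_ring *r) { }

        static int example_napi_poll(struct napi_struct *napi, int budget)
        {
                struct example_ring *ring = container_of(napi, struct example_ring, napi);
                bool tx_done = example_clean_tx(ring, budget);
                int work_done = example_clean_rx(ring, budget);

                /* TX not finished: claim the full budget so NAPI polls again */
                if (!tx_done)
                        work_done = budget;

                if (work_done < budget) {
                        napi_complete(napi);
                        example_enable_irqs(ring);
                }
                return work_done;
        }

Returning the full budget keeps NAPI scheduled; napi_complete() is only called when all RX and TX work for this pass is done.
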
index 6c904a6cad2a177036b42190cffb17a25e194708..ef5aed3b122530a785d971c30adfa1d4c9186f08 100644 (file)
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status = 0;
+       bool need_restart = netif_running(ndev);
 
-       status = ql_adapter_down(qdev);
-       if (status) {
-               netif_err(qdev, link, qdev->ndev,
-                         "Failed to bring down the adapter\n");
-               return status;
+       if (need_restart) {
+               status = ql_adapter_down(qdev);
+               if (status) {
+                       netif_err(qdev, link, qdev->ndev,
+                                 "Failed to bring down the adapter\n");
+                       return status;
+               }
        }
 
        /* update the features with the recent change */
        ndev->features = features;
 
-       status = ql_adapter_up(qdev);
-       if (status) {
-               netif_err(qdev, link, qdev->ndev,
-                         "Failed to bring up the adapter\n");
-               return status;
+       if (need_restart) {
+               status = ql_adapter_up(qdev);
+               if (status) {
+                       netif_err(qdev, link, qdev->ndev,
+                                 "Failed to bring up the adapter\n");
+                       return status;
+               }
        }
+
        return status;
 }
 
index d2835bf7b4fbef1744bf2bd6d840acfe47863a39..3699b98d5b2c26c9c50220fa6f69c5d97ec1beb3 100644 (file)
@@ -1119,6 +1119,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
                        skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
                        skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
                }
+               nskb->queue_mapping = skb->queue_mapping;
                dev_kfree_skb(skb);
                skb = nskb;
        }
index 9f49c0129a78a63f9a473012162ab74260f9449a..7cd4eb38abfa1591ed23b16141e9adf68aa204fe 100644 (file)
@@ -716,7 +716,7 @@ int netvsc_send(struct hv_device *device,
        u64 req_id;
        unsigned int section_index = NETVSC_INVALID_INDEX;
        u32 msg_size = 0;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        u16 q_idx = packet->q_idx;
 
 
@@ -743,8 +743,6 @@ int netvsc_send(struct hv_device *device,
                                                           packet);
                        skb = (struct sk_buff *)
                              (unsigned long)packet->send_completion_tid;
-                       if (skb)
-                               dev_kfree_skb_any(skb);
                        packet->page_buf_cnt = 0;
                }
        }
@@ -810,6 +808,13 @@ int netvsc_send(struct hv_device *device,
                           packet, ret);
        }
 
+       if (ret != 0) {
+               if (section_index != NETVSC_INVALID_INDEX)
+                       netvsc_free_send_slot(net_device, section_index);
+       } else if (skb) {
+               dev_kfree_skb_any(skb);
+       }
+
        return ret;
 }
 
index 7df221788cd4dc7ae4d8c3ed2f53789296d00deb..919f4fccc322a9b227dcbddaa854ca14b4e18247 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/fs.h>
 #include <linux/uio.h>
 
-#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev;
 static const struct proto_ops macvtap_socket_ops;
 
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
-                     NETIF_F_TSO6)
+                     NETIF_F_TSO6 | NETIF_F_UFO)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
 
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
                        gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-                       pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
-                                    current->comm);
                        gso_type = SKB_GSO_UDP;
-                       if (skb->protocol == htons(ETH_P_IPV6))
-                               ipv6_proxy_select_ident(skb);
                        break;
                default:
                        return -EINVAL;
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+               else if (sinfo->gso_type & SKB_GSO_UDP)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
                        if (arg & TUN_F_TSO6)
                                feature_mask |= NETIF_F_TSO6;
                }
+
+               if (arg & TUN_F_UFO)
+                       feature_mask |= NETIF_F_UFO;
        }
 
        /* tun/tap driver inverts the usage for TSO offloads, where
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
         * When user space turns off TSO, we turn off GSO/LRO so that
         * user-space will not receive TSO frames.
         */
-       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
+       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
                features |= RX_OFFLOADS;
        else
                features &= ~RX_OFFLOADS;
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
-                           TUN_F_TSO_ECN))
+                           TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;
 
                rtnl_lock();
index 602c625d95d5e26ba0c79f9ae6af2dd92f1f3c93..b5edc7f96a392d0080400ed4285cfb84e86d9e5c 100644 (file)
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
        /*
         * See if we managed to reduce the size of the packet.
         */
-       if (olen < isize) {
+       if (olen < isize && olen <= osize) {
                state->stats.comp_bytes += olen;
                state->stats.comp_packets++;
        } else {
index 8c8dc16839a79473e976d9de2132cc6df911e418..10f9e4021b5ab9799c2e445fa1f6f9c4799293b3 100644 (file)
@@ -65,7 +65,6 @@
 #include <linux/nsproxy.h>
 #include <linux/virtio_net.h>
 #include <linux/rcupdate.h>
-#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
@@ -187,7 +186,7 @@ struct tun_struct {
        struct net_device       *dev;
        netdev_features_t       set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
-                         NETIF_F_TSO6)
+                         NETIF_F_TSO6|NETIF_F_UFO)
 
        int                     vnet_hdr_sz;
        int                     sndbuf;
@@ -1167,8 +1166,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                break;
        }
 
-       skb_reset_network_header(skb);
-
        if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1179,20 +1176,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-               {
-                       static bool warned;
-
-                       if (!warned) {
-                               warned = true;
-                               netdev_warn(tun->dev,
-                                           "%s: using disabled UFO feature; please fix this program\n",
-                                           current->comm);
-                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-                       if (skb->protocol == htons(ETH_P_IPV6))
-                               ipv6_proxy_select_ident(skb);
                        break;
-               }
                default:
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
@@ -1221,6 +1206,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
 
+       skb_reset_network_header(skb);
        skb_probe_transport_header(skb, 0);
 
        rxhash = skb_get_hash(skb);
@@ -1298,6 +1284,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                        else if (sinfo->gso_type & SKB_GSO_TCPV6)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+                       else if (sinfo->gso_type & SKB_GSO_UDP)
+                               gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                        else {
                                pr_err("unexpected GSO type: "
                                       "0x%x, gso_size %d, hdr_len %d\n",
@@ -1746,6 +1734,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
                                features |= NETIF_F_TSO6;
                        arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
                }
+
+               if (arg & TUN_F_UFO) {
+                       features |= NETIF_F_UFO;
+                       arg &= ~TUN_F_UFO;
+               }
        }
 
        /* This gives the user a way to test for new features in future by
index 99b69af142742523d873762bcdd9c58c0b2b8b0e..4a1e9c489f1f455388ffee289d65e1d6b36cba42 100644 (file)
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
                int ret;
 
                udelay(1);
-               ret = sr_read_reg(dev, EPCR, &tmp);
+               ret = sr_read_reg(dev, SR_EPCR, &tmp);
                if (ret < 0)
                        return ret;
 
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
 
        mutex_lock(&dev->phy_mutex);
 
-       sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
-       sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
+       sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+       sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
 
        ret = wait_phy_eeprom_ready(dev, phy);
        if (ret < 0)
                goto out_unlock;
 
-       sr_write_reg(dev, EPCR, 0x0);
-       ret = sr_read(dev, EPDR, 2, value);
+       sr_write_reg(dev, SR_EPCR, 0x0);
+       ret = sr_read(dev, SR_EPDR, 2, value);
 
        netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
                   phy, reg, *value, ret);
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
 
        mutex_lock(&dev->phy_mutex);
 
-       ret = sr_write(dev, EPDR, 2, &value);
+       ret = sr_write(dev, SR_EPDR, 2, &value);
        if (ret < 0)
                goto out_unlock;
 
-       sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
-       sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
+       sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+       sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
                    (EPCR_WEP | EPCR_ERPRW));
 
        ret = wait_phy_eeprom_ready(dev, phy);
        if (ret < 0)
                goto out_unlock;
 
-       sr_write_reg(dev, EPCR, 0x0);
+       sr_write_reg(dev, SR_EPCR, 0x0);
 
 out_unlock:
        mutex_unlock(&dev->phy_mutex);
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
        if (loc == MII_BMSR) {
                u8 value;
 
-               sr_read_reg(dev, NSR, &value);
+               sr_read_reg(dev, SR_NSR, &value);
                if (value & NSR_LINKST)
                        rc = 1;
        }
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev)
        int rc = 0;
 
        /* Get the Link Status directly */
-       sr_read_reg(dev, NSR, &value);
+       sr_read_reg(dev, SR_NSR, &value);
        if (value & NSR_LINKST)
                rc = 1;
 
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev)
                }
        }
 
-       sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes);
-       sr_write_reg_async(dev, RCR, rx_ctl);
+       sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
+       sr_write_reg_async(dev, SR_RCR, rx_ctl);
 }
 
 static int sr9700_set_mac_address(struct net_device *netdev, void *p)
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
        }
 
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       sr_write_async(dev, PAR, 6, netdev->dev_addr);
+       sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
 
        return 0;
 }
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
        mii->phy_id_mask = 0x1f;
        mii->reg_num_mask = 0x1f;
 
-       sr_write_reg(dev, NCR, NCR_RST);
+       sr_write_reg(dev, SR_NCR, NCR_RST);
        udelay(20);
 
        /* read MAC
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
         * EEPROM automatically to PAR. In case there is no EEPROM externally,
         * a default MAC address is stored in PAR for making chip work properly.
         */
-       if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+       if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
                netdev_err(netdev, "Error reading MAC address\n");
                ret = -ENODEV;
                goto out;
        }
 
        /* power up and reset phy */
-       sr_write_reg(dev, PRR, PRR_PHY_RST);
+       sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
        /* at least 10ms, here 20ms for safety */
        mdelay(20);
-       sr_write_reg(dev, PRR, 0);
+       sr_write_reg(dev, SR_PRR, 0);
        /* at least 1ms, here 2ms so the registers read back correctly */
        udelay(2 * 1000);
 
index fd687c575e742efce8d99e2fa28bb74dbba0e7dd..258b030277e753d56b7961a04128dce2d97da70a 100644 (file)
 /* sr9700 spec. register table on Linux platform */
 
 /* Network Control Reg */
-#define        NCR                     0x00
+#define        SR_NCR                  0x00
 #define                NCR_RST                 (1 << 0)
 #define                NCR_LBK                 (3 << 1)
 #define                NCR_FDX                 (1 << 3)
 #define                NCR_WAKEEN              (1 << 6)
 /* Network Status Reg */
-#define        NSR                     0x01
+#define        SR_NSR                  0x01
 #define                NSR_RXRDY               (1 << 0)
 #define                NSR_RXOV                (1 << 1)
 #define                NSR_TX1END              (1 << 2)
@@ -30,7 +30,7 @@
 #define                NSR_LINKST              (1 << 6)
 #define                NSR_SPEED               (1 << 7)
 /* Tx Control Reg */
-#define        TCR                     0x02
+#define        SR_TCR                  0x02
 #define                TCR_CRC_DIS             (1 << 1)
 #define                TCR_PAD_DIS             (1 << 2)
 #define                TCR_LC_CARE             (1 << 3)
@@ -38,7 +38,7 @@
 #define                TCR_EXCECM              (1 << 5)
 #define                TCR_LF_EN               (1 << 6)
 /* Tx Status Reg for Packet Index 1 */
-#define        TSR1            0x03
+#define        SR_TSR1         0x03
 #define                TSR1_EC                 (1 << 2)
 #define                TSR1_COL                (1 << 3)
 #define                TSR1_LC                 (1 << 4)
@@ -46,7 +46,7 @@
 #define                TSR1_LOC                (1 << 6)
 #define                TSR1_TLF                (1 << 7)
 /* Tx Status Reg for Packet Index 2 */
-#define        TSR2            0x04
+#define        SR_TSR2         0x04
 #define                TSR2_EC                 (1 << 2)
 #define                TSR2_COL                (1 << 3)
 #define                TSR2_LC                 (1 << 4)
@@ -54,7 +54,7 @@
 #define                TSR2_LOC                (1 << 6)
 #define                TSR2_TLF                (1 << 7)
 /* Rx Control Reg */
-#define        RCR                     0x05
+#define        SR_RCR                  0x05
 #define                RCR_RXEN                (1 << 0)
 #define                RCR_PRMSC               (1 << 1)
 #define                RCR_RUNT                (1 << 2)
 #define                RCR_DIS_CRC             (1 << 4)
 #define                RCR_DIS_LONG    (1 << 5)
 /* Rx Status Reg */
-#define        RSR                     0x06
+#define        SR_RSR                  0x06
 #define                RSR_AE                  (1 << 2)
 #define                RSR_MF                  (1 << 6)
 #define                RSR_RF                  (1 << 7)
 /* Rx Overflow Counter Reg */
-#define        ROCR            0x07
+#define        SR_ROCR         0x07
 #define                ROCR_ROC                (0x7F << 0)
 #define                ROCR_RXFU               (1 << 7)
 /* Back Pressure Threshold Reg */
-#define        BPTR            0x08
+#define        SR_BPTR         0x08
 #define                BPTR_JPT                (0x0F << 0)
 #define                BPTR_BPHW               (0x0F << 4)
 /* Flow Control Threshold Reg */
-#define        FCTR            0x09
+#define        SR_FCTR         0x09
 #define                FCTR_LWOT               (0x0F << 0)
 #define                FCTR_HWOT               (0x0F << 4)
 /* rx/tx Flow Control Reg */
-#define        FCR                     0x0A
+#define        SR_FCR                  0x0A
 #define                FCR_FLCE                (1 << 0)
 #define                FCR_BKPA                (1 << 4)
 #define                FCR_TXPEN               (1 << 5)
 #define                FCR_TXPF                (1 << 6)
 #define                FCR_TXP0                (1 << 7)
 /* Eeprom & Phy Control Reg */
-#define        EPCR            0x0B
+#define        SR_EPCR         0x0B
 #define                EPCR_ERRE               (1 << 0)
 #define                EPCR_ERPRW              (1 << 1)
 #define                EPCR_ERPRR              (1 << 2)
 #define                EPCR_EPOS               (1 << 3)
 #define                EPCR_WEP                (1 << 4)
 /* Eeprom & Phy Address Reg */
-#define        EPAR            0x0C
+#define        SR_EPAR         0x0C
 #define                EPAR_EROA               (0x3F << 0)
 #define                EPAR_PHY_ADR_MASK       (0x03 << 6)
 #define                EPAR_PHY_ADR            (0x01 << 6)
 /* Eeprom &    Phy Data Reg */
-#define        EPDR            0x0D    /* 0x0D ~ 0x0E for Data Reg Low & High */
+#define        SR_EPDR         0x0D    /* 0x0D ~ 0x0E for Data Reg Low & High */
 /* Wakeup Control Reg */
-#define        WCR                     0x0F
+#define        SR_WCR                  0x0F
 #define                WCR_MAGICST             (1 << 0)
 #define                WCR_LINKST              (1 << 2)
 #define                WCR_MAGICEN             (1 << 3)
 #define                WCR_LINKEN              (1 << 5)
 /* Physical Address Reg */
-#define        PAR                     0x10    /* 0x10 ~ 0x15 6 bytes for PAR */
+#define        SR_PAR                  0x10    /* 0x10 ~ 0x15 6 bytes for PAR */
 /* Multicast Address Reg */
-#define        MAR                     0x16    /* 0x16 ~ 0x1D 8 bytes for MAR */
+#define        SR_MAR                  0x16    /* 0x16 ~ 0x1D 8 bytes for MAR */
 /* 0x1e unused */
 /* Phy Reset Reg */
-#define        PRR                     0x1F
+#define        SR_PRR                  0x1F
 #define                PRR_PHY_RST             (1 << 0)
 /* Tx sdram Write Pointer Address Low */
-#define        TWPAL           0x20
+#define        SR_TWPAL                0x20
 /* Tx sdram Write Pointer Address High */
-#define        TWPAH           0x21
+#define        SR_TWPAH                0x21
 /* Tx sdram Read Pointer Address Low */
-#define        TRPAL           0x22
+#define        SR_TRPAL                0x22
 /* Tx sdram Read Pointer Address High */
-#define        TRPAH           0x23
+#define        SR_TRPAH                0x23
 /* Rx sdram Write Pointer Address Low */
-#define        RWPAL           0x24
+#define        SR_RWPAL                0x24
 /* Rx sdram Write Pointer Address High */
-#define        RWPAH           0x25
+#define        SR_RWPAH                0x25
 /* Rx sdram Read Pointer Address Low */
-#define        RRPAL           0x26
+#define        SR_RRPAL                0x26
 /* Rx sdram Read Pointer Address High */
-#define        RRPAH           0x27
+#define        SR_RRPAH                0x27
 /* Vendor ID register */
-#define        VID                     0x28    /* 0x28 ~ 0x29 2 bytes for VID */
+#define        SR_VID                  0x28    /* 0x28 ~ 0x29 2 bytes for VID */
 /* Product ID register */
-#define        PID                     0x2A    /* 0x2A ~ 0x2B 2 bytes for PID */
+#define        SR_PID                  0x2A    /* 0x2A ~ 0x2B 2 bytes for PID */
 /* CHIP Revision register */
-#define        CHIPR           0x2C
+#define        SR_CHIPR                0x2C
 /* 0x2D --> 0xEF unused */
 /* USB Device Address */
-#define        USBDA           0xF0
+#define        SR_USBDA                0xF0
 #define                USBDA_USBFA             (0x7F << 0)
 /* RX packet Counter Reg */
-#define        RXC                     0xF1
+#define        SR_RXC                  0xF1
 /* Tx packet Counter & USB Status Reg */
-#define        TXC_USBS        0xF2
+#define        SR_TXC_USBS             0xF2
 #define                TXC_USBS_TXC0           (1 << 0)
 #define                TXC_USBS_TXC1           (1 << 1)
 #define                TXC_USBS_TXC2           (1 << 2)
 #define                TXC_USBS_SUSFLAG        (1 << 6)
 #define                TXC_USBS_RXFAULT        (1 << 7)
 /* USB Control register */
-#define        USBC            0xF4
+#define        SR_USBC                 0xF4
 #define                USBC_EP3NAK             (1 << 4)
 #define                USBC_EP3ACK             (1 << 5)
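
The rename above only adds an SR_ prefix to the sr9700 register indices, presumably to keep them from clashing with identically named macros elsewhere; the bit definitions keep their old names. A minimal sketch of how the renamed constants pair with the driver's existing sr_read_reg() helper (the sr9700_link_up() wrapper is invented for illustration):

static int sr9700_link_up(struct usbnet *dev)
{
        u8 nsr = 0;

        /* SR_NSR is the former bare NSR register index (0x01) */
        sr_read_reg(dev, SR_NSR, &nsr);

        return !!(nsr & NSR_LINKST);
}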
 
index 5ca97713bfb33b5d5a7770f3ae6a2000a19df423..059fdf1bf5eed9ff91c728c2a471f03791b5877c 100644 (file)
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-               {
-                       static bool warned;
-
-                       if (!warned) {
-                               warned = true;
-                               netdev_warn(dev,
-                                           "host using disabled UFO feature; please fix it\n");
-                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
-               }
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+               else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+                       hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1748,7 +1741,7 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
 
                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
-                       dev->hw_features |= NETIF_F_TSO
+                       dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
@@ -1758,9 +1751,11 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->hw_features |= NETIF_F_TSO6;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->hw_features |= NETIF_F_TSO_ECN;
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+                       dev->hw_features |= NETIF_F_UFO;
 
                if (gso)
-                       dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+                       dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
                /* (!csum && gso) case will be fixed by register_netdev() */
        }
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1798,7 +1793,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
-           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
                vi->big_packets = true;
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1994,9 +1990,9 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
-       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
+       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
-       VIRTIO_NET_F_GUEST_ECN,
+       VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
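
These hunks restore UFO (UDP Fragmentation Offload) negotiation in virtio_net: the host/guest feature bits are advertised again, NETIF_F_UFO is exposed, and the transmit path maps SKB_GSO_UDP to VIRTIO_NET_HDR_GSO_UDP once more. A rough sketch of that mapping (the helper name and the GSO_NONE fallback are illustrative, not the driver's actual code):

static u8 example_gso_type_to_virtio(struct sk_buff *skb)
{
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                return VIRTIO_NET_HDR_GSO_TCPV4;
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                return VIRTIO_NET_HDR_GSO_TCPV6;
        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                return VIRTIO_NET_HDR_GSO_UDP;  /* the re-added UFO case */
        return VIRTIO_NET_HDR_GSO_NONE;
}
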
index 7fbd89fbe107878f5c2be4358bb13420ca6389ec..a8c755dcab1417a27f8939096a23ded65a6af65d 100644 (file)
@@ -2432,10 +2432,10 @@ static void vxlan_sock_work(struct work_struct *work)
        dev_put(vxlan->dev);
 }
 
-static int vxlan_newlink(struct net *net, struct net_device *dev,
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
-       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_rdst *dst = &vxlan->default_dst;
        __u32 vni;
@@ -2445,7 +2445,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
        if (!data[IFLA_VXLAN_ID])
                return -EINVAL;
 
-       vxlan->net = dev_net(dev);
+       vxlan->net = src_net;
 
        vni = nla_get_u32(data[IFLA_VXLAN_ID]);
        dst->remote_vni = vni;
@@ -2481,7 +2481,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
        if (data[IFLA_VXLAN_LINK] &&
            (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
                struct net_device *lowerdev
-                        = __dev_get_by_index(net, dst->remote_ifindex);
+                        = __dev_get_by_index(src_net, dst->remote_ifindex);
 
                if (!lowerdev) {
                        pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2557,7 +2557,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
                vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
-       if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+       if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
                           vxlan->dst_port)) {
                pr_info("duplicate VNI %u\n", vni);
                return -EEXIST;
index 94e234975c6114d0e46fde4c71c34625596fe6f4..a2fdd15f285a2367a5b25c3f10e69a1327348ed1 100644 (file)
@@ -25,7 +25,7 @@ if WAN
 # There is no way to detect a comtrol sv11 - force it modular for now.
 config HOSTESS_SV11
        tristate "Comtrol Hostess SV-11 support"
-       depends on ISA && m && ISA_DMA_API && INET && HDLC
+       depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
        help
          Driver for Comtrol Hostess SV-11 network card which
          operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
 # The COSA/SRP driver has not been tested as non-modular yet.
 config COSA
        tristate "COSA/SRP sync serial boards support"
-       depends on ISA && m && ISA_DMA_API && HDLC
+       depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
        ---help---
          Driver for COSA and SRP synchronous serial boards.
 
@@ -87,7 +87,7 @@ config LANMEDIA
 # There is no way to detect a Sealevel board. Force it modular
 config SEALEVEL_4021
        tristate "Sealevel Systems 4021 support"
-       depends on ISA && m && ISA_DMA_API && INET && HDLC
+       depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
        help
          This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
 
index 9259a732e8a4a6d20740a1ad9170962040b8d1f8..037f74f0fcf68fee152822b94dc85872b88fab92 100644 (file)
@@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                goto err_rx_unbind;
        }
        queue->task = task;
+       get_task_struct(task);
 
        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
@@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif)
 
                if (queue->task) {
                        kthread_stop(queue->task);
+                       put_task_struct(queue->task);
                        queue->task = NULL;
                }
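
The get_task_struct()/put_task_struct() pair added here pins the kthread's task_struct for as long as it is published in queue->task, so the structure cannot be freed before xenvif_disconnect() has finished calling kthread_stop() on it. A simplified sketch of the resulting lifetime rule, reusing the names visible in the hunks (the example_* wrappers and the "%s-guest-rx" name format are illustrative, not code from the driver):

static int example_start_rx_kthread(struct xenvif_queue *queue)
{
        struct task_struct *task;

        task = kthread_create(xenvif_kthread_guest_rx, (void *)queue,
                              "%s-guest-rx", queue->name);
        if (IS_ERR(task))
                return PTR_ERR(task);

        queue->task = task;
        get_task_struct(task);          /* hold a reference while published */
        wake_up_process(task);

        return 0;
}

static void example_stop_rx_kthread(struct xenvif_queue *queue)
{
        if (!queue->task)
                return;

        kthread_stop(queue->task);      /* waits for the thread to return */
        put_task_struct(queue->task);   /* drop the reference taken above */
        queue->task = NULL;
}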
 
index 908e65e9b8219783ae4b99e5d06e4701478c830d..c8ce701a7efb35280d5d4cac33a7463e89b243d2 100644 (file)
@@ -2109,8 +2109,7 @@ int xenvif_kthread_guest_rx(void *data)
                 */
                if (unlikely(vif->disabled && queue->id == 0)) {
                        xenvif_carrier_off(vif);
-                       xenvif_rx_queue_purge(queue);
-                       continue;
+                       break;
                }
 
                if (!skb_queue_empty(&queue->rx_queue))
index df781cdf13c1871e265eb933e8baadde09e2dd0a..17ca98657a2866820233d2760f339d26a2278cf7 100644 (file)
@@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
        struct msi_msg msg;
        struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
 
+       if (desc->msi_attrib.is_msix)
+               return -EINVAL;
+
        irq = assign_irq(1, desc, &pos);
        if (irq < 0)
                return irq;
index e52356aa09b87adc778a29bbc3ca40c1ffdf638d..903d5078b5ede8872fc71ec662ecadf230e13950 100644 (file)
@@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,     PCI_DEVICE_ID_S3_868,           quirk_s3_64M);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,     PCI_DEVICE_ID_S3_968,           quirk_s3_64M);
 
+static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
+                    const char *name)
+{
+       u32 region;
+       struct pci_bus_region bus_region;
+       struct resource *res = dev->resource + pos;
+
+       pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
+
+       if (!region)
+               return;
+
+       res->name = pci_name(dev);
+       res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
+       res->flags |=
+               (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
+       region &= ~(size - 1);
+
+       /* Convert from PCI bus to resource space */
+       bus_region.start = region;
+       bus_region.end = region + size - 1;
+       pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+       dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
+                name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+}
+
 /*
  * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
  * ver. 1.33  20070103) don't set the correct ISA PCI region header info.
  * BAR0 should be 8 bytes; instead, it may be set to something like 8k
  * (which conflicts w/ BAR1's memory range).
+ *
+ * CS553x's ISA PCI BARs may also be read-only (ref:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
  */
 static void quirk_cs5536_vsa(struct pci_dev *dev)
 {
+       static char *name = "CS5536 ISA bridge";
+
        if (pci_resource_len(dev, 0) != 8) {
-               struct resource *res = &dev->resource[0];
-               res->end = res->start + 8 - 1;
-               dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n");
+               quirk_io(dev, 0,   8, name);    /* SMB */
+               quirk_io(dev, 1, 256, name);    /* GPIO */
+               quirk_io(dev, 2,  64, name);    /* MFGPT */
+               dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
+                        name);
        }
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
index c3a60b57a865eae77b232ff6d94a2700df3fa5d7..a6f116aa523532b56194407438419c5bd5d238d9 100644 (file)
@@ -414,6 +414,14 @@ config REGULATOR_MAX77802
          Exynos5420/Exynos5800 SoCs to control various voltages.
          It includes support for control of voltage and ramp speed.
 
+config REGULATOR_MAX77843
+       tristate "Maxim 77843 regulator"
+       depends on MFD_MAX77843
+       help
+         This driver controls a Maxim 77843 regulator.
+         The regulator includes two 'SAFEOUT' outputs for USB (Universal
+         Serial Bus) and is suitable for Exynos5433 SoC chips.
+
 config REGULATOR_MC13XXX_CORE
        tristate
 
@@ -433,6 +441,15 @@ config REGULATOR_MC13892
          Say y here to support the regulators found on the Freescale MC13892
          PMIC.
 
+config REGULATOR_MT6397
+       tristate "MediaTek MT6397 PMIC"
+       depends on MFD_MT6397
+       help
+         Say y here to enable the power regulators of the MediaTek MT6397
+         PMIC.
+         This driver supports control of the device's different power rails
+         through the regulator interface.
+
 config REGULATOR_PALMAS
        tristate "TI Palmas PMIC Regulators"
        depends on MFD_PALMAS
index 1f28ebfc6f3a09b3b555a23a2633a6402d1c141b..2c4da15e1545a71076616fbd94f3648ad453473a 100644 (file)
@@ -55,9 +55,11 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
 obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
 obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
 obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
+obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
 obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
 obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
+obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
 obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
 obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
index f23d7e1f2ee7c3602cd23bd8d30f305e5bf0d886..e4331f5e5d7d065e25fa835201359696c01bb2e9 100644 (file)
 
 #define AXP20X_FREQ_DCDC_MASK          0x0f
 
-#define AXP20X_DESC_IO(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg,   \
-                      _emask, _enable_val, _disable_val)                       \
+#define AXP20X_DESC_IO(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
+                      _ereg, _emask, _enable_val, _disable_val)                \
        [AXP20X_##_id] = {                                                      \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
+               .of_match       = of_match_ptr(_match),                         \
+               .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = AXP20X_##_id,                                 \
                .n_voltages     = (((_max) - (_min)) / (_step) + 1),            \
                .ops            = &axp20x_ops,                                  \
        }
 
-#define AXP20X_DESC(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg,     \
-                   _emask)                                                     \
+#define AXP20X_DESC(_id, _match, _supply, _min, _max, _step, _vreg, _vmask,    \
+                   _ereg, _emask)                                              \
        [AXP20X_##_id] = {                                                      \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
+               .of_match       = of_match_ptr(_match),                         \
+               .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = AXP20X_##_id,                                 \
                .n_voltages     = (((_max) - (_min)) / (_step) + 1),            \
                .ops            = &axp20x_ops,                                  \
        }
 
-#define AXP20X_DESC_FIXED(_id, _supply, _volt)                                 \
+#define AXP20X_DESC_FIXED(_id, _match, _supply, _volt)                         \
        [AXP20X_##_id] = {                                                      \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
+               .of_match       = of_match_ptr(_match),                         \
+               .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = AXP20X_##_id,                                 \
                .n_voltages     = 1,                                            \
                .ops            = &axp20x_ops_fixed                             \
        }
 
-#define AXP20X_DESC_TABLE(_id, _supply, _table, _vreg, _vmask, _ereg, _emask)  \
+#define AXP20X_DESC_TABLE(_id, _match, _supply, _table, _vreg, _vmask, _ereg,  \
+                         _emask)                                               \
        [AXP20X_##_id] = {                                                      \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
+               .of_match       = of_match_ptr(_match),                         \
+               .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = AXP20X_##_id,                                 \
                .n_voltages     = ARRAY_SIZE(_table),                           \
@@ -127,36 +136,20 @@ static struct regulator_ops axp20x_ops = {
 };
 
 static const struct regulator_desc axp20x_regulators[] = {
-       AXP20X_DESC(DCDC2, "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, 0x3f,
-                   AXP20X_PWR_OUT_CTRL, 0x10),
-       AXP20X_DESC(DCDC3, "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, 0x7f,
-                   AXP20X_PWR_OUT_CTRL, 0x02),
-       AXP20X_DESC_FIXED(LDO1, "acin", 1300),
-       AXP20X_DESC(LDO2, "ldo24in", 1800, 3300, 100, AXP20X_LDO24_V_OUT, 0xf0,
-                   AXP20X_PWR_OUT_CTRL, 0x04),
-       AXP20X_DESC(LDO3, "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, 0x7f,
-                   AXP20X_PWR_OUT_CTRL, 0x40),
-       AXP20X_DESC_TABLE(LDO4, "ldo24in", axp20x_ldo4_data, AXP20X_LDO24_V_OUT, 0x0f,
-                         AXP20X_PWR_OUT_CTRL, 0x08),
-       AXP20X_DESC_IO(LDO5, "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, 0xf0,
-                      AXP20X_GPIO0_CTRL, 0x07, AXP20X_IO_ENABLED,
-                      AXP20X_IO_DISABLED),
-};
-
-#define AXP_MATCH(_name, _id) \
-       [AXP20X_##_id] = { \
-               .name           = #_name, \
-               .driver_data    = (void *) &axp20x_regulators[AXP20X_##_id], \
-       }
-
-static struct of_regulator_match axp20x_matches[] = {
-       AXP_MATCH(dcdc2, DCDC2),
-       AXP_MATCH(dcdc3, DCDC3),
-       AXP_MATCH(ldo1, LDO1),
-       AXP_MATCH(ldo2, LDO2),
-       AXP_MATCH(ldo3, LDO3),
-       AXP_MATCH(ldo4, LDO4),
-       AXP_MATCH(ldo5, LDO5),
+       AXP20X_DESC(DCDC2, "dcdc2", "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT,
+                   0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
+       AXP20X_DESC(DCDC3, "dcdc3", "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT,
+                   0x7f, AXP20X_PWR_OUT_CTRL, 0x02),
+       AXP20X_DESC_FIXED(LDO1, "ldo1", "acin", 1300),
+       AXP20X_DESC(LDO2, "ldo2", "ldo24in", 1800, 3300, 100,
+                   AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
+       AXP20X_DESC(LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT,
+                   0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
+       AXP20X_DESC_TABLE(LDO4, "ldo4", "ldo24in", axp20x_ldo4_data,
+                         AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08),
+       AXP20X_DESC_IO(LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
+                      AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
+                      AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
 };
 
 static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
@@ -193,13 +186,6 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
        if (!regulators) {
                dev_warn(&pdev->dev, "regulators node not found\n");
        } else {
-               ret = of_regulator_match(&pdev->dev, regulators, axp20x_matches,
-                                        ARRAY_SIZE(axp20x_matches));
-               if (ret < 0) {
-                       dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
-                       return ret;
-               }
-
                dcdcfreq = 1500;
                of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq);
                ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
@@ -233,23 +219,17 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
 {
        struct regulator_dev *rdev;
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
-       struct regulator_config config = { };
-       struct regulator_init_data *init_data;
+       struct regulator_config config = {
+               .dev = pdev->dev.parent,
+               .regmap = axp20x->regmap,
+       };
        int ret, i;
        u32 workmode;
 
-       ret = axp20x_regulator_parse_dt(pdev);
-       if (ret)
-               return ret;
+       /* This only sets the dcdc freq. Ignore any errors */
+       axp20x_regulator_parse_dt(pdev);
 
        for (i = 0; i < AXP20X_REG_ID_MAX; i++) {
-               init_data = axp20x_matches[i].init_data;
-
-               config.dev = pdev->dev.parent;
-               config.init_data = init_data;
-               config.regmap = axp20x->regmap;
-               config.of_node = axp20x_matches[i].of_node;
-
                rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i],
                                               &config);
                if (IS_ERR(rdev)) {
@@ -259,7 +239,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
                        return PTR_ERR(rdev);
                }
 
-               ret = of_property_read_u32(axp20x_matches[i].of_node, "x-powers,dcdc-workmode",
+               ret = of_property_read_u32(rdev->dev.of_node,
+                                          "x-powers,dcdc-workmode",
                                           &workmode);
                if (!ret) {
                        if (axp20x_set_dcdc_workmode(rdev, i, workmode))
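
With .of_match and .regulators_node filled in on every regulator_desc, the regulator core now looks up the DT node and init data itself, which is why the of_regulator_match() table and the per-iteration config setup could be dropped. Stripped of the workmode handling, the registration loop above boils down to roughly this (a sketch of the hunk, not a drop-in replacement):

static int example_axp20x_register(struct platform_device *pdev)
{
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
        struct regulator_config config = {
                .dev    = pdev->dev.parent,
                .regmap = axp20x->regmap,
        };
        struct regulator_dev *rdev;
        int i;

        for (i = 0; i < AXP20X_REG_ID_MAX; i++) {
                /* init_data and of_node are resolved by the core */
                rdev = devm_regulator_register(&pdev->dev,
                                               &axp20x_regulators[i],
                                               &config);
                if (IS_ERR(rdev))
                        return PTR_ERR(rdev);
        }

        return 0;
}
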
index 9c48fb32f6601bf4065db65cd2a099bc57f3812d..b899947d839d87b03608d1f9bf4b4208cf57aa01 100644 (file)
@@ -632,49 +632,34 @@ static ssize_t regulator_bypass_show(struct device *dev,
 static DEVICE_ATTR(bypass, 0444,
                   regulator_bypass_show, NULL);
 
-/*
- * These are the only attributes are present for all regulators.
- * Other attributes are a function of regulator functionality.
- */
-static struct attribute *regulator_dev_attrs[] = {
-       &dev_attr_name.attr,
-       &dev_attr_num_users.attr,
-       &dev_attr_type.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(regulator_dev);
-
-static void regulator_dev_release(struct device *dev)
-{
-       struct regulator_dev *rdev = dev_get_drvdata(dev);
-       kfree(rdev);
-}
-
-static struct class regulator_class = {
-       .name = "regulator",
-       .dev_release = regulator_dev_release,
-       .dev_groups = regulator_dev_groups,
-};
-
 /* Calculate the new optimum regulator operating mode based on the new total
  * consumer load. All locks held by caller */
-static void drms_uA_update(struct regulator_dev *rdev)
+static int drms_uA_update(struct regulator_dev *rdev)
 {
        struct regulator *sibling;
        int current_uA = 0, output_uV, input_uV, err;
        unsigned int mode;
 
+       /*
+        * first check to see if we can set modes at all, otherwise just
+        * tell the consumer everything is OK.
+        */
        err = regulator_check_drms(rdev);
-       if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
-           (!rdev->desc->ops->get_voltage &&
-            !rdev->desc->ops->get_voltage_sel) ||
-           !rdev->desc->ops->set_mode)
-               return;
+       if (err < 0)
+               return 0;
+
+       if (!rdev->desc->ops->get_optimum_mode)
+               return 0;
+
+       if (!rdev->desc->ops->set_mode)
+               return -EINVAL;
 
        /* get output voltage */
        output_uV = _regulator_get_voltage(rdev);
-       if (output_uV <= 0)
-               return;
+       if (output_uV <= 0) {
+               rdev_err(rdev, "invalid output voltage found\n");
+               return -EINVAL;
+       }
 
        /* get input voltage */
        input_uV = 0;
@@ -682,8 +667,10 @@ static void drms_uA_update(struct regulator_dev *rdev)
                input_uV = regulator_get_voltage(rdev->supply);
        if (input_uV <= 0)
                input_uV = rdev->constraints->input_uV;
-       if (input_uV <= 0)
-               return;
+       if (input_uV <= 0) {
+               rdev_err(rdev, "invalid input voltage found\n");
+               return -EINVAL;
+       }
 
        /* calc total requested load */
        list_for_each_entry(sibling, &rdev->consumer_list, list)
@@ -695,8 +682,17 @@ static void drms_uA_update(struct regulator_dev *rdev)
 
        /* check the new mode is allowed */
        err = regulator_mode_constrain(rdev, &mode);
-       if (err == 0)
-               rdev->desc->ops->set_mode(rdev, mode);
+       if (err < 0) {
+               rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
+                        current_uA, input_uV, output_uV);
+               return err;
+       }
+
+       err = rdev->desc->ops->set_mode(rdev, mode);
+       if (err < 0)
+               rdev_err(rdev, "failed to set optimum mode %x\n", mode);
+
+       return err;
 }
 
 static int suspend_set_state(struct regulator_dev *rdev,
@@ -3026,75 +3022,13 @@ EXPORT_SYMBOL_GPL(regulator_get_mode);
 int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
 {
        struct regulator_dev *rdev = regulator->rdev;
-       struct regulator *consumer;
-       int ret, output_uV, input_uV = 0, total_uA_load = 0;
-       unsigned int mode;
-
-       if (rdev->supply)
-               input_uV = regulator_get_voltage(rdev->supply);
+       int ret;
 
        mutex_lock(&rdev->mutex);
-
-       /*
-        * first check to see if we can set modes at all, otherwise just
-        * tell the consumer everything is OK.
-        */
        regulator->uA_load = uA_load;
-       ret = regulator_check_drms(rdev);
-       if (ret < 0) {
-               ret = 0;
-               goto out;
-       }
-
-       if (!rdev->desc->ops->get_optimum_mode)
-               goto out;
-
-       /*
-        * we can actually do this so any errors are indicators of
-        * potential real failure.
-        */
-       ret = -EINVAL;
-
-       if (!rdev->desc->ops->set_mode)
-               goto out;
-
-       /* get output voltage */
-       output_uV = _regulator_get_voltage(rdev);
-       if (output_uV <= 0) {
-               rdev_err(rdev, "invalid output voltage found\n");
-               goto out;
-       }
-
-       /* No supply? Use constraint voltage */
-       if (input_uV <= 0)
-               input_uV = rdev->constraints->input_uV;
-       if (input_uV <= 0) {
-               rdev_err(rdev, "invalid input voltage found\n");
-               goto out;
-       }
-
-       /* calc total requested load for this regulator */
-       list_for_each_entry(consumer, &rdev->consumer_list, list)
-               total_uA_load += consumer->uA_load;
-
-       mode = rdev->desc->ops->get_optimum_mode(rdev,
-                                                input_uV, output_uV,
-                                                total_uA_load);
-       ret = regulator_mode_constrain(rdev, &mode);
-       if (ret < 0) {
-               rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
-                        total_uA_load, input_uV, output_uV);
-               goto out;
-       }
-
-       ret = rdev->desc->ops->set_mode(rdev, mode);
-       if (ret < 0) {
-               rdev_err(rdev, "failed to set optimum mode %x\n", mode);
-               goto out;
-       }
-       ret = mode;
-out:
+       ret = drms_uA_update(rdev);
        mutex_unlock(&rdev->mutex);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
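
The consumer-facing prototype of regulator_set_optimum_mode() is unchanged; it now just records the requested load and defers the mode calculation to drms_uA_update() under the rdev mutex. A hedged consumer-side sketch (the "vexample" supply name and the 100 mA load are placeholders):

static int example_consumer_set_load(struct device *dev)
{
        struct regulator *supply;
        int ret;

        supply = devm_regulator_get(dev, "vexample");
        if (IS_ERR(supply))
                return PTR_ERR(supply);

        ret = regulator_enable(supply);
        if (ret)
                return ret;

        /* report the expected load so the core can pick an operating mode */
        ret = regulator_set_optimum_mode(supply, 100000);
        return ret < 0 ? ret : 0;
}
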
@@ -3436,126 +3370,136 @@ int regulator_mode_to_status(unsigned int mode)
 }
 EXPORT_SYMBOL_GPL(regulator_mode_to_status);
 
+static struct attribute *regulator_dev_attrs[] = {
+       &dev_attr_name.attr,
+       &dev_attr_num_users.attr,
+       &dev_attr_type.attr,
+       &dev_attr_microvolts.attr,
+       &dev_attr_microamps.attr,
+       &dev_attr_opmode.attr,
+       &dev_attr_state.attr,
+       &dev_attr_status.attr,
+       &dev_attr_bypass.attr,
+       &dev_attr_requested_microamps.attr,
+       &dev_attr_min_microvolts.attr,
+       &dev_attr_max_microvolts.attr,
+       &dev_attr_min_microamps.attr,
+       &dev_attr_max_microamps.attr,
+       &dev_attr_suspend_standby_state.attr,
+       &dev_attr_suspend_mem_state.attr,
+       &dev_attr_suspend_disk_state.attr,
+       &dev_attr_suspend_standby_microvolts.attr,
+       &dev_attr_suspend_mem_microvolts.attr,
+       &dev_attr_suspend_disk_microvolts.attr,
+       &dev_attr_suspend_standby_mode.attr,
+       &dev_attr_suspend_mem_mode.attr,
+       &dev_attr_suspend_disk_mode.attr,
+       NULL
+};
+
 /*
  * To avoid cluttering sysfs (and memory) with useless state, only
  * create attributes that can be meaningfully displayed.
  */
-static int add_regulator_attributes(struct regulator_dev *rdev)
+static umode_t regulator_attr_is_visible(struct kobject *kobj,
+                                        struct attribute *attr, int idx)
 {
-       struct device *dev = &rdev->dev;
+       struct device *dev = kobj_to_dev(kobj);
+       struct regulator_dev *rdev = container_of(dev, struct regulator_dev, dev);
        const struct regulator_ops *ops = rdev->desc->ops;
-       int status = 0;
+       umode_t mode = attr->mode;
+
+       /* these three are always present */
+       if (attr == &dev_attr_name.attr ||
+           attr == &dev_attr_num_users.attr ||
+           attr == &dev_attr_type.attr)
+               return mode;
 
        /* some attributes need specific methods to be displayed */
-       if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
-           (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
-           (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
-               (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) {
-               status = device_create_file(dev, &dev_attr_microvolts);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->get_current_limit) {
-               status = device_create_file(dev, &dev_attr_microamps);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->get_mode) {
-               status = device_create_file(dev, &dev_attr_opmode);
-               if (status < 0)
-                       return status;
-       }
-       if (rdev->ena_pin || ops->is_enabled) {
-               status = device_create_file(dev, &dev_attr_state);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->get_status) {
-               status = device_create_file(dev, &dev_attr_status);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->get_bypass) {
-               status = device_create_file(dev, &dev_attr_bypass);
-               if (status < 0)
-                       return status;
+       if (attr == &dev_attr_microvolts.attr) {
+               if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
+                   (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
+                   (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
+                   (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1))
+                       return mode;
+               return 0;
        }
 
+       if (attr == &dev_attr_microamps.attr)
+               return ops->get_current_limit ? mode : 0;
+
+       if (attr == &dev_attr_opmode.attr)
+               return ops->get_mode ? mode : 0;
+
+       if (attr == &dev_attr_state.attr)
+               return (rdev->ena_pin || ops->is_enabled) ? mode : 0;
+
+       if (attr == &dev_attr_status.attr)
+               return ops->get_status ? mode : 0;
+
+       if (attr == &dev_attr_bypass.attr)
+               return ops->get_bypass ? mode : 0;
+
        /* some attributes are type-specific */
-       if (rdev->desc->type == REGULATOR_CURRENT) {
-               status = device_create_file(dev, &dev_attr_requested_microamps);
-               if (status < 0)
-                       return status;
-       }
+       if (attr == &dev_attr_requested_microamps.attr)
+               return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
 
        /* all the other attributes exist to support constraints;
         * don't show them if there are no constraints, or if the
         * relevant supporting methods are missing.
         */
        if (!rdev->constraints)
-               return status;
+               return 0;
 
        /* constraints need specific supporting methods */
-       if (ops->set_voltage || ops->set_voltage_sel) {
-               status = device_create_file(dev, &dev_attr_min_microvolts);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev, &dev_attr_max_microvolts);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->set_current_limit) {
-               status = device_create_file(dev, &dev_attr_min_microamps);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev, &dev_attr_max_microamps);
-               if (status < 0)
-                       return status;
-       }
-
-       status = device_create_file(dev, &dev_attr_suspend_standby_state);
-       if (status < 0)
-               return status;
-       status = device_create_file(dev, &dev_attr_suspend_mem_state);
-       if (status < 0)
-               return status;
-       status = device_create_file(dev, &dev_attr_suspend_disk_state);
-       if (status < 0)
-               return status;
+       if (attr == &dev_attr_min_microvolts.attr ||
+           attr == &dev_attr_max_microvolts.attr)
+               return (ops->set_voltage || ops->set_voltage_sel) ? mode : 0;
+
+       if (attr == &dev_attr_min_microamps.attr ||
+           attr == &dev_attr_max_microamps.attr)
+               return ops->set_current_limit ? mode : 0;
 
-       if (ops->set_suspend_voltage) {
-               status = device_create_file(dev,
-                               &dev_attr_suspend_standby_microvolts);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev,
-                               &dev_attr_suspend_mem_microvolts);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev,
-                               &dev_attr_suspend_disk_microvolts);
-               if (status < 0)
-                       return status;
-       }
-
-       if (ops->set_suspend_mode) {
-               status = device_create_file(dev,
-                               &dev_attr_suspend_standby_mode);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev,
-                               &dev_attr_suspend_mem_mode);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev,
-                               &dev_attr_suspend_disk_mode);
-               if (status < 0)
-                       return status;
-       }
-
-       return status;
+       if (attr == &dev_attr_suspend_standby_state.attr ||
+           attr == &dev_attr_suspend_mem_state.attr ||
+           attr == &dev_attr_suspend_disk_state.attr)
+               return mode;
+
+       if (attr == &dev_attr_suspend_standby_microvolts.attr ||
+           attr == &dev_attr_suspend_mem_microvolts.attr ||
+           attr == &dev_attr_suspend_disk_microvolts.attr)
+               return ops->set_suspend_voltage ? mode : 0;
+
+       if (attr == &dev_attr_suspend_standby_mode.attr ||
+           attr == &dev_attr_suspend_mem_mode.attr ||
+           attr == &dev_attr_suspend_disk_mode.attr)
+               return ops->set_suspend_mode ? mode : 0;
+
+       return mode;
 }
 
+static const struct attribute_group regulator_dev_group = {
+       .attrs = regulator_dev_attrs,
+       .is_visible = regulator_attr_is_visible,
+};
+
+static const struct attribute_group *regulator_dev_groups[] = {
+       &regulator_dev_group,
+       NULL
+};
+
+static void regulator_dev_release(struct device *dev)
+{
+       struct regulator_dev *rdev = dev_get_drvdata(dev);
+       kfree(rdev);
+}
+
+static struct class regulator_class = {
+       .name = "regulator",
+       .dev_release = regulator_dev_release,
+       .dev_groups = regulator_dev_groups,
+};
+
 static void rdev_init_debugfs(struct regulator_dev *rdev)
 {
        rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
@@ -3575,7 +3519,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
 /**
  * regulator_register - register regulator
  * @regulator_desc: regulator to register
- * @config: runtime configuration for regulator
+ * @cfg: runtime configuration for regulator
  *
  * Called by regulator drivers to register a regulator.
  * Returns a valid pointer to struct regulator_dev on success
@@ -3583,20 +3527,21 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
  */
 struct regulator_dev *
 regulator_register(const struct regulator_desc *regulator_desc,
-                  const struct regulator_config *config)
+                  const struct regulator_config *cfg)
 {
        const struct regulation_constraints *constraints = NULL;
        const struct regulator_init_data *init_data;
-       static atomic_t regulator_no = ATOMIC_INIT(0);
+       struct regulator_config *config = NULL;
+       static atomic_t regulator_no = ATOMIC_INIT(-1);
        struct regulator_dev *rdev;
        struct device *dev;
        int ret, i;
        const char *supply = NULL;
 
-       if (regulator_desc == NULL || config == NULL)
+       if (regulator_desc == NULL || cfg == NULL)
                return ERR_PTR(-EINVAL);
 
-       dev = config->dev;
+       dev = cfg->dev;
        WARN_ON(!dev);
 
        if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
@@ -3626,7 +3571,17 @@ regulator_register(const struct regulator_desc *regulator_desc,
        if (rdev == NULL)
                return ERR_PTR(-ENOMEM);
 
-       init_data = regulator_of_get_init_data(dev, regulator_desc,
+       /*
+        * Duplicate the config so the driver could override it after
+        * parsing init data.
+        */
+       config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
+       if (config == NULL) {
+               kfree(rdev);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       init_data = regulator_of_get_init_data(dev, regulator_desc, config,
                                               &rdev->dev.of_node);
        if (!init_data) {
                init_data = config->init_data;
@@ -3660,8 +3615,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
        /* register with sysfs */
        rdev->dev.class = &regulator_class;
        rdev->dev.parent = dev;
-       dev_set_name(&rdev->dev, "regulator.%d",
-                    atomic_inc_return(&regulator_no) - 1);
+       dev_set_name(&rdev->dev, "regulator.%lu",
+                   (unsigned long) atomic_inc_return(&regulator_no));
        ret = device_register(&rdev->dev);
        if (ret != 0) {
                put_device(&rdev->dev);
@@ -3694,11 +3649,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
        if (ret < 0)
                goto scrub;
 
-       /* add attributes supported by this regulator */
-       ret = add_regulator_attributes(rdev);
-       if (ret < 0)
-               goto scrub;
-
        if (init_data && init_data->supply_regulator)
                supply = init_data->supply_regulator;
        else if (regulator_desc->supply_name)
@@ -3754,6 +3704,7 @@ add_dev:
        rdev_init_debugfs(rdev);
 out:
        mutex_unlock(&regulator_list_mutex);
+       kfree(config);
        return rdev;
 
 unset_supplies:
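
The sysfs rework above replaces the long chain of conditional device_create_file() calls with one static attribute list whose per-device visibility is decided by an .is_visible() callback and exposed through the class's .dev_groups. A generic sketch of that pattern, assuming dev_attr_name and dev_attr_status are declared with DEVICE_ATTR_RO() elsewhere and using an invented visibility rule:

static struct attribute *example_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_status.attr,
        NULL
};

static umode_t example_attr_is_visible(struct kobject *kobj,
                                       struct attribute *attr, int idx)
{
        struct device *dev = kobj_to_dev(kobj);

        /* hide "status" on devices that carry no driver data to back it */
        if (attr == &dev_attr_status.attr && !dev_get_drvdata(dev))
                return 0;

        return attr->mode;
}

static const struct attribute_group example_group = {
        .attrs      = example_attrs,
        .is_visible = example_attr_is_visible,
};

static const struct attribute_group *example_groups[] = {
        &example_group,
        NULL
};

The groups array is then wired into the device's class (or bus/driver), so the core registers only the attributes the callback leaves visible.
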
index c78d2106d6cb66aa6b5ca1b8df25c244c73a3686..01343419555ee3363d7bc0d3c10df731fbd74587 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/regmap.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/of_gpio.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/da9211.h>
 #include "da9211-regulator.h"
@@ -276,7 +277,10 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
                        continue;
 
                pdata->init_data[n] = da9211_matches[i].init_data;
-
+               pdata->reg_node[n] = da9211_matches[i].of_node;
+               pdata->gpio_ren[n] =
+                       of_get_named_gpio(da9211_matches[i].of_node,
+                               "enable-gpios", 0);
                n++;
        }
 
@@ -364,7 +368,15 @@ static int da9211_regulator_init(struct da9211 *chip)
                config.dev = chip->dev;
                config.driver_data = chip;
                config.regmap = chip->regmap;
-               config.of_node = chip->dev->of_node;
+               config.of_node = chip->pdata->reg_node[i];
+
+               if (gpio_is_valid(chip->pdata->gpio_ren[i])) {
+                       config.ena_gpio = chip->pdata->gpio_ren[i];
+                       config.ena_gpio_initialized = true;
+               } else {
+                       config.ena_gpio = -EINVAL;
+                       config.ena_gpio_initialized = false;
+               }
 
                chip->rdev[i] = devm_regulator_register(chip->dev,
                        &da9211_regulators[i], &config);
index 6c43ab2d51211653eb1dd2374ff7844f6ff1c634..3c25db89a021af927f183b9f363b270b76e691c4 100644 (file)
@@ -147,7 +147,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
                return REGULATOR_MODE_NORMAL;
 }
 
-static int slew_rates[] = {
+static const int slew_rates[] = {
        64000,
        32000,
        16000,
@@ -296,7 +296,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
        return PTR_ERR_OR_ZERO(di->rdev);
 }
 
-static struct regmap_config fan53555_regmap_config = {
+static const struct regmap_config fan53555_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
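
Both tables can be const because the driver and devm_regmap_init_i2c() only read them; the regmap API takes a const struct regmap_config pointer. A tiny sketch (the example_* names are illustrative):

static const struct regmap_config example_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
};

static int example_probe(struct i2c_client *i2c,
                         const struct i2c_device_id *id)
{
        struct regmap *regmap = devm_regmap_init_i2c(i2c,
                                                     &example_regmap_config);

        return PTR_ERR_OR_ZERO(regmap);
}
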
index 80ba2a35a04bb3e6c7164380fe44d3220c54dafd..c74ac873402370b5057464b22a694672cccf4d73 100644 (file)
@@ -38,11 +38,13 @@ struct regulator {
 #ifdef CONFIG_OF
 struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                                 const struct regulator_desc *desc,
+                                struct regulator_config *config,
                                 struct device_node **node);
 #else
 static inline struct regulator_init_data *
 regulator_of_get_init_data(struct device *dev,
                           const struct regulator_desc *desc,
+                          struct regulator_config *config,
                           struct device_node **node)
 {
        return NULL;
index 92fefd98da58e146755210cad01fa8639d1f5c02..6e3a15fe00f1ce37d51d47d0c66ee26c39a8b367 100644 (file)
@@ -177,8 +177,10 @@ static int isl9305_i2c_probe(struct i2c_client *i2c,
 
 #ifdef CONFIG_OF
 static const struct of_device_id isl9305_dt_ids[] = {
-       { .compatible = "isl,isl9305" },
-       { .compatible = "isl,isl9305h" },
+       { .compatible = "isl,isl9305" }, /* for backward compat., don't use */
+       { .compatible = "isil,isl9305" },
+       { .compatible = "isl,isl9305h" }, /* for backward compat., don't use */
+       { .compatible = "isil,isl9305h" },
        {},
 };
 #endif
index 021d64d856bb68d72745e83a8258d3c228412943..3de328ab41f3c69dde58349d6aa4fb0d18736617 100644 (file)
@@ -106,7 +106,6 @@ struct lp872x {
        struct device *dev;
        enum lp872x_id chipid;
        struct lp872x_platform_data *pdata;
-       struct regulator_dev **regulators;
        int num_regulators;
        enum lp872x_dvs_state dvs_pin;
        int dvs_gpio;
@@ -801,8 +800,6 @@ static int lp872x_regulator_register(struct lp872x *lp)
                        dev_err(lp->dev, "regulator register err");
                        return PTR_ERR(rdev);
                }
-
-               *(lp->regulators + i) = rdev;
        }
 
        return 0;
@@ -906,7 +903,7 @@ static struct lp872x_platform_data
 static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
 {
        struct lp872x *lp;
-       int ret, size, num_regulators;
+       int ret;
        const int lp872x_num_regulators[] = {
                [LP8720] = LP8720_NUM_REGULATORS,
                [LP8725] = LP8725_NUM_REGULATORS,
@@ -918,38 +915,27 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
 
        lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
        if (!lp)
-               goto err_mem;
-
-       num_regulators = lp872x_num_regulators[id->driver_data];
-       size = sizeof(struct regulator_dev *) * num_regulators;
+               return -ENOMEM;
 
-       lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL);
-       if (!lp->regulators)
-               goto err_mem;
+       lp->num_regulators = lp872x_num_regulators[id->driver_data];
 
        lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
        if (IS_ERR(lp->regmap)) {
                ret = PTR_ERR(lp->regmap);
                dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
-               goto err_dev;
+               return ret;
        }
 
        lp->dev = &cl->dev;
        lp->pdata = dev_get_platdata(&cl->dev);
        lp->chipid = id->driver_data;
-       lp->num_regulators = num_regulators;
        i2c_set_clientdata(cl, lp);
 
        ret = lp872x_config(lp);
        if (ret)
-               goto err_dev;
+               return ret;
 
        return lp872x_regulator_register(lp);
-
-err_mem:
-       return -ENOMEM;
-err_dev:
-       return ret;
 }
 
 static const struct of_device_id lp872x_dt_ids[] = {
index bf9a44c5fdd299872a1015b50b097aa8dc0e9fc6..b3678d289619330ffca48f338b9ddf362cd76986 100644 (file)
@@ -103,6 +103,8 @@ static struct regulator_ops max14577_charger_ops = {
 static const struct regulator_desc max14577_supported_regulators[] = {
        [MAX14577_SAFEOUT] = {
                .name           = "SAFEOUT",
+               .of_match       = of_match_ptr("SAFEOUT"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX14577_SAFEOUT,
                .ops            = &max14577_safeout_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -114,6 +116,8 @@ static const struct regulator_desc max14577_supported_regulators[] = {
        },
        [MAX14577_CHARGER] = {
                .name           = "CHARGER",
+               .of_match       = of_match_ptr("CHARGER"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX14577_CHARGER,
                .ops            = &max14577_charger_ops,
                .type           = REGULATOR_CURRENT,
@@ -137,6 +141,8 @@ static struct regulator_ops max77836_ldo_ops = {
 static const struct regulator_desc max77836_supported_regulators[] = {
        [MAX14577_SAFEOUT] = {
                .name           = "SAFEOUT",
+               .of_match       = of_match_ptr("SAFEOUT"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX14577_SAFEOUT,
                .ops            = &max14577_safeout_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -148,6 +154,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
        },
        [MAX14577_CHARGER] = {
                .name           = "CHARGER",
+               .of_match       = of_match_ptr("CHARGER"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX14577_CHARGER,
                .ops            = &max14577_charger_ops,
                .type           = REGULATOR_CURRENT,
@@ -157,6 +165,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
        },
        [MAX77836_LDO1] = {
                .name           = "LDO1",
+               .of_match       = of_match_ptr("LDO1"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX77836_LDO1,
                .ops            = &max77836_ldo_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -171,6 +181,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
        },
        [MAX77836_LDO2] = {
                .name           = "LDO2",
+               .of_match       = of_match_ptr("LDO2"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX77836_LDO2,
                .ops            = &max77836_ldo_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -198,43 +210,6 @@ static struct of_regulator_match max77836_regulator_matches[] = {
        { .name = "LDO2", },
 };
 
-static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
-               enum maxim_device_type dev_type)
-{
-       int ret;
-       struct device_node *np;
-       struct of_regulator_match *regulator_matches;
-       unsigned int regulator_matches_size;
-
-       np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
-       if (!np) {
-               dev_err(&pdev->dev, "Failed to get child OF node for regulators\n");
-               return -EINVAL;
-       }
-
-       switch (dev_type) {
-       case MAXIM_DEVICE_TYPE_MAX77836:
-               regulator_matches = max77836_regulator_matches;
-               regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches);
-               break;
-       case MAXIM_DEVICE_TYPE_MAX14577:
-       default:
-               regulator_matches = max14577_regulator_matches;
-               regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches);
-       }
-
-       ret = of_regulator_match(&pdev->dev, np, regulator_matches,
-                       regulator_matches_size);
-       if (ret < 0)
-               dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
-       else
-               ret = 0;
-
-       of_node_put(np);
-
-       return ret;
-}
-
 static inline struct regulator_init_data *match_init_data(int index,
                enum maxim_device_type dev_type)
 {
@@ -261,11 +236,6 @@ static inline struct device_node *match_of_node(int index,
        }
 }
 #else /* CONFIG_OF */
-static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
-               enum maxim_device_type dev_type)
-{
-       return 0;
-}
 static inline struct regulator_init_data *match_init_data(int index,
                enum maxim_device_type dev_type)
 {
@@ -308,16 +278,12 @@ static int max14577_regulator_probe(struct platform_device *pdev)
 {
        struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
        struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
-       int i, ret;
+       int i, ret = 0;
        struct regulator_config config = {};
        const struct regulator_desc *supported_regulators;
        unsigned int supported_regulators_size;
        enum maxim_device_type dev_type = max14577->dev_type;
 
-       ret = max14577_regulator_dt_parse_pdata(pdev, dev_type);
-       if (ret)
-               return ret;
-
        switch (dev_type) {
        case MAXIM_DEVICE_TYPE_MAX77836:
                supported_regulators = max77836_supported_regulators;
@@ -329,7 +295,7 @@ static int max14577_regulator_probe(struct platform_device *pdev)
                supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators);
        }
 
-       config.dev = &pdev->dev;
+       config.dev = max14577->dev;
        config.driver_data = max14577;
 
        for (i = 0; i < supported_regulators_size; i++) {
index 10d206266ac27770ec25efa16e94f5eb658a891b..15fb1416bfbde99c9724644dd25bf106695fda97 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/bug.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #define MAX77686_DVS_MINUV     600000
 #define MAX77686_DVS_UVSTEP    12500
 
+/*
+ * Value for configuring buck[89] and LDO{20,21,22} as GPIO control.
+ * It is the same as 'off' for other regulators.
+ */
+#define MAX77686_GPIO_CONTROL          0x0
 /*
  * Values used for configuring LDOs and bucks.
  * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26
@@ -82,6 +88,8 @@ enum max77686_ramp_rate {
 };
 
 struct max77686_data {
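+       /* Bitmap of regulators whose enable line is driven by a GPIO */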
+       u64 gpio_enabled:MAX77686_REGULATORS;
+
        /* Array indexed by regulator id */
        unsigned int opmode[MAX77686_REGULATORS];
 };
@@ -100,6 +108,26 @@ static unsigned int max77686_get_opmode_shift(int id)
        }
 }
 
+/*
+ * When a regulator is configured for GPIO control, GPIO control
+ * replaces "normal" mode. Any change from low-power mode to normal
+ * should therefore switch to GPIO control instead.
+ * Map normal mode to the proper value for such regulators.
+ */
+static unsigned int max77686_map_normal_mode(struct max77686_data *max77686,
+                                            int id)
+{
+       switch (id) {
+       case MAX77686_BUCK8:
+       case MAX77686_BUCK9:
+       case MAX77686_LDO20 ... MAX77686_LDO22:
+               if (max77686->gpio_enabled & (1 << id))
+                       return MAX77686_GPIO_CONTROL;
+       }
+
+       return MAX77686_NORMAL;
+}
+
 /* Some BUCKs and LDOs support Normal[ON/OFF] mode during suspend */
 static int max77686_set_suspend_disable(struct regulator_dev *rdev)
 {
@@ -136,7 +164,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
                val = MAX77686_LDO_LOWPOWER_PWRREQ;
                break;
        case REGULATOR_MODE_NORMAL:                     /* ON in Normal Mode */
-               val = MAX77686_NORMAL;
+               val = max77686_map_normal_mode(max77686, id);
                break;
        default:
                pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -160,7 +188,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
 {
        unsigned int val;
        struct max77686_data *max77686 = rdev_get_drvdata(rdev);
-       int ret;
+       int ret, id = rdev_get_id(rdev);
 
        switch (mode) {
        case REGULATOR_MODE_STANDBY:                    /* switch off */
@@ -170,7 +198,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
                val = MAX77686_LDO_LOWPOWER_PWRREQ;
                break;
        case REGULATOR_MODE_NORMAL:                     /* ON in Normal Mode */
-               val = MAX77686_NORMAL;
+               val = max77686_map_normal_mode(max77686, id);
                break;
        default:
                pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -184,7 +212,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
        if (ret)
                return ret;
 
-       max77686->opmode[rdev_get_id(rdev)] = val;
+       max77686->opmode[id] = val;
        return 0;
 }
 
@@ -197,7 +225,7 @@ static int max77686_enable(struct regulator_dev *rdev)
        shift = max77686_get_opmode_shift(id);
 
        if (max77686->opmode[id] == MAX77686_OFF_PWRREQ)
-               max77686->opmode[id] = MAX77686_NORMAL;
+               max77686->opmode[id] = max77686_map_normal_mode(max77686, id);
 
        return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
                                  rdev->desc->enable_mask,
@@ -229,6 +257,36 @@ static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
                                  MAX77686_RAMP_RATE_MASK, ramp_value << 6);
 }
 
+static int max77686_of_parse_cb(struct device_node *np,
+               const struct regulator_desc *desc,
+               struct regulator_config *config)
+{
+       struct max77686_data *max77686 = config->driver_data;
+
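+       /* Only BUCK8/9 and LDO20-LDO22 support GPIO-controlled enable */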
+       switch (desc->id) {
+       case MAX77686_BUCK8:
+       case MAX77686_BUCK9:
+       case MAX77686_LDO20 ... MAX77686_LDO22:
+               config->ena_gpio = of_get_named_gpio(np,
+                                       "maxim,ena-gpios", 0);
+               config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
+               config->ena_gpio_initialized = true;
+               break;
+       default:
+               return 0;
+       }
+
+       if (gpio_is_valid(config->ena_gpio)) {
+               max77686->gpio_enabled |= (1 << desc->id);
+
+               return regmap_update_bits(config->regmap, desc->enable_reg,
+                                         desc->enable_mask,
+                                         MAX77686_GPIO_CONTROL);
+       }
+
+       return 0;
+}
+
 static struct regulator_ops max77686_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
@@ -283,6 +341,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
        .name           = "LDO"#num,                                    \
        .of_match       = of_match_ptr("LDO"#num),                      \
        .regulators_node        = of_match_ptr("voltage-regulators"),   \
+       .of_parse_cb    = max77686_of_parse_cb,                         \
        .id             = MAX77686_LDO##num,                            \
        .ops            = &max77686_ops,                                \
        .type           = REGULATOR_VOLTAGE,                            \
@@ -355,6 +414,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
        .name           = "BUCK"#num,                                   \
        .of_match       = of_match_ptr("BUCK"#num),                     \
        .regulators_node        = of_match_ptr("voltage-regulators"),   \
+       .of_parse_cb    = max77686_of_parse_cb,                         \
        .id             = MAX77686_BUCK##num,                           \
        .ops            = &max77686_ops,                                \
        .type           = REGULATOR_VOLTAGE,                            \
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c
new file mode 100644 (file)
index 0000000..c132ef5
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * max77843.c - Regulator driver for the Maxim MAX77843
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/max77843-private.h>
+#include <linux/regulator/of_regulator.h>
+
+enum max77843_regulator_type {
+       MAX77843_SAFEOUT1 = 0,
+       MAX77843_SAFEOUT2,
+       MAX77843_CHARGER,
+
+       MAX77843_NUM,
+};
+
+static const unsigned int max77843_safeout_voltage_table[] = {
+       4850000,
+       4900000,
+       4950000,
+       3300000,
+};
+
+static int max77843_reg_is_enabled(struct regulator_dev *rdev)
+{
+       struct regmap *regmap = rdev->regmap;
+       int ret;
+       unsigned int reg;
+
+       ret = regmap_read(regmap, rdev->desc->enable_reg, &reg);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to read charger register\n");
+               return ret;
+       }
+
+       return (reg & rdev->desc->enable_mask) == rdev->desc->enable_mask;
+}
+
+static int max77843_reg_get_current_limit(struct regulator_dev *rdev)
+{
+       struct regmap *regmap = rdev->regmap;
+       unsigned int chg_min_uA = rdev->constraints->min_uA;
+       unsigned int chg_max_uA = rdev->constraints->max_uA;
+       unsigned int val;
+       int ret;
+       unsigned int reg, sel;
+
+       ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to read charger register\n");
+               return ret;
+       }
+
+       sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK;
+
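+       /*
+        * Selectors 0x00-0x02 all map to the minimum fast-charge current;
+        * each selector above that adds one fast-charge current step.
+        */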
+       if (sel < 0x03)
+               sel = 0;
+       else
+               sel -= 2;
+
+       val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel;
+       if (val > chg_max_uA)
+               return -EINVAL;
+
+       return val;
+}
+
+static int max77843_reg_set_current_limit(struct regulator_dev *rdev,
+               int min_uA, int max_uA)
+{
+       struct regmap *regmap = rdev->regmap;
+       unsigned int chg_min_uA = rdev->constraints->min_uA;
+       int sel = 0;
+
+       while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA)
+               sel++;
+
+       if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA)
+               return -EINVAL;
+
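+       /* Shift into the register encoding: the minimum maps to selector 0x02 */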
+       sel += 2;
+
+       return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel);
+}
+
+static struct regulator_ops max77843_charger_ops = {
+       .is_enabled             = max77843_reg_is_enabled,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .get_current_limit      = max77843_reg_get_current_limit,
+       .set_current_limit      = max77843_reg_set_current_limit,
+};
+
+static struct regulator_ops max77843_regulator_ops = {
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .list_voltage           = regulator_list_voltage_table,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc max77843_supported_regulators[] = {
+       [MAX77843_SAFEOUT1] = {
+               .name           = "SAFEOUT1",
+               .id             = MAX77843_SAFEOUT1,
+               .ops            = &max77843_regulator_ops,
+               .of_match       = of_match_ptr("SAFEOUT1"),
+               .regulators_node = of_match_ptr("regulators"),
+               .type           = REGULATOR_VOLTAGE,
+               .owner          = THIS_MODULE,
+               .n_voltages     = ARRAY_SIZE(max77843_safeout_voltage_table),
+               .volt_table     = max77843_safeout_voltage_table,
+               .enable_reg     = MAX77843_SYS_REG_SAFEOUTCTRL,
+               .enable_mask    = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1,
+               .vsel_reg       = MAX77843_SYS_REG_SAFEOUTCTRL,
+               .vsel_mask      = MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK,
+       },
+       [MAX77843_SAFEOUT2] = {
+               .name           = "SAFEOUT2",
+               .id             = MAX77843_SAFEOUT2,
+               .ops            = &max77843_regulator_ops,
+               .of_match       = of_match_ptr("SAFEOUT2"),
+               .regulators_node = of_match_ptr("regulators"),
+               .type           = REGULATOR_VOLTAGE,
+               .owner          = THIS_MODULE,
+               .n_voltages     = ARRAY_SIZE(max77843_safeout_voltage_table),
+               .volt_table     = max77843_safeout_voltage_table,
+               .enable_reg     = MAX77843_SYS_REG_SAFEOUTCTRL,
+               .enable_mask    = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2,
+               .vsel_reg       = MAX77843_SYS_REG_SAFEOUTCTRL,
+               .vsel_mask      = MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK,
+       },
+       [MAX77843_CHARGER] = {
+               .name           = "CHARGER",
+               .id             = MAX77843_CHARGER,
+               .ops            = &max77843_charger_ops,
+               .of_match       = of_match_ptr("CHARGER"),
+               .regulators_node = of_match_ptr("regulators"),
+               .type           = REGULATOR_CURRENT,
+               .owner          = THIS_MODULE,
+               .enable_reg     = MAX77843_CHG_REG_CHG_CNFG_00,
+               .enable_mask    = MAX77843_CHG_MASK,
+       },
+};
+
+static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id)
+{
+       switch (reg_id) {
+       case MAX77843_SAFEOUT1:
+       case MAX77843_SAFEOUT2:
+               return max77843->regmap;
+       case MAX77843_CHARGER:
+               return max77843->regmap_chg;
+       default:
+               return max77843->regmap;
+       }
+}
+
+static int max77843_regulator_probe(struct platform_device *pdev)
+{
+       struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
+       struct regulator_config config = {};
+       int i;
+
+       config.dev = max77843->dev;
+       config.driver_data = max77843;
+
+       for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) {
+               struct regulator_dev *regulator;
+
+               config.regmap = max77843_get_regmap(max77843,
+                               max77843_supported_regulators[i].id);
+
+               regulator = devm_regulator_register(&pdev->dev,
+                               &max77843_supported_regulators[i], &config);
+               if (IS_ERR(regulator)) {
+                       dev_err(&pdev->dev,
+                                       "Failed to register regulator-%d\n", i);
+                       return PTR_ERR(regulator);
+               }
+       }
+
+       return 0;
+}
+
+static const struct platform_device_id max77843_regulator_id[] = {
+       { "max77843-regulator", },
+       { /* sentinel */ },
+};
+
+static struct platform_driver max77843_regulator_driver = {
+       .driver = {
+               .name = "max77843-regulator",
+       },
+       .probe          = max77843_regulator_probe,
+       .id_table       = max77843_regulator_id,
+};
+
+static int __init max77843_regulator_init(void)
+{
+       return platform_driver_register(&max77843_regulator_driver);
+}
+subsys_initcall(max77843_regulator_init);
+
+static void __exit max77843_regulator_exit(void)
+{
+       platform_driver_unregister(&max77843_regulator_driver);
+}
+module_exit(max77843_regulator_exit);
+
+MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
+MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
+MODULE_DESCRIPTION("Maxim MAX77843 regulator driver");
+MODULE_LICENSE("GPL");
index c8bddcc8f911d11d90e32d18e0d191234bae1656..81229579ece9105846b4187d4e3ccacbfbf9a89c 100644 (file)
@@ -115,7 +115,7 @@ static unsigned int max8649_get_mode(struct regulator_dev *rdev)
        return REGULATOR_MODE_NORMAL;
 }
 
-static struct regulator_ops max8649_dcdc_ops = {
+static const struct regulator_ops max8649_dcdc_ops = {
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .list_voltage   = regulator_list_voltage_linear,
@@ -143,7 +143,7 @@ static struct regulator_desc dcdc_desc = {
        .enable_is_inverted = true,
 };
 
-static struct regmap_config max8649_regmap_config = {
+static const struct regmap_config max8649_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
new file mode 100644 (file)
index 0000000..a5b2f47
--- /dev/null
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu <flora.fu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6397/registers.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6397-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * MT6397 regulators' information
+ *
+ * @desc: standard fields of regulator description.
+ * @qi: Mask for querying the enable status of the regulator
+ * @vselon_reg: Register section for the buck's hardware control mode
+ * @vselctrl_reg: Register for controlling the buck control mode.
+ * @vselctrl_mask: Mask for querying the buck's voltage control mode.
+ */
+struct mt6397_regulator_info {
+       struct regulator_desc desc;
+       u32 qi;
+       u32 vselon_reg;
+       u32 vselctrl_reg;
+       u32 vselctrl_mask;
+};
+
+#define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg,   \
+               vosel, vosel_mask, voselon, vosel_ctrl)                 \
+[MT6397_ID_##vreg] = {                                                 \
+       .desc = {                                                       \
+               .name = #vreg,                                          \
+               .of_match = of_match_ptr(match),                        \
+               .ops = &mt6397_volt_range_ops,                          \
+               .type = REGULATOR_VOLTAGE,                              \
+               .id = MT6397_ID_##vreg,                                 \
+               .owner = THIS_MODULE,                                   \
+               .n_voltages = (max - min)/step + 1,                     \
+               .linear_ranges = volt_ranges,                           \
+               .n_linear_ranges = ARRAY_SIZE(volt_ranges),             \
+               .vsel_reg = vosel,                                      \
+               .vsel_mask = vosel_mask,                                \
+               .enable_reg = enreg,                                    \
+               .enable_mask = BIT(0),                                  \
+       },                                                              \
+       .qi = BIT(13),                                                  \
+       .vselon_reg = voselon,                                          \
+       .vselctrl_reg = vosel_ctrl,                                     \
+       .vselctrl_mask = BIT(1),                                        \
+}
+
+#define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel,   \
+               vosel_mask)                                             \
+[MT6397_ID_##vreg] = {                                                 \
+       .desc = {                                                       \
+               .name = #vreg,                                          \
+               .of_match = of_match_ptr(match),                        \
+               .ops = &mt6397_volt_table_ops,                          \
+               .type = REGULATOR_VOLTAGE,                              \
+               .id = MT6397_ID_##vreg,                                 \
+               .owner = THIS_MODULE,                                   \
+               .n_voltages = ARRAY_SIZE(ldo_volt_table),               \
+               .volt_table = ldo_volt_table,                           \
+               .vsel_reg = vosel,                                      \
+               .vsel_mask = vosel_mask,                                \
+               .enable_reg = enreg,                                    \
+               .enable_mask = BIT(enbit),                              \
+       },                                                              \
+       .qi = BIT(15),                                                  \
+}
+
+#define MT6397_REG_FIXED(match, vreg, enreg, enbit, volt)              \
+[MT6397_ID_##vreg] = {                                                 \
+       .desc = {                                                       \
+               .name = #vreg,                                          \
+               .of_match = of_match_ptr(match),                        \
+               .ops = &mt6397_volt_fixed_ops,                          \
+               .type = REGULATOR_VOLTAGE,                              \
+               .id = MT6397_ID_##vreg,                                 \
+               .owner = THIS_MODULE,                                   \
+               .n_voltages = 1,                                        \
+               .enable_reg = enreg,                                    \
+               .enable_mask = BIT(enbit),                              \
+               .min_uV = volt,                                         \
+       },                                                              \
+       .qi = BIT(15),                                                  \
+}
+
+static const struct regulator_linear_range buck_volt_range1[] = {
+       REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range2[] = {
+       REGULATOR_LINEAR_RANGE(800000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range3[] = {
+       REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000),
+};
+
+static const u32 ldo_volt_table1[] = {
+       1500000, 1800000, 2500000, 2800000,
+};
+
+static const u32 ldo_volt_table2[] = {
+       1800000, 3300000,
+};
+
+static const u32 ldo_volt_table3[] = {
+       3000000, 3300000,
+};
+
+static const u32 ldo_volt_table4[] = {
+       1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table5[] = {
+       1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table5_v2[] = {
+       1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table6[] = {
+       1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
+};
+
+static const u32 ldo_volt_table7[] = {
+       1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static int mt6397_get_status(struct regulator_dev *rdev)
+{
+       int ret;
+       u32 regval;
+       struct mt6397_regulator_info *info = rdev_get_drvdata(rdev);
+
+       ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
+       if (ret != 0) {
+               dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+               return ret;
+       }
+
+       return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static struct regulator_ops mt6397_volt_range_ops = {
+       .list_voltage = regulator_list_voltage_linear_range,
+       .map_voltage = regulator_map_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_status = mt6397_get_status,
+};
+
+static struct regulator_ops mt6397_volt_table_ops = {
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_iterate,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_status = mt6397_get_status,
+};
+
+static struct regulator_ops mt6397_volt_fixed_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_status = mt6397_get_status,
+};
+
+/* The array is indexed by id(MT6397_ID_XXX) */
+static struct mt6397_regulator_info mt6397_regulators[] = {
+       MT6397_BUCK("buck_vpca15", VPCA15, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VCA15_CON7, MT6397_VCA15_CON9, 0x7f,
+               MT6397_VCA15_CON10, MT6397_VCA15_CON5),
+       MT6397_BUCK("buck_vpca7", VPCA7, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VPCA7_CON7, MT6397_VPCA7_CON9, 0x7f,
+               MT6397_VPCA7_CON10, MT6397_VPCA7_CON5),
+       MT6397_BUCK("buck_vsramca15", VSRAMCA15, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VSRMCA15_CON7, MT6397_VSRMCA15_CON9,
+               0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5),
+       MT6397_BUCK("buck_vsramca7", VSRAMCA7, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VSRMCA7_CON7, MT6397_VSRMCA7_CON9,
+               0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5),
+       MT6397_BUCK("buck_vcore", VCORE, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VCORE_CON7, MT6397_VCORE_CON9, 0x7f,
+               MT6397_VCORE_CON10, MT6397_VCORE_CON5),
+       MT6397_BUCK("buck_vgpu", VGPU, 700000, 1493750, 6250, buck_volt_range1,
+               MT6397_VGPU_CON7, MT6397_VGPU_CON9, 0x7f,
+               MT6397_VGPU_CON10, MT6397_VGPU_CON5),
+       MT6397_BUCK("buck_vdrm", VDRM, 800000, 1593750, 6250, buck_volt_range2,
+               MT6397_VDRM_CON7, MT6397_VDRM_CON9, 0x7f,
+               MT6397_VDRM_CON10, MT6397_VDRM_CON5),
+       MT6397_BUCK("buck_vio18", VIO18, 1500000, 2120000, 20000,
+               buck_volt_range3, MT6397_VIO18_CON7, MT6397_VIO18_CON9, 0x1f,
+               MT6397_VIO18_CON10, MT6397_VIO18_CON5),
+       MT6397_REG_FIXED("ldo_vtcxo", VTCXO, MT6397_ANALDO_CON0, 10, 2800000),
+       MT6397_REG_FIXED("ldo_va28", VA28, MT6397_ANALDO_CON1, 14, 2800000),
+       MT6397_LDO("ldo_vcama", VCAMA, ldo_volt_table1,
+               MT6397_ANALDO_CON2, 15, MT6397_ANALDO_CON6, 0xC0),
+       MT6397_REG_FIXED("ldo_vio28", VIO28, MT6397_DIGLDO_CON0, 14, 2800000),
+       MT6397_REG_FIXED("ldo_vusb", VUSB, MT6397_DIGLDO_CON1, 14, 3300000),
+       MT6397_LDO("ldo_vmc", VMC, ldo_volt_table2,
+               MT6397_DIGLDO_CON2, 12, MT6397_DIGLDO_CON29, 0x10),
+       MT6397_LDO("ldo_vmch", VMCH, ldo_volt_table3,
+               MT6397_DIGLDO_CON3, 14, MT6397_DIGLDO_CON17, 0x80),
+       MT6397_LDO("ldo_vemc3v3", VEMC3V3, ldo_volt_table3,
+               MT6397_DIGLDO_CON4, 14, MT6397_DIGLDO_CON18, 0x10),
+       MT6397_LDO("ldo_vgp1", VGP1, ldo_volt_table4,
+               MT6397_DIGLDO_CON5, 15, MT6397_DIGLDO_CON19, 0xE0),
+       MT6397_LDO("ldo_vgp2", VGP2, ldo_volt_table5,
+               MT6397_DIGLDO_CON6, 15, MT6397_DIGLDO_CON20, 0xE0),
+       MT6397_LDO("ldo_vgp3", VGP3, ldo_volt_table5,
+               MT6397_DIGLDO_CON7, 15, MT6397_DIGLDO_CON21, 0xE0),
+       MT6397_LDO("ldo_vgp4", VGP4, ldo_volt_table5,
+               MT6397_DIGLDO_CON8, 15, MT6397_DIGLDO_CON22, 0xE0),
+       MT6397_LDO("ldo_vgp5", VGP5, ldo_volt_table6,
+               MT6397_DIGLDO_CON9, 15, MT6397_DIGLDO_CON23, 0xE0),
+       MT6397_LDO("ldo_vgp6", VGP6, ldo_volt_table5,
+               MT6397_DIGLDO_CON10, 15, MT6397_DIGLDO_CON33, 0xE0),
+       MT6397_LDO("ldo_vibr", VIBR, ldo_volt_table7,
+               MT6397_DIGLDO_CON24, 15, MT6397_DIGLDO_CON25, 0xE00),
+};
+
+static int mt6397_set_buck_vosel_reg(struct platform_device *pdev)
+{
+       struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+       int i;
+       u32 regval;
+
+       for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
+               if (mt6397_regulators[i].vselctrl_reg) {
+                       if (regmap_read(mt6397->regmap,
+                               mt6397_regulators[i].vselctrl_reg,
+                               &regval) < 0) {
+                               dev_err(&pdev->dev,
+                                       "Failed to read buck ctrl\n");
+                               return -EIO;
+                       }
+
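+                       /* HW-controlled buck: voltage comes from vselon_reg */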
+                       if (regval & mt6397_regulators[i].vselctrl_mask) {
+                               mt6397_regulators[i].desc.vsel_reg =
+                               mt6397_regulators[i].vselon_reg;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int mt6397_regulator_probe(struct platform_device *pdev)
+{
+       struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+       struct regulator_config config = {};
+       struct regulator_dev *rdev;
+       int i;
+       u32 reg_value, version;
+
+       /* Query the buck controller to select the active voltage register */
+       if (mt6397_set_buck_vosel_reg(pdev))
+               return -EIO;
+
+       /* Read PMIC chip revision to update constraints and voltage table */
+       if (regmap_read(mt6397->regmap, MT6397_CID, &reg_value) < 0) {
+               dev_err(&pdev->dev, "Failed to read Chip ID\n");
+               return -EIO;
+       }
+       dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
+
+       version = (reg_value & 0xFF);
+       switch (version) {
+       case MT6397_REGULATOR_ID91:
+               mt6397_regulators[MT6397_ID_VGP2].desc.volt_table =
+               ldo_volt_table5_v2;
+               break;
+       default:
+               break;
+       }
+
+       for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
+               config.dev = &pdev->dev;
+               config.driver_data = &mt6397_regulators[i];
+               config.regmap = mt6397->regmap;
+               rdev = devm_regulator_register(&pdev->dev,
+                               &mt6397_regulators[i].desc, &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(&pdev->dev, "failed to register %s\n",
+                               mt6397_regulators[i].desc.name);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static struct platform_driver mt6397_regulator_driver = {
+       .driver = {
+               .name = "mt6397-regulator",
+       },
+       .probe = mt6397_regulator_probe,
+};
+
+module_platform_driver(mt6397_regulator_driver);
+
+MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mt6397-regulator");
index 91eaaf01052494e6579a87890ac2875cce740018..24e812c48d93076a36039e991c51bb371fb26d6e 100644 (file)
@@ -270,6 +270,7 @@ EXPORT_SYMBOL_GPL(of_regulator_match);
 
 struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                                            const struct regulator_desc *desc,
+                                           struct regulator_config *config,
                                            struct device_node **node)
 {
        struct device_node *search, *child;
@@ -307,6 +308,16 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                        break;
                }
 
+               if (desc->of_parse_cb) {
+                       if (desc->of_parse_cb(child, desc, config)) {
+                               dev_err(dev,
+                                       "driver callback failed to parse DT for regulator %s\n",
+                                       child->name);
+                               init_data = NULL;
+                               break;
+                       }
+               }
+
                of_node_get(child);
                *node = child;
                break;
index c879dff597eeaba773468b66a71526a9ca1fb5e8..8cc8d1877c446a48737b5d337f9faf91dc6c37e1 100644 (file)
@@ -56,7 +56,7 @@
 #define PFUZE100_VGEN5VOL      0x70
 #define PFUZE100_VGEN6VOL      0x71
 
-enum chips { PFUZE100, PFUZE200 };
+enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 };
 
 struct pfuze_regulator {
        struct regulator_desc desc;
@@ -80,9 +80,18 @@ static const int pfuze100_vsnvs[] = {
        1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000,
 };
 
+static const int pfuze3000_sw2lo[] = {
+       1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000,
+};
+
+static const int pfuze3000_sw2hi[] = {
+       2500000, 2800000, 2850000, 3000000, 3100000, 3150000, 3200000, 3300000,
+};
+
 static const struct i2c_device_id pfuze_device_id[] = {
        {.name = "pfuze100", .driver_data = PFUZE100},
        {.name = "pfuze200", .driver_data = PFUZE200},
+       {.name = "pfuze3000", .driver_data = PFUZE3000},
        { }
 };
 MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
@@ -90,6 +99,7 @@ MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
 static const struct of_device_id pfuze_dt_ids[] = {
        { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
        { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
+       { .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000},
        { }
 };
 MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
@@ -219,6 +229,60 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
                .stby_mask = 0x20,      \
        }
 
+#define PFUZE3000_VCC_REG(_chip, _name, base, min, max, step)  {       \
+       .desc = {       \
+               .name = #_name, \
+               .n_voltages = ((max) - (min)) / (step) + 1,     \
+               .ops = &pfuze100_ldo_regulator_ops,     \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = _chip ## _ ## _name,      \
+               .owner = THIS_MODULE,   \
+               .min_uV = (min),        \
+               .uV_step = (step),      \
+               .vsel_reg = (base),     \
+               .vsel_mask = 0x3,       \
+               .enable_reg = (base),   \
+               .enable_mask = 0x10,    \
+       },      \
+       .stby_reg = (base),     \
+       .stby_mask = 0x20,      \
+}
+
+#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step)  {       \
+       .desc = {       \
+               .name = #_name,\
+               .n_voltages = ((max) - (min)) / (step) + 1,     \
+               .ops = &pfuze100_sw_regulator_ops,      \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = _chip ## _ ## _name,      \
+               .owner = THIS_MODULE,   \
+               .min_uV = (min),        \
+               .uV_step = (step),      \
+               .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
+               .vsel_mask = 0x7,       \
+       },      \
+       .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
+       .stby_mask = 0x7,       \
+}
+
+#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step)  {       \
+       .desc = {       \
+               .name = #_name,\
+               .n_voltages = ((max) - (min)) / (step) + 1,     \
+               .ops = &pfuze100_sw_regulator_ops,      \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = _chip ## _ ## _name,      \
+               .owner = THIS_MODULE,   \
+               .min_uV = (min),        \
+               .uV_step = (step),      \
+               .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
+               .vsel_mask = 0xf,       \
+       },      \
+       .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
+       .stby_mask = 0xf,       \
+}
+
 /* PFUZE100 */
 static struct pfuze_regulator pfuze100_regulators[] = {
        PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
@@ -254,6 +318,22 @@ static struct pfuze_regulator pfuze200_regulators[] = {
        PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
 };
 
+static struct pfuze_regulator pfuze3000_regulators[] = {
+       PFUZE100_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 700000, 1475000, 25000),
+       PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
+       PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+       PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
+       PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
+       PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+       PFUZE100_FIXED_REG(PFUZE3000, VREFDDR, PFUZE100_VREFDDRCON, 750000),
+       PFUZE100_VGEN_REG(PFUZE3000, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE3000, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+       PFUZE3000_VCC_REG(PFUZE3000, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
+       PFUZE3000_VCC_REG(PFUZE3000, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
+       PFUZE100_VGEN_REG(PFUZE3000, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
 static struct pfuze_regulator *pfuze_regulators;
 
 #ifdef CONFIG_OF
@@ -294,6 +374,24 @@ static struct of_regulator_match pfuze200_matches[] = {
        { .name = "vgen6",      },
 };
 
+/* PFUZE3000 */
+static struct of_regulator_match pfuze3000_matches[] = {
+       { .name = "sw1a",       },
+       { .name = "sw1b",       },
+       { .name = "sw2",        },
+       { .name = "sw3",        },
+       { .name = "swbst",      },
+       { .name = "vsnvs",      },
+       { .name = "vrefddr",    },
+       { .name = "vldo1",      },
+       { .name = "vldo2",      },
+       { .name = "vccsd",      },
+       { .name = "v33",        },
+       { .name = "vldo3",      },
+       { .name = "vldo4",      },
+};
+
 static struct of_regulator_match *pfuze_matches;
 
 static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -313,6 +411,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
        }
 
        switch (chip->chip_id) {
+       case PFUZE3000:
+               pfuze_matches = pfuze3000_matches;
+               ret = of_regulator_match(dev, parent, pfuze3000_matches,
+                                        ARRAY_SIZE(pfuze3000_matches));
+               break;
        case PFUZE200:
                pfuze_matches = pfuze200_matches;
                ret = of_regulator_match(dev, parent, pfuze200_matches,
@@ -378,7 +481,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
                 * as ID=8 in PFUZE100
                 */
                dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
-       } else if ((value & 0x0f) != pfuze_chip->chip_id) {
+       } else if ((value & 0x0f) != pfuze_chip->chip_id &&
+                  (value & 0xf0) >> 4 != pfuze_chip->chip_id) {
                /* device id NOT match with your setting */
                dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
                return -ENODEV;
@@ -417,7 +521,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
        int i, ret;
        const struct of_device_id *match;
        u32 regulator_num;
-       u32 sw_check_start, sw_check_end;
+       u32 sw_check_start, sw_check_end, sw_hi = 0x40;
 
        pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
                        GFP_KERNEL);
@@ -458,13 +562,19 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
 
        /* use the right regulators after identify the right device */
        switch (pfuze_chip->chip_id) {
+       case PFUZE3000:
+               pfuze_regulators = pfuze3000_regulators;
+               regulator_num = ARRAY_SIZE(pfuze3000_regulators);
+               sw_check_start = PFUZE3000_SW2;
+               sw_check_end = PFUZE3000_SW2;
+               sw_hi = 1 << 3;
+               break;
        case PFUZE200:
                pfuze_regulators = pfuze200_regulators;
                regulator_num = ARRAY_SIZE(pfuze200_regulators);
                sw_check_start = PFUZE200_SW2;
                sw_check_end = PFUZE200_SW3B;
                break;
-
        case PFUZE100:
        default:
                pfuze_regulators = pfuze100_regulators;
@@ -474,7 +584,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
                break;
        }
        dev_info(&client->dev, "pfuze%s found.\n",
-               (pfuze_chip->chip_id == PFUZE100) ? "100" : "200");
+               (pfuze_chip->chip_id == PFUZE100) ? "100" :
+               ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000"));
 
        memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
                sizeof(pfuze_chip->regulator_descs));
@@ -498,10 +609,15 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
                /* SW2~SW4 high bit check and modify the voltage value table */
                if (i >= sw_check_start && i <= sw_check_end) {
                        regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
-                       if (val & 0x40) {
-                               desc->min_uV = 800000;
-                               desc->uV_step = 50000;
-                               desc->n_voltages = 51;
+                       if (val & sw_hi) {
+                               if (pfuze_chip->chip_id == PFUZE3000) {
+                                       desc->volt_table = pfuze3000_sw2hi;
+                                       desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi);
+                               } else {
+                                       desc->min_uV = 800000;
+                                       desc->uV_step = 50000;
+                                       desc->n_voltages = 51;
+                               }
                        }
                }
 
index 8364ff331a81838c6e84fe822b17d434bfb74aa7..e8647f7cf25e27378af00ec7799195c2a29e995a 100644 (file)
@@ -227,9 +227,11 @@ static int rpm_reg_set_mV_sel(struct regulator_dev *rdev,
                return uV;
 
        mutex_lock(&vreg->lock);
-       vreg->uV = uV;
        if (vreg->is_enabled)
-               ret = rpm_reg_write(vreg, req, vreg->uV / 1000);
+               ret = rpm_reg_write(vreg, req, uV / 1000);
+
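+       /* Cache the new voltage only if the RPM write (if any) succeeded */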
+       if (!ret)
+               vreg->uV = uV;
        mutex_unlock(&vreg->lock);
 
        return ret;
@@ -252,9 +254,11 @@ static int rpm_reg_set_uV_sel(struct regulator_dev *rdev,
                return uV;
 
        mutex_lock(&vreg->lock);
-       vreg->uV = uV;
        if (vreg->is_enabled)
-               ret = rpm_reg_write(vreg, req, vreg->uV);
+               ret = rpm_reg_write(vreg, req, uV);
+
+       if (!ret)
+               vreg->uV = uV;
        mutex_unlock(&vreg->lock);
 
        return ret;
@@ -674,6 +678,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
        vreg->desc.owner = THIS_MODULE;
        vreg->desc.type = REGULATOR_VOLTAGE;
        vreg->desc.name = pdev->dev.of_node->name;
+       vreg->desc.supply_name = "vin";
 
        vreg->rpm = dev_get_drvdata(pdev->dev.parent);
        if (!vreg->rpm) {
@@ -768,7 +773,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
                        break;
                }
 
-               if (force_mode < 0) {
+               if (force_mode == -1) {
                        dev_err(&pdev->dev, "invalid force mode\n");
                        return -EINVAL;
                }
index c94a3e0f3b91b4af039740a32a883c55d836a51f..1f93b752a81cdc36a824459ebf66354aea275af8 100644 (file)
@@ -97,7 +97,7 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
                                  RK808_RAMP_RATE_MASK, ramp_value);
 }
 
-int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
+static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
 {
        unsigned int reg;
        int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
@@ -112,7 +112,7 @@ int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
                                  sel);
 }
 
-int rk808_set_suspend_enable(struct regulator_dev *rdev)
+static int rk808_set_suspend_enable(struct regulator_dev *rdev)
 {
        unsigned int reg;
 
@@ -123,7 +123,7 @@ int rk808_set_suspend_enable(struct regulator_dev *rdev)
                                  0);
 }
 
-int rk808_set_suspend_disable(struct regulator_dev *rdev)
+static int rk808_set_suspend_disable(struct regulator_dev *rdev)
 {
        unsigned int reg;
 
index 870cc49438dbe55d237dde1602010bb20105e104..96d2c18e051a071de50a490181e88151a310bed8 100644 (file)
@@ -36,6 +36,8 @@ static struct regulator_ops rt5033_buck_ops = {
 static const struct regulator_desc rt5033_supported_regulators[] = {
        [RT5033_BUCK] = {
                .name           = "BUCK",
+               .of_match       = of_match_ptr("BUCK"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = RT5033_BUCK,
                .ops            = &rt5033_buck_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -50,6 +52,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
        },
        [RT5033_LDO] = {
                .name           = "LDO",
+               .of_match       = of_match_ptr("LDO"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = RT5033_LDO,
                .ops            = &rt5033_buck_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -64,6 +68,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
        },
        [RT5033_SAFE_LDO] = {
                .name           = "SAFE_LDO",
+               .of_match       = of_match_ptr("SAFE_LDO"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = RT5033_SAFE_LDO,
                .ops            = &rt5033_safe_ldo_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -81,7 +87,7 @@ static int rt5033_regulator_probe(struct platform_device *pdev)
        int ret, i;
        struct regulator_config config = {};
 
-       config.dev = &pdev->dev;
+       config.dev = rt5033->dev;
        config.driver_data = rt5033;
 
        for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) {
index 7380af8bd50d4e8d699f7564c6ab4bb3e4edc84b..b941e564b3f3405a647b0d13794eed5d3dffa3fc 100644 (file)
@@ -173,7 +173,7 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
 }
 
 /* Operations permitted on VDCDCx */
-static struct regulator_ops tps65023_dcdc_ops = {
+static const struct regulator_ops tps65023_dcdc_ops = {
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
@@ -184,7 +184,7 @@ static struct regulator_ops tps65023_dcdc_ops = {
 };
 
 /* Operations permitted on LDOx */
-static struct regulator_ops tps65023_ldo_ops = {
+static const struct regulator_ops tps65023_ldo_ops = {
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
@@ -194,7 +194,7 @@ static struct regulator_ops tps65023_ldo_ops = {
        .map_voltage = regulator_map_voltage_ascend,
 };
 
-static struct regmap_config tps65023_regmap_config = {
+static const struct regmap_config tps65023_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
index 1dba62c5cf6a0b364df79805f5e8ee857ba36b46..1efebc9eedfb384312de87e0bd4548599524b0dc 100644 (file)
@@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref)
        struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
        struct scsi_device *sdev = scsi_dh_data->sdev;
 
+       scsi_dh->detach(sdev);
+
        spin_lock_irq(sdev->request_queue->queue_lock);
        sdev->scsi_dh_data = NULL;
        spin_unlock_irq(sdev->request_queue->queue_lock);
 
-       scsi_dh->detach(sdev);
        sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
        module_put(scsi_dh->module);
 }
index 399516925d802fea379938b9958778daa7ee37f2..05ea0d49a3a3ddf2f039670db88c4d9f126662f4 100644 (file)
@@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
         */
        sd_set_flush_flag(sdkp);
 
-       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-                               sdkp->max_xfer_blocks);
+       max_xfer = sdkp->max_xfer_blocks;
        max_xfer <<= ilog2(sdp->sector_size) - 9;
+
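+       /* Clamp against the queue limit after converting to 512-byte sectors */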
+       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+                               max_xfer);
        blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
index 99829985c1a194ebe6edc0e73912077bade693d3..95ccedabba4f9dca37dbd4909e3bc578ec619cd3 100644 (file)
@@ -185,6 +185,16 @@ config SPI_DAVINCI
        help
          SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
 
+config SPI_DLN2
+       tristate "Diolan DLN-2 USB SPI adapter"
+       depends on MFD_DLN2
+       help
+         If you say yes to this option, support will be included for Diolan
+         DLN2, a USB to SPI interface.
+
+         This driver can also be built as a module.  If so, the module
+         will be called spi-dln2.
+
 config SPI_EFM32
        tristate "EFM32 SPI controller"
        depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
@@ -279,7 +289,7 @@ config SPI_FSL_CPM
        depends on FSL_SOC
 
 config SPI_FSL_SPI
-       bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
+       tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
        depends on OF
        select SPI_FSL_LIB
        select SPI_FSL_CPM if FSL_SOC
@@ -292,7 +302,6 @@ config SPI_FSL_SPI
 
 config SPI_FSL_DSPI
        tristate "Freescale DSPI controller"
-       select SPI_BITBANG
        select REGMAP_MMIO
        depends on SOC_VF610 || COMPILE_TEST
        help
@@ -300,7 +309,7 @@ config SPI_FSL_DSPI
          mode. VF610 platform uses the controller.
 
 config SPI_FSL_ESPI
-       bool "Freescale eSPI controller"
+       tristate "Freescale eSPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
@@ -460,7 +469,6 @@ config SPI_S3C24XX_FIQ
 config SPI_S3C64XX
        tristate "Samsung S3C64XX series type SPI"
        depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
-       select S3C64XX_PL080 if ARCH_S3C64XX
        help
          SPI driver for Samsung S3C64XX and newer SoCs.
 
@@ -503,6 +511,13 @@ config SPI_SIRF
        help
          SPI driver for CSR SiRFprimaII SoCs
 
+config SPI_ST_SSC4
+       tristate "STMicroelectronics SPI SSC-based driver"
+       depends on ARCH_STI
+       help
+         SPI support for STMicroelectronics SoCs. If you say yes to
+         this option, support will be included for the SSC-driven SPI.
+
 config SPI_SUN4I
        tristate "Allwinner A10 SoCs SPI controller"
        depends on ARCH_SUNXI || COMPILE_TEST
@@ -595,7 +610,6 @@ config SPI_XTENSA_XTFPGA
          16 bit words in SPI mode 0, automatically asserting CS on transfer
          start and deasserting on end.
 
-
 config SPI_NUC900
        tristate "Nuvoton NUC900 series SPI"
        depends on ARCH_W90X900
index 6b9d2ac629cce6ca7e886df22c2fb5b02874373c..d8cbf654976b5296aaa7561eeef6ba480631eb4f 100644 (file)
@@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_CADENCE)             += spi-cadence.o
 obj-$(CONFIG_SPI_CLPS711X)             += spi-clps711x.o
 obj-$(CONFIG_SPI_COLDFIRE_QSPI)                += spi-coldfire-qspi.o
 obj-$(CONFIG_SPI_DAVINCI)              += spi-davinci.o
+obj-$(CONFIG_SPI_DLN2)                 += spi-dln2.o
 obj-$(CONFIG_SPI_DESIGNWARE)           += spi-dw.o
 obj-$(CONFIG_SPI_DW_MMIO)              += spi-dw-mmio.o
 obj-$(CONFIG_SPI_DW_PCI)               += spi-dw-midpci.o
@@ -76,6 +77,7 @@ obj-$(CONFIG_SPI_SH_HSPI)             += spi-sh-hspi.o
 obj-$(CONFIG_SPI_SH_MSIOF)             += spi-sh-msiof.o
 obj-$(CONFIG_SPI_SH_SCI)               += spi-sh-sci.o
 obj-$(CONFIG_SPI_SIRF)         += spi-sirf.o
+obj-$(CONFIG_SPI_ST_SSC4)              += spi-st-ssc4.o
 obj-$(CONFIG_SPI_SUN4I)                        += spi-sun4i.o
 obj-$(CONFIG_SPI_SUN6I)                        += spi-sun6i.o
 obj-$(CONFIG_SPI_TEGRA114)             += spi-tegra114.o
index 23d8f5f56579a83d21040ff8625e69c0c554da3b..9af7841f2e8c6010060c17a038bc8c86c792a729 100644 (file)
@@ -1046,6 +1046,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
        struct atmel_spi_device *asd;
        int                     timeout;
        int                     ret;
+       unsigned long           dma_timeout;
 
        as = spi_master_get_devdata(master);
 
@@ -1103,15 +1104,12 @@ static int atmel_spi_one_transfer(struct spi_master *master,
 
                /* interrupts are disabled, so free the lock for schedule */
                atmel_spi_unlock(as);
-               ret = wait_for_completion_timeout(&as->xfer_completion,
-                                                       SPI_DMA_TIMEOUT);
+               dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
+                                                         SPI_DMA_TIMEOUT);
                atmel_spi_lock(as);
-               if (WARN_ON(ret == 0)) {
-                       dev_err(&spi->dev,
-                               "spi trasfer timeout, err %d\n", ret);
+               if (WARN_ON(dma_timeout == 0)) {
+                       dev_err(&spi->dev, "spi transfer timeout\n");
                        as->done_status = -EIO;
-               } else {
-                       ret = 0;
                }
 
                if (as->done_status)
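
The atmel_spi hunk above works because wait_for_completion_timeout() does not return an error code: it returns 0 on timeout and the number of jiffies left otherwise, so storing the result in the signed 'ret' and printing it as an error was misleading. A minimal sketch of the usual pattern (the completion pointer and timeout value are illustrative):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait_for_xfer(struct completion *xfer_done)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(xfer_done,
						msecs_to_jiffies(1000));
	if (!time_left)			/* 0 means the wait timed out */
		return -ETIMEDOUT;

	return 0;			/* non-zero: jiffies remaining */
}
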
index 326f47973684aa454c5a69d201150a88bf72763f..f45e085c01a616436f7a6b115e69bb8144012463 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/init.h>
index 98aab457b24d987330280eb70bbf0dbb49f61a81..419a782ab6d50541809f0c8b10b3d8eaeeb1ae89 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
 #include <linux/clk.h>
index c20530982e2610c136f57c8b4ad4d343707dc7af..e73e2b052c9ccf0ca3ff2ef46950600aa5ff9f3f 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  */
 
 #include <linux/kernel.h>
index dc7d2c2d643e80e3b6fa65564cc76b435d76ce50..5ef6638d5e8a2698a6c8e85fad41c46d1f03fd06 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/spinlock.h>
index ee4f91ccd8fd53bac3c77a04de9e898d690085bc..9a95862986c8381a82ea394e0a2e90dc5457d5f6 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 41b5dc4445f622d9c29110318698ef0b89128850..688956ff5095c26a8c1101dc4740b310627ed294 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
- *
 */
 
 #include <linux/kernel.h>
index b3707badb1e58c559350919bac55b9451325a6e6..5e991065f5b0166437aceb3f313ed7bd65f9bcbd 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/interrupt.h>
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
new file mode 100644 (file)
index 0000000..3b7d91d
--- /dev/null
@@ -0,0 +1,881 @@
+/*
+ * Driver for the Diolan DLN-2 USB-SPI adapter
+ *
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dln2.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include <asm/unaligned.h>
+
+#define DLN2_SPI_MODULE_ID             0x02
+#define DLN2_SPI_CMD(cmd)              DLN2_CMD(cmd, DLN2_SPI_MODULE_ID)
+
+/* SPI commands */
+#define DLN2_SPI_GET_PORT_COUNT                        DLN2_SPI_CMD(0x00)
+#define DLN2_SPI_ENABLE                                DLN2_SPI_CMD(0x11)
+#define DLN2_SPI_DISABLE                       DLN2_SPI_CMD(0x12)
+#define DLN2_SPI_IS_ENABLED                    DLN2_SPI_CMD(0x13)
+#define DLN2_SPI_SET_MODE                      DLN2_SPI_CMD(0x14)
+#define DLN2_SPI_GET_MODE                      DLN2_SPI_CMD(0x15)
+#define DLN2_SPI_SET_FRAME_SIZE                        DLN2_SPI_CMD(0x16)
+#define DLN2_SPI_GET_FRAME_SIZE                        DLN2_SPI_CMD(0x17)
+#define DLN2_SPI_SET_FREQUENCY                 DLN2_SPI_CMD(0x18)
+#define DLN2_SPI_GET_FREQUENCY                 DLN2_SPI_CMD(0x19)
+#define DLN2_SPI_READ_WRITE                    DLN2_SPI_CMD(0x1A)
+#define DLN2_SPI_READ                          DLN2_SPI_CMD(0x1B)
+#define DLN2_SPI_WRITE                         DLN2_SPI_CMD(0x1C)
+#define DLN2_SPI_SET_DELAY_BETWEEN_SS          DLN2_SPI_CMD(0x20)
+#define DLN2_SPI_GET_DELAY_BETWEEN_SS          DLN2_SPI_CMD(0x21)
+#define DLN2_SPI_SET_DELAY_AFTER_SS            DLN2_SPI_CMD(0x22)
+#define DLN2_SPI_GET_DELAY_AFTER_SS            DLN2_SPI_CMD(0x23)
+#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES      DLN2_SPI_CMD(0x24)
+#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES      DLN2_SPI_CMD(0x25)
+#define DLN2_SPI_SET_SS                                DLN2_SPI_CMD(0x26)
+#define DLN2_SPI_GET_SS                                DLN2_SPI_CMD(0x27)
+#define DLN2_SPI_RELEASE_SS                    DLN2_SPI_CMD(0x28)
+#define DLN2_SPI_SS_VARIABLE_ENABLE            DLN2_SPI_CMD(0x2B)
+#define DLN2_SPI_SS_VARIABLE_DISABLE           DLN2_SPI_CMD(0x2C)
+#define DLN2_SPI_SS_VARIABLE_IS_ENABLED                DLN2_SPI_CMD(0x2D)
+#define DLN2_SPI_SS_AAT_ENABLE                 DLN2_SPI_CMD(0x2E)
+#define DLN2_SPI_SS_AAT_DISABLE                        DLN2_SPI_CMD(0x2F)
+#define DLN2_SPI_SS_AAT_IS_ENABLED             DLN2_SPI_CMD(0x30)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE      DLN2_SPI_CMD(0x31)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE     DLN2_SPI_CMD(0x32)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED  DLN2_SPI_CMD(0x33)
+#define DLN2_SPI_SET_CPHA                      DLN2_SPI_CMD(0x34)
+#define DLN2_SPI_GET_CPHA                      DLN2_SPI_CMD(0x35)
+#define DLN2_SPI_SET_CPOL                      DLN2_SPI_CMD(0x36)
+#define DLN2_SPI_GET_CPOL                      DLN2_SPI_CMD(0x37)
+#define DLN2_SPI_SS_MULTI_ENABLE               DLN2_SPI_CMD(0x38)
+#define DLN2_SPI_SS_MULTI_DISABLE              DLN2_SPI_CMD(0x39)
+#define DLN2_SPI_SS_MULTI_IS_ENABLED           DLN2_SPI_CMD(0x3A)
+#define DLN2_SPI_GET_SUPPORTED_MODES           DLN2_SPI_CMD(0x40)
+#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES     DLN2_SPI_CMD(0x41)
+#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES     DLN2_SPI_CMD(0x42)
+#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES     DLN2_SPI_CMD(0x43)
+#define DLN2_SPI_GET_SS_COUNT                  DLN2_SPI_CMD(0x44)
+#define DLN2_SPI_GET_MIN_FREQUENCY             DLN2_SPI_CMD(0x45)
+#define DLN2_SPI_GET_MAX_FREQUENCY             DLN2_SPI_CMD(0x46)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS      DLN2_SPI_CMD(0x47)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS      DLN2_SPI_CMD(0x48)
+#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS                DLN2_SPI_CMD(0x49)
+#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS                DLN2_SPI_CMD(0x4A)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES  DLN2_SPI_CMD(0x4B)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES  DLN2_SPI_CMD(0x4C)
+
+#define DLN2_SPI_MAX_XFER_SIZE                 256
+#define DLN2_SPI_BUF_SIZE                      (DLN2_SPI_MAX_XFER_SIZE + 16)
+#define DLN2_SPI_ATTR_LEAVE_SS_LOW             BIT(0)
+#define DLN2_TRANSFERS_WAIT_COMPLETE           1
+#define DLN2_TRANSFERS_CANCEL                  0
+#define DLN2_RPM_AUTOSUSPEND_TIMEOUT           2000
+
+struct dln2_spi {
+       struct platform_device *pdev;
+       struct spi_master *master;
+       u8 port;
+
+       /*
+        * This buffer will be used mainly for read/write operations. Since
+        * they're quite large, we cannot use the stack. Protection is not
+        * needed because all SPI communication is serialized by the SPI core.
+        */
+       void *buf;
+
+       u8 bpw;
+       u32 speed;
+       u16 mode;
+       u8 cs;
+};
+
+/*
+ * Enable/Disable SPI module. The disable command will wait for transfers to
+ * complete first.
+ */
+static int dln2_spi_enable(struct dln2_spi *dln2, bool enable)
+{
+       u16 cmd;
+       struct {
+               u8 port;
+               u8 wait_for_completion;
+       } tx;
+       unsigned len = sizeof(tx);
+
+       tx.port = dln2->port;
+
+       if (enable) {
+               cmd = DLN2_SPI_ENABLE;
+               len -= sizeof(tx.wait_for_completion);
+       } else {
+               tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE;
+               cmd = DLN2_SPI_DISABLE;
+       }
+
+       return dln2_transfer_tx(dln2->pdev, cmd, &tx, len);
+}
+
+/*
+ * Select/unselect multiple CS lines. The selected lines will be automatically
+ * toggled LOW/HIGH by the board firmware during transfers, provided they're
+ * enabled first.
+ *
+ * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation
+ *                       will toggle the lines LOW/HIGH automatically.
+ */
+static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask)
+{
+       struct {
+               u8 port;
+               u8 cs;
+       } tx;
+
+       tx.port = dln2->port;
+
+       /*
+        * According to Diolan docs, "a slave device can be selected by changing
+        * the corresponding bit value to 0". The rest must be set to 1. Hence
+        * the bitwise NOT in front.
+        */
+       tx.cs = ~cs_mask;
+
+       return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx));
+}
+
+/*
+ * Select one CS line. The other lines will be un-selected.
+ */
+static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs)
+{
+       return dln2_spi_cs_set(dln2, BIT(cs));
+}
+
+/*
+ * Enable/disable CS lines for usage. The module has to be disabled first.
+ */
+static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
+{
+       struct {
+               u8 port;
+               u8 cs;
+       } tx;
+       u16 cmd;
+
+       tx.port = dln2->port;
+       tx.cs = cs_mask;
+       cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE;
+
+       return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
+}
+
+static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
+{
+       u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0);
+
+       return dln2_spi_cs_enable(dln2, cs_mask, enable);
+}
+
+static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num)
+{
+       int ret;
+       struct {
+               u8 port;
+       } tx;
+       struct {
+               __le16 cs_count;
+       } rx;
+       unsigned rx_len = sizeof(rx);
+
+       tx.port = dln2->port;
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx),
+                           &rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx))
+               return -EPROTO;
+
+       *cs_num = le16_to_cpu(rx.cs_count);
+
+       dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num);
+
+       return 0;
+}
+
+static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq)
+{
+       int ret;
+       struct {
+               u8 port;
+       } tx;
+       struct {
+               __le32 speed;
+       } rx;
+       unsigned rx_len = sizeof(rx);
+
+       tx.port = dln2->port;
+
+       ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx))
+               return -EPROTO;
+
+       *freq = le32_to_cpu(rx.speed);
+
+       return 0;
+}
+
+/*
+ * Get bus min/max frequencies.
+ */
+static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax)
+{
+       int ret;
+
+       ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin);
+       if (ret < 0)
+               return ret;
+
+       ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax);
+       if (ret < 0)
+               return ret;
+
+       dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n",
+               *fmin, *fmax);
+
+       return 0;
+}
+
+/*
+ * Set the bus speed. The module will automatically round down to the closest
+ * available frequency and return it. The module has to be disabled first.
+ */
+static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed)
+{
+       int ret;
+       struct {
+               u8 port;
+               __le32 speed;
+       } __packed tx;
+       struct {
+               __le32 speed;
+       } rx;
+       int rx_len = sizeof(rx);
+
+       tx.port = dln2->port;
+       tx.speed = cpu_to_le32(speed);
+
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx),
+                           &rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx))
+               return -EPROTO;
+
+       return 0;
+}
+
+/*
+ * Change CPOL & CPHA. The module has to be disabled first.
+ */
+static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode)
+{
+       struct {
+               u8 port;
+               u8 mode;
+       } tx;
+
+       tx.port = dln2->port;
+       tx.mode = mode;
+
+       return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx));
+}
+
+/*
+ * Change frame size. The module has to be disabled first.
+ */
+static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw)
+{
+       struct {
+               u8 port;
+               u8 bpw;
+       } tx;
+
+       tx.port = dln2->port;
+       tx.bpw = bpw;
+
+       return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE,
+                               &tx, sizeof(tx));
+}
+
+static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2,
+                                             u32 *bpw_mask)
+{
+       int ret;
+       struct {
+               u8 port;
+       } tx;
+       struct {
+               u8 count;
+               u8 frame_sizes[36];
+       } *rx = dln2->buf;
+       unsigned rx_len = sizeof(*rx);
+       int i;
+
+       tx.port = dln2->port;
+
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES,
+                           &tx, sizeof(tx), rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(*rx))
+               return -EPROTO;
+       if (rx->count > ARRAY_SIZE(rx->frame_sizes))
+               return -EPROTO;
+
+       *bpw_mask = 0;
+       for (i = 0; i < rx->count; i++)
+               *bpw_mask |= BIT(rx->frame_sizes[i] - 1);
+
+       dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask);
+
+       return 0;
+}
+
+/*
+ * Copy the data to the DLN2 buffer and change the byte order to LE, as
+ * required by the DLN2 module. The SPI core makes sure that the data length
+ * is a multiple of the word size.
+ */
+static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+       memcpy(dln2_buf, src, len);
+#else
+       if (bpw <= 8) {
+               memcpy(dln2_buf, src, len);
+       } else if (bpw <= 16) {
+               __le16 *d = (__le16 *)dln2_buf;
+               u16 *s = (u16 *)src;
+
+               len = len / 2;
+               while (len--)
+                       *d++ = cpu_to_le16p(s++);
+       } else {
+               __le32 *d = (__le32 *)dln2_buf;
+               u32 *s = (u32 *)src;
+
+               len = len / 4;
+               while (len--)
+                       *d++ = cpu_to_le32p(s++);
+       }
+#endif
+
+       return 0;
+}
+
+/*
+ * Copy the data from the DLN2 buffer and convert to CPU byte order, since the
+ * DLN2 buffer is LE ordered. The SPI core makes sure that the data length is a
+ * multiple of the word size. The RX dln2_buf is only 2-byte aligned, so for BE
+ * we have to avoid unaligned accesses in the 32-bit case.
+ */
+static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+       memcpy(dest, dln2_buf, len);
+#else
+       if (bpw <= 8) {
+               memcpy(dest, dln2_buf, len);
+       } else if (bpw <= 16) {
+               u16 *d = (u16 *)dest;
+               __le16 *s = (__le16 *)dln2_buf;
+
+               len = len / 2;
+               while (len--)
+                       *d++ = le16_to_cpup(s++);
+       } else {
+               u32 *d = (u32 *)dest;
+               __le32 *s = (__le32 *)dln2_buf;
+
+               len = len / 4;
+               while (len--)
+                       *d++ = get_unaligned_le32(s++);
+       }
+#endif
+
+       return 0;
+}
+
+/*
+ * Perform one write operation.
+ */
+static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data,
+                             u16 data_len, u8 attr)
+{
+       struct {
+               u8 port;
+               __le16 size;
+               u8 attr;
+               u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+       } __packed *tx = dln2->buf;
+       unsigned tx_len;
+
+       BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE);
+
+       if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+               return -EINVAL;
+
+       tx->port = dln2->port;
+       tx->size = cpu_to_le16(data_len);
+       tx->attr = attr;
+
+       dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw);
+
+       tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+       return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len);
+}
+
+/*
+ * Perform one read operation.
+ */
+static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data,
+                            u16 data_len, u8 attr)
+{
+       int ret;
+       struct {
+               u8 port;
+               __le16 size;
+               u8 attr;
+       } __packed tx;
+       struct {
+               __le16 size;
+               u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+       } __packed *rx = dln2->buf;
+       unsigned rx_len = sizeof(*rx);
+
+       BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+       if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+               return -EINVAL;
+
+       tx.port = dln2->port;
+       tx.size = cpu_to_le16(data_len);
+       tx.attr = attr;
+
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx),
+                           rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx->size) + data_len)
+               return -EPROTO;
+       if (le16_to_cpu(rx->size) != data_len)
+               return -EPROTO;
+
+       dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw);
+
+       return 0;
+}
+
+/*
+ * Perform one write & read operation.
+ */
+static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
+                                  u8 *rx_data, u16 data_len, u8 attr)
+{
+       int ret;
+       struct {
+               u8 port;
+               __le16 size;
+               u8 attr;
+               u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+       } __packed *tx;
+       struct {
+               __le16 size;
+               u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+       } __packed *rx;
+       unsigned tx_len, rx_len;
+
+       BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
+                    sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+       if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+               return -EINVAL;
+
+       /*
+        * Since this is a pseudo full-duplex communication, it's perfectly
+        * safe to use the same buffer for both tx and rx. By the time DLN2
+        * sends the response back with the rx data, the tx buffer is no
+        * longer needed.
+        */
+       tx = dln2->buf;
+       rx = dln2->buf;
+
+       tx->port = dln2->port;
+       tx->size = cpu_to_le16(data_len);
+       tx->attr = attr;
+
+       dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);
+
+       tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+       rx_len = sizeof(*rx);
+
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
+                           rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx->size) + data_len)
+               return -EPROTO;
+       if (le16_to_cpu(rx->size) != data_len)
+               return -EPROTO;
+
+       dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);
+
+       return 0;
+}
+
+/*
+ * Read/Write wrapper. It will automatically split an operation into multiple
+ * single ones due to device buffer constraints.
+ */
+static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
+                        u8 *rx_data, u16 data_len, u8 attr)
+{
+       int ret;
+       u16 len;
+       u8 temp_attr;
+       u16 remaining = data_len;
+       u16 offset;
+
+       do {
+               if (remaining > DLN2_SPI_MAX_XFER_SIZE) {
+                       len = DLN2_SPI_MAX_XFER_SIZE;
+                       temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+               } else {
+                       len = remaining;
+                       temp_attr = attr;
+               }
+
+               offset = data_len - remaining;
+
+               if (tx_data && rx_data) {
+                       ret = dln2_spi_read_write_one(dln2,
+                                                     tx_data + offset,
+                                                     rx_data + offset,
+                                                     len, temp_attr);
+               } else if (tx_data) {
+                       ret = dln2_spi_write_one(dln2,
+                                                tx_data + offset,
+                                                len, temp_attr);
+               } else if (rx_data) {
+                       ret = dln2_spi_read_one(dln2,
+                                               rx_data + offset,
+                                               len, temp_attr);
+               } else {
+                       return -EINVAL;
+               }
+
+               if (ret < 0)
+                       return ret;
+
+               remaining -= len;
+       } while (remaining);
+
+       return 0;
+}
+
+static int dln2_spi_prepare_message(struct spi_master *master,
+                                   struct spi_message *message)
+{
+       int ret;
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+       struct spi_device *spi = message->spi;
+
+       if (dln2->cs != spi->chip_select) {
+               ret = dln2_spi_cs_set_one(dln2, spi->chip_select);
+               if (ret < 0)
+                       return ret;
+
+               dln2->cs = spi->chip_select;
+       }
+
+       return 0;
+}
+
+static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
+                                  u8 bpw, u8 mode)
+{
+       int ret;
+       bool bus_setup_change;
+
+       bus_setup_change = dln2->speed != speed || dln2->mode != mode ||
+                          dln2->bpw != bpw;
+
+       if (!bus_setup_change)
+               return 0;
+
+       ret = dln2_spi_enable(dln2, false);
+       if (ret < 0)
+               return ret;
+
+       if (dln2->speed != speed) {
+               ret = dln2_spi_set_speed(dln2, speed);
+               if (ret < 0)
+                       return ret;
+
+               dln2->speed = speed;
+       }
+
+       if (dln2->mode != mode) {
+               ret = dln2_spi_set_mode(dln2, mode & 0x3);
+               if (ret < 0)
+                       return ret;
+
+               dln2->mode = mode;
+       }
+
+       if (dln2->bpw != bpw) {
+               ret = dln2_spi_set_bpw(dln2, bpw);
+               if (ret < 0)
+                       return ret;
+
+               dln2->bpw = bpw;
+       }
+
+       return dln2_spi_enable(dln2, true);
+}
+
+static int dln2_spi_transfer_one(struct spi_master *master,
+                                struct spi_device *spi,
+                                struct spi_transfer *xfer)
+{
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+       int status;
+       u8 attr = 0;
+
+       status = dln2_spi_transfer_setup(dln2, xfer->speed_hz,
+                                        xfer->bits_per_word,
+                                        spi->mode);
+       if (status < 0) {
+               dev_err(&dln2->pdev->dev, "Cannot setup transfer\n");
+               return status;
+       }
+
+       if (!xfer->cs_change && !spi_transfer_is_last(master, xfer))
+               attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+
+       status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
+                              xfer->len, attr);
+       if (status < 0)
+               dev_err(&dln2->pdev->dev, "write/read failed!\n");
+
+       return status;
+}
+
+static int dln2_spi_probe(struct platform_device *pdev)
+{
+       struct spi_master *master;
+       struct dln2_spi *dln2;
+       struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+       int ret;
+
+       master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
+       if (!master)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, master);
+
+       dln2 = spi_master_get_devdata(master);
+
+       dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
+       if (!dln2->buf) {
+               ret = -ENOMEM;
+               goto exit_free_master;
+       }
+
+       dln2->master = master;
+       dln2->pdev = pdev;
+       dln2->port = pdata->port;
+       /* cs/mode can never be 0xff, so the first transfer will set them */
+       dln2->cs = 0xff;
+       dln2->mode = 0xff;
+
+       /* disable SPI module before continuing with the setup */
+       ret = dln2_spi_enable(dln2, false);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to disable SPI module\n");
+               goto exit_free_master;
+       }
+
+       ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to get number of CS pins\n");
+               goto exit_free_master;
+       }
+
+       ret = dln2_spi_get_speed_range(dln2,
+                                      &master->min_speed_hz,
+                                      &master->max_speed_hz);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
+               goto exit_free_master;
+       }
+
+       ret = dln2_spi_get_supported_frame_sizes(dln2,
+                                                &master->bits_per_word_mask);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
+               goto exit_free_master;
+       }
+
+       ret = dln2_spi_cs_enable_all(dln2, true);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to enable CS pins\n");
+               goto exit_free_master;
+       }
+
+       master->bus_num = -1;
+       master->mode_bits = SPI_CPOL | SPI_CPHA;
+       master->prepare_message = dln2_spi_prepare_message;
+       master->transfer_one = dln2_spi_transfer_one;
+       master->auto_runtime_pm = true;
+
+       /* enable SPI module, we're good to go */
+       ret = dln2_spi_enable(dln2, true);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to enable SPI module\n");
+               goto exit_free_master;
+       }
+
+       pm_runtime_set_autosuspend_delay(&pdev->dev,
+                                        DLN2_RPM_AUTOSUSPEND_TIMEOUT);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       ret = devm_spi_register_master(&pdev->dev, master);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register master\n");
+               goto exit_register;
+       }
+
+       return ret;
+
+exit_register:
+       pm_runtime_disable(&pdev->dev);
+       pm_runtime_set_suspended(&pdev->dev);
+
+       if (dln2_spi_enable(dln2, false) < 0)
+               dev_err(&pdev->dev, "Failed to disable SPI module\n");
+exit_free_master:
+       spi_master_put(master);
+
+       return ret;
+}
+
+static int dln2_spi_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       pm_runtime_disable(&pdev->dev);
+
+       if (dln2_spi_enable(dln2, false) < 0)
+               dev_err(&pdev->dev, "Failed to disable SPI module\n");
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dln2_spi_suspend(struct device *dev)
+{
+       int ret;
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       ret = spi_master_suspend(master);
+       if (ret < 0)
+               return ret;
+
+       if (!pm_runtime_suspended(dev)) {
+               ret = dln2_spi_enable(dln2, false);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /*
+        * USB power may be cut off during sleep. Resetting the following
+        * parameters will force the board to be set up before the first
+        * transfer.
+        */
+       dln2->cs = 0xff;
+       dln2->speed = 0;
+       dln2->bpw = 0;
+       dln2->mode = 0xff;
+
+       return 0;
+}
+
+static int dln2_spi_resume(struct device *dev)
+{
+       int ret;
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       if (!pm_runtime_suspended(dev)) {
+               ret = dln2_spi_cs_enable_all(dln2, true);
+               if (ret < 0)
+                       return ret;
+
+               ret = dln2_spi_enable(dln2, true);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int dln2_spi_runtime_suspend(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       return dln2_spi_enable(dln2, false);
+}
+
+static int dln2_spi_runtime_resume(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       return dln2_spi_enable(dln2, true);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops dln2_spi_pm = {
+       SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
+       SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
+                          dln2_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver spi_dln2_driver = {
+       .driver = {
+               .name   = "dln2-spi",
+               .pm     = &dln2_spi_pm,
+       },
+       .probe          = dln2_spi_probe,
+       .remove         = dln2_spi_remove,
+};
+module_platform_driver(spi_dln2_driver);
+
+MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface");
+MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dln2-spi");
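
Worth noting in the new driver: dln2_spi_rdwr() splits any transfer longer than the 256-byte device buffer into chunks, forcing the LEAVE_SS_LOW attribute on every chunk but the last so chip select stays asserted across the whole transfer. A self-contained sketch of that chunking arithmetic (plain userspace C, purely illustrative, not driver code):

#include <stdio.h>

#define MAX_XFER	256	/* mirrors DLN2_SPI_MAX_XFER_SIZE */

int main(void)
{
	unsigned int data_len = 600;	/* made-up transfer length */
	unsigned int remaining = data_len;

	while (remaining) {
		unsigned int len = remaining > MAX_XFER ? MAX_XFER : remaining;
		unsigned int offset = data_len - remaining;
		int last = (remaining <= MAX_XFER);

		printf("chunk: offset=%u len=%u %s\n", offset, len,
		       last ? "caller attr" : "LEAVE_SS_LOW");
		remaining -= len;
	}
	return 0;
}
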
index a67d37c7e3c00f9518694f267aa75efd5c73398a..a0197fd4e95c40b26817fe942d2069a0381a41e5 100644 (file)
@@ -247,9 +247,9 @@ static struct dw_spi_dma_ops mid_dma_ops = {
 
 /* Some specific info for SPI0 controller on Intel MID */
 
-/* HW info for MRST CLk Control Unit, one 32b reg */
+/* HW info for MRST Clk Control Unit, 32b reg per controller */
 #define MRST_SPI_CLK_BASE      100000000       /* 100m */
-#define MRST_CLK_SPI0_REG      0xff11d86c
+#define MRST_CLK_SPI_REG       0xff11d86c
 #define CLK_SPI_BDIV_OFFSET    0
 #define CLK_SPI_BDIV_MASK      0x00000007
 #define CLK_SPI_CDIV_OFFSET    9
@@ -261,16 +261,17 @@ int dw_spi_mid_init(struct dw_spi *dws)
        void __iomem *clk_reg;
        u32 clk_cdiv;
 
-       clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
+       clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
        if (!clk_reg)
                return -ENOMEM;
 
-       /* get SPI controller operating freq info */
-       clk_cdiv  = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
+       /* Get SPI controller operating freq info */
+       clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
+       clk_cdiv &= CLK_SPI_CDIV_MASK;
+       clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
        dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
-       iounmap(clk_reg);
 
-       dws->num_cs = 16;
+       iounmap(clk_reg);
 
 #ifdef CONFIG_SPI_DW_MID_DMA
        dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
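
The dw-mid change above stops hard-coding SPI0's clock register: each MID SPI controller has its own 32-bit register in the clock control unit, so the divider is read at clk_reg + bus_num * 4 and the maximum speed is the 100 MHz base clock divided by (cdiv + 1). A small self-contained sketch of that arithmetic (the register value and mask below are assumed for illustration only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 100000000;	/* MRST_SPI_CLK_BASE, 100 MHz */
	uint32_t reg = 0x00000600;	/* pretend readl() result */
	uint32_t cdiv;

	cdiv = reg & 0x00000e00;	/* assumed CLK_SPI_CDIV_MASK */
	cdiv >>= 9;			/* CLK_SPI_CDIV_OFFSET */

	printf("max_freq = %u Hz\n", (unsigned int)(base / (cdiv + 1)));
	return 0;
}
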
index ba68da12cdf090e11b1d013bcc48315c74c44013..5ba331047cbefdea6d948ca20c8d6d3898cafbb0 100644 (file)
@@ -30,10 +30,20 @@ struct dw_spi_pci {
 
 struct spi_pci_desc {
        int     (*setup)(struct dw_spi *);
+       u16     num_cs;
+       u16     bus_num;
 };
 
-static struct spi_pci_desc spi_pci_mid_desc = {
+static struct spi_pci_desc spi_pci_mid_desc_1 = {
        .setup = dw_spi_mid_init,
+       .num_cs = 32,
+       .bus_num = 0,
+};
+
+static struct spi_pci_desc spi_pci_mid_desc_2 = {
+       .setup = dw_spi_mid_init,
+       .num_cs = 4,
+       .bus_num = 1,
 };
 
 static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -65,18 +75,23 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        dws->regs = pcim_iomap_table(pdev)[pci_bar];
 
-       dws->bus_num = 0;
-       dws->num_cs = 4;
        dws->irq = pdev->irq;
 
        /*
         * Specific handling for platforms, like dma setup,
         * clock rate, FIFO depth.
         */
-       if (desc && desc->setup) {
-               ret = desc->setup(dws);
-               if (ret)
-                       return ret;
+       if (desc) {
+               dws->num_cs = desc->num_cs;
+               dws->bus_num = desc->bus_num;
+
+               if (desc->setup) {
+                       ret = desc->setup(dws);
+                       if (ret)
+                               return ret;
+               }
+       } else {
+               return -ENODEV;
        }
 
        ret = dw_spi_add_host(&pdev->dev, dws);
@@ -121,7 +136,14 @@ static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume);
 
 static const struct pci_device_id pci_ids[] = {
        /* Intel MID platform SPI controller 0 */
-       { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc},
+       /*
+        * The access to the device 8086:0801 is disabled by HW, since it's
+        * exclusively used by SCU to communicate with MSIC.
+        */
+       /* Intel MID platform SPI controller 1 */
+       { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
+       /* Intel MID platform SPI controller 2 */
+       { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
        {},
 };
 
index 8edcd1b84562109799281fb48867df8ce73ac7b2..5a97a62b298ac1a526d4c1bde932f3eb2d5ad574 100644 (file)
@@ -608,7 +608,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
 }
 
 /* Restart the controller, disable all interrupts, clean rx fifo */
-static void spi_hw_init(struct dw_spi *dws)
+static void spi_hw_init(struct device *dev, struct dw_spi *dws)
 {
        spi_enable_chip(dws, 0);
        spi_mask_intr(dws, 0xff);
@@ -626,9 +626,10 @@ static void spi_hw_init(struct dw_spi *dws)
                        if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
                                break;
                }
+               dw_writew(dws, DW_SPI_TXFLTR, 0);
 
                dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
-               dw_writew(dws, DW_SPI_TXFLTR, 0);
+               dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
        }
 }
 
@@ -668,7 +669,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        master->dev.of_node = dev->of_node;
 
        /* Basic HW init */
-       spi_hw_init(dws);
+       spi_hw_init(dev, dws);
 
        if (dws->dma_ops && dws->dma_ops->dma_init) {
                ret = dws->dma_ops->dma_init(dws);
@@ -731,7 +732,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
 {
        int ret;
 
-       spi_hw_init(dws);
+       spi_hw_init(&dws->master->dev, dws);
        ret = spi_master_resume(dws->master);
        if (ret)
                dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
index 912b9037e9cf511985ca15ce0364f64f2c57fbff..286b2c81fc6bb0d7500568df47753b47094da8a2 100644 (file)
@@ -353,16 +353,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
        return 0;
 }
 
-static int falcon_sflash_prepare_xfer(struct spi_master *master)
-{
-       return 0;
-}
-
-static int falcon_sflash_unprepare_xfer(struct spi_master *master)
-{
-       return 0;
-}
-
 static int falcon_sflash_xfer_one(struct spi_master *master,
                                        struct spi_message *m)
 {
@@ -420,9 +410,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
        master->mode_bits = SPI_MODE_3;
        master->flags = SPI_MASTER_HALF_DUPLEX;
        master->setup = falcon_sflash_setup;
-       master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
        master->transfer_one_message = falcon_sflash_xfer_one;
-       master->unprepare_transfer_hardware = falcon_sflash_unprepare_xfer;
        master->dev.of_node = pdev->dev.of_node;
 
        ret = devm_spi_register_master(&pdev->dev, master);
index e85ab1cb17a24a4decb61d28fc592a82f5b314d8..9c46a3058743b75228256f55e64b8419b49c1f3d 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/fsl_devices.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/spi/spi.h>
 #include <linux/types.h>
@@ -68,6 +69,7 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
                }
        }
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
 
 static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
 {
@@ -162,6 +164,7 @@ err_rx_dma:
                dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
        return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
 
 void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
 {
@@ -174,6 +177,7 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
                dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
        mspi->xfer_in_progress = NULL;
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
 
 void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
 {
@@ -198,6 +202,7 @@ void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
        else
                complete(&mspi->done);
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
 
 static void *fsl_spi_alloc_dummy_rx(void)
 {
@@ -375,6 +380,7 @@ err_pram:
        fsl_spi_free_dummy_rx();
        return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
 
 void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
 {
@@ -389,3 +395,6 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
        cpm_muram_free(cpm_muram_offset(mspi->pram));
        fsl_spi_free_dummy_rx();
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
+
+MODULE_LICENSE("GPL");
index 4cda994d3f40cf86116b85ff21e71f7d8e33ed4d..d1a39249704a7e3a16bcd861bc7f16f5a9ed02dd 100644 (file)
@@ -106,7 +106,7 @@ struct chip_data {
 };
 
 struct fsl_dspi {
-       struct spi_bitbang      bitbang;
+       struct spi_master       *master;
        struct platform_device  *pdev;
 
        struct regmap           *regmap;
@@ -114,6 +114,7 @@ struct fsl_dspi {
        struct clk              *clk;
 
        struct spi_transfer     *cur_transfer;
+       struct spi_message      *cur_msg;
        struct chip_data        *cur_chip;
        size_t                  len;
        void                    *tx;
@@ -123,6 +124,7 @@ struct fsl_dspi {
        char                    dataflags;
        u8                      cs;
        u16                     void_write_data;
+       u32                     cs_change;
 
        wait_queue_head_t       waitq;
        u32                     waitflags;
@@ -225,6 +227,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
                if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
                        /* last transfer in the transfer */
                        dspi_pushr |= SPI_PUSHR_EOQ;
+                       if ((dspi->cs_change) && (!dspi->len))
+                               dspi_pushr &= ~SPI_PUSHR_CONT;
                } else if (tx_word && (dspi->len == 1))
                        dspi_pushr |= SPI_PUSHR_EOQ;
 
@@ -246,6 +250,7 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
        int rx_count = 0;
        int rx_word = is_double_byte_mode(dspi);
        u16 d;
+
        while ((dspi->rx < dspi->rx_end)
                        && (rx_count < DSPI_FIFO_SIZE)) {
                if (rx_word) {
@@ -276,86 +281,89 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
        return rx_count;
 }
 
-static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
+static int dspi_transfer_one_message(struct spi_master *master,
+               struct spi_message *message)
 {
-       struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
-       dspi->cur_transfer = t;
-       dspi->cur_chip = spi_get_ctldata(spi);
-       dspi->cs = spi->chip_select;
-       dspi->void_write_data = dspi->cur_chip->void_write_data;
-
-       dspi->dataflags = 0;
-       dspi->tx = (void *)t->tx_buf;
-       dspi->tx_end = dspi->tx + t->len;
-       dspi->rx = t->rx_buf;
-       dspi->rx_end = dspi->rx + t->len;
-       dspi->len = t->len;
-
-       if (!dspi->rx)
-               dspi->dataflags |= TRAN_STATE_RX_VOID;
-
-       if (!dspi->tx)
-               dspi->dataflags |= TRAN_STATE_TX_VOID;
-
-       regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
-       regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val);
-       regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
-
-       if (t->speed_hz)
+       struct fsl_dspi *dspi = spi_master_get_devdata(master);
+       struct spi_device *spi = message->spi;
+       struct spi_transfer *transfer;
+       int status = 0;
+       message->actual_length = 0;
+
+       list_for_each_entry(transfer, &message->transfers, transfer_list) {
+               dspi->cur_transfer = transfer;
+               dspi->cur_msg = message;
+               dspi->cur_chip = spi_get_ctldata(spi);
+               dspi->cs = spi->chip_select;
+               if (dspi->cur_transfer->transfer_list.next
+                               == &dspi->cur_msg->transfers)
+                       transfer->cs_change = 1;
+               dspi->cs_change = transfer->cs_change;
+               dspi->void_write_data = dspi->cur_chip->void_write_data;
+
+               dspi->dataflags = 0;
+               dspi->tx = (void *)transfer->tx_buf;
+               dspi->tx_end = dspi->tx + transfer->len;
+               dspi->rx = transfer->rx_buf;
+               dspi->rx_end = dspi->rx + transfer->len;
+               dspi->len = transfer->len;
+
+               if (!dspi->rx)
+                       dspi->dataflags |= TRAN_STATE_RX_VOID;
+
+               if (!dspi->tx)
+                       dspi->dataflags |= TRAN_STATE_TX_VOID;
+
+               regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
+               regmap_update_bits(dspi->regmap, SPI_MCR,
+                               SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+                               SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
                regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
                                dspi->cur_chip->ctar_val);
+               if (transfer->speed_hz)
+                       regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+                                       dspi->cur_chip->ctar_val);
 
-       dspi_transfer_write(dspi);
+               regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
+               message->actual_length += dspi_transfer_write(dspi);
 
-       if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
-               dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
-       dspi->waitflags = 0;
-
-       return t->len - dspi->len;
-}
+               if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
+                       dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
+               dspi->waitflags = 0;
 
-static void dspi_chipselect(struct spi_device *spi, int value)
-{
-       struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
-       unsigned int pushr;
-
-       regmap_read(dspi->regmap, SPI_PUSHR, &pushr);
-
-       switch (value) {
-       case BITBANG_CS_ACTIVE:
-               pushr |= SPI_PUSHR_CONT;
-               break;
-       case BITBANG_CS_INACTIVE:
-               pushr &= ~SPI_PUSHR_CONT;
-               break;
+               if (transfer->delay_usecs)
+                       udelay(transfer->delay_usecs);
        }
 
-       regmap_write(dspi->regmap, SPI_PUSHR, pushr);
+       message->status = status;
+       spi_finalize_current_message(master);
+
+       return status;
 }
 
-static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+static int dspi_setup(struct spi_device *spi)
 {
        struct chip_data *chip;
        struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
        unsigned char br = 0, pbr = 0, fmsz = 0;
 
+       if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
+               fmsz = spi->bits_per_word - 1;
+       } else {
+               pr_err("Invalid wordsize\n");
+               return -ENODEV;
+       }
+
        /* Only alloc on first setup */
        chip = spi_get_ctldata(spi);
        if (chip == NULL) {
-               chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
-                                   GFP_KERNEL);
+               chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;
        }
 
        chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
                SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
-       if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
-               fmsz = spi->bits_per_word - 1;
-       } else {
-               pr_err("Invalid wordsize\n");
-               return -ENODEV;
-       }
 
        chip->void_write_data = 0;
 
@@ -374,34 +382,34 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
        return 0;
 }
 
-static int dspi_setup(struct spi_device *spi)
+static void dspi_cleanup(struct spi_device *spi)
 {
-       if (!spi->max_speed_hz)
-               return -EINVAL;
+       struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
+
+       dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
+                       spi->master->bus_num, spi->chip_select);
 
-       return dspi_setup_transfer(spi, NULL);
+       kfree(chip);
 }
 
 static irqreturn_t dspi_interrupt(int irq, void *dev_id)
 {
        struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
 
-       regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
+       struct spi_message *msg = dspi->cur_msg;
 
+       regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
        dspi_transfer_read(dspi);
 
        if (!dspi->len) {
                if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
                        regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
-                               SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
+                       SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
 
                dspi->waitflags = 1;
                wake_up_interruptible(&dspi->waitq);
-       } else {
-               dspi_transfer_write(dspi);
-
-               return IRQ_HANDLED;
-       }
+       } else
+               msg->actual_length += dspi_transfer_write(dspi);
 
        return IRQ_HANDLED;
 }
@@ -460,13 +468,14 @@ static int dspi_probe(struct platform_device *pdev)
 
        dspi = spi_master_get_devdata(master);
        dspi->pdev = pdev;
-       dspi->bitbang.master = master;
-       dspi->bitbang.chipselect = dspi_chipselect;
-       dspi->bitbang.setup_transfer = dspi_setup_transfer;
-       dspi->bitbang.txrx_bufs = dspi_txrx_transfer;
-       dspi->bitbang.master->setup = dspi_setup;
-       dspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+       dspi->master = master;
+
+       master->transfer = NULL;
+       master->setup = dspi_setup;
+       master->transfer_one_message = dspi_transfer_one_message;
+       master->dev.of_node = pdev->dev.of_node;
 
+       master->cleanup = dspi_cleanup;
        master->mode_bits = SPI_CPOL | SPI_CPHA;
        master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
                                        SPI_BPW_MASK(16);
@@ -525,7 +534,7 @@ static int dspi_probe(struct platform_device *pdev)
        init_waitqueue_head(&dspi->waitq);
        platform_set_drvdata(pdev, master);
 
-       ret = spi_bitbang_start(&dspi->bitbang);
+       ret = spi_register_master(master);
        if (ret != 0) {
                dev_err(&pdev->dev, "Problem registering DSPI master\n");
                goto out_clk_put;
@@ -547,9 +556,9 @@ static int dspi_remove(struct platform_device *pdev)
        struct fsl_dspi *dspi = spi_master_get_devdata(master);
 
        /* Disconnect from the SPI framework */
-       spi_bitbang_stop(&dspi->bitbang);
        clk_disable_unprepare(dspi->clk);
-       spi_master_put(dspi->bitbang.master);
+       spi_unregister_master(dspi->master);
+       spi_master_put(dspi->master);
 
        return 0;
 }
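
The DSPI conversion above replaces the spi_bitbang glue with the core message-pumping API: the driver now owns the loop over the message's transfers, keeps actual_length up to date, and hands the result back with spi_finalize_current_message(). A minimal sketch of that contract (the hardware access is stubbed out and the names are illustrative):

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/spi/spi.h>

/* Stand-in for the real FIFO write/read path. */
static int example_do_xfer(struct spi_device *spi, struct spi_transfer *t)
{
	return t->len;			/* pretend everything was moved */
}

static int example_transfer_one_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct spi_transfer *t;
	int status = 0;

	msg->actual_length = 0;
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		int done = example_do_xfer(msg->spi, t);

		if (done < 0) {
			status = done;
			break;
		}
		msg->actual_length += done;

		if (t->delay_usecs)
			udelay(t->delay_usecs);
	}

	msg->status = status;
	spi_finalize_current_message(master);

	return status;
}
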
index 446b737e153261f473008cb1ac774cc68e94dc1f..cb35d2f0d0e63cf0d5dcf2d7aaddb22c54590c4a 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/spi/spi.h>
 #ifdef CONFIG_FSL_SOC
@@ -35,7 +36,8 @@ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
        type *rx = mpc8xxx_spi->rx;                                       \
        *rx++ = (type)(data >> mpc8xxx_spi->rx_shift);                    \
        mpc8xxx_spi->rx = rx;                                             \
-}
+}                                                                        \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type);
 
 #define MPC8XXX_SPI_TX_BUF(type)                               \
 u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
@@ -47,7 +49,8 @@ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi)        \
        data = *tx++ << mpc8xxx_spi->tx_shift;                  \
        mpc8xxx_spi->tx = tx;                                   \
        return data;                                            \
-}
+}                                                              \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type);
 
 MPC8XXX_SPI_RX_BUF(u8)
 MPC8XXX_SPI_RX_BUF(u16)
@@ -60,6 +63,7 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
 {
        return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
 }
+EXPORT_SYMBOL_GPL(to_of_pinfo);
 
 const char *mpc8xxx_spi_strmode(unsigned int flags)
 {
@@ -75,6 +79,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
        }
        return "CPU";
 }
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode);
 
 void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
                        unsigned int irq)
@@ -102,13 +107,12 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
        mpc8xxx_spi->rx_shift = 0;
        mpc8xxx_spi->tx_shift = 0;
 
-       init_completion(&mpc8xxx_spi->done);
-
        master->bus_num = pdata->bus_num;
        master->num_chipselect = pdata->max_chipselect;
 
        init_completion(&mpc8xxx_spi->done);
 }
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
 
 int mpc8xxx_spi_remove(struct device *dev)
 {
@@ -127,6 +131,7 @@ int mpc8xxx_spi_remove(struct device *dev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove);
 
 int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
 {
@@ -173,3 +178,6 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
+
+MODULE_LICENSE("GPL");
index b4ed04e8862fd82017d42374d27c6aa4261d763d..1326a392adcad162dd303617a03bf675d40a7a68 100644 (file)
@@ -28,7 +28,7 @@ struct mpc8xxx_spi {
        /* rx & tx bufs from the spi_transfer */
        const void *tx;
        void *rx;
-#ifdef CONFIG_SPI_FSL_ESPI
+#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
        int len;
 #endif
 
@@ -68,7 +68,7 @@ struct mpc8xxx_spi {
 
        unsigned int flags;
 
-#ifdef CONFIG_SPI_FSL_SPI
+#if IS_ENABLED(CONFIG_SPI_FSL_SPI)
        int type;
        int native_chipselects;
        u8 max_bits_per_word;
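
The #ifdef -> IS_ENABLED() switch above matters because the Kconfig symbols are now tristate: a modular build defines CONFIG_SPI_FSL_SPI_MODULE rather than CONFIG_SPI_FSL_SPI, so a plain #ifdef would silently drop these fields. IS_ENABLED() covers both the built-in and the modular case, roughly as in this sketch:

#include <linux/kconfig.h>

struct example_spi_state {
#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)	/* true for =y and for =m */
	int len;
#endif

#ifdef CONFIG_SPI_FSL_SPI		/* true only for =y */
	int type;			/* would vanish in a =m build */
#endif
};
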
index aee4e7589568c7cb2e3f69c02e9f44db4ba3ffb5..1c34c9314c8a1fab3c5bae9948775467f66017a9 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -92,7 +88,7 @@ struct spi_gpio {
 
 /*----------------------------------------------------------------------*/
 
-static inline struct spi_gpio * __pure
+static inline struct spi_gpio *__pure
 spi_to_spi_gpio(const struct spi_device *spi)
 {
        const struct spi_bitbang        *bang;
@@ -103,7 +99,7 @@ spi_to_spi_gpio(const struct spi_device *spi)
        return spi_gpio;
 }
 
-static inline struct spi_gpio_platform_data * __pure
+static inline struct spi_gpio_platform_data *__pure
 spi_to_pdata(const struct spi_device *spi)
 {
        return &spi_to_spi_gpio(spi)->pdata;
index aad6683db81b9a0154d12d3fc71fd5d45fb8bfc3..c01567d53581c0dcdfc6fcb61e7f6e821d0e0b39 100644 (file)
@@ -160,16 +160,16 @@ static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
        unsigned int count = 0;
        u32 status;
 
-       while (count < max) {
+       while (count < max / 4) {
                spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
                status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
                if (status & SPFI_INTERRUPT_SDFUL)
                        break;
-               spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA);
-               count += 4;
+               spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
+               count++;
        }
 
-       return count;
+       return count * 4;
 }
 
 static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
@@ -196,17 +196,17 @@ static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
        unsigned int count = 0;
        u32 status;
 
-       while (count < max) {
+       while (count < max / 4) {
                spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
                            SPFI_INTERRUPT_CLEAR);
                status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
                if (!(status & SPFI_INTERRUPT_GDEX32BIT))
                        break;
-               buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
-               count += 4;
+               buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
+               count++;
        }
 
-       return count;
+       return count * 4;
 }
 
 static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
@@ -251,17 +251,15 @@ static int img_spfi_start_pio(struct spi_master *master,
               time_before(jiffies, timeout)) {
                unsigned int tx_count, rx_count;
 
-               switch (xfer->bits_per_word) {
-               case 32:
+               if (tx_bytes >= 4)
                        tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
-                       rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
-                       break;
-               case 8:
-               default:
+               else
                        tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
+
+               if (rx_bytes >= 4)
+                       rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
+               else
                        rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
-                       break;
-               }
 
                tx_buf += tx_count;
                rx_buf += rx_count;
@@ -331,14 +329,11 @@ static int img_spfi_start_dma(struct spi_master *master,
 
        if (xfer->rx_buf) {
                rxconf.direction = DMA_DEV_TO_MEM;
-               switch (xfer->bits_per_word) {
-               case 32:
+               if (xfer->len % 4 == 0) {
                        rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
                        rxconf.src_addr_width = 4;
                        rxconf.src_maxburst = 4;
-                       break;
-               case 8:
-               default:
+               } else {
                        rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
                        rxconf.src_addr_width = 1;
                        rxconf.src_maxburst = 4;
@@ -358,18 +353,14 @@ static int img_spfi_start_dma(struct spi_master *master,
 
        if (xfer->tx_buf) {
                txconf.direction = DMA_MEM_TO_DEV;
-               switch (xfer->bits_per_word) {
-               case 32:
+               if (xfer->len % 4 == 0) {
                        txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
                        txconf.dst_addr_width = 4;
                        txconf.dst_maxburst = 4;
-                       break;
-               case 8:
-               default:
+               } else {
                        txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
                        txconf.dst_addr_width = 1;
                        txconf.dst_maxburst = 4;
-                       break;
                }
                dmaengine_slave_config(spfi->tx_ch, &txconf);
 
@@ -508,9 +499,7 @@ static void img_spfi_set_cs(struct spi_device *spi, bool enable)
 static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
                             struct spi_transfer *xfer)
 {
-       if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE)
-               return true;
-       if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE)
+       if (xfer->len > SPFI_32BIT_FIFO_SIZE)
                return true;
        return false;
 }
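
A quick stand-alone check of the new bookkeeping in spfi_pio_write32()/spfi_pio_read32() above: the loop now counts 32-bit words while callers keep working in bytes, so the buffer is indexed by buf[count] and the byte total is rebuilt on return. The 10-byte example and the stubbed-out FIFO test below are purely illustrative:

#include <stdio.h>

static unsigned int write32_sketch(const unsigned int *buf, unsigned int max)
{
	unsigned int count = 0;

	while (count < max / 4) {       /* max is in bytes, count in words */
		/* the real driver breaks out here when the TX FIFO is full */
		(void)buf[count];       /* buf[count], not buf[count / 4] */
		count++;
	}
	return count * 4;               /* callers still expect bytes */
}

int main(void)
{
	unsigned int words[4] = { 1, 2, 3, 4 };

	printf("%u bytes queued of max 10\n", write32_sketch(words, 10)); /* 8 */
	return 0;
}
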
index 961b97d43b430914ed317b9623d06f940e39d797..6fea4af51c413f27640c626ad61a2bcdca0b6bac 100644 (file)
@@ -89,7 +89,6 @@ struct spi_imx_data {
 
        struct completion xfer_done;
        void __iomem *base;
-       int irq;
        struct clk *clk_per;
        struct clk *clk_ipg;
        unsigned long spi_clk;
@@ -823,6 +822,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
        struct dma_slave_config slave_config = {};
        int ret;
 
+       /* use pio mode for i.mx6dl chip TKT238285 */
+       if (of_machine_is_compatible("fsl,imx6dl"))
+               return 0;
+
        /* Prepare for TX DMA: */
        master->dma_tx = dma_request_slave_channel(dev, "tx");
        if (!master->dma_tx) {
@@ -892,6 +895,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 {
        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
        int ret;
+       unsigned long timeout;
        u32 dma;
        int left;
        struct spi_master *master = spi_imx->bitbang.master;
@@ -939,17 +943,17 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
        dma_async_issue_pending(master->dma_tx);
        dma_async_issue_pending(master->dma_rx);
        /* Wait SDMA to finish the data transfer.*/
-       ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
+       timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
                                                IMX_DMA_TIMEOUT);
-       if (!ret) {
+       if (!timeout) {
                pr_warn("%s %s: I/O Error in DMA TX\n",
                        dev_driver_string(&master->dev),
                        dev_name(&master->dev));
                dmaengine_terminate_all(master->dma_tx);
        } else {
-               ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
-                               IMX_DMA_TIMEOUT);
-               if (!ret) {
+               timeout = wait_for_completion_timeout(
+                               &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
+               if (!timeout) {
                        pr_warn("%s %s: I/O Error in DMA RX\n",
                                dev_driver_string(&master->dev),
                                dev_name(&master->dev));
@@ -964,9 +968,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
        spi_imx->dma_finished = 1;
        spi_imx->devtype_data->trigger(spi_imx);
 
-       if (!ret)
+       if (!timeout)
                ret = -ETIMEDOUT;
-       else if (ret > 0)
+       else
                ret = transfer->len;
 
        return ret;
@@ -1076,7 +1080,7 @@ static int spi_imx_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct spi_imx_data *spi_imx;
        struct resource *res;
-       int i, ret, num_cs;
+       int i, ret, num_cs, irq;
 
        if (!np && !mxc_platform_info) {
                dev_err(&pdev->dev, "can't get the platform data\n");
@@ -1143,16 +1147,16 @@ static int spi_imx_probe(struct platform_device *pdev)
                goto out_master_put;
        }
 
-       spi_imx->irq = platform_get_irq(pdev, 0);
-       if (spi_imx->irq < 0) {
-               ret = spi_imx->irq;
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               ret = irq;
                goto out_master_put;
        }
 
-       ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
+       ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
                               dev_name(&pdev->dev), spi_imx);
        if (ret) {
-               dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
+               dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
                goto out_master_put;
        }
 
index 41c5765be7469de4283ed1c2d9d47802abe17e48..ba72347cb99d9242742792f2c3692ff150b9a921 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/init.h>
index 1bbac0378bf7bcdc00d336428cfec4440f0701ce..5468fc70dbf8d06432ce1cc1e8b2ef20c96a5a72 100644 (file)
@@ -85,7 +85,7 @@ struct meson_spifc {
        struct device *dev;
 };
 
-static struct regmap_config spifc_regmap_config = {
+static const struct regmap_config spifc_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
index 4045a1e580e1c20f7a85b5cc27c98d4fc5dda51a..5b0e9a3e83f6944d90f23de597a3a9ff504e3a7b 100644 (file)
@@ -282,9 +282,8 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
        dmaengine_submit(desc);
        dma_async_issue_pending(ssp->dmach);
 
-       ret = wait_for_completion_timeout(&spi->c,
-                               msecs_to_jiffies(SSP_TIMEOUT));
-       if (!ret) {
+       if (!wait_for_completion_timeout(&spi->c,
+                                        msecs_to_jiffies(SSP_TIMEOUT))) {
                dev_err(ssp->dev, "DMA transfer timeout\n");
                ret = -ETIMEDOUT;
                dmaengine_terminate_all(ssp->dmach);
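
This hunk and the spi-imx change above converge on the same detail: wait_for_completion_timeout() returns an unsigned long that is 0 on timeout and the remaining jiffies otherwise, so funnelling it through a signed int named ret obscures the contract. A hedged kernel-style fragment of the resulting pattern (example_wait_for_dma() and the 1000 ms budget are invented for illustration, not taken from either driver):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* returns bytes transferred on success, -ETIMEDOUT if the DMA completion
 * never fired within the budget */
static int example_wait_for_dma(struct completion *done, int xfer_len)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done, msecs_to_jiffies(1000));
	if (!time_left)
		return -ETIMEDOUT;

	return xfer_len;
}
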
index 79399ae9c84c485718d6390c234278b8b5a4f235..d890d309dff9b553364654ae343f322c65d1c51a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index daf1ada5cd11a89e73be62ff059db220a76eac39..3c0844457c075d0c5f3ed98fce96dd7951888f05 100644 (file)
  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 3bc3cbabbbc0f350d8db5a370e6fb45d18a5468f..4df8942058deed3928e61a4b2bc4061c56eec7d7 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
  */
 
 #include <linux/kernel.h>
index 3dec9e0b99b83c242a68456aa88c2c10f485bb71..861664776672cfab25200b22dba1227d28af08d6 100644 (file)
 /* Runtime PM autosuspend timeout: PM is fairly light on this driver */
 #define SPI_AUTOSUSPEND_TIMEOUT                200
 
-#define ORION_NUM_CHIPSELECTS          1 /* only one slave is supported*/
+/* Some SoCs using this driver support up to 8 chip selects.
+ * It is up to the implementer to only use the chip selects
+ * that are available.
+ */
+#define ORION_NUM_CHIPSELECTS          8
+
 #define ORION_SPI_WAIT_RDY_MAX_LOOP    2000 /* in usec */
 
 #define ORION_SPI_IF_CTRL_REG          0x00
 #define ARMADA_SPI_CLK_PRESCALE_MASK   0xDF
 #define ORION_SPI_MODE_MASK            (ORION_SPI_MODE_CPOL | \
                                         ORION_SPI_MODE_CPHA)
+#define ORION_SPI_CS_MASK      0x1C
+#define ORION_SPI_CS_SHIFT     2
+#define ORION_SPI_CS(cs)       ((cs << ORION_SPI_CS_SHIFT) & \
+                                       ORION_SPI_CS_MASK)
 
 enum orion_spi_type {
        ORION_SPI,
@@ -215,9 +224,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
        return 0;
 }
 
-static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
+static void orion_spi_set_cs(struct spi_device *spi, bool enable)
 {
-       if (enable)
+       struct orion_spi *orion_spi;
+
+       orion_spi = spi_master_get_devdata(spi->master);
+
+       orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
+       orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
+                               ORION_SPI_CS(spi->chip_select));
+
+       /* Chip select logic is inverted from spi_set_cs */
+       if (!enable)
                orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
        else
                orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
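
A stand-alone look at the chip-select encoding introduced above: ORION_SPI_CS() places the select number in bits [4:2] of the interface control register, so the eight possible selects map onto 0x00-0x1c. Toy program, mirroring the macro but not the register semantics:

#include <stdio.h>

#define ORION_SPI_CS_MASK  0x1C
#define ORION_SPI_CS_SHIFT 2
#define ORION_SPI_CS(cs)   (((cs) << ORION_SPI_CS_SHIFT) & ORION_SPI_CS_MASK)

int main(void)
{
	int cs;

	for (cs = 0; cs < 8; cs++)
		printf("chip select %d -> IF_CTRL field 0x%02x\n",
		       cs, ORION_SPI_CS(cs));
	return 0;       /* prints 0x00, 0x04, ... 0x1c */
}
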
@@ -332,64 +350,31 @@ out:
        return xfer->len - count;
 }
 
-static int orion_spi_transfer_one_message(struct spi_master *master,
-                                          struct spi_message *m)
+static int orion_spi_transfer_one(struct spi_master *master,
+                                       struct spi_device *spi,
+                                       struct spi_transfer *t)
 {
-       struct orion_spi *orion_spi = spi_master_get_devdata(master);
-       struct spi_device *spi = m->spi;
-       struct spi_transfer *t = NULL;
-       int par_override = 0;
        int status = 0;
-       int cs_active = 0;
-
-       /* Load defaults */
-       status = orion_spi_setup_transfer(spi, NULL);
 
+       status = orion_spi_setup_transfer(spi, t);
        if (status < 0)
-               goto msg_done;
-
-       list_for_each_entry(t, &m->transfers, transfer_list) {
-               if (par_override || t->speed_hz || t->bits_per_word) {
-                       par_override = 1;
-                       status = orion_spi_setup_transfer(spi, t);
-                       if (status < 0)
-                               break;
-                       if (!t->speed_hz && !t->bits_per_word)
-                               par_override = 0;
-               }
-
-               if (!cs_active) {
-                       orion_spi_set_cs(orion_spi, 1);
-                       cs_active = 1;
-               }
+               return status;
 
-               if (t->len)
-                       m->actual_length += orion_spi_write_read(spi, t);
+       if (t->len)
+               orion_spi_write_read(spi, t);
 
-               if (t->delay_usecs)
-                       udelay(t->delay_usecs);
-
-               if (t->cs_change) {
-                       orion_spi_set_cs(orion_spi, 0);
-                       cs_active = 0;
-               }
-       }
-
-msg_done:
-       if (cs_active)
-               orion_spi_set_cs(orion_spi, 0);
-
-       m->status = status;
-       spi_finalize_current_message(master);
+       return status;
+}
 
-       return 0;
+static int orion_spi_setup(struct spi_device *spi)
+{
+       return orion_spi_setup_transfer(spi, NULL);
 }
 
 static int orion_spi_reset(struct orion_spi *orion_spi)
 {
        /* Verify that the CS is deasserted */
-       orion_spi_set_cs(orion_spi, 0);
-
+       orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
        return 0;
 }
 
@@ -442,9 +427,10 @@ static int orion_spi_probe(struct platform_device *pdev)
 
        /* we support only mode 0, and no options */
        master->mode_bits = SPI_CPHA | SPI_CPOL;
-
-       master->transfer_one_message = orion_spi_transfer_one_message;
+       master->set_cs = orion_spi_set_cs;
+       master->transfer_one = orion_spi_transfer_one;
        master->num_chipselect = ORION_NUM_CHIPSELECTS;
+       master->setup = orion_spi_setup;
        master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
        master->auto_runtime_pm = true;
 
index 62a9297e96acdb74576378b1d2e9e2184cf2127e..66a173939be81e5f4b944b287991c7c99957ae49 100644 (file)
@@ -111,23 +111,24 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
         * by using ->dma_running.
         */
        if (atomic_dec_and_test(&drv_data->dma_running)) {
-               void __iomem *reg = drv_data->ioaddr;
-
                /*
                 * If the other CPU is still handling the ROR interrupt we
                 * might not know about the error yet. So we re-check the
                 * ROR bit here before we clear the status register.
                 */
                if (!error) {
-                       u32 status = read_SSSR(reg) & drv_data->mask_sr;
+                       u32 status = pxa2xx_spi_read(drv_data, SSSR)
+                                    & drv_data->mask_sr;
                        error = status & SSSR_ROR;
                }
 
                /* Clear status & disable interrupts */
-               write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+               pxa2xx_spi_write(drv_data, SSCR1,
+                                pxa2xx_spi_read(drv_data, SSCR1)
+                                & ~drv_data->dma_cr1);
                write_SSSR_CS(drv_data, drv_data->clear_sr);
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(0, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, 0);
 
                if (!error) {
                        pxa2xx_spi_unmap_dma_buffers(drv_data);
@@ -139,7 +140,9 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                        msg->state = pxa2xx_spi_next_transfer(drv_data);
                } else {
                        /* In case we got an error we disable the SSP now */
-                       write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+                       pxa2xx_spi_write(drv_data, SSCR0,
+                                        pxa2xx_spi_read(drv_data, SSCR0)
+                                        & ~SSCR0_SSE);
 
                        msg->state = ERROR_STATE;
                }
@@ -247,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
 {
        u32 status;
 
-       status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
+       status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
        if (status & SSSR_ROR) {
                dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
 
index e8a26f25d5c0a1464ab259c545e2b1d2031a9741..2e0796a0003f470a56508180be7c75a7a505983f 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/delay.h>
@@ -25,6 +21,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/pxa2xx_spi.h>
 
+#include <mach/dma.h>
 #include "spi-pxa2xx.h"
 
 #define DMA_INT_MASK           (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
@@ -118,11 +115,11 @@ static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
        drv_data->dma_mapped = 0;
 }
 
-static int wait_ssp_rx_stall(void const __iomem *ioaddr)
+static int wait_ssp_rx_stall(struct driver_data *drv_data)
 {
        unsigned long limit = loops_per_jiffy << 1;
 
-       while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
                cpu_relax();
 
        return limit;
@@ -141,17 +138,18 @@ static int wait_dma_channel_stop(int channel)
 static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
                                      const char *msg)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        /* Stop and reset */
        DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
        DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
        write_SSSR_CS(drv_data, drv_data->clear_sr);
-       write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+       pxa2xx_spi_write(drv_data, SSCR1,
+                        pxa2xx_spi_read(drv_data, SSCR1)
+                        & ~drv_data->dma_cr1);
        if (!pxa25x_ssp_comp(drv_data))
-               write_SSTO(0, reg);
+               pxa2xx_spi_write(drv_data, SSTO, 0);
        pxa2xx_spi_flush(drv_data);
-       write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+       pxa2xx_spi_write(drv_data, SSCR0,
+                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
 
        pxa2xx_spi_unmap_dma_buffers(drv_data);
 
@@ -163,11 +161,12 @@ static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
 
 static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        struct spi_message *msg = drv_data->cur_msg;
 
        /* Clear and disable interrupts on SSP and DMA channels*/
-       write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+       pxa2xx_spi_write(drv_data, SSCR1,
+                        pxa2xx_spi_read(drv_data, SSCR1)
+                        & ~drv_data->dma_cr1);
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
        DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
@@ -228,7 +227,7 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
                && (drv_data->ssp_type == PXA25x_SSP)) {
 
                /* Wait for rx to stall */
-               if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+               if (wait_ssp_rx_stall(drv_data) == 0)
                        dev_err(&drv_data->pdev->dev,
                                "dma_handler: ssp rx stall failed\n");
 
@@ -240,9 +239,8 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
 irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
 {
        u32 irq_status;
-       void __iomem *reg = drv_data->ioaddr;
 
-       irq_status = read_SSSR(reg) & drv_data->mask_sr;
+       irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
        if (irq_status & SSSR_ROR) {
                pxa2xx_spi_dma_error_stop(drv_data,
                                          "dma_transfer: fifo overrun");
@@ -252,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
        /* Check for false positive timeout */
        if ((irq_status & SSSR_TINT)
                && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
-               write_SSSR(SSSR_TINT, reg);
+               pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
                return IRQ_HANDLED;
        }
 
@@ -261,7 +259,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
                /* Clear and disable timeout interrupt, do the rest in
                 * dma_transfer_complete */
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(0, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, 0);
 
                /* finish this transfer, start the next */
                pxa2xx_spi_dma_transfer_complete(drv_data);
index 23822e7df6c1c6e1e2caa18ea19cfcb069c3796d..6f72ad01e0410257a42bc8739f8962abfbaf3b5e 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/init.h>
@@ -45,8 +41,6 @@ MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:pxa2xx-spi");
 
-#define MAX_BUSES 3
-
 #define TIMOUT_DFLT            1000
 
 /*
@@ -162,7 +156,6 @@ pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
 
 static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        u32 mask;
 
        switch (drv_data->ssp_type) {
@@ -174,7 +167,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
                break;
        }
 
-       return (read_SSSR(reg) & mask) == mask;
+       return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
 }
 
 static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
@@ -253,9 +246,6 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
        unsigned offset = 0x400;
        u32 value, orig;
 
-       if (!is_lpss_ssp(drv_data))
-               return;
-
        /*
         * Perform auto-detection of the LPSS SSP private registers. They
         * can be either at 1k or 2k offset from the base address.
@@ -304,9 +294,6 @@ static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
 {
        u32 value;
 
-       if (!is_lpss_ssp(drv_data))
-               return;
-
        value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
        if (enable)
                value &= ~SPI_CS_CONTROL_CS_HIGH;
@@ -320,7 +307,7 @@ static void cs_assert(struct driver_data *drv_data)
        struct chip_data *chip = drv_data->cur_chip;
 
        if (drv_data->ssp_type == CE4100_SSP) {
-               write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
+               pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
                return;
        }
 
@@ -334,7 +321,8 @@ static void cs_assert(struct driver_data *drv_data)
                return;
        }
 
-       lpss_ssp_cs_control(drv_data, true);
+       if (is_lpss_ssp(drv_data))
+               lpss_ssp_cs_control(drv_data, true);
 }
 
 static void cs_deassert(struct driver_data *drv_data)
@@ -354,20 +342,18 @@ static void cs_deassert(struct driver_data *drv_data)
                return;
        }
 
-       lpss_ssp_cs_control(drv_data, false);
+       if (is_lpss_ssp(drv_data))
+               lpss_ssp_cs_control(drv_data, false);
 }
 
 int pxa2xx_spi_flush(struct driver_data *drv_data)
 {
        unsigned long limit = loops_per_jiffy << 1;
 
-       void __iomem *reg = drv_data->ioaddr;
-
        do {
-               while (read_SSSR(reg) & SSSR_RNE) {
-                       read_SSDR(reg);
-               }
-       } while ((read_SSSR(reg) & SSSR_BSY) && --limit);
+               while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+                       pxa2xx_spi_read(drv_data, SSDR);
+       } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
        write_SSSR_CS(drv_data, SSSR_ROR);
 
        return limit;
@@ -375,14 +361,13 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
 
 static int null_writer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        u8 n_bytes = drv_data->n_bytes;
 
        if (pxa2xx_spi_txfifo_full(drv_data)
                || (drv_data->tx == drv_data->tx_end))
                return 0;
 
-       write_SSDR(0, reg);
+       pxa2xx_spi_write(drv_data, SSDR, 0);
        drv_data->tx += n_bytes;
 
        return 1;
@@ -390,12 +375,11 @@ static int null_writer(struct driver_data *drv_data)
 
 static int null_reader(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        u8 n_bytes = drv_data->n_bytes;
 
-       while ((read_SSSR(reg) & SSSR_RNE)
-               && (drv_data->rx < drv_data->rx_end)) {
-               read_SSDR(reg);
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+              && (drv_data->rx < drv_data->rx_end)) {
+               pxa2xx_spi_read(drv_data, SSDR);
                drv_data->rx += n_bytes;
        }
 
@@ -404,13 +388,11 @@ static int null_reader(struct driver_data *drv_data)
 
 static int u8_writer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        if (pxa2xx_spi_txfifo_full(drv_data)
                || (drv_data->tx == drv_data->tx_end))
                return 0;
 
-       write_SSDR(*(u8 *)(drv_data->tx), reg);
+       pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
        ++drv_data->tx;
 
        return 1;
@@ -418,11 +400,9 @@ static int u8_writer(struct driver_data *drv_data)
 
 static int u8_reader(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
-       while ((read_SSSR(reg) & SSSR_RNE)
-               && (drv_data->rx < drv_data->rx_end)) {
-               *(u8 *)(drv_data->rx) = read_SSDR(reg);
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+              && (drv_data->rx < drv_data->rx_end)) {
+               *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
                ++drv_data->rx;
        }
 
@@ -431,13 +411,11 @@ static int u8_reader(struct driver_data *drv_data)
 
 static int u16_writer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        if (pxa2xx_spi_txfifo_full(drv_data)
                || (drv_data->tx == drv_data->tx_end))
                return 0;
 
-       write_SSDR(*(u16 *)(drv_data->tx), reg);
+       pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
        drv_data->tx += 2;
 
        return 1;
@@ -445,11 +423,9 @@ static int u16_writer(struct driver_data *drv_data)
 
 static int u16_reader(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
-       while ((read_SSSR(reg) & SSSR_RNE)
-               && (drv_data->rx < drv_data->rx_end)) {
-               *(u16 *)(drv_data->rx) = read_SSDR(reg);
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+              && (drv_data->rx < drv_data->rx_end)) {
+               *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
                drv_data->rx += 2;
        }
 
@@ -458,13 +434,11 @@ static int u16_reader(struct driver_data *drv_data)
 
 static int u32_writer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        if (pxa2xx_spi_txfifo_full(drv_data)
                || (drv_data->tx == drv_data->tx_end))
                return 0;
 
-       write_SSDR(*(u32 *)(drv_data->tx), reg);
+       pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
        drv_data->tx += 4;
 
        return 1;
@@ -472,11 +446,9 @@ static int u32_writer(struct driver_data *drv_data)
 
 static int u32_reader(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
-       while ((read_SSSR(reg) & SSSR_RNE)
-               && (drv_data->rx < drv_data->rx_end)) {
-               *(u32 *)(drv_data->rx) = read_SSDR(reg);
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+              && (drv_data->rx < drv_data->rx_end)) {
+               *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
                drv_data->rx += 4;
        }
 
@@ -552,27 +524,25 @@ static void giveback(struct driver_data *drv_data)
 
 static void reset_sccr1(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        struct chip_data *chip = drv_data->cur_chip;
        u32 sccr1_reg;
 
-       sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
+       sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
        sccr1_reg &= ~SSCR1_RFT;
        sccr1_reg |= chip->threshold;
-       write_SSCR1(sccr1_reg, reg);
+       pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
 }
 
 static void int_error_stop(struct driver_data *drv_data, const char* msg)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        /* Stop and reset SSP */
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        reset_sccr1(drv_data);
        if (!pxa25x_ssp_comp(drv_data))
-               write_SSTO(0, reg);
+               pxa2xx_spi_write(drv_data, SSTO, 0);
        pxa2xx_spi_flush(drv_data);
-       write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+       pxa2xx_spi_write(drv_data, SSCR0,
+                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
 
        dev_err(&drv_data->pdev->dev, "%s\n", msg);
 
@@ -582,13 +552,11 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
 
 static void int_transfer_complete(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        /* Stop SSP */
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        reset_sccr1(drv_data);
        if (!pxa25x_ssp_comp(drv_data))
-               write_SSTO(0, reg);
+               pxa2xx_spi_write(drv_data, SSTO, 0);
 
        /* Update total byte transferred return count actual bytes read */
        drv_data->cur_msg->actual_length += drv_data->len -
@@ -607,12 +575,10 @@ static void int_transfer_complete(struct driver_data *drv_data)
 
 static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
+       u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
+                      drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
 
-       u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
-                       drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
-
-       u32 irq_status = read_SSSR(reg) & irq_mask;
+       u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
 
        if (irq_status & SSSR_ROR) {
                int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
@@ -620,7 +586,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
        }
 
        if (irq_status & SSSR_TINT) {
-               write_SSSR(SSSR_TINT, reg);
+               pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
                if (drv_data->read(drv_data)) {
                        int_transfer_complete(drv_data);
                        return IRQ_HANDLED;
@@ -644,7 +610,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
                u32 bytes_left;
                u32 sccr1_reg;
 
-               sccr1_reg = read_SSCR1(reg);
+               sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
                sccr1_reg &= ~SSCR1_TIE;
 
                /*
@@ -670,7 +636,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 
                        pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
                }
-               write_SSCR1(sccr1_reg, reg);
+               pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
        }
 
        /* We did something */
@@ -680,7 +646,6 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 static irqreturn_t ssp_int(int irq, void *dev_id)
 {
        struct driver_data *drv_data = dev_id;
-       void __iomem *reg = drv_data->ioaddr;
        u32 sccr1_reg;
        u32 mask = drv_data->mask_sr;
        u32 status;
@@ -700,11 +665,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
         * are all set to one. That means that the device is already
         * powered off.
         */
-       status = read_SSSR(reg);
+       status = pxa2xx_spi_read(drv_data, SSSR);
        if (status == ~0)
                return IRQ_NONE;
 
-       sccr1_reg = read_SSCR1(reg);
+       sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
 
        /* Ignore possible writes if we don't need to write */
        if (!(sccr1_reg & SSCR1_TIE))
@@ -715,10 +680,14 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
 
        if (!drv_data->cur_msg) {
 
-               write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
-               write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+               pxa2xx_spi_write(drv_data, SSCR0,
+                                pxa2xx_spi_read(drv_data, SSCR0)
+                                & ~SSCR0_SSE);
+               pxa2xx_spi_write(drv_data, SSCR1,
+                                pxa2xx_spi_read(drv_data, SSCR1)
+                                & ~drv_data->int_cr1);
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(0, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, 0);
                write_SSSR_CS(drv_data, drv_data->clear_sr);
 
                dev_err(&drv_data->pdev->dev,
@@ -787,7 +756,6 @@ static void pump_transfers(unsigned long data)
        struct spi_transfer *transfer = NULL;
        struct spi_transfer *previous = NULL;
        struct chip_data *chip = NULL;
-       void __iomem *reg = drv_data->ioaddr;
        u32 clk_div = 0;
        u8 bits = 0;
        u32 speed = 0;
@@ -931,7 +899,7 @@ static void pump_transfers(unsigned long data)
 
                /* Clear status and start DMA engine */
                cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
-               write_SSSR(drv_data->clear_sr, reg);
+               pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
 
                pxa2xx_spi_dma_start(drv_data);
        } else {
@@ -944,39 +912,43 @@ static void pump_transfers(unsigned long data)
        }
 
        if (is_lpss_ssp(drv_data)) {
-               if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
-                       write_SSIRF(chip->lpss_rx_threshold, reg);
-               if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
-                       write_SSITF(chip->lpss_tx_threshold, reg);
+               if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
+                   != chip->lpss_rx_threshold)
+                       pxa2xx_spi_write(drv_data, SSIRF,
+                                        chip->lpss_rx_threshold);
+               if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
+                   != chip->lpss_tx_threshold)
+                       pxa2xx_spi_write(drv_data, SSITF,
+                                        chip->lpss_tx_threshold);
        }
 
        if (is_quark_x1000_ssp(drv_data) &&
-           (read_DDS_RATE(reg) != chip->dds_rate))
-               write_DDS_RATE(chip->dds_rate, reg);
+           (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
+               pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
 
        /* see if we need to reload the config registers */
-       if ((read_SSCR0(reg) != cr0) ||
-           (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) {
-
+       if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
+           || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
+           != (cr1 & change_mask)) {
                /* stop the SSP, and update the other bits */
-               write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+               pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(chip->timeout, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
                /* first set CR1 without interrupt and service enables */
-               write_SSCR1(cr1 & change_mask, reg);
+               pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
                /* restart the SSP */
-               write_SSCR0(cr0, reg);
+               pxa2xx_spi_write(drv_data, SSCR0, cr0);
 
        } else {
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(chip->timeout, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
        }
 
        cs_assert(drv_data);
 
        /* after chip select, release the data by enabling service
         * requests and interrupts, without changing any mode bits */
-       write_SSCR1(cr1, reg);
+       pxa2xx_spi_write(drv_data, SSCR1, cr1);
 }
 
 static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
@@ -1005,8 +977,8 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
        struct driver_data *drv_data = spi_master_get_devdata(master);
 
        /* Disable the SSP now */
-       write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
-                   drv_data->ioaddr);
+       pxa2xx_spi_write(drv_data, SSCR0,
+                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
 
        return 0;
 }
@@ -1289,6 +1261,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
        struct driver_data *drv_data;
        struct ssp_device *ssp;
        int status;
+       u32 tmp;
 
        platform_info = dev_get_platdata(dev);
        if (!platform_info) {
@@ -1386,38 +1359,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
        drv_data->max_clk_rate = clk_get_rate(ssp->clk);
 
        /* Load default SSP configuration */
-       write_SSCR0(0, drv_data->ioaddr);
+       pxa2xx_spi_write(drv_data, SSCR0, 0);
        switch (drv_data->ssp_type) {
        case QUARK_X1000_SSP:
-               write_SSCR1(QUARK_X1000_SSCR1_RxTresh(
-                                       RX_THRESH_QUARK_X1000_DFLT) |
-                           QUARK_X1000_SSCR1_TxTresh(
-                                       TX_THRESH_QUARK_X1000_DFLT),
-                           drv_data->ioaddr);
+               tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
+                     | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
+               pxa2xx_spi_write(drv_data, SSCR1, tmp);
 
                /* using the Motorola SPI protocol and use 8 bit frame */
-               write_SSCR0(QUARK_X1000_SSCR0_Motorola
-                           | QUARK_X1000_SSCR0_DataSize(8),
-                           drv_data->ioaddr);
+               pxa2xx_spi_write(drv_data, SSCR0,
+                                QUARK_X1000_SSCR0_Motorola
+                                | QUARK_X1000_SSCR0_DataSize(8));
                break;
        default:
-               write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
-                           SSCR1_TxTresh(TX_THRESH_DFLT),
-                           drv_data->ioaddr);
-               write_SSCR0(SSCR0_SCR(2)
-                           | SSCR0_Motorola
-                           | SSCR0_DataSize(8),
-                           drv_data->ioaddr);
+               tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
+                     SSCR1_TxTresh(TX_THRESH_DFLT);
+               pxa2xx_spi_write(drv_data, SSCR1, tmp);
+               tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
+               pxa2xx_spi_write(drv_data, SSCR0, tmp);
                break;
        }
 
        if (!pxa25x_ssp_comp(drv_data))
-               write_SSTO(0, drv_data->ioaddr);
+               pxa2xx_spi_write(drv_data, SSTO, 0);
 
        if (!is_quark_x1000_ssp(drv_data))
-               write_SSPSP(0, drv_data->ioaddr);
+               pxa2xx_spi_write(drv_data, SSPSP, 0);
 
-       lpss_ssp_setup(drv_data);
+       if (is_lpss_ssp(drv_data))
+               lpss_ssp_setup(drv_data);
 
        tasklet_init(&drv_data->pump_transfers, pump_transfers,
                     (unsigned long)drv_data);
@@ -1460,7 +1430,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        /* Disable the SSP at the peripheral and SOC level */
-       write_SSCR0(0, drv_data->ioaddr);
+       pxa2xx_spi_write(drv_data, SSCR0, 0);
        clk_disable_unprepare(ssp->clk);
 
        /* Release DMA */
@@ -1497,7 +1467,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
        status = spi_master_suspend(drv_data->master);
        if (status != 0)
                return status;
-       write_SSCR0(0, drv_data->ioaddr);
+       pxa2xx_spi_write(drv_data, SSCR0, 0);
 
        if (!pm_runtime_suspended(dev))
                clk_disable_unprepare(ssp->clk);
@@ -1518,7 +1488,8 @@ static int pxa2xx_spi_resume(struct device *dev)
                clk_prepare_enable(ssp->clk);
 
        /* Restore LPSS private register bits */
-       lpss_ssp_setup(drv_data);
+       if (is_lpss_ssp(drv_data))
+               lpss_ssp_setup(drv_data);
 
        /* Start the queue running */
        status = spi_master_resume(drv_data->master);
index 6bec59c90cd4be52d4772b5f953c6fa00bb7c173..85a58c9068694fa7a8b80629a455f3b3125efe12 100644 (file)
@@ -115,23 +115,17 @@ struct chip_data {
        void (*cs_control)(u32 command);
 };
 
-#define DEFINE_SSP_REG(reg, off) \
-static inline u32 read_##reg(void const __iomem *p) \
-{ return __raw_readl(p + (off)); } \
-\
-static inline void write_##reg(u32 v, void __iomem *p) \
-{ __raw_writel(v, p + (off)); }
-
-DEFINE_SSP_REG(SSCR0, 0x00)
-DEFINE_SSP_REG(SSCR1, 0x04)
-DEFINE_SSP_REG(SSSR, 0x08)
-DEFINE_SSP_REG(SSITR, 0x0c)
-DEFINE_SSP_REG(SSDR, 0x10)
-DEFINE_SSP_REG(DDS_RATE, 0x28)  /* DDS Clock Rate */
-DEFINE_SSP_REG(SSTO, 0x28)
-DEFINE_SSP_REG(SSPSP, 0x2c)
-DEFINE_SSP_REG(SSITF, SSITF)
-DEFINE_SSP_REG(SSIRF, SSIRF)
+static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
+                                 unsigned reg)
+{
+       return __raw_readl(drv_data->ioaddr + reg);
+}
+
+static  inline void pxa2xx_spi_write(const struct driver_data *drv_data,
+                                    unsigned reg, u32 val)
+{
+       __raw_writel(val, drv_data->ioaddr + reg);
+}
 
 #define START_STATE ((void *)0)
 #define RUNNING_STATE ((void *)1)
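
The header change above replaces the per-register DEFINE_SSP_REG() wrappers with one offset-based accessor pair, and the call-site hunks earlier use it in a read-modify-write shape throughout. A toy, self-contained imitation of that idiom (plain memory stands in for the ioremapped SSP block; offsets and the SSE bit are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SSCR0      0x00          /* byte offset, as in the driver */
#define SSCR0_SSE  (1u << 7)     /* illustrative "port enable" bit */

struct fake_drv {
	uint32_t regs[16];       /* stands in for drv_data->ioaddr */
};

static uint32_t spi_read(struct fake_drv *d, unsigned int reg)
{
	return d->regs[reg / 4];
}

static void spi_write(struct fake_drv *d, unsigned int reg, uint32_t val)
{
	d->regs[reg / 4] = val;
}

int main(void)
{
	struct fake_drv d = { { 0 } };

	spi_write(&d, SSCR0, SSCR0_SSE | 0x7);
	/* the read-modify-write shape used at the converted call sites */
	spi_write(&d, SSCR0, spi_read(&d, SSCR0) & ~SSCR0_SSE);
	printf("SSCR0 = 0x%x\n", spi_read(&d, SSCR0));   /* 0x7 */
	return 0;
}
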
@@ -155,13 +149,11 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
 
 static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        if (drv_data->ssp_type == CE4100_SSP ||
            drv_data->ssp_type == QUARK_X1000_SSP)
-               val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
+               val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
 
-       write_SSSR(val, reg);
+       pxa2xx_spi_write(drv_data, SSSR, val);
 }
 
 extern int pxa2xx_spi_flush(struct driver_data *drv_data);
index e7fb5a0d2e8dc35900099cbc471f66b58b3fa088..ff9cdbdb6672371df54b6c59ce54579a46758602 100644 (file)
@@ -337,7 +337,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 {
        struct spi_qup *controller = spi_master_get_devdata(spi->master);
-       u32 config, iomode, mode;
+       u32 config, iomode, mode, control;
        int ret, n_words, w_size;
 
        if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
@@ -392,6 +392,15 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 
        writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
 
+       control = readl_relaxed(controller->base + SPI_IO_CONTROL);
+
+       if (spi->mode & SPI_CPOL)
+               control |= SPI_IO_C_CLK_IDLE_HIGH;
+       else
+               control &= ~SPI_IO_C_CLK_IDLE_HIGH;
+
+       writel_relaxed(control, controller->base + SPI_IO_CONTROL);
+
        config = readl_relaxed(controller->base + SPI_CONFIG);
 
        if (spi->mode & SPI_LOOP)
index daabbabd26b051744fcc07417c53f4d0a8b03a4f..1a777dc261d6f5bfa2e56dc437fb7d957d2b0891 100644 (file)
@@ -437,6 +437,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
        rs->state &= ~TXBUSY;
        spin_unlock_irqrestore(&rs->lock, flags);
 
+       rxdesc = NULL;
        if (rs->rx) {
                rxconf.direction = rs->dma_rx.direction;
                rxconf.src_addr = rs->dma_rx.addr;
@@ -453,6 +454,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
                rxdesc->callback_param = rs;
        }
 
+       txdesc = NULL;
        if (rs->tx) {
                txconf.direction = rs->dma_tx.direction;
                txconf.dst_addr = rs->dma_tx.addr;
@@ -470,7 +472,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
        }
 
        /* rx must be started before tx due to spi instinct */
-       if (rs->rx) {
+       if (rxdesc) {
                spin_lock_irqsave(&rs->lock, flags);
                rs->state |= RXBUSY;
                spin_unlock_irqrestore(&rs->lock, flags);
@@ -478,7 +480,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
                dma_async_issue_pending(rs->dma_rx.ch);
        }
 
-       if (rs->tx) {
+       if (txdesc) {
                spin_lock_irqsave(&rs->lock, flags);
                rs->state |= TXBUSY;
                spin_unlock_irqrestore(&rs->lock, flags);
index 2071f788c6fb3b8b376cade7b7319f6779a5f1be..46ce47076e63d143f10b298f386877bb960b8f56 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/module.h>
index 37b19836f5cb45fe66eb484b398f84b8c775daa8..9231c34b5a5c73bc9c32175d346cdb4e9c232cd6 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/init.h>
index 237f2e7a717999087e464c41e329ad43e3ff3455..5a56acf8a43e697f6e569e1ee768bc3a9cf6120a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index fc29233d0650904f648a2d08ee9a37fcb5e25a7c..20e800e70442b59753f90957a67eb845abab9dbe 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/clk.h>
index 3ab7a21445fc253406eaf92abb87ce99974ce828..e57eec0b2f46a64f99ad104251baa505396b0555 100644 (file)
@@ -82,6 +82,8 @@ struct sh_msiof_spi_priv {
 #define MDR1_SYNCMD_LR  0x30000000 /*   L/R mode */
 #define MDR1_SYNCAC_SHIFT       25 /* Sync Polarity (1 = Active-low) */
 #define MDR1_BITLSB_SHIFT       24 /* MSB/LSB First (1 = LSB first) */
+#define MDR1_DTDL_SHIFT                 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define MDR1_SYNCDL_SHIFT       16 /* Frame Sync Signal Timing Delay */
 #define MDR1_FLD_MASK   0x0000000c /* Frame Sync Signal Interval (0-3) */
 #define MDR1_FLD_SHIFT           2
 #define MDR1_XXSTP      0x00000001 /* Transmission/Reception Stop on FIFO */
@@ -241,42 +243,80 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
 
 static struct {
        unsigned short div;
-       unsigned short scr;
-} const sh_msiof_spi_clk_table[] = {
-       { 1,    SCR_BRPS( 1) | SCR_BRDV_DIV_1 },
-       { 2,    SCR_BRPS( 1) | SCR_BRDV_DIV_2 },
-       { 4,    SCR_BRPS( 1) | SCR_BRDV_DIV_4 },
-       { 8,    SCR_BRPS( 1) | SCR_BRDV_DIV_8 },
-       { 16,   SCR_BRPS( 1) | SCR_BRDV_DIV_16 },
-       { 32,   SCR_BRPS( 1) | SCR_BRDV_DIV_32 },
-       { 64,   SCR_BRPS(32) | SCR_BRDV_DIV_2 },
-       { 128,  SCR_BRPS(32) | SCR_BRDV_DIV_4 },
-       { 256,  SCR_BRPS(32) | SCR_BRDV_DIV_8 },
-       { 512,  SCR_BRPS(32) | SCR_BRDV_DIV_16 },
-       { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
+       unsigned short brdv;
+} const sh_msiof_spi_div_table[] = {
+       { 1,    SCR_BRDV_DIV_1 },
+       { 2,    SCR_BRDV_DIV_2 },
+       { 4,    SCR_BRDV_DIV_4 },
+       { 8,    SCR_BRDV_DIV_8 },
+       { 16,   SCR_BRDV_DIV_16 },
+       { 32,   SCR_BRDV_DIV_32 },
 };
 
 static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
                                      unsigned long parent_rate, u32 spi_hz)
 {
        unsigned long div = 1024;
+       u32 brps, scr;
        size_t k;
 
        if (!WARN_ON(!spi_hz || !parent_rate))
                div = DIV_ROUND_UP(parent_rate, spi_hz);
 
-       /* TODO: make more fine grained */
-
-       for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) {
-               if (sh_msiof_spi_clk_table[k].div >= div)
+       for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
+               brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+               if (brps <= 32) /* max of brdv is 32 */
                        break;
        }
 
-       k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
+       k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
 
-       sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
+       scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
+       sh_msiof_write(p, TSCR, scr);
        if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
-               sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
+               sh_msiof_write(p, RSCR, scr);
+}
+
+static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
+{
+       /*
+        * DTDL/SYNCDL bit      : p->info->dtdl or p->info->syncdl
+        * b'000                : 0
+        * b'001                : 100
+        * b'010                : 200
+        * b'011 (SYNCDL only)  : 300
+        * b'101                : 50
+        * b'110                : 150
+        */
+       if (dtdl_or_syncdl % 100)
+               return dtdl_or_syncdl / 100 + 5;
+       else
+               return dtdl_or_syncdl / 100;
+}
+
+static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
+{
+       u32 val;
+
+       if (!p->info)
+               return 0;
+
+       /* check if DTDL and SYNCDL is allowed value */
+       if (p->info->dtdl > 200 || p->info->syncdl > 300) {
+               dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
+               return 0;
+       }
+
+       /* check if the sum of DTDL and SYNCDL becomes an integer value  */
+       if ((p->info->dtdl + p->info->syncdl) % 100) {
+               dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not good\n");
+               return 0;
+       }
+
+       val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
+       val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+
+       return val;
 }
 
 static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
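
To see what the reworked clock setup above buys, here is a stand-alone re-derivation of its divider search: pick the smallest BRDV divisor whose prescaler (BRPS) fits in 32 instead of rounding up to the next entry of a coarse table. The 52 MHz parent and 1 MHz target are arbitrary example numbers and the register encodings are left out:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int brdv_div[] = { 1, 2, 4, 8, 16, 32 };

int main(void)
{
	unsigned long parent = 52000000, target = 1000000;
	unsigned long div = (parent + target - 1) / target;   /* DIV_ROUND_UP */
	unsigned long brps = 0;
	unsigned int k;

	for (k = 0; k < ARRAY_SIZE(brdv_div); k++) {
		brps = (div + brdv_div[k] - 1) / brdv_div[k];
		if (brps <= 32)          /* largest prescaler the hardware takes */
			break;
	}
	if (k == ARRAY_SIZE(brdv_div))   /* clamp, like the driver's min_t() */
		k = ARRAY_SIZE(brdv_div) - 1;

	printf("div %lu -> BRDV /%u, BRPS %lu, actual %lu Hz\n",
	       div, brdv_div[k], brps, parent / (brdv_div[k] * brps));
	return 0;
}
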
@@ -296,6 +336,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
        tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
        tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
        tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+       tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
        sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
        if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
                /* These bits are reserved if RX needs TX */
@@ -501,7 +542,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
                gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
 
 
-       pm_runtime_put_sync(&p->pdev->dev);
+       pm_runtime_put(&p->pdev->dev);
 
        return 0;
 }
@@ -595,8 +636,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
        }
 
        /* wait for tx fifo to be emptied / rx fifo to be filled */
-       ret = wait_for_completion_timeout(&p->done, HZ);
-       if (!ret) {
+       if (!wait_for_completion_timeout(&p->done, HZ)) {
                dev_err(&p->pdev->dev, "PIO timeout\n");
                ret = -ETIMEDOUT;
                goto stop_reset;
@@ -706,8 +746,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
        }
 
        /* wait for tx fifo to be emptied / rx fifo to be filled */
-       ret = wait_for_completion_timeout(&p->done, HZ);
-       if (!ret) {
+       if (!wait_for_completion_timeout(&p->done, HZ)) {
                dev_err(&p->pdev->dev, "DMA timeout\n");
                ret = -ETIMEDOUT;
                goto stop_reset;
@@ -957,6 +996,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
                                        &info->tx_fifo_override);
        of_property_read_u32(np, "renesas,rx-fifo-size",
                                        &info->rx_fifo_override);
+       of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
+       of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
 
        info->num_chipselect = num_cs;
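
The new "renesas,dtdl" and "renesas,syncdl" properties read here feed sh_msiof_spi_get_dtdl_and_syncdl() above. As a worked example of the value-to-field mapping spelled out in the sh_msiof_get_delay_bit() comment (the accepted values come from that comment; the rest is scaffolding):

#include <stdio.h>

/* mirrors sh_msiof_get_delay_bit(): multiples of 100 map to 0..3,
 * the half-step values 50 and 150 map to 5 and 6 */
static unsigned int delay_bits(unsigned int d)
{
	return (d % 100) ? d / 100 + 5 : d / 100;
}

int main(void)
{
	static const unsigned int vals[] = { 0, 50, 100, 150, 200, 300 };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("dtdl/syncdl %3u -> field %u\n",
		       vals[i], delay_bits(vals[i]));
	return 0;  /* 0->0, 50->5, 100->1, 150->6, 200->2, 300->3 (SYNCDL only) */
}
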
 
index 1cfc906dd1741a4faaabb445fb2a2f0b586bb2a3..502501187c9e839ea1222bbffedf5e57acb17a6a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/module.h>
index d075191476f00218b271c6466c47a60614cb9bc6..f5715c9f68b0e0cb3dd2f7f568fed38798820796 100644 (file)
@@ -818,7 +818,6 @@ static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
 
 static const struct of_device_id spi_sirfsoc_of_match[] = {
        { .compatible = "sirf,prima2-spi", },
-       { .compatible = "sirf,marco-spi", },
        {}
 };
 MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
new file mode 100644 (file)
index 0000000..2faeaa7
--- /dev/null
@@ -0,0 +1,504 @@
+/*
+ *  Copyright (c) 2008-2014 STMicroelectronics Limited
+ *
+ *  Author: Angus Clark <Angus.Clark@st.com>
+ *          Patrice Chotard <patrice.chotard@st.com>
+ *          Lee Jones <lee.jones@linaro.org>
+ *
+ *  SPI master mode controller driver, used in STMicroelectronics devices.
+ *
+ *  May be copied or modified under the terms of the GNU General Public
+ *  License Version 2.0 only.  See linux/COPYING for more information.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+/* SSC registers */
+#define SSC_BRG                                0x000
+#define SSC_TBUF                       0x004
+#define SSC_RBUF                       0x008
+#define SSC_CTL                                0x00C
+#define SSC_IEN                                0x010
+#define SSC_I2C                                0x018
+
+/* SSC Control */
+#define SSC_CTL_DATA_WIDTH_9           0x8
+#define SSC_CTL_DATA_WIDTH_MSK         0xf
+#define SSC_CTL_BM                     0xf
+#define SSC_CTL_HB                     BIT(4)
+#define SSC_CTL_PH                     BIT(5)
+#define SSC_CTL_PO                     BIT(6)
+#define SSC_CTL_SR                     BIT(7)
+#define SSC_CTL_MS                     BIT(8)
+#define SSC_CTL_EN                     BIT(9)
+#define SSC_CTL_LPB                    BIT(10)
+#define SSC_CTL_EN_TX_FIFO             BIT(11)
+#define SSC_CTL_EN_RX_FIFO             BIT(12)
+#define SSC_CTL_EN_CLST_RX             BIT(13)
+
+/* SSC Interrupt Enable */
+#define SSC_IEN_TEEN                   BIT(2)
+
+#define FIFO_SIZE                      8
+
+struct spi_st {
+       /* SSC SPI Controller */
+       void __iomem            *base;
+       struct clk              *clk;
+       struct device           *dev;
+
+       /* SSC SPI current transaction */
+       const u8                *tx_ptr;
+       u8                      *rx_ptr;
+       u16                     bytes_per_word;
+       unsigned int            words_remaining;
+       unsigned int            baud;
+       struct completion       done;
+};
+
+static int spi_st_clk_enable(struct spi_st *spi_st)
+{
+       /*
+        * Current platforms use one of the core clocks for SPI and I2C.
+        * If we attempt to disable the clock, the system will hang.
+        *
+        * TODO: Remove this when platform supports power domains.
+        */
+       return 0;
+
+       return clk_prepare_enable(spi_st->clk);
+}
+
+static void spi_st_clk_disable(struct spi_st *spi_st)
+{
+       /*
+        * Current platforms use one of the core clocks for SPI and I2C.
+        * If we attempt to disable the clock, the system will hang.
+        *
+        * TODO: Remove this when platform supports power domains.
+        */
+       return;
+
+       clk_disable_unprepare(spi_st->clk);
+}
+
+/* Load the TX FIFO */
+static void ssc_write_tx_fifo(struct spi_st *spi_st)
+{
+       unsigned int count, i;
+       uint32_t word = 0;
+
+       if (spi_st->words_remaining > FIFO_SIZE)
+               count = FIFO_SIZE;
+       else
+               count = spi_st->words_remaining;
+
+       for (i = 0; i < count; i++) {
+               if (spi_st->tx_ptr) {
+                       if (spi_st->bytes_per_word == 1) {
+                               word = *spi_st->tx_ptr++;
+                       } else {
+                               word = *spi_st->tx_ptr++;
+                               word = *spi_st->tx_ptr++ | (word << 8);
+                       }
+               }
+               writel_relaxed(word, spi_st->base + SSC_TBUF);
+       }
+}
+
+/* Read the RX FIFO */
+static void ssc_read_rx_fifo(struct spi_st *spi_st)
+{
+       unsigned int count, i;
+       uint32_t word = 0;
+
+       if (spi_st->words_remaining > FIFO_SIZE)
+               count = FIFO_SIZE;
+       else
+               count = spi_st->words_remaining;
+
+       for (i = 0; i < count; i++) {
+               word = readl_relaxed(spi_st->base + SSC_RBUF);
+
+               if (spi_st->rx_ptr) {
+                       if (spi_st->bytes_per_word == 1) {
+                               *spi_st->rx_ptr++ = (uint8_t)word;
+                       } else {
+                               *spi_st->rx_ptr++ = (word >> 8);
+                               *spi_st->rx_ptr++ = word & 0xff;
+                       }
+               }
+       }
+       spi_st->words_remaining -= count;
+}
+
+static int spi_st_transfer_one(struct spi_master *master,
+                              struct spi_device *spi, struct spi_transfer *t)
+{
+       struct spi_st *spi_st = spi_master_get_devdata(master);
+       uint32_t ctl = 0;
+
+       /* Setup transfer */
+       spi_st->tx_ptr = t->tx_buf;
+       spi_st->rx_ptr = t->rx_buf;
+
+       if (spi->bits_per_word > 8) {
+               /*
+                * Anything greater than 8 bits-per-word requires 2
+                * bytes-per-word in the RX/TX buffers
+                */
+               spi_st->bytes_per_word = 2;
+               spi_st->words_remaining = t->len / 2;
+
+       } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
+               /*
+                * If transfer is even-length, and 8 bits-per-word, then
+                * implement as half-length 16 bits-per-word transfer
+                */
+               spi_st->bytes_per_word = 2;
+               spi_st->words_remaining = t->len / 2;
+
+               /* Set SSC_CTL to 16 bits-per-word */
+               ctl = readl_relaxed(spi_st->base + SSC_CTL);
+               writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL);
+
+               readl_relaxed(spi_st->base + SSC_RBUF);
+
+       } else {
+               spi_st->bytes_per_word = 1;
+               spi_st->words_remaining = t->len;
+       }
+
+       reinit_completion(&spi_st->done);
+
+       /* Start transfer by writing to the TX FIFO */
+       ssc_write_tx_fifo(spi_st);
+       writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN);
+
+       /* Wait for transfer to complete */
+       wait_for_completion(&spi_st->done);
+
+       /* Restore SSC_CTL if necessary */
+       if (ctl)
+               writel_relaxed(ctl, spi_st->base + SSC_CTL);
+
+       spi_finalize_current_transfer(spi->master);
+
+       return t->len;
+}
+
+static void spi_st_cleanup(struct spi_device *spi)
+{
+       int cs = spi->cs_gpio;
+
+       if (gpio_is_valid(cs))
+               devm_gpio_free(&spi->dev, cs);
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS  (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
+static int spi_st_setup(struct spi_device *spi)
+{
+       struct spi_st *spi_st = spi_master_get_devdata(spi->master);
+       u32 spi_st_clk, sscbrg, var;
+       u32 hz = spi->max_speed_hz;
+       int cs = spi->cs_gpio;
+       int ret;
+
+       if (!hz)  {
+               dev_err(&spi->dev, "max_speed_hz unspecified\n");
+               return -EINVAL;
+       }
+
+       if (!gpio_is_valid(cs)) {
+               dev_err(&spi->dev, "%d is not a valid gpio\n", cs);
+               return -EINVAL;
+       }
+
+       if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) {
+               dev_err(&spi->dev, "could not request gpio:%d\n", cs);
+               return -EINVAL;
+       }
+
+       ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
+       if (ret)
+               return ret;
+
+       spi_st_clk = clk_get_rate(spi_st->clk);
+
+       /* Set SSC_BRF */
+       sscbrg = spi_st_clk / (2 * hz);
+       if (sscbrg < 0x07 || sscbrg > BIT(16)) {
+               dev_err(&spi->dev,
+                       "divider %d for %d Hz outside valid range\n", sscbrg, hz);
+               return -EINVAL;
+       }
+
+       spi_st->baud = spi_st_clk / (2 * sscbrg);
+       if (sscbrg == BIT(16)) /* 16-bit counter wraps */
+               sscbrg = 0x0;
+
+       writel_relaxed(sscbrg, spi_st->base + SSC_BRG);
+
+       dev_dbg(&spi->dev,
+               "setting baudrate: target=%u Hz, actual=%u Hz, sscbrg=%u\n",
+               hz, spi_st->baud, sscbrg);
+
+        /* Set SSC_CTL and enable SSC */
+        var = readl_relaxed(spi_st->base + SSC_CTL);
+        var |= SSC_CTL_MS;
+
+        if (spi->mode & SPI_CPOL)
+               var |= SSC_CTL_PO;
+        else
+               var &= ~SSC_CTL_PO;
+
+        if (spi->mode & SPI_CPHA)
+               var |= SSC_CTL_PH;
+        else
+               var &= ~SSC_CTL_PH;
+
+        if ((spi->mode & SPI_LSB_FIRST) == 0)
+               var |= SSC_CTL_HB;
+        else
+               var &= ~SSC_CTL_HB;
+
+        if (spi->mode & SPI_LOOP)
+               var |= SSC_CTL_LPB;
+        else
+               var &= ~SSC_CTL_LPB;
+
+        var &= ~SSC_CTL_DATA_WIDTH_MSK;
+        var |= (spi->bits_per_word - 1);
+
+        var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
+        var |= SSC_CTL_EN;
+
+        writel_relaxed(var, spi_st->base + SSC_CTL);
+
+        /* Clear the status register */
+        readl_relaxed(spi_st->base + SSC_RBUF);
+
+        return 0;
+}
+
+/* Interrupt fired when TX shift register becomes empty */
+static irqreturn_t spi_st_irq(int irq, void *dev_id)
+{
+       struct spi_st *spi_st = (struct spi_st *)dev_id;
+
+       /* Read RX FIFO */
+       ssc_read_rx_fifo(spi_st);
+
+       /* Fill TX FIFO */
+       if (spi_st->words_remaining) {
+               ssc_write_tx_fifo(spi_st);
+       } else {
+               /* TX/RX complete */
+               writel_relaxed(0x0, spi_st->base + SSC_IEN);
+               /*
+                * Read SSC_IEN back to ensure the interrupt-disable write
+                * has taken effect before the next transfer re-enables it
+                */
+               readl(spi_st->base + SSC_IEN);
+               complete(&spi_st->done);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int spi_st_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct spi_master *master;
+       struct resource *res;
+       struct spi_st *spi_st;
+       int irq, ret = 0;
+       u32 var;
+
+       master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
+       if (!master)
+               return -ENOMEM;
+
+       master->dev.of_node             = np;
+       master->mode_bits               = MODEBITS;
+       master->setup                   = spi_st_setup;
+       master->cleanup                 = spi_st_cleanup;
+       master->transfer_one            = spi_st_transfer_one;
+       master->bits_per_word_mask      = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+       master->auto_runtime_pm         = true;
+       master->bus_num                 = pdev->id;
+       spi_st                          = spi_master_get_devdata(master);
+
+       spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
+       if (IS_ERR(spi_st->clk)) {
+               dev_err(&pdev->dev, "Unable to request clock\n");
+               return PTR_ERR(spi_st->clk);
+       }
+
+       ret = spi_st_clk_enable(spi_st);
+       if (ret)
+               return ret;
+
+       init_completion(&spi_st->done);
+
+       /* Get resources */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       spi_st->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(spi_st->base)) {
+               ret = PTR_ERR(spi_st->base);
+               goto clk_disable;
+       }
+
+       /* Disable I2C and Reset SSC */
+       writel_relaxed(0x0, spi_st->base + SSC_I2C);
+       var = readw_relaxed(spi_st->base + SSC_CTL);
+       var |= SSC_CTL_SR;
+       writel_relaxed(var, spi_st->base + SSC_CTL);
+
+       udelay(1);
+       var = readl_relaxed(spi_st->base + SSC_CTL);
+       var &= ~SSC_CTL_SR;
+       writel_relaxed(var, spi_st->base + SSC_CTL);
+
+       /* Set SSC into slave mode before reconfiguring PIO pins */
+       var = readl_relaxed(spi_st->base + SSC_CTL);
+       var &= ~SSC_CTL_MS;
+       writel_relaxed(var, spi_st->base + SSC_CTL);
+
+       irq = irq_of_parse_and_map(np, 0);
+       if (!irq) {
+               dev_err(&pdev->dev, "IRQ missing or invalid\n");
+               ret = -EINVAL;
+               goto clk_disable;
+       }
+
+       ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0,
+                              pdev->name, spi_st);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to request irq %d\n", irq);
+               goto clk_disable;
+       }
+
+       /* by default the device is on */
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       platform_set_drvdata(pdev, master);
+
+       ret = devm_spi_register_master(&pdev->dev, master);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register master\n");
+               goto clk_disable;
+       }
+
+       return 0;
+
+clk_disable:
+       spi_st_clk_disable(spi_st);
+
+       return ret;
+}
+
+static int spi_st_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct spi_st *spi_st = spi_master_get_devdata(master);
+
+       spi_st_clk_disable(spi_st);
+
+       pinctrl_pm_select_sleep_state(&pdev->dev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_st_runtime_suspend(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct spi_st *spi_st = spi_master_get_devdata(master);
+
+       writel_relaxed(0, spi_st->base + SSC_IEN);
+       pinctrl_pm_select_sleep_state(dev);
+
+       spi_st_clk_disable(spi_st);
+
+       return 0;
+}
+
+static int spi_st_runtime_resume(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct spi_st *spi_st = spi_master_get_devdata(master);
+       int ret;
+
+       ret = spi_st_clk_enable(spi_st);
+       pinctrl_pm_select_default_state(dev);
+
+       return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_st_suspend(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       int ret;
+
+       ret = spi_master_suspend(master);
+       if (ret)
+               return ret;
+
+       return pm_runtime_force_suspend(dev);
+}
+
+static int spi_st_resume(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       int ret;
+
+       ret = spi_master_resume(master);
+       if (ret)
+               return ret;
+
+       return pm_runtime_force_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops spi_st_pm = {
+       SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
+       SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm_spi_match[] = {
+       { .compatible = "st,comms-ssc4-spi", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, stm_spi_match);
+
+static struct platform_driver spi_st_driver = {
+       .driver = {
+               .name = "spi-st",
+               .pm = &spi_st_pm,
+               .of_match_table = of_match_ptr(stm_spi_match),
+       },
+       .probe = spi_st_probe,
+       .remove = spi_st_remove,
+};
+module_platform_driver(spi_st_driver);
+
+MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
+MODULE_DESCRIPTION("STM SSC SPI driver");
+MODULE_LICENSE("GPL v2");
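For reference, the SSC_BRG value programmed in spi_st_setup() above is a plain half-rate divider: sscbrg = clk / (2 * hz), and the rate actually achieved is clk / (2 * sscbrg). A standalone arithmetic sketch, assuming a 100 MHz input clock in place of the real clk_get_rate() value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t clk = 100000000;               /* assumed SSC input clock */
        uint32_t hz  = 4000000;                 /* requested SPI clock */
        uint32_t sscbrg = clk / (2 * hz);       /* divider -> 12 */
        uint32_t actual = clk / (2 * sscbrg);   /* effective rate -> 4166666 Hz */

        printf("sscbrg=%u actual=%u Hz\n", sscbrg, actual);
        return 0;
}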
index 6146c4cd6583df60f72f0c526b4becb7e682db08..884a716e50cb822ce49dcb0e925a2c1856130bcb 100644 (file)
@@ -201,7 +201,7 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
 
 static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 {
-       int wlen, count, ret;
+       int wlen, count;
        unsigned int cmd;
        const u8 *txbuf;
 
@@ -230,9 +230,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
                }
 
                ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
-               ret = wait_for_completion_timeout(&qspi->transfer_complete,
-                                                 QSPI_COMPLETION_TIMEOUT);
-               if (ret == 0) {
+               if (!wait_for_completion_timeout(&qspi->transfer_complete,
+                                                QSPI_COMPLETION_TIMEOUT)) {
                        dev_err(qspi->dev, "write timed out\n");
                        return -ETIMEDOUT;
                }
@@ -245,7 +244,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 
 static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 {
-       int wlen, count, ret;
+       int wlen, count;
        unsigned int cmd;
        u8 *rxbuf;
 
@@ -268,9 +267,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        while (count) {
                dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
                ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
-               ret = wait_for_completion_timeout(&qspi->transfer_complete,
-                               QSPI_COMPLETION_TIMEOUT);
-               if (ret == 0) {
+               if (!wait_for_completion_timeout(&qspi->transfer_complete,
+                                                QSPI_COMPLETION_TIMEOUT)) {
                        dev_err(qspi->dev, "read timed out\n");
                        return -ETIMEDOUT;
                }
index be692ad504423e42beed4800fb9fadc90a3b3b3f..93dfcee0f987b705be1329893c950ad19989ccd8 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
  */
 
 #include <linux/delay.h>
index 79bd84f43430d8c718a1369f167337053b328309..133f53a9c1d4eb9b5235b88de03dac9c5d2ea27e 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/spi/xilinx_spi.h>
 #include <linux/io.h>
 
+#define XILINX_SPI_MAX_CS      32
+
 #define XILINX_SPI_NAME "xilinx_spi"
 
 /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
@@ -34,7 +36,8 @@
 #define XSPI_CR_MASTER_MODE    0x04
 #define XSPI_CR_CPOL           0x08
 #define XSPI_CR_CPHA           0x10
-#define XSPI_CR_MODE_MASK      (XSPI_CR_CPHA | XSPI_CR_CPOL)
+#define XSPI_CR_MODE_MASK      (XSPI_CR_CPHA | XSPI_CR_CPOL | \
+                                XSPI_CR_LSB_FIRST | XSPI_CR_LOOP)
 #define XSPI_CR_TXFIFO_RESET   0x20
 #define XSPI_CR_RXFIFO_RESET   0x40
 #define XSPI_CR_MANUAL_SSELECT 0x80
@@ -85,12 +88,11 @@ struct xilinx_spi {
 
        u8 *rx_ptr;             /* pointer in the Rx buffer */
        const u8 *tx_ptr;       /* pointer in the Tx buffer */
-       int remaining_bytes;    /* the number of bytes left to transfer */
-       u8 bits_per_word;
+       u8 bytes_per_word;
+       int buffer_size;        /* buffer size in words */
+       u32 cs_inactive;        /* Level of the CS pins when inactive */
        unsigned int (*read_fn)(void __iomem *);
        void (*write_fn)(u32, void __iomem *);
-       void (*tx_fn)(struct xilinx_spi *);
-       void (*rx_fn)(struct xilinx_spi *);
 };
 
 static void xspi_write32(u32 val, void __iomem *addr)
@@ -113,49 +115,51 @@ static unsigned int xspi_read32_be(void __iomem *addr)
        return ioread32be(addr);
 }
 
-static void xspi_tx8(struct xilinx_spi *xspi)
+static void xilinx_spi_tx(struct xilinx_spi *xspi)
 {
-       xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
-       xspi->tx_ptr++;
-}
-
-static void xspi_tx16(struct xilinx_spi *xspi)
-{
-       xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
-       xspi->tx_ptr += 2;
-}
+       u32 data = 0;
 
-static void xspi_tx32(struct xilinx_spi *xspi)
-{
-       xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
-       xspi->tx_ptr += 4;
-}
-
-static void xspi_rx8(struct xilinx_spi *xspi)
-{
-       u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
-       if (xspi->rx_ptr) {
-               *xspi->rx_ptr = data & 0xff;
-               xspi->rx_ptr++;
+       if (!xspi->tx_ptr) {
+               xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+               return;
        }
-}
 
-static void xspi_rx16(struct xilinx_spi *xspi)
-{
-       u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
-       if (xspi->rx_ptr) {
-               *(u16 *)(xspi->rx_ptr) = data & 0xffff;
-               xspi->rx_ptr += 2;
+       switch (xspi->bytes_per_word) {
+       case 1:
+               data = *(u8 *)(xspi->tx_ptr);
+               break;
+       case 2:
+               data = *(u16 *)(xspi->tx_ptr);
+               break;
+       case 4:
+               data = *(u32 *)(xspi->tx_ptr);
+               break;
        }
+
+       xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
+       xspi->tx_ptr += xspi->bytes_per_word;
 }
 
-static void xspi_rx32(struct xilinx_spi *xspi)
+static void xilinx_spi_rx(struct xilinx_spi *xspi)
 {
        u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
-       if (xspi->rx_ptr) {
+
+       if (!xspi->rx_ptr)
+               return;
+
+       switch (xspi->bytes_per_word) {
+       case 1:
+               *(u8 *)(xspi->rx_ptr) = data;
+               break;
+       case 2:
+               *(u16 *)(xspi->rx_ptr) = data;
+               break;
+       case 4:
                *(u32 *)(xspi->rx_ptr) = data;
-               xspi->rx_ptr += 4;
+               break;
        }
+
+       xspi->rx_ptr += xspi->bytes_per_word;
 }
 
 static void xspi_init_hw(struct xilinx_spi *xspi)
@@ -165,46 +169,56 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
        /* Reset the SPI device */
        xspi->write_fn(XIPIF_V123B_RESET_MASK,
                regs_base + XIPIF_V123B_RESETR_OFFSET);
-       /* Disable all the interrupts just in case */
-       xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
-       /* Enable the global IPIF interrupt */
-       xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
-               regs_base + XIPIF_V123B_DGIER_OFFSET);
+       /* Enable the transmit empty interrupt, which we use to determine
+        * progress on the transmission.
+        */
+       xspi->write_fn(XSPI_INTR_TX_EMPTY,
+                       regs_base + XIPIF_V123B_IIER_OFFSET);
+       /* Disable the global IPIF interrupt */
+       xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
        /* Deselect the slave on the SPI bus */
        xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
        /* Disable the transmitter, enable Manual Slave Select Assertion,
         * put SPI controller into master mode, and enable it */
-       xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT |
-               XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET |
-               XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET);
+       xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
+               XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
+               regs_base + XSPI_CR_OFFSET);
 }
 
 static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
 {
        struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+       u16 cr;
+       u32 cs;
 
        if (is_on == BITBANG_CS_INACTIVE) {
                /* Deselect the slave on the SPI bus */
-               xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET);
-       } else if (is_on == BITBANG_CS_ACTIVE) {
-               /* Set the SPI clock phase and polarity */
-               u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
-                        & ~XSPI_CR_MODE_MASK;
-               if (spi->mode & SPI_CPHA)
-                       cr |= XSPI_CR_CPHA;
-               if (spi->mode & SPI_CPOL)
-                       cr |= XSPI_CR_CPOL;
-               xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
-
-               /* We do not check spi->max_speed_hz here as the SPI clock
-                * frequency is not software programmable (the IP block design
-                * parameter)
-                */
-
-               /* Activate the chip select */
-               xspi->write_fn(~(0x0001 << spi->chip_select),
-                       xspi->regs + XSPI_SSR_OFFSET);
+               xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
+               return;
        }
+
+       /* Set the SPI clock phase and polarity */
+       cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
+       if (spi->mode & SPI_CPHA)
+               cr |= XSPI_CR_CPHA;
+       if (spi->mode & SPI_CPOL)
+               cr |= XSPI_CR_CPOL;
+       if (spi->mode & SPI_LSB_FIRST)
+               cr |= XSPI_CR_LSB_FIRST;
+       if (spi->mode & SPI_LOOP)
+               cr |= XSPI_CR_LOOP;
+       xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+       /* We do not check spi->max_speed_hz here as the SPI clock
+        * frequency is not software programmable (the IP block design
+        * parameter)
+        */
+
+       cs = xspi->cs_inactive;
+       cs ^= BIT(spi->chip_select);
+
+       /* Activate the chip select */
+       xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
 }
 
 /* spi_bitbang requires custom setup_transfer() to be defined if there is a
@@ -213,85 +227,85 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
 static int xilinx_spi_setup_transfer(struct spi_device *spi,
                struct spi_transfer *t)
 {
-       return 0;
-}
+       struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
 
-static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
-{
-       u8 sr;
+       if (spi->mode & SPI_CS_HIGH)
+               xspi->cs_inactive &= ~BIT(spi->chip_select);
+       else
+               xspi->cs_inactive |= BIT(spi->chip_select);
 
-       /* Fill the Tx FIFO with as many bytes as possible */
-       sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-       while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
-               if (xspi->tx_ptr)
-                       xspi->tx_fn(xspi);
-               else
-                       xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
-               xspi->remaining_bytes -= xspi->bits_per_word / 8;
-               sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-       }
+       return 0;
 }
 
 static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 {
        struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
-       u32 ipif_ier;
+       int remaining_words;    /* the number of words left to transfer */
+       bool use_irq = false;
+       u16 cr = 0;
 
        /* We get here with transmitter inhibited */
 
        xspi->tx_ptr = t->tx_buf;
        xspi->rx_ptr = t->rx_buf;
-       xspi->remaining_bytes = t->len;
+       remaining_words = t->len / xspi->bytes_per_word;
        reinit_completion(&xspi->done);
 
+       if (xspi->irq >= 0 &&  remaining_words > xspi->buffer_size) {
+               use_irq = true;
+               xspi->write_fn(XSPI_INTR_TX_EMPTY,
+                               xspi->regs + XIPIF_V123B_IISR_OFFSET);
+               /* Enable the global IPIF interrupt */
+               xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+                               xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+               /* Inhibit irq to avoid spurious irqs on tx_empty */
+               cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+               xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+                              xspi->regs + XSPI_CR_OFFSET);
+       }
 
-       /* Enable the transmit empty interrupt, which we use to determine
-        * progress on the transmission.
-        */
-       ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
-       xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
-               xspi->regs + XIPIF_V123B_IIER_OFFSET);
+       while (remaining_words) {
+               int n_words, tx_words, rx_words;
 
-       for (;;) {
-               u16 cr;
-               u8 sr;
+               n_words = min(remaining_words, xspi->buffer_size);
 
-               xilinx_spi_fill_tx_fifo(xspi);
+               tx_words = n_words;
+               while (tx_words--)
+                       xilinx_spi_tx(xspi);
 
                /* Start the transfer by not inhibiting the transmitter any
                 * longer
                 */
-               cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
-                                                       ~XSPI_CR_TRANS_INHIBIT;
-               xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
 
-               wait_for_completion(&xspi->done);
+               if (use_irq) {
+                       xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+                       wait_for_completion(&xspi->done);
+               } else
+                       while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) &
+                                               XSPI_SR_TX_EMPTY_MASK))
+                               ;
 
                /* A transmit has just completed. Process received data and
                 * check for more data to transmit. Always inhibit the
                 * transmitter while the Isr refills the transmit register/FIFO,
                 * or make sure it is stopped if we're done.
                 */
-               cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
-               xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+               if (use_irq)
+                       xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
                               xspi->regs + XSPI_CR_OFFSET);
 
                /* Read out all the data from the Rx FIFO */
-               sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-               while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
-                       xspi->rx_fn(xspi);
-                       sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-               }
-
-               /* See if there is more data to send */
-               if (xspi->remaining_bytes <= 0)
-                       break;
+               rx_words = n_words;
+               while (rx_words--)
+                       xilinx_spi_rx(xspi);
+
+               remaining_words -= n_words;
        }
 
-       /* Disable the transmit empty interrupt */
-       xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
+       if (use_irq)
+               xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
 
-       return t->len - xspi->remaining_bytes;
+       return t->len;
 }
 
 
@@ -316,6 +330,28 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
+{
+       u8 sr;
+       int n_words = 0;
+
+       /*
+        * Before the buffer_size detection we reset the core
+        * to make sure we start with a clean state.
+        */
+       xspi->write_fn(XIPIF_V123B_RESET_MASK,
+               xspi->regs + XIPIF_V123B_RESETR_OFFSET);
+
+       /* Fill the Tx FIFO with as many words as possible */
+       do {
+               xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+               sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+               n_words++;
+       } while (!(sr & XSPI_SR_TX_FULL_MASK));
+
+       return n_words;
+}
+
 static const struct of_device_id xilinx_spi_of_match[] = {
        { .compatible = "xlnx,xps-spi-2.00.a", },
        { .compatible = "xlnx,xps-spi-2.00.b", },
@@ -348,14 +384,21 @@ static int xilinx_spi_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       if (num_cs > XILINX_SPI_MAX_CS) {
+               dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+               return -EINVAL;
+       }
+
        master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
        if (!master)
                return -ENODEV;
 
        /* the spi->mode bits understood by this driver: */
-       master->mode_bits = SPI_CPOL | SPI_CPHA;
+       master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
+                           SPI_CS_HIGH;
 
        xspi = spi_master_get_devdata(master);
+       xspi->cs_inactive = 0xffffffff;
        xspi->bitbang.master = master;
        xspi->bitbang.chipselect = xilinx_spi_chipselect;
        xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
@@ -392,35 +435,20 @@ static int xilinx_spi_probe(struct platform_device *pdev)
        }
 
        master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
-       xspi->bits_per_word = bits_per_word;
-       if (xspi->bits_per_word == 8) {
-               xspi->tx_fn = xspi_tx8;
-               xspi->rx_fn = xspi_rx8;
-       } else if (xspi->bits_per_word == 16) {
-               xspi->tx_fn = xspi_tx16;
-               xspi->rx_fn = xspi_rx16;
-       } else if (xspi->bits_per_word == 32) {
-               xspi->tx_fn = xspi_tx32;
-               xspi->rx_fn = xspi_rx32;
-       } else {
-               ret = -EINVAL;
-               goto put_master;
-       }
-
-       /* SPI controller initializations */
-       xspi_init_hw(xspi);
+       xspi->bytes_per_word = bits_per_word / 8;
+       xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
 
        xspi->irq = platform_get_irq(pdev, 0);
-       if (xspi->irq < 0) {
-               ret = xspi->irq;
-               goto put_master;
+       if (xspi->irq >= 0) {
+               /* Register for SPI Interrupt */
+               ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
+                               dev_name(&pdev->dev), xspi);
+               if (ret)
+                       goto put_master;
        }
 
-       /* Register for SPI Interrupt */
-       ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
-                              dev_name(&pdev->dev), xspi);
-       if (ret)
-               goto put_master;
+       /* SPI controller initializations */
+       xspi_init_hw(xspi);
 
        ret = spi_bitbang_start(&xspi->bitbang);
        if (ret) {
index 66a70e9bc7438d0090b90a815ccea59812603110..c64a3e59fce30a7f9658afcca246a6d6872627da 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/kernel.h>
@@ -788,7 +784,7 @@ static int spi_transfer_one_message(struct spi_master *master,
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
-       int ms = 1;
+       unsigned long ms = 1;
 
        spi_set_cs(msg->spi, true);
 
@@ -875,31 +871,59 @@ void spi_finalize_current_transfer(struct spi_master *master)
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
 
 /**
- * spi_pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the master struct
+ * __spi_pump_messages - function which processes spi message queue
+ * @master: master to process queue for
+ * @in_kthread: true if we are in the context of the message pump thread
  *
  * This function checks if there is any spi message in the queue that
  * needs processing and if so call out to the driver to initialize hardware
  * and transfer each message.
  *
+ * Note that it is called both from the kthread itself and also from
+ * inside spi_sync(); the queue extraction handling at the top of the
+ * function should deal with this safely.
  */
-static void spi_pump_messages(struct kthread_work *work)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 {
-       struct spi_master *master =
-               container_of(work, struct spi_master, pump_messages);
        unsigned long flags;
        bool was_busy = false;
        int ret;
 
-       /* Lock queue and check for queue work */
+       /* Lock queue */
        spin_lock_irqsave(&master->queue_lock, flags);
+
+       /* Make sure we are not already running a message */
+       if (master->cur_msg) {
+               spin_unlock_irqrestore(&master->queue_lock, flags);
+               return;
+       }
+
+       /* If another context is idling the device then defer */
+       if (master->idling) {
+               queue_kthread_work(&master->kworker, &master->pump_messages);
+               spin_unlock_irqrestore(&master->queue_lock, flags);
+               return;
+       }
+
+       /* Check if the queue is idle */
        if (list_empty(&master->queue) || !master->running) {
                if (!master->busy) {
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }
+
+               /* Only do teardown in the thread */
+               if (!in_kthread) {
+                       queue_kthread_work(&master->kworker,
+                                          &master->pump_messages);
+                       spin_unlock_irqrestore(&master->queue_lock, flags);
+                       return;
+               }
+
                master->busy = false;
+               master->idling = true;
                spin_unlock_irqrestore(&master->queue_lock, flags);
+
                kfree(master->dummy_rx);
                master->dummy_rx = NULL;
                kfree(master->dummy_tx);
@@ -913,14 +937,13 @@ static void spi_pump_messages(struct kthread_work *work)
                        pm_runtime_put_autosuspend(master->dev.parent);
                }
                trace_spi_master_idle(master);
-               return;
-       }
 
-       /* Make sure we are not already running a message */
-       if (master->cur_msg) {
+               spin_lock_irqsave(&master->queue_lock, flags);
+               master->idling = false;
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }
+
        /* Extract head of queue */
        master->cur_msg =
                list_first_entry(&master->queue, struct spi_message, queue);
@@ -985,13 +1008,22 @@ static void spi_pump_messages(struct kthread_work *work)
        }
 }
 
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+       struct spi_master *master =
+               container_of(work, struct spi_master, pump_messages);
+
+       __spi_pump_messages(master, true);
+}
+
 static int spi_init_queue(struct spi_master *master)
 {
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 
-       INIT_LIST_HEAD(&master->queue);
-       spin_lock_init(&master->queue_lock);
-
        master->running = false;
        master->busy = false;
 
@@ -1161,12 +1193,9 @@ static int spi_destroy_queue(struct spi_master *master)
        return 0;
 }
 
-/**
- * spi_queued_transfer - transfer function for queued transfers
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
- */
-static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+static int __spi_queued_transfer(struct spi_device *spi,
+                                struct spi_message *msg,
+                                bool need_pump)
 {
        struct spi_master *master = spi->master;
        unsigned long flags;
@@ -1181,13 +1210,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
        msg->status = -EINPROGRESS;
 
        list_add_tail(&msg->queue, &master->queue);
-       if (!master->busy)
+       if (!master->busy && need_pump)
                queue_kthread_work(&master->kworker, &master->pump_messages);
 
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
 }
 
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message to be handled and queued to the driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+       return __spi_queued_transfer(spi, msg, true);
+}
+
 static int spi_master_initialize_queue(struct spi_master *master)
 {
        int ret;
@@ -1609,6 +1648,8 @@ int spi_register_master(struct spi_master *master)
                dynamic = 1;
        }
 
+       INIT_LIST_HEAD(&master->queue);
+       spin_lock_init(&master->queue_lock);
        spin_lock_init(&master->bus_lock_spinlock);
        mutex_init(&master->bus_lock_mutex);
        master->bus_lock_flag = 0;
@@ -2114,19 +2155,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
        DECLARE_COMPLETION_ONSTACK(done);
        int status;
        struct spi_master *master = spi->master;
+       unsigned long flags;
+
+       status = __spi_validate(spi, message);
+       if (status != 0)
+               return status;
 
        message->complete = spi_complete;
        message->context = &done;
+       message->spi = spi;
 
        if (!bus_locked)
                mutex_lock(&master->bus_lock_mutex);
 
-       status = spi_async_locked(spi, message);
+       /* If we're not using the legacy transfer method then we will
+        * try to transfer in the calling context, so special-case it here.
+        * This code would be less tricky if we could remove the
+        * support for driver implemented message queues.
+        */
+       if (master->transfer == spi_queued_transfer) {
+               spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+               trace_spi_message_submit(message);
+
+               status = __spi_queued_transfer(spi, message, false);
+
+               spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+       } else {
+               status = spi_async_locked(spi, message);
+       }
 
        if (!bus_locked)
                mutex_unlock(&master->bus_lock_mutex);
 
        if (status == 0) {
+               /* Push out the messages in the calling context if we
+                * can.
+                */
+               if (master->transfer == spi_queued_transfer)
+                       __spi_pump_messages(master, false);
+
                wait_for_completion(&done);
                status = message->status;
        }
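With the __spi_sync() rework above, a synchronous transfer on a controller that uses the core message queue is validated and pushed out in the caller's context instead of always bouncing through the pump kthread; client drivers keep using the same API. A minimal usage sketch, with a hypothetical device and buffers:

#include <linux/spi/spi.h>

/* Hypothetical helper: one full-duplex transfer via the synchronous API. */
static int example_spi_xfer(struct spi_device *spi, const void *tx,
                            void *rx, size_t len)
{
        struct spi_transfer t = {
                .tx_buf = tx,
                .rx_buf = rx,
                .len    = len,
        };
        struct spi_message m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);

        /*
         * Blocks until the transfer finishes; with the change above this
         * may now run entirely in the calling context.
         */
        return spi_sync(spi, &m);
}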
index 6941e04afb8c4526e329a8096601d0e8098f9721..4eb7a980e67075a018a9be2dd4146927a3f38a6c 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/init.h>
@@ -317,6 +313,37 @@ done:
        return status;
 }
 
+static struct spi_ioc_transfer *
+spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
+               unsigned *n_ioc)
+{
+       struct spi_ioc_transfer *ioc;
+       u32     tmp;
+
+       /* Check type, command number and direction */
+       if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
+                       || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
+                       || _IOC_DIR(cmd) != _IOC_WRITE)
+               return ERR_PTR(-ENOTTY);
+
+       tmp = _IOC_SIZE(cmd);
+       if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
+               return ERR_PTR(-EINVAL);
+       *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
+       if (*n_ioc == 0)
+               return NULL;
+
+       /* copy into scratch area */
+       ioc = kmalloc(tmp, GFP_KERNEL);
+       if (!ioc)
+               return ERR_PTR(-ENOMEM);
+       if (__copy_from_user(ioc, u_ioc, tmp)) {
+               kfree(ioc);
+               return ERR_PTR(-EFAULT);
+       }
+       return ioc;
+}
+
 static long
 spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
@@ -456,32 +483,15 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
        default:
                /* segmented and/or full-duplex I/O request */
-               if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
-                               || _IOC_DIR(cmd) != _IOC_WRITE) {
-                       retval = -ENOTTY;
-                       break;
-               }
-
-               tmp = _IOC_SIZE(cmd);
-               if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
-                       retval = -EINVAL;
-                       break;
-               }
-               n_ioc = tmp / sizeof(struct spi_ioc_transfer);
-               if (n_ioc == 0)
-                       break;
-
-               /* copy into scratch area */
-               ioc = kmalloc(tmp, GFP_KERNEL);
-               if (!ioc) {
-                       retval = -ENOMEM;
-                       break;
-               }
-               if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
-                       kfree(ioc);
-                       retval = -EFAULT;
+               /* Check message and copy into scratch area */
+               ioc = spidev_get_ioc_message(cmd,
+                               (struct spi_ioc_transfer __user *)arg, &n_ioc);
+               if (IS_ERR(ioc)) {
+                       retval = PTR_ERR(ioc);
                        break;
                }
+               if (!ioc)
+                       break;  /* n_ioc is also 0 */
 
                /* translate to spi_message, execute */
                retval = spidev_message(spidev, ioc, n_ioc);
@@ -495,9 +505,68 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 }
 
 #ifdef CONFIG_COMPAT
+static long
+spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
+               unsigned long arg)
+{
+       struct spi_ioc_transfer __user  *u_ioc;
+       int                             retval = 0;
+       struct spidev_data              *spidev;
+       struct spi_device               *spi;
+       unsigned                        n_ioc, n;
+       struct spi_ioc_transfer         *ioc;
+
+       u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
+       if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
+               return -EFAULT;
+
+       /* guard against device removal before, or while,
+        * we issue this ioctl.
+        */
+       spidev = filp->private_data;
+       spin_lock_irq(&spidev->spi_lock);
+       spi = spi_dev_get(spidev->spi);
+       spin_unlock_irq(&spidev->spi_lock);
+
+       if (spi == NULL)
+               return -ESHUTDOWN;
+
+       /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
+       mutex_lock(&spidev->buf_lock);
+
+       /* Check message and copy into scratch area */
+       ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
+       if (IS_ERR(ioc)) {
+               retval = PTR_ERR(ioc);
+               goto done;
+       }
+       if (!ioc)
+               goto done;      /* n_ioc is also 0 */
+
+       /* Convert buffer pointers */
+       for (n = 0; n < n_ioc; n++) {
+               ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
+               ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
+       }
+
+       /* translate to spi_message, execute */
+       retval = spidev_message(spidev, ioc, n_ioc);
+       kfree(ioc);
+
+done:
+       mutex_unlock(&spidev->buf_lock);
+       spi_dev_put(spi);
+       return retval;
+}
+
 static long
 spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
+       if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
+                       && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
+                       && _IOC_DIR(cmd) == _IOC_WRITE)
+               return spidev_compat_ioc_message(filp, cmd, arg);
+
        return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
 }
 #else
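The spidev_get_ioc_message() helper and the new compat path above both parse the variable-length SPI_IOC_MESSAGE(n) ioctl, whose payload is an array of struct spi_ioc_transfer. For context, a minimal userspace sketch of such a segmented request (device path, command bytes and error handling are illustrative only):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int main(void)
{
        uint8_t cmd[1] = { 0x9f };              /* example command byte */
        uint8_t rsp[3] = { 0 };
        struct spi_ioc_transfer xfer[2];
        int fd = open("/dev/spidev0.0", O_RDWR);        /* hypothetical node */

        if (fd < 0)
                return 1;

        memset(xfer, 0, sizeof(xfer));
        xfer[0].tx_buf = (uintptr_t)cmd;        /* segment 1: write */
        xfer[0].len    = sizeof(cmd);
        xfer[1].rx_buf = (uintptr_t)rsp;        /* segment 2: read */
        xfer[1].len    = sizeof(rsp);

        /* Two spi_ioc_transfer entries => SPI_IOC_MESSAGE(2) */
        return ioctl(fd, SPI_IOC_MESSAGE(2), xfer) < 0;
}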
index d415d69dc2378cbc7568bbcdcc53e601770ee761..9484d5652ca5a23e051c54b1b0544ce8868f3876 100644 (file)
@@ -650,8 +650,10 @@ static void handle_rx(struct vhost_net *net)
                        break;
                }
                /* TODO: Should check and handle checksum. */
+
+               hdr.num_buffers = cpu_to_vhost16(vq, headcount);
                if (likely(mergeable) &&
-                   memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
+                   memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
                                      offsetof(typeof(hdr), num_buffers),
                                      sizeof hdr.num_buffers)) {
                        vq_err(vq, "Failed num_buffers write");
index 1b7893ecc29654f6d20cf420b24ee20dddbf9038..c428871f10934a87aa4b5a7038b8c0519b6bc80a 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1140,6 +1140,13 @@ static long aio_read_events_ring(struct kioctx *ctx,
        long ret = 0;
        int copy_ret;
 
+       /*
+        * The mutex can block and wake us up and that will cause
+        * wait_event_interruptible_hrtimeout() to schedule without sleeping
+        * and repeat. This should be rare enough that it doesn't cause
+        * performance issues. See the comment in read_events() for more detail.
+        */
+       sched_annotate_sleep();
        mutex_lock(&ctx->ring_lock);
 
        /* Access to ->ring_pages here is protected by ctx->ring_lock. */
index a66768ebc8d19d394f2cd0818d56178a50f84803..80e9c18ea64f69b68f84e3953256654774bd0b7e 100644 (file)
@@ -8,6 +8,7 @@ config BTRFS_FS
        select LZO_DECOMPRESS
        select RAID6_PQ
        select XOR_BLOCKS
+       select SRCU
 
        help
          Btrfs is a general purpose copy-on-write filesystem with extents,
index 9a02da16f2beee18f53d7244cde93f3f11cc9866..1a9585d4380a330f96ba7d82f63cb87314d1701e 100644 (file)
@@ -2591,6 +2591,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        }
 
        if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
+               blk_finish_plug(&plug);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = root_log_ctx.log_ret;
                goto out;
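The one-line btrfs fix above adds the blk_finish_plug() that was missing on this early-return path; a plug is an on-stack structure registered with the current task by blk_start_plug(), so every exit from the function must finish it. A sketch of the general shape, with a placeholder for the actual submission work:

#include <linux/blkdev.h>

struct example_ctx;                                     /* hypothetical context */
int example_do_submissions(struct example_ctx *ctx);    /* placeholder, not a real API */

/* Illustrative only: submit a batch of I/O under an on-stack plug. */
static int example_submit_batch(struct example_ctx *ctx)
{
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);

        ret = example_do_submissions(ctx);
        if (ret) {
                blk_finish_plug(&plug); /* must unplug on error paths too */
                return ret;
        }

        blk_finish_plug(&plug);
        return 0;
}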
index 9c56ef776407ad28e30760e2a0f0c1caede8fea0..7febcf2475c5ab675c04dfd2fddaa3ed574522a0 100644 (file)
@@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
                *flags = CIFSSEC_MUST_NTLMV2;
        else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
                *flags = CIFSSEC_MUST_NTLM;
-       else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
+       else if (CIFSSEC_MUST_LANMAN &&
+                (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
                *flags = CIFSSEC_MUST_LANMAN;
-       else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
+       else if (CIFSSEC_MUST_PLNTXT &&
+                (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
                *flags = CIFSSEC_MUST_PLNTXT;
 
        *flags |= signflags;
index 96b7e9b7706dc58b767863fdeadf3cadfb4e5324..74f12877493ac6c3f87792192b8aa8c1190f3c05 100644 (file)
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;
+       bool oplock_break_cancelled;
 
        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
        }
        spin_unlock(&cifs_file_list_lock);
 
-       cancel_work_sync(&cifs_file->oplock_break);
+       oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
 
        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
                _free_xid(xid);
        }
 
+       if (oplock_break_cancelled)
+               cifs_done_oplock_break(cifsi);
+
        cifs_del_pending_open(&open);
 
        /*
index 6c1566366a6613cbc494fe46344c8fd00342a82b..a4232ec4f2ba45386b4f25db484f7f30135b01c2 100644 (file)
@@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
        }
 
        rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
-       memset(wpwd, 0, 129 * sizeof(__le16));
+       memzero_explicit(wpwd, sizeof(wpwd));
 
        return rc;
 }
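The change above replaces memset() with memzero_explicit() when scrubbing the on-stack wide-password buffer, and sizes it with sizeof(wpwd) instead of a hand-written count; memzero_explicit() exists precisely so the compiler cannot drop the clearing of a dying stack buffer as a dead store. A small sketch of the pattern with a hypothetical buffer:

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical: derive and use key material, then scrub it. */
static void example_use_secret(void)
{
        u8 secret[64];

        /* ... fill and use 'secret' ... */

        /*
         * A plain memset() on a buffer about to go out of scope may be
         * optimized away; memzero_explicit() is guaranteed to happen.
         */
        memzero_explicit(secret, sizeof(secret));
}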
index 91093cd74f0da15e7f972d9c695f221e5afb454d..385704027575525474673b78d385d7ead580ef6d 100644 (file)
@@ -141,7 +141,6 @@ enum {
  * @ti_save: Backup of journal_info field of task_struct
  * @ti_flags: Flags
  * @ti_count: Nest level
- * @ti_garbage:        List of inode to be put when releasing semaphore
  */
 struct nilfs_transaction_info {
        u32                     ti_magic;
@@ -150,7 +149,6 @@ struct nilfs_transaction_info {
                                   one of other filesystems has a bug. */
        unsigned short          ti_flags;
        unsigned short          ti_count;
-       struct list_head        ti_garbage;
 };
 
 /* ti_magic */
index 7ef18fc656c28568047c30c24adb0bc029bada5c..469086b9f99bc8e20053e492237d48a3373bdb37 100644 (file)
@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
        ti->ti_count = 0;
        ti->ti_save = cur_ti;
        ti->ti_magic = NILFS_TI_MAGIC;
-       INIT_LIST_HEAD(&ti->ti_garbage);
        current->journal_info = ti;
 
        for (;;) {
@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb)
 
        up_write(&nilfs->ns_segctor_sem);
        current->journal_info = ti->ti_save;
-       if (!list_empty(&ti->ti_garbage))
-               nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
 }
 
 static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
        }
 }
 
+static void nilfs_iput_work_func(struct work_struct *work)
+{
+       struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
+                                                sc_iput_work);
+       struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
+
+       nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
+}
+
 static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
                                     struct nilfs_root *root)
 {
@@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
 {
-       struct nilfs_transaction_info *ti = current->journal_info;
        struct nilfs_inode_info *ii, *n;
+       int defer_iput = false;
 
        spin_lock(&nilfs->ns_inode_lock);
        list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
@@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                clear_bit(NILFS_I_BUSY, &ii->i_state);
                brelse(ii->i_bh);
                ii->i_bh = NULL;
-               list_move_tail(&ii->i_dirty, &ti->ti_garbage);
+               list_del_init(&ii->i_dirty);
+               if (!ii->vfs_inode.i_nlink) {
+                       /*
+                        * Defer calling iput() to avoid a deadlock
+                        * over I_SYNC flag for inodes with i_nlink == 0
+                        */
+                       list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+                       defer_iput = true;
+               } else {
+                       spin_unlock(&nilfs->ns_inode_lock);
+                       iput(&ii->vfs_inode);
+                       spin_lock(&nilfs->ns_inode_lock);
+               }
        }
        spin_unlock(&nilfs->ns_inode_lock);
+
+       if (defer_iput)
+               schedule_work(&sci->sc_iput_work);
 }
 
 /*
@@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
        INIT_LIST_HEAD(&sci->sc_segbufs);
        INIT_LIST_HEAD(&sci->sc_write_logs);
        INIT_LIST_HEAD(&sci->sc_gc_inodes);
+       INIT_LIST_HEAD(&sci->sc_iput_queue);
+       INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
        init_timer(&sci->sc_timer);
 
        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
                ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
                nilfs_transaction_unlock(sci->sc_super);
 
+               flush_work(&sci->sc_iput_work);
+
        } while (ret && retrycount-- > 0);
 }
 
@@ -2633,6 +2658,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
                || sci->sc_seq_request != sci->sc_seq_done);
        spin_unlock(&sci->sc_state_lock);
 
+       if (flush_work(&sci->sc_iput_work))
+               flag = true;
+
        if (flag || !nilfs_segctor_confirm(sci))
                nilfs_segctor_write_out(sci);
 
@@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
                nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
        }
 
+       if (!list_empty(&sci->sc_iput_queue)) {
+               nilfs_warning(sci->sc_super, __func__,
+                             "iput queue is not empty\n");
+               nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
+       }
+
        WARN_ON(!list_empty(&sci->sc_segbufs));
        WARN_ON(!list_empty(&sci->sc_write_logs));
 
index 38a1d0013314395938ceb78fd1f1e906705206f1..a48d6de1e02cc276019fb150fee8e0bc6c41bcbe 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
+#include <linux/workqueue.h>
 #include <linux/nilfs2_fs.h>
 #include "nilfs.h"
 
@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer {
  * @sc_nblk_inc: Block count of current generation
  * @sc_dirty_files: List of files to be written
  * @sc_gc_inodes: List of GC inodes having blocks to be written
+ * @sc_iput_queue: list of inodes for which iput should be done
+ * @sc_iput_work: work struct to defer iput call
  * @sc_freesegs: array of segment numbers to be freed
  * @sc_nfreesegs: number of segments on @sc_freesegs
  * @sc_dsync_inode: inode whose data pages are written for a sync operation
@@ -135,6 +138,8 @@ struct nilfs_sc_info {
 
        struct list_head        sc_dirty_files;
        struct list_head        sc_gc_inodes;
+       struct list_head        sc_iput_queue;
+       struct work_struct      sc_iput_work;
 
        __u64                  *sc_freesegs;
        size_t                  sc_nfreesegs;
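
The nilfs2 hunks above avoid a possible deadlock over the I_SYNC flag by deferring the final iput() of unlinked inodes (i_nlink == 0) to a work item instead of calling it while walking sc_dirty_files. As a rough illustration of that defer-to-workqueue idiom — not the nilfs2 code itself; the demo_* names below are invented:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical record describing one inode awaiting a deferred iput(). */
struct demo_deferred_iput {
        struct list_head node;
        struct inode *inode;
};

struct demo_ctx {
        struct list_head iput_queue;    /* protected by ->lock */
        spinlock_t lock;
        struct work_struct iput_work;
};

static void demo_iput_work_func(struct work_struct *work)
{
        struct demo_ctx *ctx = container_of(work, struct demo_ctx, iput_work);
        struct demo_deferred_iput *d, *next;
        LIST_HEAD(todo);

        spin_lock(&ctx->lock);
        list_splice_init(&ctx->iput_queue, &todo);
        spin_unlock(&ctx->lock);

        /* Process context, no spinlocks held: iput() may block here. */
        list_for_each_entry_safe(d, next, &todo, node) {
                iput(d->inode);
                kfree(d);
        }
}

static void demo_ctx_init(struct demo_ctx *ctx)
{
        INIT_LIST_HEAD(&ctx->iput_queue);
        spin_lock_init(&ctx->lock);
        INIT_WORK(&ctx->iput_work, demo_iput_work_func);
}

/* Called where a direct iput() would be unsafe; 'd' is preallocated. */
static void demo_defer_iput(struct demo_ctx *ctx, struct inode *inode,
                            struct demo_deferred_iput *d)
{
        d->inode = inode;
        spin_lock(&ctx->lock);
        list_add_tail(&d->node, &ctx->iput_queue);
        spin_unlock(&ctx->lock);
        schedule_work(&ctx->iput_work);
}

As in the patch, teardown paths then need flush_work() on the work item before the queue's backing structures go away.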
index 22c629eedd82d70425704ee86b4ddf816b7bd174..2a24249b30af845d4514552da47fb94e70765db3 100644 (file)
@@ -1,5 +1,6 @@
 config FSNOTIFY
        def_bool n
+       select SRCU
 
 source "fs/notify/dnotify/Kconfig"
 source "fs/notify/inotify/Kconfig"
index c51df1dd237e74a0127da81f95d77f570d463b84..4a09975aac907e563182879362f816d36773f481 100644 (file)
@@ -5,6 +5,7 @@
 config QUOTA
        bool "Quota support"
        select QUOTACTL
+       select SRCU
        help
          If you say Y here, you will be able to set per user limits for disk
          usage (also called disk quotas). Currently, it works for the
index 33063f872ee3cd698a233a0f0defa67aa1b6bd01..176bf816875edcb76fe9dd39470c5e1c139a4551 100644 (file)
@@ -385,7 +385,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 
 /* Is this type a native word size -- useful for atomic operations */
 #ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
 /* Compile time object size, -1 for unknown */
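
Widening __native_word() to accept one- and two-byte types means compile-time checks built on it no longer reject char- or short-sized objects; presumably this is aimed at acquire/release helpers that assert native-word sizes. A small, hypothetical illustration of the effect (demo_* names invented):

#include <linux/compiler.h>

struct demo_flags {
        short state;    /* two bytes: now satisfies __native_word() */
        char ready;     /* one byte: likewise */
};

static void demo_check(struct demo_flags *f)
{
        /* These would have failed to build against the old definition. */
        compiletime_assert(__native_word(f->state),
                           "need a native word sized type");
        compiletime_assert(__native_word(f->ready),
                           "need a native word sized type");
}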
index 515a35e2a48ab7e55d550fcd164466080773b3ce..960e666c51e44620686b6aad9fdbfbc3d325b95e 100644 (file)
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 /**
  * vlan_get_protocol - get protocol EtherType.
  * @skb: skbuff to query
+ * @type: first vlan protocol
+ * @depth: buffer to store length of eth and vlan tags in bytes
  *
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+                                        int *depth)
 {
-       __be16 protocol = 0;
-
-       if (vlan_tx_tag_present(skb) ||
-            skb->protocol != cpu_to_be16(ETH_P_8021Q))
-               protocol = skb->protocol;
-       else {
-               __be16 proto, *protop;
-               protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
-                                               h_vlan_encapsulated_proto),
-                                               sizeof(proto), &proto);
-               if (likely(protop))
-                       protocol = *protop;
+       unsigned int vlan_depth = skb->mac_len;
+
+       /* if type is 802.1Q/AD then the header should already be
+        * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+        * ETH_HLEN otherwise
+        */
+       if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+               if (vlan_depth) {
+                       if (WARN_ON(vlan_depth < VLAN_HLEN))
+                               return 0;
+                       vlan_depth -= VLAN_HLEN;
+               } else {
+                       vlan_depth = ETH_HLEN;
+               }
+               do {
+                       struct vlan_hdr *vh;
+
+                       if (unlikely(!pskb_may_pull(skb,
+                                                   vlan_depth + VLAN_HLEN)))
+                               return 0;
+
+                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+                       type = vh->h_vlan_encapsulated_proto;
+                       vlan_depth += VLAN_HLEN;
+               } while (type == htons(ETH_P_8021Q) ||
+                        type == htons(ETH_P_8021AD));
        }
 
-       return protocol;
+       if (depth)
+               *depth = vlan_depth;
+
+       return type;
+}
+
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+{
+       return __vlan_get_protocol(skb, skb->protocol, NULL);
 }
 
 static inline void vlan_set_encap_proto(struct sk_buff *skb,
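
A hypothetical caller of the reworked helpers, for example a transmit path that wants both the innermost EtherType and how many bytes of Ethernet plus VLAN headers precede the L3 header (the demo_ name is invented):

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static __be16 demo_inner_proto(struct sk_buff *skb, unsigned int *l3_off)
{
        int depth = 0;
        __be16 proto = __vlan_get_protocol(skb, skb->protocol, &depth);

        if (proto)
                *l3_off = depth;        /* ETH_HLEN plus any VLAN tags */
        return proto;                   /* 0 if the tag chain was malformed */
}

Callers that only want the EtherType keep using vlan_get_protocol(), which now simply passes a NULL depth pointer.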
index 25c791e295fd5355650a3db42bc69ddc1adf2cc8..5f3a9aa7225d917d5d769d565d3ceb58ad5c4aa0 100644 (file)
@@ -97,7 +97,7 @@ enum {
        MLX4_MAX_NUM_PF         = 16,
        MLX4_MAX_NUM_VF         = 126,
        MLX4_MAX_NUM_VF_P_PORT  = 64,
-       MLX4_MFUNC_MAX          = 80,
+       MLX4_MFUNC_MAX          = 128,
        MLX4_MAX_EQ_NUM         = 1024,
        MLX4_MFUNC_EQ_NUM       = 4,
        MLX4_MFUNC_MAX_EQES     = 8,
index 77aed9ea1d264386376bf3694b87ae3dab07275a..dab545bb66b3114597da2d4a851539d0ab0d89a8 100644 (file)
@@ -37,6 +37,7 @@
 #define SSDR           (0x10)  /* SSP Data Write/Data Read Register */
 
 #define SSTO           (0x28)  /* SSP Time Out Register */
+#define DDS_RATE       (0x28)  /* SSP DDS Clock Rate Register (Intel Quark) */
 #define SSPSP          (0x2C)  /* SSP Programmable Serial Protocol */
 #define SSTSA          (0x30)  /* SSP Tx Timeslot Active */
 #define SSRSA          (0x34)  /* SSP Rx Timeslot Active */
index 529bc946f450359158503332e6d563fa46ce6f47..a18b16f1dc0e44f7f5a3b99ea4f43d64c67b8a0c 100644 (file)
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @member:    the name of the hlist_node within the struct.
  */
 #define hlist_for_each_entry_continue_rcu(pos, member)                 \
-       for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-                       typeof(*(pos)), member);                        \
+       for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+                       &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
-            pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-                       typeof(*(pos)), member))
+            pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+                       &(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @member:    the name of the hlist_node within the struct.
  */
 #define hlist_for_each_entry_continue_rcu_bh(pos, member)              \
-       for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
-                       typeof(*(pos)), member);                        \
+       for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
+                       &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
-            pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
-                       typeof(*(pos)), member))
+            pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
+                       &(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
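
Only the way the next pointer is fetched changes above; use of the iterator is unchanged. A minimal sketch with a hypothetical demo_item type, continuing a walk from an entry found earlier (the caller must hold rcu_read_lock()):

#include <linux/rculist.h>

struct demo_item {
        struct hlist_node link;
        int key;
};

/* Returns the next entry after 'pos' with a matching key, or NULL. */
static struct demo_item *demo_next_with_key(struct demo_item *pos, int key)
{
        hlist_for_each_entry_continue_rcu(pos, link) {
                if (pos->key == key)
                        return pos;     /* valid only under rcu_read_lock() */
        }
        return NULL;
}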
index ed4f5939a452cb424671f87dc9911fe318cb1fc3..78097491cd99a693f41b45e2b12ea1a4119cb52d 100644 (file)
@@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void)
 extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch(t) \
        do { \
+               rcu_all_qs(); \
                if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
                        ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
        } while (0)
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch(t)   do { } while (0)
+#define rcu_note_voluntary_context_switch(t)   rcu_all_qs()
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /**
@@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void)
 })
 #define __rcu_dereference_check(p, c, space) \
 ({ \
-       typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+       /* Dependency order vs. p above. */ \
+       typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
        rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
        rcu_dereference_sparse(p, space); \
-       smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
-       ((typeof(*p) __force __kernel *)(_________p1)); \
+       ((typeof(*p) __force __kernel *)(________p1)); \
 })
 #define __rcu_dereference_protected(p, c, space) \
 ({ \
@@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void)
 })
 #define __rcu_dereference_index_check(p, c) \
 ({ \
-       typeof(p) _________p1 = ACCESS_ONCE(p); \
+       /* Dependency order vs. p above. */ \
+       typeof(p) _________p1 = lockless_dereference(p); \
        rcu_lockdep_assert(c, \
                           "suspicious rcu_dereference_index_check() usage"); \
-       smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
 })
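
Callers of rcu_dereference_check() and friends are unaffected; the explicit smp_read_barrier_depends() simply moves inside lockless_dereference(). For reference, a typical caller looks like this hedged sketch (demo_* names invented):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_cfg {
        int value;
};

static struct demo_cfg __rcu *demo_cfg_ptr;
static DEFINE_SPINLOCK(demo_lock);      /* taken by updaters */

static int demo_read_value(void)
{
        struct demo_cfg *cfg;
        int v = -1;

        rcu_read_lock();
        /* Legal under either rcu_read_lock() or demo_lock. */
        cfg = rcu_dereference_check(demo_cfg_ptr,
                                    lockdep_is_held(&demo_lock));
        if (cfg)
                v = cfg->value;
        rcu_read_unlock();
        return v;
}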
 
index 0e5366200154e87e394b61c52a60fad72f2050ff..937edaeb150deb17759a9c0c715630fd0cc9a729 100644 (file)
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
 }
 
 /*
- * Return the number of grace periods.
+ * Return the number of grace periods started.
  */
-static inline long rcu_batches_completed(void)
+static inline unsigned long rcu_batches_started(void)
 {
        return 0;
 }
 
 /*
- * Return the number of bottom-half grace periods.
+ * Return the number of bottom-half grace periods started.
  */
-static inline long rcu_batches_completed_bh(void)
+static inline unsigned long rcu_batches_started_bh(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of sched grace periods started.
+ */
+static inline unsigned long rcu_batches_started_sched(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_bh(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of sched grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_sched(void)
 {
        return 0;
 }
@@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void)
        return true;
 }
 
-
 #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
+static inline void rcu_all_qs(void)
+{
+}
+
 #endif /* __LINUX_RCUTINY_H */
index 52953790dcca2089527ec9f48725fd7d31fb0da0..d2e583a6aacacf09ee9dc3bf3646b6a3cff3494e 100644 (file)
@@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
-long rcu_batches_completed(void);
-long rcu_batches_completed_bh(void);
-long rcu_batches_completed_sched(void);
+unsigned long rcu_batches_started(void);
+unsigned long rcu_batches_started_bh(void);
+unsigned long rcu_batches_started_sched(void);
+unsigned long rcu_batches_completed(void);
+unsigned long rcu_batches_completed_bh(void);
+unsigned long rcu_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 
 void rcu_force_quiescent_state(void);
@@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly;
 
 bool rcu_is_watching(void);
 
+void rcu_all_qs(void);
+
 #endif /* __LINUX_RCUTREE_H */
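
rcu_all_qs() gives the core RCU code a hook to note a quiescent state on the current CPU; with the rcupdate.h change earlier in this series it is reached through rcu_note_voluntary_context_switch(). In practice that means long kernel-side loops that already use the cond_resched_rcu_qs() helper report quiescent states via this path — a hedged sketch, assuming that helper (present in kernels of this vintage) is available:

#include <linux/rcupdate.h>
#include <linux/sched.h>

static void demo_long_loop(unsigned long iters)
{
        unsigned long i;

        for (i = 0; i < iters; i++) {
                /* ... per-iteration work ... */
                cond_resched_rcu_qs();  /* may end up in rcu_all_qs() */
        }
}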
index 4419b99d8d6ec19010815c7ed759e513abccb95d..116655d922691b3143b8c9d840b944fed3a70adb 100644 (file)
@@ -468,7 +468,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
  *
  * @reg: Offset of the register within the regmap bank
  * @lsb: lsb of the register field.
- * @reg: msb of the register field.
+ * @msb: msb of the register field.
  * @id_size: port size if it has some ports
  * @id_offset: address offset for each ports
  */
index 5479394fefcec75bce9a090093d9d5d6e81e5b5d..5dd65acc2a69e5188bb1f5d95ac1223c67fd36bc 100644 (file)
@@ -32,6 +32,8 @@ struct da9211_pdata {
         * 2 : 2 phase 2 buck
         */
        int num_buck;
+       int gpio_ren[DA9211_MAX_REGULATORS];
+       struct device_node *reg_node[DA9211_MAX_REGULATORS];
        struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
 };
 #endif
index 5f1e9ca47417febff0811df407abe43d99563786..d4ad5b5a02bb478a422b349406efba00997bab76 100644 (file)
@@ -21,6 +21,7 @@
 
 struct regmap;
 struct regulator_dev;
+struct regulator_config;
 struct regulator_init_data;
 struct regulator_enable_gpio;
 
@@ -205,6 +206,15 @@ enum regulator_type {
  * @supply_name: Identifying the regulator supply
  * @of_match: Name used to identify regulator in DT.
  * @regulators_node: Name of node containing regulator definitions in DT.
+ * @of_parse_cb: Optional callback called only if of_match is present.
+ *               Will be called for each regulator parsed from DT, during
+ *               init_data parsing.
+ *               The regulator_config passed as argument to the callback will
+ *               be a copy of config passed to regulator_register, valid only
+ *               for this particular call. Callback may freely change the
+ *               config but it cannot store it for later usage.
+ *               Callback should return 0 on success or negative ERRNO
+ *               indicating failure.
  * @id: Numerical identifier for the regulator.
  * @ops: Regulator operations table.
  * @irq: Interrupt number for the regulator.
@@ -251,6 +261,9 @@ struct regulator_desc {
        const char *supply_name;
        const char *of_match;
        const char *regulators_node;
+       int (*of_parse_cb)(struct device_node *,
+                           const struct regulator_desc *,
+                           struct regulator_config *);
        int id;
        bool continuous_voltage_range;
        unsigned n_voltages;
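
A hedged sketch of how a regulator driver might hook @of_parse_cb: adjust the per-regulator config copy based on extra DT properties while the core parses init_data. The demo_* names and the "demo,enable-high" property are invented for illustration:

#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>

static int demo_of_parse_cb(struct device_node *np,
                            const struct regulator_desc *desc,
                            struct regulator_config *config)
{
        /* The config is a per-call copy; do not keep a pointer to it. */
        if (of_property_read_bool(np, "demo,enable-high"))
                config->ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;

        return 0;       /* a negative errno reports failure to the core */
}

static const struct regulator_desc demo_desc = {
        .name            = "demo-buck",
        .of_match        = "buck1",
        .regulators_node = "regulators",
        .of_parse_cb     = demo_of_parse_cb,
        /* .id, .ops, .type, .owner and voltage tables omitted here */
};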
index 0b08d05d470b56cacec467d5368810517d4a1752..b07562e082c416a2b98fc08818f35111c8237288 100644 (file)
@@ -191,15 +191,22 @@ struct regulator_init_data {
        void *driver_data;      /* core does not touch this */
 };
 
-int regulator_suspend_prepare(suspend_state_t state);
-int regulator_suspend_finish(void);
-
 #ifdef CONFIG_REGULATOR
 void regulator_has_full_constraints(void);
+int regulator_suspend_prepare(suspend_state_t state);
+int regulator_suspend_finish(void);
 #else
 static inline void regulator_has_full_constraints(void)
 {
 }
+static inline int regulator_suspend_prepare(suspend_state_t state)
+{
+       return 0;
+}
+static inline int regulator_suspend_finish(void)
+{
+       return 0;
+}
 #endif
 
 #endif
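
With stubs for the !CONFIG_REGULATOR case, platform power-management code can call the suspend helpers unconditionally instead of guarding them with #ifdefs. A small, hypothetical example of such a caller:

#include <linux/regulator/machine.h>
#include <linux/suspend.h>

static int demo_pm_begin(suspend_state_t state)
{
        /* Compiles to a successful no-op when regulators are disabled. */
        return regulator_suspend_prepare(state);
}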
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
new file mode 100644 (file)
index 0000000..30cc596
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu <flora.fu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6397_H
+#define __LINUX_REGULATOR_MT6397_H
+
+enum {
+       MT6397_ID_VPCA15 = 0,
+       MT6397_ID_VPCA7,
+       MT6397_ID_VSRAMCA15,
+       MT6397_ID_VSRAMCA7,
+       MT6397_ID_VCORE,
+       MT6397_ID_VGPU,
+       MT6397_ID_VDRM,
+       MT6397_ID_VIO18 = 7,
+       MT6397_ID_VTCXO,
+       MT6397_ID_VA28,
+       MT6397_ID_VCAMA,
+       MT6397_ID_VIO28,
+       MT6397_ID_VUSB,
+       MT6397_ID_VMC,
+       MT6397_ID_VMCH,
+       MT6397_ID_VEMC3V3,
+       MT6397_ID_VGP1,
+       MT6397_ID_VGP2,
+       MT6397_ID_VGP3,
+       MT6397_ID_VGP4,
+       MT6397_ID_VGP5,
+       MT6397_ID_VGP6,
+       MT6397_ID_VIBR,
+       MT6397_ID_RG_MAX,
+};
+
+#define MT6397_MAX_REGULATOR   MT6397_ID_RG_MAX
+#define MT6397_REGULATOR_ID97  0x97
+#define MT6397_REGULATOR_ID91  0x91
+
+#endif /* __LINUX_REGULATOR_MT6397_H */
index 364f7a7c43db3db23e67805983e90b3dd0187837..70c6c66c5bcf16cc4be324a1aaf40b1981e56413 100644 (file)
 #define PFUZE200_VGEN5         11
 #define PFUZE200_VGEN6         12
 
+#define PFUZE3000_SW1A         0
+#define PFUZE3000_SW1B         1
+#define PFUZE3000_SW2          2
+#define PFUZE3000_SW3          3
+#define PFUZE3000_SWBST                4
+#define PFUZE3000_VSNVS                5
+#define PFUZE3000_VREFDDR      6
+#define PFUZE3000_VLDO1                7
+#define PFUZE3000_VLDO2                8
+#define PFUZE3000_VCCSD                9
+#define PFUZE3000_V33          10
+#define PFUZE3000_VLDO3                11
+#define PFUZE3000_VLDO4                12
+
 struct regulator_init_data;
 
 struct pfuze_regulator_platform_data {
index b2b1afbb32024ebbb80be196ea276a7ff53ae43e..cd519a11c2c6723d5d679ffed8cb4320eba6e318 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Written by:
  * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
  */
index bc8677c8eba92c2ed22e23a1089d19486062a512..e69e9b51b21a39b1eacb0284cc08f95c7e56fb53 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
 #ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
index 555d254e660662483b8b09ecd075554295848ea6..fdd1d1d51da5d89efc352039f8ec2333fec5872a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
 #ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
index 4835486f58e5abbe71d2535a6f563241ae4d3126..381d368b91b413d1fe99462e13a11e61e8dda5b1 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #ifndef __LINUX_SPI_MXS_SPI_H__
index d5a316550177299fefaee37d6bf3e6a0e3322a8a..6d36dacec4baa1cd88a6bfebd259c4b685a34876 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #ifndef __linux_pxa2xx_spi_h
 #define __linux_pxa2xx_spi_h
@@ -57,7 +53,6 @@ struct pxa2xx_spi_chip {
 #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
 
 #include <linux/clk.h>
-#include <mach/dma.h>
 
 extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
 
index e546b2ceb6231ddfa83298cfae0dcabee910c1e8..a693188cc08b40a7663dcfff821191d88df3ffb3 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #ifndef __LINUX_SPI_RENESAS_SPI_H__
index a1121f872ac1482fc5a96c96462d7d63afda2d86..aa0d440ab4f060a38f4ed7f1c59e48b4c349b736 100644 (file)
@@ -9,10 +9,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #ifndef SH_HSPI_H
 #define SH_HSPI_H
index 88a14d81c49e3f5a19e89e26b93c41e09af2f427..b087a85f5f72a3511c0c97fa36b22f27b0b6cc1a 100644 (file)
@@ -7,6 +7,8 @@ struct sh_msiof_spi_info {
        u16 num_chipselect;
        unsigned int dma_tx_id;
        unsigned int dma_rx_id;
+       u32 dtdl;
+       u32 syncdl;
 };
 
 #endif /* __SPI_SH_MSIOF_H__ */
index a6ef2a8e6de4bc47b976494c0f59b9b4fa9d2c9d..ed9489d893a487f250868f8de603c84e707feaf8 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef __LINUX_SPI_H
@@ -260,6 +256,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @pump_messages: work struct for scheduling work to the message pump
  * @queue_lock: spinlock to synchronise access to message queue
  * @queue: message queue
+ * @idling: the device is entering idle state
  * @cur_msg: the currently in-flight message
  * @cur_msg_prepared: spi_prepare_message was called for the currently
  *                    in-flight message
@@ -425,6 +422,7 @@ struct spi_master {
        spinlock_t                      queue_lock;
        struct list_head                queue;
        struct spi_message              *cur_msg;
+       bool                            idling;
        bool                            busy;
        bool                            running;
        bool                            rt;
index 60b59187e590a9cb2c01cc99d6ea422c7d4d998f..414c6fddfcf097433515a9783b34200656046f96 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
 struct tle62x0_pdata {
index 8f721e465e05b968ed1b5223c4b1e7eb61be78e2..563b3b1799a86cca2c09501028fbad990809b104 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
  */
 
 #ifndef _LINUX_SPI_TSC2005_H
index a2783cb5d2753f6c6eb04e48086f461abedb8b6f..9cfd9623fb0325a692c4929cac048912644dc0ac 100644 (file)
@@ -45,7 +45,7 @@ struct rcu_batch {
 #define RCU_BATCH_INIT(name) { NULL, &(name.head) }
 
 struct srcu_struct {
-       unsigned completed;
+       unsigned long completed;
        struct srcu_struct_array __percpu *per_cpu_ref;
        spinlock_t queue_lock; /* protect ->batch_queue, ->running */
        bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
  * define and init a srcu struct at build time.
  * don't call init_srcu_struct() nor cleanup_srcu_struct() on it.
  */
-#define DEFINE_SRCU(name)                                              \
+#define __DEFINE_SRCU(name, is_static)                                 \
        static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-       struct srcu_struct name = __SRCU_STRUCT_INIT(name);
-
-#define DEFINE_STATIC_SRCU(name)                                       \
-       static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-       static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+       is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name)              __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name)       __DEFINE_SRCU(name, static)
 
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
 void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
 void synchronize_srcu(struct srcu_struct *sp);
 void synchronize_srcu_expedited(struct srcu_struct *sp);
-long srcu_batches_completed(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void srcu_barrier(struct srcu_struct *sp);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
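
DEFINE_SRCU()/DEFINE_STATIC_SRCU() users are unchanged by the refactor; both now expand through __DEFINE_SRCU(). For context, a minimal reader/updater pairing with hypothetical demo_* names:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

static DEFINE_STATIC_SRCU(demo_srcu);
static int __rcu *demo_data;

static int demo_read(void)
{
        int idx, val = 0;
        int *p;

        idx = srcu_read_lock(&demo_srcu);       /* readers may sleep here */
        p = srcu_dereference(demo_data, &demo_srcu);
        if (p)
                val = *p;
        srcu_read_unlock(&demo_srcu, idx);
        return val;
}

static void demo_update(int *new_p)
{
        int *old = rcu_dereference_protected(demo_data, 1);

        rcu_assign_pointer(demo_data, new_p);
        synchronize_srcu(&demo_srcu);   /* wait for readers of 'old' */
        kfree(old);
}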
index e08e21e5f601bcec81ea21131c4e58e69c611850..c72851328ca9cc071620cfdef3892ca91db6d96a 100644 (file)
@@ -173,7 +173,7 @@ extern void syscall_unregfunc(void);
                                TP_PROTO(data_proto),                   \
                                TP_ARGS(data_args),                     \
                                TP_CONDITION(cond),,);                  \
-               if (IS_ENABLED(CONFIG_LOCKDEP)) {                       \
+               if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {             \
                        rcu_read_lock_sched_notrace();                  \
                        rcu_dereference_sched(__tracepoint_##name.funcs);\
                        rcu_read_unlock_sched_notrace();                \
index 2232ed16635ae0929c97f62e1c8c1b09109f0649..37423e0e1379872c5ac54a7eae953f9f26568a82 100644 (file)
@@ -363,7 +363,6 @@ do {                                                                        \
  */
 #define wait_event_cmd(wq, condition, cmd1, cmd2)                      \
 do {                                                                   \
-       might_sleep();                                                  \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_cmd(wq, condition, cmd1, cmd2);                    \
index 7ee2df083542365e9d317fa1dbc2bbcfbbe34aa6..dc8fd81412bf319b7ba59c8b12c65c9363db75f6 100644 (file)
@@ -22,9 +22,9 @@ struct flow_keys {
                __be32 ports;
                __be16 port16[2];
        };
-       u16 thoff;
-       u16 n_proto;
-       u8 ip_proto;
+       u16     thoff;
+       __be16  n_proto;
+       u8      ip_proto;
 };
 
 bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
index f7cbd703d15d24edca61cf9e159cb1ce3857cb5b..09cf5aebb28368fbb93c974f14a78c3fa9a6f408 100644 (file)
@@ -181,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
        return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
 }
 
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           const struct ip_options *sopt,
                           __be32 daddr, __be32 saddr,
                           const struct ip_reply_arg *arg,
index 4292929392b0127479c49c5da3bb33f053f4428c..6e416f6d3e3cbe9ed17406b1c504f33023d76509 100644 (file)
@@ -671,6 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
+u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
+                       struct in6_addr *src);
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
 void ipv6_proxy_select_ident(struct sk_buff *skb);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -708,7 +711,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                                        __be32 flowlabel, bool autolabel)
 {
        if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
-               __be32 hash;
+               u32 hash;
 
                hash = skb_get_hash(skb);
 
@@ -718,7 +721,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                 */
                hash ^= hash >> 12;
 
-               flowlabel = hash & IPV6_FLOWLABEL_MASK;
+               flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
        }
 
        return flowlabel;
index 3ae969e3acf016474413e1209381c60091befe1a..9eaaa788458607004cb5f160e77c38de02da17ec 100644 (file)
@@ -530,6 +530,8 @@ enum nft_chain_type {
 
 int nft_chain_validate_dependency(const struct nft_chain *chain,
                                  enum nft_chain_type type);
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+                             unsigned int hook_flags);
 
 struct nft_stats {
        u64                     bytes;
index 24945cefc4fde6bfaf9c4560080c91b2e3b12d0d..0ffef1a38efcc2f75e78dab09aa5e111f5b8b72f 100644 (file)
@@ -52,6 +52,7 @@ struct netns_ipv4 {
        struct inet_peer_base   *peers;
        struct tcpm_hash_bucket *tcp_metrics_hash;
        unsigned int            tcp_metrics_hash_log;
+       struct sock  * __percpu *tcp_sk;
        struct netns_frags      frags;
 #ifdef CONFIG_NETFILTER
        struct xt_table         *iptable_filter;
index 3d282cbb66bf1015d28f140ed3bc031ac43afaed..c605d305c577074d11bee6f19479dda8a4949ee3 100644 (file)
@@ -79,6 +79,9 @@ struct Qdisc {
        struct netdev_queue     *dev_queue;
 
        struct gnet_stats_rate_est64    rate_est;
+       struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+       struct gnet_stats_queue __percpu *cpu_qstats;
+
        struct Qdisc            *next_sched;
        struct sk_buff          *gso_skb;
        /*
@@ -86,15 +89,9 @@ struct Qdisc {
         */
        unsigned long           state;
        struct sk_buff_head     q;
-       union {
-               struct gnet_stats_basic_packed bstats;
-               struct gnet_stats_basic_cpu __percpu *cpu_bstats;
-       } __packed;
+       struct gnet_stats_basic_packed bstats;
        unsigned int            __state;
-       union {
-               struct gnet_stats_queue qstats;
-               struct gnet_stats_queue __percpu *cpu_qstats;
-       } __packed;
+       struct gnet_stats_queue qstats;
        struct rcu_head         rcu_head;
        int                     padded;
        atomic_t                refcnt;
index f50f29faf76f1fbcc5237de63c76f7c8200f4a6b..9d9111ef43ae305ad60e11c4b848b2b5f0a7eec3 100644 (file)
@@ -834,8 +834,8 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
 void tcp_get_allowed_congestion_control(char *buf, size_t len);
 int tcp_set_allowed_congestion_control(char *allowed);
 int tcp_set_congestion_control(struct sock *sk, const char *name);
-void tcp_slow_start(struct tcp_sock *tp, u32 acked);
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
 
 u32 tcp_reno_ssthresh(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
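
The new signatures let congestion-control modules credit the full number of newly acked segments: tcp_slow_start() now returns how many ACKs were left over once cwnd reached ssthresh, and tcp_cong_avoid_ai() takes that count. A hedged sketch of a Reno-style cong_avoid using them (demo_ name invented; checks such as tcp_is_cwnd_limited() omitted):

#include <net/tcp.h>

static void demo_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->snd_cwnd <= tp->snd_ssthresh) {
                acked = tcp_slow_start(tp, acked);      /* leftover ACKs */
                if (!acked)
                        return;
        }
        /* Additive increase, credited with the remaining ACK count. */
        tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}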
index 0d74f1de99aa89dee233ed408815459e2ad66d5f..65994a19e84055e7b4f4d8cde83a07eb41c1a69a 100644 (file)
@@ -1707,10 +1707,7 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
 
 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
 {
-       size_t copy_sz;
-
-       copy_sz = min_t(size_t, len, udata->outlen);
-       return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
+       return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
 }
 
 /**
index 2609048c1d442581a2530f79cc2135aa679f8cb1..3a34f6edc2d1ff6bb9280b3bd9b1a203c5393c68 100644 (file)
@@ -286,7 +286,7 @@ struct ak4113 {
        ak4113_write_t *write;
        ak4113_read_t *read;
        void *private_data;
-       unsigned int init:1;
+       atomic_t wq_processing;
        spinlock_t lock;
        unsigned char regmap[AK4113_WRITABLE_REGS];
        struct snd_kcontrol *kctls[AK4113_CONTROLS];
index 52f02a60dba731e500bbac2a355b1bd5d12a676f..069299a88915b6f27506c06471b08a7519590e88 100644 (file)
@@ -168,7 +168,7 @@ struct ak4114 {
        ak4114_write_t * write;
        ak4114_read_t * read;
        void * private_data;
-       unsigned int init: 1;
+       atomic_t wq_processing;
        spinlock_t lock;
        unsigned char regmap[6];
        unsigned char txcsb[5];
index b4fca9aed2a2b00296ce040428b830b93095d562..ac8b333acb4dd721596db8ddf9bf7fcbc7ab6c1d 100644 (file)
@@ -498,6 +498,7 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
                                unsigned int mask, unsigned int value);
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
+struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
 struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
 void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
 
index 13391d288107a6aa4325c34ba16a00450b7af814..0e763576515398be7bcbea3e1d4b50a25201c853 100644 (file)
        { TLB_LOCAL_SHOOTDOWN,          "local shootdown" },            \
        { TLB_LOCAL_MM_SHOOTDOWN,       "local mm shootdown" }
 
-TRACE_EVENT(tlb_flush,
+TRACE_EVENT_CONDITION(tlb_flush,
 
        TP_PROTO(int reason, unsigned long pages),
        TP_ARGS(reason, pages),
 
+       TP_CONDITION(cpu_online(smp_processor_id())),
+
        TP_STRUCT__entry(
                __field(          int, reason)
                __field(unsigned long,  pages)
index 4275b961bf60f65ec9d93a2dbe680e66d184b6a2..867cc5084afbfce8ab24cfd87d5f52cda93697de 100644 (file)
@@ -90,7 +90,6 @@ enum {
 };
 
 enum {
-       IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
        IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 };
@@ -202,32 +201,6 @@ struct ib_uverbs_query_device_resp {
        __u8  reserved[4];
 };
 
-enum {
-       IB_USER_VERBS_EX_QUERY_DEVICE_ODP =             1ULL << 0,
-};
-
-struct ib_uverbs_ex_query_device {
-       __u32 comp_mask;
-       __u32 reserved;
-};
-
-struct ib_uverbs_odp_caps {
-       __u64 general_caps;
-       struct {
-               __u32 rc_odp_caps;
-               __u32 uc_odp_caps;
-               __u32 ud_odp_caps;
-       } per_transport_caps;
-       __u32 reserved;
-};
-
-struct ib_uverbs_ex_query_device_resp {
-       struct ib_uverbs_query_device_resp base;
-       __u32 comp_mask;
-       __u32 reserved;
-       struct ib_uverbs_odp_caps odp_caps;
-};
-
 struct ib_uverbs_query_port {
        __u64 response;
        __u8  port_num;
index 9afb971497f4c9972b0dcf14071edf9ef81320be..1354ac09b5163a097ea8e0ca3146716cdc051363 100644 (file)
@@ -470,7 +470,6 @@ choice
 config TREE_RCU
        bool "Tree-based hierarchical RCU"
        depends on !PREEMPT && SMP
-       select IRQ_WORK
        help
          This option selects the RCU implementation that is
          designed for very large SMP system with hundreds or
@@ -480,7 +479,6 @@ config TREE_RCU
 config PREEMPT_RCU
        bool "Preemptible tree-based hierarchical RCU"
        depends on PREEMPT
-       select IRQ_WORK
        help
          This option selects the RCU implementation that is
          designed for very large SMP systems with hundreds or
@@ -501,9 +499,17 @@ config TINY_RCU
 
 endchoice
 
+config SRCU
+       bool
+       help
+         This option selects the sleepable version of RCU. This version
+         permits arbitrary sleeping or blocking within RCU read-side critical
+         sections.
+
 config TASKS_RCU
        bool "Task_based RCU implementation using voluntary context switch"
        default n
+       select SRCU
        help
          This option enables a task-based RCU implementation that uses
          only voluntary context switch (not preemption!), idle, and
@@ -668,9 +674,10 @@ config RCU_BOOST
 
 config RCU_KTHREAD_PRIO
        int "Real-time priority to use for RCU worker threads"
-       range 1 99
-       depends on RCU_BOOST
-       default 1
+       range 1 99 if RCU_BOOST
+       range 0 99 if !RCU_BOOST
+       default 1 if RCU_BOOST
+       default 0 if !RCU_BOOST
        help
          This option specifies the SCHED_FIFO priority value that will be
          assigned to the rcuc/n and rcub/n threads and is also the value
@@ -1595,6 +1602,7 @@ config PERF_EVENTS
        depends on HAVE_PERF_EVENTS
        select ANON_INODES
        select IRQ_WORK
+       select SRCU
        help
          Enable kernel support for various performance events provided
          by software and hardware.
index 5d220234b3ca5aa2feebe4b80ddba1bfd63a8eda..1972b161c61e98fbe3e3ce003744cf1d2e8c5b1c 100644 (file)
@@ -58,22 +58,23 @@ static int cpu_hotplug_disabled;
 
 static struct {
        struct task_struct *active_writer;
-       struct mutex lock; /* Synchronizes accesses to refcount, */
+       /* wait queue to wake up the active_writer */
+       wait_queue_head_t wq;
+       /* verifies that no writer will get active while readers are active */
+       struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
-       int refcount;
-       /* And allows lockless put_online_cpus(). */
-       atomic_t puts_pending;
+       atomic_t refcount;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
 #endif
 } cpu_hotplug = {
        .active_writer = NULL,
+       .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-       .refcount = 0,
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
 #endif
@@ -86,15 +87,6 @@ static struct {
 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 
-static void apply_puts_pending(int max)
-{
-       int delta;
-
-       if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
-               delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
-               cpu_hotplug.refcount -= delta;
-       }
-}
 
 void get_online_cpus(void)
 {
@@ -103,8 +95,7 @@ void get_online_cpus(void)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
-       apply_puts_pending(65536);
-       cpu_hotplug.refcount++;
+       atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
 }
 EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -116,8 +107,7 @@ bool try_get_online_cpus(void)
        if (!mutex_trylock(&cpu_hotplug.lock))
                return false;
        cpuhp_lock_acquire_tryread();
-       apply_puts_pending(65536);
-       cpu_hotplug.refcount++;
+       atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
        return true;
 }
@@ -125,20 +115,18 @@ EXPORT_SYMBOL_GPL(try_get_online_cpus);
 
 void put_online_cpus(void)
 {
+       int refcount;
+
        if (cpu_hotplug.active_writer == current)
                return;
-       if (!mutex_trylock(&cpu_hotplug.lock)) {
-               atomic_inc(&cpu_hotplug.puts_pending);
-               cpuhp_lock_release();
-               return;
-       }
 
-       if (WARN_ON(!cpu_hotplug.refcount))
-               cpu_hotplug.refcount++; /* try to fix things up */
+       refcount = atomic_dec_return(&cpu_hotplug.refcount);
+       if (WARN_ON(refcount < 0)) /* try to fix things up */
+               atomic_inc(&cpu_hotplug.refcount);
+
+       if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
+               wake_up(&cpu_hotplug.wq);
 
-       if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
-               wake_up_process(cpu_hotplug.active_writer);
-       mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
 
 }
@@ -168,18 +156,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
  */
 void cpu_hotplug_begin(void)
 {
-       cpu_hotplug.active_writer = current;
+       DEFINE_WAIT(wait);
 
+       cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();
+
        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
-               apply_puts_pending(1);
-               if (likely(!cpu_hotplug.refcount))
-                       break;
-               __set_current_state(TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
+               if (likely(!atomic_read(&cpu_hotplug.refcount)))
+                               break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
+       finish_wait(&cpu_hotplug.wq, &wait);
 }
 
 void cpu_hotplug_done(void)
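
The rework above amounts to a reader-count-plus-waitqueue exclusion scheme: readers take the mutex only long enough to bump an atomic count, and the hotplug writer sleeps on a waitqueue until that count drains. A stripped-down sketch of the same idiom with invented demo_* names (the real code additionally handles writer recursion and lockdep annotations):

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

static atomic_t demo_readers = ATOMIC_INIT(0);
static DEFINE_MUTEX(demo_writer_lock);          /* blocks new readers */
static DECLARE_WAIT_QUEUE_HEAD(demo_writer_wq);

static void demo_read_lock(void)
{
        mutex_lock(&demo_writer_lock);  /* waits out an active writer */
        atomic_inc(&demo_readers);
        mutex_unlock(&demo_writer_lock);
}

static void demo_read_unlock(void)
{
        if (atomic_dec_return(&demo_readers) <= 0 &&
            waitqueue_active(&demo_writer_wq))
                wake_up(&demo_writer_wq);
}

static void demo_write_begin(void)
{
        DEFINE_WAIT(wait);

        for (;;) {
                mutex_lock(&demo_writer_lock);
                prepare_to_wait(&demo_writer_wq, &wait, TASK_UNINTERRUPTIBLE);
                if (!atomic_read(&demo_readers))
                        break;  /* exit holding the mutex: readers blocked */
                mutex_unlock(&demo_writer_lock);
                schedule();     /* woken by the last demo_read_unlock() */
        }
        finish_wait(&demo_writer_wq, &wait);
}

static void demo_write_end(void)
{
        mutex_unlock(&demo_writer_lock);
}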
index 4803da6eab62f182354707c10f48be35a8b54fb5..ae9fc7cc360ebea6088db4ae0206e452d856214b 100644 (file)
@@ -402,6 +402,7 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh,
 }
 EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
 
+#ifdef CONFIG_SRCU
 /*
  *     SRCU notifier chain routines.    Registration and unregistration
  *     use a mutex, and call_chain is synchronized by SRCU (no locks).
@@ -528,6 +529,8 @@ void srcu_init_notifier_head(struct srcu_notifier_head *nh)
 }
 EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
 
+#endif /* CONFIG_SRCU */
+
 static ATOMIC_NOTIFIER_HEAD(die_chain);
 
 int notrace notify_die(enum die_val val, const char *str,
index 48b28d387c7f77b2e3d36bc751b3e221c7634d67..7e01f78f041778abe405c9115c15e10a77f64d03 100644 (file)
@@ -251,6 +251,7 @@ config APM_EMULATION
 
 config PM_OPP
        bool
+       select SRCU
        ---help---
          SOCs have a standard set of tuples consisting of frequency and
          voltage pairs that the device will support per voltage domain. This
index e6fae503d1bc54519a45ba838fe3d8622e4333f4..50a808424b06af45fdd0fd6ab3ae732e1917680d 100644 (file)
@@ -1,4 +1,5 @@
-obj-y += update.o srcu.o
+obj-y += update.o
+obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
index 07bb02eda844bf16eee540ba01a99beb7c52ac60..80adef7d4c3d01d9ef9ed95c483956d2a858854f 100644 (file)
@@ -137,4 +137,10 @@ int rcu_jiffies_till_stall_check(void);
 
 void rcu_early_boot_tests(void);
 
+/*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
 #endif /* __LINUX_RCU_H */
index 4d559baf06e0c7171a7a86acbeed9926fc8c9d7d..30d42aa55d83de9b111bd05945114641409f3596 100644 (file)
@@ -244,7 +244,8 @@ struct rcu_torture_ops {
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *rrsp);
        void (*readunlock)(int idx);
-       int (*completed)(void);
+       unsigned long (*started)(void);
+       unsigned long (*completed)(void);
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*exp_sync)(void);
@@ -296,11 +297,6 @@ static void rcu_torture_read_unlock(int idx) __releases(RCU)
        rcu_read_unlock();
 }
 
-static int rcu_torture_completed(void)
-{
-       return rcu_batches_completed();
-}
-
 /*
  * Update callback in the pipe.  This should be invoked after a grace period.
  */
@@ -356,7 +352,7 @@ rcu_torture_cb(struct rcu_head *p)
                cur_ops->deferred_free(rp);
 }
 
-static int rcu_no_completed(void)
+static unsigned long rcu_no_completed(void)
 {
        return 0;
 }
@@ -377,7 +373,8 @@ static struct rcu_torture_ops rcu_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
-       .completed      = rcu_torture_completed,
+       .started        = rcu_batches_started,
+       .completed      = rcu_batches_completed,
        .deferred_free  = rcu_torture_deferred_free,
        .sync           = synchronize_rcu,
        .exp_sync       = synchronize_rcu_expedited,
@@ -407,11 +404,6 @@ static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
        rcu_read_unlock_bh();
 }
 
-static int rcu_bh_torture_completed(void)
-{
-       return rcu_batches_completed_bh();
-}
-
 static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
 {
        call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
@@ -423,7 +415,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
-       .completed      = rcu_bh_torture_completed,
+       .started        = rcu_batches_started_bh,
+       .completed      = rcu_batches_completed_bh,
        .deferred_free  = rcu_bh_torture_deferred_free,
        .sync           = synchronize_rcu_bh,
        .exp_sync       = synchronize_rcu_bh_expedited,
@@ -466,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_torture_read_unlock,
+       .started        = rcu_no_completed,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_busted_torture_deferred_free,
        .sync           = synchronize_rcu_busted,
@@ -510,7 +504,7 @@ static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
        srcu_read_unlock(&srcu_ctl, idx);
 }
 
-static int srcu_torture_completed(void)
+static unsigned long srcu_torture_completed(void)
 {
        return srcu_batches_completed(&srcu_ctl);
 }
@@ -564,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = {
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
+       .started        = NULL,
        .completed      = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
@@ -600,7 +595,8 @@ static struct rcu_torture_ops sched_ops = {
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
-       .completed      = rcu_no_completed,
+       .started        = rcu_batches_started_sched,
+       .completed      = rcu_batches_completed_sched,
        .deferred_free  = rcu_sched_torture_deferred_free,
        .sync           = synchronize_sched,
        .exp_sync       = synchronize_sched_expedited,
@@ -638,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = {
        .readlock       = tasks_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = tasks_torture_read_unlock,
+       .started        = rcu_no_completed,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_tasks_torture_deferred_free,
        .sync           = synchronize_rcu_tasks,
@@ -1015,8 +1012,8 @@ static void rcutorture_trace_dump(void)
 static void rcu_torture_timer(unsigned long unused)
 {
        int idx;
-       int completed;
-       int completed_end;
+       unsigned long started;
+       unsigned long completed;
        static DEFINE_TORTURE_RANDOM(rand);
        static DEFINE_SPINLOCK(rand_lock);
        struct rcu_torture *p;
@@ -1024,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused)
        unsigned long long ts;
 
        idx = cur_ops->readlock();
-       completed = cur_ops->completed();
+       if (cur_ops->started)
+               started = cur_ops->started();
+       else
+               started = cur_ops->completed();
        ts = rcu_trace_clock_local();
        p = rcu_dereference_check(rcu_torture_current,
                                  rcu_read_lock_bh_held() ||
@@ -1047,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused)
                /* Should not happen, but... */
                pipe_count = RCU_TORTURE_PIPE_LEN;
        }
-       completed_end = cur_ops->completed();
+       completed = cur_ops->completed();
        if (pipe_count > 1) {
                do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
-                                         completed, completed_end);
+                                         started, completed);
                rcutorture_trace_dump();
        }
        __this_cpu_inc(rcu_torture_count[pipe_count]);
-       completed = completed_end - completed;
+       completed = completed - started;
+       if (cur_ops->started)
+               completed++;
        if (completed > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                completed = RCU_TORTURE_PIPE_LEN;
@@ -1073,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused)
 static int
 rcu_torture_reader(void *arg)
 {
-       int completed;
-       int completed_end;
+       unsigned long started;
+       unsigned long completed;
        int idx;
        DEFINE_TORTURE_RANDOM(rand);
        struct rcu_torture *p;
@@ -1093,7 +1095,10 @@ rcu_torture_reader(void *arg)
                                mod_timer(&t, jiffies + 1);
                }
                idx = cur_ops->readlock();
-               completed = cur_ops->completed();
+               if (cur_ops->started)
+                       started = cur_ops->started();
+               else
+                       started = cur_ops->completed();
                ts = rcu_trace_clock_local();
                p = rcu_dereference_check(rcu_torture_current,
                                          rcu_read_lock_bh_held() ||
@@ -1114,14 +1119,16 @@ rcu_torture_reader(void *arg)
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
-               completed_end = cur_ops->completed();
+               completed = cur_ops->completed();
                if (pipe_count > 1) {
                        do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
-                                                 ts, completed, completed_end);
+                                                 ts, started, completed);
                        rcutorture_trace_dump();
                }
                __this_cpu_inc(rcu_torture_count[pipe_count]);
-               completed = completed_end - completed;
+               completed = completed - started;
+               if (cur_ops->started)
+                       completed++;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
@@ -1420,6 +1427,9 @@ static int rcu_torture_barrier(void *arg)
                cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
                if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
                        n_rcu_torture_barrier_error++;
+                       pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
+                              atomic_read(&barrier_cbs_invoked),
+                              n_barrier_cbs);
                        WARN_ON_ONCE(1);
                }
                n_barrier_successes++;
index e037f3eb2f7bf2f44a6de08cdcfe89b05d4d5bca..445bf8ffe3fb27dfc58aa411b476ef105bc33645 100644 (file)
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(srcu_barrier);
  * Report the number of batches, correlated with, but not necessarily
  * precisely the same as, the number of grace periods that have elapsed.
  */
-long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *sp)
 {
        return sp->completed;
 }
index 0db5649f88179958d7ab26f982036a70c388a817..cc9ceca7bde1fd5f0036ad841033071e5fec0499 100644 (file)
@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);
 
-static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-
 #include "tiny_plugin.h"
 
-/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
-static void rcu_idle_enter_common(long long newval)
-{
-       if (newval) {
-               RCU_TRACE(trace_rcu_dyntick(TPS("--="),
-                                           rcu_dynticks_nesting, newval));
-               rcu_dynticks_nesting = newval;
-               return;
-       }
-       RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
-                                   rcu_dynticks_nesting, newval));
-       if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-               struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-               RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
-                                           rcu_dynticks_nesting, newval));
-               ftrace_dump(DUMP_ALL);
-               WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-                         current->pid, current->comm,
-                         idle->pid, idle->comm); /* must be idle task! */
-       }
-       rcu_sched_qs(); /* implies rcu_bh_inc() */
-       barrier();
-       rcu_dynticks_nesting = newval;
-}
-
 /*
  * Enter idle, which is an extended quiescent state if we have fully
- * entered that mode (i.e., if the new value of dynticks_nesting is zero).
+ * entered that mode.
  */
 void rcu_idle_enter(void)
 {
-       unsigned long flags;
-       long long newval;
-
-       local_irq_save(flags);
-       WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
-       if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
-           DYNTICK_TASK_NEST_VALUE)
-               newval = 0;
-       else
-               newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
-       rcu_idle_enter_common(newval);
-       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_irq_exit(void)
 {
-       unsigned long flags;
-       long long newval;
-
-       local_irq_save(flags);
-       newval = rcu_dynticks_nesting - 1;
-       WARN_ON_ONCE(newval < 0);
-       rcu_idle_enter_common(newval);
-       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_exit);
 
-/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
-static void rcu_idle_exit_common(long long oldval)
-{
-       if (oldval) {
-               RCU_TRACE(trace_rcu_dyntick(TPS("++="),
-                                           oldval, rcu_dynticks_nesting));
-               return;
-       }
-       RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
-       if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-               struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-               RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
-                         oldval, rcu_dynticks_nesting));
-               ftrace_dump(DUMP_ALL);
-               WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-                         current->pid, current->comm,
-                         idle->pid, idle->comm); /* must be idle task! */
-       }
-}
-
 /*
  * Exit idle, so that we are no longer in an extended quiescent state.
  */
 void rcu_idle_exit(void)
 {
-       unsigned long flags;
-       long long oldval;
-
-       local_irq_save(flags);
-       oldval = rcu_dynticks_nesting;
-       WARN_ON_ONCE(rcu_dynticks_nesting < 0);
-       if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
-               rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-       else
-               rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-       rcu_idle_exit_common(oldval);
-       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
  */
 void rcu_irq_enter(void)
 {
-       unsigned long flags;
-       long long oldval;
-
-       local_irq_save(flags);
-       oldval = rcu_dynticks_nesting;
-       rcu_dynticks_nesting++;
-       WARN_ON_ONCE(rcu_dynticks_nesting == 0);
-       rcu_idle_exit_common(oldval);
-       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
@@ -179,22 +89,12 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
  */
 bool notrace __rcu_is_watching(void)
 {
-       return rcu_dynticks_nesting;
+       return true;
 }
 EXPORT_SYMBOL(__rcu_is_watching);
 
 #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
-/*
- * Test whether the current CPU was interrupted from idle.  Nested
- * interrupts don't count, we must be running at the first interrupt
- * level.
- */
-static int rcu_is_cpu_rrupt_from_idle(void)
-{
-       return rcu_dynticks_nesting <= 1;
-}
-
 /*
  * Helper function for rcu_sched_qs() and rcu_bh_qs().
  * Also irqs are disabled to avoid confusion due to interrupt handlers
@@ -250,7 +150,7 @@ void rcu_bh_qs(void)
 void rcu_check_callbacks(int user)
 {
        RCU_TRACE(check_cpu_stalls());
-       if (user || rcu_is_cpu_rrupt_from_idle())
+       if (user)
                rcu_sched_qs();
        else if (!in_softirq())
                rcu_bh_qs();
@@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head,
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
+
+       if (unlikely(is_idle_task(current))) {
+               /* force scheduling for rcu_sched_qs() */
+               resched_cpu(0);
+       }
 }
 
 /*
@@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
 void __init rcu_init(void)
 {
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+       RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
+       RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));
 
        rcu_early_boot_tests();
 }
index 858c5656912724c64e1ea20d5497edc9e8946b7d..f94e209a10d615a5a4f7d379e623918f533ce814 100644 (file)
@@ -145,17 +145,16 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
        rcp->ticks_this_gp++;
        j = jiffies;
        js = ACCESS_ONCE(rcp->jiffies_stall);
-       if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
+       if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
                pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
-                      rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
+                      rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
                       jiffies - rcp->gp_start, rcp->qlen);
                dump_stack();
-       }
-       if (*rcp->curtail && ULONG_CMP_GE(j, js))
                ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
                        3 * rcu_jiffies_till_stall_check() + 3;
-       else if (ULONG_CMP_GE(j, js))
+       } else if (ULONG_CMP_GE(j, js)) {
                ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+       }
 }
 
 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
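Both the stall check above and the ->gpwrap detection added later in this merge lean on the same wrap-safe unsigned-comparison idiom. A self-contained illustration is below; the kernel's ULONG_CMP_GE()/ULONG_CMP_LT() macros live in include/linux/rcupdate.h, and the local helper here is only a written-out stand-in.

#include <limits.h>

/*
 * Wrap-safe "time a is at or after time b": unsigned subtraction keeps
 * the test valid even after jiffies or grace-period counters wrap.
 */
static int ulong_cmp_ge(unsigned long a, unsigned long b)
{
        return ULONG_MAX / 2 >= a - b;
}

/*
 * Near the wrap point, 3 still counts as "after" ULONG_MAX - 2:
 * 3 - (ULONG_MAX - 2) wraps to 6, well under ULONG_MAX / 2.
 */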
index 7680fc2750361a91fc8f749a006555899aa92677..48d640ca1a05b8c0f83fe2b217b925a6dec69fa4 100644 (file)
@@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
+/* rcuc/rcub kthread realtime priority */
+static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+module_param(kthread_prio, int, 0644);
+
 /*
  * Track the rcutorture test sequence number and the update version
  * number within a given test.  The rcutorture_testseq is incremented
@@ -215,6 +219,9 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };
 
+DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
+EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
+
 /*
  * Let the RCU core know that this CPU has gone through the scheduler,
  * which is a quiescent state.  This is called when the need for a
@@ -284,6 +291,22 @@ void rcu_note_context_switch(void)
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
+/*
+ * Register a quiescent state for all RCU flavors.  If there is an
+ * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
+ * dyntick-idle quiescent state visible to other CPUs (but only for those
+ * RCU flavors in desperate need of a quiescent state, which will normally
+ * be none of them).  Either way, do a lightweight quiescent state for
+ * all RCU flavors.
+ */
+void rcu_all_qs(void)
+{
+       if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+               rcu_momentary_dyntick_idle();
+       this_cpu_inc(rcu_qs_ctr);
+}
+EXPORT_SYMBOL_GPL(rcu_all_qs);
+
 static long blimit = 10;       /* Maximum callbacks per rcu_do_batch. */
 static long qhimark = 10000;   /* If this many pending, ignore blimit. */
 static long qlowmark = 100;    /* Once only this many pending, use blimit. */
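The new rcu_all_qs()/rcu_qs_ctr pair gives the grace-period machinery a lightweight way to notice that a CPU passed through a quiescent state: every call bumps a per-CPU counter, and later hunks compare that counter against the rdp->rcu_qs_ctr_snap snapshot taken when the CPU was marked as owing a quiescent state. A stripped-down sketch of the snapshot-and-compare idea, with hypothetical names and none of the per-CPU or memory-ordering machinery:

static unsigned long qs_ctr;            /* bumped by each lightweight QS (rcu_all_qs()) */
static unsigned long qs_ctr_snap;       /* snapshot taken when the CPU starts owing a QS */

static void note_lightweight_qs(void)
{
        qs_ctr++;                       /* models this_cpu_inc(rcu_qs_ctr) */
}

static void start_watching_this_cpu(void)
{
        qs_ctr_snap = qs_ctr;           /* models rdp->rcu_qs_ctr_snap = __this_cpu_read(...) */
}

static int this_cpu_passed_qs(void)
{
        return qs_ctr != qs_ctr_snap;   /* any bump since the snapshot is a quiescent state */
}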
@@ -315,18 +338,54 @@ static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
 
 /*
- * Return the number of RCU-sched batches processed thus far for debug & stats.
+ * Return the number of RCU batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started(void)
+{
+       return rcu_state_p->gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started);
+
+/*
+ * Return the number of RCU-sched batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started_sched(void)
+{
+       return rcu_sched_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+
+/*
+ * Return the number of RCU BH batches started thus far for debug & stats.
  */
-long rcu_batches_completed_sched(void)
+unsigned long rcu_batches_started_bh(void)
+{
+       return rcu_bh_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
+
+/*
+ * Return the number of RCU batches completed thus far for debug & stats.
+ */
+unsigned long rcu_batches_completed(void)
+{
+       return rcu_state_p->completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU-sched batches completed thus far for debug & stats.
+ */
+unsigned long rcu_batches_completed_sched(void)
 {
        return rcu_sched_state.completed;
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
 /*
- * Return the number of RCU BH batches processed thus far for debug & stats.
+ * Return the number of RCU BH batches completed thus far for debug & stats.
  */
-long rcu_batches_completed_bh(void)
+unsigned long rcu_batches_completed_bh(void)
 {
        return rcu_bh_state.completed;
 }
@@ -759,39 +818,71 @@ void rcu_irq_enter(void)
 /**
  * rcu_nmi_enter - inform RCU of entry to NMI context
  *
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is active.
+ * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
+ * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * that the CPU is active.  This implementation permits nested NMIs, as
+ * long as the nesting level does not overflow an int.  (You will probably
+ * run out of stack space first.)
  */
 void rcu_nmi_enter(void)
 {
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       int incby = 2;
 
-       if (rdtp->dynticks_nmi_nesting == 0 &&
-           (atomic_read(&rdtp->dynticks) & 0x1))
-               return;
-       rdtp->dynticks_nmi_nesting++;
-       smp_mb__before_atomic();  /* Force delay from prior write. */
-       atomic_inc(&rdtp->dynticks);
-       /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-       smp_mb__after_atomic();  /* See above. */
-       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+       /* Complain about underflow. */
+       WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
+
+       /*
+        * If idle from RCU viewpoint, atomically increment ->dynticks
+        * to mark non-idle and increment ->dynticks_nmi_nesting by one.
+        * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
+        * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
+        * to be in the outermost NMI handler that interrupted an RCU-idle
+        * period (observation due to Andy Lutomirski).
+        */
+       if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
+               smp_mb__before_atomic();  /* Force delay from prior write. */
+               atomic_inc(&rdtp->dynticks);
+               /* atomic_inc() before later RCU read-side crit sects */
+               smp_mb__after_atomic();  /* See above. */
+               WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+               incby = 1;
+       }
+       rdtp->dynticks_nmi_nesting += incby;
+       barrier();
 }
 
 /**
  * rcu_nmi_exit - inform RCU of exit from NMI context
  *
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is no longer active.
+ * If we are returning from the outermost NMI handler that interrupted an
+ * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * to let the RCU grace-period handling know that the CPU is back to
+ * being RCU-idle.
  */
 void rcu_nmi_exit(void)
 {
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-       if (rdtp->dynticks_nmi_nesting == 0 ||
-           --rdtp->dynticks_nmi_nesting != 0)
+       /*
+        * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+        * (We are exiting an NMI handler, so RCU better be paying attention
+        * to us!)
+        */
+       WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+
+       /*
+        * If the nesting level is not 1, the CPU wasn't RCU-idle, so
+        * leave it in non-RCU-idle state.
+        */
+       if (rdtp->dynticks_nmi_nesting != 1) {
+               rdtp->dynticks_nmi_nesting -= 2;
                return;
+       }
+
+       /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
+       rdtp->dynticks_nmi_nesting = 0;
        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
        smp_mb__before_atomic();  /* See above. */
        atomic_inc(&rdtp->dynticks);
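The rewritten rcu_nmi_enter()/rcu_nmi_exit() encode "this NMI interrupted an RCU-idle period" in the nesting count itself: the outermost such NMI adds 1 after marking the CPU non-idle, every other entry adds 2, so a nesting value of exactly 1 on exit identifies the one handler that must restore RCU-idleness. A standalone model of just that arithmetic, with hypothetical names and no atomics or barriers:

static int rcu_is_idle = 1;             /* 1: RCU-idle, 0: RCU is watching */
static int nmi_nesting;

static void model_nmi_enter(void)
{
        int incby = 2;

        if (rcu_is_idle) {              /* interrupted an RCU-idle period */
                rcu_is_idle = 0;        /* mark RCU as watching... */
                incby = 1;              /* ...and remember it: nesting stays odd */
        }
        nmi_nesting += incby;
}

static void model_nmi_exit(void)
{
        if (nmi_nesting != 1) {         /* nested, or NMI did not interrupt idle */
                nmi_nesting -= 2;
                return;
        }
        nmi_nesting = 0;                /* outermost idle-interrupting NMI: */
        rcu_is_idle = 1;                /*  restore RCU-idleness */
}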
@@ -898,16 +989,13 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                return 1;
        } else {
+               if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
+                                rdp->mynode->gpnum))
+                       ACCESS_ONCE(rdp->gpwrap) = true;
                return 0;
        }
 }
 
-/*
- * This function really isn't for public consumption, but RCU is special in
- * that context switches can allow the state machine to make progress.
- */
-extern void resched_cpu(int cpu);
-
 /*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through a dynticks
@@ -1011,6 +1099,22 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
        j1 = rcu_jiffies_till_stall_check();
        ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
        rsp->jiffies_resched = j + j1 / 2;
+       rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
+}
+
+/*
+ * Complain about starvation of grace-period kthread.
+ */
+static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
+{
+       unsigned long gpa;
+       unsigned long j;
+
+       j = jiffies;
+       gpa = ACCESS_ONCE(rsp->gp_activity);
+       if (j - gpa > 2 * HZ)
+               pr_err("%s kthread starved for %ld jiffies!\n",
+                      rsp->name, j - gpa);
 }
 
 /*
@@ -1033,11 +1137,13 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
        }
 }
 
-static void print_other_cpu_stall(struct rcu_state *rsp)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 {
        int cpu;
        long delta;
        unsigned long flags;
+       unsigned long gpa;
+       unsigned long j;
        int ndetected = 0;
        struct rcu_node *rnp = rcu_get_root(rsp);
        long totqlen = 0;
@@ -1075,30 +1181,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
 
-       /*
-        * Now rat on any tasks that got kicked up to the root rcu_node
-        * due to CPU offlining.
-        */
-       rnp = rcu_get_root(rsp);
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       ndetected += rcu_print_task_stall(rnp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
        print_cpu_stall_info_end();
        for_each_possible_cpu(cpu)
                totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
        pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start),
               (long)rsp->gpnum, (long)rsp->completed, totqlen);
-       if (ndetected == 0)
-               pr_err("INFO: Stall ended before state dump start\n");
-       else
+       if (ndetected) {
                rcu_dump_cpu_stacks(rsp);
+       } else {
+               if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
+                   ACCESS_ONCE(rsp->completed) == gpnum) {
+                       pr_err("INFO: Stall ended before state dump start\n");
+               } else {
+                       j = jiffies;
+                       gpa = ACCESS_ONCE(rsp->gp_activity);
+                       pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
+                              rsp->name, j - gpa, j, gpa,
+                              jiffies_till_next_fqs);
+                       /* In this case, the current CPU might be at fault. */
+                       sched_show_task(current);
+               }
+       }
 
        /* Complain about tasks blocking the grace period. */
-
        rcu_print_detail_task_stall(rsp);
 
+       rcu_check_gp_kthread_starvation(rsp);
+
        force_quiescent_state(rsp);  /* Kick them all. */
 }
 
@@ -1123,6 +1233,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
        pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
                jiffies - rsp->gp_start,
                (long)rsp->gpnum, (long)rsp->completed, totqlen);
+
+       rcu_check_gp_kthread_starvation(rsp);
+
        rcu_dump_cpu_stacks(rsp);
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1193,7 +1306,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
 
                /* They had a few time units to dump stack, so complain. */
-               print_other_cpu_stall(rsp);
+               print_other_cpu_stall(rsp, gpnum);
        }
 }
 
@@ -1530,7 +1643,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
        bool ret;
 
        /* Handle the ends of any preceding grace periods first. */
-       if (rdp->completed == rnp->completed) {
+       if (rdp->completed == rnp->completed &&
+           !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
 
                /* No grace period end, so just accelerate recent callbacks. */
                ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1545,7 +1659,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
        }
 
-       if (rdp->gpnum != rnp->gpnum) {
+       if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
                /*
                 * If the current grace period is waiting for this CPU,
                 * set up to detect a quiescent state, otherwise don't
@@ -1554,8 +1668,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                rdp->gpnum = rnp->gpnum;
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
                rdp->passed_quiesce = 0;
+               rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
                rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
                zero_cpu_stall_ticks(rdp);
+               ACCESS_ONCE(rdp->gpwrap) = false;
        }
        return ret;
 }
@@ -1569,7 +1685,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
        local_irq_save(flags);
        rnp = rdp->mynode;
        if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
-            rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
+            rdp->completed == ACCESS_ONCE(rnp->completed) &&
+            !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
            !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
                local_irq_restore(flags);
                return;
@@ -1589,6 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
+       ACCESS_ONCE(rsp->gp_activity) = jiffies;
        rcu_bind_gp_kthread();
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock();
@@ -1649,6 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched_rcu_qs();
+               ACCESS_ONCE(rsp->gp_activity) = jiffies;
        }
 
        mutex_unlock(&rsp->onoff_mutex);
@@ -1665,6 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
        unsigned long maxj;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
+       ACCESS_ONCE(rsp->gp_activity) = jiffies;
        rsp->n_force_qs++;
        if (fqs_state == RCU_SAVE_DYNTICK) {
                /* Collect dyntick-idle snapshots. */
@@ -1703,6 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
+       ACCESS_ONCE(rsp->gp_activity) = jiffies;
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock();
        gp_duration = jiffies - rsp->gp_start;
@@ -1739,6 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                nocb += rcu_future_gp_cleanup(rsp, rnp);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched_rcu_qs();
+               ACCESS_ONCE(rsp->gp_activity) = jiffies;
        }
        rnp = rcu_get_root(rsp);
        raw_spin_lock_irq(&rnp->lock);
@@ -1788,6 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        if (rcu_gp_init(rsp))
                                break;
                        cond_resched_rcu_qs();
+                       ACCESS_ONCE(rsp->gp_activity) = jiffies;
                        WARN_ON(signal_pending(current));
                        trace_rcu_grace_period(rsp->name,
                                               ACCESS_ONCE(rsp->gpnum),
@@ -1831,9 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                                       ACCESS_ONCE(rsp->gpnum),
                                                       TPS("fqsend"));
                                cond_resched_rcu_qs();
+                               ACCESS_ONCE(rsp->gp_activity) = jiffies;
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_rcu_qs();
+                               ACCESS_ONCE(rsp->gp_activity) = jiffies;
                                WARN_ON(signal_pending(current));
                                trace_rcu_grace_period(rsp->name,
                                                       ACCESS_ONCE(rsp->gpnum),
@@ -2010,8 +2135,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
        rnp = rdp->mynode;
        raw_spin_lock_irqsave(&rnp->lock, flags);
        smp_mb__after_unlock_lock();
-       if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
-           rnp->completed == rnp->gpnum) {
+       if ((rdp->passed_quiesce == 0 &&
+            rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
+           rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
+           rdp->gpwrap) {
 
                /*
                 * The grace period in which this quiescent state was
@@ -2020,6 +2147,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 * within the current grace period.
                 */
                rdp->passed_quiesce = 0;        /* need qs for new gp. */
+               rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
@@ -2064,7 +2192,8 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
-       if (!rdp->passed_quiesce)
+       if (!rdp->passed_quiesce &&
+           rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
                return;
 
        /*
@@ -2194,6 +2323,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
                               TPS("cpuofl"));
 }
 
+/*
+ * All CPUs for the specified rcu_node structure have gone offline,
+ * and all tasks that were preempted within an RCU read-side critical
+ * section while running on one of those CPUs have since exited their RCU
+ * read-side critical section.  Some other CPU is reporting this fact with
+ * the specified rcu_node structure's ->lock held and interrupts disabled.
+ * This function therefore goes up the tree of rcu_node structures,
+ * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
+ * the leaf rcu_node structure's ->qsmaskinit field has already been
+ * updated.
+ *
+ * This function does check that the specified rcu_node structure has
+ * all CPUs offline and no blocked tasks, so it is OK to invoke it
+ * prematurely.  That said, invoking it after the fact will cost you
+ * a needless lock acquisition.  So once it has done its work, don't
+ * invoke it again.
+ */
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+       long mask;
+       struct rcu_node *rnp = rnp_leaf;
+
+       if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+               return;
+       for (;;) {
+               mask = rnp->grpmask;
+               rnp = rnp->parent;
+               if (!rnp)
+                       break;
+               raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+               smp_mb__after_unlock_lock(); /* GP memory ordering. */
+               rnp->qsmaskinit &= ~mask;
+               if (rnp->qsmaskinit) {
+                       raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                       return;
+               }
+               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+       }
+}
+
 /*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context.  Do the remainder of the cleanup,
@@ -2204,8 +2373,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
        unsigned long flags;
-       unsigned long mask;
-       int need_report = 0;
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
@@ -2219,40 +2386,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
        /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
        rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
        rcu_adopt_orphan_cbs(rsp, flags);
+       raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
 
-       /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
-       mask = rdp->grpmask;    /* rnp->grplo is constant. */
-       do {
-               raw_spin_lock(&rnp->lock);      /* irqs already disabled. */
-               smp_mb__after_unlock_lock();
-               rnp->qsmaskinit &= ~mask;
-               if (rnp->qsmaskinit != 0) {
-                       if (rnp != rdp->mynode)
-                               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-                       break;
-               }
-               if (rnp == rdp->mynode)
-                       need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
-               else
-                       raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-               mask = rnp->grpmask;
-               rnp = rnp->parent;
-       } while (rnp != NULL);
-
-       /*
-        * We still hold the leaf rcu_node structure lock here, and
-        * irqs are still disabled.  The reason for this subterfuge is
-        * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
-        * held leads to deadlock.
-        */
-       raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
-       rnp = rdp->mynode;
-       if (need_report & RCU_OFL_TASKS_NORM_GP)
-               rcu_report_unblock_qs_rnp(rnp, flags);
-       else
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       if (need_report & RCU_OFL_TASKS_EXP_GP)
-               rcu_report_exp_rnp(rsp, rnp, true);
+       /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+       raw_spin_lock_irqsave(&rnp->lock, flags);
+       smp_mb__after_unlock_lock();    /* Enforce GP memory-order guarantee. */
+       rnp->qsmaskinit &= ~rdp->grpmask;
+       if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
+               rcu_cleanup_dead_rnp(rnp);
+       rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
        WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
                  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
                  cpu, rdp->qlen, rdp->nxtlist);
@@ -2268,6 +2410,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
 }
 
+static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+}
+
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
 }
@@ -2464,12 +2610,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
                }
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
-       rnp = rcu_get_root(rsp);
-       if (rnp->qsmask == 0) {
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               smp_mb__after_unlock_lock();
-               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-       }
 }
 
 /*
@@ -2569,7 +2709,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  * Schedule RCU callback invocation.  If the specified type of RCU
  * does not support RCU priority boosting, just do a direct call,
  * otherwise wake up the per-CPU kernel kthread.  Note that because we
- * are running on the current CPU with interrupts disabled, the
+ * are running on the current CPU with softirqs disabled, the
  * rcu_cpu_kthread_task cannot disappear out from under us.
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -3109,9 +3249,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
        /* Is the RCU core waiting for a quiescent state from this CPU? */
        if (rcu_scheduler_fully_active &&
-           rdp->qs_pending && !rdp->passed_quiesce) {
+           rdp->qs_pending && !rdp->passed_quiesce &&
+           rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
                rdp->n_rp_qs_pending++;
-       } else if (rdp->qs_pending && rdp->passed_quiesce) {
+       } else if (rdp->qs_pending &&
+                  (rdp->passed_quiesce ||
+                   rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
                rdp->n_rp_report_qs++;
                return 1;
        }
@@ -3135,7 +3278,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 
        /* Has a new RCU grace period started? */
-       if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
+       if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
+           unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
                rdp->n_rp_gp_started++;
                return 1;
        }
@@ -3318,6 +3462,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
                        } else {
                                _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
                                                   rsp->n_barrier_done);
+                               smp_mb__before_atomic();
                                atomic_inc(&rsp->barrier_cpu_count);
                                __call_rcu(&rdp->barrier_head,
                                           rcu_barrier_callback, rsp, cpu, 0);
@@ -3385,9 +3530,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        /* Set up local state, ensuring consistent view of global state. */
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
-       init_callback_list(rdp);
-       rdp->qlen_lazy = 0;
-       ACCESS_ONCE(rdp->qlen) = 0;
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
        WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -3444,6 +3586,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
                        rdp->gpnum = rnp->completed;
                        rdp->completed = rnp->completed;
                        rdp->passed_quiesce = 0;
+                       rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
                        rdp->qs_pending = 0;
                        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
                }
@@ -3535,17 +3678,35 @@ static int rcu_pm_notify(struct notifier_block *self,
 static int __init rcu_spawn_gp_kthread(void)
 {
        unsigned long flags;
+       int kthread_prio_in = kthread_prio;
        struct rcu_node *rnp;
        struct rcu_state *rsp;
+       struct sched_param sp;
        struct task_struct *t;
 
+       /* Force priority into range. */
+       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+               kthread_prio = 1;
+       else if (kthread_prio < 0)
+               kthread_prio = 0;
+       else if (kthread_prio > 99)
+               kthread_prio = 99;
+       if (kthread_prio != kthread_prio_in)
+               pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
+                        kthread_prio, kthread_prio_in);
+
        rcu_scheduler_fully_active = 1;
        for_each_rcu_flavor(rsp) {
-               t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
+               t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
                BUG_ON(IS_ERR(t));
                rnp = rcu_get_root(rsp);
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rsp->gp_kthread = t;
+               if (kthread_prio) {
+                       sp.sched_priority = kthread_prio;
+                       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+               }
+               wake_up_process(t);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
        rcu_spawn_nocb_kthreads();
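rcu_spawn_gp_kthread() now clamps the module-supplied kthread_prio into the valid 0..99 range (forcing at least 1 when RCU_BOOST is enabled) and switches from kthread_run() to kthread_create() plus sched_setscheduler_nocheck() plus wake_up_process(), so the SCHED_FIFO priority is in place before the kthread first runs. The clamp, pulled out as a hypothetical standalone helper for clarity:

static int clamp_kthread_prio(int prio, int boost_enabled)
{
        if (boost_enabled && prio < 1)
                return 1;               /* priority boosting needs a real-time priority */
        if (prio < 0)
                return 0;               /* SCHED_NORMAL */
        if (prio > 99)
                return 99;              /* highest SCHED_FIFO priority */
        return prio;
}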
index 8e7b1843896ebcc0fe13ed51da7cea68f14f0a36..119de399eb2f7e532f607e85f2d31c1b9e324541 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
-#include <linux/irq_work.h>
 
 /*
  * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -172,11 +171,6 @@ struct rcu_node {
                                /*  queued on this rcu_node structure that */
                                /*  are blocking the current grace period, */
                                /*  there can be no such task. */
-       struct completion boost_completion;
-                               /* Used to ensure that the rt_mutex used */
-                               /*  to carry out the boosting is fully */
-                               /*  released with no future boostee accesses */
-                               /*  before that rt_mutex is re-initialized. */
        struct rt_mutex boost_mtx;
                                /* Used only for the priority-boosting */
                                /*  side effect, not as a lock. */
@@ -257,9 +251,12 @@ struct rcu_data {
                                        /*  in order to detect GP end. */
        unsigned long   gpnum;          /* Highest gp number that this CPU */
                                        /*  is aware of having started. */
+       unsigned long   rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
+                                       /*  for rcu_all_qs() invocations. */
        bool            passed_quiesce; /* User-mode/idle loop etc. */
        bool            qs_pending;     /* Core waits for quiesc state. */
        bool            beenonline;     /* CPU online at least once. */
+       bool            gpwrap;         /* Possible gpnum/completed wrap. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
        unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
 #ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -340,14 +337,10 @@ struct rcu_data {
 #ifdef CONFIG_RCU_NOCB_CPU
        struct rcu_head *nocb_head;     /* CBs waiting for kthread. */
        struct rcu_head **nocb_tail;
-       atomic_long_t nocb_q_count;     /* # CBs waiting for kthread */
-       atomic_long_t nocb_q_count_lazy; /*  (approximate). */
+       atomic_long_t nocb_q_count;     /* # CBs waiting for nocb */
+       atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
        struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
        struct rcu_head **nocb_follower_tail;
-       atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
-       atomic_long_t nocb_follower_count_lazy; /*  (approximate). */
-       int nocb_p_count;               /* # CBs being invoked by kthread */
-       int nocb_p_count_lazy;          /*  (approximate). */
        wait_queue_head_t nocb_wq;      /* For nocb kthreads to sleep on. */
        struct task_struct *nocb_kthread;
        int nocb_defer_wakeup;          /* Defer wakeup of nocb_kthread. */
@@ -356,8 +349,6 @@ struct rcu_data {
        struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
                                        /* CBs waiting for GP. */
        struct rcu_head **nocb_gp_tail;
-       long nocb_gp_count;
-       long nocb_gp_count_lazy;
        bool nocb_leader_sleep;         /* Is the nocb leader thread asleep? */
        struct rcu_data *nocb_next_follower;
                                        /* Next follower in wakeup chain. */
@@ -488,10 +479,14 @@ struct rcu_state {
                                                /*  due to no GP active. */
        unsigned long gp_start;                 /* Time at which GP started, */
                                                /*  but in jiffies. */
+       unsigned long gp_activity;              /* Time of last GP kthread */
+                                               /*  activity in jiffies. */
        unsigned long jiffies_stall;            /* Time at which to check */
                                                /*  for CPU stalls. */
        unsigned long jiffies_resched;          /* Time at which to resched */
                                                /*  a reluctant CPU. */
+       unsigned long n_force_qs_gpstart;       /* Snapshot of n_force_qs at */
+                                               /*  GP start. */
        unsigned long gp_max;                   /* Maximum GP duration in */
                                                /*  jiffies. */
        const char *name;                       /* Name of structure. */
@@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors;
 #define for_each_rcu_flavor(rsp) \
        list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
 
-/* Return values for rcu_preempt_offline_tasks(). */
-
-#define RCU_OFL_TASKS_NORM_GP  0x1             /* Tasks blocking normal */
-                                               /*  GP were moved to root. */
-#define RCU_OFL_TASKS_EXP_GP   0x2             /* Tasks blocking expedited */
-                                               /*  GP were moved to root. */
-
 /*
  * RCU implementation internal declarations:
  */
@@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
-long rcu_batches_completed(void);
 static void rcu_preempt_note_context_switch(void);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
-                                     unsigned long flags);
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
-#ifdef CONFIG_HOTPLUG_CPU
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                    struct rcu_node *rnp,
-                                    struct rcu_data *rdp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-                              bool wake);
-#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void);
 #endif /* #ifndef RCU_TREE_NONCORE */
 
 #ifdef CONFIG_RCU_TRACE
-#ifdef CONFIG_RCU_NOCB_CPU
-/* Sum up queue lengths for tracing. */
+/* Read out queue lengths for tracing. */
 static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 {
-       *ql = atomic_long_read(&rdp->nocb_q_count) +
-             rdp->nocb_p_count +
-             atomic_long_read(&rdp->nocb_follower_count) +
-             rdp->nocb_p_count + rdp->nocb_gp_count;
-       *qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
-              rdp->nocb_p_count_lazy +
-              atomic_long_read(&rdp->nocb_follower_count_lazy) +
-              rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
-}
+#ifdef CONFIG_RCU_NOCB_CPU
+       *ql = atomic_long_read(&rdp->nocb_q_count);
+       *qll = atomic_long_read(&rdp->nocb_q_count_lazy);
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
-{
        *ql = 0;
        *qll = 0;
-}
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+}
 #endif /* #ifdef CONFIG_RCU_TRACE */
index 3ec85cb5d544b8588fd574a80e19bd564079533f..2e850a51bb8fe285179fee76124dbc375851f09a 100644 (file)
 
 #include "../locking/rtmutex_common.h"
 
-/* rcuc/rcub kthread realtime priority */
-static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
-module_param(kthread_prio, int, 0644);
-
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
@@ -103,6 +99,8 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 static struct rcu_state *rcu_state_p = &rcu_preempt_state;
 
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+                              bool wake);
 
 /*
  * Tell them what RCU they are running.
@@ -113,25 +111,6 @@ static void __init rcu_bootup_announce(void)
        rcu_bootup_announce_oddness();
 }
 
-/*
- * Return the number of RCU-preempt batches processed thus far
- * for debug and statistics.
- */
-static long rcu_batches_completed_preempt(void)
-{
-       return rcu_preempt_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
-
-/*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-long rcu_batches_completed(void)
-{
-       return rcu_batches_completed_preempt();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
 /*
  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
  * that this just means that the task currently running on the CPU is
@@ -306,6 +285,15 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
        return np;
 }
 
+/*
+ * Return true if the specified rcu_node structure has tasks that were
+ * preempted within an RCU read-side critical section.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
+{
+       return !list_empty(&rnp->blkd_tasks);
+}
+
 /*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
@@ -313,9 +301,10 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  */
 void rcu_read_unlock_special(struct task_struct *t)
 {
-       int empty;
-       int empty_exp;
-       int empty_exp_now;
+       bool empty;
+       bool empty_exp;
+       bool empty_norm;
+       bool empty_exp_now;
        unsigned long flags;
        struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
@@ -367,7 +356,8 @@ void rcu_read_unlock_special(struct task_struct *t)
                                break;
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                }
-               empty = !rcu_preempt_blocked_readers_cgp(rnp);
+               empty = !rcu_preempt_has_tasks(rnp);
+               empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
                empty_exp = !rcu_preempted_readers_exp(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
@@ -386,6 +376,14 @@ void rcu_read_unlock_special(struct task_struct *t)
                drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
+               /*
+                * If this was the last task on the list, go see if we
+                * need to propagate ->qsmaskinit bit clearing up the
+                * rcu_node tree.
+                */
+               if (!empty && !rcu_preempt_has_tasks(rnp))
+                       rcu_cleanup_dead_rnp(rnp);
+
                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
@@ -393,7 +391,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                 * so we must take a snapshot of the expedited state.
                 */
                empty_exp_now = !rcu_preempted_readers_exp(rnp);
-               if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+               if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
                                                         rnp->gpnum,
                                                         0, rnp->qsmask,
@@ -408,10 +406,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
-               if (drop_boost_mutex) {
+               if (drop_boost_mutex)
                        rt_mutex_unlock(&rnp->boost_mtx);
-                       complete(&rnp->boost_completion);
-               }
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
                /*
@@ -519,99 +515,13 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
        WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
-       if (!list_empty(&rnp->blkd_tasks))
+       if (rcu_preempt_has_tasks(rnp))
                rnp->gp_tasks = rnp->blkd_tasks.next;
        WARN_ON_ONCE(rnp->qsmask);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-/*
- * Handle tasklist migration for case in which all CPUs covered by the
- * specified rcu_node have gone offline.  Move them up to the root
- * rcu_node.  The reason for not just moving them to the immediate
- * parent is to remove the need for rcu_read_unlock_special() to
- * make more than two attempts to acquire the target rcu_node's lock.
- * Returns true if there were tasks blocking the current RCU grace
- * period.
- *
- * Returns 1 if there was previously a task blocking the current grace
- * period on the specified rcu_node structure.
- *
- * The caller must hold rnp->lock with irqs disabled.
- */
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                    struct rcu_node *rnp,
-                                    struct rcu_data *rdp)
-{
-       struct list_head *lp;
-       struct list_head *lp_root;
-       int retval = 0;
-       struct rcu_node *rnp_root = rcu_get_root(rsp);
-       struct task_struct *t;
-
-       if (rnp == rnp_root) {
-               WARN_ONCE(1, "Last CPU thought to be offlined?");
-               return 0;  /* Shouldn't happen: at least one CPU online. */
-       }
-
-       /* If we are on an internal node, complain bitterly. */
-       WARN_ON_ONCE(rnp != rdp->mynode);
-
-       /*
-        * Move tasks up to root rcu_node.  Don't try to get fancy for
-        * this corner-case operation -- just put this node's tasks
-        * at the head of the root node's list, and update the root node's
-        * ->gp_tasks and ->exp_tasks pointers to those of this node's,
-        * if non-NULL.  This might result in waiting for more tasks than
-        * absolutely necessary, but this is a good performance/complexity
-        * tradeoff.
-        */
-       if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
-               retval |= RCU_OFL_TASKS_NORM_GP;
-       if (rcu_preempted_readers_exp(rnp))
-               retval |= RCU_OFL_TASKS_EXP_GP;
-       lp = &rnp->blkd_tasks;
-       lp_root = &rnp_root->blkd_tasks;
-       while (!list_empty(lp)) {
-               t = list_entry(lp->next, typeof(*t), rcu_node_entry);
-               raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
-               smp_mb__after_unlock_lock();
-               list_del(&t->rcu_node_entry);
-               t->rcu_blocked_node = rnp_root;
-               list_add(&t->rcu_node_entry, lp_root);
-               if (&t->rcu_node_entry == rnp->gp_tasks)
-                       rnp_root->gp_tasks = rnp->gp_tasks;
-               if (&t->rcu_node_entry == rnp->exp_tasks)
-                       rnp_root->exp_tasks = rnp->exp_tasks;
-#ifdef CONFIG_RCU_BOOST
-               if (&t->rcu_node_entry == rnp->boost_tasks)
-                       rnp_root->boost_tasks = rnp->boost_tasks;
-#endif /* #ifdef CONFIG_RCU_BOOST */
-               raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
-       }
-
-       rnp->gp_tasks = NULL;
-       rnp->exp_tasks = NULL;
-#ifdef CONFIG_RCU_BOOST
-       rnp->boost_tasks = NULL;
-       /*
-        * In case root is being boosted and leaf was not.  Make sure
-        * that we boost the tasks blocking the current grace period
-        * in this case.
-        */
-       raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
-       smp_mb__after_unlock_lock();
-       if (rnp_root->boost_tasks != NULL &&
-           rnp_root->boost_tasks != rnp_root->gp_tasks &&
-           rnp_root->boost_tasks != rnp_root->exp_tasks)
-               rnp_root->boost_tasks = rnp_root->gp_tasks;
-       raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
-       return retval;
-}
-
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
@@ -771,7 +681,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
        smp_mb__after_unlock_lock();
-       if (list_empty(&rnp->blkd_tasks)) {
+       if (!rcu_preempt_has_tasks(rnp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else {
                rnp->exp_tasks = rnp->blkd_tasks.next;
@@ -932,15 +842,6 @@ static void __init rcu_bootup_announce(void)
        rcu_bootup_announce_oddness();
 }
 
-/*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-long rcu_batches_completed(void)
-{
-       return rcu_batches_completed_sched();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
 /*
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
@@ -960,11 +861,12 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-/* Because preemptible RCU does not exist, no quieting of tasks. */
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
-       __releases(rnp->lock)
+/*
+ * Because there is no preemptible RCU, there can be no readers blocked.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
 {
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       return false;
 }
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -996,23 +898,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
        WARN_ON_ONCE(rnp->qsmask);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Because preemptible RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections, and
- * such non-existent tasks cannot possibly have been blocking the current
- * grace period.
- */
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                    struct rcu_node *rnp,
-                                    struct rcu_data *rdp)
-{
-       return 0;
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
@@ -1031,20 +916,6 @@ void synchronize_rcu_expedited(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Because preemptible RCU does not exist, there is never any need to
- * report on tasks preempted in RCU read-side critical sections during
- * expedited RCU grace periods.
- */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-                              bool wake)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
@@ -1080,7 +951,7 @@ void exit_rcu(void)
 
 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 {
-       if (list_empty(&rnp->blkd_tasks))
+       if (!rcu_preempt_has_tasks(rnp))
                rnp->n_balk_blkd_tasks++;
        else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
                rnp->n_balk_exp_gp_tasks++;
@@ -1127,7 +998,8 @@ static int rcu_boost(struct rcu_node *rnp)
        struct task_struct *t;
        struct list_head *tb;
 
-       if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
+       if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
+           ACCESS_ONCE(rnp->boost_tasks) == NULL)
                return 0;  /* Nothing left to boost. */
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1175,15 +1047,11 @@ static int rcu_boost(struct rcu_node *rnp)
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-       init_completion(&rnp->boost_completion);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        /* Lock only for side effect: boosts task t's priority. */
        rt_mutex_lock(&rnp->boost_mtx);
        rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
 
-       /* Wait for boostee to be done w/boost_mtx before reinitializing. */
-       wait_for_completion(&rnp->boost_completion);
-
        return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
               ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
@@ -1416,12 +1284,8 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
                if ((mask & 0x1) && cpu != outgoingcpu)
                        cpumask_set_cpu(cpu, cm);
-       if (cpumask_weight(cm) == 0) {
+       if (cpumask_weight(cm) == 0)
                cpumask_setall(cm);
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
-                       cpumask_clear_cpu(cpu, cm);
-               WARN_ON_ONCE(cpumask_weight(cm) == 0);
-       }
        set_cpus_allowed_ptr(t, cm);
        free_cpumask_var(cm);
 }
@@ -1446,12 +1310,8 @@ static void __init rcu_spawn_boost_kthreads(void)
        for_each_possible_cpu(cpu)
                per_cpu(rcu_cpu_has_work, cpu) = 0;
        BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-       rnp = rcu_get_root(rcu_state_p);
-       (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
-       if (NUM_RCU_NODES > 1) {
-               rcu_for_each_leaf_node(rcu_state_p, rnp)
-                       (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
-       }
+       rcu_for_each_leaf_node(rcu_state_p, rnp)
+               (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 }
 
 static void rcu_prepare_kthreads(int cpu)
@@ -1605,7 +1465,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
                 * completed since we last checked and there are
                 * callbacks not yet ready to invoke.
                 */
-               if (rdp->completed != rnp->completed &&
+               if ((rdp->completed != rnp->completed ||
+                    unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
                    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
                        note_gp_changes(rsp, rdp);
 
@@ -1898,11 +1759,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
                ticks_value = rsp->gpnum - rdp->gpnum;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-       pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
+       pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
               cpu, ticks_value, ticks_title,
               atomic_read(&rdtp->dynticks) & 0xfff,
               rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
+              ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
               fast_no_hz);
 }
 
@@ -2056,9 +1918,26 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
 {
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+       unsigned long ret;
+#ifdef CONFIG_PROVE_RCU
        struct rcu_head *rhp;
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
-       /* No-CBs CPUs might have callbacks on any of three lists. */
+       /*
+        * Check count of all no-CBs callbacks awaiting invocation.
+        * There needs to be a barrier before this function is called,
+        * but associated with a prior determination that no more
+        * callbacks would be posted.  In the worst case, the first
+        * barrier in _rcu_barrier() suffices (but the caller cannot
+        * necessarily rely on this; it is not a substitute for the caller
+        * getting the concurrency design right!).  There must also be
+        * a barrier between the following load and posting of a callback
+        * (if a callback is in fact needed).  This is associated with an
+        * atomic_inc() in the caller.
+        */
+       ret = atomic_long_read(&rdp->nocb_q_count);
+
+#ifdef CONFIG_PROVE_RCU
        rhp = ACCESS_ONCE(rdp->nocb_head);
        if (!rhp)
                rhp = ACCESS_ONCE(rdp->nocb_gp_head);
@@ -2072,8 +1951,9 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
                       cpu, rhp->func);
                WARN_ON_ONCE(1);
        }
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
-       return !!rhp;
+       return !!ret;
 }
 
 /*
@@ -2095,9 +1975,10 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
        struct task_struct *t;
 
        /* Enqueue the callback on the nocb list and update counts. */
+       atomic_long_add(rhcount, &rdp->nocb_q_count);
+       /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
        old_rhpp = xchg(&rdp->nocb_tail, rhtp);
        ACCESS_ONCE(*old_rhpp) = rhp;
-       atomic_long_add(rhcount, &rdp->nocb_q_count);
        atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
        smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
 
@@ -2288,9 +2169,6 @@ wait_again:
                /* Move callbacks to wait-for-GP list, which is empty. */
                ACCESS_ONCE(rdp->nocb_head) = NULL;
                rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
-               rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
-               rdp->nocb_gp_count_lazy =
-                       atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
                gotcbs = true;
        }
 
@@ -2338,9 +2216,6 @@ wait_again:
                /* Append callbacks to follower's "done" list. */
                tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
                *tail = rdp->nocb_gp_head;
-               atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
-               atomic_long_add(rdp->nocb_gp_count_lazy,
-                               &rdp->nocb_follower_count_lazy);
                smp_mb__after_atomic(); /* Store *tail before wakeup. */
                if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
                        /*
@@ -2415,13 +2290,11 @@ static int rcu_nocb_kthread(void *arg)
                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
                ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
                tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
-               c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
-               cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
-               rdp->nocb_p_count += c;
-               rdp->nocb_p_count_lazy += cl;
 
                /* Each pass through the following loop invokes a callback. */
-               trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
+               trace_rcu_batch_start(rdp->rsp->name,
+                                     atomic_long_read(&rdp->nocb_q_count_lazy),
+                                     atomic_long_read(&rdp->nocb_q_count), -1);
                c = cl = 0;
                while (list) {
                        next = list->next;
@@ -2443,9 +2316,9 @@ static int rcu_nocb_kthread(void *arg)
                        list = next;
                }
                trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
-               ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
-               ACCESS_ONCE(rdp->nocb_p_count_lazy) =
-                                               rdp->nocb_p_count_lazy - cl;
+               smp_mb__before_atomic();  /* _add after CB invocation. */
+               atomic_long_add(-c, &rdp->nocb_q_count);
+               atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
                rdp->n_nocbs_invoked += c;
        }
        return 0;
index 5cdc62e1beeb635a36ee87098a7f38110a651382..fbb6240509ea7768210e989a63d7ee007a793297 100644 (file)
@@ -46,6 +46,8 @@
 #define RCU_TREE_NONCORE
 #include "tree.h"
 
+DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
+
 static int r_open(struct inode *inode, struct file *file,
                                        const struct seq_operations *op)
 {
@@ -115,11 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 
        if (!rdp->beenonline)
                return;
-       seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d",
+       seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d",
                   rdp->cpu,
                   cpu_is_offline(rdp->cpu) ? '!' : ' ',
                   ulong2long(rdp->completed), ulong2long(rdp->gpnum),
-                  rdp->passed_quiesce, rdp->qs_pending);
+                  rdp->passed_quiesce,
+                  rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
+                  rdp->qs_pending);
        seq_printf(m, " dt=%d/%llx/%d df=%lu",
                   atomic_read(&rdp->dynticks->dynticks),
                   rdp->dynticks->dynticks_nesting,
index e628cb11b5606306e11f456471022fa65488976b..5eab11d4b747df5a5ff75a9e6558444c37b5c18a 100644 (file)
@@ -1814,6 +1814,10 @@ void __dl_clear_params(struct task_struct *p)
        dl_se->dl_period = 0;
        dl_se->flags = 0;
        dl_se->dl_bw = 0;
+
+       dl_se->dl_throttled = 0;
+       dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
 }
 
 /*
@@ -1839,7 +1843,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 
        RB_CLEAR_NODE(&p->dl.rb_node);
-       hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       init_dl_task_timer(&p->dl);
        __dl_clear_params(p);
 
        INIT_LIST_HEAD(&p->rt.run_list);
@@ -2049,6 +2053,9 @@ static inline int dl_bw_cpus(int i)
  * allocated bandwidth to reflect the new situation.
  *
  * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
  */
 static int dl_overflow(struct task_struct *p, int policy,
                       const struct sched_attr *attr)
@@ -3251,15 +3258,31 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 {
        struct sched_dl_entity *dl_se = &p->dl;
 
-       init_dl_task_timer(dl_se);
        dl_se->dl_runtime = attr->sched_runtime;
        dl_se->dl_deadline = attr->sched_deadline;
        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
        dl_se->flags = attr->sched_flags;
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-       dl_se->dl_throttled = 0;
-       dl_se->dl_new = 1;
-       dl_se->dl_yielded = 0;
+
+       /*
+        * Changing the parameters of a task is 'tricky' and we're not doing
+        * the correct thing -- also see task_dead_dl() and switched_from_dl().
+        *
+        * What we SHOULD do is delay the bandwidth release until the 0-lag
+        * point.  This would include retaining the task_struct until that
+        * time and changing dl_overflow() so that it does not immediately
+        * decrement the current amount.
+        *
+        * Instead we retain the current runtime/deadline and let the new
+        * parameters take effect after the current reservation period lapses.
+        * This is safe (albeit pessimistic) because the 0-lag point is always
+        * before the current scheduling deadline.
+        *
+        * We can still have temporary overloads because we do not delay the
+        * change in bandwidth until that time, so admission control is
+        * not on the safe side.  It does, however, guarantee that tasks
+        * will never consume more than promised.
+        */
 }
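
For reference, the "0-lag point" mentioned above is the instant at which the task's remaining runtime, consumed at its reserved bandwidth dl_runtime/dl_period, would be exhausted; as the comment notes, it always lies at or before the current scheduling deadline. A hedged, userspace-style sketch of that computation (the helper name and its standalone form are mine; the kernel would use its 64-bit division helpers and guard against multiplication overflow):

#include <stdint.h>

/* Hypothetical helper, not a kernel API: absolute time at which a deadline
 * entity's lag reaches zero.  With bandwidth = dl_runtime / dl_period, the
 * 0-lag point is deadline - remaining_runtime / bandwidth, i.e.
 * deadline - runtime * dl_period / dl_runtime (runtime assumed >= 0). */
static inline uint64_t dl_zero_lag_time(uint64_t deadline, uint64_t runtime,
                                        uint64_t dl_runtime, uint64_t dl_period)
{
        return deadline - (runtime * dl_period) / dl_runtime;
}
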
 
 /*
@@ -4642,6 +4665,9 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
        struct dl_bw *cur_dl_b;
        unsigned long flags;
 
+       if (!cpumask_weight(cur))
+               return ret;
+
        rcu_read_lock_sched();
        cur_dl_b = dl_bw_of(cpumask_any(cur));
        trial_cpus = cpumask_weight(trial);
index b52092f2636d50e8a816b2e7e20a648b00d6bb70..726470d47f87959fe530858f3e99024ecad6d63f 100644 (file)
@@ -1094,6 +1094,7 @@ static void task_dead_dl(struct task_struct *p)
         * Since we are TASK_DEAD we won't slip out of the domain!
         */
        raw_spin_lock_irq(&dl_b->lock);
+       /* XXX we should retain the bw until 0-lag */
        dl_b->total_bw -= p->dl.dl_bw;
        raw_spin_unlock_irq(&dl_b->lock);
 
@@ -1614,8 +1615,8 @@ static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
 
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
 {
+       /* XXX we should retain the bw until 0-lag */
        cancel_dl_timer(rq, p);
-
        __dl_clear_params(p);
 
        /*
index 40667cbf371ba9e8732e6c30940cc146752ee0c3..fe331fc391f53382f4999c0cd0f13367a827b3cc 100644 (file)
@@ -1730,7 +1730,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
        nodes = node_online_map;
        for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
                unsigned long max_faults = 0;
-               nodemask_t max_group;
+               nodemask_t max_group = NODE_MASK_NONE;
                int a, b;
 
                /* Are there nodes at this distance from each other? */
index f032fb5284e3a340108359a0a17eccfa3b20e115..40190f28db3590140cb903d3f596883c61faaa74 100644 (file)
@@ -280,6 +280,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
        unsigned int cpu;
        int ret = 0;
 
+       get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
@@ -292,6 +293,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
        list_add(&plug_thread->list, &hotplug_threads);
 out:
        mutex_unlock(&smpboot_threads_lock);
+       put_online_cpus();
        return ret;
 }
 EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
index b9ae81643f1dd87c8e3cf2249b6d43430c556375..479e4436f787646c92c42e0dbe2d940b366fbf60 100644 (file)
@@ -660,9 +660,8 @@ static void run_ksoftirqd(unsigned int cpu)
                 * in the task stack here.
                 */
                __do_softirq();
-               rcu_note_context_switch();
                local_irq_enable();
-               cond_resched();
+               cond_resched_rcu_qs();
                return;
        }
        local_irq_enable();
index 37e50aadd471195bebb3dc32c9ad026dbbcf2ab7..d8c724cda37b3db78983661a3f93e95195f6fd7c 100644 (file)
@@ -122,7 +122,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
        mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
        boot = ktime_add(mono, off_boot);
        xtim = ktime_add(mono, off_real);
-       tai = ktime_add(xtim, off_tai);
+       tai = ktime_add(mono, off_tai);
 
        base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
        base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
index 5f2ce616c0462db9b9055528110268385b2b653e..a2ca213c71ca8d2e0a93051851477db8cacb5ac1 100644 (file)
@@ -1215,6 +1215,7 @@ config RCU_TORTURE_TEST
        tristate "torture tests for RCU"
        depends on DEBUG_KERNEL
        select TORTURE_TEST
+       select SRCU
        default n
        help
          This option provides a kernel module that runs torture tests
@@ -1257,7 +1258,7 @@ config RCU_CPU_STALL_TIMEOUT
 config RCU_CPU_STALL_INFO
        bool "Print additional diagnostics on RCU CPU stall"
        depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
-       default n
+       default y
        help
          For each stalled CPU that is aware of the current RCU grace
          period, print out additional per-CPU diagnostic information
index 129775eb6de63a6155cea830df17b6c2476eb024..8b39e86dbab5ea2a1afad17f6a15368f846916d6 100644 (file)
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
 EXPORT_SYMBOL(csum_partial_copy);
 
 #ifndef csum_tcpudp_nofold
+static inline u32 from64to32(u64 x)
+{
+       /* add the two 32-bit halves; the sum fits in 32 bits plus a carry */
+       x = (x & 0xffffffff) + (x >> 32);
+       /* fold that carry back in */
+       x = (x & 0xffffffff) + (x >> 32);
+       return (u32)x;
+}
+
 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                        unsigned short len,
                        unsigned short proto,
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 #else
        s += (proto + len) << 8;
 #endif
-       s += (s >> 32);
-       return (__force __wsum)s;
+       return (__force __wsum)from64to32(s);
 }
 EXPORT_SYMBOL(csum_tcpudp_nofold);
 #endif
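
The new from64to32() helper folds a 64-bit accumulator down to the 32-bit ones'-complement sum the checksum code works with; two folding rounds suffice because the first addition can produce at most a single carry bit. A small, hedged userspace check of that property (function name and test values are mine):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same folding as from64to32() in the hunk above: add the two 32-bit
 * halves (result fits in 33 bits), then fold the remaining carry in. */
static uint32_t fold64to32(uint64_t x)
{
        x = (x & 0xffffffffULL) + (x >> 32);
        x = (x & 0xffffffffULL) + (x >> 32);
        return (uint32_t)x;
}

int main(void)
{
        assert(fold64to32(0x12345678ULL) == 0x12345678UL);          /* small values pass through    */
        assert(fold64to32(0x1ffffffffULL) == 1);                    /* carry is folded back in      */
        assert(fold64to32(0xffffffffffffffffULL) == 0xffffffffUL);  /* worst case: two folds suffice */
        puts("ok");
        return 0;
}
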
index 1d1ae6b078fdd9121abbd01409f01437bb67e1e8..4395b12869c832dba4a62747658f2238dd1257f7 100644 (file)
@@ -325,6 +325,7 @@ config VIRT_TO_BUS
 
 config MMU_NOTIFIER
        bool
+       select SRCU
 
 config KSM
        bool "Enable KSM for page merging"
index 683b4782019b2c32626645155bb8175f3bf3a4a5..2f6893c2f01b6c9649992910d902b94c49ae8ece 100644 (file)
@@ -5773,7 +5773,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
  * mem_cgroup_migrate - migrate a charge to another page
  * @oldpage: currently charged page
  * @newpage: page to transfer the charge to
- * @lrucare: both pages might be on the LRU already
+ * @lrucare: either or both pages might be on the LRU already
  *
  * Migrate the charge from @oldpage to @newpage.
  *
index b51eadf6d9528fa69ea80ad6d1c994763656e8da..28bd8c4dff6feb3b0194118781a4d1267d42bded 100644 (file)
@@ -59,6 +59,7 @@
 #endif
 
 void *high_memory;
+EXPORT_SYMBOL(high_memory);
 struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long highest_memmap_pfn;
index ad83195521f2da08e136cc8674d3b37c54e3ab86..b264bda46e1be6601f35f0c2080eb00056a2435c 100644 (file)
@@ -199,7 +199,10 @@ int walk_page_range(unsigned long addr, unsigned long end,
                         */
                        if ((vma->vm_start <= addr) &&
                            (vma->vm_flags & VM_PFNMAP)) {
-                               next = vma->vm_end;
+                               if (walk->pte_hole)
+                                       err = walk->pte_hole(addr, next, walk);
+                               if (err)
+                                       break;
                                pgd = pgd_offset(walk->mm, next);
                                continue;
                        }
index 73ba1df7c8ba1bcf17f0ef2ee0930c13db56730e..993e6ba689ccd442aa33e42489e71f0e558d1943 100644 (file)
@@ -1013,7 +1013,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                 */
                oldpage = newpage;
        } else {
-               mem_cgroup_migrate(oldpage, newpage, false);
+               mem_cgroup_migrate(oldpage, newpage, true);
                lru_cache_add_anon(newpage);
                *pagep = newpage;
        }
index b0330aecbf974e10ec8f64d3e93b012be5d689be..3244aead09267dd77b0392a0aac7afa462593305 100644 (file)
@@ -265,22 +265,12 @@ out:
        data[NFT_REG_VERDICT].verdict = NF_DROP;
 }
 
-static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
+static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+                                     const struct nft_expr *expr,
+                                     const struct nft_data **data)
 {
-       struct nft_base_chain *basechain;
-
-       if (chain->flags & NFT_BASE_CHAIN) {
-               basechain = nft_base_chain(chain);
-
-               switch (basechain->ops[0].hooknum) {
-               case NF_BR_PRE_ROUTING:
-               case NF_BR_LOCAL_IN:
-                       break;
-               default:
-                       return -EOPNOTSUPP;
-               }
-       }
-       return 0;
+       return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
+                                                   (1 << NF_BR_LOCAL_IN));
 }
 
 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
@@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
        struct nft_reject *priv = nft_expr_priv(expr);
        int icmp_code, err;
 
-       err = nft_reject_bridge_validate_hooks(ctx->chain);
+       err = nft_reject_bridge_validate(ctx, expr, NULL);
        if (err < 0)
                return err;
 
@@ -341,13 +331,6 @@ nla_put_failure:
        return -1;
 }
 
-static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
-                                     const struct nft_expr *expr,
-                                     const struct nft_data **data)
-{
-       return nft_reject_bridge_validate_hooks(ctx->chain);
-}
-
 static struct nft_expr_type nft_reject_bridge_type;
 static const struct nft_expr_ops nft_reject_bridge_ops = {
        .type           = &nft_reject_bridge_type,
index 4589ff67bfa95f2ab5ce6c6ccccd48f970a21807..67a4a36febd1a6bf554a36b4690272777cb008a5 100644 (file)
@@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
        ASSERT_RTNL();
        caifdev = netdev_priv(dev);
        caif_netlink_parms(data, &caifdev->conn_req);
-       dev_net_set(caifdev->netdev, src_net);
 
        ret = register_netdevice(dev);
        if (ret)
index 171420e75b03e5b9a020e6d417b72efbed5d5aba..7fe82929f5094ee37a49dccd9df3a3e2b63a64c7 100644 (file)
@@ -2352,7 +2352,6 @@ EXPORT_SYMBOL(skb_checksum_help);
 
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
-       unsigned int vlan_depth = skb->mac_len;
        __be16 type = skb->protocol;
 
        /* Tunnel gso handlers can set protocol to ethernet. */
@@ -2366,35 +2365,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
                type = eth->h_proto;
        }
 
-       /* if skb->protocol is 802.1Q/AD then the header should already be
-        * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
-        * ETH_HLEN otherwise
-        */
-       if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
-               if (vlan_depth) {
-                       if (WARN_ON(vlan_depth < VLAN_HLEN))
-                               return 0;
-                       vlan_depth -= VLAN_HLEN;
-               } else {
-                       vlan_depth = ETH_HLEN;
-               }
-               do {
-                       struct vlan_hdr *vh;
-
-                       if (unlikely(!pskb_may_pull(skb,
-                                                   vlan_depth + VLAN_HLEN)))
-                               return 0;
-
-                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
-                       type = vh->h_vlan_encapsulated_proto;
-                       vlan_depth += VLAN_HLEN;
-               } while (type == htons(ETH_P_8021Q) ||
-                        type == htons(ETH_P_8021AD));
-       }
-
-       *depth = vlan_depth;
-
-       return type;
+       return __vlan_get_protocol(skb, type, depth);
 }
 
 /**
@@ -5323,7 +5294,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
-void netdev_adjacent_add_links(struct net_device *dev)
+static void netdev_adjacent_add_links(struct net_device *dev)
 {
        struct netdev_adjacent *iter;
 
@@ -5348,7 +5319,7 @@ void netdev_adjacent_add_links(struct net_device *dev)
        }
 }
 
-void netdev_adjacent_del_links(struct net_device *dev)
+static void netdev_adjacent_del_links(struct net_device *dev)
 {
        struct netdev_adjacent *iter;
 
@@ -6656,7 +6627,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
        if (!queue)
                return NULL;
        netdev_init_one_queue(dev, queue, NULL);
-       queue->qdisc = &noop_qdisc;
+       RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
        queue->qdisc_sleeping = &noop_qdisc;
        rcu_assign_pointer(dev->ingress_queue, queue);
 #endif
index 9cf6fe9ddc0c99e189916dee672d16e6c4efe19a..446cbaf811857171c96a01e3529a790b77cf0b8f 100644 (file)
@@ -2895,12 +2895,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
                        goto errout;
        }
 
+       if (!skb->len)
+               goto errout;
+
        rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
        return 0;
 errout:
        WARN_ON(err == -EMSGSIZE);
        kfree_skb(skb);
-       rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+       if (err)
+               rtnl_set_sk_err(net, RTNLGRP_LINK, err);
        return err;
 }
 
index b50861b22b6bea036b1a99ddf141d7ed2d6cf6cd..c373c0708d9799b0f17a969412efde7623455dbd 100644 (file)
@@ -1506,23 +1506,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
 /*
  *     Generic function to send a packet as reply to another packet.
  *     Used to send some TCP resets/acks so far.
- *
- *     Use a fake percpu inet socket to avoid false sharing and contention.
  */
-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
-       .sk = {
-               .__sk_common = {
-                       .skc_refcnt = ATOMIC_INIT(1),
-               },
-               .sk_wmem_alloc  = ATOMIC_INIT(1),
-               .sk_allocation  = GFP_ATOMIC,
-               .sk_flags       = (1UL << SOCK_USE_WRITE_QUEUE),
-       },
-       .pmtudisc       = IP_PMTUDISC_WANT,
-       .uc_ttl         = -1,
-};
-
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           const struct ip_options *sopt,
                           __be32 daddr, __be32 saddr,
                           const struct ip_reply_arg *arg,
@@ -1532,9 +1517,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
        struct ipcm_cookie ipc;
        struct flowi4 fl4;
        struct rtable *rt = skb_rtable(skb);
+       struct net *net = sock_net(sk);
        struct sk_buff *nskb;
-       struct sock *sk;
-       struct inet_sock *inet;
        int err;
 
        if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
@@ -1565,15 +1549,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
        if (IS_ERR(rt))
                return;
 
-       inet = &get_cpu_var(unicast_sock);
+       inet_sk(sk)->tos = arg->tos;
 
-       inet->tos = arg->tos;
-       sk = &inet->sk;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
-       sock_net_set(sk, net);
-       __skb_queue_head_init(&sk->sk_write_queue);
        sk->sk_sndbuf = sysctl_wmem_default;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1589,13 +1569,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
                          arg->csumoffset) = csum_fold(csum_add(nskb->csum,
                                                                arg->csum));
                nskb->ip_summed = CHECKSUM_NONE;
-               skb_orphan(nskb);
                skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
                ip_push_pending_frames(sk, &fl4);
        }
 out:
-       put_cpu_var(unicast_sock);
-
        ip_rt_put(rt);
 }
 
index d58dd0ec3e5302c2862c8fe53bfd43ca05a3e669..52e1f2bf0ca2ff2fe3d85086465068da4bbb43f2 100644 (file)
@@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        if (dst->dev->mtu < mtu)
                return;
 
+       if (rt->rt_pmtu && rt->rt_pmtu < mtu)
+               return;
+
        if (mtu < ip_rt_min_pmtu)
                mtu = ip_rt_min_pmtu;
 
index bb395d46a3898136afe615c73ac311fd2832f6f1..c037644eafb7caadcb196b1c8b676bbc42abdb93 100644 (file)
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                tcp_slow_start(tp, acked);
        else {
                bictcp_update(ca, tp->snd_cwnd);
-               tcp_cong_avoid_ai(tp, ca->cnt);
+               tcp_cong_avoid_ai(tp, ca->cnt, 1);
        }
 }
 
index 27ead0dd16bc7e444e96781ff01b10c444678396..8670e68e2ce67a9c6a8d8185d31ea8a49f81f183 100644 (file)
@@ -291,26 +291,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
  * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
  * returns the leftover acks to adjust cwnd in congestion avoidance mode.
  */
-void tcp_slow_start(struct tcp_sock *tp, u32 acked)
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
        u32 cwnd = tp->snd_cwnd + acked;
 
        if (cwnd > tp->snd_ssthresh)
                cwnd = tp->snd_ssthresh + 1;
+       acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+
+       return acked;
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
-/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
+/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
+ * for every packet that was ACKed.
+ */
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 {
+       tp->snd_cwnd_cnt += acked;
        if (tp->snd_cwnd_cnt >= w) {
-               if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                       tp->snd_cwnd++;
-               tp->snd_cwnd_cnt = 0;
-       } else {
-               tp->snd_cwnd_cnt++;
+               u32 delta = tp->snd_cwnd_cnt / w;
+
+               tp->snd_cwnd_cnt -= delta * w;
+               tp->snd_cwnd += delta;
        }
+       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
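
Together with the tcp_slow_start() change above, this lets congestion avoidance credit a whole stretch ACK at once: slow start consumes what it can up to ssthresh and returns the leftover ACKed packets, and tcp_cong_avoid_ai() accumulates them in snd_cwnd_cnt, growing cwnd by snd_cwnd_cnt / w in one step instead of by at most one per call. A minimal userspace restatement of that arithmetic (struct and function names are mine, not kernel APIs):

#include <assert.h>
#include <stdint.h>

struct toy_tp { uint32_t snd_cwnd, snd_ssthresh, snd_cwnd_cnt, snd_cwnd_clamp; };

/* Grow cwnd by the ACKed packets, capped at ssthresh + 1; return the ACKed
 * packets that were not consumed, as the new tcp_slow_start() does. */
static uint32_t toy_slow_start(struct toy_tp *tp, uint32_t acked)
{
        uint32_t cwnd = tp->snd_cwnd + acked;

        if (cwnd > tp->snd_ssthresh)
                cwnd = tp->snd_ssthresh + 1;
        acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = cwnd < tp->snd_cwnd_clamp ? cwnd : tp->snd_cwnd_clamp;
        return acked;
}

/* Credit 'acked' packets against window 'w'; grow cwnd by one per w credits. */
static void toy_cong_avoid_ai(struct toy_tp *tp, uint32_t w, uint32_t acked)
{
        tp->snd_cwnd_cnt += acked;
        if (tp->snd_cwnd_cnt >= w) {
                uint32_t delta = tp->snd_cwnd_cnt / w;

                tp->snd_cwnd_cnt -= delta * w;
                tp->snd_cwnd += delta;
        }
        if (tp->snd_cwnd > tp->snd_cwnd_clamp)
                tp->snd_cwnd = tp->snd_cwnd_clamp;
}

int main(void)
{
        struct toy_tp tp = { .snd_cwnd = 8, .snd_ssthresh = 10,
                             .snd_cwnd_cnt = 0, .snd_cwnd_clamp = 1000 };

        /* A 25-packet stretch ACK in slow start: cwnd is capped at 11,
         * and the 22 leftover packets feed congestion avoidance. */
        uint32_t left = toy_slow_start(&tp, 25);
        assert(tp.snd_cwnd == 11 && left == 22);

        /* Those 22 credits against w == 10 advance cwnd by 2 at once. */
        toy_cong_avoid_ai(&tp, 10, left);
        assert(tp.snd_cwnd == 13 && tp.snd_cwnd_cnt == 2);
        return 0;
}
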
 
@@ -329,11 +335,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        /* In "safe" area, increase. */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
-               tcp_slow_start(tp, acked);
+       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+               acked = tcp_slow_start(tp, acked);
+               if (!acked)
+                       return;
+       }
        /* In dangerous area, increase slowly. */
-       else
-               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+       tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
index 6b6002416a73950d493661ea1459870f49917efc..4b276d1ed9807057986bd3b050e2e901bf1afec0 100644 (file)
@@ -93,9 +93,7 @@ struct bictcp {
        u32     epoch_start;    /* beginning of an epoch */
        u32     ack_cnt;        /* number of acks */
        u32     tcp_cwnd;       /* estimated tcp cwnd */
-#define ACK_RATIO_SHIFT        4
-#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
-       u16     delayed_ack;    /* estimate the ratio of Packets/ACKs << 4 */
+       u16     unused;
        u8      sample_cnt;     /* number of samples to decide curr_rtt */
        u8      found;          /* the exit point is found? */
        u32     round_start;    /* beginning of each round */
@@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca)
        ca->bic_K = 0;
        ca->delay_min = 0;
        ca->epoch_start = 0;
-       ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
        ca->ack_cnt = 0;
        ca->tcp_cwnd = 0;
        ca->found = 0;
@@ -205,23 +202,30 @@ static u32 cubic_root(u64 a)
 /*
  * Compute congestion window to use.
  */
-static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
 {
        u32 delta, bic_target, max_cnt;
        u64 offs, t;
 
-       ca->ack_cnt++;  /* count the number of ACKs */
+       ca->ack_cnt += acked;   /* count the number of ACKed packets */
 
        if (ca->last_cwnd == cwnd &&
            (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
                return;
 
+       /* The CUBIC function can update ca->cnt at most once per jiffy.
+        * On all cwnd reduction events, ca->epoch_start is set to 0,
+        * which will force a recalculation of ca->cnt.
+        */
+       if (ca->epoch_start && tcp_time_stamp == ca->last_time)
+               goto tcp_friendliness;
+
        ca->last_cwnd = cwnd;
        ca->last_time = tcp_time_stamp;
 
        if (ca->epoch_start == 0) {
                ca->epoch_start = tcp_time_stamp;       /* record beginning */
-               ca->ack_cnt = 1;                        /* start counting */
+               ca->ack_cnt = acked;                    /* start counting */
                ca->tcp_cwnd = cwnd;                    /* sync with cubic */
 
                if (ca->last_max_cwnd <= cwnd) {
@@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
        if (ca->last_max_cwnd == 0 && ca->cnt > 20)
                ca->cnt = 20;   /* increase cwnd 5% per RTT */
 
+tcp_friendliness:
        /* TCP Friendly */
        if (tcp_friendliness) {
                u32 scale = beta_scale;
@@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
                }
        }
 
-       ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
        if (ca->cnt == 0)                       /* cannot be zero */
                ca->cnt = 1;
 }
@@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (tp->snd_cwnd <= tp->snd_ssthresh) {
                if (hystart && after(ack, ca->end_seq))
                        bictcp_hystart_reset(sk);
-               tcp_slow_start(tp, acked);
-       } else {
-               bictcp_update(ca, tp->snd_cwnd);
-               tcp_cong_avoid_ai(tp, ca->cnt);
+               acked = tcp_slow_start(tp, acked);
+               if (!acked)
+                       return;
        }
+       bictcp_update(ca, tp->snd_cwnd, acked);
+       tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
 
 static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay)
  */
 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
-       const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        u32 delay;
 
-       if (icsk->icsk_ca_state == TCP_CA_Open) {
-               u32 ratio = ca->delayed_ack;
-
-               ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
-               ratio += cnt;
-
-               ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
-       }
-
        /* Some calls are for duplicates without timestamps */
        if (rtt_us < 0)
                return;
index a3f72d7fc06c07c43e1c00b67970eaee074e4593..d22f54482babf8bbd41972596d01326e4b06f060 100644 (file)
@@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
                arg.bound_dev_if = sk->sk_bound_dev_if;
 
        arg.tos = ip_hdr(skb)->tos;
-       ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+       ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+                             skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
 
@@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
-       ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+       ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+                             skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
 
@@ -2428,14 +2430,39 @@ struct proto tcp_prot = {
 };
 EXPORT_SYMBOL(tcp_prot);
 
+static void __net_exit tcp_sk_exit(struct net *net)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
+       free_percpu(net->ipv4.tcp_sk);
+}
+
 static int __net_init tcp_sk_init(struct net *net)
 {
+       int res, cpu;
+
+       net->ipv4.tcp_sk = alloc_percpu(struct sock *);
+       if (!net->ipv4.tcp_sk)
+               return -ENOMEM;
+
+       for_each_possible_cpu(cpu) {
+               struct sock *sk;
+
+               res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
+                                          IPPROTO_TCP, net);
+               if (res)
+                       goto fail;
+               *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+       }
        net->ipv4.sysctl_tcp_ecn = 2;
        return 0;
-}
 
-static void __net_exit tcp_sk_exit(struct net *net)
-{
+fail:
+       tcp_sk_exit(net);
+
+       return res;
 }
 
 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
index 6824afb65d9335532fe2bf61edce82cab8c3fd9c..333bcb2415ffca51e06f3042ae3d94b8e21c0725 100644 (file)
@@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (tp->snd_cwnd <= tp->snd_ssthresh)
                tcp_slow_start(tp, acked);
        else
-               tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
+               tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+                                 1);
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
index a4d2d2d88dcae7c00cf4db83d8b13ce6b143b3b4..112151eeee45bff0c37ac92d78d165ba92bd4d0a 100644 (file)
@@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                                /* In the "non-congestive state", increase cwnd
                                 *  every rtt.
                                 */
-                               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+                               tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
                        } else {
                                /* In the "congestive state", increase cwnd
                                 * every other rtt.
index cd72732185989b41d1a3f9167eacbf44c235cc01..17d35662930d054fb6fb379a2cddb9600e6b75e3 100644 (file)
@@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
        } else {
                /* Reno */
-               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+               tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
        }
 
        /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
index 13cda4c6313bad7ecca5b20b5be7c8ac28bcf1ab..01ccc28a686f8cd6bc2b6c69b6d48548b627fbdc 100644 (file)
@@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (code == ICMPV6_HDR_FIELD)
                        teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
 
-               if (teli && teli == info - 2) {
+               if (teli && teli == be32_to_cpu(info) - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
                        if (tel->encap_limit == 0) {
                                net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
@@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                }
                break;
        case ICMPV6_PKT_TOOBIG:
-               mtu = info - offset;
+               mtu = be32_to_cpu(info) - offset;
                if (mtu < IPV6_MIN_MTU)
                        mtu = IPV6_MIN_MTU;
                t->dev->mtu = mtu;
index ce69a12ae48c29276871dacb30eebea086b291a3..d28f2a2efb32e4c06eadc45dc8167bb2cb766d32 100644 (file)
@@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        skb_copy_secmark(to, from);
 }
 
-static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
-{
-       static u32 ip6_idents_hashrnd __read_mostly;
-       u32 hash, id;
-
-       net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
-
-       hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
-       hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
-
-       id = ip_idents_reserve(hash, 1);
-       fhdr->identification = htonl(id);
-}
-
 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
        struct sk_buff *frag;
index 97f41a3e68d98b89d6b8f643780f8a883bd46f55..54520a0bd5e3b5feac3d4bc4221fac77ab990031 100644 (file)
@@ -9,6 +9,24 @@
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
+u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
+{
+       u32 hash, id;
+
+       hash = __ipv6_addr_jhash(dst, hashrnd);
+       hash = __ipv6_addr_jhash(src, hash);
+
+       /* Treat an id of 0 as unset; if we get 0 back from ip_idents_reserve,
+        * set the high-order bit instead, thus minimizing possible future
+        * collisions.
+        */
+       id = ip_idents_reserve(hash, 1);
+       if (unlikely(!id))
+               id = 1 << 31;
+
+       return id;
+}
+
 /* This function exists only for tap drivers that must support broken
  * clients requesting UFO without specifying an IPv6 fragment ID.
  *
@@ -22,7 +40,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
        static u32 ip6_proxy_idents_hashrnd __read_mostly;
        struct in6_addr buf[2];
        struct in6_addr *addrs;
-       u32 hash, id;
+       u32 id;
 
        addrs = skb_header_pointer(skb,
                                   skb_network_offset(skb) +
@@ -34,14 +52,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
        net_get_random_once(&ip6_proxy_idents_hashrnd,
                            sizeof(ip6_proxy_idents_hashrnd));
 
-       hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
-       hash = __ipv6_addr_jhash(&addrs[0], hash);
-
-       id = ip_idents_reserve(hash, 1);
-       skb_shinfo(skb)->ip6_frag_id = htonl(id);
+       id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+                                &addrs[1], &addrs[0]);
+       skb_shinfo(skb)->ip6_frag_id = id;
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
 
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+       static u32 ip6_idents_hashrnd __read_mostly;
+       u32 id;
+
+       net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+
+       id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
+                                &rt->rt6i_src.addr);
+       fhdr->identification = htonl(id);
+}
+EXPORT_SYMBOL(ipv6_select_ident);
+
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
        u16 offset = sizeof(struct ipv6hdr);
index 213546bd6d5de5eeccfabe45c6dd4d575b475266..cdbfe5af6187c5c604462b70b26a268f4041e289 100644 (file)
@@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[],
 
        if (data[IFLA_IPTUN_ENCAP_SPORT]) {
                ret = true;
-               ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+               ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
        }
 
        if (data[IFLA_IPTUN_ENCAP_DPORT]) {
                ret = true;
-               ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+               ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
        }
 
        return ret;
@@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
        if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
                        tunnel->encap.type) ||
-           nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
                        tunnel->encap.sport) ||
-           nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
                        tunnel->encap.dport) ||
            nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
                        tunnel->encap.flags))
index b6aa8ed182579614d3738f107e839eb2e82e5371..a56276996b72b3f5d43121d6bf93422a58eefe62 100644 (file)
@@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
+               /* Set the IPv6 fragment id if not set yet */
+               if (!skb_shinfo(skb)->ip6_frag_id)
+                       ipv6_proxy_select_ident(skb);
+
                segs = NULL;
                goto out;
        }
@@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
                fptr->nexthdr = nexthdr;
                fptr->reserved = 0;
-               fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+               if (skb_shinfo(skb)->ip6_frag_id)
+                       fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+               else
+                       ipv6_select_ident(fptr,
+                                         (struct rt6_info *)skb_dst(skb));
 
                /* Fragment the skb. ipv6 header and the remaining fields of the
                 * fragment header are updated in ipv6_gso_segment()
index 990decba1fe418e36e59a1f081fcf0e47188da29..b87ca32efa0b4e6edc7f251c2c32c4ba3b55659c 100644 (file)
@@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
        return err;
 }
 
-static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
+static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
+                                unsigned int hooknum)
 {
+       if (!sysctl_snat_reroute(skb))
+               return 0;
+       /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
+       if (NF_INET_LOCAL_IN == hooknum)
+               return 0;
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
-               if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
+               struct dst_entry *dst = skb_dst(skb);
+
+               if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
+                   ip6_route_me_harder(skb) != 0)
                        return 1;
        } else
 #endif
-               if ((sysctl_snat_reroute(skb) ||
-                    skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+               if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
                    ip_route_me_harder(skb, RTN_LOCAL) != 0)
                        return 1;
 
@@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
                                union nf_inet_addr *snet,
                                __u8 protocol, struct ip_vs_conn *cp,
                                struct ip_vs_protocol *pp,
-                               unsigned int offset, unsigned int ihl)
+                               unsigned int offset, unsigned int ihl,
+                               unsigned int hooknum)
 {
        unsigned int verdict = NF_DROP;
 
@@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 #endif
                ip_vs_nat_icmp(skb, pp, cp, 1);
 
-       if (ip_vs_route_me_harder(af, skb))
+       if (ip_vs_route_me_harder(af, skb, hooknum))
                goto out;
 
        /* do the statistics and put it back */
@@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
 
        snet.ip = iph->saddr;
        return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
-                                   pp, ciph.len, ihl);
+                                   pp, ciph.len, ihl, hooknum);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
        snet.in6 = ciph.saddr.in6;
        writable = ciph.len;
        return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
-                                   pp, writable, sizeof(struct ipv6hdr));
+                                   pp, writable, sizeof(struct ipv6hdr),
+                                   hooknum);
 }
 #endif
 
@@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
  */
 static unsigned int
 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-               struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
+               struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
+               unsigned int hooknum)
 {
        struct ip_vs_protocol *pp = pd->pp;
 
@@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
         * if it came from this machine itself.  So re-compute
         * the routing information.
         */
-       if (ip_vs_route_me_harder(af, skb))
+       if (ip_vs_route_me_harder(af, skb, hooknum))
                goto drop;
 
        IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
@@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
        cp = pp->conn_out_get(af, skb, &iph, 0);
 
        if (likely(cp))
-               return handle_response(af, skb, pd, cp, &iph);
+               return handle_response(af, skb, pd, cp, &iph, hooknum);
        if (sysctl_nat_icmp_send(net) &&
            (pp->protocol == IPPROTO_TCP ||
             pp->protocol == IPPROTO_UDP ||
index 3b3ddb4fb9ee122a5b6d3a39450be38a64d6f614..1ff04bcd487154ecea5b6fc4514b799f162eeddb 100644 (file)
@@ -1134,9 +1134,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
        /* Restore old counters on this cpu, no problem. Per-cpu statistics
         * are not exposed to userspace.
         */
+       preempt_disable();
        stats = this_cpu_ptr(newstats);
        stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
        stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+       preempt_enable();
 
        return newstats;
 }
@@ -1262,8 +1264,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
                trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
                                        sizeof(struct nft_trans_chain));
-               if (trans == NULL)
+               if (trans == NULL) {
+                       free_percpu(stats);
                        return -ENOMEM;
+               }
 
                nft_trans_chain_stats(trans) = stats;
                nft_trans_chain_update(trans) = true;
@@ -1319,8 +1323,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                hookfn = type->hooks[hooknum];
 
                basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
-               if (basechain == NULL)
+               if (basechain == NULL) {
+                       module_put(type->owner);
                        return -ENOMEM;
+               }
 
                if (nla[NFTA_CHAIN_COUNTERS]) {
                        stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
@@ -3753,6 +3759,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
 }
 EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
 
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+                            unsigned int hook_flags)
+{
+       struct nft_base_chain *basechain;
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               basechain = nft_base_chain(chain);
+
+               if ((1 << basechain->ops[0].hooknum) & hook_flags)
+                       return 0;
+
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
+
 /*
  * Loop detection - walk through the ruleset beginning at the destination chain
  * of a new jump until either the source chain is reached (loop) or all
index d1ffd5eb3a9b5b86495b53adb5b8bc27c17921df..9aea747b43eab7b91b3b458cd0a5b89f2d9e6927 100644 (file)
@@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_masq_policy);
 
+int nft_masq_validate(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nft_data **data)
+{
+       int err;
+
+       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       if (err < 0)
+               return err;
+
+       return nft_chain_validate_hooks(ctx->chain,
+                                       (1 << NF_INET_POST_ROUTING));
+}
+EXPORT_SYMBOL_GPL(nft_masq_validate);
+
 int nft_masq_init(const struct nft_ctx *ctx,
                  const struct nft_expr *expr,
                  const struct nlattr * const tb[])
@@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx,
        struct nft_masq *priv = nft_expr_priv(expr);
        int err;
 
-       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-       if (err < 0)
+       err = nft_masq_validate(ctx, expr, NULL);
+       if (err)
                return err;
 
        if (tb[NFTA_MASQ_FLAGS] == NULL)
@@ -60,12 +75,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_masq_dump);
 
-int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                     const struct nft_data **data)
-{
-       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_masq_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
index aff54fb1c8a09fdb99fd4caab0959a80f28ffc37..a0837c6c9283dc90b043750ffe1d7217c95ab239 100644 (file)
@@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
        [NFTA_NAT_FLAGS]         = { .type = NLA_U32 },
 };
 
-static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                       const struct nlattr * const tb[])
+static int nft_nat_validate(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nft_data **data)
 {
        struct nft_nat *priv = nft_expr_priv(expr);
-       u32 family;
        int err;
 
        err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
        if (err < 0)
                return err;
 
+       switch (priv->type) {
+       case NFT_NAT_SNAT:
+               err = nft_chain_validate_hooks(ctx->chain,
+                                              (1 << NF_INET_POST_ROUTING) |
+                                              (1 << NF_INET_LOCAL_IN));
+               break;
+       case NFT_NAT_DNAT:
+               err = nft_chain_validate_hooks(ctx->chain,
+                                              (1 << NF_INET_PRE_ROUTING) |
+                                              (1 << NF_INET_LOCAL_OUT));
+               break;
+       }
+
+       return err;
+}
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                       const struct nlattr * const tb[])
+{
+       struct nft_nat *priv = nft_expr_priv(expr);
+       u32 family;
+       int err;
+
        if (tb[NFTA_NAT_TYPE] == NULL ||
            (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
             tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
@@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                return -EINVAL;
        }
 
+       err = nft_nat_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
+
        if (tb[NFTA_NAT_FAMILY] == NULL)
                return -EINVAL;
 
@@ -219,13 +246,6 @@ nla_put_failure:
        return -1;
 }
 
-static int nft_nat_validate(const struct nft_ctx *ctx,
-                           const struct nft_expr *expr,
-                           const struct nft_data **data)
-{
-       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-
 static struct nft_expr_type nft_nat_type;
 static const struct nft_expr_ops nft_nat_ops = {
        .type           = &nft_nat_type,
index 9e8093f283113117ac7618c742624742e618ed3a..d7e9e93a4e90f498f7a002c33840c928a3ab17e2 100644 (file)
@@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_redir_policy);
 
+int nft_redir_validate(const struct nft_ctx *ctx,
+                      const struct nft_expr *expr,
+                      const struct nft_data **data)
+{
+       int err;
+
+       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       if (err < 0)
+               return err;
+
+       return nft_chain_validate_hooks(ctx->chain,
+                                       (1 << NF_INET_PRE_ROUTING) |
+                                       (1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_redir_validate);
+
 int nft_redir_init(const struct nft_ctx *ctx,
                   const struct nft_expr *expr,
                   const struct nlattr * const tb[])
@@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
        struct nft_redir *priv = nft_expr_priv(expr);
        int err;
 
-       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       err = nft_redir_validate(ctx, expr, NULL);
        if (err < 0)
                return err;
 
@@ -88,12 +104,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_redir_dump);
 
-int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                      const struct nft_data **data)
-{
-       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_redir_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
index 02fdde28dada498c1715b18ffa166d72ac6807d6..75532efa51cd6d54389366b0f4cc7a4de34e0fbc 100644 (file)
@@ -1438,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups,
 
        for (undo = 0; undo < group; undo++)
                if (test_bit(undo, &groups))
-                       nlk->netlink_unbind(sock_net(sk), undo);
+                       nlk->netlink_unbind(sock_net(sk), undo + 1);
 }
 
 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1476,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                for (group = 0; group < nlk->ngroups; group++) {
                        if (!test_bit(group, &groups))
                                continue;
-                       err = nlk->netlink_bind(net, group);
+                       err = nlk->netlink_bind(net, group + 1);
                        if (!err)
                                continue;
                        netlink_undo_bind(group, groups, sk);
index c3b0cd43eb56689e395581c4757402bad531e271..c173f69e1479bfaf643b9e5c69f4c9a151b18c67 100644 (file)
@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
        {
                .procname       = "max_unacked_packets",
                .data           = &rds_sysctl_max_unacked_packets,
-               .maxlen         = sizeof(unsigned long),
+               .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "max_unacked_bytes",
                .data           = &rds_sysctl_max_unacked_bytes,
-               .maxlen         = sizeof(unsigned long),
+               .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
index aad6a679fb135e9c6492a1b1cd3c1b638d823453..baef987fe2c036ae61f7108455ce1d828ec40e6c 100644 (file)
@@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 }
 EXPORT_SYMBOL(tcf_exts_change);
 
-#define tcf_exts_first_act(ext) \
-               list_first_entry(&(exts)->actions, struct tc_action, list)
+#define tcf_exts_first_act(ext)                                        \
+       list_first_entry_or_null(&(exts)->actions,              \
+                                struct tc_action, list)
 
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
@@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
        struct tc_action *a = tcf_exts_first_act(exts);
-       if (tcf_action_copy_stats(skb, a, 1) < 0)
+       if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
                return -1;
 #endif
        return 0;
index 9b05924cc386ecc2cdb9816be27e439637fb37b3..333cd94ba381ff62f9512d6b95474a2aecc3ea88 100644 (file)
@@ -670,8 +670,14 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
 
-       if (tb[TCA_FQ_QUANTUM])
-               q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+       if (tb[TCA_FQ_QUANTUM]) {
+               u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+
+               if (quantum > 0)
+                       q->quantum = quantum;
+               else
+                       err = -EINVAL;
+       }
 
        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
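
TCA_FQ_QUANTUM of zero is now rejected, presumably because a zero quantum would never replenish flow credit and could stall or spin the scheduler; the attribute is validated and -EINVAL reported through err instead of being stored. A small validation sketch in the same spirit (hypothetical names, not the qdisc code):

#include <errno.h>
#include <stdio.h>

struct fq_cfg { unsigned int quantum; };

static int set_quantum(struct fq_cfg *cfg, unsigned int requested)
{
	if (requested == 0)
		return -EINVAL;		/* refuse a quantum that can never refill credit */
	cfg->quantum = requested;
	return 0;
}

int main(void)
{
	struct fq_cfg cfg = { .quantum = 3028 };

	printf("%d\n", set_quantum(&cfg, 0));		/* -EINVAL, quantum unchanged */
	printf("%d\n", set_quantum(&cfg, 1514));	/* 0, quantum updated */
	return 0;
}
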
index e49e231cef529ecaf4bf2bc3e6a168b8f8b6fc06..06320c8c1c8660cdd4e1a56968353169236fb2df 100644 (file)
@@ -2608,7 +2608,7 @@ do_addr_param:
 
                addr_param = param.v + sizeof(sctp_addip_param_t);
 
-               af = sctp_get_af_specific(param_type2af(param.p->type));
+               af = sctp_get_af_specific(param_type2af(addr_param->p.type));
                if (af == NULL)
                        break;
 
index 8eb779b9d77f2ad56c7ccdc5db081bc4b0a05028..604e718d68d35ecb305b3ad71ae774a848ea5a02 100644 (file)
@@ -5,6 +5,7 @@ config SECURITY_TOMOYO
        select SECURITYFS
        select SECURITY_PATH
        select SECURITY_NETWORK
+       select SRCU
        default n
        help
          This selects TOMOYO Linux, pathname-based access control.
index 1a3a6fa27158b0096de715c8e322ec0cddeb62ec..c6bba99a90b285a939865eb57bd97de3d7646d31 100644 (file)
@@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg)
 
 static void snd_ak4113_free(struct ak4113 *chip)
 {
-       chip->init = 1; /* don't schedule new work */
-       mb();
+       atomic_inc(&chip->wq_processing);       /* don't schedule new work */
        cancel_delayed_work_sync(&chip->work);
        kfree(chip);
 }
@@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
        chip->write = write;
        chip->private_data = private_data;
        INIT_DELAYED_WORK(&chip->work, ak4113_stats);
+       atomic_set(&chip->wq_processing, 0);
 
        for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++)
                chip->regmap[reg] = pgm[reg];
@@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip)
 
 void snd_ak4113_reinit(struct ak4113 *chip)
 {
-       chip->init = 1;
-       mb();
-       flush_delayed_work(&chip->work);
+       if (atomic_inc_return(&chip->wq_processing) == 1)
+               cancel_delayed_work_sync(&chip->work);
        ak4113_init_regs(chip);
        /* bring up statistics / event queing */
-       chip->init = 0;
-       if (chip->kctls[0])
+       if (atomic_dec_and_test(&chip->wq_processing))
                schedule_delayed_work(&chip->work, HZ / 10);
 }
 EXPORT_SYMBOL_GPL(snd_ak4113_reinit);
@@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work)
 {
        struct ak4113 *chip = container_of(work, struct ak4113, work.work);
 
-       if (!chip->init)
+       if (atomic_inc_return(&chip->wq_processing) == 1)
                snd_ak4113_check_rate_and_errors(chip, chip->check_flags);
 
-       schedule_delayed_work(&chip->work, HZ / 10);
+       if (atomic_dec_and_test(&chip->wq_processing))
+               schedule_delayed_work(&chip->work, HZ / 10);
 }
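
The old init flag plus mb() is replaced by an atomic wq_processing gate: the first path to raise the count cancels the pending delayed work, and the work is rescheduled only when the count drops back to zero, so free/reinit and the stats worker cannot race each other into rescheduling. A compact sketch of that inc/dec pattern using C11 atomics in place of the kernel's atomic_t (reschedule() is a stand-in for schedule_delayed_work()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int wq_processing;

static void reschedule(void)
{
	puts("work rescheduled");
}

static void reinit(void)
{
	/* like atomic_inc_return(&chip->wq_processing) == 1 */
	if (atomic_fetch_add(&wq_processing, 1) == 0)
		puts("first user: cancel pending work");

	/* ... reprogram the registers here ... */

	/* like atomic_dec_and_test(&chip->wq_processing) */
	if (atomic_fetch_sub(&wq_processing, 1) == 1)
		reschedule();		/* last user restarts the polling work */
}

int main(void)
{
	reinit();
	return 0;
}
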
index c7f56339415d31052812e1c3d6066c275298411e..b70e6eccbd035372e48098188f32aa99e83b4ea5 100644 (file)
@@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114)
 
 static void snd_ak4114_free(struct ak4114 *chip)
 {
-       chip->init = 1; /* don't schedule new work */
-       mb();
+       atomic_inc(&chip->wq_processing);       /* don't schedule new work */
        cancel_delayed_work_sync(&chip->work);
        kfree(chip);
 }
@@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card,
        chip->write = write;
        chip->private_data = private_data;
        INIT_DELAYED_WORK(&chip->work, ak4114_stats);
+       atomic_set(&chip->wq_processing, 0);
 
        for (reg = 0; reg < 6; reg++)
                chip->regmap[reg] = pgm[reg];
@@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip)
 
 void snd_ak4114_reinit(struct ak4114 *chip)
 {
-       chip->init = 1;
-       mb();
-       flush_delayed_work(&chip->work);
+       if (atomic_inc_return(&chip->wq_processing) == 1)
+               cancel_delayed_work_sync(&chip->work);
        ak4114_init_regs(chip);
        /* bring up statistics / event queing */
-       chip->init = 0;
-       if (chip->kctls[0])
+       if (atomic_dec_and_test(&chip->wq_processing))
                schedule_delayed_work(&chip->work, HZ / 10);
 }
 
@@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work)
 {
        struct ak4114 *chip = container_of(work, struct ak4114, work.work);
 
-       if (!chip->init)
+       if (atomic_inc_return(&chip->wq_processing) == 1)
                snd_ak4114_check_rate_and_errors(chip, chip->check_flags);
-
-       schedule_delayed_work(&chip->work, HZ / 10);
+       if (atomic_dec_and_test(&chip->wq_processing))
+               schedule_delayed_work(&chip->work, HZ / 10);
 }
 
 EXPORT_SYMBOL(snd_ak4114_create);
index 99ff35e2a25d012ba052b00fef7c2d1963a8e5c3..35e44e463cfe580f538e7c1d7524fd3b9cc4249f 100644 (file)
@@ -348,7 +348,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
        struct atmel_pcm_dma_params *dma_params;
        int dir, channels, bits;
        u32 tfmr, rfmr, tcmr, rcmr;
-       int start_event;
        int ret;
        int fslen, fslen_ext;
 
@@ -457,19 +456,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                 * The SSC transmit clock is obtained from the BCLK signal on
                 * on the TK line, and the SSC receive clock is
                 * generated from the transmit clock.
-                *
-                *  For single channel data, one sample is transferred
-                * on the falling edge of the LRC clock.
-                * For two channel data, one sample is
-                * transferred on both edges of the LRC clock.
                 */
-               start_event = ((channels == 1)
-                               ? SSC_START_FALLING_RF
-                               : SSC_START_EDGE_RF);
-
                rcmr =    SSC_BF(RCMR_PERIOD, 0)
                        | SSC_BF(RCMR_STTDLY, START_DELAY)
-                       | SSC_BF(RCMR_START, start_event)
+                       | SSC_BF(RCMR_START, SSC_START_FALLING_RF)
                        | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
                        | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
                        | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
@@ -478,14 +468,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                rfmr =    SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
                        | SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
                        | SSC_BF(RFMR_FSLEN, 0)
-                       | SSC_BF(RFMR_DATNB, 0)
+                       | SSC_BF(RFMR_DATNB, (channels - 1))
                        | SSC_BIT(RFMR_MSBF)
                        | SSC_BF(RFMR_LOOP, 0)
                        | SSC_BF(RFMR_DATLEN, (bits - 1));
 
                tcmr =    SSC_BF(TCMR_PERIOD, 0)
                        | SSC_BF(TCMR_STTDLY, START_DELAY)
-                       | SSC_BF(TCMR_START, start_event)
+                       | SSC_BF(TCMR_START, SSC_START_FALLING_RF)
                        | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
                        | SSC_BF(TCMR_CKO, SSC_CKO_NONE)
                        | SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ?
@@ -495,7 +485,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                        | SSC_BF(TFMR_FSDEN, 0)
                        | SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
                        | SSC_BF(TFMR_FSLEN, 0)
-                       | SSC_BF(TFMR_DATNB, 0)
+                       | SSC_BF(TFMR_DATNB, (channels - 1))
                        | SSC_BIT(TFMR_MSBF)
                        | SSC_BF(TFMR_DATDEF, 0)
                        | SSC_BF(TFMR_DATLEN, (bits - 1));
@@ -512,7 +502,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                rcmr =    SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
                        | SSC_BF(RCMR_STTDLY, 1)
                        | SSC_BF(RCMR_START, SSC_START_RISING_RF)
-                       | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+                       | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
                        | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
                        | SSC_BF(RCMR_CKS, SSC_CKS_DIV);
 
@@ -527,7 +517,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                tcmr =    SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
                        | SSC_BF(TCMR_STTDLY, 1)
                        | SSC_BF(TCMR_START, SSC_START_RISING_RF)
-                       | SSC_BF(TCMR_CKI, SSC_CKI_RISING)
+                       | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
                        | SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
                        | SSC_BF(TCMR_CKS, SSC_CKS_DIV);
 
@@ -556,7 +546,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                rcmr =    SSC_BF(RCMR_PERIOD, 0)
                        | SSC_BF(RCMR_STTDLY, START_DELAY)
                        | SSC_BF(RCMR_START, SSC_START_RISING_RF)
-                       | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+                       | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
                        | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
                        | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
                                           SSC_CKS_PIN : SSC_CKS_CLOCK);
index c3f2decd643c9ba7eb57eec8a644091bc440376d..1ff726c292491eaf14f09ad235e779b65bca0041 100644 (file)
@@ -2124,6 +2124,7 @@ MODULE_DEVICE_TABLE(of, rt5640_of_match);
 static struct acpi_device_id rt5640_acpi_match[] = {
        { "INT33CA", 0 },
        { "10EC5640", 0 },
+       { "10EC5642", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
index 29cf7ce610f4707feeaba22425daedbe50491ba4..aa98be32bb60cfdb37ed150b4cffabf7dd4ee071 100644 (file)
@@ -483,21 +483,21 @@ static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        /* setting i2s data format */
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_DSP_A:
-               i2sctl |= SGTL5000_I2S_MODE_PCM;
+               i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
                break;
        case SND_SOC_DAIFMT_DSP_B:
-               i2sctl |= SGTL5000_I2S_MODE_PCM;
+               i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
                i2sctl |= SGTL5000_I2S_LRALIGN;
                break;
        case SND_SOC_DAIFMT_I2S:
-               i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+               i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
                break;
        case SND_SOC_DAIFMT_RIGHT_J:
-               i2sctl |= SGTL5000_I2S_MODE_RJ;
+               i2sctl |= SGTL5000_I2S_MODE_RJ << SGTL5000_I2S_MODE_SHIFT;
                i2sctl |= SGTL5000_I2S_LRPOL;
                break;
        case SND_SOC_DAIFMT_LEFT_J:
-               i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+               i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
                i2sctl |= SGTL5000_I2S_LRALIGN;
                break;
        default:
@@ -1462,6 +1462,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
        if (ret)
                return ret;
 
+       /* Need 8 clocks before I2C accesses */
+       udelay(1);
+
        /* read chip information */
        ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg);
        if (ret)
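
The SGTL5000_I2S_MODE_* constants are field values rather than masks, so they only take effect once shifted into the I2S-mode bit field of the control word; the "<< SGTL5000_I2S_MODE_SHIFT" additions above do exactly that. A generic field-packing sketch (the shift, mask, and value below are invented for illustration, not the SGTL5000 register layout):

#include <stdio.h>

#define MODE_SHIFT	6			/* invented field position */
#define MODE_MASK	(0x3u << MODE_SHIFT)	/* two-bit field */
#define MODE_RJ		0x2u			/* invented mode encoding */

int main(void)
{
	unsigned int ctl = 0;

	ctl &= ~MODE_MASK;				/* clear the field */
	ctl |= (MODE_RJ << MODE_SHIFT) & MODE_MASK;	/* place the value */
	printf("0x%x\n", ctl);				/* 0x80 with these numbers */
	return 0;
}
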
index b7ebce054b4ebe673ceeb23fa73118a1c9c74998..dd222b10ce132bfdfd15aecc06f19fca41e1bd7f 100644 (file)
@@ -1046,7 +1046,7 @@ static int aic3x_prepare(struct snd_pcm_substream *substream,
                delay += aic3x->tdm_delay;
 
        /* Configure data delay */
-       snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, aic3x->tdm_delay);
+       snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay);
 
        return 0;
 }
index b9211b42f6e919f0603eabda95ca332eea71dfb6..b115ed815db97329bb9d6a4c6d7b50faf42f5f40 100644 (file)
@@ -717,6 +717,8 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
        if (wm8731 == NULL)
                return -ENOMEM;
 
+       mutex_init(&wm8731->lock);
+
        wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
        if (IS_ERR(wm8731->regmap)) {
                ret = PTR_ERR(wm8731->regmap);
index 3eddb18fefd156eaf1acd3b050b106b3d32ff3b5..5cc457ef8894f7c69ae7c4981ab7e2d6d08d135b 100644 (file)
@@ -344,23 +344,27 @@ static int wm9705_soc_probe(struct snd_soc_codec *codec)
        struct snd_ac97 *ac97;
        int ret = 0;
 
-       ac97 = snd_soc_new_ac97_codec(codec);
+       ac97 = snd_soc_alloc_ac97_codec(codec);
        if (IS_ERR(ac97)) {
                ret = PTR_ERR(ac97);
                dev_err(codec->dev, "Failed to register AC97 codec\n");
                return ret;
        }
 
-       snd_soc_codec_set_drvdata(codec, ac97);
-
        ret = wm9705_reset(codec);
        if (ret)
-               goto reset_err;
+               goto err_put_device;
+
+       ret = device_add(&ac97->dev);
+       if (ret)
+               goto err_put_device;
+
+       snd_soc_codec_set_drvdata(codec, ac97);
 
        return 0;
 
-reset_err:
-       snd_soc_free_ac97_codec(ac97);
+err_put_device:
+       put_device(&ac97->dev);
        return ret;
 }
 
index e04643d2bb2412e334112cbb6ef15c8942cbf558..9517571e820d9576b9505be863a73f3c02116cb8 100644 (file)
@@ -666,7 +666,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
        struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
        int ret = 0;
 
-       wm9712->ac97 = snd_soc_new_ac97_codec(codec);
+       wm9712->ac97 = snd_soc_alloc_ac97_codec(codec);
        if (IS_ERR(wm9712->ac97)) {
                ret = PTR_ERR(wm9712->ac97);
                dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
@@ -675,15 +675,19 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
 
        ret = wm9712_reset(codec, 0);
        if (ret < 0)
-               goto reset_err;
+               goto err_put_device;
+
+       ret = device_add(&wm9712->ac97->dev);
+       if (ret)
+               goto err_put_device;
 
        /* set alc mux to none */
        ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
 
        return 0;
 
-reset_err:
-       snd_soc_free_ac97_codec(wm9712->ac97);
+err_put_device:
+       put_device(&wm9712->ac97->dev);
        return ret;
 }
 
index 71b9d5b0734d22c54284172184fede22fc4e5c48..6ab1122a3872dedeefe0460ac065a2f675e231a1 100644 (file)
@@ -1225,7 +1225,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)

        struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
        int ret = 0, reg;
 
-       wm9713->ac97 = snd_soc_new_ac97_codec(codec);
+       wm9713->ac97 = snd_soc_alloc_ac97_codec(codec);
        if (IS_ERR(wm9713->ac97))
                return PTR_ERR(wm9713->ac97);
 
@@ -1234,7 +1234,11 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
        wm9713_reset(codec, 0);
        ret = wm9713_reset(codec, 1);
        if (ret < 0)
-               goto reset_err;
+               goto err_put_device;
+
+       ret = device_add(&wm9713->ac97->dev);
+       if (ret)
+               goto err_put_device;
 
        /* unmute the adc - move to kcontrol */
        reg = ac97_read(codec, AC97_CD) & 0x7fff;
@@ -1242,8 +1246,8 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
 
        return 0;
 
-reset_err:
-       snd_soc_free_ac97_codec(wm9713->ac97);
+err_put_device:
+       put_device(&wm9713->ac97->dev);
        return ret;
 }
 
index 5bf14040c24a27f33b41dd1f0ee97a2c8d09779b..8156cc1accb79aec1c21751d51cd1dd2817bc48c 100644 (file)
@@ -651,11 +651,11 @@ static void hsw_notification_work(struct work_struct *work)
        }
 
        /* tell DSP that notification has been handled */
-       sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IPCD,
+       sst_dsp_shim_update_bits(hsw->dsp, SST_IPCD,
                SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE);
 
        /* unmask busy interrupt */
-       sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
+       sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
 }
 
 static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header)
index 2ac72eb5e75d82e1a6e8e11e4b176984692215d3..b3360139c41a905ba9575574d42b7477dd0cbc26 100644 (file)
@@ -350,7 +350,7 @@ static struct sst_machines sst_acpi_bytcr[] = {
 
 /* Cherryview-based platforms: CherryTrail and Braswell */
 static struct sst_machines sst_acpi_chv[] = {
-       {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "fw_sst_22a8.bin",
+       {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "intel/fw_sst_22a8.bin",
                                                &chv_platform_data },
        {},
 };
index 2e10e9a383768a5ed5132fb182b8ef3753c39540..08d7259bbaabad727709281234905011e990f07d 100644 (file)
@@ -48,15 +48,18 @@ static void soc_ac97_device_release(struct device *dev)
 }
 
 /**
- * snd_soc_new_ac97_codec - initailise AC97 device
- * @codec: audio codec
+ * snd_soc_alloc_ac97_codec() - Allocate a new AC'97 device
+ * @codec: The CODEC for which to create the AC'97 device
  *
- * Initialises AC97 codec resources for use by ad-hoc devices only.
+ * Allocates a new snd_ac97 device and initializes it, but does not yet
+ * register it. The caller is responsible for either calling
+ * device_add(&ac97->dev) to register the device, or calling
+ * put_device(&ac97->dev) to free it.
+ *
+ * Returns: A snd_ac97 device or a PTR_ERR in case of an error.
  */
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec)
 {
        struct snd_ac97 *ac97;
-       int ret;
 
        ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
        if (ac97 == NULL)
@@ -73,7 +76,28 @@ struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
                     codec->component.card->snd_card->number, 0,
                     codec->component.name);
 
-       ret = device_register(&ac97->dev);
+       device_initialize(&ac97->dev);
+
+       return ac97;
+}
+EXPORT_SYMBOL(snd_soc_alloc_ac97_codec);
+
+/**
+ * snd_soc_new_ac97_codec - initialise AC97 device
+ * @codec: audio codec
+ *
+ * Initialises AC97 codec resources for use by ad-hoc devices only.
+ */
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+{
+       struct snd_ac97 *ac97;
+       int ret;
+
+       ac97 = snd_soc_alloc_ac97_codec(codec);
+       if (IS_ERR(ac97))
+               return ac97;
+
+       ret = device_add(&ac97->dev);
        if (ret) {
                put_device(&ac97->dev);
                return ERR_PTR(ret);
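
snd_soc_alloc_ac97_codec() now stops at device_initialize(), so a codec driver can probe or reset the part before committing with device_add(), and can bail out with put_device() on any failure path; snd_soc_new_ac97_codec() keeps the old single-call behaviour by chaining the two. The lifecycle, reduced to a refcounted toy object rather than the driver core (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct toy_dev {
	int refcount;
	int registered;
};

static struct toy_dev *toy_alloc(void)		/* like device_initialize() */
{
	struct toy_dev *d = calloc(1, sizeof(*d));

	if (d)
		d->refcount = 1;		/* caller owns one reference */
	return d;
}

static int toy_add(struct toy_dev *d)		/* like device_add() */
{
	d->registered = 1;
	return 0;
}

static void toy_put(struct toy_dev *d)		/* like put_device() */
{
	if (--d->refcount == 0)
		free(d);
}

int main(void)
{
	struct toy_dev *d = toy_alloc();

	if (!d)
		return 1;
	if (toy_add(d)) {			/* registration failed */
		toy_put(d);			/* drop the only reference */
		return 1;
	}
	/* ... use the registered device ... */
	return 0;
}
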
diff --git a/tools/lib/lockdep/.gitignore b/tools/lib/lockdep/.gitignore
new file mode 100644 (file)
index 0000000..cc0e7a9
--- /dev/null
@@ -0,0 +1 @@
+liblockdep.so.*
index 52f9279c6c13097344dd5e273ab10631c49f00ec..4b866c54f624bd2adca1b21b241b2937a89494e5 100644 (file)
@@ -104,7 +104,7 @@ N           =
 
 export Q VERBOSE
 
-INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
+INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
 
 # Set compile option CFLAGS if not set elsewhere
 CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
index abe14b7f36e987189419bb45d10594189e336e26..bb99cde3f5f97a216cd85b4654ea0da3cc2b048b 100755 (executable)
@@ -24,7 +24,7 @@
 
 ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
 idlecpus=`mpstat | tail -1 | \
-       awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'`
+       awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
 awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
 BEGIN {
        cpus2use = idlecpus;
index d6cc07fc137fc35c78329586081a1135e5b5b850..559e01ac86be9731cf9040d682132869f42d3ec9 100755 (executable)
@@ -30,6 +30,7 @@ else
        echo Unreadable results directory: $i
        exit 1
 fi
+. tools/testing/selftests/rcutorture/bin/functions.sh
 
 configfile=`echo $i | sed -e 's/^.*\///'`
 ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
@@ -48,4 +49,21 @@ else
                title="$title ($ngpsps per second)"
        fi
        echo $title
+       nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
+       if test -z "$nclosecalls"
+       then
+               exit 0
+       fi
+       if test "$nclosecalls" -eq 0
+       then
+               exit 0
+       fi
+       # Compute number of close calls per tenth of an hour
+       nclosecalls10=`awk -v nclosecalls=$nclosecalls -v dur=$dur 'BEGIN { print int(nclosecalls * 36000 / dur) }' < /dev/null`
+       if test $nclosecalls10 -gt 5 -a $nclosecalls -gt 1
+       then
+               print_bug $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+       else
+               print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+       fi
 fi
index 8ca9f21f2efcba8ab113585192d9cf285ebca77a..5236e073919d2e508e6073d201fa157670a8688e 100755 (executable)
@@ -8,9 +8,9 @@
 #
 # Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
 #
-# qemu-args defaults to "-nographic", along with arguments specifying the
-#                      number of CPUs and other options generated from
-#                      the underlying CPU architecture.
+# qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
+#                      arguments specifying the number of CPUs and other
+#                      options generated from the underlying CPU architecture.
 # boot_args defaults to value returned by the per_version_boot_params
 #                      shell function.
 #
@@ -138,7 +138,7 @@ then
 fi
 
 # Generate -smp qemu argument.
-qemu_args="-nographic $qemu_args"
+qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args"
 cpu_count=`configNR_CPUS.sh $config_template`
 cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
 vcpus=`identify_qemu_vcpus`
@@ -168,6 +168,7 @@ then
        touch $resdir/buildonly
        exit 0
 fi
+echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
 echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 ( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
 qemu_pid=$!
index 499d1e598e425c390e5ebac5293a72cebddb01b2..a6b57622c2e589c67f36455f49c853a5203640ab 100755 (executable)
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=$1
+F=$1
 title=$2
+T=/tmp/parse-build.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
 
 . functions.sh
 
-if grep -q CC < $T
+if grep -q CC < $F
 then
        :
 else
@@ -39,18 +42,21 @@ else
        exit 1
 fi
 
-if grep -q "error:" < $T
+if grep -q "error:" < $F
 then
        print_bug $title build errors:
-       grep "error:" < $T
+       grep "error:" < $F
        exit 2
 fi
-exit 0
 
-if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+grep warning: < $F > $T/warnings
+grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings
+grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings
+cat $T/hwarnings $T/cwarnings > $T/rcuwarnings
+if test -s $T/rcuwarnings
 then
        print_warning $title build errors:
-       egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+       cat $T/rcuwarnings
        exit 2
 fi
 exit 0
index f962ba4cf68b6a06121b1d6f2c11ce8ed8df63ee..d8f35cf116be2ca6fb5095caec9ef25daa90e2ac 100755 (executable)
@@ -36,7 +36,7 @@ if grep -Pq '\x00' < $file
 then
        print_warning Console output contains nul bytes, old qemu still running?
 fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
 if test -s $T
 then
        print_warning Assertion failure in $file $title