Merge tag 'v4.4.5' into linux-linaro-lsk-v4.4
author Alex Shi <alex.shi@linaro.org>
Mon, 14 Mar 2016 07:16:14 +0000 (15:16 +0800)
committer Alex Shi <alex.shi@linaro.org>
Mon, 14 Mar 2016 07:16:14 +0000 (15:16 +0800)
 This is the 4.4.5 stable release

424 files changed:
Documentation/filesystems/efivarfs.txt
Makefile
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/mcip.c
arch/arm/Kconfig.debug
arch/arm/boot/dts/sama5d2-pinfunc.h
arch/arm/include/asm/psci.h
arch/arm/include/asm/xen/page-coherent.h
arch/arm/kvm/guest.c
arch/arm/mach-omap2/gpmc-onenand.c
arch/arm64/Makefile
arch/arm64/include/asm/pgtable.h
arch/arm64/kvm/guest.c
arch/arm64/mm/init.c
arch/mips/include/asm/page.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/syscall.h
arch/mips/kernel/traps.c
arch/mips/kvm/mips.c
arch/mips/loongson64/loongson-3/hpet.c
arch/mips/loongson64/loongson-3/smp.c
arch/mips/mm/sc-mips.c
arch/mips/mm/tlbex.c
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/syscall.S
arch/powerpc/kernel/eeh_driver.c
arch/s390/include/asm/fpu/internal.h
arch/s390/include/asm/kvm_host.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/compat_signal.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/extable.c
arch/sparc/kernel/sys_sparc_64.c
arch/um/os-Linux/start_up.c
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/irq.h
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/irq.c
arch/x86/kvm/emulate.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/mpx.c
block/bio.c
block/blk-settings.c
drivers/acpi/acpi_video.c
drivers/acpi/nfit.c
drivers/acpi/video_detect.c
drivers/android/binder.c
drivers/ata/ahci.c
drivers/ata/libahci.c
drivers/ata/libata-scsi.c
drivers/ata/libata-sff.c
drivers/ata/pata_rb532_cf.c
drivers/bluetooth/btusb.c
drivers/clk/samsung/clk-cpu.c
drivers/clocksource/tcb_clksrc.c
drivers/clocksource/vt8500_timer.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/dma/at_xdmac.c
drivers/dma/dw/core.c
drivers/dma/pxa_dma.c
drivers/edac/edac_device.c
drivers/edac/edac_mc.c
drivers/edac/edac_mc_sysfs.c
drivers/edac/edac_pci.c
drivers/firmware/efi/efivars.c
drivers/firmware/efi/vars.c
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/iceland_smc.c
drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/gma500/gem.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_platform.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_audio.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_sa.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/radeon/vce_v1_0.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/hv/channel.c
drivers/hwmon/ads1015.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/gpio-fan.c
drivers/hwtracing/coresight/coresight.c
drivers/i2c/busses/i2c-brcmstb.c
drivers/i2c/busses/i2c-i801.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/qib/qib_verbs_mcast.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mxs.c
drivers/irqchip/irq-omap-intc.c
drivers/md/bcache/btree.c
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c
drivers/md/bcache/writeback.h
drivers/md/dm-exception-store.h
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap-transient.c
drivers/md/dm-snap.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/persistent-data/dm-space-map-metadata.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-frontends/tda1004x.c
drivers/media/i2c/adv7604.c
drivers/media/rc/sunxi-cir.c
drivers/media/tuners/si2157.c
drivers/media/usb/gspca/ov534.c
drivers/media/usb/gspca/topro.c
drivers/media/v4l2-core/videobuf2-v4l2.c
drivers/misc/cxl/pci.c
drivers/misc/cxl/vphb.c
drivers/misc/mei/main.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/host/mmci.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/usdhi6rol0.c
drivers/mtd/ubi/upd.c
drivers/net/bonding/bond_main.c
drivers/net/can/usb/ems_usb.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/vnic_dev.c
drivers/net/ethernet/mellanox/mlx4/en_clock.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/phy/dp83640.c
drivers/net/ppp/pppoe.c
drivers/net/ppp/pptp.c
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan.c
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
drivers/of/irq.c
drivers/pci/host/pci-keystone-dw.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/pcie/aer/aerdrv.h
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/xen-pcifront.c
drivers/phy/phy-core.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/toshiba_acpi.c
drivers/regulator/Kconfig
drivers/regulator/axp20x-regulator.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_alias.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_tmpl.c
drivers/scsi/sd.c
drivers/sh/pm_runtime.c
drivers/spi/spi-atmel.c
drivers/spi/spi-omap2-mcspi.c
drivers/staging/panel/panel.c
drivers/staging/speakup/serialio.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/thermal/cpu_cooling.c
drivers/thermal/step_wise.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_core.h
drivers/usb/chipidea/otg.c
drivers/usb/class/cdc-acm.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/platform/vfio_platform_common.c
drivers/vfio/vfio_iommu_type1.c
drivers/video/console/fbcon.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_pci_common.c
drivers/xen/xen-pciback/pciback_ops.c
drivers/xen/xen-scsiback.c
fs/btrfs/async-thread.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode-map.c
fs/btrfs/inode-map.h
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/root-tree.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/cifs/cifsfs.h
fs/cifs/cifssmb.c
fs/cifs/smb2pdu.c
fs/dcache.c
fs/direct-io.c
fs/efivarfs/file.c
fs/efivarfs/inode.c
fs/efivarfs/internal.h
fs/efivarfs/super.c
fs/ext4/inode.c
fs/fs-writeback.c
fs/hostfs/hostfs_kern.c
fs/hpfs/namei.c
fs/jffs2/README.Locking
fs/jffs2/build.c
fs/jffs2/file.c
fs/jffs2/gc.c
fs/jffs2/nodelist.h
fs/locks.c
fs/namei.c
fs/nfs/nfs4proc.c
fs/ocfs2/aops.c
fs/super.c
include/asm-generic/cputime_nsecs.h
include/drm/drm_cache.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_fixed.h
include/linux/ata.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/ceph/messenger.h
include/linux/cgroup-defs.h
include/linux/cpuset.h
include/linux/dcache.h
include/linux/efi.h
include/linux/hyperv.h
include/linux/libata.h
include/linux/module.h
include/linux/nfs_fs.h
include/linux/shmem_fs.h
include/linux/skbuff.h
include/linux/thermal.h
include/linux/trace_events.h
include/linux/ucs2_string.h
include/linux/writeback.h
include/net/af_unix.h
include/net/dst_metadata.h
include/net/inet_connection_sock.h
include/net/ip6_route.h
include/net/ip_fib.h
include/net/scm.h
include/net/tcp.h
include/target/target_core_backend.h
include/target/target_core_base.h
include/uapi/linux/Kbuild
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/cpuset.c
kernel/irq/handle.c
kernel/memremap.c
kernel/module.c
kernel/resource.c
kernel/seccomp.c
kernel/time/posix-clock.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/workqueue.c
lib/Kconfig
lib/ucs2_string.c
mm/balloon_compaction.c
mm/memory.c
mm/migrate.c
mm/shmem.c
net/bluetooth/6lowpan.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_request.c
net/bluetooth/smp.c
net/bridge/br.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/core/dev.c
net/core/flow_dissector.c
net/core/scm.c
net/core/skbuff.c
net/core/sysctl_net_core.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/ipv4/devinet.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_sockglue.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/iucv/af_iucv.c
net/l2tp/l2tp_netlink.c
net/mac80211/ibss.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mlme.c
net/mac80211/scan.c
net/openvswitch/vport-vxlan.c
net/rfkill/core.c
net/sched/sch_api.c
net/sctp/protocol.c
net/sctp/socket.c
net/sunrpc/cache.c
net/switchdev/switchdev.c
net/tipc/bcast.c
net/tipc/node.c
net/tipc/subscr.c
net/unix/af_unix.c
net/unix/diag.c
net/unix/garbage.c
scripts/link-vmlinux.sh
security/smack/smack_lsm.c
security/yama/yama_lsm.c
sound/core/control_compat.c
sound/core/pcm_compat.c
sound/core/rawmidi_compat.c
sound/core/seq/oss/seq_oss.c
sound/core/seq/oss/seq_oss_device.h
sound/core/seq/oss/seq_oss_init.c
sound/core/timer_compat.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/usb/quirks.c
tools/hv/hv_vss_daemon.c
tools/perf/util/stat.c
tools/testing/selftests/efivarfs/efivarfs.sh
tools/testing/selftests/efivarfs/open-unlink.c
virt/kvm/arm/vgic.c
virt/kvm/async_pf.c

index c477af086e6569398d85b3f938e9f067b1976f7e..686a64bba775e0af9990cf305ab81f1ac4c8c278 100644 (file)
--- a/Documentation/filesystems/efivarfs.txt
+++ b/Documentation/filesystems/efivarfs.txt
@@ -14,3 +14,10 @@ filesystem.
 efivarfs is typically mounted like this,
 
        mount -t efivarfs none /sys/firmware/efi/efivars
+
+Due to the presence of numerous firmware bugs where removing non-standard
+UEFI variables causes the system firmware to fail to POST, efivarfs
+files that are not well-known standardized variables are created
+as immutable files.  This doesn't prevent removal - "chattr -i" will work -
+but it does prevent this kind of failure from being accomplished
+accidentally.
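
As a hedged illustration of the workflow the new paragraph describes (the
variable name and GUID below are placeholders, not a real UEFI variable):

       # clear the immutable flag first, then delete as usual
       chattr -i /sys/firmware/efi/efivars/MyVar-12345678-1234-1234-1234-123456789abc
       rm /sys/firmware/efi/efivars/MyVar-12345678-1234-1234-1234-123456789abc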
index 802be10c40c5ac7a0952da9b2c04411c219a18d9..d13322ade3a00ac94c89019522198a3d962d6405 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 3
+SUBLEVEL = 5
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
index 258b0e5ad3329a614e59204df55d91873e969623..68b6092349d7e2ac41d5375613bbd84872eb2d90 100644 (file)
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -22,6 +22,7 @@
 #define AUX_IRQ_CTRL           0x00E
 #define AUX_IRQ_ACT            0x043   /* Active Intr across all levels */
 #define AUX_IRQ_LVL_PEND       0x200   /* Pending Intr across all levels */
+#define AUX_IRQ_HINT           0x201   /* For generating Soft Interrupts */
 #define AUX_IRQ_PRIORITY       0x206
 #define ICAUSE                 0x40a
 #define AUX_IRQ_SELECT         0x40b
@@ -112,6 +113,16 @@ static inline int arch_irqs_disabled(void)
        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
+static inline void arc_softirq_trigger(int irq)
+{
+       write_aux_reg(AUX_IRQ_HINT, irq);
+}
+
+static inline void arc_softirq_clear(int irq)
+{
+       write_aux_reg(AUX_IRQ_HINT, 0);
+}
+
 #else
 
 .macro IRQ_DISABLE  scratch
index cbfec79137bf77735fa675d0eb2be57da217ba63..c1264607bbff3dca457fd47b8f078b62993b7af7 100644 (file)
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -45,11 +45,12 @@ VECTOR      reserved                ; Reserved slots
 VECTOR handle_interrupt        ; (16) Timer0
 VECTOR handle_interrupt        ; unused (Timer1)
 VECTOR handle_interrupt        ; unused (WDT)
-VECTOR handle_interrupt        ; (19) ICI (inter core interrupt)
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt        ; (23) End of fixed IRQs
+VECTOR handle_interrupt        ; (19) Inter core Interrupt (IPI)
+VECTOR handle_interrupt        ; (20) perf Interrupt
+VECTOR handle_interrupt        ; (21) Software Triggered Intr (Self IPI)
+VECTOR handle_interrupt        ; unused
+VECTOR handle_interrupt        ; (23) unused
+# End of fixed IRQs
 
 .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
        VECTOR  handle_interrupt
@@ -211,7 +212,11 @@ debug_marker_syscall:
 ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
 ; entry was via Exception in DS which got preempted in kernel).
 ;
-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
+;
+; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
+; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
+
 .Lintr_ret_to_delay_slot:
 debug_marker_ds:
 
@@ -222,18 +227,23 @@ debug_marker_ds:
        ld      r2, [sp, PT_ret]
        ld      r3, [sp, PT_status32]
 
+       ; STAT32 for Int return created from scratch
+       ; (No delay slot, disable further intr in trampoline)
+
        bic     r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
        st      r0, [sp, PT_status32]
 
        mov     r1, .Lintr_ret_to_delay_slot_2
        st      r1, [sp, PT_ret]
 
+       ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
        st      r2, [sp, 0]
        st      r3, [sp, 4]
 
        b       .Lisr_ret_fast_path
 
 .Lintr_ret_to_delay_slot_2:
+       ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
        sub     sp, sp, SZ_PT_REGS
        st      r9, [sp, -4]
 
@@ -243,11 +253,19 @@ debug_marker_ds:
        ld      r9, [sp, 4]
        sr      r9, [erstatus]
 
+       ; restore AUX_USER_SP if returning to U mode
+       bbit0   r9, STATUS_U_BIT, 1f
+       ld      r9, [sp, PT_sp]
+       sr      r9, [AUX_USER_SP]
+
+1:
        ld      r9, [sp, 8]
        sr      r9, [erbta]
 
        ld      r9, [sp, -4]
        add     sp, sp, SZ_PT_REGS
+
+       ; return from pure kernel mode to delay slot
        rtie
 
 END(ret_from_exception)
index bd237acdf4f2f9601efbf4c25c45067ddd2b69d9..30d806ce0c78273c4a1b917fb9bb876fac7ca793 100644 (file)
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
 #include <linux/smp.h>
 #include <linux/irq.h>
 #include <linux/spinlock.h>
+#include <asm/irqflags-arcv2.h>
 #include <asm/mcip.h>
 #include <asm/setup.h>
 
+#define SOFTIRQ_IRQ    21
+
 static char smp_cpuinfo_buf[128];
 static int idu_detected;
 
@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
 static void mcip_setup_per_cpu(int cpu)
 {
        smp_ipi_irq_setup(cpu, IPI_IRQ);
+       smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
 }
 
 static void mcip_ipi_send(int cpu)
@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
        unsigned long flags;
        int ipi_was_pending;
 
+       /* ARConnect can only send IPI to others */
+       if (unlikely(cpu == raw_smp_processor_id())) {
+               arc_softirq_trigger(SOFTIRQ_IRQ);
+               return;
+       }
+
        /*
         * NOTE: We must spin here if the other cpu hasn't yet
         * serviced a previous message. This can burn lots
@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
        unsigned long flags;
        unsigned int __maybe_unused copy;
 
+       if (unlikely(irq == SOFTIRQ_IRQ)) {
+               arc_softirq_clear(irq);
+               return;
+       }
+
        raw_spin_lock_irqsave(&mcip_lock, flags);
 
        /* Who sent the IPI */
index 259c0ca9c99a8f510410d3b1e9f2dd40707555ca..ddbb361267d879c3c2947c9b094d7be784e2f1c2 100644 (file)
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -162,10 +162,9 @@ choice
                  mobile SoCs in the Kona family of chips (e.g. bcm28155,
                  bcm11351, etc...)
 
-       config DEBUG_BCM63XX
+       config DEBUG_BCM63XX_UART
                bool "Kernel low-level debugging on BCM63XX UART"
                depends on ARCH_BCM_63XX
-               select DEBUG_UART_BCM63XX
 
        config DEBUG_BERLIN_UART
                bool "Marvell Berlin SoC Debug UART"
@@ -1348,7 +1347,7 @@ config DEBUG_LL_INCLUDE
        default "debug/vf.S" if DEBUG_VF_UART
        default "debug/vt8500.S" if DEBUG_VT8500_UART0
        default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
-       default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
+       default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
        default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
        default "mach/debug-macro.S"
 
@@ -1364,10 +1363,6 @@ config DEBUG_UART_8250
                ARCH_IOP33X || ARCH_IXP4XX || \
                ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
 
-# Compatibility options for BCM63xx
-config DEBUG_UART_BCM63XX
-       def_bool ARCH_BCM_63XX
-
 config DEBUG_UART_PHYS
        hex "Physical base address of debug UART"
        default 0x00100a00 if DEBUG_NETX_UART
@@ -1462,7 +1457,7 @@ config DEBUG_UART_PHYS
        default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
        default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
        default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
-       default 0xfffe8600 if DEBUG_UART_BCM63XX
+       default 0xfffe8600 if DEBUG_BCM63XX_UART
        default 0xfffff700 if ARCH_IOP33X
        depends on ARCH_EP93XX || \
                DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
@@ -1474,7 +1469,7 @@ config DEBUG_UART_PHYS
                DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
                DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
                DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
-               DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
+               DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
                DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
                DEBUG_AT91_UART
 
@@ -1515,7 +1510,7 @@ config DEBUG_UART_VIRT
        default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
        default 0xfc40ab00 if DEBUG_BRCMSTB_UART
        default 0xfc705000 if DEBUG_ZTE_ZX
-       default 0xfcfe8600 if DEBUG_UART_BCM63XX
+       default 0xfcfe8600 if DEBUG_BCM63XX_UART
        default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
        default 0xfd000000 if ARCH_SPEAR13XX
        default 0xfd012000 if ARCH_MV78XX0
@@ -1566,7 +1561,7 @@ config DEBUG_UART_VIRT
                DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
                DEBUG_NETX_UART || \
                DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
-               DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
+               DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
                DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
 
 config DEBUG_UART_8250_SHIFT
index 1afe24629d1f85de2a9ec3f8ef62c6372d014abd..b0c912feaa2f0e016b65ff27e272b82c2b0795ed 100644 (file)
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -90,7 +90,7 @@
 #define PIN_PA14__I2SC1_MCK            PINMUX_PIN(PIN_PA14, 4, 2)
 #define PIN_PA14__FLEXCOM3_IO2         PINMUX_PIN(PIN_PA14, 5, 1)
 #define PIN_PA14__D9                   PINMUX_PIN(PIN_PA14, 6, 2)
-#define PIN_PA15                       14
+#define PIN_PA15                       15
 #define PIN_PA15__GPIO                 PINMUX_PIN(PIN_PA15, 0, 0)
 #define PIN_PA15__SPI0_MOSI            PINMUX_PIN(PIN_PA15, 1, 1)
 #define PIN_PA15__TF1                  PINMUX_PIN(PIN_PA15, 2, 1)
index 68ee3ce17b820e9c3cc6b77d0645cb68f6276988..b4c6d99364f179abe56849fa3ecc7bd82bb1ffcc 100644 (file)
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -16,7 +16,7 @@
 
 extern struct smp_operations psci_smp_ops;
 
-#ifdef CONFIG_ARM_PSCI
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
 bool psci_smp_available(void);
 #else
 static inline bool psci_smp_available(void) { return false; }
index 0375c8caa06194591e065f94bbb3d57a0565c847..9408a994cc91792bcf89a57d67982e21c677d9d7 100644 (file)
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-       bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
+       unsigned long page_pfn = page_to_xen_pfn(page);
+       unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+       unsigned long compound_pages =
+               (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+       bool local = (page_pfn <= dev_pfn) &&
+               (dev_pfn - page_pfn < compound_pages);
+
        /*
-        * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-        * multiple Xen page, it's not possible to have a mix of local and
-        * foreign Xen page. So if the first xen_pfn == mfn the page is local
-        * otherwise it's a foreign page grant-mapped in dom0. If the page is
-        * local we can safely call the native dma_ops function, otherwise we
-        * call the xen specific function.
+        * Dom0 is mapped 1:1, while the Linux page can span across
+        * multiple Xen pages, it's not possible for it to contain a
+        * mix of local and foreign Xen pages. So if the first xen_pfn
+        * == mfn the page is local otherwise it's a foreign page
+        * grant-mapped in dom0. If the page is local we can safely
+        * call the native dma_ops function, otherwise we call the xen
+        * specific function.
         */
        if (local)
                __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
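
The containment test above, restated as a hedged stand-alone helper (the
function name is ours, not the kernel's):

       /* A compound Linux page covers (1 << compound_order(page)) *
        * XEN_PFN_PER_PAGE Xen frames; a DMA address is "local" only
        * if its Xen pfn falls inside that window. */
       static bool dev_pfn_is_local(unsigned long page_pfn,
                                    unsigned long dev_pfn,
                                    unsigned long compound_pages)
       {
               return page_pfn <= dev_pfn &&
                      dev_pfn - page_pfn < compound_pages;
       }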
index 96e935bbc38c8b4fd906aeacdc27ca28696b596a..3705fc2921c2ddf6a3e33526703183b248521f7a 100644 (file)
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -155,7 +155,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        u64 val;
 
        val = kvm_arm_timer_get_reg(vcpu, reg->id);
-       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 
 static unsigned long num_core_regs(void)
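
The one-line change above recurs in several KVM fixes in this release (arm64
and MIPS below): copy_to_user() returns the number of bytes it could not
copy, not an errno, so returning its result raw hands a positive byte count
back through the ioctl path. A minimal sketch of the convention, assuming
kernel context; the helper name is our own:

       /* fold "n bytes left uncopied" into the -EFAULT userspace expects */
       static int reg_to_user(void __user *uaddr, const u64 *val, size_t size)
       {
               return copy_to_user(uaddr, val, size) ? -EFAULT : 0;
       }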
index 7b76ce01c21dd3aa3b4cad36ce2431f3c8350900..8633c703546a65c2e5b0071ffca0d5a12b664884 100644 (file)
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
 
 static void set_onenand_cfg(void __iomem *onenand_base)
 {
-       u32 reg;
+       u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
 
-       reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
-       reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
        reg |=  (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
                ONENAND_SYS_CFG1_BL_16;
        if (onenand_flags & ONENAND_FLAG_SYNCREAD)
@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
                reg |= ONENAND_SYS_CFG1_VHF;
        else
                reg &= ~ONENAND_SYS_CFG1_VHF;
+
        writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
 }
 
@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
                }
        }
 
+       onenand_async.sync_write = true;
        omap2_onenand_calc_async_timings(&t);
 
        ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
index cd822d8454c0536165810004089c3a7ce5d0068d..b6c90e5006e45ae01654c3b5867c3a49910dc54e 100644 (file)
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -27,6 +27,7 @@ $(warning LSE atomics not supported by binutils)
 endif
 
 KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr)
+KBUILD_CFLAGS  += $(call cc-option, -mpc-relative-literal-loads)
 KBUILD_AFLAGS  += $(lseinstr)
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
index 63f52b55defe1041a7913217db490528c441c6f3..fc9f7ef2f4ab3b5d6e314e9b2ab1b218b8edac16 100644 (file)
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
 /*
  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
  *
- * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
+ * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
  *     (rounded up to PUD_SIZE).
  * VMALLOC_START: beginning of the kernel VA space
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  *     fixed mappings and modules
  */
-#define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
 
 #ifndef CONFIG_KASAN
 #define VMALLOC_START          (VA_START)
@@ -51,7 +51,8 @@
 
 #define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
-#define vmemmap                        ((struct page *)(VMALLOC_END + SZ_64K))
+#define VMEMMAP_START          (VMALLOC_END + SZ_64K)
+#define vmemmap                        ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS     0UL
 
index d250160d32bc68ae636c804b9cdfe2499bdddcb9..3039f080e2d5820ed4446aac0871dee42ffa5ad2 100644 (file)
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -186,7 +186,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        u64 val;
 
        val = kvm_arm_timer_get_reg(vcpu, reg->id);
-       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+       return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 
 /**
index 17bf39ac83ba073109118817c3ab72346ae3824b..4cb98aa8c27b2e89fd32783d45f283e8a152d80e 100644 (file)
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -319,8 +319,8 @@ void __init mem_init(void)
 #endif
                  MLG(VMALLOC_START, VMALLOC_END),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-                 MLG((unsigned long)vmemmap,
-                     (unsigned long)vmemmap + VMEMMAP_SIZE),
+                 MLG(VMEMMAP_START,
+                     VMEMMAP_START + VMEMMAP_SIZE),
                  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
                      (unsigned long)virt_to_page(high_memory)),
 #endif
index 2046c023022406d8a336197db13df1a75b1e8021..21ed7150fec3f4847aef98ac45cd56241b4f50b3 100644 (file)
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -33,7 +33,7 @@
 #define PAGE_SHIFT     16
 #endif
 #define PAGE_SIZE      (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK      (~(PAGE_SIZE - 1))
+#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
 
 /*
  * This is used for calculating the real page sizes
index 8957f15e21ec4c911e8ebe017ea8cb4ea1276ad4..18826aa15a7cec740b8e6a9eddba1e7fa82ec615 100644 (file)
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
 static inline pte_t pte_mkyoung(pte_t pte)
 {
        pte_val(pte) |= _PAGE_ACCESSED;
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        if (!(pte_val(pte) & _PAGE_NO_READ))
                pte_val(pte) |= _PAGE_SILENT_READ;
        else
@@ -560,7 +560,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
        pmd_val(pmd) |= _PAGE_ACCESSED;
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        if (!(pmd_val(pmd) & _PAGE_NO_READ))
                pmd_val(pmd) |= _PAGE_SILENT_READ;
        else
index 6499d93ae68d7096d63416a349d9afcdcc0cb3ae..47bc45a67e9ba187fb709b97e755900e9bcb4caf 100644 (file)
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
        /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
        if ((config_enabled(CONFIG_32BIT) ||
            test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
-           (regs->regs[2] == __NR_syscall)) {
+           (regs->regs[2] == __NR_syscall))
                i++;
-               n++;
-       }
 
        while (n--)
                ret |= mips_get_syscall_arg(args++, task, regs, i++);
index 886cb1976e90f682dafac7a958043d68bc4b6473..ca9a8100748915cfb66650c6ada8b2f6c4e6c1b8 100644 (file)
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 asmlinkage void do_ov(struct pt_regs *regs)
 {
        enum ctx_state prev_state;
-       siginfo_t info;
+       siginfo_t info = {
+               .si_signo = SIGFPE,
+               .si_code = FPE_INTOVF,
+               .si_addr = (void __user *)regs->cp0_epc,
+       };
 
        prev_state = exception_enter();
        die_if_kernel("Integer overflow", regs);
 
-       info.si_code = FPE_INTOVF;
-       info.si_signo = SIGFPE;
-       info.si_errno = 0;
-       info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
        exception_exit(prev_state);
 }
@@ -874,7 +874,7 @@ out:
 void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
        const char *str)
 {
-       siginfo_t info;
+       siginfo_t info = { 0 };
        char b[40];
 
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
-               info.si_errno = 0;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
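
Both hunks above replace field-by-field stores into an uninitialized
siginfo_t with an initializer. A hedged userspace demonstration of why that
closes an infoleak: initializing any member of an aggregate zero-fills every
member left unnamed, so fields such as si_errno can no longer carry stale
stack contents to the signal handler:

       #include <signal.h>
       #include <stdio.h>

       int main(void)
       {
               /* every member not named here is implicitly zeroed */
               siginfo_t info = { .si_signo = SIGFPE, .si_code = FPE_INTOVF };

               printf("si_errno = %d\n", info.si_errno);  /* prints 0 */
               return 0;
       }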
index b9b803facdbf7594dc700ca828a2ced604e617aa..2683d04fdda5fb7def1849618814f0410933423f 100644 (file)
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
                void __user *uaddr = (void __user *)(long)reg->addr;
 
-               return copy_to_user(uaddr, vs, 16);
+               return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
        } else {
                return -EINVAL;
        }
@@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
                void __user *uaddr = (void __user *)(long)reg->addr;
 
-               return copy_from_user(vs, uaddr, 16);
+               return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
        } else {
                return -EINVAL;
        }
index bf9f1a77f0e59825fcdb6ced3ce73ca78d8c7c19..a2631a52ca998c0029b27cec6fa85bac9d0782d0 100644 (file)
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -13,6 +13,9 @@
 #define SMBUS_PCI_REG64                0x64
 #define SMBUS_PCI_REGB4                0xb4
 
+#define HPET_MIN_CYCLES                64
+#define HPET_MIN_PROG_DELTA    (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+
 static DEFINE_SPINLOCK(hpet_lock);
 DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
 
@@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta,
        cnt += delta;
        hpet_write(HPET_T0_CMP, cnt);
 
-       res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
-       return res;
+       res = (int)(cnt - hpet_read(HPET_COUNTER));
+
+       return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
 
 static irqreturn_t hpet_irq_handler(int irq, void *data)
@@ -237,7 +241,7 @@ void __init setup_hpet_timer(void)
        cd->cpumask = cpumask_of(cpu);
        clockevent_set_clock(cd, HPET_FREQ);
        cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
-       cd->min_delta_ns = 5000;
+       cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
 
        clockevents_register_device(cd);
        setup_irq(HPET_T0_IRQ, &hpet_irq);
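
A hedged sketch of the reprogram-then-verify pattern the hpet_next_event()
hunk adopts; it reuses the driver's own accessors, but the wrapper name is
ours:

       static int hpet_program_delta(unsigned long delta)
       {
               unsigned int cnt = hpet_read(HPET_COUNTER) + delta;

               hpet_write(HPET_T0_CMP, cnt);
               /*
                * Re-read the free-running counter: if fewer than
                * HPET_MIN_CYCLES remain, the comparator write may have
                * raced with the counter and the event can be lost, so
                * return -ETIME and let the clockevents core retry.
                */
               return (int)(cnt - hpet_read(HPET_COUNTER)) < HPET_MIN_CYCLES ?
                       -ETIME : 0;
       }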
index 1a4738a8f2d3906ccffb58bdf8d9b35ee4b04ef3..509832a9836c9ae70f52aa422ab70f85d7f8971c 100644 (file)
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
 #include "smp.h"
 
 DEFINE_PER_CPU(int, cpu_state);
-DEFINE_PER_CPU(uint32_t, core0_c0count);
 
 static void *ipi_set0_regs[16];
 static void *ipi_clear0_regs[16];
 static void *ipi_status0_regs[16];
 static void *ipi_en0_regs[16];
 static void *ipi_mailbox_buf[16];
+static uint32_t core0_c0count[NR_CPUS];
 
 /* read a 32bit value from ipi register */
 #define loongson3_ipi_read32(addr) readl(addr)
@@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
        if (action & SMP_ASK_C0COUNT) {
                BUG_ON(cpu != 0);
                c0count = read_c0_count();
-               for (i = 1; i < num_possible_cpus(); i++)
-                       per_cpu(core0_c0count, i) = c0count;
+               c0count = c0count ? c0count : 1;
+               for (i = 1; i < nr_cpu_ids; i++)
+                       core0_c0count[i] = c0count;
+               __wbflush(); /* Let others see the result ASAP */
        }
 }
 
-#define MAX_LOOPS 1111
+#define MAX_LOOPS 800
 /*
  * SMP init and finish on secondary CPUs
  */
@@ -305,16 +307,20 @@ static void loongson3_init_secondary(void)
                cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
 
        i = 0;
-       __this_cpu_write(core0_c0count, 0);
+       core0_c0count[cpu] = 0;
        loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
-       while (!__this_cpu_read(core0_c0count)) {
+       while (!core0_c0count[cpu]) {
                i++;
                cpu_relax();
        }
 
        if (i > MAX_LOOPS)
                i = MAX_LOOPS;
-       initcount = __this_cpu_read(core0_c0count) + i;
+       if (cpu_data[cpu].package)
+               initcount = core0_c0count[cpu] + i;
+       else /* Local access is faster for loops */
+               initcount = core0_c0count[cpu] + i/2;
+
        write_c0_count(initcount);
 }
 
index 3bd0597d9c3da3a56ea6ba083ca05c67bd71c5e7..ddb8154610cc67253cd87c308ab9d6e47962d19c 100644 (file)
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void)
 
        sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
        sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
-       c->scache.sets = 64 << sets;
+       if (sets)
+               c->scache.sets = 64 << sets;
 
        line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
        line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
-       c->scache.linesz = 2 << line_sz;
+       if (line_sz)
+               c->scache.linesz = 2 << line_sz;
 
        assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
        assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
@@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void)
        c->scache.waysize = c->scache.sets * c->scache.linesz;
        c->scache.waybit = __ffs(c->scache.waysize);
 
-       c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+       if (c->scache.linesz) {
+               c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+               return 1;
+       }
 
-       return 1;
+       return 0;
 }
 
 void __weak platform_early_l2_init(void)
index 32e0be27673fefbeca6839929e61a581c8980902..29f73e00253d94fbd298ff2516fca23d17edddda 100644 (file)
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
        pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
        pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
 #endif
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        if (cpu_has_rixi) {
 #ifdef _PAGE_NO_EXEC_SHIFT
                pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
index 9585c81f755fc667370ed850cba16ee1ff91024c..ce0b2b4075c704f9ee2006c263b1ba1cf3e9afd1 100644 (file)
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
-       long ret = 0;
-
        /* Do the secure computing check first. */
        secure_computing_strict(regs->gr[20]);
 
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-           tracehook_report_syscall_entry(regs))
-               ret = -1L;
+           tracehook_report_syscall_entry(regs)) {
+               /*
+                * Tracing decided this syscall should not happen or the
+                * debugger stored an invalid system call number. Skip
+                * the system call and the system call restart handling.
+                */
+               regs->gr[20] = -1UL;
+               goto out;
+       }
 
 #ifdef CONFIG_64BIT
        if (!is_compat_task())
@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
                        regs->gr[24] & 0xffffffff,
                        regs->gr[23] & 0xffffffff);
 
-       return ret ? : regs->gr[20];
+out:
+       return regs->gr[20];
 }
 
 void do_syscall_trace_exit(struct pt_regs *regs)
index 3fbd7252a4b27efd106250f5b21e98910f8af8bd..fbafa0d0e2bf865db726d15f9c4beb4817afe86c 100644 (file)
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -343,7 +343,7 @@ tracesys_next:
 #endif
 
        comiclr,>>=     __NR_Linux_syscalls, %r20, %r0
-       b,n     .Lsyscall_nosys
+       b,n     .Ltracesys_nosys
 
        LDREGX  %r20(%r19), %r19
 
@@ -359,6 +359,9 @@ tracesys_next:
        be      0(%sr7,%r19)
        ldo     R%tracesys_exit(%r2),%r2
 
+.Ltracesys_nosys:
+       ldo     -ENOSYS(%r0),%r28               /* set errno */
+
        /* Do *not* call this function on the gateway page, because it
        makes a direct call to syscall_trace. */
        
index f69ecaa7ce338a0cc2d31ae5555024c5f8257083..52c1e273f8cd5d5d641e1f2bc7591ff8b0efdbd9 100644 (file)
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
                eeh_pcid_put(dev);
                if (driver->err_handler &&
                    driver->err_handler->error_detected &&
-                   driver->err_handler->slot_reset &&
-                   driver->err_handler->resume)
+                   driver->err_handler->slot_reset)
                        return NULL;
        }
 
index 2559b16da525e63427f7c82dbd1e3f2d693c7d12..17d9dcd29d45c342cce966303f2fbcc61198fcb6 100644 (file)
--- a/arch/s390/include/asm/fpu/internal.h
+++ b/arch/s390/include/asm/fpu/internal.h
@@ -48,6 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
 static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
 {
        fpregs->pad = 0;
+       fpregs->fpc = fpu->fpc;
        if (MACHINE_HAS_VX)
                convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
        else
@@ -57,6 +58,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
 
 static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
 {
+       fpu->fpc = fpregs->fpc;
        if (MACHINE_HAS_VX)
                convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
        else
index efaac2c3bb77a028a748de19820cd40ee528c0a0..e9a983f40a24d30b2d598ce4bcd932006efbf81f 100644 (file)
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -506,7 +506,6 @@ struct kvm_vcpu_arch {
        struct kvm_s390_sie_block *sie_block;
        unsigned int      host_acrs[NUM_ACRS];
        struct fpu        host_fpregs;
-       struct fpu        guest_fpregs;
        struct kvm_s390_local_interrupt local_int;
        struct hrtimer    ckc_timer;
        struct kvm_s390_pgm_info pgm;
index 9cd248f637c7a7bb8a1666df32499858d91e31c1..dc6c9c60454306b4d42ce37969432836f2b7c872 100644 (file)
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -181,6 +181,7 @@ int main(void)
        OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
        OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
        OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
+       OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area);
        OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
        OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
        OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
index 66c94417c0ba09a471244a629c78d2457f38b17d..4af60374eba01fba89dc8b170a41428b094160b5 100644 (file)
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
 
        /* Restore high gprs from signal stack */
        if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
-                            sizeof(&sregs_ext->gprs_high)))
+                            sizeof(sregs_ext->gprs_high)))
                return -EFAULT;
        for (i = 0; i < NUM_GPRS; i++)
                *(__u32 *)&regs->gprs[i] = gprs_high[i];
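
The compat_signal.c fix above is the classic sizeof-of-a-pointer bug: taking
sizeof of &sregs_ext->gprs_high yields the pointer width (8 bytes on s390x),
not the 64-byte array, so only the first two high halves were restored. A
small userspace demonstration (the array size mirrors NUM_GPRS):

       #include <stdio.h>

       int main(void)
       {
               unsigned int gprs_high[16];

               /* prints "8 64" on a typical 64-bit target */
               printf("%zu %zu\n", sizeof(&gprs_high), sizeof(gprs_high));
               return 0;
       }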
index 846589281b046f414837cbd3f7ab9f4d0c675a51..a08d0afd5ff6e8a99f888431c784a0f3b8d85123 100644 (file)
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1268,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-/*
- * Backs up the current FP/VX register save area on a particular
- * destination.  Used to switch between different register save
- * areas.
- */
-static inline void save_fpu_to(struct fpu *dst)
-{
-       dst->fpc = current->thread.fpu.fpc;
-       dst->regs = current->thread.fpu.regs;
-}
-
-/*
- * Switches the FP/VX register save area from which to lazy
- * restore register contents.
- */
-static inline void load_fpu_from(struct fpu *from)
-{
-       current->thread.fpu.fpc = from->fpc;
-       current->thread.fpu.regs = from->regs;
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        /* Save host register state */
        save_fpu_regs();
-       save_fpu_to(&vcpu->arch.host_fpregs);
-
-       if (test_kvm_facility(vcpu->kvm, 129)) {
-               current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
-               /*
-                * Use the register save area in the SIE-control block
-                * for register restore and save in kvm_arch_vcpu_put()
-                */
-               current->thread.fpu.vxrs =
-                       (__vector128 *)&vcpu->run->s.regs.vrs;
-       } else
-               load_fpu_from(&vcpu->arch.guest_fpregs);
+       vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+       vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
 
+       /* Depending on MACHINE_HAS_VX, data stored to vrs either
+        * has vector register or floating point register format.
+        */
+       current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+       current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
        if (test_fp_ctl(current->thread.fpu.fpc))
                /* User space provided an invalid FPC, let's clear it */
                current->thread.fpu.fpc = 0;
@@ -1321,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
 
+       /* Save guest register state */
        save_fpu_regs();
+       vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
 
-       if (test_kvm_facility(vcpu->kvm, 129))
-               /*
-                * kvm_arch_vcpu_load() set up the register save area to
-                * the &vcpu->run->s.regs.vrs and, thus, the vector registers
-                * are already saved.  Only the floating-point control must be
-                * copied.
-                */
-               vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-       else
-               save_fpu_to(&vcpu->arch.guest_fpregs);
-       load_fpu_from(&vcpu->arch.host_fpregs);
+       /* Restore host register state */
+       current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+       current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
 
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
@@ -1351,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
-       vcpu->arch.guest_fpregs.fpc = 0;
-       asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+       /* make sure the new fpc will be lazily loaded */
+       save_fpu_regs();
+       current->thread.fpu.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@@ -1501,19 +1470,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
 
-       /*
-        * Allocate a save area for floating-point registers.  If the vector
-        * extension is available, register contents are saved in the SIE
-        * control block.  The allocated save area is still required in
-        * particular places, for example, in kvm_s390_vcpu_store_status().
-        */
-       vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
-                                              GFP_KERNEL);
-       if (!vcpu->arch.guest_fpregs.fprs) {
-               rc = -ENOMEM;
-               goto out_free_sie_block;
-       }
-
        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
@@ -1734,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
+       /* make sure the new values will be lazily loaded */
+       save_fpu_regs();
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
-       memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-       vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-       save_fpu_regs();
-       load_fpu_from(&vcpu->arch.guest_fpregs);
+       current->thread.fpu.fpc = fpu->fpc;
+       if (MACHINE_HAS_VX)
+               convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+       else
+               memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
-       fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+       /* make sure we have the latest values */
+       save_fpu_regs();
+       if (MACHINE_HAS_VX)
+               convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+       else
+               memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+       fpu->fpc = current->thread.fpu.fpc;
        return 0;
 }
 
@@ -2266,41 +2230,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 {
        unsigned char archmode = 1;
+       freg_t fprs[NUM_FPRS];
        unsigned int px;
        u64 clkcomp;
        int rc;
 
+       px = kvm_s390_get_prefix(vcpu);
        if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
                if (write_guest_abs(vcpu, 163, &archmode, 1))
                        return -EFAULT;
-               gpa = SAVE_AREA_BASE;
+               gpa = 0;
        } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
                if (write_guest_real(vcpu, 163, &archmode, 1))
                        return -EFAULT;
-               gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
+               gpa = px;
+       } else
+               gpa -= __LC_FPREGS_SAVE_AREA;
+
+       /* manually convert vector registers if necessary */
+       if (MACHINE_HAS_VX) {
+               convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+               rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+                                    fprs, 128);
+       } else {
+               rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+                                    vcpu->run->s.regs.vrs, 128);
        }
-       rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
-                            vcpu->arch.guest_fpregs.fprs, 128);
-       rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
+       rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
                              vcpu->run->s.regs.gprs, 128);
-       rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
+       rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
                              &vcpu->arch.sie_block->gpsw, 16);
-       px = kvm_s390_get_prefix(vcpu);
-       rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
+       rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
                              &px, 4);
-       rc |= write_guest_abs(vcpu,
-                             gpa + offsetof(struct save_area, fp_ctrl_reg),
-                             &vcpu->arch.guest_fpregs.fpc, 4);
-       rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
+       rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
+                             &vcpu->run->s.regs.fpc, 4);
+       rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
                              &vcpu->arch.sie_block->todpr, 4);
-       rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
+       rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
                              &vcpu->arch.sie_block->cputm, 8);
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
-       rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
+       rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
                              &clkcomp, 8);
-       rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
+       rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
                              &vcpu->run->s.regs.acrs, 64);
-       rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
+       rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
                              &vcpu->arch.sie_block->gcr, 128);
        return rc ? -EFAULT : 0;
 }
@@ -2313,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         * it into the save area
         */
        save_fpu_regs();
-       if (test_kvm_facility(vcpu->kvm, 129)) {
-               /*
-                * If the vector extension is available, the vector registers
-                * which overlaps with floating-point registers are saved in
-                * the SIE-control block.  Hence, extract the floating-point
-                * registers and the FPC value and store them in the
-                * guest_fpregs structure.
-                */
-               vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
-               convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
-                                current->thread.fpu.vxrs);
-       } else
-               save_fpu_to(&vcpu->arch.guest_fpregs);
+       vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
        save_access_regs(vcpu->run->s.regs.acrs);
 
        return kvm_s390_store_status_unloaded(vcpu, addr);
index 4d1ee88864e8a09da7b567896a4adc1d949605a4..18c8b819b0aa96fa6cabfad631a38e02fd59dbbf 100644 (file)
--- a/arch/s390/mm/extable.c
+++ b/arch/s390/mm/extable.c
@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
        int i;
 
        /* Normalize entries to being relative to the start of the section */
-       for (p = start, i = 0; p < finish; p++, i += 8)
+       for (p = start, i = 0; p < finish; p++, i += 8) {
                p->insn += i;
+               p->fixup += i + 4;
+       }
        sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
        /* Denormalize all entries */
-       for (p = start, i = 0; p < finish; p++, i += 8)
+       for (p = start, i = 0; p < finish; p++, i += 8) {
                p->insn -= i;
+               p->fixup -= i + 4;
+       }
 }
 
 #ifdef CONFIG_MODULES
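
For context on the extable change above, a hedged sketch of the s390 entry
layout the loop arithmetic implies: two 32-bit fields at an 8-byte stride,
each encoding an offset relative to its own address. Rebasing only ->insn,
as the old code did, leaves ->fixup relative to the entry's pre-sort
address, so it breaks for every entry that sort() actually moves:

       struct exception_table_entry {
               int insn;   /* target == (long)&entry->insn  + entry->insn  */
               int fixup;  /* target == (long)&entry->fixup + entry->fixup */
       };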
index 30e7ddb27a3a966e74e5ab79ad686bab26eab459..c690c8e16a96ef2758fca4e9af8080ec7af6c17a 100644 (file)
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -413,7 +413,7 @@ out:
 
 SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
 {
-       int ret;
+       long ret;
 
        if (personality(current->personality) == PER_LINUX32 &&
            personality(personality) == PER_LINUX)
index 47f1ff056a54fdc36657728cc3fe99dc5a7f9be0..22a358ef1b0cdaf227d5c768bc5670672fa45554 100644 (file)
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -94,6 +94,8 @@ static int start_ptraced_child(void)
 {
        int pid, n, status;
 
+       fflush(stdout);
+
        pid = fork();
        if (pid == 0)
                ptrace_child();
index 6a1ae3751e824d9917e65c136e9f7de3cc5f4f47..15cfebaa7688ca879a6e4d06dbb3d088b1ee50a9 100644 (file)
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -267,6 +267,7 @@ ENTRY(entry_INT80_compat)
         * Interrupts are off on entry.
         */
        PARAVIRT_ADJUST_EXCEPTION_FRAME
+       ASM_CLAC                        /* Do this early to minimize exposure */
        SWAPGS
 
        /*
index 881b4768644aad3537364286e468146aba73ad56..e7de5c9a4fbdd2c5d99b78d82d2c58019d20dc04 100644 (file)
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
+struct irq_desc;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 
-struct irq_desc;
 extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
index d1daead5fcddd57ab4cd0675315420bf52608584..adb3eaf8fe2a5e038c2444bd8f75b55716faf607 100644 (file)
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -16,6 +16,7 @@
 #include <asm/cacheflush.h>
 #include <asm/realmode.h>
 
+#include <linux/ftrace.h>
 #include "../../realmode/rm/wakeup.h"
 #include "sleep.h"
 
@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
        saved_magic = 0x123456789abcdef0L;
 #endif /* CONFIG_64BIT */
 
+       /*
+        * Pause/unpause graph tracing around do_suspend_lowlevel as it has
+        * inconsistent call/return info after it jumps to the wakeup vector.
+        */
+       pause_graph_tracing();
        do_suspend_lowlevel();
+       unpause_graph_tracing();
        return 0;
 }
 
index f25321894ad294278c20d5ba7b4076ce2353a8fb..fdb0fbfb1197a4cf4e485c4f330b3a399b3a6d5f 100644 (file)
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
 {
        int pin, ioapic, irq, irq_entry;
        const struct cpumask *mask;
+       struct irq_desc *desc;
        struct irq_data *idata;
        struct irq_chip *chip;
 
@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
                if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
                        continue;
 
-               idata = irq_get_irq_data(irq);
+               desc = irq_to_desc(irq);
+               raw_spin_lock_irq(&desc->lock);
+               idata = irq_desc_get_irq_data(desc);
 
                /*
                 * Honour affinities which have been set in early boot
@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
                /* Might be lapic_chip for irq 0 */
                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(idata, mask, false);
+               raw_spin_unlock_irq(&desc->lock);
        }
 }
 #endif
index 861bc59c8f2564eac7b87193e7cd250571e1fbf4..a35f6b5473f48d8833bd549a80c799260df3dc6a 100644 (file)
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -30,7 +30,7 @@ struct apic_chip_data {
 
 struct irq_domain *x86_vector_domain;
 static DEFINE_RAW_SPINLOCK(vector_lock);
-static cpumask_var_t vector_cpumask;
+static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
 static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@@ -116,35 +116,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
         */
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 16;
-       int cpu, err;
+       int cpu, vector;
 
-       if (d->move_in_progress)
+       /*
+        * If there is still a move in progress or the previous move has not
+        * been cleaned up completely, tell the caller to come back later.
+        */
+       if (d->move_in_progress ||
+           cpumask_intersects(d->old_domain, cpu_online_mask))
                return -EBUSY;
 
        /* Only try and allocate irqs on cpus that are present */
-       err = -ENOSPC;
        cpumask_clear(d->old_domain);
+       cpumask_clear(searched_cpumask);
        cpu = cpumask_first_and(mask, cpu_online_mask);
        while (cpu < nr_cpu_ids) {
-               int new_cpu, vector, offset;
+               int new_cpu, offset;
 
+               /* Get the possible target cpus for @mask/@cpu from the apic */
                apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
+               /*
+                * Clear the offline cpus from @vector_cpumask for searching
+                * and verify whether the result overlaps with @mask. If true,
+                * then the call to apic->cpu_mask_to_apicid_and() will
+                * succeed as well. If not, no point in trying to find a
+                * vector in this mask.
+                */
+               cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
+               if (!cpumask_intersects(vector_searchmask, mask))
+                       goto next_cpu;
+
                if (cpumask_subset(vector_cpumask, d->domain)) {
-                       err = 0;
                        if (cpumask_equal(vector_cpumask, d->domain))
-                               break;
+                               goto success;
                        /*
-                        * New cpumask using the vector is a proper subset of
-                        * the current in use mask. So cleanup the vector
-                        * allocation for the members that are not used anymore.
+                        * Mark the cpus which are no longer in the mask for
+                        * cleanup.
                         */
-                       cpumask_andnot(d->old_domain, d->domain,
-                                      vector_cpumask);
-                       d->move_in_progress =
-                          cpumask_intersects(d->old_domain, cpu_online_mask);
-                       cpumask_and(d->domain, d->domain, vector_cpumask);
-                       break;
+                       cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
+                       vector = d->cfg.vector;
+                       goto update;
                }
 
                vector = current_vector;
@@ -156,45 +168,60 @@ next:
                        vector = FIRST_EXTERNAL_VECTOR + offset;
                }
 
-               if (unlikely(current_vector == vector)) {
-                       cpumask_or(d->old_domain, d->old_domain,
-                                  vector_cpumask);
-                       cpumask_andnot(vector_cpumask, mask, d->old_domain);
-                       cpu = cpumask_first_and(vector_cpumask,
-                                               cpu_online_mask);
-                       continue;
-               }
+               /* If the search wrapped around, try the next cpu */
+               if (unlikely(current_vector == vector))
+                       goto next_cpu;
 
                if (test_bit(vector, used_vectors))
                        goto next;
 
-               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
+               for_each_cpu(new_cpu, vector_searchmask) {
                        if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
                                goto next;
                }
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
-               if (d->cfg.vector) {
+               /* Schedule the old vector for cleanup on all cpus */
+               if (d->cfg.vector)
                        cpumask_copy(d->old_domain, d->domain);
-                       d->move_in_progress =
-                          cpumask_intersects(d->old_domain, cpu_online_mask);
-               }
-               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
+               for_each_cpu(new_cpu, vector_searchmask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
-               d->cfg.vector = vector;
-               cpumask_copy(d->domain, vector_cpumask);
-               err = 0;
-               break;
-       }
+               goto update;
 
-       if (!err) {
-               /* cache destination APIC IDs into cfg->dest_apicid */
-               err = apic->cpu_mask_to_apicid_and(mask, d->domain,
-                                                  &d->cfg.dest_apicid);
+next_cpu:
+               /*
+                * We exclude the current @vector_cpumask from the requested
+                * @mask and try again with the next online cpu in the
+                * result. We cannot modify @mask, so we use @vector_cpumask
+                * as a temporary buffer here as it will be reassigned when
+                * calling apic->vector_allocation_domain() above.
+                */
+               cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
+               cpumask_andnot(vector_cpumask, mask, searched_cpumask);
+               cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
+               continue;
        }
+       return -ENOSPC;
 
-       return err;
+update:
+       /*
+        * Exclude offline cpus from the cleanup mask and set the
+        * move_in_progress flag when the result is not empty.
+        */
+       cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
+       d->move_in_progress = !cpumask_empty(d->old_domain);
+       d->cfg.vector = vector;
+       cpumask_copy(d->domain, vector_cpumask);
+success:
+       /*
+        * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
+        * as we already established that mask & d->domain & cpu_online_mask
+        * is not empty.
+        */
+       BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+                                           &d->cfg.dest_apicid));
+       return 0;
 }
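
The rewritten search leans on a handful of cpumask primitives: cpumask_and(), cpumask_intersects(), and cpumask_andnot(). Their semantics, modeled on plain 64-bit masks in a runnable sketch (mask_t and the values are illustrative, not the kernel types):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t mask_t;        /* cpumask stand-in, up to 64 cpus */

    static mask_t mask_and(mask_t a, mask_t b)        { return a & b; }
    static bool   mask_intersects(mask_t a, mask_t b) { return (a & b) != 0; }
    static mask_t mask_andnot(mask_t a, mask_t b)     { return a & ~b; }

    int main(void)
    {
            mask_t requested = 0x0f;        /* cpus 0-3 requested */
            mask_t online    = 0x0e;        /* cpu 0 is offline */
            mask_t searched  = 0;           /* cpus already tried */

            /* vector_searchmask step: only search online cpus */
            mask_t search = mask_and(requested, online);
            assert(mask_intersects(search, requested));

            /* next_cpu step: exclude what was already searched, retry */
            searched |= 0x06;               /* suppose cpus 1-2 failed */
            mask_t retry = mask_andnot(search, searched);
            assert(retry == 0x08);          /* cpu 3 is tried next */
            return 0;
    }
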
 
 static int assign_irq_vector(int irq, struct apic_chip_data *data,
@@ -224,10 +251,8 @@ static int assign_irq_vector_policy(int irq, int node,
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
        struct irq_desc *desc;
-       unsigned long flags;
        int cpu, vector;
 
-       raw_spin_lock_irqsave(&vector_lock, flags);
        BUG_ON(!data->cfg.vector);
 
        vector = data->cfg.vector;
@@ -237,10 +262,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
        data->cfg.vector = 0;
        cpumask_clear(data->domain);
 
-       if (likely(!data->move_in_progress)) {
-               raw_spin_unlock_irqrestore(&vector_lock, flags);
+       /*
+        * If move is in progress or the old_domain mask is not empty,
+        * i.e. the cleanup IPI has not been processed yet, we need to remove
+        * the old references to desc from all cpus' vector tables.
+        */
+       if (!data->move_in_progress && cpumask_empty(data->old_domain))
                return;
-       }
 
        desc = irq_to_desc(irq);
        for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
@@ -253,7 +281,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
                }
        }
        data->move_in_progress = 0;
-       raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void init_irq_alloc_info(struct irq_alloc_info *info,
@@ -274,19 +301,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
 static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
 {
+       struct apic_chip_data *apic_data;
        struct irq_data *irq_data;
+       unsigned long flags;
        int i;
 
        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irq_data && irq_data->chip_data) {
+                       raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(virq + i, irq_data->chip_data);
-                       free_apic_chip_data(irq_data->chip_data);
+                       apic_data = irq_data->chip_data;
+                       irq_domain_reset_irq_data(irq_data);
+                       raw_spin_unlock_irqrestore(&vector_lock, flags);
+                       free_apic_chip_data(apic_data);
 #ifdef CONFIG_X86_IO_APIC
                        if (virq + i < nr_legacy_irqs())
                                legacy_irq_data[virq + i] = NULL;
 #endif
-                       irq_domain_reset_irq_data(irq_data);
                }
        }
 }
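
x86_vector_free_irqs() now detaches chip_data while holding vector_lock and frees it only after the lock is dropped. A minimal pthread analogue of that detach-under-lock, free-outside pattern (all names invented):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    static void free_first(void)
    {
            struct node *victim;

            pthread_mutex_lock(&list_lock);
            victim = head;                  /* detach under the lock ... */
            if (victim)
                    head = victim->next;
            pthread_mutex_unlock(&list_lock);

            free(victim);                   /* ... free outside it, so the
                                             * allocator never runs with
                                             * the lock held */
    }

    int main(void)
    {
            head = calloc(1, sizeof(*head));
            free_first();
            return 0;
    }
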
@@ -404,6 +436,8 @@ int __init arch_early_irq_init(void)
        arch_init_htirq_domain(x86_vector_domain);
 
        BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+       BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
+       BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
 
        return arch_early_ioapic_init();
 }
@@ -492,14 +526,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
                return -EINVAL;
 
        err = assign_irq_vector(irq, data, dest);
-       if (err) {
-               if (assign_irq_vector(irq, data,
-                                     irq_data_get_affinity_mask(irq_data)))
-                       pr_err("Failed to recover vector for irq %d\n", irq);
-               return err;
-       }
-
-       return IRQ_SET_MASK_OK;
+       return err ? err : IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip lapic_controller = {
@@ -511,20 +538,12 @@ static struct irq_chip lapic_controller = {
 #ifdef CONFIG_SMP
 static void __send_cleanup_vector(struct apic_chip_data *data)
 {
-       cpumask_var_t cleanup_mask;
-
-       if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-               unsigned int i;
-
-               for_each_cpu_and(i, data->old_domain, cpu_online_mask)
-                       apic->send_IPI_mask(cpumask_of(i),
-                                           IRQ_MOVE_CLEANUP_VECTOR);
-       } else {
-               cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
-               apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-               free_cpumask_var(cleanup_mask);
-       }
+       raw_spin_lock(&vector_lock);
+       cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
        data->move_in_progress = 0;
+       if (!cpumask_empty(data->old_domain))
+               apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
+       raw_spin_unlock(&vector_lock);
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)
@@ -568,12 +587,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                        goto unlock;
 
                /*
-                * Check if the irq migration is in progress. If so, we
-                * haven't received the cleanup request yet for this irq.
+                * Nothing to cleanup if irq migration is in progress
+                * or this cpu is not set in the cleanup mask.
                 */
-               if (data->move_in_progress)
+               if (data->move_in_progress ||
+                   !cpumask_test_cpu(me, data->old_domain))
                        goto unlock;
 
+               /*
+                * We have two cases to handle here:
+                * 1) vector is unchanged but the target mask got reduced
+                * 2) vector and the target mask have changed
+                *
+                * #1 is obvious, but in #2 we have two vectors with the same
+                * irq descriptor: the old and the new vector. So we need to
+                * make sure that we only clean up the old vector. The new
+                * vector has the current @vector number in the config and
+                * this cpu is part of the target mask. We better leave that
+                * one alone.
+                */
                if (vector == data->cfg.vector &&
                    cpumask_test_cpu(me, data->domain))
                        goto unlock;
@@ -591,6 +623,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                        goto unlock;
                }
                __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+               cpumask_clear_cpu(me, data->old_domain);
 unlock:
                raw_spin_unlock(&desc->lock);
        }
@@ -619,12 +652,48 @@ void irq_complete_move(struct irq_cfg *cfg)
        __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
 }
 
-void irq_force_complete_move(int irq)
+/*
+ * Called with @desc->lock held and interrupts disabled.
+ */
+void irq_force_complete_move(struct irq_desc *desc)
 {
-       struct irq_cfg *cfg = irq_cfg(irq);
+       struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+       struct apic_chip_data *data = apic_chip_data(irqdata);
+       struct irq_cfg *cfg = data ? &data->cfg : NULL;
 
-       if (cfg)
-               __irq_complete_move(cfg, cfg->vector);
+       if (!cfg)
+               return;
+
+       __irq_complete_move(cfg, cfg->vector);
+
+       /*
+        * This is tricky. If the cleanup of @data->old_domain has not been
+        * done yet, then the following setaffinity call will fail with
+        * -EBUSY. This can leave the interrupt in a stale state.
+        *
+        * The cleanup cannot make progress because we hold @desc->lock. So in
+        * case @data->old_domain is not yet cleaned up, we need to drop the
+        * lock and acquire it again. @desc cannot go away, because the
+        * hotplug code holds the sparse irq lock.
+        */
+       raw_spin_lock(&vector_lock);
+       /* Clean out all offline cpus (including ourself) first. */
+       cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+       while (!cpumask_empty(data->old_domain)) {
+               raw_spin_unlock(&vector_lock);
+               raw_spin_unlock(&desc->lock);
+               cpu_relax();
+               raw_spin_lock(&desc->lock);
+               /*
+                * Reevaluate apic_chip_data. It might have been cleared after
+                * we dropped @desc->lock.
+                */
+               data = apic_chip_data(irqdata);
+               if (!data)
+                       return;
+               raw_spin_lock(&vector_lock);
+       }
+       raw_spin_unlock(&vector_lock);
 }
 #endif
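
irq_force_complete_move() waits out a concurrent cleanup by repeatedly dropping both locks, letting the other side run, and re-acquiring. A pthread analogue of the drop-and-retry shape (illustrative only; the flag plays the role of the pending old_domain cleanup):

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>

    static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool cleanup_pending = true;

    static void *cleaner(void *arg)         /* the side doing the cleanup */
    {
            (void)arg;
            pthread_mutex_lock(&desc_lock);
            pthread_mutex_lock(&vector_lock);
            cleanup_pending = false;
            pthread_mutex_unlock(&vector_lock);
            pthread_mutex_unlock(&desc_lock);
            return NULL;
    }

    /* Called with desc_lock held; returns with it held. */
    static void wait_for_cleanup(void)
    {
            pthread_mutex_lock(&vector_lock);
            while (cleanup_pending) {
                    /* The cleaner needs both locks to make progress, so
                     * drop them, give it a chance to run, re-acquire. */
                    pthread_mutex_unlock(&vector_lock);
                    pthread_mutex_unlock(&desc_lock);
                    sched_yield();          /* cpu_relax() stand-in */
                    pthread_mutex_lock(&desc_lock);
                    pthread_mutex_lock(&vector_lock);
            }
            pthread_mutex_unlock(&vector_lock);
    }

    int main(void)
    {
            pthread_t t;

            pthread_mutex_lock(&desc_lock);
            pthread_create(&t, NULL, cleaner, NULL);
            wait_for_cleanup();
            pthread_mutex_unlock(&desc_lock);
            pthread_join(t, NULL);
            return 0;
    }
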
 
index f8062aaf5df9c830ec287f6774ad270e13d34b32..61521dc19c102114e177cbc21a4f5da9d94c20cd 100644 (file)
@@ -462,7 +462,7 @@ void fixup_irqs(void)
                 * non intr-remapping case, we can't wait till this interrupt
                 * arrives at this cpu before completing the irq move.
                 */
-               irq_force_complete_move(irq);
+               irq_force_complete_move(desc);
 
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
@@ -470,6 +470,15 @@ void fixup_irqs(void)
                }
 
                chip = irq_data_get_irq_chip(data);
+               /*
+                * The interrupt descriptor might have been cleaned up
+                * already, but it is not yet removed from the radix tree.
+                */
+               if (!chip) {
+                       raw_spin_unlock(&desc->lock);
+                       continue;
+               }
+
                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);
 
index 1505587d06e97826703b21aba540ea8403bd8f2c..b9b09fec173bf2fedb0b9ee122b3c4668eb6e471 100644 (file)
@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
        u16 sel;
 
        la = seg_base(ctxt, addr.seg) + addr.ea;
-       *linear = la;
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
+               *linear = la;
                if (is_noncanonical_address(la))
                        goto bad;
 
@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                        goto bad;
                break;
        default:
+               *linear = la = (u32)la;
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                        if (size > *max_size)
                                goto bad;
                }
-               la &= (u32)-1;
                break;
        }
        if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
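
Outside 64-bit mode the linear address space wraps at 4 GiB, and the fix truncates before the limit checks run rather than after, via *linear = la = (u32)la. The truncation itself is plain integer narrowing:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t la = 0x1ffff0000ull;   /* base + offset overflowed */

            la = (uint32_t)la;              /* protected-mode wrap at 4 GiB */
            assert(la == 0xffff0000ull);
            return 0;
    }
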
index 3058a22a658d25db2bb8ecf4a37657bc1f873c1d..7be8a251363eaa59d12ba2c7878c1e1b43058d42 100644 (file)
@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                        return ret;
 
                kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
-               walker->ptes[level] = pte;
+               walker->ptes[level - 1] = pte;
        }
        return 0;
 }
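
The off-by-one here is the classic 1-based level versus 0-based array index: level N of the walk belongs in ptes[N - 1], and indexing by the raw level overruns the array at the top level. In miniature:

    #include <assert.h>

    #define MAX_LEVELS 4

    int main(void)
    {
            unsigned long ptes[MAX_LEVELS];

            for (int level = MAX_LEVELS; level >= 1; --level)
                    ptes[level - 1] = level;  /* ptes[level] would overrun
                                               * the array at level == 4 */
            assert(ptes[0] == 1 && ptes[3] == 4);
            return 0;
    }
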
index 10e7693b3540708076f366529b296ac8b390292d..5fd846cd6e0e92b983d50bcfbfa0c82b5aea1224 100644 (file)
@@ -595,6 +595,8 @@ struct vcpu_vmx {
        /* Support for PML */
 #define PML_ENTITY_NUM         512
        struct page *pml_pg;
+
+       u64 current_tsc_ratio;
 };
 
 enum segment_cache_field {
@@ -2062,14 +2064,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
 
-               /* Setup TSC multiplier */
-               if (cpu_has_vmx_tsc_scaling())
-                       vmcs_write64(TSC_MULTIPLIER,
-                                    vcpu->arch.tsc_scaling_ratio);
-
                vmx->loaded_vmcs->cpu = cpu;
        }
 
+       /* Setup TSC multiplier */
+       if (kvm_has_tsc_control &&
+           vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
+               vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
+               vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+       }
+
        vmx_vcpu_pi_load(vcpu, cpu);
 }
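
Instead of rewriting TSC_MULTIPLIER on every vcpu load, the patch caches the last value written and touches the VMCS field only when the ratio actually changes. The caching shape, with an invented stand-in for vmcs_write64():

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t current_ratio;          /* last value actually written */

    static void expensive_write(uint64_t v) /* vmcs_write64() stand-in */
    {
            printf("write %llu\n", (unsigned long long)v);
    }

    static void vcpu_load(uint64_t wanted_ratio)
    {
            if (current_ratio != wanted_ratio) {
                    current_ratio = wanted_ratio;
                    expensive_write(current_ratio);
            }
    }

    int main(void)
    {
            vcpu_load(3);   /* writes */
            vcpu_load(3);   /* skipped: value unchanged */
            vcpu_load(5);   /* writes */
            return 0;
    }
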
 
index 9a2ed890451339f2882be52fc1e741417c3299a7..d2945024ed3378bd9e12ee35fc541c86d10b2963 100644 (file)
@@ -6544,12 +6544,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         * KVM_DEBUGREG_WONT_EXIT again.
         */
        if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
-               int i;
-
                WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
                kvm_x86_ops->sync_dirty_debug_regs(vcpu);
-               for (i = 0; i < KVM_NR_DB_REGS; i++)
-                       vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+               kvm_update_dr0123(vcpu);
+               kvm_update_dr6(vcpu);
+               kvm_update_dr7(vcpu);
+               vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        }
 
        /*
index b2fd67da1701433d9168ebf1d0a908330544bd7d..ef05755a190063cdf79210f7ac164aef1a0f4ec5 100644 (file)
@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
                break;
        }
 
-       if (regno > nr_registers) {
+       if (regno >= nr_registers) {
                WARN_ONCE(1, "decoded an instruction with an invalid register");
                return -EINVAL;
        }
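
For an array of nr_registers entries the valid indices are 0 .. nr_registers - 1, so the reject test must be >=; the old > let regno == nr_registers slip through and read one element past the end:

    #include <stdio.h>

    #define NR_REGS 16

    static long regs[NR_REGS];

    static long *get_reg(int regno)
    {
            if (regno < 0 || regno >= NR_REGS)  /* '>' would let
                                                 * regno == NR_REGS through
                                                 * and index past the end */
                    return NULL;
            return &regs[regno];
    }

    int main(void)
    {
            printf("%p\n", (void *)get_reg(NR_REGS));   /* NULL: rejected */
            return 0;
    }
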
index 4f184d938942dcbbbbedbac3b330f6e4bb39a4a9..d4d144363250f7dfe2e2ed41ed9fca7425c18c0a 100644 (file)
@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
        if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
                /*
                 * if we're in a workqueue, the request is orphaned, so
-                * don't copy into a random user address space, just free.
+                * don't copy into a random user address space, just free
+                * and return -EINTR so user space doesn't expect any data.
                 */
-               if (current->mm && bio_data_dir(bio) == READ)
+               if (!current->mm)
+                       ret = -EINTR;
+               else if (bio_data_dir(bio) == READ)
                        ret = bio_copy_to_iter(bio, bmd->iter);
                if (bmd->is_our_pages)
                        bio_free_pages(bio);
index dd49735839789167d427dc8b64da71c6b21f08f5..c7bb666aafd100a329c67b97e616c9f9037c508e 100644 (file)
@@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-       lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
-               BLK_SAFE_MAX_SECTORS;
+       lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+       lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
index 3405f7a41e2576415d344eb0782323ba469ea0a4..5fdac394207afabef12a380ef8a20eca4a9bad05 100644 (file)
@@ -464,6 +464,15 @@ static struct dmi_system_id video_dmi_table[] = {
         * control on these systems, but do not register a backlight sysfs
         * as brightness control does not work.
         */
+       {
+        /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+        .callback = video_disable_backlight_sysfs_if,
+        .ident = "Toshiba Portege R700",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
+               },
+       },
        {
         /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
         .callback = video_disable_backlight_sysfs_if,
@@ -473,6 +482,15 @@ static struct dmi_system_id video_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
                },
        },
+       {
+        /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+        .callback = video_disable_backlight_sysfs_if,
+        .ident = "Toshiba Satellite R830",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
+               },
+       },
        /*
         * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
         * but the IDs actually follow the Device ID Scheme.
index aa45d480270721280a9e8085574a14d086a079fc..11d8209e6e5d12972c0c86fcf0fa1b506ab46232 100644 (file)
@@ -468,37 +468,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
        nfit_mem->bdw = NULL;
 }
 
-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
                struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
 {
        u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
        struct nfit_memdev *nfit_memdev;
        struct nfit_flush *nfit_flush;
-       struct nfit_dcr *nfit_dcr;
        struct nfit_bdw *nfit_bdw;
        struct nfit_idt *nfit_idt;
        u16 idt_idx, range_index;
 
-       list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
-               if (nfit_dcr->dcr->region_index != dcr)
-                       continue;
-               nfit_mem->dcr = nfit_dcr->dcr;
-               break;
-       }
-
-       if (!nfit_mem->dcr) {
-               dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
-                               spa->range_index, __to_nfit_memdev(nfit_mem)
-                               ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
-               return -ENODEV;
-       }
-
-       /*
-        * We've found enough to create an nvdimm, optionally
-        * find an associated BDW
-        */
-       list_add(&nfit_mem->list, &acpi_desc->dimms);
-
        list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
                if (nfit_bdw->bdw->region_index != dcr)
                        continue;
@@ -507,12 +486,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
        }
 
        if (!nfit_mem->bdw)
-               return 0;
+               return;
 
        nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
 
        if (!nfit_mem->spa_bdw)
-               return 0;
+               return;
 
        range_index = nfit_mem->spa_bdw->range_index;
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -537,8 +516,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
                }
                break;
        }
-
-       return 0;
 }
 
 static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -547,7 +524,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
        struct nfit_mem *nfit_mem, *found;
        struct nfit_memdev *nfit_memdev;
        int type = nfit_spa_type(spa);
-       u16 dcr;
 
        switch (type) {
        case NFIT_SPA_DCR:
@@ -558,14 +534,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
        }
 
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
-               int rc;
+               struct nfit_dcr *nfit_dcr;
+               u32 device_handle;
+               u16 dcr;
 
                if (nfit_memdev->memdev->range_index != spa->range_index)
                        continue;
                found = NULL;
                dcr = nfit_memdev->memdev->region_index;
+               device_handle = nfit_memdev->memdev->device_handle;
                list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
-                       if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
+                       if (__to_nfit_memdev(nfit_mem)->device_handle
+                                       == device_handle) {
                                found = nfit_mem;
                                break;
                        }
@@ -578,6 +558,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
                        if (!nfit_mem)
                                return -ENOMEM;
                        INIT_LIST_HEAD(&nfit_mem->list);
+                       list_add(&nfit_mem->list, &acpi_desc->dimms);
+               }
+
+               list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+                       if (nfit_dcr->dcr->region_index != dcr)
+                               continue;
+                       /*
+                        * Record the control region for the dimm.  For
+                        * the ACPI 6.1 case, where there are separate
+                        * control regions for the pmem vs blk
+                        * interfaces, be sure to record the extended
+                        * blk details.
+                        */
+                       if (!nfit_mem->dcr)
+                               nfit_mem->dcr = nfit_dcr->dcr;
+                       else if (nfit_mem->dcr->windows == 0
+                                       && nfit_dcr->dcr->windows)
+                               nfit_mem->dcr = nfit_dcr->dcr;
+                       break;
+               }
+
+               if (dcr && !nfit_mem->dcr) {
+                       dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
+                                       spa->range_index, dcr);
+                       return -ENODEV;
                }
 
                if (type == NFIT_SPA_DCR) {
@@ -594,6 +599,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
                                nfit_mem->idt_dcr = nfit_idt->idt;
                                break;
                        }
+                       nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
                } else {
                        /*
                         * A single dimm may belong to multiple SPA-PM
@@ -602,13 +608,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
                         */
                        nfit_mem->memdev_pmem = nfit_memdev->memdev;
                }
-
-               if (found)
-                       continue;
-
-               rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
-               if (rc)
-                       return rc;
        }
 
        return 0;
index daaf1c4e1e0f78657afb1f140b49e0cf3162d6df..80e55cb0827ba914751dca3140f2d04ca64ca6a6 100644 (file)
@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
                },
        },
-       {
-       .callback = video_detect_force_vendor,
-       .ident = "Dell Inspiron 5737",
-       .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-               DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
-               },
-       },
 
        /*
         * These models have a working acpi_video backlight control, and using
index a39e85f9efa98854768f39b01502274401673957..7d00b7a015ead0182feaace6231a8d7fad719b64 100644 (file)
@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
                        if (get_user(cookie, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
 
-                       ptr += sizeof(void *);
+                       ptr += sizeof(cookie);
                        list_for_each_entry(w, &proc->delivered_death, entry) {
                                struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
 
index 99921aa0daca1168076ad7d5307f60fc4c57e48d..60a15831c00956301c95dd5805c8fb11fe9dfdb1 100644 (file)
@@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
        { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
        { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
        { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
+       { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
index 1f225cc1827f25eea8c5773751eb77cb56a7b004..998c6a85ad8943104e37a98a33bbc72ad8e36211 100644 (file)
@@ -1142,8 +1142,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
 
        /* mark esata ports */
        tmp = readl(port_mmio + PORT_CMD);
-       if ((tmp & PORT_CMD_HPCP) ||
-           ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
+       if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
                ap->pflags |= ATA_PFLAG_EXTERNAL;
 }
 
index 7e959f90c0203f9b94124ab73867e4a72bd11188..e417e1a1d02c568595fa465682ade483a9d0645a 100644 (file)
@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
 int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
                     int cmd, void __user *arg)
 {
-       int val = -EINVAL, rc = -EINVAL;
+       unsigned long val;
+       int rc = -EINVAL;
        unsigned long flags;
 
        switch (cmd) {
-       case ATA_IOC_GET_IO32:
+       case HDIO_GET_32BIT:
                spin_lock_irqsave(ap->lock, flags);
                val = ata_ioc32(ap);
                spin_unlock_irqrestore(ap->lock, flags);
-               if (copy_to_user(arg, &val, 1))
-                       return -EFAULT;
-               return 0;
+               return put_user(val, (unsigned long __user *)arg);
 
-       case ATA_IOC_SET_IO32:
+       case HDIO_SET_32BIT:
                val = (unsigned long) arg;
                rc = 0;
                spin_lock_irqsave(ap->lock, flags);
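
The removed path did copy_to_user(arg, &val, 1) -- a single byte of an int -- so the result depended on byte order and silently dropped the upper bits, which is why the fix switches to a properly typed unsigned long and put_user(). A userspace demonstration of the one-byte-copy hazard:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            unsigned long val = 0x100;      /* any value needing > 8 bits */
            unsigned long out = 0;

            memcpy(&out, &val, 1);          /* the old 1-byte copy: on a
                                             * little-endian machine this
                                             * yields 0, losing the value */
            assert(out != val);

            memcpy(&out, &val, sizeof(val));        /* full-width copy, as
                                                     * put_user() performs */
            assert(out == val);
            return 0;
    }
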
index cdf6215a9a22beb93ede75a702797e12c5ad4870..7dbba387d12a7c9656de03ed3a1619dff301016e 100644 (file)
@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 {
        struct ata_port *ap = qc->ap;
-       unsigned long flags;
 
        if (ap->ops->error_handler) {
                if (in_wq) {
-                       spin_lock_irqsave(ap->lock, flags);
-
                        /* EH might have kicked in while host lock is
                         * released.
                         */
@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
                                } else
                                        ata_port_freeze(ap);
                        }
-
-                       spin_unlock_irqrestore(ap->lock, flags);
                } else {
                        if (likely(!(qc->err_mask & AC_ERR_HSM)))
                                ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
                }
        } else {
                if (in_wq) {
-                       spin_lock_irqsave(ap->lock, flags);
                        ata_sff_irq_on(ap);
                        ata_qc_complete(qc);
-                       spin_unlock_irqrestore(ap->lock, flags);
                } else
                        ata_qc_complete(qc);
        }
@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 {
        struct ata_link *link = qc->dev->link;
        struct ata_eh_info *ehi = &link->eh_info;
-       unsigned long flags = 0;
        int poll_next;
 
+       lockdep_assert_held(ap->lock);
+
        WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 
        /* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@ fsm_start:
                        }
                }
 
-               /* Send the CDB (atapi) or the first data block (ata pio out).
-                * During the state transition, interrupt handler shouldn't
-                * be invoked before the data transfer is complete and
-                * hsm_task_state is changed. Hence, the following locking.
-                */
-               if (in_wq)
-                       spin_lock_irqsave(ap->lock, flags);
-
                if (qc->tf.protocol == ATA_PROT_PIO) {
                        /* PIO data out protocol.
                         * send first data block.
@@ -1135,9 +1121,6 @@ fsm_start:
                        /* send CDB */
                        atapi_send_cdb(ap, qc);
 
-               if (in_wq)
-                       spin_unlock_irqrestore(ap->lock, flags);
-
                /* if polling, ata_sff_pio_task() handles the rest.
                 * otherwise, interrupt handler takes over from here.
                 */
@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
        u8 status;
        int poll_next;
 
+       spin_lock_irq(ap->lock);
+
        BUG_ON(ap->sff_pio_task_link == NULL);
        /* qc can be NULL if timeout occurred */
        qc = ata_qc_from_tag(ap, link->active_tag);
        if (!qc) {
                ap->sff_pio_task_link = NULL;
-               return;
+               goto out_unlock;
        }
 
 fsm_start:
@@ -1381,11 +1366,14 @@ fsm_start:
         */
        status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
        if (status & ATA_BUSY) {
+               spin_unlock_irq(ap->lock);
                ata_msleep(ap, 2);
+               spin_lock_irq(ap->lock);
+
                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
                if (status & ATA_BUSY) {
                        ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
-                       return;
+                       goto out_unlock;
                }
        }
 
@@ -1402,6 +1390,8 @@ fsm_start:
         */
        if (poll_next)
                goto fsm_start;
+out_unlock:
+       spin_unlock_irq(ap->lock);
 }
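
With the locking hoisted into ata_sff_pio_task(), ata_sff_hsm_move() documents its contract with lockdep_assert_held() instead of juggling the lock itself. An analogue of asserting a caller-held lock (pthread mutexes do not expose ownership, so a manual flag stands in for lockdep):

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool lock_held;                  /* manual lockdep stand-in */

    static void hsm_move(void)
    {
            assert(lock_held);              /* lockdep_assert_held()
                                             * analogue: caller must hold
                                             * the lock */
            /* ... state machine work ... */
    }

    static void pio_task(void)
    {
            pthread_mutex_lock(&lock);
            lock_held = true;
            hsm_move();                     /* no lock juggling inside */
            lock_held = false;
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            pio_task();
            return 0;
    }
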
 
 /**
index 12fe0f3bb7e9b0bf8120af19badf4ddf72903565..c8b6a780a29050775f66ec60bc89773d71677683 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/libata.h>
 #include <scsi/scsi_host.h>
 
+#include <asm/mach-rc32434/rb.h>
+
 #define DRV_NAME       "pata-rb532-cf"
 #define DRV_VERSION    "0.1.0"
 #define DRV_DESC       "PATA driver for RouterBOARD 532 Compact Flash"
@@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
        int gpio;
        struct resource *res;
        struct ata_host *ah;
+       struct cf_device *pdata;
        struct rb532_cf_info *info;
        int ret;
 
@@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
                return -ENOENT;
        }
 
-       gpio = irq_to_gpio(irq);
+       pdata = dev_get_platdata(&pdev->dev);
+       if (!pdata) {
+               dev_err(&pdev->dev, "no platform data specified\n");
+               return -EINVAL;
+       }
+
+       gpio = pdata->gpio_pin;
        if (gpio < 0) {
                dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
                return -ENOENT;
index 92f0ee388f9e0bfddd0cdf32b684edf25741d991..968897108c76c00d39ac528edfa29012a27ec5a5 100644 (file)
@@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
          .driver_info = BTUSB_BCM_PATCHRAM },
 
+       /* Toshiba Corp - Broadcom based */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
+         .driver_info = BTUSB_BCM_PATCHRAM },
+
        /* Intel Bluetooth USB Bootloader (RAM module) */
        { USB_DEVICE(0x8087, 0x0a5a),
          .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
index 2fe37f708dc70828ffa10fc165ecc830fff49c86..813003d6ce097f4bbbda48e7d471b73388b611ac 100644 (file)
@@ -148,6 +148,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
        unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
        unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
        unsigned long div0, div1 = 0, mux_reg;
+       unsigned long flags;
 
        /* find out the divider values to use for clock data */
        while ((cfg_data->prate * 1000) != ndata->new_rate) {
@@ -156,7 +157,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
                cfg_data++;
        }
 
-       spin_lock(cpuclk->lock);
+       spin_lock_irqsave(cpuclk->lock, flags);
 
        /*
         * For the selected PLL clock frequency, get the pre-defined divider
@@ -212,7 +213,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
                                DIV_MASK_ALL);
        }
 
-       spin_unlock(cpuclk->lock);
+       spin_unlock_irqrestore(cpuclk->lock, flags);
        return 0;
 }
 
@@ -223,6 +224,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
        const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
        unsigned long div = 0, div_mask = DIV_MASK;
        unsigned long mux_reg;
+       unsigned long flags;
 
        /* find out the divider values to use for clock data */
        if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
@@ -233,7 +235,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
                }
        }
 
-       spin_lock(cpuclk->lock);
+       spin_lock_irqsave(cpuclk->lock, flags);
 
        /* select mout_apll as the alternate parent */
        mux_reg = readl(base + E4210_SRC_CPU);
@@ -246,7 +248,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
        }
 
        exynos_set_safe_div(base, div, div_mask);
-       spin_unlock(cpuclk->lock);
+       spin_unlock_irqrestore(cpuclk->lock, flags);
        return 0;
 }
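
spin_lock() becomes spin_lock_irqsave() so the critical section is safe regardless of the interrupt state on entry: the saved flags restore the previous state instead of unconditionally re-enabling. A loose userspace analogue using POSIX signal masking, with signals standing in for interrupts:

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
            sigset_t block_all, saved;

            sigfillset(&block_all);

            /* spin_lock_irqsave() analogue: mask "interrupts" and
             * remember what was masked before, so nesting restores
             * correctly. */
            sigprocmask(SIG_BLOCK, &block_all, &saved);

            puts("critical section: no signal handler can run here");

            /* spin_unlock_irqrestore() analogue: restore the previous
             * mask rather than unconditionally unblocking. */
            sigprocmask(SIG_SETMASK, &saved, NULL);
            return 0;
    }
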
 
index 6ee91401918eba99a33c67b554da853747a0c5fa..4da2af9694a23b267cebdfa52da624e9f43b61fe 100644 (file)
@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
 
        __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
        __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-       clk_disable(tcd->clk);
+       if (!clockevent_state_detached(d))
+               clk_disable(tcd->clk);
 
        return 0;
 }
index a92e94b40b5b031e12074e26483e2e0c61b1d986..dfc3bb410b00f23d644755410dbcf9452a8a99e6 100644 (file)
@@ -50,6 +50,8 @@
 
 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
 
+#define MIN_OSCR_DELTA         16
+
 static void __iomem *regbase;
 
 static cycle_t vt8500_timer_read(struct clocksource *cs)
@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
                cpu_relax();
        writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
 
-       if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
+       if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
                return -ETIME;
 
        writel(1, regbase + TIMER_IER_VAL);
@@ -151,7 +153,7 @@ static void __init vt8500_timer_init(struct device_node *np)
                pr_err("%s: setup_irq failed for %s\n", __func__,
                                                        clockevent.name);
        clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
-                                       4, 0xf0000000);
+                                       MIN_OSCR_DELTA * 2, 0xf0000000);
 }
 
 CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
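
Replacing the bare 16 with MIN_OSCR_DELTA ties the too-close check in set_next_event to the minimum delta passed at registration (MIN_OSCR_DELTA * 2), so the two values can no longer drift apart. The shape of the fix, reduced to a runnable sketch:

    #include <stdio.h>

    #define MIN_DELTA 16    /* one definition; both users stay in sync */

    static int set_next_event(unsigned long cycles, unsigned long now)
    {
            unsigned long alarm = now + cycles;

            if ((long)(alarm - now) <= MIN_DELTA)   /* too close to program */
                    return -1;
            return 0;
    }

    int main(void)
    {
            /* registration would pass MIN_DELTA * 2 as the minimum,
             * instead of a second, unrelated literal */
            printf("%d %d\n", set_next_event(8, 100), set_next_event(64, 100));
            return 0;
    }
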
index b260576ddb129c9bf485a12a954d0b1db30e08c5..d994b0f652d32320c525801d140e23b2d4607329 100644 (file)
@@ -356,16 +356,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
        if (!have_governor_per_policy())
                cdata->gdbs_data = dbs_data;
 
+       policy->governor_data = dbs_data;
+
        ret = sysfs_create_group(get_governor_parent_kobj(policy),
                                 get_sysfs_attr(dbs_data));
        if (ret)
                goto reset_gdbs_data;
 
-       policy->governor_data = dbs_data;
-
        return 0;
 
 reset_gdbs_data:
+       policy->governor_data = NULL;
+
        if (!have_governor_per_policy())
                cdata->gdbs_data = NULL;
        cdata->exit(dbs_data, !policy->governor->initialized);
@@ -386,16 +388,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
        if (!cdbs->shared || cdbs->shared->policy)
                return -EBUSY;
 
-       policy->governor_data = NULL;
        if (!--dbs_data->usage_count) {
                sysfs_remove_group(get_governor_parent_kobj(policy),
                                   get_sysfs_attr(dbs_data));
 
+               policy->governor_data = NULL;
+
                if (!have_governor_per_policy())
                        cdata->gdbs_data = NULL;
 
                cdata->exit(dbs_data, policy->governor->initialized == 1);
                kfree(dbs_data);
+       } else {
+               policy->governor_data = NULL;
        }
 
        free_common_dbs_info(policy, cdata);
index 1d99c97defa9204f52ad8605fd5f25c1d2ad0540..096377232747652f99d8a89e0681da7134b716fe 100644 (file)
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
        }
 }
 #else
-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
 {
        return 0;
 }
index 370c661c7d7b25c237a3376dd18da79fbd44eadd..fa00f3a186da140694dbc4529f8327668f434d2c 100644 (file)
@@ -1688,6 +1688,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
        list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
                at_xdmac_remove_xfer(atchan, desc);
 
+       clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
        clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
        spin_unlock_irqrestore(&atchan->lock, flags);
 
@@ -1820,6 +1821,8 @@ static int atmel_xdmac_resume(struct device *dev)
                atchan = to_at_xdmac_chan(chan);
                at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
                if (at_xdmac_chan_is_cyclic(atchan)) {
+                       if (at_xdmac_chan_is_paused(atchan))
+                               at_xdmac_device_resume(chan);
                        at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
                        at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
                        at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
index 7067b6ddc1db6e7662f69bde5715639c479900d8..4f099ea29f83e991349892d91f15c7d0f07d8e38 100644 (file)
@@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
 
 /* Called with dwc->lock held and all DMAC interrupts disabled */
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-               u32 status_err, u32 status_xfer)
+               u32 status_block, u32 status_err, u32 status_xfer)
 {
        unsigned long flags;
 
-       if (dwc->mask) {
+       if (status_block & dwc->mask) {
                void (*callback)(void *param);
                void *callback_param;
 
                dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
                                channel_readl(dwc, LLP));
+               dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 
                callback = dwc->cdesc->period_callback;
                callback_param = dwc->cdesc->period_callback_param;
@@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
                channel_writel(dwc, CTL_LO, 0);
                channel_writel(dwc, CTL_HI, 0);
 
+               dma_writel(dw, CLEAR.BLOCK, dwc->mask);
                dma_writel(dw, CLEAR.ERROR, dwc->mask);
                dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
                spin_unlock_irqrestore(&dwc->lock, flags);
        }
+
+       /* Re-enable interrupts */
+       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 }
 
 /* ------------------------------------------------------------------------- */
@@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data)
 {
        struct dw_dma *dw = (struct dw_dma *)data;
        struct dw_dma_chan *dwc;
+       u32 status_block;
        u32 status_xfer;
        u32 status_err;
        int i;
 
+       status_block = dma_readl(dw, RAW.BLOCK);
        status_xfer = dma_readl(dw, RAW.XFER);
        status_err = dma_readl(dw, RAW.ERROR);
 
@@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data)
        for (i = 0; i < dw->dma.chancnt; i++) {
                dwc = &dw->chan[i];
                if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-                       dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+                       dwc_handle_cyclic(dw, dwc, status_block, status_err,
+                                       status_xfer);
                else if (status_err & (1 << i))
                        dwc_handle_error(dw, dwc);
                else if (status_xfer & (1 << i))
                        dwc_scan_descriptors(dw, dwc);
        }
 
-       /*
-        * Re-enable interrupts.
-        */
+       /* Re-enable interrupts */
        channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
        channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
@@ -635,6 +641,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
         * softirq handler.
         */
        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
 
        status = dma_readl(dw, STATUS_INT);
@@ -645,6 +652,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 
                /* Try to recover */
                channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+               channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
                channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -1111,6 +1119,7 @@ static void dw_dma_off(struct dw_dma *dw)
        dma_writel(dw, CFG, 0);
 
        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+       channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1216,6 +1225,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 
        /* Disable interrupts */
        channel_clear_bit(dw, MASK.XFER, dwc->mask);
+       channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
        channel_clear_bit(dw, MASK.ERROR, dwc->mask);
 
        spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1245,7 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 int dw_dma_cyclic_start(struct dma_chan *chan)
 {
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
-       struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+       struct dw_dma           *dw = to_dw_dma(chan->device);
        unsigned long           flags;
 
        if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1255,25 +1265,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 
        spin_lock_irqsave(&dwc->lock, flags);
 
-       /* Assert channel is idle */
-       if (dma_readl(dw, CH_EN) & dwc->mask) {
-               dev_err(chan2dev(&dwc->chan),
-                       "%s: BUG: Attempted to start non-idle channel\n",
-                       __func__);
-               dwc_dump_chan_regs(dwc);
-               spin_unlock_irqrestore(&dwc->lock, flags);
-               return -EBUSY;
-       }
-
-       dma_writel(dw, CLEAR.ERROR, dwc->mask);
-       dma_writel(dw, CLEAR.XFER, dwc->mask);
+       /* Enable interrupts to perform cyclic transfer */
+       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 
-       /* Setup DMAC channel registers */
-       channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
-       channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
-       channel_writel(dwc, CTL_HI, 0);
-
-       channel_set_bit(dw, CH_EN, dwc->mask);
+       dwc_dostart(dwc, dwc->cdesc->desc[0]);
 
        spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -1479,6 +1474,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 
        dwc_chan_disable(dw, dwc);
 
+       dma_writel(dw, CLEAR.BLOCK, dwc->mask);
        dma_writel(dw, CLEAR.ERROR, dwc->mask);
        dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -1567,9 +1563,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        /* Force dma off, just in case */
        dw_dma_off(dw);
 
-       /* Disable BLOCK interrupts as well */
-       channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
-
        /* Create a pool of consistent memory blocks for hardware descriptors */
        dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
                                         sizeof(struct dw_desc), 4, 0);
index fc4156afa070306cd2fee4502f361717f364706b..a59061e4221ad1528bcdb919a0b651b025e4d91f 100644 (file)
@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
                (PXA_DCMD_LENGTH & sizeof(u32));
        if (flags & DMA_PREP_INTERRUPT)
                updater->dcmd |= PXA_DCMD_ENDIRQEN;
+       if (sw_desc->cyclic)
+               sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
 }
 
 static bool is_desc_completed(struct virt_dma_desc *vd)
@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
                dev_dbg(&chan->vc.chan.dev->device,
                        "%s(): checking txd %p[%x]: completed=%d\n",
                        __func__, vd, vd->tx.cookie, is_desc_completed(vd));
+               if (to_pxad_sw_desc(vd)->cyclic) {
+                       vchan_cyclic_callback(vd);
+                       break;
+               }
                if (is_desc_completed(vd)) {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
                return NULL;
 
        pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
-       dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len);
+       dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
        dev_dbg(&chan->vc.chan.dev->device,
                "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
                __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
index 592af5f0cf391d292e05fd5c3a94b5589c7cb684..53587377e67268fd90efb4fbfed2358dcbe03717 100644 (file)
@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
  */
 void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
 {
-       int status;
-
        if (!edac_dev->edac_check)
                return;
 
-       status = cancel_delayed_work(&edac_dev->work);
-       if (status == 0) {
-               /* workq instance might be running, wait for it */
-               flush_workqueue(edac_workqueue);
-       }
+       edac_dev->op_state = OP_OFFLINE;
+
+       cancel_delayed_work_sync(&edac_dev->work);
+       flush_workqueue(edac_workqueue);
 }
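
The teardown now flips op_state to OP_OFFLINE before cancel_delayed_work_sync(), so a callback that is already mid-run sees the flag and stops re-arming itself; the synchronous cancel then waits for it to finish. A pthread analogue of the stop-flag-then-join pattern (names invented):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <unistd.h>

    static atomic_bool offline;

    static void *poller(void *arg)
    {
            (void)arg;
            while (!atomic_load(&offline)) {    /* re-arm only while online */
                    /* ... periodic check ... */
                    usleep(1000);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, poller, NULL);
            usleep(5000);

            atomic_store(&offline, true);   /* 1) mark offline first ... */
            pthread_join(t, NULL);          /* 2) ... then wait, as
                                             * cancel_delayed_work_sync()
                                             * does */
            return 0;
    }
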
 
 /*
index 77ecd6a4179aaa2e2b6504da395debe6351508d9..1b2c2187b34708215045d3413b28e4c7baa05c1a 100644 (file)
@@ -586,18 +586,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
  */
 static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
 {
-       int status;
-
-       if (mci->op_state != OP_RUNNING_POLL)
-               return;
-
-       status = cancel_delayed_work(&mci->work);
-       if (status == 0) {
-               edac_dbg(0, "not canceled, flush the queue\n");
+       mci->op_state = OP_OFFLINE;
 
-               /* workq instance might be running, wait for it */
-               flush_workqueue(edac_workqueue);
-       }
+       cancel_delayed_work_sync(&mci->work);
+       flush_workqueue(edac_workqueue);
 }
 
 /*
index a75acea0f674ed7ca9101ca7ce8a65e5b0961905..58aed67b7eba499c126a5d52cf3b26cd5497c2dc 100644 (file)
@@ -880,21 +880,26 @@ static struct device_type mci_attr_type = {
 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
                                 const struct attribute_group **groups)
 {
+       char *name;
        int i, err;
 
        /*
         * The memory controller needs its own bus, in order to avoid
         * namespace conflicts at /sys/bus/edac.
         */
-       mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
-       if (!mci->bus->name)
+       name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+       if (!name)
                return -ENOMEM;
 
+       mci->bus->name = name;
+
        edac_dbg(0, "creating bus %s\n", mci->bus->name);
 
        err = bus_register(mci->bus);
-       if (err < 0)
-               goto fail_free_name;
+       if (err < 0) {
+               kfree(name);
+               return err;
+       }
 
        /* get the /sys/devices/system/edac subsys reference */
        mci->dev.type = &mci_attr_type;
@@ -961,8 +966,8 @@ fail_unregister_dimm:
        device_unregister(&mci->dev);
 fail_unregister_bus:
        bus_unregister(mci->bus);
-fail_free_name:
-       kfree(mci->bus->name);
+       kfree(name);
+
        return err;
 }
 
@@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
 
 void edac_unregister_sysfs(struct mem_ctl_info *mci)
 {
+       const char *name = mci->bus->name;
+
        edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
        device_unregister(&mci->dev);
        bus_unregister(mci->bus);
-       kfree(mci->bus->name);
+       kfree(name);
 }
 
 static void mc_attr_release(struct device *dev)
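
The sysfs hunks above fix pointer ownership: the error paths now free the exact pointer kasprintf() returned, held in a local, instead of re-reading mci->bus->name, which may have been reassigned in the meantime; likewise edac_unregister_sysfs() snapshots the name before device_unregister() can tear the mci down. A user-space analog of the idea (do_register() is a stand-in):

#include <stdio.h>
#include <stdlib.h>

struct bus { const char *name; };

static int do_register(struct bus *b) { (void)b; return -1; }

static int register_bus(struct bus *b, int idx)
{
	char *name = malloc(16);

	if (!name)
		return -1;
	snprintf(name, 16, "mc%d", idx);
	b->name = name;

	if (do_register(b) < 0) {
		free(name);	/* still the pointer we allocated */
		return -1;
	}
	return 0;
}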
index 2cf44b4db80c8beac0a5575cf00d1ed4cca32c4d..b4b38603b804e839480f75c1ce38281108ef1186 100644 (file)
@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
  */
 static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
 {
-       int status;
-
        edac_dbg(0, "\n");
 
-       status = cancel_delayed_work(&pci->work);
-       if (status == 0)
-               flush_workqueue(edac_workqueue);
+       pci->op_state = OP_OFFLINE;
+
+       cancel_delayed_work_sync(&pci->work);
+       flush_workqueue(edac_workqueue);
 }
 
 /*
index 756eca8c4cf8f291025a3ad44f7cbb9981aeb5fd..10e6774ab2a2248d0a04d935b773c15a38519f46 100644 (file)
@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
        }
 
        if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
-           efivar_validate(name, data, size) == false) {
+           efivar_validate(vendor, name, data, size) == false) {
                printk(KERN_ERR "efivars: Malformed variable content\n");
                return -EINVAL;
        }
@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
        }
 
        if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
-           efivar_validate(name, data, size) == false) {
+           efivar_validate(new_var->VendorGuid, name, data,
+                           size) == false) {
                printk(KERN_ERR "efivars: Malformed variable content\n");
                return -EINVAL;
        }
@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
 static int
 efivar_create_sysfs_entry(struct efivar_entry *new_var)
 {
-       int i, short_name_size;
+       int short_name_size;
        char *short_name;
-       unsigned long variable_name_size;
-       efi_char16_t *variable_name;
+       unsigned long utf8_name_size;
+       efi_char16_t *variable_name = new_var->var.VariableName;
        int ret;
 
-       variable_name = new_var->var.VariableName;
-       variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
-
        /*
-        * Length of the variable bytes in ASCII, plus the '-' separator,
+        * Length of the variable bytes in UTF8, plus the '-' separator,
         * plus the GUID, plus trailing NUL
         */
-       short_name_size = variable_name_size / sizeof(efi_char16_t)
-                               + 1 + EFI_VARIABLE_GUID_LEN + 1;
-
-       short_name = kzalloc(short_name_size, GFP_KERNEL);
+       utf8_name_size = ucs2_utf8size(variable_name);
+       short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
 
+       short_name = kmalloc(short_name_size, GFP_KERNEL);
        if (!short_name)
                return -ENOMEM;
 
-       /* Convert Unicode to normal chars (assume top bits are 0),
-          ala UTF-8 */
-       for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
-               short_name[i] = variable_name[i] & 0xFF;
-       }
+       ucs2_as_utf8(short_name, variable_name, short_name_size);
+
        /* This is ugly, but necessary to separate one vendor's
           private variables from another's.         */
-
-       *(short_name + strlen(short_name)) = '-';
+       short_name[utf8_name_size] = '-';
        efi_guid_to_str(&new_var->var.VendorGuid,
-                        short_name + strlen(short_name));
+                        short_name + utf8_name_size + 1);
 
        new_var->kobj.kset = efivars_kset;
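
The rewrite above replaces a lossy byte-truncation loop with a real UCS-2 to UTF-8 conversion, and computes the '-' separator offset from utf8_name_size instead of strlen() on a buffer that kmalloc() no longer zeroes. A user-space sketch of the sizing rule the kernel helper is assumed to follow:

#include <stddef.h>
#include <stdint.h>

/* Bytes needed to encode a NUL-terminated UCS-2 string as UTF-8,
 * excluding the terminator; each UCS-2 code point takes 1-3 bytes. */
static size_t ucs2_utf8_bytes(const uint16_t *s)
{
	size_t n = 0;

	for (; *s; s++) {
		if (*s < 0x80)
			n += 1;		/* ASCII */
		else if (*s < 0x800)
			n += 2;
		else
			n += 3;
	}
	return n;
}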
 
index 70a0fb10517f94ea5b28bada280d9935f0693cc7..7f2ea21c730dd76a86f3ab5a1e253878878960a7 100644 (file)
@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
 }
 
 struct variable_validate {
+       efi_guid_t vendor;
        char *name;
        bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
                         unsigned long len);
 };
 
+/*
+ * This is the list of variables we need to validate, as well as the
+ * whitelist for what we think is safe not to default to immutable.
+ *
+ * If it has a validate() method that's not NULL, it'll go into the
+ * validation routine.  If not, it is assumed valid, but still used for
+ * whitelisting.
+ *
+ * Note that it's sorted by {vendor,name}, but globbed names must come after
+ * any other name with the same prefix.
+ */
 static const struct variable_validate variable_validate[] = {
-       { "BootNext", validate_uint16 },
-       { "BootOrder", validate_boot_order },
-       { "DriverOrder", validate_boot_order },
-       { "Boot*", validate_load_option },
-       { "Driver*", validate_load_option },
-       { "ConIn", validate_device_path },
-       { "ConInDev", validate_device_path },
-       { "ConOut", validate_device_path },
-       { "ConOutDev", validate_device_path },
-       { "ErrOut", validate_device_path },
-       { "ErrOutDev", validate_device_path },
-       { "Timeout", validate_uint16 },
-       { "Lang", validate_ascii_string },
-       { "PlatformLang", validate_ascii_string },
-       { "", NULL },
+       { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
+       { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
+       { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
+       { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
+       { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
+       { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
+       { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
+       { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
+       { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
+       { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
+       { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
+       { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
+       { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
+       { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
+       { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
+       { LINUX_EFI_CRASH_GUID, "*", NULL },
+       { NULL_GUID, "", NULL },
 };
 
+static bool
+variable_matches(const char *var_name, size_t len, const char *match_name,
+                int *match)
+{
+       for (*match = 0; ; (*match)++) {
+               char c = match_name[*match];
+               char u = var_name[*match];
+
+               /* Wildcard in the matching name means we've matched */
+               if (c == '*')
+                       return true;
+
+               /* Case sensitive match */
+               if (!c && *match == len)
+                       return true;
+
+               if (c != u)
+                       return false;
+
+               if (!c)
+                       return true;
+       }
+       return true;
+}
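
variable_matches() accepts either an exact, case-sensitive match or a '*' prefix glob, and reports where the wildcard fired. Some illustrative calls (a user-space reading of the function above; len includes the NUL, since the caller below passes utf8_size + 1):

/* variable_matches("Boot0001", 9, "Boot*", &m)       -> true, m == 4
 * variable_matches("BootOrder", 10, "BootOrder", &m) -> true
 * variable_matches("Bootx", 6, "BootOrder", &m)      -> false
 *
 * On a wildcard hit, *match is the offset of '*', which callers hand
 * to the validate() hook so it can parse the variable-name suffix. */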
+
 bool
-efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
+efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+               unsigned long data_size)
 {
        int i;
-       u16 *unicode_name = var_name;
+       unsigned long utf8_size;
+       u8 *utf8_name;
 
-       for (i = 0; variable_validate[i].validate != NULL; i++) {
-               const char *name = variable_validate[i].name;
-               int match;
+       utf8_size = ucs2_utf8size(var_name);
+       utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
+       if (!utf8_name)
+               return false;
 
-               for (match = 0; ; match++) {
-                       char c = name[match];
-                       u16 u = unicode_name[match];
+       ucs2_as_utf8(utf8_name, var_name, utf8_size);
+       utf8_name[utf8_size] = '\0';
 
-                       /* All special variables are plain ascii */
-                       if (u > 127)
-                               return true;
+       for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+               const char *name = variable_validate[i].name;
+               int match = 0;
 
-                       /* Wildcard in the matching name means we've matched */
-                       if (c == '*')
-                               return variable_validate[i].validate(var_name,
-                                                            match, data, len);
+               if (efi_guidcmp(vendor, variable_validate[i].vendor))
+                       continue;
 
-                       /* Case sensitive match */
-                       if (c != u)
+               if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
+                       if (variable_validate[i].validate == NULL)
                                break;
-
-                       /* Reached the end of the string while matching */
-                       if (!c)
-                               return variable_validate[i].validate(var_name,
-                                                            match, data, len);
+                       kfree(utf8_name);
+                       return variable_validate[i].validate(var_name, match,
+                                                            data, data_size);
                }
        }
-
+       kfree(utf8_name);
        return true;
 }
 EXPORT_SYMBOL_GPL(efivar_validate);
 
+bool
+efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
+                            size_t len)
+{
+       int i;
+       bool found = false;
+       int match = 0;
+
+       /*
+        * Check if our variable is in the validated variables list
+        */
+       for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+               if (efi_guidcmp(variable_validate[i].vendor, vendor))
+                       continue;
+
+               if (variable_matches(var_name, len,
+                                    variable_validate[i].name, &match)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       /*
+        * If it's in our list, it is removable.
+        */
+       return found;
+}
+EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
+
 static efi_status_t
 check_var_size(u32 attributes, unsigned long size)
 {
@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
 
        *set = false;
 
-       if (efivar_validate(name, data, *size) == false)
+       if (efivar_validate(*vendor, name, data, *size) == false)
                return -EINVAL;
 
        /*
index 04c2707570308df807f96b10e2cf3695cf8da092..ca066018ea34f371ebbc415363ef3d2ede455a13 100644 (file)
@@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
 
 # add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
        ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
        amdgpu_amdkfd_gfx_v7.o
 
@@ -31,6 +31,7 @@ amdgpu-y += \
 
 # add GMC block
 amdgpu-y += \
+       gmc_v7_0.o \
        gmc_v8_0.o
 
 # add IH block
index 048cfe073dae6a1e89112b219c6a130b3d513be9..bb1099c549dfbe643428b99c0ac33489028a9558 100644 (file)
@@ -604,8 +604,6 @@ struct amdgpu_sa_manager {
        uint32_t                align;
 };
 
-struct amdgpu_sa_bo;
-
 /* sub-allocation buffer */
 struct amdgpu_sa_bo {
        struct list_head                olist;
@@ -2314,6 +2312,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
                                     uint32_t flags);
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+                                 unsigned long end);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
                                 struct ttm_mem_reg *mem);
index 89c3dd62ba21ecd116b8490ed105573cecd86a82..119cdc2c43e7cef7fadf72736100cfcef1e200cd 100644 (file)
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
                        } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
                                /* Don't try to start link training before we
                                 * have the dpcd */
-                               if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+                               if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
                                        return;
 
                                /* set it to OFF so that drm_helper_connector_dpms()
index d5b421330145c87641a3b15143ca77d011eea4ca..c961fe093e12fb3d7fc4f71b358ef594bfc8621c 100644 (file)
@@ -1744,15 +1744,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        }
 
        /* post card */
-       amdgpu_atom_asic_init(adev->mode_info.atom_context);
+       if (!amdgpu_card_posted(adev))
+               amdgpu_atom_asic_init(adev->mode_info.atom_context);
 
        r = amdgpu_resume(adev);
+       if (r)
+               DRM_ERROR("amdgpu_resume failed (%d).\n", r);
 
        amdgpu_fence_driver_resume(adev);
 
-       r = amdgpu_ib_ring_tests(adev);
-       if (r)
-               DRM_ERROR("ib ring test failed (%d).\n", r);
+       if (resume) {
+               r = amdgpu_ib_ring_tests(adev);
+               if (r)
+                       DRM_ERROR("ib ring test failed (%d).\n", r);
+       }
 
        r = amdgpu_late_init(adev);
        if (r)
@@ -1788,6 +1793,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        }
 
        drm_kms_helper_poll_enable(dev);
+       drm_helper_hpd_irq_event(dev);
 
        if (fbcon) {
                amdgpu_fbdev_set_suspend(adev, 0);
index 5580d3420c3a368815364d8eccce3e1421ce6a12..0c713a908304a46caa718cab56f531d84ba5c97c 100644 (file)
@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 
        struct drm_crtc *crtc = &amdgpuCrtc->base;
        unsigned long flags;
-       unsigned i;
-       int vpos, hpos, stat, min_udelay;
+       unsigned i, repcnt = 4;
+       int vpos, hpos, stat, min_udelay = 0;
        struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
        amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
         * In practice this won't execute very often unless on very fast
         * machines because the time window for this to happen is very small.
         */
-       for (;;) {
+       while (amdgpuCrtc->enabled && repcnt--) {
                /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
                 * start in hpos, and to the "fudged earlier" vblank start in
                 * vpos.
@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
                /* Sleep at least until estimated real start of hw vblank */
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+               if (min_udelay > vblank->framedur_ns / 2000) {
+                       /* Don't wait ridiculously long - something is wrong */
+                       repcnt = 0;
+                       break;
+               }
                usleep_range(min_udelay, 2 * min_udelay);
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
        };
 
+       if (!repcnt)
+               DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+                                "framedur %d, linedur %d, stat %d, vpos %d, "
+                                "hpos %d\n", work->crtc_id, min_udelay,
+                                vblank->framedur_ns / 1000,
+                                vblank->linedur_ns / 1000, stat, vpos, hpos);
+
        /* do the flip (mmio) */
        adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
        /* set the flip status */
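
The loop above loses its unbounded for (;;): it now gives up after four passes, skips entirely when the CRTC is disabled, and bails out if the computed sleep exceeds half a frame, trading a possible hang for a debug message. A user-space sketch of the shape (names and units are illustrative):

#include <stdbool.h>

static bool wait_for_vblank_start(bool enabled, int (*lines_to_start)(void),
				  int linedur_us, int framedur_us)
{
	unsigned repcnt = 4;

	while (enabled && repcnt--) {
		int lines = lines_to_start();

		if (lines >= 0)
			return true;		/* past vblank start */
		int delay_us = (-lines + 1) * linedur_us;
		if (delay_us > framedur_us / 2)
			break;			/* timing looks bogus */
		/* usleep(delay_us) would go here */
	}
	return false;				/* flip anyway; caller logs */
}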
index 0508c5cd103aa125f4ab22ae50a9da3a35ae75fb..8d6668cedf6db769b7607ed99d487a7a3b6d4331 100644 (file)
@@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
        {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
        /* topaz */
-       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
        /* tonga */
        {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
        {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
index b1969f2b2038db79af8447fc9826ba8257207cf4..d4e2780c079663a643bddeaa7e2da5e15a2ff47d 100644 (file)
@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 
                list_for_each_entry(bo, &node->bos, mn_list) {
 
-                       if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+                       if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
+                                                         end))
                                continue;
 
                        r = amdgpu_bo_reserve(bo, true);
index c3ce103b6a33b2ef3a0a6f7f14877e18150ca405..b8fbbd7699e4586e13e57905b0cef818f6e43e6b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_cache.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                       AMDGPU_GEM_DOMAIN_OA);
 
        bo->flags = flags;
+
+       /* For architectures that don't support WC memory,
+        * mask out the WC flag from the BO
+        */
+       if (!drm_arch_can_wc_memory())
+               bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
        amdgpu_fill_placement_to_bo(bo, placement);
        /* Kernel allocation are uninterruptible */
        r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
@@ -399,7 +407,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                }
                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
-               if (lpfn && lpfn < bo->placements[i].lpfn)
+               if (!bo->placements[i].lpfn ||
+                   (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
index 22a8c7d3a3ab03e9dc3fb294c1f0eb818a6a4880..7ae15fad16edd8ff4d235a623a05373d5a9477f6 100644 (file)
@@ -595,11 +595,6 @@ force:
 
        /* update display watermarks based on new power state */
        amdgpu_display_bandwidth_update(adev);
-       /* update displays */
-       amdgpu_dpm_display_configuration_changed(adev);
-
-       adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-       adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
 
        /* wait for the rings to drain */
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -616,6 +611,12 @@ force:
 
        amdgpu_dpm_post_set_power_state(adev);
 
+       /* update displays */
+       amdgpu_dpm_display_configuration_changed(adev);
+
+       adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+       adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
        if (adev->pm.funcs->force_performance_level) {
                if (adev->pm.dpm.thermal_active) {
                        enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
index 8b88edb0434bfb43ed4b7aae7e6f3e7dc63d0893..ca72a2e487b9b9d846b65479e9b865dd1ef71281 100644 (file)
@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 
                for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
                        if (fences[i])
-                               fences[count++] = fences[i];
+                               fences[count++] = fence_get(fences[i]);
 
                if (count) {
                        spin_unlock(&sa_manager->wq.lock);
                        t = fence_wait_any_timeout(fences, count, false,
                                                   MAX_SCHEDULE_TIMEOUT);
+                       for (i = 0; i < count; ++i)
+                               fence_put(fences[i]);
+
                        r = (t > 0) ? 0 : t;
                        spin_lock(&sa_manager->wq.lock);
                } else {
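
The fence_get()/fence_put() pair matters because fence_wait_any_timeout() runs with the spinlock dropped: without a reference of its own per fence, a concurrent signal path could free one mid-wait. The shape, sketched as a kernel-style fragment (held[], lock and n are illustrative):

	spin_lock(&lock);
	for (i = 0, count = 0; i < n; ++i)
		if (fences[i])
			held[count++] = fence_get(fences[i]);
	spin_unlock(&lock);

	/* safe: each fence is pinned for the duration of the wait */
	fence_wait_any_timeout(held, count, false, MAX_SCHEDULE_TIMEOUT);

	for (i = 0; i < count; ++i)
		fence_put(held[i]);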
index dd005c336c974ad708827c25edc4a4534772bb2c..181ce39ef5e59f8e4d4fbf2feeb617af63deb611 100644 (file)
@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                fence = to_amdgpu_fence(sync->sync_to[i]);
 
                /* check if we really need to sync */
-               if (!amdgpu_fence_need_sync(fence, ring))
+               if (!amdgpu_enable_scheduler &&
+                   !amdgpu_fence_need_sync(fence, ring))
                        continue;
 
                /* prevent GPU deadlocks */
@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                }
 
                if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
-                       r = fence_wait(&fence->base, true);
+                       r = fence_wait(sync->sync_to[i], true);
                        if (r)
                                return r;
                        continue;
index 8a1752ff3d8e55a1d8b8ab3061f5c9c425058d0f..1cbb16e153079e7463a0ac070d1a19461deb588e 100644 (file)
@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
                                                       0, PAGE_SIZE,
                                                       PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
-                       while (--i) {
+                       while (i--) {
                                pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                                gtt->ttm.dma_address[i] = 0;
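
The while (--i) to while (i--) change is the classic unwind off-by-one: the pre-decrement form never unmaps page 0 and, when the very first mapping fails (i == 0), decrements past zero before testing. A runnable illustration:

#include <stdio.h>

int main(void)
{
	int i = 3;		/* mapping of page 3 failed */

	/* fixed form: visits 2, 1, 0 */
	while (i--)
		printf("unmap %d\n", i);

	/* the buggy while (--i) would visit only 2 and 1, and with
	 * i == 0 would underflow instead of doing nothing */
	return 0;
}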
@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
        return !!gtt->userptr;
 }
 
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+                                 unsigned long end)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       unsigned long size;
+
+       if (gtt == NULL)
+               return false;
+
+       if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+               return false;
+
+       size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+       if (gtt->userptr > end || gtt->userptr + size <= start)
+               return false;
+
+       return true;
+}
+
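The new helper boils down to a standard interval-overlap test between the userptr window and the invalidated range. Sketched on its own (this mirrors the check above):

#include <stdbool.h>

/* [ustart, ustart + size) vs [start, end]: disjoint iff one interval
 * ends before the other begins. */
static bool ranges_overlap(unsigned long ustart, unsigned long size,
			   unsigned long start, unsigned long end)
{
	return !(ustart > end || ustart + size <= start);
}
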
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
                        flags |= AMDGPU_PTE_SNOOPED;
        }
 
-       if (adev->asic_type >= CHIP_TOPAZ)
+       if (adev->asic_type >= CHIP_TONGA)
                flags |= AMDGPU_PTE_EXECUTABLE;
 
        flags |= AMDGPU_PTE_READABLE;
index b53d273eb7a1003a719fff9d1bd32a8b4fa20b78..8c5ec151ddac9565b961c5c464e6eee211838604 100644 (file)
@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                return -EINVAL;
 
        /* make sure object fit at this offset */
-       eaddr = saddr + size;
+       eaddr = saddr + size - 1;
        if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
                return -EINVAL;
 
        last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
-       if (last_pfn > adev->vm_manager.max_pfn) {
-               dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
+       if (last_pfn >= adev->vm_manager.max_pfn) {
+               dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
                        last_pfn, adev->vm_manager.max_pfn);
                return -EINVAL;
        }
@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
        spin_lock(&vm->it_lock);
-       it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+       it = interval_tree_iter_first(&vm->va, saddr, eaddr);
        spin_unlock(&vm->it_lock);
        if (it) {
                struct amdgpu_bo_va_mapping *tmp;
@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
        INIT_LIST_HEAD(&mapping->list);
        mapping->it.start = saddr;
-       mapping->it.last = eaddr - 1;
+       mapping->it.last = eaddr;
        mapping->offset = offset;
        mapping->flags = flags;
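
The interval tree here stores inclusive [start, last] ranges, so a mapping's end must be saddr + size - 1; carrying an exclusive end through the lookup is what produced the off-by-one these hunks remove. A worked example:

/* saddr = 0x1000, size = 0x1000 (one 4K page):
 *   exclusive end  0x2000 -- falsely collides with a mapping at 0x2000
 *   inclusive last 0x1fff -- correctly disjoint from it */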
 
@@ -1248,7 +1248,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                AMDGPU_VM_PTE_COUNT * 8);
-       unsigned pd_size, pd_entries, pts_size;
+       unsigned pd_size, pd_entries;
        int i, r;
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1266,8 +1266,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        pd_entries = amdgpu_vm_num_pdes(adev);
 
        /* allocate page table array */
-       pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
-       vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+       vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
        if (vm->page_tables == NULL) {
                DRM_ERROR("Cannot allocate memory for page table array\n");
                return -ENOMEM;
@@ -1327,7 +1326,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
                amdgpu_bo_unref(&vm->page_tables[i].bo);
-       kfree(vm->page_tables);
+       drm_free_large(vm->page_tables);
 
        amdgpu_bo_unref(&vm->page_directory);
        fence_put(vm->page_directory_fence);
index 72793f93e2fcc9989e5f936ec1d43ba0593ac69e..aa491540ba85bebbcf70c1d4d1be4b088026b9c9 100644 (file)
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vm_id, uint64_t pd_addr)
 {
        int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+                                WAIT_REG_MEM_FUNCTION(3) | /* equal */
+                                WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+       amdgpu_ring_write(ring, seq);
+       amdgpu_ring_write(ring, 0xffffffff);
+       amdgpu_ring_write(ring, 4); /* poll interval */
+
        if (usepfp) {
                /* sync CE with ME to prevent CE fetching CEIB before context switch is done */
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
index e1dcab98e24944217d1e6a7485e5bee42587c0b8..d1054034d14b116cbd702fd66d355c43e9374b91 100644 (file)
@@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
 MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
 MODULE_FIRMWARE("amdgpu/topaz_me.bin");
 MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 
 MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-       if (adev->asic_type != CHIP_STONEY) {
+       if ((adev->asic_type != CHIP_STONEY) &&
+           (adev->asic_type != CHIP_TOPAZ)) {
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
                err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
                if (!err) {
@@ -4681,7 +4681,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-                WAIT_REG_MEM_FUNCTION(3))); /* equal */
+                                WAIT_REG_MEM_FUNCTION(3) | /* equal */
+                                WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq);
index ed8abb58a785ef3efaba87a806e7bcdef7188a1b..272110cc18c2f2f83a6d6b1c1aa3ba8bec9d0df2 100644 (file)
@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 
 MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+static const u32 golden_settings_iceland_a11[] =
+{
+       mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+       mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               amdgpu_program_register_sequence(adev,
+                                                iceland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_iceland_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+               break;
+       default:
+               break;
+       }
+}
 
 /**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
+ * gmc7_mc_wait_for_idle - wait for MC idle callback.
  *
  * @adev: amdgpu_device pointer
  *
@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_HAWAII:
                chip_name = "hawaii";
                break;
+       case CHIP_TOPAZ:
+               chip_name = "topaz";
+               break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
                return 0;
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       if (adev->asic_type == CHIP_TOPAZ)
+               snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+
        err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -980,6 +1017,8 @@ static int gmc_v7_0_hw_init(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       gmc_v7_0_init_golden_registers(adev);
+
        gmc_v7_0_mc_program(adev);
 
        if (!(adev->flags & AMD_IS_APU)) {
index d390284408144c923d8571da0df70b3fab38879c..ba4ad00ba8b4cc38f146de95396d43b52d3609c7 100644 (file)
@@ -42,9 +42,7 @@
 static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
-static const u32 golden_settings_iceland_a11[] =
-{
-       mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-       mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-       mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-       mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
-};
-
-static const u32 iceland_mgcg_cgcg_init[] =
-{
-       mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
-};
-
 static const u32 cz_mgcg_cgcg_init[] =
 {
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
        switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-               amdgpu_program_register_sequence(adev,
-                                                iceland_mgcg_cgcg_init,
-                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
-               amdgpu_program_register_sequence(adev,
-                                                golden_settings_iceland_a11,
-                                                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
-               break;
        case CHIP_FIJI:
                amdgpu_program_register_sequence(adev,
                                                 fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-               chip_name = "topaz";
-               break;
        case CHIP_TONGA:
                chip_name = "tonga";
                break;
        case CHIP_FIJI:
-               chip_name = "fiji";
-               break;
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                return 0;
@@ -1003,7 +975,7 @@ static int gmc_v8_0_hw_init(void *handle)
 
        gmc_v8_0_mc_program(adev);
 
-       if (!(adev->flags & AMD_IS_APU)) {
+       if (adev->asic_type == CHIP_TONGA) {
                r = gmc_v8_0_mc_load_microcode(adev);
                if (r) {
                        DRM_ERROR("Failed to load MC firmware!\n");
index 966d4b2ed9dad0527a2d0246b23731f1d802d9e2..090486c182497ba8de2aaa7840242be9cf4750aa 100644 (file)
@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
                case AMDGPU_UCODE_ID_CP_ME:
                        return UCODE_ID_CP_ME_MASK;
                case AMDGPU_UCODE_ID_CP_MEC1:
-                       return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
+                       return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
                case AMDGPU_UCODE_ID_CP_MEC2:
                        return UCODE_ID_CP_MEC_MASK;
                case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
-               return -EINVAL;
-       }
-
        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
                        UCODE_ID_CP_ME_MASK |
                        UCODE_ID_CP_PFP_MASK |
                        UCODE_ID_CP_MEC_MASK |
-                       UCODE_ID_CP_MEC_JT1_MASK |
-                       UCODE_ID_CP_MEC_JT2_MASK;
+                       UCODE_ID_CP_MEC_JT1_MASK;
+
 
        if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
                DRM_ERROR("Fail to request SMU load ucode\n");
index 204903897b4fc04235da64342a1a5e21262bb37f..63d6cb3c1110ef809acf640bcc8d27bd9964b812 100644 (file)
@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
 
 static int tonga_dpm_suspend(void *handle)
 {
-       return 0;
+       return tonga_dpm_hw_fini(handle);
 }
 
 static int tonga_dpm_resume(void *handle)
 {
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       mutex_lock(&adev->pm.mutex);
-
-       ret = tonga_smu_start(adev);
-       if (ret) {
-               DRM_ERROR("SMU start failed\n");
-               goto fail;
-       }
-
-fail:
-       mutex_unlock(&adev->pm.mutex);
-       return ret;
+       return tonga_dpm_hw_init(handle);
 }
 
 static int tonga_dpm_set_clockgating_state(void *handle,
index 2adc1c855e856c0745b5f1a0e4da13ca924ed0cc..3e9cbe398151f7a62d1177c69f16e61f09bc1a0f 100644 (file)
@@ -60,6 +60,7 @@
 #include "vi.h"
 #include "vi_dpm.h"
 #include "gmc_v8_0.h"
+#include "gmc_v7_0.h"
 #include "gfx_v8_0.h"
 #include "sdma_v2_4.h"
 #include "sdma_v3_0.h"
@@ -1081,10 +1082,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
-               .major = 8,
-               .minor = 0,
+               .major = 7,
+               .minor = 4,
                .rev = 0,
-               .funcs = &gmc_v8_0_ip_funcs,
+               .funcs = &gmc_v7_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
index 541a610667add983ed234bb9e7807ed90ffb75ce..e0b4586a26fde79d7a9eb6dbc506c8cc11d85996 100644 (file)
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
        } while (ast_read32(ast, 0x10000) != 0x01);
        data = ast_read32(ast, 0x10004);
 
-       if (data & 0x400)
+       if (data & 0x40)
                ast->dram_bus_width = 16;
        else
                ast->dram_bus_width = 32;
index 809959d56d7826364b540204a16190aaf270f9c4..39d7e2e15c11e4603d8c889cc48bc507729b495d 100644 (file)
@@ -798,12 +798,33 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
        return mstb;
 }
 
+static void drm_dp_free_mst_port(struct kref *kref);
+
+static void drm_dp_free_mst_branch_device(struct kref *kref)
+{
+       struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+       if (mstb->port_parent) {
+               if (list_empty(&mstb->port_parent->next))
+                       kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
+       }
+       kfree(mstb);
+}
+
 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 {
        struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
        struct drm_dp_mst_port *port, *tmp;
        bool wake_tx = false;
 
+       /*
+        * init kref again to be used by ports to remove mst branch when it is
+        * not needed anymore
+        */
+       kref_init(kref);
+
+       if (mstb->port_parent && list_empty(&mstb->port_parent->next))
+               kref_get(&mstb->port_parent->kref);
+
        /*
         * destroy all ports - don't need lock
         * as there are no more references to the mst branch
@@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 
        if (wake_tx)
                wake_up(&mstb->mgr->tx_waitq);
-       kfree(mstb);
+
+       kref_put(kref, drm_dp_free_mst_branch_device);
 }
 
 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
                         * from an EDID retrieval */
 
                        mutex_lock(&mgr->destroy_connector_lock);
+                       kref_get(&port->parent->kref);
                        list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
@@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
                                 u8 *rad)
 {
-       int lct = port->parent->lct;
+       int parent_lct = port->parent->lct;
        int shift = 4;
-       int idx = lct / 2;
-       if (lct > 1) {
-               memcpy(rad, port->parent->rad, idx);
-               shift = (lct % 2) ? 4 : 0;
+       int idx = (parent_lct - 1) / 2;
+       if (parent_lct > 1) {
+               memcpy(rad, port->parent->rad, idx + 1);
+               shift = (parent_lct % 2) ? 4 : 0;
        } else
                rad[0] = 0;
 
        rad[idx] |= port->port_num << shift;
-       return lct + 1;
+       return parent_lct + 1;
 }
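
The RAD (relative address) packs one 4-bit port number per hop, two hops per byte; the fix indexes by the parent's link count rather than the port's own, and the readers below gain the matching & 0xf when unpacking. A sketch of the packing scheme:

#include <stdint.h>

/* hop i (0-based) lives in rad[i / 2]; even hops take the high nibble */
static void rad_set(uint8_t *rad, int hop, int port)
{
	int shift = (hop % 2) ? 0 : 4;

	rad[hop / 2] |= (port & 0xf) << shift;
}

static int rad_get(const uint8_t *rad, int hop)
{
	int shift = (hop % 2) ? 0 : 4;

	return (rad[hop / 2] >> shift) & 0xf;
}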
 
 /*
@@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
        return send_link;
 }
 
-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
-                                  struct drm_dp_mst_port *port)
+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
 {
        int ret;
-       if (port->dpcd_rev >= 0x12) {
-               port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
-               if (!port->guid_valid) {
-                       ret = drm_dp_send_dpcd_write(mstb->mgr,
-                                                    port,
-                                                    DP_GUID,
-                                                    16, port->guid);
-                       port->guid_valid = true;
+
+       memcpy(mstb->guid, guid, 16);
+
+       if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
+               if (mstb->port_parent) {
+                       ret = drm_dp_send_dpcd_write(
+                                       mstb->mgr,
+                                       mstb->port_parent,
+                                       DP_GUID,
+                                       16,
+                                       mstb->guid);
+               } else {
+
+                       ret = drm_dp_dpcd_write(
+                                       mstb->mgr->aux,
+                                       DP_GUID,
+                                       mstb->guid,
+                                       16);
                }
        }
 }
@@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
        snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
        for (i = 0; i < (mstb->lct - 1); i++) {
                int shift = (i % 2) ? 0 : 4;
-               int port_num = mstb->rad[i / 2] >> shift;
+               int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
                snprintf(temp, sizeof(temp), "-%d", port_num);
                strlcat(proppath, temp, proppath_size);
        }
@@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
        port->dpcd_rev = port_msg->dpcd_revision;
        port->num_sdp_streams = port_msg->num_sdp_streams;
        port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
-       memcpy(port->guid, port_msg->peer_guid, 16);
 
        /* manage mstb port lists with mgr lock - take a reference
           for this list */
@@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 
        if (old_ddps != port->ddps) {
                if (port->ddps) {
-                       drm_dp_check_port_guid(mstb, port);
                        if (!port->input)
                                drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
                } else {
-                       port->guid_valid = false;
                        port->available_pbn = 0;
                        }
        }
@@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
 
        if (old_ddps != port->ddps) {
                if (port->ddps) {
-                       drm_dp_check_port_guid(mstb, port);
                        dowork = true;
                } else {
-                       port->guid_valid = false;
                        port->available_pbn = 0;
                }
        }
@@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
 
        for (i = 0; i < lct - 1; i++) {
                int shift = (i % 2) ? 0 : 4;
-               int port_num = rad[i / 2] >> shift;
+               int port_num = (rad[i / 2] >> shift) & 0xf;
 
                list_for_each_entry(port, &mstb->ports, next) {
                        if (port->port_num == port_num) {
@@ -1210,6 +1237,48 @@ out:
        return mstb;
 }
 
+static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
+       struct drm_dp_mst_branch *mstb,
+       uint8_t *guid)
+{
+       struct drm_dp_mst_branch *found_mstb;
+       struct drm_dp_mst_port *port;
+
+       if (memcmp(mstb->guid, guid, 16) == 0)
+               return mstb;
+
+
+       list_for_each_entry(port, &mstb->ports, next) {
+               if (!port->mstb)
+                       continue;
+
+               found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+
+               if (found_mstb)
+                       return found_mstb;
+       }
+
+       return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
+       struct drm_dp_mst_topology_mgr *mgr,
+       uint8_t *guid)
+{
+       struct drm_dp_mst_branch *mstb;
+
+       /* find the port by iterating down */
+       mutex_lock(&mgr->lock);
+
+       mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+
+       if (mstb)
+               kref_get(&mstb->kref);
+
+       mutex_unlock(&mgr->lock);
+       return mstb;
+}
+
 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                                               struct drm_dp_mst_branch *mstb)
 {
@@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
                                  struct drm_dp_sideband_msg_tx *txmsg)
 {
        struct drm_dp_mst_branch *mstb = txmsg->dst;
+       u8 req_type;
 
        /* both msg slots are full */
        if (txmsg->seqno == -1) {
@@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
                        txmsg->seqno = 1;
                mstb->tx_slots[txmsg->seqno] = txmsg;
        }
-       hdr->broadcast = 0;
+
+       req_type = txmsg->msg[0] & 0x7f;
+       if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
+               req_type == DP_RESOURCE_STATUS_NOTIFY)
+               hdr->broadcast = 1;
+       else
+               hdr->broadcast = 0;
        hdr->path_msg = txmsg->path_msg;
        hdr->lct = mstb->lct;
        hdr->lcr = mstb->lct - 1;
@@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 }
 
 /* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+                                      struct drm_dp_sideband_msg_tx *txmsg)
 {
-       struct drm_dp_sideband_msg_tx *txmsg;
        int ret;
 
        /* construct a chunk from the first msg in the tx_msg queue */
-       if (list_empty(&mgr->tx_msg_upq)) {
-               mgr->tx_up_in_progress = false;
-               return;
-       }
-
-       txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
        ret = process_single_tx_qlock(mgr, txmsg, true);
-       if (ret == 1) {
-               /* up txmsgs aren't put in slots - so free after we send it */
-               list_del(&txmsg->next);
-               kfree(txmsg);
-       } else if (ret)
+
+       if (ret != 1)
                DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-       mgr->tx_up_in_progress = true;
+
+       txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 }
 
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                                       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
                                       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
                        }
+
+                       drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+
                        for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
                                drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
                        }
@@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
        return 0;
 }
 
+static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
+{
+       if (!mstb->port_parent)
+               return NULL;
+
+       if (mstb->port_parent->mstb != mstb)
+               return mstb->port_parent;
+
+       return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+                                                                        struct drm_dp_mst_branch *mstb,
+                                                                        int *port_num)
+{
+       struct drm_dp_mst_branch *rmstb = NULL;
+       struct drm_dp_mst_port *found_port;
+       mutex_lock(&mgr->lock);
+       if (mgr->mst_primary) {
+               found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+
+               if (found_port) {
+                       rmstb = found_port->parent;
+                       kref_get(&rmstb->kref);
+                       *port_num = found_port->port_num;
+               }
+       }
+       mutex_unlock(&mgr->lock);
+       return rmstb;
+}
+
 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
                                   struct drm_dp_mst_port *port,
                                   int id,
@@ -1561,11 +1663,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 {
        struct drm_dp_sideband_msg_tx *txmsg;
        struct drm_dp_mst_branch *mstb;
-       int len, ret;
+       int len, ret, port_num;
 
+       port_num = port->port_num;
        mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
-       if (!mstb)
-               return -EINVAL;
+       if (!mstb) {
+               mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+
+               if (!mstb)
+                       return -EINVAL;
+       }
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg) {
@@ -1574,7 +1681,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        }
 
        txmsg->dst = mstb;
-       len = build_allocate_payload(txmsg, port->port_num,
+       len = build_allocate_payload(txmsg, port_num,
                                     id,
                                     pbn);
 
@@ -1844,11 +1951,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
        drm_dp_encode_up_ack_reply(txmsg, req_type);
 
        mutex_lock(&mgr->qlock);
-       list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
-       if (!mgr->tx_up_in_progress) {
-               process_single_up_tx_qlock(mgr);
-       }
+
+       process_single_up_tx_qlock(mgr, txmsg);
+
        mutex_unlock(&mgr->qlock);
+
+       kfree(txmsg);
        return 0;
 }
 
@@ -1927,31 +2035,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
                mgr->mst_primary = mstb;
                kref_get(&mgr->mst_primary->kref);
 
-               {
-                       struct drm_dp_payload reset_pay;
-                       reset_pay.start_slot = 0;
-                       reset_pay.num_slots = 0x3f;
-                       drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
-               }
-
                ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-                                        DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+                                                        DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
                if (ret < 0) {
                        goto out_unlock;
                }
 
-
-               /* sort out guid */
-               ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
-               if (ret != 16) {
-                       DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
-                       goto out_unlock;
-               }
-
-               mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
-               if (!mgr->guid_valid) {
-                       ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
-                       mgr->guid_valid = true;
+               {
+                       struct drm_dp_payload reset_pay;
+                       reset_pay.start_slot = 0;
+                       reset_pay.num_slots = 0x3f;
+                       drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
                }
 
                queue_work(system_long_wq, &mgr->work);
@@ -2145,28 +2239,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 
        if (mgr->up_req_recv.have_eomt) {
                struct drm_dp_sideband_msg_req_body msg;
-               struct drm_dp_mst_branch *mstb;
+               struct drm_dp_mst_branch *mstb = NULL;
                bool seqno;
-               mstb = drm_dp_get_mst_branch_device(mgr,
-                                                   mgr->up_req_recv.initial_hdr.lct,
-                                                   mgr->up_req_recv.initial_hdr.rad);
-               if (!mstb) {
-                       DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
-                       memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-                       return 0;
+
+               if (!mgr->up_req_recv.initial_hdr.broadcast) {
+                       mstb = drm_dp_get_mst_branch_device(mgr,
+                                                           mgr->up_req_recv.initial_hdr.lct,
+                                                           mgr->up_req_recv.initial_hdr.rad);
+                       if (!mstb) {
+                               DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+                               memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+                               return 0;
+                       }
                }
 
                seqno = mgr->up_req_recv.initial_hdr.seqno;
                drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
 
                if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
-                       drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+                       drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+
+                       if (!mstb)
+                               mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+
+                       if (!mstb) {
+                               DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+                               memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+                               return 0;
+                       }
+
                        drm_dp_update_port(mstb, &msg.u.conn_stat);
+
                        DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
                        (*mgr->cbs->hotplug)(mgr);
 
                } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
-                       drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+                       drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+                       if (!mstb)
+                               mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+
+                       if (!mstb) {
+                               DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+                               memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+                               return 0;
+                       }
+
                        DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
                }
 
@@ -2346,6 +2463,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
                DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
                if (pbn == port->vcpi.pbn) {
                        *slots = port->vcpi.num_slots;
+                       drm_dp_put_port(port);
                        return true;
                }
        }
@@ -2505,32 +2623,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
  */
 int drm_dp_calc_pbn_mode(int clock, int bpp)
 {
-       fixed20_12 pix_bw;
-       fixed20_12 fbpp;
-       fixed20_12 result;
-       fixed20_12 margin, tmp;
-       u32 res;
-
-       pix_bw.full = dfixed_const(clock);
-       fbpp.full = dfixed_const(bpp);
-       tmp.full = dfixed_const(8);
-       fbpp.full = dfixed_div(fbpp, tmp);
-
-       result.full = dfixed_mul(pix_bw, fbpp);
-       margin.full = dfixed_const(54);
-       tmp.full = dfixed_const(64);
-       margin.full = dfixed_div(margin, tmp);
-       result.full = dfixed_div(result, margin);
-
-       margin.full = dfixed_const(1006);
-       tmp.full = dfixed_const(1000);
-       margin.full = dfixed_div(margin, tmp);
-       result.full = dfixed_mul(result, margin);
-
-       result.full = dfixed_div(result, tmp);
-       result.full = dfixed_ceil(result);
-       res = dfixed_trunc(result);
-       return res;
+       u64 kbps;
+       s64 peak_kbps;
+       u32 numerator;
+       u32 denominator;
+
+       kbps = clock * bpp;
+
+       /*
+        * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+        * The unit of 54/64 MBytes/sec is an arbitrary unit chosen as a
+        * common multiplier to render an integer PBN for all link rate/lane
+        * count combinations.
+        * Calculation:
+        * peak_kbps *= (1006/1000)
+        * peak_kbps *= (64/54)
+        * peak_kbps /= 8    to convert bits to bytes
+        */
+
+       numerator = 64 * 1006;
+       denominator = 54 * 8 * 1000 * 1000;
+
+       kbps *= numerator;
+       peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+       return drm_fixp2int_ceil(peak_kbps);
 }
 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
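
The rewrite above collapses the old dfixed_* sequence into one fraction: PBN = ceil(clock * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000)). A minimal user-space sketch, with plain integer ceil-division standing in for the kernel's drm_fixp helpers, reproduces the values checked by test_calc_pbn_mode() below:

#include <stdint.h>
#include <stdio.h>

/* Integer stand-in for drm_fixp_from_fraction() + drm_fixp2int_ceil():
 * ceil(kbps * 64 * 1006 / (54 * 8 * 1000 * 1000)). */
static int calc_pbn(uint64_t clock_khz, uint64_t bpp)
{
        uint64_t num = clock_khz * bpp * 64 * 1006;
        uint64_t den = 54ULL * 8 * 1000 * 1000;

        return (int)((num + den - 1) / den);    /* round up */
}

int main(void)
{
        /* Same inputs as the self-test cases below. */
        printf("%d %d %d\n", calc_pbn(154000, 30),   /* 689 */
               calc_pbn(234000, 30),                 /* 1047 */
               calc_pbn(297000, 24));                /* 1063 */
        return 0;
}
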
 
@@ -2538,11 +2655,23 @@ static int test_calc_pbn_mode(void)
 {
        int ret;
        ret = drm_dp_calc_pbn_mode(154000, 30);
-       if (ret != 689)
+       if (ret != 689) {
+               DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                               154000, 30, 689, ret);
                return -EINVAL;
+       }
        ret = drm_dp_calc_pbn_mode(234000, 30);
-       if (ret != 1047)
+       if (ret != 1047) {
+               DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                               234000, 30, 1047, ret);
+               return -EINVAL;
+       }
+       ret = drm_dp_calc_pbn_mode(297000, 24);
+       if (ret != 1063) {
+               DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                               297000, 24, 1063, ret);
                return -EINVAL;
+       }
        return 0;
 }
 
@@ -2683,6 +2812,13 @@ static void drm_dp_tx_work(struct work_struct *work)
        mutex_unlock(&mgr->qlock);
 }
 
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+       struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+       kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
+       kfree(port);
+}
+
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2703,13 +2839,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                list_del(&port->next);
                mutex_unlock(&mgr->destroy_connector_lock);
 
+               kref_init(&port->kref);
+               INIT_LIST_HEAD(&port->next);
+
                mgr->cbs->destroy_connector(mgr, port->connector);
 
                drm_dp_port_teardown_pdt(port, port->pdt);
 
-               if (!port->input && port->vcpi.vcpi > 0)
-                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-               kfree(port);
+               if (!port->input && port->vcpi.vcpi > 0) {
+                       if (mgr->mst_state) {
+                               drm_dp_mst_reset_vcpi_slots(mgr, port);
+                               drm_dp_update_payload_part1(mgr);
+                               drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+                       }
+               }
+
+               kref_put(&port->kref, drm_dp_free_mst_port);
                send_hotplug = true;
        }
        if (send_hotplug)
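
With this rework every port is freed through a kref whose release callback, drm_dp_free_mst_port() above, also drops the reference the port holds on its parent branch device. A toy sketch of that release chain; the names are illustrative, not the DRM MST API:

#include <stdio.h>
#include <stdlib.h>

struct ref { int count; };

static void ref_put(struct ref *r, void (*release)(struct ref *))
{
        if (--r->count == 0)
                release(r);
}

struct branch { struct ref ref; };              /* ref must stay first */
struct port   { struct ref ref; struct branch *parent; };

static void free_branch(struct ref *r)
{
        printf("branch freed\n");
        free(r);
}

static void free_port(struct ref *r)
{
        struct port *p = (struct port *)r;      /* container_of() in the kernel */

        ref_put(&p->parent->ref, free_branch);  /* drop the parent reference */
        printf("port freed\n");
        free(p);
}

int main(void)
{
        struct branch *b = malloc(sizeof(*b));
        struct port *p = malloc(sizeof(*p));

        b->ref.count = 1;
        p->ref.count = 1;
        p->parent = b;
        ref_put(&p->ref, free_port);            /* frees port, then branch */
        return 0;
}
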
@@ -2736,7 +2881,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
        mutex_init(&mgr->qlock);
        mutex_init(&mgr->payload_lock);
        mutex_init(&mgr->destroy_connector_lock);
-       INIT_LIST_HEAD(&mgr->tx_msg_upq);
        INIT_LIST_HEAD(&mgr->tx_msg_downq);
        INIT_LIST_HEAD(&mgr->destroy_connector_list);
        INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
index 607f493ae80132890ad97422f58f9cdf95eefc75..8090989185b2bb2d06b4522a52b4a331f17b1ab1 100644 (file)
@@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
                diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
        }
 
+       /*
+        * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
+        * interval? If so, vblank irqs keep running and the hardware
+        * vblank counter is likely not trustworthy, as it might reset at
+        * some point in that interval; vblank timestamps are not
+        * trustworthy in that interval either. IOW, this can result in a
+        * bogus diff >> 1, which must be avoided as it would cause random
+        * large forward jumps of the software vblank counter.
+        */
+       if (diff > 1 && (vblank->inmodeset & 0x2)) {
+               DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u"
+                             " due to pre-modeset.\n", pipe, diff);
+               diff = 1;
+       }
+
+       /*
+        * FIXME: Need to replace this hack with proper seqlocks.
+        *
+        * Restrict the bump of the software vblank counter to a safe maximum
+        * value of +1 whenever there is the possibility that concurrent readers
+        * of vblank timestamps could be active at the moment, as the current
+        * implementation of the timestamp caching and updating is not safe
+        * against concurrent readers for calls to store_vblank() with a bump
+        * of anything but +1. A bump != 1 would very likely return corrupted
+        * timestamps to userspace, because the same slot in the cache could
+        * be concurrently written by store_vblank() and read by one of those
+        * readers without the read-retry logic detecting the collision.
+        *
+        * Concurrent readers can exist when we are called from the
+        * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
+        * irq callers. However, all those calls to us are happening with the
+        * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
+        * can't increase while we are executing. Therefore a zero refcount at
+        * this point is safe for arbitrary counter bumps if we are called
+        * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
+        * we must also accept a refcount of 1, as whenever we are called from
+        * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
+        * we must let that one pass through in order to not lose vblank counts
+        * during vblank irq off - which would completely defeat the whole
+        * point of this routine.
+        *
+        * Whenever we are called from vblank irq, we have to assume concurrent
+        * readers exist or can show up any time during our execution, even if
+        * the refcount is currently zero, as vblank irqs are usually only
+        * enabled due to the presence of readers, and because when we are called
+        * from vblank irq we can't hold the vbl_lock to protect us from sudden
+        * bumps in vblank refcount. Therefore also restrict bumps to +1 when
+        * called from vblank irq.
+        */
+       if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
+           (flags & DRM_CALLED_FROM_VBLIRQ))) {
+               DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u "
+                             "refcount %u, vblirq %u\n", pipe, diff,
+                             atomic_read(&vblank->refcount),
+                             (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
+               diff = 1;
+       }
+
        DRM_DEBUG_VBL("updating vblank count on crtc %u:"
                      " current=%u, diff=%u, hw=%u hw_last=%u\n",
                      pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
        spin_lock_irqsave(&dev->event_lock, irqflags);
 
        spin_lock(&dev->vbl_lock);
-       vblank_disable_and_save(dev, pipe);
+       DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+                     pipe, vblank->enabled, vblank->inmodeset);
+
+       /* Avoid redundant vblank disables without previous drm_vblank_on(). */
+       if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
+               vblank_disable_and_save(dev, pipe);
+
        wake_up(&vblank->queue);
 
        /*
@@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
                return;
 
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
+       DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+                     pipe, vblank->enabled, vblank->inmodeset);
+
        /* Drop our private "prevent drm_vblank_get" refcount */
        if (vblank->inmodeset) {
                atomic_dec(&vblank->refcount);
@@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
         * re-enable interrupts if there are users left, or the
         * user wishes vblank interrupts to be enabled all the time.
         */
-       if (atomic_read(&vblank->refcount) != 0 ||
-           (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
+       if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
                WARN_ON(drm_vblank_enable(dev, pipe));
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
@@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
        if (vblank->inmodeset) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
                dev->vblank_disable_allowed = true;
+               drm_reset_vblank_timestamp(dev, pipe);
                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
                if (vblank->inmodeset & 0x2)
index c707fa6fca85bac195b824c7dcdb885074161f9a..e3bdc8b1c32c0786aacea4261351ef9fc5e92ef1 100644 (file)
@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
                return ret;
        }
        /* We have the initial and handle reference but need only one now */
-       drm_gem_object_unreference(&r->gem);
+       drm_gem_object_unreference_unlocked(&r->gem);
        *handlep = handle;
        return 0;
 }
index b4741d121a744f67b95f1d4af95fd0518e1ddf4f..61fcb3b2229776fb9d070760d8590de45834fe31 100644 (file)
@@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (ret)
                goto cleanup_gem_stolen;
 
+       intel_setup_gmbus(dev);
+
        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
@@ -451,6 +453,7 @@ cleanup_gem:
 cleanup_irq:
        intel_guc_ucode_fini(dev);
        drm_irq_uninstall(dev);
+       intel_teardown_gmbus(dev);
 cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
 cleanup_vga_switcheroo:
@@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
-       intel_setup_gmbus(dev);
        intel_opregion_setup(dev);
 
        i915_gem_load(dev);
@@ -1099,7 +1101,6 @@ out_gem_unload:
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);
 
-       intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
        pm_qos_remove_request(&dev_priv->pm_qos);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
@@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev)
 
        intel_csr_ucode_fini(dev);
 
-       intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
 
        destroy_workqueue(dev_priv->hotplug.dp_wq);
index 760e0ce4aa26941c1ebd8d96706ccdbe3b7d7154..a6ad938f44a68f736e30921ad04094a73cab0668 100644 (file)
@@ -531,7 +531,10 @@ void intel_detect_pch(struct drm_device *dev)
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev));
-                       } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
+                       } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+                                  ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+                                   pch->subsystem_vendor == 0x1af4 &&
+                                   pch->subsystem_device == 0x1100)) {
                                dev_priv->pch_type = intel_virt_detect_pch(dev);
                        } else
                                continue;
index f4af19a0d5696324c307617adfb0c86a3fdfed38..d3ce4da6a6ade3740405b5e98968707a958a07ae 100644 (file)
@@ -2614,6 +2614,7 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE           0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE                0x9D00
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE           0x7100
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE          0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
index 02ceb7a4b4815e52a825a290aa8e6d2cd78d25b5..0433d25f9d23a6b8cf4c6a4650b81ae7024c0535 100644 (file)
@@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
                        i915_gem_context_unreference(lctx);
                        ring->last_context = NULL;
                }
+
+               /* Force the GPU state to be reinitialised on enabling */
+               if (ring->default_context)
+                       ring->default_context->legacy_hw_ctx.initialized = false;
        }
 }
 
@@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req)
        if (ret)
                goto unpin_out;
 
-       if (!to->legacy_hw_ctx.initialized) {
+       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
                hw_flags |= MI_RESTORE_INHIBIT;
                /* NB: If we inhibit the restore, the context is not allowed to
                 * die because future work may end up depending on valid address
index 0d228f909dcb5fa710dc084f69f0e4cb7bf31bed..0f42a2782afc31aabc2ed6d333fba457caa69705 100644 (file)
@@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                                spt_irq_handler(dev, pch_iir);
                        else
                                cpt_irq_handler(dev, pch_iir);
-               } else
-                       DRM_ERROR("The master control interrupt lied (SDE)!\n");
-
+               } else {
+                       /*
+                        * Like on previous PCH there seems to be something
+                        * fishy going on with forwarding PCH interrupts.
+                        */
+                       DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+               }
        }
 
        I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
index a6752a61d99f268217e7054a8531036d109be7ee..7e6158b889da9f51b9cc0af75aad89c8c2ebcf3f 100644 (file)
@@ -1582,7 +1582,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
                         DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
                         DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
                         wrpll_params.central_freq;
-       } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+       } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+                  intel_encoder->type == INTEL_OUTPUT_DP_MST) {
                switch (crtc_state->port_clock / 2) {
                case 81000:
                        ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
index 32cf973469785cb916eb3c2bdd125a741efb5e4c..f859a5b87ed46599c87c2050271c31d9a8eeea07 100644 (file)
@@ -11930,11 +11930,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
                pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
        }
 
-       /* Clamp bpp to 8 on screens without EDID 1.4 */
-       if (connector->base.display_info.bpc == 0 && bpp > 24) {
-               DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
-                             bpp);
-               pipe_config->pipe_bpp = 24;
+       /* Clamp bpp to default limit on screens without EDID 1.4 */
+       if (connector->base.display_info.bpc == 0) {
+               int type = connector->base.connector_type;
+               int clamp_bpp = 24;
+
+               /* Fall back to 18 bpp when DP sink capability is unknown. */
+               if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+                   type == DRM_MODE_CONNECTOR_eDP)
+                       clamp_bpp = 18;
+
+               if (bpp > clamp_bpp) {
+                       DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+                                     bpp, clamp_bpp);
+                       pipe_config->pipe_bpp = clamp_bpp;
+               }
        }
 }
 
@@ -13537,11 +13547,12 @@ intel_check_primary_plane(struct drm_plane *plane,
        int max_scale = DRM_PLANE_HELPER_NO_SCALING;
        bool can_position = false;
 
-       /* use scaler when colorkey is not required */
-       if (INTEL_INFO(plane->dev)->gen >= 9 &&
-           state->ckey.flags == I915_SET_COLORKEY_NONE) {
-               min_scale = 1;
-               max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+       if (INTEL_INFO(plane->dev)->gen >= 9) {
+               /* use scaler when colorkey is not required */
+               if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+                       min_scale = 1;
+                       max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+               }
                can_position = true;
        }
 
@@ -15565,6 +15576,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
        mutex_lock(&dev->struct_mutex);
        intel_cleanup_gt_powersave(dev);
        mutex_unlock(&dev->struct_mutex);
+
+       intel_teardown_gmbus(dev);
 }
 
 /*
index a5e99ac305daab3ef69471d37b0ec9a97a27425c..a8912aecc31f0de663425fd6c3e868eeac5746f2 100644 (file)
@@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
        gpio = *data++;
 
        /* pull up/down */
-       action = *data++;
+       action = *data++ & 1;
+
+       if (gpio >= ARRAY_SIZE(gtable)) {
+               DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+               goto out;
+       }
 
        function = gtable[gpio].function_reg;
        pad = gtable[gpio].pad_reg;
@@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
        vlv_gpio_nc_write(dev_priv, pad, val);
        mutex_unlock(&dev_priv->sb_lock);
 
+out:
        return data;
 }
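
The added check validates the GPIO index, which arrives in VBT sequence data and is effectively untrusted, against ARRAY_SIZE(gtable) before it is used to index the table. A self-contained sketch of the same bounds-check pattern over a hypothetical register table:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for gtable[]. */
static const struct { unsigned function_reg, pad_reg; } table[] = {
        { 0x40, 0x48 }, { 0x41, 0x49 }, { 0x42, 0x4a },
};

static void exec_gpio(unsigned gpio)
{
        if (gpio >= ARRAY_SIZE(table)) {
                fprintf(stderr, "unknown gpio %u\n", gpio);
                return;
        }
        printf("function 0x%x pad 0x%x\n",
               table[gpio].function_reg, table[gpio].pad_reg);
}

int main(void)
{
        exec_gpio(1);   /* valid index */
        exec_gpio(9);   /* rejected instead of reading past the table */
        return 0;
}
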
 
index b17785719598c9ca867340dc77aaed858f0c72db..d7a6437d9da234e5a573fb734643417a19b2ba8a 100644 (file)
@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                struct intel_connector *intel_connector = to_intel_connector(connector);
                connector->polled = intel_connector->polled;
-               if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
-                       connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+               /* MST has a dynamic intel_connector->encoder and its
+                * reprobing is all handled by the MST helpers. */
                if (intel_connector->mst_port)
+                       continue;
+
+               if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+                   intel_connector->encoder->hpd_pin > HPD_NONE)
                        connector->polled = DRM_CONNECTOR_POLL_HPD;
        }
 
index 8324654037b65577fd4bdc65484576e9ceaf432a..f3bee54c414f926bde876fa5df12319f7dc9907b 100644 (file)
@@ -675,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
        return 0;
 
 err:
-       while (--pin) {
+       while (pin--) {
                if (!intel_gmbus_is_valid_pin(dev_priv, pin))
                        continue;
 
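
This error-path fix, mirrored by the identical while (i--) change in radeon_ttm_tt_populate() further down, is the classic unwind off-by-one: after a failure at index pin, entries pin-1 down to 0 must be undone, and the pre-decrement form skips index 0 (and misbehaves entirely if the counter is already 0). A stand-alone sketch of the two loop shapes:

#include <stdio.h>

#define N 4

int main(void)
{
        int initialized[N] = {0};
        int i;

        /* Suppose setup succeeded for i = 0..2 and failed at i = 3. */
        for (i = 0; i < 3; i++)
                initialized[i] = 1;

        /* Wrong: while (--i) tests after decrementing, so index 0 is
         * never cleaned up, and i == 0 at entry would underflow. */

        /* Right: post-decrement visits i-1 down to 0 exactly once each. */
        while (i--)
                initialized[i] = 0;

        for (i = 0; i < N; i++)
                printf("%d", initialized[i]);   /* prints 0000 */
        printf("\n");
        return 0;
}
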
index 88e12bdf79e23388a912fc53939658c9971a406f..d69547a65dbb95599c400e864b95c779863ef995 100644 (file)
@@ -1706,6 +1706,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
 
index 9461a238f5d5a2040117ce80f87ea01864bd8389..f6b2a814e629117a14067b0cbe58dddffcff4b91 100644 (file)
@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (invalidate_domains) {
@@ -419,6 +420,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (invalidate_domains) {
index 2e7cbe933533c97b9a211827d5005d2adc282496..2a5ed746035458dbd4f7db04490222acc81b950f 100644 (file)
@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
 
                NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
 
+               mutex_lock(&drm->dev->mode_config.mutex);
                if (plugged)
                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
                else
                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+               mutex_unlock(&drm->dev->mode_config.mutex);
+
                drm_helper_hpd_irq_event(connector->dev);
        }
 
index 64c8d932d5f19de27aa6f66c4e5371f5484cdd7c..58a3f7cf2fb3278c7b52d89182c57d1d771cab28 100644 (file)
@@ -634,10 +634,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
                nv_crtc->lut.depth = 0;
        }
 
-       /* Make sure that drm and hw vblank irqs get resumed if needed. */
-       for (head = 0; head < dev->mode_config.num_crtc; head++)
-               drm_vblank_on(dev, head);
-
        /* This should ensure we don't hit a locking problem when someone
         * wakes us up via a connector.  We should never go into suspend
         * while the display is on anyway.
@@ -647,6 +643,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
 
        drm_helper_resume_force_mode(dev);
 
+       /* Make sure that drm and hw vblank irqs get resumed if needed. */
+       for (head = 0; head < dev->mode_config.num_crtc; head++)
+               drm_vblank_on(dev, head);
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
index 60e32c4e4e49606ef99eeaa4f16186fd4cf1605e..35ecc0d0458f07c0ebb04b08f5ed94ef11f4b19a 100644 (file)
@@ -24,7 +24,7 @@
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
        const struct nvkm_device_tegra_func *func;
-       struct nvkm_device *device;
+       struct nvkm_device *device = NULL;
        struct drm_device *drm;
        int ret;
 
index 7f8a42721eb20ccafe19fcaefe3d972f432a6c7f..e7e581d6a8ff24d256b6526d2b93c41b7a00daa3 100644 (file)
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
 
        if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
                return -ENOMEM;
-       *pdevice = &tdev->device;
+
        tdev->func = func;
        tdev->pdev = pdev;
        tdev->irq = -1;
 
        tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
-       if (IS_ERR(tdev->vdd))
-               return PTR_ERR(tdev->vdd);
+       if (IS_ERR(tdev->vdd)) {
+               ret = PTR_ERR(tdev->vdd);
+               goto free;
+       }
 
        tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
-       if (IS_ERR(tdev->rst))
-               return PTR_ERR(tdev->rst);
+       if (IS_ERR(tdev->rst)) {
+               ret = PTR_ERR(tdev->rst);
+               goto free;
+       }
 
        tdev->clk = devm_clk_get(&pdev->dev, "gpu");
-       if (IS_ERR(tdev->clk))
-               return PTR_ERR(tdev->clk);
+       if (IS_ERR(tdev->clk)) {
+               ret = PTR_ERR(tdev->clk);
+               goto free;
+       }
 
        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
-       if (IS_ERR(tdev->clk_pwr))
-               return PTR_ERR(tdev->clk_pwr);
+       if (IS_ERR(tdev->clk_pwr)) {
+               ret = PTR_ERR(tdev->clk_pwr);
+               goto free;
+       }
 
        nvkm_device_tegra_probe_iommu(tdev);
 
        ret = nvkm_device_tegra_power_up(tdev);
        if (ret)
-               return ret;
+               goto remove;
 
        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                               cfg, dbg, detect, mmio, subdev_mask,
                               &tdev->device);
        if (ret)
-               return ret;
+               goto powerdown;
+
+       *pdevice = &tdev->device;
 
        return 0;
+
+powerdown:
+       nvkm_device_tegra_power_down(tdev);
+remove:
+       nvkm_device_tegra_remove_iommu(tdev);
+free:
+       kfree(tdev);
+       return ret;
 }
 #else
 int
index 74e2f7c6c07e4a583d9e3f6d62016b92c5694d28..9688970eca47dbe4ccea5b2e700b0cfdb40b02e0 100644 (file)
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
                .outp = outp,
        }, *dp = &_dp;
        u32 datarate = 0;
+       u8  pwr;
        int ret;
 
        if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
        /* disable link interrupt handling during link training */
        nvkm_notify_put(&outp->irq);
 
+       /* ensure sink is not in a low-power state */
+       if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
+               if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
+                       pwr &= ~DPCD_SC00_SET_POWER;
+                       pwr |=  DPCD_SC00_SET_POWER_D0;
+                       nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
+               }
+       }
+
        /* enable down-spreading and execute pre-train script from vbios */
        dp_link_train_init(dp, outp->dpcd[3] & 0x01);
 
index 9596290329c70e0cbe34259fbf5db39e12a9cbcb..6e10c5e0ef1162232253b29a248ad2c15e801f21 100644 (file)
 #define DPCD_LS0C_LANE1_POST_CURSOR2                                       0x0c
 #define DPCD_LS0C_LANE0_POST_CURSOR2                                       0x03
 
+/* DPCD Sink Control */
+#define DPCD_SC00                                                       0x00600
+#define DPCD_SC00_SET_POWER                                                0x03
+#define DPCD_SC00_SET_POWER_D0                                             0x01
+#define DPCD_SC00_SET_POWER_D3                                             0x03
+
 void nvkm_dp_train(struct work_struct *);
 #endif
index 2ae8577497ca6e1c9cc87b265ff7ca92ee5375ec..7c2e78201ead9d475c3ba97d083c523026c21a47 100644 (file)
@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
                       cmd->command_size))
                return -EFAULT;
 
-       reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+       reloc_info = kmalloc_array(cmd->relocs_num,
+                                  sizeof(struct qxl_reloc_info), GFP_KERNEL);
        if (!reloc_info)
                return -ENOMEM;
 
index 752072771388419c6b932f77aecc84db64d89e1b..367a916f364e94617a1b15fcd7d78cd3f90c8dc6 100644 (file)
@@ -301,6 +301,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
        if (ASIC_IS_DCE8(rdev)) {
+               unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
+                       DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+                       DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+               div = radeon_audio_decode_dfs_div(div);
+
+               if (div)
+                       clock = clock * 100 / div;
+
                WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
                WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
        } else {
index 9953356fe2637cfacdc2ba41e8ecd082d65213ff..3cf04a2f44bbb05169264a504349dec3d949e5bb 100644 (file)
@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
          * number (coefficient of two integer numbers).  DCCG_AUDIO_DTOx_PHASE
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
+       if (ASIC_IS_DCE41(rdev)) {
+               unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
+                       DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+                       DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+               div = radeon_audio_decode_dfs_div(div);
+
+               if (div)
+                       clock = 100 * clock / div;
+       }
+
        WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
        WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
 }
index 4aa5f755572b1593a8b6f7876cf5f7aed183715d..13b6029d65cc524528aec6a1f1727b9d695aa4af 100644 (file)
 #define DCCG_AUDIO_DTO1_CNTL              0x05cc
 #       define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
 
+#define DCE41_DENTIST_DISPCLK_CNTL                     0x049c
+#       define DENTIST_DPREFCLK_WDIVIDER(x)            (((x) & 0x7f) << 24)
+#       define DENTIST_DPREFCLK_WDIVIDER_MASK          (0x7f << 24)
+#       define DENTIST_DPREFCLK_WDIVIDER_SHIFT         24
+
 /* DCE 4.0 AFMT */
 #define HDMI_CONTROL                         0x7030
 #       define HDMI_KEEPOUT_MODE             (1 << 0)
index 87db64983ea8c18a0305eeedf1e25f18cffc87ae..5580568088bb2ff2d78f85c78a4b4f45fbd8243a 100644 (file)
@@ -268,6 +268,7 @@ struct radeon_clock {
        uint32_t current_dispclk;
        uint32_t dp_extclk;
        uint32_t max_pixel_clock;
+       uint32_t vco_freq;
 };
 
 /*
index 8f285244c839a8c85ad186a1b877f94749776a58..de9a2ffcf5f762539d6d4c92464ed5eff19e1c42 100644 (file)
@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
        }
 
        /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
-       if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+       if (((dev->pdev->device == 0x9802) ||
+            (dev->pdev->device == 0x9805) ||
+            (dev->pdev->device == 0x9806)) &&
            (dev->pdev->subsystem_vendor == 0x1734) &&
            (dev->pdev->subsystem_device == 0x11bd)) {
                if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                }
        }
 
-       /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
-       if ((dev->pdev->device == 0x9805) &&
-           (dev->pdev->subsystem_vendor == 0x1734) &&
-           (dev->pdev->subsystem_device == 0x11bd)) {
-               if (*connector_type == DRM_MODE_CONNECTOR_VGA)
-                       return false;
-       }
-
        return true;
 }
 
@@ -1112,6 +1106,31 @@ union firmware_info {
        ATOM_FIRMWARE_INFO_V2_2 info_22;
 };
 
+union igp_info {
+       struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
+{
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+       union igp_info *igp_info;
+       u8 frev, crev;
+       u16 data_offset;
+
+       if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+                       &frev, &crev, &data_offset)) {
+               igp_info = (union igp_info *)(mode_info->atom_context->bios +
+                       data_offset);
+               rdev->clock.vco_freq =
+                       le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
+       }
+}
+
 bool radeon_atom_get_clock_info(struct drm_device *dev)
 {
        struct radeon_device *rdev = dev->dev_private;
@@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                rdev->mode_info.firmware_flags =
                        le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
 
+               if (ASIC_IS_DCE8(rdev))
+                       rdev->clock.vco_freq =
+                               le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
+               else if (ASIC_IS_DCE5(rdev))
+                       rdev->clock.vco_freq = rdev->clock.current_dispclk;
+               else if (ASIC_IS_DCE41(rdev))
+                       radeon_atombios_get_dentist_vco_freq(rdev);
+               else
+                       rdev->clock.vco_freq = rdev->clock.current_dispclk;
+
+               if (rdev->clock.vco_freq == 0)
+                       rdev->clock.vco_freq = 360000;  /* 3.6 GHz */
+
                return true;
        }
 
        return false;
 }
 
-union igp_info {
-       struct _ATOM_INTEGRATED_SYSTEM_INFO info;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
 bool radeon_atombios_sideport_present(struct radeon_device *rdev)
 {
        struct radeon_mode_info *mode_info = &rdev->mode_info;
index 2c02e99b5f95a65a26357f26d67e91a45f8224a4..b214663b370da00905faa882ea27513153aa91bd 100644 (file)
@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-       struct radeon_connector_atom_dig *dig_connector =
-               radeon_connector->con_priv;
 
        if (!dig || !dig->afmt)
                return;
@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
                radeon_audio_write_speaker_allocation(encoder);
                radeon_audio_write_sad_regs(encoder);
                radeon_audio_write_latency_fields(encoder, mode);
-               if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
-                       radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
-               else
-                       radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+               radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
                radeon_audio_set_audio_packet(encoder);
                radeon_audio_select_pin(encoder);
 
@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
        if (radeon_encoder->audio && radeon_encoder->audio->dpms)
                radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
 }
+
+unsigned int radeon_audio_decode_dfs_div(unsigned int div)
+{
+       if (div >= 8 && div < 64)
+               return (div - 8) * 25 + 200;
+       else if (div >= 64 && div < 96)
+               return (div - 64) * 50 + 1600;
+       else if (div >= 96 && div < 128)
+               return (div - 96) * 100 + 3200;
+       else
+               return 0;
+}
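
The new helper decodes the DENTIST DPREFCLK divider field: raw values 8-63 step by 0.25, 64-95 by 0.5 and 96-127 by 1.0, and the result is returned in hundredths so callers can rescale the DTO clock with integer math, as dce6_dp_audio_set_dto() does above. A stand-alone sketch of the decode plus rescale:

#include <stdio.h>

/* Mirror of radeon_audio_decode_dfs_div(); returns the divider in
 * hundredths, or 0 for reserved encodings. */
static unsigned int decode_dfs_div(unsigned int div)
{
        if (div >= 8 && div < 64)
                return (div - 8) * 25 + 200;    /* 2.00 .. 15.75 */
        else if (div >= 64 && div < 96)
                return (div - 64) * 50 + 1600;  /* 16.00 .. 31.50 */
        else if (div >= 96 && div < 128)
                return (div - 96) * 100 + 3200; /* 32 .. 63 */
        return 0;
}

int main(void)
{
        unsigned int clock = 24000;
        unsigned int div = decode_dfs_div(10);  /* 2.50 -> 250 */

        if (div)
                clock = clock * 100 / div;      /* integer rescale */
        printf("%u\n", clock);                  /* 9600 */
        return 0;
}
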
index 059cc3012062a7b50708620c557771dcfea821c0..5c70cceaa4a6c35e673ff9c50306e36e7f93ff3d 100644 (file)
@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
 void radeon_audio_mode_set(struct drm_encoder *encoder,
        struct drm_display_mode *mode);
 void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
+unsigned int radeon_audio_decode_dfs_div(unsigned int div);
 
 #endif
index c566993a2ec3bcbe393ff14d079b0134f0426c7d..d690df545b4d5b81de4be552725e497254bd5541 100644 (file)
@@ -1744,6 +1744,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        }
 
        drm_kms_helper_poll_enable(dev);
+       drm_helper_hpd_irq_event(dev);
 
        /* set the power state here in case we are a PX system or headless */
        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
index 1eca0acac016f314cd6960b9dda3067bcc04e83c..13767d21835f61de34b5dba824a308b36525b59a 100644 (file)
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
        struct drm_crtc *crtc = &radeon_crtc->base;
        unsigned long flags;
        int r;
-       int vpos, hpos, stat, min_udelay;
+       int vpos, hpos, stat, min_udelay = 0;
+       unsigned repcnt = 4;
        struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
 
         down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
         * In practice this won't execute very often unless on very fast
         * machines because the time window for this to happen is very small.
         */
-       for (;;) {
+       while (radeon_crtc->enabled && repcnt--) {
                /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
                 * start in hpos, and to the "fudged earlier" vblank start in
                 * vpos.
@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
                /* Sleep at least until estimated real start of hw vblank */
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+               if (min_udelay > vblank->framedur_ns / 2000) {
+                       /* Don't wait ridiculously long - something is wrong */
+                       repcnt = 0;
+                       break;
+               }
                usleep_range(min_udelay, 2 * min_udelay);
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
        };
 
+       if (!repcnt)
+               DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+                                "framedur %d, linedur %d, stat %d, vpos %d, "
+                                "hpos %d\n", work->crtc_id, min_udelay,
+                                vblank->framedur_ns / 1000,
+                                vblank->linedur_ns / 1000, stat, vpos, hpos);
+
        /* do the flip (mmio) */
        radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
 
index 84d45633d28c9b1c58f58dc6ec6383eba2207230..fb6ad143873f5b40d2c027a20dd25914dbd5e29e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
 #include "radeon.h"
 #include "radeon_trace.h"
 
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+       /* For architectures that don't support WC memory,
+        * mask out the WC flag from the BO
+        */
+       if (!drm_arch_can_wc_memory())
+               bo->flags &= ~RADEON_GEM_GTT_WC;
 #endif
 
        radeon_ttm_placement_from_domain(bo, domain);
index 59abebd6b5dc42d4941131adb51b76ab677d3fd4..1fa81215cea1922088122230cc13f386046db7db 100644 (file)
@@ -1075,12 +1075,6 @@ force:
 
        /* update display watermarks based on new power state */
        radeon_bandwidth_update(rdev);
-       /* update displays */
-       radeon_dpm_display_configuration_changed(rdev);
-
-       rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
-       rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
-       rdev->pm.dpm.single_display = single_display;
 
        /* wait for the rings to drain */
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1097,6 +1091,13 @@ force:
 
        radeon_dpm_post_set_power_state(rdev);
 
+       /* update displays */
+       radeon_dpm_display_configuration_changed(rdev);
+
+       rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+       rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+       rdev->pm.dpm.single_display = single_display;
+
        if (rdev->asic->dpm.force_performance_level) {
                if (rdev->pm.dpm.thermal_active) {
                        enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
index c507896aca45a43a3fe271b79b84e7db61bfb85a..197b157b73d09b96f7ced03c6703b6696c210793 100644 (file)
@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
                        /* see if we can skip over some allocations */
                } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
+               for (i = 0; i < RADEON_NUM_RINGS; ++i)
+                       radeon_fence_ref(fences[i]);
+
                spin_unlock(&sa_manager->wq.lock);
                r = radeon_fence_wait_any(rdev, fences, false);
+               for (i = 0; i < RADEON_NUM_RINGS; ++i)
+                       radeon_fence_unref(&fences[i]);
                spin_lock(&sa_manager->wq.lock);
                /* if we have nothing to wait for block */
                if (r == -ENOENT) {
index e34307459e501f60895ca4a0abe156995706f921..e06ac546a90ff185a31f64b4ce6e9fd76d071adc 100644 (file)
@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
                                                       0, PAGE_SIZE,
                                                       PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
-                       while (--i) {
+                       while (i--) {
                                pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                                gtt->ttm.dma_address[i] = 0;
index 48d97c040f495cc69c0d890b5a5e6b4fee587a62..3979632b9225747bb65c82def8f6d6d440d404e6 100644 (file)
@@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 
        if (soffset) {
                /* make sure object fit at this offset */
-               eoffset = soffset + size;
+               eoffset = soffset + size - 1;
                if (soffset >= eoffset) {
                        r = -EINVAL;
                        goto error_unreserve;
                }
 
                last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
-               if (last_pfn > rdev->vm_manager.max_pfn) {
-                       dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+               if (last_pfn >= rdev->vm_manager.max_pfn) {
+                       dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
                                last_pfn, rdev->vm_manager.max_pfn);
                        r = -EINVAL;
                        goto error_unreserve;
@@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
        eoffset /= RADEON_GPU_PAGE_SIZE;
        if (soffset || eoffset) {
                struct interval_tree_node *it;
-               it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+               it = interval_tree_iter_first(&vm->va, soffset, eoffset);
                if (it && it != &bo_va->it) {
                        struct radeon_bo_va *tmp;
                        tmp = container_of(it, struct radeon_bo_va, it);
@@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
        if (soffset || eoffset) {
                spin_lock(&vm->status_lock);
                bo_va->it.start = soffset;
-               bo_va->it.last = eoffset - 1;
+               bo_va->it.last = eoffset;
                list_add(&bo_va->vm_status, &vm->cleared);
                spin_unlock(&vm->status_lock);
                interval_tree_insert(&bo_va->it, &vm->va);
@@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
        unsigned i;
 
        start >>= radeon_vm_block_size;
-       end >>= radeon_vm_block_size;
+       end = (end - 1) >> radeon_vm_block_size;
 
        for (i = start; i <= end; ++i)
                radeon_bo_fence(vm->page_tables[i].bo, fence, true);
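
These hunks switch the per-BO VA range to inclusive bounds: it.last becomes the last valid byte, the limit check becomes >=, and the last page-table block is derived with (end - 1) >> shift. A toy sketch of the arithmetic, assuming a 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long soffset = 0x1000, size = 0x2000;

        /* Inclusive last byte of the mapping: a range ending exactly at
         * the VA limit is no longer rejected as "above limit". */
        unsigned long eoffset = soffset + size - 1;     /* 0x2fff */

        /* Exclusive end 0x3000 covers pages 1..2; the inclusive form
         * yields the last page index directly. */
        unsigned long first = soffset >> PAGE_SHIFT;
        unsigned long last  = eoffset >> PAGE_SHIFT;

        printf("pages %lu..%lu\n", first, last);        /* pages 1..2 */
        return 0;
}
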
index 4c4a7218a3bd63c11f7f9c8d6b96583bef27368b..d1a7b58dd291f8798666276acded0290aae805e2 100644 (file)
 #define DCCG_AUDIO_DTO1_PHASE                           0x05c0
 #define DCCG_AUDIO_DTO1_MODULE                          0x05c4
 
+#define DENTIST_DISPCLK_CNTL                           0x0490
+#      define DENTIST_DPREFCLK_WDIVIDER(x)             (((x) & 0x7f) << 24)
+#      define DENTIST_DPREFCLK_WDIVIDER_MASK           (0x7f << 24)
+#      define DENTIST_DPREFCLK_WDIVIDER_SHIFT          24
+
 #define AFMT_AUDIO_SRC_CONTROL                          0x713c
 #define                AFMT_AUDIO_SRC_SELECT(x)                (((x) & 7) << 0)
 /* AFMT_AUDIO_SRC_SELECT
index 07a0d378e122b1b206b8aceb1a97234254d00e74..a01efe39a8206ec1b1d1fd26a3192c5d72abb885 100644 (file)
@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
                return -EINVAL;
        }
 
-       for (i = 0; i < sign->num; ++i) {
-               if (sign->val[i].chip_id == chip_id)
+       for (i = 0; i < le32_to_cpu(sign->num); ++i) {
+               if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
                        break;
        }
 
-       if (i == sign->num)
+       if (i == le32_to_cpu(sign->num))
                return -EINVAL;
 
        data += (256 - 64) / 4;
@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
        data[1] = sign->val[i].nonce[1];
        data[2] = sign->val[i].nonce[2];
        data[3] = sign->val[i].nonce[3];
-       data[4] = sign->len + 64;
+       data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
 
        memset(&data[5], 0, 44);
        memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
 
-       data += data[4] / 4;
+       data += le32_to_cpu(data[4]) / 4;
        data[0] = sign->val[i].sigval[0];
        data[1] = sign->val[i].sigval[1];
        data[2] = sign->val[i].sigval[2];
        data[3] = sign->val[i].sigval[3];
 
-       rdev->vce.keyselect = sign->val[i].keyselect;
+       rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
 
        return 0;
 }
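
The added le32_to_cpu()/cpu_to_le32() conversions make the signature parsing work on big-endian hosts too: the firmware blob stores its fields little-endian regardless of CPU byte order. A portable sketch of why the explicit conversion matters, assuming a little-endian on-disk layout as in this firmware header:

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for le32_to_cpu(): assemble the bytes explicitly
 * instead of type-punning through a u32 pointer, which only happens to
 * give the right answer on little-endian hosts. */
static uint32_t le32_load(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        const uint8_t raw[4] = { 0x40, 0x00, 0x00, 0x00 };  /* len = 64 */

        printf("%u\n", le32_load(raw));                     /* 64 everywhere */
        return 0;
}
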
index 6377e8151000f5401309133b635150acf128c2ad..67cebb23c940e2dc0019eab8b1dbe0b4a036650a 100644 (file)
@@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 {
        struct vmw_cmdbuf_man *man = header->man;
 
-       BUG_ON(!spin_is_locked(&man->lock));
+       lockdep_assert_held_once(&man->lock);
 
        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
index c49812b80dd0dae82c77096f75fbd4cced19ffb1..24fb348a44e141f71574f20d1e57bdaad2759ce6 100644 (file)
@@ -25,6 +25,7 @@
  *
  **************************************************************************/
 #include <linux/module.h>
+#include <linux/console.h>
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 static int __init vmwgfx_init(void)
 {
        int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force())
+               return -EINVAL;
+#endif
+
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
index 9b4bb9e74d7300793f3ecb1c0feef42085780819..7c2e118a77b03b8295fca0b447f9f195fe3848de 100644 (file)
@@ -763,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
        uint32_t format;
        struct drm_vmw_size content_base_size;
        struct vmw_resource *res;
+       unsigned int bytes_pp;
        int ret;
 
        switch (mode_cmd->depth) {
        case 32:
        case 24:
                format = SVGA3D_X8R8G8B8;
+               bytes_pp = 4;
                break;
 
        case 16:
        case 15:
                format = SVGA3D_R5G6B5;
+               bytes_pp = 2;
                break;
 
        case 8:
                format = SVGA3D_P8;
+               bytes_pp = 1;
                break;
 
        default:
@@ -785,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
                return -EINVAL;
        }
 
-       content_base_size.width  = mode_cmd->width;
+       content_base_size.width  = mode_cmd->pitch / bytes_pp;
        content_base_size.height = mode_cmd->height;
        content_base_size.depth  = 1;
 
index c4dcab048cb8ecbc38b7784408bfd7902d5a40a7..9098f13f2f44aaf16673addebd4c54d09417e36d 100644 (file)
@@ -630,10 +630,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
         *    on the ring. We will not signal if more data is
         *    to be placed.
         *
+        * Based on the channel signal state, we will decide
+        * which signaling policy will be applied.
+        *
         * If we cannot write to the ring-buffer, signal the host
         * even if we may not have written anything. This is a rare
         * enough condition that it should not matter.
         */
+
+       if (channel->signal_policy)
+               signal = true;
+       else
+               kick_q = true;
+
        if (((ret == 0) && kick_q && signal) || (ret))
                vmbus_setevent(channel);
 
@@ -733,10 +742,19 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
         *    on the ring. We will not signal if more data is
         *    to be placed.
         *
+        * Based on the channel signal state, we will decide
+        * which signaling policy will be applied.
+        *
         * If we cannot write to the ring-buffer, signal the host
         * even if we may not have written anything. This is a rare
         * enough condition that it should not matter.
         */
+
+       if (channel->signal_policy)
+               signal = true;
+       else
+               kick_q = true;
+
        if (((ret == 0) && kick_q && signal) || (ret))
                vmbus_setevent(channel);
 
index f155b83804819ff97455ba5312f62de7f14991e2..2b3105c8aed399f8ce9cd3df03d6a265378131fc 100644 (file)
@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
        struct ads1015_data *data = i2c_get_clientdata(client);
        unsigned int pga = data->channel_data[channel].pga;
        int fullscale = fullscale_table[pga];
-       const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
+       const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
 
        return DIV_ROUND_CLOSEST(reg * fullscale, mask);
 }
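
Changing mask from unsigned to int matters because reg can be negative (a signed ADC reading), and the usual arithmetic conversions would otherwise pull the whole division into unsigned arithmetic. A user-space sketch of the promotion bug, with an illustrative fullscale of 4096:

#include <stdio.h>

int main(void)
{
        int reg = -100;                 /* negative ADC reading */
        unsigned int umask = 0x7fff;    /* old type: drags reg to unsigned */
        int smask = 0x7fff;             /* fixed type: stays signed */

        /* -409600 becomes a huge unsigned value before the division,
         * so the "millivolts" result is garbage. */
        printf("unsigned divisor: %d\n", (int)(reg * 4096 / umask)); /* 131063 */
        printf("signed divisor:   %d\n", reg * 4096 / smask);        /* -12 */
        return 0;
}
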
index c8487894b31236cefd761b24cac48fb4e17e6d52..c43318d3416e20a4d027a32402b1cf751718b8c6 100644 (file)
@@ -930,6 +930,17 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
 static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+       {
+               /*
+                * CPU fan speed going up and down on Dell Studio XPS 8000
+                * for unknown reasons.
+                */
+               .ident = "Dell Studio XPS 8000",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
+               },
+       },
        {
                /*
                 * CPU fan speed going up and down on Dell Studio XPS 8100
index 82de3deeb18a7ddf5e041e695b35cc8b1500abea..685568b1236d4a26db2d685ce36dcb9729e9f3ab 100644 (file)
@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
                                  unsigned long *state)
 {
        struct gpio_fan_data *fan_data = cdev->devdata;
-       int r;
 
        if (!fan_data)
                return -EINVAL;
 
-       r = get_fan_speed_index(fan_data);
-       if (r < 0)
-               return r;
-
-       *state = r;
+       *state = fan_data->speed_index;
        return 0;
 }
 
index e25492137d8bec86a5ff406ff1025724df095fbd..93738dfbf6313ea09f9f970ce463ec8374b4f661 100644 (file)
@@ -548,7 +548,7 @@ static int coresight_name_match(struct device *dev, void *data)
        to_match = data;
        i_csdev = to_coresight_device(dev);
 
-       if (!strcmp(to_match, dev_name(&i_csdev->dev)))
+       if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
                return 1;
 
        return 0;
index 8e9637eea51254199efe1ad0580434590a6ef243..81115abf3c1f5b65db91791f17c75c8061bed6c1 100644 (file)
@@ -562,8 +562,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
        if (!dev)
                return -ENOMEM;
 
-       dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *),
-                                      GFP_KERNEL);
+       dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
        if (!dev->bsc_regmap)
                return -ENOMEM;
 
index f62d69799a9c55b0f79e2de5e29ec9a09b257c98..27fa0cb09538cebfd0f9388112cfe30abb773edd 100644 (file)
@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        switch (dev->device) {
        case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
        case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
+       case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
+       case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
        case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
                priv->features |= FEATURE_I2C_BLOCK_READ;
                priv->features |= FEATURE_IRQ;
index 0a26dd6d9b19f96d97c9b3b244509892afae7023..d6d2b358291045c2c278da65179caab373987fbf 100644 (file)
@@ -782,11 +782,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
        wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
 
        /* Check if the device started its remove_one */
-       spin_lock_irq(&cm.lock);
+       spin_lock_irqsave(&cm.lock, flags);
        if (!cm_dev->going_down)
                queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                                   msecs_to_jiffies(wait_time));
-       spin_unlock_irq(&cm.lock);
+       spin_unlock_irqrestore(&cm.lock, flags);
 
        cm_id_priv->timewait_info = NULL;
 }
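
The hunk above switches cm_enter_timewait() from spin_lock_irq()/spin_unlock_irq() to the irqsave variants, which save and restore the caller's interrupt state instead of unconditionally re-enabling IRQs on unlock; that matters when the function can be reached with interrupts already disabled. A minimal sketch of the pattern, assuming a hypothetical lock and critical section (names here are not from the patch):

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(my_lock);

        static void my_critical_section(void)
        {
                unsigned long flags;

                /* Save the current IRQ state, then disable interrupts. */
                spin_lock_irqsave(&my_lock, flags);
                /* ... work that must not race with the IRQ path ... */
                /* Restore the saved state rather than blindly enabling IRQs. */
                spin_unlock_irqrestore(&my_lock, flags);
        }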
index 2d762a2ecd81252cde34cbc2dbc1083c1d8832dc..17a15c56028cf501dbf6aae3149f82c91c3a21f1 100644 (file)
@@ -453,7 +453,7 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
        if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
                return ret;
 
-       if (dev_type == ARPHRD_ETHER)
+       if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
                ndev = dev_get_by_index(&init_net, bound_if_index);
 
        ret = ib_find_cached_gid_by_port(device, gid, port, ndev, NULL);
index cb78b1e9bcd9ec3035884e3c0c6504bf317dbfc8..f504ba73e5dc27200c988f342ee3d6423072ee9e 100644 (file)
@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
        error = l2t_send(tdev, skb, l2e);
        if (error < 0)
                kfree_skb(skb);
-       return error;
+       return error < 0 ? error : 0;
 }
 
 int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
        error = cxgb3_ofld_send(tdev, skb);
        if (error < 0)
                kfree_skb(skb);
-       return error;
+       return error < 0 ? error : 0;
 }
 
 static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
index 7e97cb55a6bfa9517a726b9be897d17563272a5d..c4e0915283904e034ffdb05be18420c9eac7fba8 100644 (file)
@@ -275,7 +275,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        props->max_sge = min(max_rq_sg, max_sq_sg);
        props->max_sge_rd = props->max_sge;
        props->max_cq              = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
-       props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
+       props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
        props->max_mr              = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
        props->max_pd              = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
        props->max_qp_rd_atom      = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
index 40f85bb3e0d3bdce5289a5c8c9c2418df33e4ca4..3eff35c2d453f7b11f9475f5120cb4df26f94dda 100644 (file)
@@ -100,9 +100,10 @@ static u32 credit_table[31] = {
        32768                   /* 1E */
 };
 
-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
+                        gfp_t gfp)
 {
-       unsigned long page = get_zeroed_page(GFP_KERNEL);
+       unsigned long page = get_zeroed_page(gfp);
 
        /*
         * Free the page if someone raced with us installing it.
@@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
  */
 static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
-                    enum ib_qp_type type, u8 port)
+                    enum ib_qp_type type, u8 port, gfp_t gfp)
 {
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
@@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
-                       get_map_page(qpt, map);
+                       get_map_page(qpt, map, gfp);
                        if (unlikely(!map->page))
                                break;
                }
@@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret;
+       gfp_t gfp;
+
 
        if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
            init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
-           init_attr->create_flags) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
+           init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+               return ERR_PTR(-EINVAL);
+
+       /* GFP_NOIO is applicable in RC QPs only */
+       if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
+           init_attr->qp_type != IB_QPT_RC)
+               return ERR_PTR(-EINVAL);
+
+       gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
+                       GFP_NOIO : GFP_KERNEL;
 
        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
@@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                sz = sizeof(struct qib_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct qib_swqe);
-               swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
+               swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
+                               gfp, PAGE_KERNEL);
                if (swq == NULL) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail;
@@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
-               qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
+               qp = kzalloc(sz + sg_list_sz, gfp);
                if (!qp) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_swq;
                }
                RCU_INIT_POINTER(qp->next, NULL);
-               qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
+               qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
                if (!qp->s_hdr) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_qp;
@@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct qib_rwqe);
-                       qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
-                                                  qp->r_rq.size * sz);
+                       if (gfp != GFP_NOIO)
+                               qp->r_rq.wq = vmalloc_user(
+                                               sizeof(struct qib_rwq) +
+                                               qp->r_rq.size * sz);
+                       else
+                               qp->r_rq.wq = __vmalloc(
+                                               sizeof(struct qib_rwq) +
+                                               qp->r_rq.size * sz,
+                                               gfp, PAGE_KERNEL);
+
                        if (!qp->r_rq.wq) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qp;
@@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                dev = to_idev(ibpd->device);
                dd = dd_from_dev(dev);
                err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
-                               init_attr->port_num);
+                               init_attr->port_num, gfp);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        vfree(qp->r_rq.wq);
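
The qib hunks above thread a gfp_t through QP setup so that RC QPs created with IB_QP_CREATE_USE_GFP_NOIO allocate with GFP_NOIO (avoiding re-entry into the I/O path during allocation) while all other QPs keep GFP_KERNEL. One subtlety visible in the diff is that vmalloc_user() always allocates with GFP_KERNEL, so the NOIO path has to fall back to __vmalloc() with explicit flags. A hedged sketch of that selection; the helper name is illustrative, not part of the driver:

        #include <linux/types.h>
        #include <linux/gfp.h>
        #include <linux/vmalloc.h>

        /* alloc_rq_buffer() is an illustrative stand-in. */
        static void *alloc_rq_buffer(size_t size, bool use_noio)
        {
                gfp_t gfp = use_noio ? GFP_NOIO : GFP_KERNEL;

                /* vmalloc_user() is GFP_KERNEL-only, so the NOIO path
                 * must use __vmalloc() with explicit flags instead. */
                if (gfp == GFP_NOIO)
                        return __vmalloc(size, gfp, PAGE_KERNEL);
                return vmalloc_user(size);
        }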
index f8ea069a3eafca4078ab70848c3804867609143c..b2fb5286dbd98250534964887a8c2c46678cd725 100644 (file)
@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct qib_ibdev *dev = to_idev(ibqp->device);
        struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
        struct qib_mcast *mcast = NULL;
-       struct qib_mcast_qp *p, *tmp;
+       struct qib_mcast_qp *p, *tmp, *delp = NULL;
        struct rb_node *n;
        int last = 0;
        int ret;
 
-       if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
-               ret = -EINVAL;
-               goto bail;
-       }
+       if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+               return -EINVAL;
 
        spin_lock_irq(&ibp->lock);
 
@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        while (1) {
                if (n == NULL) {
                        spin_unlock_irq(&ibp->lock);
-                       ret = -EINVAL;
-                       goto bail;
+                       return -EINVAL;
                }
 
                mcast = rb_entry(n, struct qib_mcast, rb_node);
@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
                 */
                list_del_rcu(&p->list);
                mcast->n_attached--;
+               delp = p;
 
                /* If this was the last attached QP, remove the GID too. */
                if (list_empty(&mcast->qp_list)) {
@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        }
 
        spin_unlock_irq(&ibp->lock);
+       /* QP not attached */
+       if (!delp)
+               return -EINVAL;
+       /*
+        * Wait for any list walkers to finish before freeing the
+        * list element.
+        */
+       wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+       qib_mcast_qp_free(delp);
 
-       if (p) {
-               /*
-                * Wait for any list walkers to finish before freeing the
-                * list element.
-                */
-               wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
-               qib_mcast_qp_free(p);
-       }
        if (last) {
                atomic_dec(&mcast->refcount);
                wait_event(mcast->wait, !atomic_read(&mcast->refcount));
@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
                dev->n_mcast_grps_allocated--;
                spin_unlock_irq(&dev->n_mcast_grps_lock);
        }
-
-       ret = 0;
-
-bail:
-       return ret;
+       return 0;
 }
 
 int qib_mcast_tree_empty(struct qib_ibport *ibp)
index 013bdfff2d4d023c30a57baf4c833788bfd7dbee..bf4959f4225bd35c356c49172e0f978e0c81166f 100644 (file)
@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
 static int __init iommu_go_to_state(enum iommu_init_state state);
 static void init_device_table_dma(void);
 
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+                                   u8 bank, u8 cntr, u8 fxn,
+                                   u64 *value, bool is_write);
+
 static inline void update_last_devid(u16 devid)
 {
        if (devid > amd_iommu_last_bdf)
@@ -1015,6 +1019,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
        pci_write_config_dword(iommu->dev, 0xf0, 0x90);
 }
 
+/*
+ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
+ * Workaround:
+ *     BIOS should enable ATS write permission check by setting
+ *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
+ */
+static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
+{
+       u32 value;
+
+       if ((boot_cpu_data.x86 != 0x15) ||
+           (boot_cpu_data.x86_model < 0x30) ||
+           (boot_cpu_data.x86_model > 0x3f))
+               return;
+
+       /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
+       value = iommu_read_l2(iommu, 0x47);
+
+       if (value & BIT(0))
+               return;
+
+       /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
+       iommu_write_l2(iommu, 0x47, value | BIT(0));
+
+       pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
+               dev_name(&iommu->dev->dev));
+}
+
 /*
  * This function glues the initialization function for one IOMMU
  * together and also allocates the command buffer and programs the
@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
        amd_iommu_pc_present = true;
 
        /* Check if the performance counters can be written to */
-       if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
-           (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
+       if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
+           (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
            (val != val2)) {
                pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
                amd_iommu_pc_present = false;
@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
        }
 
        amd_iommu_erratum_746_workaround(iommu);
+       amd_iommu_ats_write_check_workaround(iommu);
 
        iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
                                               amd_iommu_groups, "ivhd%d",
@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
 }
 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
 
-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+                                   u8 bank, u8 cntr, u8 fxn,
                                    u64 *value, bool is_write)
 {
-       struct amd_iommu *iommu;
        u32 offset;
        u32 max_offset_lim;
 
-       /* Make sure the IOMMU PC resource is available */
-       if (!amd_iommu_pc_present)
-               return -ENODEV;
-
-       /* Locate the iommu associated with the device ID */
-       iommu = amd_iommu_rlookup_table[devid];
-
        /* Check for valid iommu and pc register indexing */
-       if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
+       if (WARN_ON((fxn > 0x28) || (fxn & 7)))
                return -ENODEV;
 
        offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
        return 0;
 }
 EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
+
+int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+                                   u64 *value, bool is_write)
+{
+       struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+       /* Make sure the IOMMU PC resource is available */
+       if (!amd_iommu_pc_present || iommu == NULL)
+               return -ENODEV;
+
+       return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
+                                       value, is_write);
+}
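
After this refactor the exported amd_iommu_pc_get_set_reg_val() is a thin wrapper that checks amd_iommu_pc_present and the device-to-IOMMU rlookup before delegating, while internal callers that already hold an iommu pointer (such as init_iommu_perf_ctr() above) call the static helper directly. A hedged usage sketch of the exported entry point; the declaration matches the patch, but the caller below is hypothetical:

        #include <linux/types.h>

        int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
                                         u64 *value, bool is_write);

        /* Read bank 0, counter 0, function 0 for a given device ID.
         * Returns -ENODEV when the perf counters are unavailable. */
        static int read_one_iommu_counter(u16 devid, u64 *out)
        {
                return amd_iommu_pc_get_set_reg_val(devid, 0, 0, 0, out, false);
        }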
index 55a19e49205bcab9ad05c6d9bae05a4a6f02721a..3821c4786662a4841f9e83c33d6bd7f70cf6d53d 100644 (file)
@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
        /* Only care about add/remove events for physical functions */
        if (pdev->is_virtfn)
                return NOTIFY_DONE;
-       if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
+       if (action != BUS_NOTIFY_ADD_DEVICE &&
+           action != BUS_NOTIFY_REMOVED_DEVICE)
                return NOTIFY_DONE;
 
        info = dmar_alloc_pci_notify_info(pdev, action);
@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
-       else if (action == BUS_NOTIFY_DEL_DEVICE)
+       else if (action == BUS_NOTIFY_REMOVED_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);
 
index 986a53e3eb96b4bb56faedfdb36b4bd658aacb99..a2e1b7f14df29cc78b625ec88455438d0fa1fe07 100644 (file)
@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
                                rmrru->devices_cnt);
                        if(ret < 0)
                                return ret;
-               } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+               } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
                        dmar_remove_dev_scope(info, rmrr->segment,
                                rmrru->devices, rmrru->devices_cnt);
                }
@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
                                break;
                        else if(ret < 0)
                                return ret;
-               } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+               } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
                        if (dmar_remove_dev_scope(info, atsr->segment,
                                        atsru->devices, atsru->devices_cnt))
                                break;
index b12a5d58546f922b460d6aac34c073ff363ef1b8..37199b9b2cfa260659a98da3eb1a48053e7bddfb 100644 (file)
@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
            priority > AT91_AIC_IRQ_MAX_PRIORITY)
                return -EINVAL;
 
-       *val &= AT91_AIC_PRIOR;
+       *val &= ~AT91_AIC_PRIOR;
        *val |= priority;
 
        return 0;
index e23d1d18f9d6a39731bdd34633e2bbc9bc473525..a159529f9d53d14f3d8989eedd9d0ff7f9f7b8b3 100644 (file)
@@ -597,11 +597,6 @@ static void its_unmask_irq(struct irq_data *d)
        lpi_set_config(d, true);
 }
 
-static void its_eoi_irq(struct irq_data *d)
-{
-       gic_write_eoir(d->hwirq);
-}
-
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
@@ -638,7 +633,7 @@ static struct irq_chip its_irq_chip = {
        .name                   = "ITS",
        .irq_mask               = its_mask_irq,
        .irq_unmask             = its_unmask_irq,
-       .irq_eoi                = its_eoi_irq,
+       .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = its_set_affinity,
        .irq_compose_msi_msg    = its_irq_compose_msi_msg,
 };
index c22e2d40cb302452624c29efd44c483e577f5333..efe50845939d91fee149acb085912697031dd9a0 100644 (file)
@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
                writel(0, icoll_priv.intr + i);
 
        icoll_add_domain(np, ASM9260_NUM_IRQS);
+       set_handle_irq(icoll_handle_irq);
 
        return 0;
 }
index 8587d0f8d8c03300e6ece2da0f252b767d31f781..f6cb1b8bb981bc1fe9a132efba6f2cc732326533 100644 (file)
@@ -47,6 +47,7 @@
 #define INTC_ILR0              0x0100
 
 #define ACTIVEIRQ_MASK         0x7f    /* omap2/3 active interrupt bits */
+#define SPURIOUSIRQ_MASK       (0x1ffffff << 7)
 #define INTCPS_NR_ILR_REGS     128
 #define INTCPS_NR_MIR_REGS     4
 
@@ -330,11 +331,35 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
 static asmlinkage void __exception_irq_entry
 omap_intc_handle_irq(struct pt_regs *regs)
 {
+       extern unsigned long irq_err_count;
        u32 irqnr;
 
        irqnr = intc_readl(INTC_SIR);
+
+       /*
+        * A spurious IRQ can result if the interrupt that triggered the
+        * sorting is no longer active during the sorting (10 INTC
+        * functional clock cycles after interrupt assertion), or if a
+        * change in the interrupt mask affected the result during the
+        * sorting time. No special handling is required except ignoring
+        * the SIR register value just read and retrying.
+        * See section 6.2.5 of the AM335x TRM, Literature Number SPRUH73K.
+        *
+        * Often, a spurious interrupt situation has been fixed by
+        * adding a flush for the posted write acking the IRQ in the
+        * device driver. Typically, this is going to be the device
+        * driver whose interrupt was handled just before the spurious
+        * IRQ occurred. Pay attention to those device drivers if you
+        * run into the spurious IRQ condition below.
+        */
+       if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
+               pr_err_once("%s: spurious irq!\n", __func__);
+               irq_err_count++;
+               omap_ack_irq(NULL);
+               return;
+       }
+
        irqnr &= ACTIVEIRQ_MASK;
-       WARN_ONCE(!irqnr, "Spurious IRQ ?\n");
        handle_domain_irq(domain, irqnr, regs);
 }
 
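In the INTC_SIR layout assumed above, bits [6:0] carry the sorted active IRQ number and bits [31:7] are what SPURIOUSIRQ_MASK (0x1ffffff << 7) selects; the sort result is treated as spurious exactly when all 25 of those upper bits read back as 1. A standalone illustration of the check (userspace C, mirroring the defines above):

        #include <stdbool.h>
        #include <stdint.h>

        #define ACTIVEIRQ_MASK    0x7fu              /* bits 6..0 */
        #define SPURIOUSIRQ_MASK  (0x1ffffffu << 7)  /* bits 31..7 */

        static bool sir_is_spurious(uint32_t sir)
        {
                return (sir & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK;
        }

        /* Example: sir_is_spurious(0xffffffbf) is true; the low bits would
         * decode to IRQ 0x3f, but the value must be discarded and retried. */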
index 83392f856dfd5d86e67cc636ab61e0fb0f16750d..22b9e34ceb75c5318a25870bf346a27ba2e64052 100644 (file)
@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
        do {
                ret = btree_root(gc_root, c, &op, &writes, &stats);
                closure_sync(&writes);
+               cond_resched();
 
                if (ret && ret != -EAGAIN)
                        pr_warn("gc failed!");
@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
                rw_lock(true, b, b->level);
 
                if (b->key.ptr[0] != btree_ptr ||
-                   b->seq != seq + 1)
+                   b->seq != seq + 1) {
+                       op->lock = b->level;
                        goto out;
+               }
        }
 
        SET_KEY_PTRS(check_key, 1);
index 679a093a3bf6382a3edc89dc5be96e9019b938ea..8d0ead98eb6edc2c25eafad5f8b4ab0a49e91e2d 100644 (file)
@@ -685,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
        WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
             sysfs_create_link(&c->kobj, &d->kobj, d->name),
             "Couldn't create device <-> cache set symlinks");
+
+       clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
 }
 
 static void bcache_device_detach(struct bcache_device *d)
@@ -847,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
        buf[SB_LABEL_SIZE] = '\0';
        env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
 
-       if (atomic_xchg(&dc->running, 1))
+       if (atomic_xchg(&dc->running, 1)) {
+               kfree(env[1]);
+               kfree(env[2]);
                return;
+       }
 
        if (!d->c &&
            BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
@@ -1933,6 +1938,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                        else
                                err = "device busy";
                        mutex_unlock(&bch_register_lock);
+                       if (attr == &ksysfs_register_quiet)
+                               goto out;
                }
                goto err;
        }
@@ -1971,8 +1978,7 @@ out:
 err_close:
        blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 err:
-       if (attr != &ksysfs_register_quiet)
-               pr_info("error opening %s: %s", path, err);
+       pr_info("error opening %s: %s", path, err);
        ret = -EINVAL;
        goto out;
 }
@@ -2066,8 +2072,10 @@ static int __init bcache_init(void)
        closure_debug_init();
 
        bcache_major = register_blkdev(0, "bcache");
-       if (bcache_major < 0)
+       if (bcache_major < 0) {
+               unregister_reboot_notifier(&reboot);
                return bcache_major;
+       }
 
        if (!(bcache_wq = create_workqueue("bcache")) ||
            !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
index b23f88d9f18cf1caa863b1b4ed56c37901296ece..b9346cd9cda192bdf9142bfa462dc0ab74383707 100644 (file)
@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
 
 static bool dirty_pred(struct keybuf *buf, struct bkey *k)
 {
+       struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
+
+       BUG_ON(KEY_INODE(k) != dc->disk.id);
+
        return KEY_DIRTY(k);
 }
 
@@ -372,11 +376,24 @@ next:
        }
 }
 
+/*
+ * Returns true if we scanned the entire disk
+ */
 static bool refill_dirty(struct cached_dev *dc)
 {
        struct keybuf *buf = &dc->writeback_keys;
+       struct bkey start = KEY(dc->disk.id, 0, 0);
        struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
-       bool searched_from_start = false;
+       struct bkey start_pos;
+
+       /*
+        * Make sure the keybuf position is inside the range for this disk;
+        * at bringup we might not be attached yet, so this disk's inode
+        * number isn't initialized yet.
+        */
+       if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
+           bkey_cmp(&buf->last_scanned, &end) > 0)
+               buf->last_scanned = start;
 
        if (dc->partial_stripes_expensive) {
                refill_full_stripes(dc);
@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
                        return false;
        }
 
-       if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
-               buf->last_scanned = KEY(dc->disk.id, 0, 0);
-               searched_from_start = true;
-       }
-
+       start_pos = buf->last_scanned;
        bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
 
-       return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+       if (bkey_cmp(&buf->last_scanned, &end) < 0)
+               return false;
+
+       /*
+        * If we get to the end, start scanning again from the beginning,
+        * and only scan up to where we initially started scanning from:
+        */
+       buf->last_scanned = start;
+       bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
+
+       return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
 }
 
 static int bch_writeback_thread(void *arg)
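
refill_dirty() now performs a bounded, wrapping scan: continue from last_scanned to the end of this disk's key range, and only if the end is reached, wrap to the start and scan up to the position the call began at, reporting a full pass only when the circle closes. A self-contained sketch of the same wraparound idiom over a plain index range; bch_refill_keybuf() is stood in for by a visit callback, and all names here are illustrative:

        #include <stdbool.h>
        #include <stddef.h>

        /* Returns true only if the whole range [0, n) was covered this call;
         * 'budget' models the keybuf filling up before the scan completes. */
        static bool refill_from(size_t *pos, size_t n, size_t budget,
                                void (*visit)(size_t idx))
        {
                size_t start = *pos;

                /* First pass: from the saved position to the end. */
                while (*pos < n && budget) {
                        visit((*pos)++);
                        budget--;
                }
                if (*pos < n)
                        return false;

                /* Wrap around, but stop where this call started. */
                *pos = 0;
                while (*pos < start && budget) {
                        visit((*pos)++);
                        budget--;
                }
                return *pos >= start;
        }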
index 0a9dab187b79c7ef0a4429c4616a6985d320b964..073a042aed243b2660f6b70a380d0647c709aa65 100644 (file)
@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 
 static inline void bch_writeback_queue(struct cached_dev *dc)
 {
-       wake_up_process(dc->writeback_thread);
+       if (!IS_ERR_OR_NULL(dc->writeback_thread))
+               wake_up_process(dc->writeback_thread);
 }
 
 static inline void bch_writeback_add(struct cached_dev *dc)
index fae34e7a0b1e4e4d60b5867eff9422e432fba83b..12b5216c2cfed2758d19f55ba9d2c2d4f58d4deb 100644 (file)
@@ -69,7 +69,7 @@ struct dm_exception_store_type {
         * Update the metadata with this exception.
         */
        void (*commit_exception) (struct dm_exception_store *store,
-                                 struct dm_exception *e,
+                                 struct dm_exception *e, int valid,
                                  void (*callback) (void *, int success),
                                  void *callback_context);
 
index 3164b8bce2948591999f429e4633235c1f7b1062..4d3909393f2cce5488ced8843ccab1375d40d2d9 100644 (file)
@@ -695,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
 }
 
 static void persistent_commit_exception(struct dm_exception_store *store,
-                                       struct dm_exception *e,
+                                       struct dm_exception *e, int valid,
                                        void (*callback) (void *, int success),
                                        void *callback_context)
 {
@@ -704,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        struct core_exception ce;
        struct commit_callback *cb;
 
+       if (!valid)
+               ps->valid = 0;
+
        ce.old_chunk = e->old_chunk;
        ce.new_chunk = e->new_chunk;
        write_exception(ps, ps->current_committed++, &ce);
index 9b7c8c8049d6186f54bdfec114c43cb3ce4d77fa..4d50a12cf00c699b85a0df1c0b1dd86f0137b691 100644 (file)
@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
 }
 
 static void transient_commit_exception(struct dm_exception_store *store,
-                                      struct dm_exception *e,
+                                      struct dm_exception *e, int valid,
                                       void (*callback) (void *, int success),
                                       void *callback_context)
 {
        /* Just succeed */
-       callback(callback_context, 1);
+       callback(callback_context, valid);
 }
 
 static void transient_usage(struct dm_exception_store *store,
index c06b74e91cd6aeef00ef4eefae9953d4d8c8f91b..61f184ad081c2f0820e22740fcf0b615b813e547 100644 (file)
@@ -1438,8 +1438,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
        dm_table_event(s->ti->table);
 }
 
-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
+static void pending_complete(void *context, int success)
 {
+       struct dm_snap_pending_exception *pe = context;
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
@@ -1509,24 +1510,13 @@ out:
        free_pending_exception(pe);
 }
 
-static void commit_callback(void *context, int success)
-{
-       struct dm_snap_pending_exception *pe = context;
-
-       pending_complete(pe, success);
-}
-
 static void complete_exception(struct dm_snap_pending_exception *pe)
 {
        struct dm_snapshot *s = pe->snap;
 
-       if (unlikely(pe->copy_error))
-               pending_complete(pe, 0);
-
-       else
-               /* Update the metadata if we are persistent */
-               s->store->type->commit_exception(s->store, &pe->e,
-                                                commit_callback, pe);
+       /* Update the metadata if we are persistent */
+       s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
+                                        pending_complete, pe);
 }
 
 /*
index 63903a5a5d9ee3b580d552673b42b9716b99322c..a1cc797fe88f49243caa7eb03a603b2a81d1ec88 100644 (file)
@@ -3453,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti)
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
-       cancel_delayed_work(&pool->waker);
-       cancel_delayed_work(&pool->no_space_timeout);
+       cancel_delayed_work_sync(&pool->waker);
+       cancel_delayed_work_sync(&pool->no_space_timeout);
        flush_workqueue(pool->wq);
        (void) commit(pool);
 }
index 5df40480228b7a26e3c73ac78e963ce47ed25448..dd834927bc66e87bf4ec433bc96889068d3bbb82 100644 (file)
@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
 
        if (clone)
                free_rq_clone(clone);
+       else if (!tio->md->queue->mq_ops)
+               free_rq_tio(tio);
 }
 
 /*
index fca6dbcf9a4727f85d61f7d161094870f5db24d5..7e44005595c1e77d1982e7e89b1259ddea97bf9a 100644 (file)
@@ -152,12 +152,9 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
 
 static int brb_pop(struct bop_ring_buffer *brb)
 {
-       struct block_op *bop;
-
        if (brb_empty(brb))
                return -ENODATA;
 
-       bop = brb->bops + brb->begin;
        brb->begin = brb_next(brb, brb->begin);
 
        return 0;
index c38ef1a72b4aee35e0fbb2491cc89a07565065e9..e2a3833170e3229a3a8460915f5a747bb4a6f50f 100644 (file)
@@ -2313,9 +2313,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
                dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
                                 __func__, c->delivery_system, fe->ops.info.type);
 
-               /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
-                * do it, it is done for it. */
-               info->caps |= FE_CAN_INVERSION_AUTO;
+               /* Set the CAN_INVERSION_AUTO bit on, except in oneshot mode */
+               if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
+                       info->caps |= FE_CAN_INVERSION_AUTO;
                err = 0;
                break;
        }
index 0e209b56c76c1c487007121e336e0438944acb93..c6abeb4fba9db51827fc815fde920b1a30faa1fa 100644 (file)
@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
 {
        struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
        struct tda1004x_state* state = fe->demodulator_priv;
+       int status;
 
        dprintk("%s\n", __func__);
 
+       status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
+       if (status == -1)
+               return -EIO;
+
+       /* Only update the properties cache if the device is locked */
+       if (!(status & 8))
+               return 0;
+
        // inversion status
        fe_params->inversion = INVERSION_OFF;
        if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
index 5631ec004eedb8a5c91a758d20ab1c9351e512bf..01adcdc52346634cda4c7553bab709faf0df59fc 100644 (file)
@@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
        }
 
        /* tx 5v detect */
-       tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
+       tx_5v = irq_reg_0x70 & info->cable_det_mask;
        if (tx_5v) {
                v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
-               io_write(sd, 0x71, tx_5v);
                adv76xx_s_detect_tx_5v_ctrl(sd);
                if (handled)
                        *handled = true;
index 7830aef3db459661e9c3e436dc53477d3ae7625a..40f77685cc4a206eca9260ee1ad1271f7bdf1e35 100644 (file)
@@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
        if (!ir)
                return -ENOMEM;
 
+       spin_lock_init(&ir->ir_lock);
+
        if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
                ir->fifo_size = 64;
        else
index ce157edd45fa1adb3dd382037dd421f8b8590091..0e1ca2b00e61e3e78e3f30f1f357a2902aebb7d0 100644 (file)
@@ -168,6 +168,7 @@ static int si2157_init(struct dvb_frontend *fe)
                len = fw->data[fw->size - remaining];
                if (len > SI2157_ARGLEN) {
                        dev_err(&client->dev, "Bad firmware length\n");
+                       ret = -EINVAL;
                        goto err_release_firmware;
                }
                memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
index 146071b8e11618a12b480db88ee946c5af11e87b..bfff1d1c70ab0149b79bea42752140e292a9fe19 100644 (file)
@@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
        struct v4l2_fract *tpf = &cp->timeperframe;
        struct sd *sd = (struct sd *) gspca_dev;
 
-       /* Set requested framerate */
-       sd->frame_rate = tpf->denominator / tpf->numerator;
+       if (tpf->numerator == 0 || tpf->denominator == 0)
+               /* Set default framerate */
+               sd->frame_rate = 30;
+       else
+               /* Set requested framerate */
+               sd->frame_rate = tpf->denominator / tpf->numerator;
+
        if (gspca_dev->streaming)
                set_frame_rate(gspca_dev);
 
index c70ff406b07ac55cf6f48b2741930b39afa2ca2d..c028a5c2438ed19560c5416e4c39b56ad3e3da5a 100644 (file)
@@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
        struct v4l2_fract *tpf = &cp->timeperframe;
        int fr, i;
 
-       sd->framerate = tpf->denominator / tpf->numerator;
+       if (tpf->numerator == 0 || tpf->denominator == 0)
+               sd->framerate = 30;
+       else
+               sd->framerate = tpf->denominator / tpf->numerator;
+
        if (gspca_dev->streaming)
                setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
 
index 27b4b9e7c0c2f63c65c0fdaa1f515fc949a4dee2..502984c724ff5efd14ef10250dd5d8e653468111 100644 (file)
@@ -822,10 +822,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
                return res | POLLERR;
 
        /*
-        * For output streams you can write as long as there are fewer buffers
-        * queued than there are buffers available.
+        * For output streams you can call write() as long as there are fewer
+        * buffers queued than there are buffers available.
         */
-       if (q->is_output && q->queued_count < q->num_buffers)
+       if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
                return res | POLLOUT | POLLWRNORM;
 
        if (list_empty(&q->done_list)) {
index 85761d7eb333173040204a7a5593bf2c7cf06485..be2c8e248e2e30e31518738e8b16c30a766d905a 100644 (file)
@@ -414,7 +414,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
                delta = mftb() - psl_tb;
                if (delta < 0)
                        delta = -delta;
-       } while (cputime_to_usecs(delta) > 16);
+       } while (tb_to_ns(delta) > 16000);
 
        return 0;
 }
index c241e15cacb1f022e766a1280208f8cb6dfbd176..cbd4331fb45cb00368173e446b8619d852386a14 100644 (file)
@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
        mask <<= shift;
        val <<= shift;
 
-       v = (in_le32(ioaddr) & ~mask) || (val & mask);
+       v = (in_le32(ioaddr) & ~mask) | (val & mask);
 
        out_le32(ioaddr, v);
        return PCIBIOS_SUCCESSFUL;
index b2f2486b3d757cd1bba127d8328177716d539591..80f9afcb13823282a9859e08a96c36f55901efa6 100644 (file)
@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
 {
        struct mei_cl *cl = file->private_data;
 
-       return mei_cl_notify_request(cl, file, request);
+       if (request != MEI_HBM_NOTIFICATION_START &&
+           request != MEI_HBM_NOTIFICATION_STOP)
+               return -EINVAL;
+
+       return mei_cl_notify_request(cl, file, (u8)request);
 }
 
 /**
@@ -657,7 +661,9 @@ out:
  * @file: pointer to file structure
  * @band: band bitmap
  *
- * Return: poll mask
+ * Return: negative on error,
+ *         0 if it did no changes,
+ *         0 if it made no changes,
+ *         and positive if a process was added or deleted
 static int mei_fasync(int fd, struct file *file, int band)
 {
@@ -665,7 +671,7 @@ static int mei_fasync(int fd, struct file *file, int band)
        struct mei_cl *cl = file->private_data;
 
        if (!mei_cl_is_connected(cl))
-               return POLLERR;
+               return -ENODEV;
 
        return fasync_helper(fd, file, band, &cl->ev_async);
 }
index 3a9a79ec4343cd0f9f71158768e5707340def684..3d5087b03999dea605f2b424899077da2592fd53 100644 (file)
@@ -1076,8 +1076,7 @@ static int mmc_select_hs400(struct mmc_card *card)
        mmc_set_clock(host, max_dtr);
 
        /* Switch card to HS mode */
-       val = EXT_CSD_TIMING_HS |
-             card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+       val = EXT_CSD_TIMING_HS;
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                           EXT_CSD_HS_TIMING, val,
                           card->ext_csd.generic_cmd6_time,
@@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
        mmc_set_clock(host, max_dtr);
 
        /* Switch HS400 to HS DDR */
-       val = EXT_CSD_TIMING_HS |
-             card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+       val = EXT_CSD_TIMING_HS;
        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
                           val, card->ext_csd.generic_cmd6_time,
                           true, send_status, true);
index 141eaa923e18eecc140039d34604d5f2969c023b..967535d76e3468199979be0ffff23a7d0d3c2a93 100644 (file)
@@ -626,9 +626,9 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
         * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
         */
        if (!mmc_host_is_spi(card->host) &&
-               (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
-                card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
-                card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
+               (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
+                card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
+                card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
                err = mmc_execute_tuning(card);
 
                /*
@@ -638,7 +638,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
                 * difference between v3.00 and 3.01 spec means that CMD19
                 * tuning is also available for DDR50 mode.
                 */
-               if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
+               if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
                        pr_warn("%s: ddr50 tuning failed\n",
                                mmc_hostname(card->host));
                        err = 0;
index 16d838e6d623be3968a7a900e85348d17a642fe3..467b3cf80c44549c704080d78540c46599eb6882 100644 (file)
@@ -535,8 +535,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
         * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
         */
        if (!mmc_host_is_spi(card->host) &&
-           ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
-            (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
+           ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
+             (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
                err = mmc_execute_tuning(card);
 out:
        return err;
@@ -630,7 +630,7 @@ try_again:
         */
        if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
                err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
-                                       ocr);
+                                       ocr_card);
                if (err == -EAGAIN) {
                        sdio_reset(host);
                        mmc_go_idle(host);
index fb266745f8240d603956b8b791370ac4764f5a23..acece3299756e997b15020a100ad14def283c020 100644 (file)
@@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
        {
                .id     = 0x00280180,
                .mask   = 0x00ffffff,
-               .data   = &variant_u300,
+               .data   = &variant_nomadik,
        },
        {
                .id     = 0x00480180,
index ce08896b9d696b00440fe7807aeddf72b922f320..28a057fae0a196092c113af946bc5ef32629f28a 100644 (file)
@@ -804,7 +804,7 @@ static int pxamci_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
                goto out;
        } else {
-               mmc->caps |= host->pdata->gpio_card_ro_invert ?
+               mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
                        0 : MMC_CAP2_RO_ACTIVE_HIGH;
        }
 
index f6047fc9406204d6ee672b74544d0ec6866308cd..a5cda926d38eb23be20eece31c6b1ef0c44723a3 100644 (file)
@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
        .ops = &sdhci_acpi_ops_int,
 };
 
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+       int gpio_cd = mmc_gpio_get_cd(mmc);
+       struct sdhci_host *host = mmc_priv(mmc);
+       unsigned long flags;
+       int ret = 0;
+
+       if (!gpio_cd)
+               return 0;
+
+       pm_runtime_get_sync(mmc->parent);
+
+       spin_lock_irqsave(&host->lock, flags);
+
+       if (host->flags & SDHCI_DEVICE_DEAD)
+               goto out;
+
+       ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       pm_runtime_mark_last_busy(mmc->parent);
+       pm_runtime_put_autosuspend(mmc->parent);
+
+       return ret;
+}
+
 static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
                                      const char *hid, const char *uid)
 {
@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
 
        /* Platform specific code during sd probe slot goes here */
 
+       if (hid && !strcmp(hid, "80865ACA"))
+               host->mmc_host_ops.get_cd = bxt_get_cd;
+
        return 0;
 }
 
index cf7ad458b4f44fe60beb0a614882fbb985aede37..45ee07d3a76150390f82926f0343d50d953fcaf0 100644 (file)
@@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host,
        if (sdhci_pci_spt_drive_strength > 0)
                drive_strength = sdhci_pci_spt_drive_strength & 0xf;
        else
-               drive_strength = 1; /* 33-ohm */
+               drive_strength = 0; /* Default 50-ohm */
 
        if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
                drive_strength = 0; /* Default 50-ohm */
@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
        sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
 }
 
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+       int gpio_cd = mmc_gpio_get_cd(mmc);
+       struct sdhci_host *host = mmc_priv(mmc);
+       unsigned long flags;
+       int ret = 0;
+
+       if (!gpio_cd)
+               return 0;
+
+       pm_runtime_get_sync(mmc->parent);
+
+       spin_lock_irqsave(&host->lock, flags);
+
+       if (host->flags & SDHCI_DEVICE_DEAD)
+               goto out;
+
+       ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       pm_runtime_mark_last_busy(mmc->parent);
+       pm_runtime_put_autosuspend(mmc->parent);
+
+       return ret;
+}
+
 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
 {
        slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
        slot->cd_con_id = NULL;
        slot->cd_idx = 0;
        slot->cd_override_level = true;
+       if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+           slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+               slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+
        return 0;
 }
 
index b48565ed5616c79302008963a05aa7e1e18d742c..8814eb6b83bfd5d28cb73cbc6e215c1e1f997916 100644 (file)
@@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
                BUG_ON(len > 65536);
 
-               /* tran, valid */
-               sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
-               desc += host->desc_sz;
+               if (len) {
+                       /* tran, valid */
+                       sdhci_adma_write_desc(host, desc, addr, len,
+                                             ADMA2_TRAN_VALID);
+                       desc += host->desc_sz;
+               }
 
                /*
                 * If this triggers then we have a calculation bug
@@ -1364,7 +1367,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
        sdhci_runtime_pm_get(host);
 
        /* Firstly check card presence */
-       present = sdhci_do_get_cd(host);
+       present = mmc->ops->get_cd(mmc);
 
        spin_lock_irqsave(&host->lock, flags);
 
@@ -2760,7 +2763,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
 
 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
 {
-       if (host->runtime_suspended || host->bus_on)
+       if (host->bus_on)
                return;
        host->bus_on = true;
        pm_runtime_get_noresume(host->mmc->parent);
@@ -2768,7 +2771,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
 
 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
 {
-       if (host->runtime_suspended || !host->bus_on)
+       if (!host->bus_on)
                return;
        host->bus_on = false;
        pm_runtime_put_noidle(host->mmc->parent);
@@ -2861,6 +2864,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
 
        host = mmc_priv(mmc);
        host->mmc = mmc;
+       host->mmc_host_ops = sdhci_ops;
+       mmc->ops = &host->mmc_host_ops;
 
        return host;
 }
@@ -3057,7 +3062,6 @@ int sdhci_add_host(struct sdhci_host *host)
        /*
         * Set host parameters.
         */
-       mmc->ops = &sdhci_ops;
        max_clk = host->max_clk;
 
        if (host->ops->get_min_clock)
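
The sdhci change above stops pointing every host at the shared sdhci_ops table and instead embeds a writable mmc_host_ops copy in each sdhci_host, initialized in sdhci_alloc_host(); per-platform quirks (the two bxt_get_cd() hooks earlier in this series) can then override a single callback without affecting other hosts or requiring the shared table to be writable. A generic sketch of the copy-then-override pattern, with illustrative names:

        #include <stddef.h>

        /* All names here are generic stand-ins for the sdhci structures. */
        struct host_ops {
                int (*get_cd)(void *priv);
        };

        static const struct host_ops default_ops = {
                .get_cd = NULL,          /* shared, read-only template */
        };

        struct my_host {
                struct host_ops ops;     /* private, writable copy */
        };

        static void my_host_init(struct my_host *h)
        {
                h->ops = default_ops;    /* copy the template at alloc time */
        }

        static void my_platform_quirk(struct my_host *h, int (*get_cd)(void *))
        {
                h->ops.get_cd = get_cd;  /* patch just this host's callback */
        }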
index 9d4aa31b683ac2d64e16f31f88d8d0893162a225..9c331ac5ad6b0d8a6b9b88c7ef1de6b966989bc2 100644 (file)
@@ -425,6 +425,7 @@ struct sdhci_host {
 
        /* Internal data */
        struct mmc_host *mmc;   /* MMC structure */
+       struct mmc_host_ops mmc_host_ops;       /* MMC host ops */
        u64 dma_mask;           /* custom DMA mask */
 
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
index 4498e92116b808d2a62f6e146f33f3b41838ee33..b47122d3e8d8c71b435cdc47a275f349bf102a22 100644 (file)
@@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
        struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
        struct mmc_request *mrq = host->mrq;
        struct mmc_data *data = mrq ? mrq->data : NULL;
-       struct scatterlist *sg = host->sg ?: data->sg;
+       struct scatterlist *sg;
 
        dev_warn(mmc_dev(host->mmc),
                 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
@@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
        case USDHI6_WAIT_FOR_MWRITE:
        case USDHI6_WAIT_FOR_READ:
        case USDHI6_WAIT_FOR_WRITE:
+               sg = host->sg ?: data->sg;
                dev_dbg(mmc_dev(host->mmc),
                        "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
                        data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
index 2a1b6e037e1a1ced496ac446ca07d6037ef61d0e..0134ba32a05784b65d1a0e6d470eee7a857df74c 100644 (file)
@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
        vol->changing_leb = 1;
        vol->ch_lnum = req->lnum;
 
-       vol->upd_buf = vmalloc(req->bytes);
+       vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
        if (!vol->upd_buf)
                return -ENOMEM;
 
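The UBI fix above rounds the update buffer up to the volume's minimum I/O unit so the final, partially filled write still has a full-sized buffer behind it. For a power-of-two alignment a (which min_io_size is on the flash devices UBI drives), the kernel's ALIGN(x, a) computes (x + a - 1) & ~(a - 1); a quick standalone check of the arithmetic:

        #include <assert.h>
        #include <stdint.h>

        /* Same arithmetic as the kernel's ALIGN() for power-of-two 'a'. */
        #define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

        int main(void)
        {
                /* Illustrative min_io_size of 512 bytes. */
                assert(ALIGN_POW2(1, 512)   == 512);
                assert(ALIGN_POW2(512, 512) == 512);
                assert(ALIGN_POW2(513, 512) == 1024);
                return 0;
        }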
index f1692e418fe40a17cbafc06a480996d2463e54c2..28bbca0af238bb6a2642045a2e4e0273e38f2dc5 100644 (file)
@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
 static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
                                                struct rtnl_link_stats64 *stats);
 static void bond_slave_arr_handler(struct work_struct *work);
+static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+                                 int mod);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -2418,7 +2420,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
                 struct slave *slave)
 {
        struct arphdr *arp = (struct arphdr *)skb->data;
-       struct slave *curr_active_slave;
+       struct slave *curr_active_slave, *curr_arp_slave;
        unsigned char *arp_ptr;
        __be32 sip, tip;
        int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
@@ -2465,26 +2467,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
                     &sip, &tip);
 
        curr_active_slave = rcu_dereference(bond->curr_active_slave);
+       curr_arp_slave = rcu_dereference(bond->current_arp_slave);
 
-       /* Backup slaves won't see the ARP reply, but do come through
-        * here for each ARP probe (so we swap the sip/tip to validate
-        * the probe).  In a "redundant switch, common router" type of
-        * configuration, the ARP probe will (hopefully) travel from
-        * the active, through one switch, the router, then the other
-        * switch before reaching the backup.
+       /* We 'trust' the received ARP enough to validate it if:
+        *
+        * (a) the slave receiving the ARP is active (which includes the
+        * current ARP slave, if any), or
+        *
+        * (b) the receiving slave isn't active, but there is a currently
+        * active slave and it received valid arp reply(s) after it became
+        * the currently active slave, or
+        *
+        * (c) there is an ARP slave that sent an ARP during the prior ARP
+        * interval, and we receive an ARP reply on any slave.  We accept
+        * these because switch FDB update delays may deliver the ARP
+        * reply to a slave other than the sender of the ARP request.
         *
-        * We 'trust' the arp requests if there is an active slave and
-        * it received valid arp reply(s) after it became active. This
-        * is done to avoid endless looping when we can't reach the
+        * Note: for (b), backup slaves are receiving the broadcast ARP
+        * request, not a reply.  This request passes from the sending
+        * slave through the L2 switch(es) to the receiving slave.  Since
+        * this is checking the request, sip/tip are swapped for
+        * validation.
+        *
+        * This is done to avoid endless looping when we can't reach the
         * arp_ip_target and fool ourselves with our own arp requests.
         */
-
        if (bond_is_active_slave(slave))
                bond_validate_arp(bond, slave, sip, tip);
        else if (curr_active_slave &&
                 time_after(slave_last_rx(bond, curr_active_slave),
                            curr_active_slave->last_link_up))
                bond_validate_arp(bond, slave, tip, sip);
+       else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
+                bond_time_in_interval(bond,
+                                      dev_trans_start(curr_arp_slave->dev), 1))
+               bond_validate_arp(bond, slave, sip, tip);
 
 out_unlock:
        if (arp != (struct arphdr *)skb->data)
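
Case (c) above only trusts a reply when the current ARP slave actually transmitted within roughly the last arp_interval, which is what the newly forward-declared bond_time_in_interval() decides from dev_trans_start(). A hedged approximation of such a jiffies window check; the real helper lives elsewhere in bond_main.c and may differ in its margins, and the name below is illustrative:

        #include <linux/types.h>
        #include <linux/jiffies.h>

        /* Illustrative: true if last_act lies within the last 'mod' intervals
         * of int_ms milliseconds; time_in_range() copes with jiffies wrap. */
        static bool last_act_in_window(unsigned long last_act,
                                       unsigned int int_ms, int mod)
        {
                unsigned long span = msecs_to_jiffies(int_ms) * mod;

                return time_in_range(jiffies, last_act, last_act + span);
        }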
index fc5b75675cd8b6dbebbbc79d357001676900fefa..eb7192fab5932bcde88f31f0b90604997e27a524 100644 (file)
@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
  */
 #define EMS_USB_ARM7_CLOCK 8000000
 
+#define CPC_TX_QUEUE_TRIGGER_LOW       25
+#define CPC_TX_QUEUE_TRIGGER_HIGH      35
+
 /*
  * CAN-Message representation in a CPC_MSG. Message object type is
  * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
        switch (urb->status) {
        case 0:
                dev->free_slots = dev->intr_in_buffer[1];
+               if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
+                       if (netif_queue_stopped(netdev))
+                               netif_wake_queue(netdev);
+               }
                break;
 
        case -ECONNRESET: /* unlink */
@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
        /* Release context */
        context->echo_index = MAX_TX_URBS;
 
-       if (netif_queue_stopped(netdev))
-               netif_wake_queue(netdev);
 }
 
 /*
@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
        int err, i;
 
        dev->intr_in_buffer[0] = 0;
-       dev->free_slots = 15; /* initial size */
+       dev->free_slots = 50; /* initial size */
 
        for (i = 0; i < MAX_RX_URBS; i++) {
                struct urb *urb = NULL;
@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
 
                /* Slow down tx path */
                if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
-                   dev->free_slots < 5) {
+                   dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
                        netif_stop_queue(netdev);
                }
        }
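
The ems_usb rework above replaces "wake the queue on every completed TX URB" with explicit hysteresis on the device's reported free-slot count: transmission stops once fewer than CPC_TX_QUEUE_TRIGGER_LOW slots remain, and the queue is only woken again when the interrupt endpoint reports more than CPC_TX_QUEUE_TRIGGER_HIGH, so the queue does not thrash around a single threshold. A self-contained model of the watermark logic; names are generic, and the real driver uses netif_stop_queue()/netif_wake_queue():

        #include <stdbool.h>

        #define TRIGGER_LOW   25
        #define TRIGGER_HIGH  35

        struct txq { bool stopped; };

        static void txq_update(struct txq *q, int free_slots)
        {
                if (!q->stopped && free_slots < TRIGGER_LOW)
                        q->stopped = true;       /* ~netif_stop_queue() */
                else if (q->stopped && free_slots > TRIGGER_HIGH)
                        q->stopped = false;      /* ~netif_wake_queue() */
        }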
index b06dba05594aaeb79988487bc431412b3dfae2c4..2dea39b5cb0b6ee3f1f54379e31abdfab3c4e978 100644 (file)
@@ -1519,7 +1519,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
 
        /* no PVID with ranges, otherwise it's a bug */
        if (pvid)
-               err = _mv88e6xxx_port_pvid_set(ds, port, vid);
+               err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
 unlock:
        mutex_unlock(&ps->smi_mutex);
 
index 79789d8e52da3e7aba39acdb4be1ddcae1baaa9e..ca5ac5d6f4e6ca6d87ce029b7f4e06e8653e2bac 100644 (file)
@@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
        return ret;
 }
 
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+       /* Check if we will never have enough descriptors,
+        * as gso_segs can be more than current ring size
+        */
+        * as gso_segs can be more than the current ring size
+}
+
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 * vlan encapsulated.
                 */
                if (skb->protocol == htons(ETH_P_8021Q) ||
-                   skb->protocol == htons(ETH_P_8021AD))
-                       return tg3_tso_bug(tp, tnapi, txq, skb);
+                   skb->protocol == htons(ETH_P_8021AD)) {
+                       if (tg3_tso_bug_gso_check(tnapi, skb))
+                               return tg3_tso_bug(tp, tnapi, txq, skb);
+                       goto drop;
+               }
 
                if (!skb_is_gso_v6(skb)) {
                        if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-                           tg3_flag(tp, TSO_BUG))
-                               return tg3_tso_bug(tp, tnapi, txq, skb);
-
+                           tg3_flag(tp, TSO_BUG)) {
+                               if (tg3_tso_bug_gso_check(tnapi, skb))
+                                       return tg3_tso_bug(tp, tnapi, txq, skb);
+                               goto drop;
+                       }
                        ip_csum = iph->check;
                        ip_tot_len = iph->tot_len;
                        iph->check = 0;
@@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
-               if (mss) {
+               if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
                        /* If it's a TSO packet, do GSO instead of
                         * allocating and copying to a large linear SKB
                         */
index 1671fa3332c2d3011db8c308a17ea5a9bf8b3fb2..7ba6d530b0c0ab6e3d1c3d4ea21b0b950ebe3c3f 100644 (file)
@@ -33,7 +33,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.3.0.12"
+#define DRV_VERSION            "2.3.0.20"
 #define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
index 1ffd1050860bb5cde576fd291e0651b351d578ff..1fdf5fe12a9562251e3bcee9fe635872053357a2 100644 (file)
@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                          int wait)
 {
        struct devcmd2_controller *dc2c = vdev->devcmd2;
-       struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+       struct devcmd2_result *result;
+       u8 color;
        unsigned int i;
        int delay, err;
        u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
                return 0;
 
+       result = dc2c->result + dc2c->next_result;
+       color = dc2c->color;
+
+       dc2c->next_result++;
+       if (dc2c->next_result == dc2c->result_size) {
+               dc2c->next_result = 0;
+               dc2c->color = dc2c->color ? 0 : 1;
+       }
+
        for (delay = 0; delay < wait; delay++) {
-               if (result->color == dc2c->color) {
-                       dc2c->next_result++;
-                       if (dc2c->next_result == dc2c->result_size) {
-                               dc2c->next_result = 0;
-                               dc2c->color = dc2c->color ? 0 : 1;
-                       }
+               if (result->color == color) {
                        if (result->error) {
                                err = result->error;
                                if (err != ERR_ECMDUNKNOWN ||
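
The reordering above matters because the firmware flips a color bit every time it wraps the result ring: the consumer must compare each slot against the generation that was current when the command was posted. Snapshotting result and color, and advancing next_result before the poll loop, keeps the bookkeeping to exactly once per command even if the wait times out. A toy model of the color-bit handshake (ring size and payloads are invented):

#include <stdio.h>

#define RING_SIZE 2

struct result { int color; int payload; };

static struct result ring[RING_SIZE];
static int next_result;
static int expected_color = 1;

static int wait_result(void)
{
	struct result *r = &ring[next_result];
	int color = expected_color;

	/* advance before polling so a wrap can't skew the comparison */
	if (++next_result == RING_SIZE) {
		next_result = 0;
		expected_color ^= 1;
	}
	return r->color == color ? r->payload : -1;
}

int main(void)
{
	ring[0] = (struct result){ .color = 1, .payload = 42 };
	printf("%d\n", wait_result());	/* 42: slot has current color */
	printf("%d\n", wait_result());	/* -1: next slot still stale */
	return 0;
}
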
index 038f9ce391e626f02d3fe2f51a19fbd4b08a2711..1494997c4f7e3bef36141ea439d812ca9bb1a335 100644 (file)
@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
        .enable         = mlx4_en_phc_enable,
 };
 
+#define MLX4_EN_WRAP_AROUND_SEC        10ULL
+
+/* This function calculates the max shift that enables the user range
+ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
+ */
+static u32 freq_to_shift(u16 freq)
+{
+       u32 freq_khz = freq * 1000;
+       u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+       u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
+               max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+       /* calculate max possible multiplier in order to fit in 64bit */
+       u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+
+       /* This comes from the reverse of clocksource_khz2mult */
+       return ilog2(div_u64(max_mul * freq_khz, 1000000));
+}
+
 void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 {
        struct mlx4_dev *dev = mdev->dev;
@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
        memset(&mdev->cycles, 0, sizeof(mdev->cycles));
        mdev->cycles.read = mlx4_en_read_clock;
        mdev->cycles.mask = CLOCKSOURCE_MASK(48);
-       /* Using shift to make calculation more accurate. Since current HW
-        * clock frequency is 427 MHz, and cycles are given using a 48 bits
-        * register, the biggest shift when calculating using u64, is 14
-        * (max_cycles * multiplier < 2^64)
-        */
-       mdev->cycles.shift = 14;
+       mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
        mdev->cycles.mult =
                clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
        mdev->nominal_c_mult = mdev->cycles.mult;
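
Working freq_to_shift() through once, assuming the 427 MHz core clock that the deleted comment mentioned: freq_khz is 427000, ten seconds of cycles is about 4.27e9, which rounds up to 2^32, so max_mul is roughly 2^32 and ilog2(max_mul * freq_khz / 10^6) comes out to 30 rather than the old hardcoded 14. A userspace re-derivation with plain-C stand-ins for the kernel helpers (the power-of-two rounding is hardcoded for this input, and a GCC/Clang builtin stands in for ilog2()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq_khz = 427 * 1000;			/* 427 MHz clock */
	uint64_t max_val_cycles = freq_khz * 1000 * 10;	/* 10 s of cycles */
	uint64_t rounded = (1ULL << 32) - 1;	/* roundup_pow_of_two() - 1 */
	uint64_t max_mul = UINT64_MAX / rounded;
	uint64_t x = max_mul * freq_khz / 1000000;
	int shift = 63 - __builtin_clzll(x);	/* ilog2() */

	printf("shift = %d\n", shift);		/* prints 30 */
	return 0;
}
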
index 7869f97de5daf94cb99554c51a28a610aca76348..67e9633ea9c7cfa6aa2a6a93402d10a5d60da0ba 100644 (file)
@@ -2381,8 +2381,6 @@ out:
        /* set offloads */
        priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                                      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
-       priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-       priv->dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
 }
 
 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2393,8 +2391,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
        /* unset offloads */
        priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                                      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
-       priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
-       priv->dev->features    &= ~NETIF_F_GSO_UDP_TUNNEL;
 
        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                                  VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -3020,6 +3016,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                priv->rss_hash_fn = ETH_RSS_HASH_TOP;
        }
 
+       if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+               dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+               dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
+       }
+
        mdev->pndev[port] = dev;
        mdev->upper[port] = NULL;
 
index ee99e67187f5b1cc68fc224b91d5b2bf2d133b05..3904b5fc0b7c904548763ffbb2d31fbcccfe5b6c 100644 (file)
@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        stats->collisions = 0;
        stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
        stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
-       stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+       stats->rx_over_errors = 0;
        stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
        stats->rx_frame_errors = 0;
        stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-       stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+       stats->rx_missed_errors = 0;
        stats->tx_aborted_errors = 0;
        stats->tx_carrier_errors = 0;
        stats->tx_fifo_errors = 0;
index 617fb22b5d81e75e34a96a73a3c9918e060d3cb1..7dbeafa659340eea3dbd28f5ad53c4b4cd8e4b05 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/if_bridge.h>
 #include <linux/workqueue.h>
 #include <linux/jiffies.h>
+#include <linux/rtnetlink.h>
 #include <net/switchdev.h>
 
 #include "spectrum.h"
@@ -812,6 +813,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
 
        mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
 
+       rtnl_lock();
        do {
                mlxsw_reg_sfn_pack(sfn_pl);
                err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
@@ -824,6 +826,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
                        mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
 
        } while (num_rec);
+       rtnl_unlock();
 
        kfree(sfn_pl);
        mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
index e9f2349e98bc0f32894c4880faf98f30b7737eab..52ec3d6e056a02d6fe9a7992608b4a30e52c2744 100644 (file)
@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
        info.addr = lw->addr;
        info.vid = lw->vid;
 
+       rtnl_lock();
        if (learned && removing)
                call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
                                         lw->rocker_port->dev, &info.info);
        else if (learned && !removing)
                call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
                                         lw->rocker_port->dev, &info.info);
+       rtnl_unlock();
 
        rocker_port_kfree(lw->trans, work);
 }
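
This hunk and the mlxsw one above fix the same class of bug: call_switchdev_notifiers() lands in bridge FDB code that expects the RTNL to be held, but both handlers run from workqueues where no such lock is implied. The shape of the fix, as a kernel-style sketch rather than standalone code:

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

/* deferred FDB work must enter switchdev under the RTNL (sketch) */
static void fdb_notify_work_example(struct work_struct *work)
{
	rtnl_lock();
	/* ... process records, call_switchdev_notifiers(...) ... */
	rtnl_unlock();
}
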
index 47b711739ba902cd300f53489a8d00aa87d1a660..e6cefd0e326225f0f10d299e7122dd07cf44644a 100644 (file)
@@ -845,6 +845,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
        struct skb_shared_hwtstamps *shhwtstamps = NULL;
        struct sk_buff *skb;
        unsigned long flags;
+       u8 overflow;
+
+       overflow = (phy_rxts->ns_hi >> 14) & 0x3;
+       if (overflow)
+               pr_debug("rx timestamp queue overflow, count %d\n", overflow);
 
        spin_lock_irqsave(&dp83640->rx_lock, flags);
 
@@ -887,6 +892,7 @@ static void decode_txts(struct dp83640_private *dp83640,
        struct skb_shared_hwtstamps shhwtstamps;
        struct sk_buff *skb;
        u64 ns;
+       u8 overflow;
 
        /* We must already have the skb that triggered this. */
 
@@ -896,6 +902,17 @@ static void decode_txts(struct dp83640_private *dp83640,
                pr_debug("have timestamp but tx_queue empty\n");
                return;
        }
+
+       overflow = (phy_txts->ns_hi >> 14) & 0x3;
+       if (overflow) {
+               pr_debug("tx timestamp queue overflow, count %d\n", overflow);
+               while (skb) {
+                       skb_complete_tx_timestamp(skb, NULL);
+                       skb = skb_dequeue(&dp83640->tx_queue);
+               }
+               return;
+       }
+
        ns = phy2txts(phy_txts);
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);
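
The PHY packs a two-bit overflow counter into bits 15:14 of the ns_hi timestamp word; a non-zero value means hardware timestamps were dropped, so the receive path logs it while the transmit path also flushes queued skbs that will now never be matched to a timestamp. The field extraction in isolation (the register value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ns_hi = 0x8abc;		/* example register word */
	uint8_t overflow = (ns_hi >> 14) & 0x3;

	printf("overflow count = %u\n", overflow);	/* prints 2 */
	return 0;
}
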
index 0a37f840fcc52d17cee2d23dce50d27723735bad..4e0068e775f9c7dbc2028934d2790821cee97d12 100644 (file)
@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
 
                if (!__pppoe_xmit(sk_pppox(relay_po), skb))
                        goto abort_put;
+
+               sock_put(sk_pppox(relay_po));
        } else {
                if (sock_queue_rcv_skb(sk, skb))
                        goto abort_kfree;
index 597c53e0a2ecce6a7fecaaf5032006f31795b307..f7e8c79349adbdd2c60ef4ccb3766999a171e948 100644 (file)
@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
        return i < MAX_CALLID;
 }
 
-static int add_chan(struct pppox_sock *sock)
+static int add_chan(struct pppox_sock *sock,
+                   struct pptp_addr *sa)
 {
        static int call_id;
 
        spin_lock(&chan_lock);
-       if (!sock->proto.pptp.src_addr.call_id) {
+       if (!sa->call_id) {
                call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
                if (call_id == MAX_CALLID) {
                        call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
                        if (call_id == MAX_CALLID)
                                goto out_err;
                }
-               sock->proto.pptp.src_addr.call_id = call_id;
-       } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
+               sa->call_id = call_id;
+       } else if (test_bit(sa->call_id, callid_bitmap)) {
                goto out_err;
+       }
 
-       set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
-       rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
+       sock->proto.pptp.src_addr = *sa;
+       set_bit(sa->call_id, callid_bitmap);
+       rcu_assign_pointer(callid_sock[sa->call_id], sock);
        spin_unlock(&chan_lock);
 
        return 0;
@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
        struct sock *sk = sock->sk;
        struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
        struct pppox_sock *po = pppox_sk(sk);
-       struct pptp_opt *opt = &po->proto.pptp;
        int error = 0;
 
        if (sockaddr_len < sizeof(struct sockaddr_pppox))
@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
 
        lock_sock(sk);
 
-       opt->src_addr = sp->sa_addr.pptp;
-       if (add_chan(po))
+       if (sk->sk_state & PPPOX_DEAD) {
+               error = -EALREADY;
+               goto out;
+       }
+
+       if (sk->sk_state & PPPOX_BOUND) {
                error = -EBUSY;
+               goto out;
+       }
+
+       if (add_chan(po, &sp->sa_addr.pptp))
+               error = -EBUSY;
+       else
+               sk->sk_state |= PPPOX_BOUND;
 
+out:
        release_sock(sk);
        return error;
 }
@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
        }
 
        opt->dst_addr = sp->sa_addr.pptp;
-       sk->sk_state = PPPOX_CONNECTED;
+       sk->sk_state |= PPPOX_CONNECTED;
 
  end:
        release_sock(sk);
index 5fccc5a8153f4d0f73f0fb212912302c19679c27..982e0acd1a36748c964d131d552e27a60f54fc1d 100644 (file)
@@ -492,6 +492,7 @@ static const struct usb_device_id products[] = {
 
        /* 3. Combined interface devices matching on interface number */
        {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
+       {QMI_FIXED_INTF(0x05c6, 0x6001, 3)},    /* 4G LTE usb-modem U901 */
        {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
        {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
        {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
index 405a7b6cca2560219d490d9e1597138c0c85259e..e0fcda4ddd55a401b5aedc9e4d79ace206758afb 100644 (file)
@@ -1984,11 +1984,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                     vxlan->cfg.port_max, true);
 
        if (info) {
-               if (info->key.tun_flags & TUNNEL_CSUM)
-                       flags |= VXLAN_F_UDP_CSUM;
-               else
-                       flags &= ~VXLAN_F_UDP_CSUM;
-
                ttl = info->key.ttl;
                tos = info->key.tos;
 
@@ -2003,8 +1998,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto drop;
                sk = vxlan->vn4_sock->sock->sk;
 
-               if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
-                       df = htons(IP_DF);
+               if (info) {
+                       if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+                               df = htons(IP_DF);
+
+                       if (info->key.tun_flags & TUNNEL_CSUM)
+                               flags |= VXLAN_F_UDP_CSUM;
+                       else
+                               flags &= ~VXLAN_F_UDP_CSUM;
+               }
 
                memset(&fl4, 0, sizeof(fl4));
                fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2102,6 +2104,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        return;
                }
 
+               if (info) {
+                       if (info->key.tun_flags & TUNNEL_CSUM)
+                               flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
+                       else
+                               flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+               }
+
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
                err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
                                      0, ttl, src_port, dst_port, htonl(vni << 8), md,
index e18629a16fb0260dff9b3486c572f4bfd2166fcd..0961f33de05eb5907c9f77cf7431ddf41783aa40 100644 (file)
@@ -1154,6 +1154,9 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
 
        priv->ucode_loaded = false;
        iwl_trans_stop_device(priv->trans);
+       ret = iwl_trans_start_hw(priv->trans);
+       if (ret)
+               goto out;
 
        priv->wowlan = true;
 
index d6e0c1b5c20cf31e3b8860f9afb8361a0d44c443..8215d7405f64edd2a320493db4603e37c962895b 100644 (file)
@@ -1267,6 +1267,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                return -EBUSY;
        }
 
+       /* we don't support "match all" in the firmware */
+       if (!req->n_match_sets)
+               return -EOPNOTSUPP;
+
        ret = iwl_mvm_check_running_scans(mvm, type);
        if (ret)
                return ret;
index 639761fb2bfb2f8a1dbd5925984e6ad0c15f9587..d58c094f2f044a67f7a4bb2d0fab0b3784ad7131 100644 (file)
@@ -384,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
@@ -401,10 +402,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
index 90283453073c1684b4332445e0028b65482af179..8c7204738aa3f076b28c55ef41bff4a3fdb42e36 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -924,9 +926,16 @@ monitor:
        if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
                iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
                               trans_pcie->fw_mon_phys >> dest->base_shift);
-               iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
-                              (trans_pcie->fw_mon_phys +
-                               trans_pcie->fw_mon_size) >> dest->end_shift);
+               if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+                       iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+                                      (trans_pcie->fw_mon_phys +
+                                       trans_pcie->fw_mon_size - 256) >>
+                                               dest->end_shift);
+               else
+                       iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+                                      (trans_pcie->fw_mon_phys +
+                                       trans_pcie->fw_mon_size) >>
+                                               dest->end_shift);
        }
 }
 
index f46c9d7f652813a46fe6d8bbe24a48ab814afd6f..7f471bff435c08ec1dc009b3fa7288dc0f7dcc80 100644 (file)
@@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                                                                      hw_queue);
                        if (rx_remained_cnt == 0)
                                return;
-
+                       buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
+                               rtlpci->rx_ring[rxring_idx].idx];
+                       pdesc = (struct rtl_rx_desc *)skb->data;
                } else {        /* rx descriptor */
                        pdesc = &rtlpci->rx_ring[rxring_idx].desc[
                                rtlpci->rx_ring[rxring_idx].idx];
@@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
                if (unlikely(!new_skb))
                        goto no_new;
-               if (rtlpriv->use_new_trx_flow) {
-                       buffer_desc =
-                         &rtlpci->rx_ring[rxring_idx].buffer_desc
-                               [rtlpci->rx_ring[rxring_idx].idx];
-                       /*means rx wifi info*/
-                       pdesc = (struct rtl_rx_desc *)skb->data;
-               }
                memset(&rx_status , 0 , sizeof(rx_status));
                rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
                                                 &rx_status, (u8 *)pdesc, skb);
index 11344121c55e072f4a1e5788d24202971e565cc4..47e32cb0ec1a418281a3b1a2308a16fb5ff2e666 100644 (file)
@@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
        u8 tid;
 
        rtl8188ee_bt_reg_init(hw);
-       rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
-
        rtlpriv->dm.dm_initialgain_enable = 1;
        rtlpriv->dm.dm_flag = 0;
        rtlpriv->dm.disable_framebursting = 0;
@@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
        rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+       rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+       rtlpriv->cfg->mod_params->sw_crypto =
+               rtlpriv->cfg->mod_params->sw_crypto;
+       rtlpriv->cfg->mod_params->disable_watchdog =
+               rtlpriv->cfg->mod_params->disable_watchdog;
        if (rtlpriv->cfg->mod_params->disable_watchdog)
                pr_info("watchdog disabled\n");
        if (!rtlpriv->psc.inactiveps)
index de6cb6c3a48cc7b931e752596272a3783558f842..4780bdc63b2bf1cfb5b11e728a2806d167b3f657 100644 (file)
@@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
        rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+       rtlpriv->cfg->mod_params->sw_crypto =
+               rtlpriv->cfg->mod_params->sw_crypto;
        if (!rtlpriv->psc.inactiveps)
                pr_info("rtl8192ce: Power Save off (module option)\n");
        if (!rtlpriv->psc.fwctrl_lps)
index fd4a5353d2169e39ef3f3a12c26aee577778f162..7c6f7f0d18c602011d9048fc54565b105efaf64e 100644 (file)
@@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
        rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
+       rtlpriv->cfg->mod_params->sw_crypto =
+               rtlpriv->cfg->mod_params->sw_crypto;
 
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
index b19d0398215fdd61cc5f7c7287f07be92f5ef7f4..c6e09a19de1ac5bd4fecbf43df28b6d32e3dfc30 100644 (file)
@@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
index e1fd27c888bfc66d617f1672d50457ca32e22e47..31baca41ac2f4c90641bb6270d33915dd9f299b3 100644 (file)
@@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
        rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+       rtlpriv->cfg->mod_params->sw_crypto =
+               rtlpriv->cfg->mod_params->sw_crypto;
        if (!rtlpriv->psc.inactiveps)
                pr_info("Power Save off (module option)\n");
        if (!rtlpriv->psc.fwctrl_lps)
@@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
index 3859b3e3d158d5e5dde4914073320283a2455dff..ff49a8c0ff61a27efde458ec9a920744f4e23bbd 100644 (file)
@@ -150,6 +150,11 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
        rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+       rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+       rtlpriv->cfg->mod_params->sw_crypto =
+               rtlpriv->cfg->mod_params->sw_crypto;
+       rtlpriv->cfg->mod_params->disable_watchdog =
+               rtlpriv->cfg->mod_params->disable_watchdog;
        if (rtlpriv->cfg->mod_params->disable_watchdog)
                pr_info("watchdog disabled\n");
        rtlpriv->psc.reg_fwctrl_lps = 3;
@@ -267,6 +272,8 @@ static struct rtl_mod_params rtl8723e_mod_params = {
        .swctrl_lps = false,
        .fwctrl_lps = true,
        .debug = DBG_EMERG,
+       .msi_support = false,
+       .disable_watchdog = false,
 };
 
 static struct rtl_hal_cfg rtl8723e_hal_cfg = {
@@ -383,12 +390,14 @@ module_param_named(debug, rtl8723e_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
                   bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
 
index d091f1d5f91eb2bbd5f399e01187c7de47cf4aab..a78eaeda000801c05dfbe6b6962784cfc3915e40 100644 (file)
@@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
        rtl8723be_bt_reg_init(hw);
-       rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
        rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
 
        rtlpriv->dm.dm_initialgain_enable = 1;
@@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
        rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
        rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+       rtlpriv->cfg->mod_params->sw_crypto =
+                rtlpriv->cfg->mod_params->sw_crypto;
+       rtlpriv->cfg->mod_params->disable_watchdog =
+                rtlpriv->cfg->mod_params->disable_watchdog;
        if (rtlpriv->cfg->mod_params->disable_watchdog)
                pr_info("watchdog disabled\n");
        rtlpriv->psc.reg_fwctrl_lps = 3;
@@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = {
        .inactiveps = true,
        .swctrl_lps = false,
        .fwctrl_lps = true,
+       .msi_support = false,
+       .disable_watchdog = false,
+       .debug = DBG_EMERG,
 };
 
 static struct rtl_hal_cfg rtl8723be_hal_cfg = {
index 4fa916dffc91924a0354c3c779ae5bf41cb79cec..72a2c1969646c73db5c657f284f7941eb275fc0c 100644 (file)
@@ -636,6 +636,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
                msi_base = be32_to_cpup(msi_map + 2);
                rid_len = be32_to_cpup(msi_map + 3);
 
+               if (rid_base & ~map_mask) {
+                       dev_err(parent_dev,
+                               "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
+                               map_mask, rid_base);
+                       return rid_out;
+               }
+
                msi_controller_node = of_find_node_by_phandle(phandle);
 
                matched = (masked_rid >= rid_base &&
@@ -655,7 +662,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
        if (!matched)
                return rid_out;
 
-       rid_out = masked_rid + msi_base;
+       rid_out = masked_rid - rid_base + msi_base;
        dev_dbg(dev,
                "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
                dev_name(parent_dev), map_mask, rid_base, msi_base,
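
A worked instance of the corrected translation, with an invented msi-map entry of rid-base 0x100 and msi-base 0x2000: an input RID of 0x108 is offset 8 into the window and must land at 0x2008, whereas the old masked_rid + msi_base arithmetic produced 0x2108:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rid_base = 0x100, msi_base = 0x2000, masked_rid = 0x108;

	uint32_t fixed = masked_rid - rid_base + msi_base;	/* 0x2008 */
	uint32_t old = masked_rid + msi_base;			/* 0x2108, wrong */

	printf("fixed=0x%x old=0x%x\n", fixed, old);
	return 0;
}
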
index ed34c9520a02987503b02e2f371ea9e2a6d3a1a4..6153853ca9c31e319f4c6aae6dd92ed05215ba07 100644 (file)
 
 #define to_keystone_pcie(x)    container_of(x, struct keystone_pcie, pp)
 
-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
-{
-       return sys->private_data;
-}
-
 static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
                                             u32 *bit_pos)
 {
@@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
        struct pcie_port *pp;
 
        msi = irq_data_get_msi_desc(d);
-       pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+       pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
        ks_pcie = to_keystone_pcie(pp);
        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
        update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
@@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
        u32 offset;
 
        msi = irq_data_get_msi_desc(d);
-       pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+       pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
        ks_pcie = to_keystone_pcie(pp);
        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
@@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
        u32 offset;
 
        msi = irq_data_get_msi_desc(d);
-       pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+       pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
        ks_pcie = to_keystone_pcie(pp);
        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
index ff538568a61774af57290eac6b27cd6ea9457908..0b3e0bfa7be5d505dffbcc5cef190be0ce1caaaa 100644 (file)
@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
 {
        pci_lock_rescan_remove();
 
-       if (slot->flags & SLOT_IS_GOING_AWAY)
+       if (slot->flags & SLOT_IS_GOING_AWAY) {
+               pci_unlock_rescan_remove();
                return -ENODEV;
+       }
 
        /* configure all functions */
        if (!(slot->flags & SLOT_ENABLED))
index 0bf82a20a0fb479ccfbdb43479bf9b0cf6ecff6a..48d21e0edd568cedc16b1626f78f0a95b22bca1c 100644 (file)
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
        rpc->rpd = dev;
        INIT_WORK(&rpc->dpc_handler, aer_isr);
        mutex_init(&rpc->rpc_mutex);
-       init_waitqueue_head(&rpc->wait_release);
 
        /* Use PCIe bus function to store rpc into PCIe device */
        set_service_data(dev, rpc);
@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
                if (rpc->isr)
                        free_irq(dev->irq, dev);
 
-               wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
-
+               flush_work(&rpc->dpc_handler);
                aer_disable_rootport(rpc);
                kfree(rpc);
                set_service_data(dev, NULL);
index 84420b7c9456ecbb0e43a9e8ca539af5b2ee1a20..945c939a86c5c2919335d85a95dbeee34f9c0bff 100644 (file)
@@ -72,7 +72,6 @@ struct aer_rpc {
                                         * recovery on the same
                                         * root port hierarchy
                                         */
-       wait_queue_head_t wait_release;
 };
 
 struct aer_broadcast_data {
index fba785e9df75570b35a9b90e671bc8d5dbad6e91..4e14de0f0f98055ae1c3fbdb7338f5d8d778e96d 100644 (file)
@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
        while (get_e_source(rpc, &e_src))
                aer_isr_one_error(p_device, &e_src);
        mutex_unlock(&rpc->rpc_mutex);
-
-       wake_up(&rpc->wait_release);
 }
 
 /**
index c777b97207d5378936a8f25a54be603d76691ab9..5f70fee59a947da1daddbeb2253043dc79c794fb 100644 (file)
@@ -53,7 +53,7 @@ struct pcifront_device {
 };
 
 struct pcifront_sd {
-       int domain;
+       struct pci_sysdata sd;
        struct pcifront_device *pdev;
 };
 
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
                                    unsigned int domain, unsigned int bus,
                                    struct pcifront_device *pdev)
 {
-       sd->domain = domain;
+       /* Because we do not expose that information via XenBus. */
+       sd->sd.node = first_online_node;
+       sd->sd.domain = domain;
        sd->pdev = pdev;
 }
 
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
        dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
                 domain, bus);
 
-       bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
-       sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+       bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+       sd = kzalloc(sizeof(*sd), GFP_KERNEL);
        if (!bus_entry || !sd) {
                err = -ENOMEM;
                goto err_out;
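
Replacing the bare domain field with an embedded struct pci_sysdata works because generic x86 PCI code casts bus->sysdata to struct pci_sysdata; with the generic structure as the first member, a pointer to pcifront_sd is also a valid pointer to it. The underlying C rule, shown with stand-in types:

#include <stdio.h>

struct pci_sysdata_like { int domain; int node; };
struct pcifront_sd_like { struct pci_sysdata_like sd; void *pdev; };

int main(void)
{
	struct pcifront_sd_like sd = { { 3, 0 }, NULL };
	/* a pointer to a struct also points at its first member */
	struct pci_sysdata_like *generic = (struct pci_sysdata_like *)&sd;

	printf("domain=%d\n", generic->domain);	/* prints 3 */
	return 0;
}
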
index 8c7f27db6ad352260f8ad8675758ed365b6390a6..e7e574dc667a35d6d2c68ddf9e21744453d11bec 100644 (file)
@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
 
 int phy_power_on(struct phy *phy)
 {
-       int ret;
+       int ret = 0;
 
        if (!phy)
-               return 0;
+               goto out;
 
        if (phy->pwr) {
                ret = regulator_enable(phy->pwr);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        ret = phy_pm_runtime_get_sync(phy);
        if (ret < 0 && ret != -ENOTSUPP)
-               return ret;
+               goto err_pm_sync;
+
        ret = 0; /* Override possible ret == -ENOTSUPP */
 
        mutex_lock(&phy->mutex);
@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
                ret = phy->ops->power_on(phy);
                if (ret < 0) {
                        dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
-                       goto out;
+                       goto err_pwr_on;
                }
        }
        ++phy->power_count;
        mutex_unlock(&phy->mutex);
        return 0;
 
-out:
+err_pwr_on:
        mutex_unlock(&phy->mutex);
        phy_pm_runtime_put_sync(phy);
+err_pm_sync:
        if (phy->pwr)
                regulator_disable(phy->pwr);
-
+out:
        return ret;
 }
 EXPORT_SYMBOL_GPL(phy_power_on);
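
The relabeled error paths restore the usual unwind ordering: each failure releases exactly what was acquired before it, in reverse order, so a failed runtime-PM get still disables the regulator enabled earlier. The idiom in miniature, with stub steps standing in for regulator_enable() and phy_pm_runtime_get_sync():

#include <stdio.h>

static int step_a(void) { return 0; }	/* e.g. regulator_enable() */
static int step_b(void) { return -1; }	/* e.g. runtime-PM get, failing */
static void undo_a(void) { puts("undo step_a"); }

static int do_power_on(void)
{
	int ret = step_a();

	if (ret)
		goto out;
	ret = step_b();
	if (ret)
		goto err_a;	/* unwind in reverse order of setup */
	return 0;

err_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	printf("ret=%d\n", do_power_on());	/* undo step_a, then ret=-1 */
	return 0;
}
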
index a313dfc0245f550ac5322fec17d890662d52e790..d78ee151c9e4a703bbeb071cda98032cd03dccee 100644 (file)
@@ -864,6 +864,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"),
                },
        },
+       {
+               .ident = "Lenovo ideapad Y700-17ISK",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
+               },
+       },
        {
                .ident = "Lenovo Yoga 2 11 / 13 / Pro",
                .matches = {
@@ -892,6 +899,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"),
                },
        },
+       {
+               .ident = "Lenovo Yoga 700",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
+               },
+       },
        {
                .ident = "Lenovo Yoga 900",
                .matches = {
index c01302989ee47817eb37acac36755c63dd0b262c..b0f62141ea4d975fe65cc26d892fd494190235bd 100644 (file)
@@ -2484,6 +2484,14 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
        brightness = __get_lcd_brightness(dev);
        if (brightness < 0)
                return 0;
+       /*
+        * If transflective backlight is supported and the brightness is zero
+        * (lowest brightness level), the set_lcd_brightness function will
+        * activate the transflective backlight, making the LCD appear to be
+        * turned off. Simply increment the brightness level to avoid that.
+        */
+       if (dev->tr_backlight_supported && brightness == 0)
+               brightness++;
        ret = set_lcd_brightness(dev, brightness);
        if (ret) {
                pr_debug("Backlight method is read-only, disabling backlight support\n");
index 8df0b0e62976d460c96a0be4dffbe53363586b4a..00676208080e6b63afcea9b58d2dedb5d6356b55 100644 (file)
@@ -446,6 +446,7 @@ config REGULATOR_MC13892
 config REGULATOR_MT6311
        tristate "MediaTek MT6311 PMIC"
        depends on I2C
+       select REGMAP_I2C
        help
          Say y here to select this option to enable the power regulator of
          MediaTek MT6311 PMIC.
index 35de22fdb7a06d96e0bb7bdf3e199165b5b83cc1..f2e1a39ce0f35258447093fd84223fc25b087e29 100644 (file)
@@ -27,8 +27,8 @@
 #define AXP20X_IO_ENABLED              0x03
 #define AXP20X_IO_DISABLED             0x07
 
-#define AXP22X_IO_ENABLED              0x04
-#define AXP22X_IO_DISABLED             0x03
+#define AXP22X_IO_ENABLED              0x03
+#define AXP22X_IO_DISABLED             0x04
 
 #define AXP20X_WORKMODE_DCDC2_MASK     BIT(2)
 #define AXP20X_WORKMODE_DCDC3_MASK     BIT(1)
index a263c10359e1553f007a1a031e8b76733c463109..4abfbdb285ec3181d19cf80c7fe4588ee8081759 100644 (file)
@@ -3031,6 +3031,7 @@ static void dasd_setup_queue(struct dasd_block *block)
                max = block->base->discipline->max_blocks << block->s2b_shift;
        }
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
+       block->request_queue->limits.max_dev_sectors = max;
        blk_queue_logical_block_size(block->request_queue,
                                     block->bp_block);
        blk_queue_max_hw_sectors(block->request_queue, max);
index 184b1dbeb55463b768eb9aa7215665686d5e4591..286782c60da4e3b197d7d9901b7e17c2e69673ac 100644 (file)
@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
                spin_unlock_irqrestore(&lcu->lock, flags);
                cancel_work_sync(&lcu->suc_data.worker);
                spin_lock_irqsave(&lcu->lock, flags);
-               if (device == lcu->suc_data.device)
+               if (device == lcu->suc_data.device) {
+                       dasd_put_device(device);
                        lcu->suc_data.device = NULL;
+               }
        }
        was_pending = 0;
        if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
                was_pending = 1;
                cancel_delayed_work_sync(&lcu->ruac_data.dwork);
                spin_lock_irqsave(&lcu->lock, flags);
-               if (device == lcu->ruac_data.device)
+               if (device == lcu->ruac_data.device) {
+                       dasd_put_device(device);
                        lcu->ruac_data.device = NULL;
+               }
        }
        private->lcu = NULL;
        spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
        if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
                            " alias data in lcu (rc = %d), retry later", rc);
-               schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
+               if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
+                       dasd_put_device(device);
        } else {
+               dasd_put_device(device);
                lcu->ruac_data.device = NULL;
                lcu->flags &= ~UPDATE_PENDING;
        }
@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
         */
        if (!usedev)
                return -EINVAL;
+       dasd_get_device(usedev);
        lcu->ruac_data.device = usedev;
-       schedule_delayed_work(&lcu->ruac_data.dwork, 0);
+       if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
+               dasd_put_device(usedev);
        return 0;
 }
 
@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
        ASCEBC((char *) &cqr->magic, 4);
        ccw = cqr->cpaddr;
        ccw->cmd_code = DASD_ECKD_CCW_RSCK;
-       ccw->flags = 0;
+       ccw->flags = CCW_FLAG_SLI;
        ccw->count = 16;
        ccw->cda = (__u32)(addr_t) cqr->data;
        ((char *)cqr->data)[0] = reason;
@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
        /* 3. read new alias configuration */
        _schedule_lcu_update(lcu, device);
        lcu->suc_data.device = NULL;
+       dasd_put_device(device);
        spin_unlock_irqrestore(&lcu->lock, flags);
 }
 
@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
        }
        lcu->suc_data.reason = reason;
        lcu->suc_data.device = device;
+       dasd_get_device(device);
        spin_unlock(&lcu->lock);
-       schedule_work(&lcu->suc_data.worker);
+       if (!schedule_work(&lcu->suc_data.worker))
+               dasd_put_device(device);
 };
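
Every hunk in this file applies one pattern: take a device reference before queueing work, and drop it again when schedule_work() or schedule_delayed_work() returns false because the item was already pending, so exactly one reference is held per queued execution and none leaks. A userspace model with the work scheduler reduced to a flag:

#include <stdbool.h>
#include <stdio.h>

static int refcount = 1;
static bool queued;

static bool schedule_work_sim(void)
{
	if (queued)
		return false;	/* already pending, like schedule_work() */
	queued = true;
	return true;
}

static void queue_with_ref(void)
{
	refcount++;			/* dasd_get_device() */
	if (!schedule_work_sim())
		refcount--;		/* dasd_put_device() */
}

int main(void)
{
	queue_with_ref();
	queue_with_ref();		/* second attempt must not leak */
	printf("refcount=%d queued=%d\n", refcount, queued);	/* 2 1 */
	return 0;
}
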
index 16a1935cc9c1131359e514befb04b6a7bb155f9b..e197c6f39de2d17dd6672bc0328764fd0c858943 100644 (file)
@@ -2192,7 +2192,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
        /* Clear outstanding commands array. */
        for (que = 0; que < ha->max_req_queues; que++) {
                req = ha->req_q_map[que];
-               if (!req)
+               if (!req || !test_bit(que, ha->req_qid_map))
                        continue;
                req->out_ptr = (void *)(req->ring + req->length);
                *req->out_ptr = 0;
@@ -2209,7 +2209,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 
        for (que = 0; que < ha->max_rsp_queues; que++) {
                rsp = ha->rsp_q_map[que];
-               if (!rsp)
+               if (!rsp || !test_bit(que, ha->rsp_qid_map))
                        continue;
                rsp->in_ptr = (void *)(rsp->ring + rsp->length);
                *rsp->in_ptr = 0;
@@ -4961,7 +4961,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
 
        for (i = 1; i < ha->max_rsp_queues; i++) {
                rsp = ha->rsp_q_map[i];
-               if (rsp) {
+               if (rsp && test_bit(i, ha->rsp_qid_map)) {
                        rsp->options &= ~BIT_0;
                        ret = qla25xx_init_rsp_que(base_vha, rsp);
                        if (ret != QLA_SUCCESS)
@@ -4976,8 +4976,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
        }
        for (i = 1; i < ha->max_req_queues; i++) {
                req = ha->req_q_map[i];
-               if (req) {
-               /* Clear outstanding commands array. */
+               if (req && test_bit(i, ha->req_qid_map)) {
+                       /* Clear outstanding commands array. */
                        req->options &= ~BIT_0;
                        ret = qla25xx_init_req_que(base_vha, req);
                        if (ret != QLA_SUCCESS)
index ccf6a7f9902407aed21a5610b724a333c901c323..0e59731f95ad1d11e684cf6de2eeb0f894fd4755 100644 (file)
@@ -3018,9 +3018,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                    "MSI-X: Failed to enable support "
                    "-- %d/%d\n Retry with %d vectors.\n",
                    ha->msix_count, ret, ret);
+               ha->msix_count = ret;
+               ha->max_rsp_queues = ha->msix_count - 1;
        }
-       ha->msix_count = ret;
-       ha->max_rsp_queues = ha->msix_count - 1;
        ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
                                ha->msix_count, GFP_KERNEL);
        if (!ha->msix_entries) {
index c5dd594f6c316185b203f3e25c1595b4c320da81..cf7ba52bae665fa482b8535f92e5fcb1dbe25244 100644 (file)
@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
        /* Delete request queues */
        for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                req = ha->req_q_map[cnt];
-               if (req) {
+               if (req && test_bit(cnt, ha->req_qid_map)) {
                        ret = qla25xx_delete_req_que(vha, req);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00ea,
@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
        /* Delete response queues */
        for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                rsp = ha->rsp_q_map[cnt];
-               if (rsp) {
+               if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
                        ret = qla25xx_delete_rsp_que(vha, rsp);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00eb,
index bfa9a64c316b6ca2eec9a1b2277608f68101d92d..fc6674db4f2db3b5da454c0df566e638477d8114 100644 (file)
@@ -397,6 +397,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
        int cnt;
 
        for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
+               if (!test_bit(cnt, ha->req_qid_map))
+                       continue;
+
                req = ha->req_q_map[cnt];
                qla2x00_free_req_que(ha, req);
        }
@@ -404,6 +407,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
        ha->req_q_map = NULL;
 
        for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+               if (!test_bit(cnt, ha->rsp_qid_map))
+                       continue;
+
                rsp = ha->rsp_q_map[cnt];
                qla2x00_free_rsp_que(ha, rsp);
        }
index ddbe2e7ac14d095129cc9cbd90ab87cbd414bc36..c3e62252460452aac6507db5ddc7ad576f969ffa 100644 (file)
@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
        if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
                for (i = 0; i < vha->hw->max_req_queues; i++) {
                        struct req_que *req = vha->hw->req_q_map[i];
+
+                       if (!test_bit(i, vha->hw->req_qid_map))
+                               continue;
+
                        if (req || !buf) {
                                length = req ?
                                    req->length : REQUEST_ENTRY_CNT_24XX;
@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
        } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
                for (i = 0; i < vha->hw->max_rsp_queues; i++) {
                        struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+                       if (!test_bit(i, vha->hw->rsp_qid_map))
+                               continue;
+
                        if (rsp || !buf) {
                                length = rsp ?
                                    rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
        if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
                for (i = 0; i < vha->hw->max_req_queues; i++) {
                        struct req_que *req = vha->hw->req_q_map[i];
+
+                       if (!test_bit(i, vha->hw->req_qid_map))
+                               continue;
+
                        if (req || !buf) {
                                qla27xx_insert16(i, buf, len);
                                qla27xx_insert16(1, buf, len);
@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
        } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
                for (i = 0; i < vha->hw->max_rsp_queues; i++) {
                        struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+                       if (!test_bit(i, vha->hw->rsp_qid_map))
+                               continue;
+
                        if (rsp || !buf) {
                                qla27xx_insert16(i, buf, len);
                                qla27xx_insert16(1, buf, len);
index 84fa4c46eaa6fc692490a5acc0b81e8938bfd5f6..bb669d32ccd0daee203a69840313fcb9cf343ee0 100644 (file)
@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
            sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
            sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
                rw_max = q->limits.io_opt =
-                       logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+                       sdkp->opt_xfer_blocks * sdp->sector_size;
        else
                rw_max = BLK_DEF_MAX_SECTORS;
 
index 91a003011acfacb277e892b47c99771ceae80ffa..a9bac3bf20de14e541a22608fcb1542cad0f6d5f 100644 (file)
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
 
 static int __init sh_pm_runtime_init(void)
 {
-       if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
+       if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
                if (!of_find_compatible_node(NULL, NULL,
                                             "renesas,cpg-mstp-clocks"))
                        return 0;
index aebad36391c93b36658c52222e761366c111216f..8feac599e9ab4bfe886b8da89960f04ed32c1aba 100644 (file)
@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
 
        as->use_cs_gpios = true;
        if (atmel_spi_is_v2(as) &&
+           pdev->dev.of_node &&
            !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
                as->use_cs_gpios = false;
                master->num_chipselect = 4;
index 1f8903d356e5581f48d6935177906bba6bc7e302..ed8283e7397aecd884616ec828fe2846b15c10df 100644 (file)
@@ -1024,6 +1024,16 @@ static int omap2_mcspi_setup(struct spi_device *spi)
                spi->controller_state = cs;
                /* Link this to context save list */
                list_add_tail(&cs->node, &ctx->cs);
+
+               if (gpio_is_valid(spi->cs_gpio)) {
+                       ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+                       if (ret) {
+                               dev_err(&spi->dev, "failed to request gpio\n");
+                               return ret;
+                       }
+                       gpio_direction_output(spi->cs_gpio,
+                                        !(spi->mode & SPI_CS_HIGH));
+               }
        }
 
        if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
@@ -1032,15 +1042,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
                        return ret;
        }
 
-       if (gpio_is_valid(spi->cs_gpio)) {
-               ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
-               if (ret) {
-                       dev_err(&spi->dev, "failed to request gpio\n");
-                       return ret;
-               }
-               gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
-       }
-
        ret = pm_runtime_get_sync(mcspi->dev);
        if (ret < 0)
                return ret;
index 79ac19246548afb5a6e5574a97d09dd7cb8ca110..70b8f4fabfad31af474fae8b7943f7a1404cbb8f 100644 (file)
@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
        lcd_send_serial(0x1F);  /* R/W=W, RS=0 */
        lcd_send_serial(cmd & 0x0F);
        lcd_send_serial((cmd >> 4) & 0x0F);
-       /* the shortest command takes at least 40 us */
-       usleep_range(40, 100);
+       udelay(40);             /* the shortest command takes at least 40 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
        lcd_send_serial(0x5F);  /* R/W=W, RS=1 */
        lcd_send_serial(data & 0x0F);
        lcd_send_serial((data >> 4) & 0x0F);
-       /* the shortest data takes at least 40 us */
-       usleep_range(40, 100);
+       udelay(40);             /* the shortest data takes at least 40 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
        spin_lock_irq(&pprt_lock);
        /* present the data to the data port */
        w_dtr(pprt, cmd);
-       /* maintain the data during 20 us before the strobe */
-       usleep_range(20, 100);
+       udelay(20);     /* maintain the data during 20 us before the strobe */
 
        bits.e = BIT_SET;
        bits.rs = BIT_CLR;
        bits.rw = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(40, 100);  /* maintain the strobe during 40 us */
+       udelay(40);     /* maintain the strobe during 40 us */
 
        bits.e = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(120, 500); /* the shortest command takes at least 120 us */
+       udelay(120);    /* the shortest command takes at least 120 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
        spin_lock_irq(&pprt_lock);
        /* present the data to the data port */
        w_dtr(pprt, data);
-       /* maintain the data during 20 us before the strobe */
-       usleep_range(20, 100);
+       udelay(20);     /* maintain the data during 20 us before the strobe */
 
        bits.e = BIT_SET;
        bits.rs = BIT_SET;
        bits.rw = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(40, 100);  /* maintain the strobe during 40 us */
+       udelay(40);     /* maintain the strobe during 40 us */
 
        bits.e = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(45, 100);  /* the shortest data takes at least 45 us */
+       udelay(45);     /* the shortest data takes at least 45 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
        spin_lock_irq(&pprt_lock);
        /* present the data to the control port */
        w_ctr(pprt, cmd);
-       usleep_range(60, 120);
+       udelay(60);
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
        spin_lock_irq(&pprt_lock);
        /* present the data to the data port */
        w_dtr(pprt, data);
-       usleep_range(60, 120);
+       udelay(60);
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
                lcd_send_serial(0x5F);  /* R/W=W, RS=1 */
                lcd_send_serial(' ' & 0x0F);
                lcd_send_serial((' ' >> 4) & 0x0F);
-               usleep_range(40, 100);  /* the shortest data takes at least 40 us */
+               udelay(40);     /* the shortest data takes at least 40 us */
        }
        spin_unlock_irq(&pprt_lock);
 
@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
                w_dtr(pprt, ' ');
 
                /* maintain the data during 20 us before the strobe */
-               usleep_range(20, 100);
+               udelay(20);
 
                bits.e = BIT_SET;
                bits.rs = BIT_SET;
@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
                set_ctrl_bits();
 
                /* maintain the strobe during 40 us */
-               usleep_range(40, 100);
+               udelay(40);
 
                bits.e = BIT_CLR;
                set_ctrl_bits();
 
                /* the shortest data takes at least 45 us */
-               usleep_range(45, 100);
+               udelay(45);
        }
        spin_unlock_irq(&pprt_lock);
 
@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
        for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
                /* present the data to the data port */
                w_dtr(pprt, ' ');
-               usleep_range(60, 120);
+               udelay(60);
        }
 
        spin_unlock_irq(&pprt_lock);
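
The usleep_range() -> udelay() conversions above are a correctness fix, not a
style preference: every call site runs between spin_lock_irq() and
spin_unlock_irq(), and sleeping in atomic context is forbidden, while udelay()
busy-waits and is safe there. A minimal sketch of the pattern, with a
hypothetical lock and output routine standing in for the panel driver's:

    #include <linux/delay.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_send(void (*write_nibble)(int), int byte)
    {
            /*
             * IRQs are off and a spinlock is held, so we must not
             * schedule: usleep_range() may sleep, udelay() busy-waits.
             */
            spin_lock_irq(&demo_lock);
            write_nibble(byte & 0x0F);
            write_nibble((byte >> 4) & 0x0F);
            udelay(40);     /* the shortest command takes at least 40 us */
            spin_unlock_irq(&demo_lock);
    }
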
index 3b5835b2812849f0c32055f065ef0be6a0c4d0d0..a5bbb338f275495c18bf6051adf60ee8a3bbcc1d 100644
@@ -6,6 +6,11 @@
 #include "spk_priv.h"
 #include "serialio.h"
 
+#include <linux/serial_core.h>
+/* WARNING:  Do not change this to <linux/serial.h> without testing that
+ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
+#include <asm/serial.h>
+
 #ifndef SERIAL_PORT_DFNS
 #define SERIAL_PORT_DFNS
 #endif
@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
        int baud = 9600, quot = 0;
        unsigned int cval = 0;
        int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
-       const struct old_serial_port *ser = rs_table + index;
+       const struct old_serial_port *ser;
        int err;
 
+       if (index >= ARRAY_SIZE(rs_table)) {
+               pr_info("no port info for ttyS%d\n", index);
+               return NULL;
+       }
+       ser = rs_table + index;
+
        /*      Divisor, bytesize and parity */
        quot = ser->baud_base / baud;
        cval = cflag & (CSIZE | CSTOPB);
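
The added check above guards the rs_table lookup against an out-of-range ttyS
index, which previously yielded a pointer past the end of the array. The same
guard in isolation, with a hypothetical port table in place of the
SERIAL_PORT_DFNS-generated one:

    #include <linux/kernel.h>       /* ARRAY_SIZE() */

    static const unsigned int demo_ports[] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };

    static int demo_port_base(unsigned int index)
    {
            /* Validate the index before computing demo_ports + index. */
            if (index >= ARRAY_SIZE(demo_ports))
                    return -1;      /* caller treats this as "no port info" */
            return demo_ports[index];
    }
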
index 88ea4e4f124b2113cf686f470aed171706304a94..3436a83568ea378c51b41f9fe04a2932756c84a7 100644
@@ -826,6 +826,49 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        return dev;
 }
 
+/*
+ * Check if the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+ * in ATA, in which case we need to set TPE=1.
+ */
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+                                      struct request_queue *q, int block_size)
+{
+       if (!blk_queue_discard(q))
+               return false;
+
+       attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
+                                                               block_size;
+       /*
+        * Currently hardcoded to 1 in Linux/SCSI code.
+        */
+       attrib->max_unmap_block_desc_count = 1;
+       attrib->unmap_granularity = q->limits.discard_granularity / block_size;
+       attrib->unmap_granularity_alignment = q->limits.discard_alignment /
+                                                               block_size;
+       return true;
+}
+EXPORT_SYMBOL(target_configure_unmap_from_queue);
+
+/*
+ * Convert from the blocksize advertised to the initiator to the 512 byte
+ * units unconditionally used by the Linux block layer.
+ */
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+{
+       switch (dev->dev_attrib.block_size) {
+       case 4096:
+               return lb << 3;
+       case 2048:
+               return lb << 2;
+       case 1024:
+               return lb << 1;
+       default:
+               return lb;
+       }
+}
+EXPORT_SYMBOL(target_to_linux_sector);
+
 int target_configure_device(struct se_device *dev)
 {
        struct se_hba *hba = dev->se_hba;
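
The shifts in target_to_linux_sector() are multiplication by block_size / 512:
an LBA counted in the device's advertised block size is mapped onto the block
layer's fixed 512-byte sectors, and sizes other than the listed powers of two
fall through unchanged. A runnable userspace restatement of the arithmetic
(the helper name here is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    /* LBA in device blocks -> 512-byte block-layer sectors. */
    static uint64_t to_linux_sector(uint64_t lba, unsigned int block_size)
    {
            return lba * (block_size / 512);
    }

    int main(void)
    {
            assert(to_linux_sector(100, 4096) == 100 << 3);  /* 800 */
            assert(to_linux_sector(100, 2048) == 100 << 2);  /* 400 */
            assert(to_linux_sector(100, 1024) == 100 << 1);  /* 200 */
            assert(to_linux_sector(100,  512) == 100);
            return 0;
    }
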
index e3195700211a3ebc38192391bf6488ed6ae86099..75f0f08b2a34f32d0b2bfdd35fa40f027e74ae75 100644
@@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev)
                        " block_device blocks: %llu logical_block_size: %d\n",
                        dev_size, div_u64(dev_size, fd_dev->fd_block_size),
                        fd_dev->fd_block_size);
-               /*
-                * Check if the underlying struct block_device request_queue supports
-                * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
-                * in ATA and we need to set TPE=1
-                */
-               if (blk_queue_discard(q)) {
-                       dev->dev_attrib.max_unmap_lba_count =
-                               q->limits.max_discard_sectors;
-                       /*
-                        * Currently hardcoded to 1 in Linux/SCSI code..
-                        */
-                       dev->dev_attrib.max_unmap_block_desc_count = 1;
-                       dev->dev_attrib.unmap_granularity =
-                               q->limits.discard_granularity >> 9;
-                       dev->dev_attrib.unmap_granularity_alignment =
-                               q->limits.discard_alignment;
+
+               if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+                                                     fd_dev->fd_block_size))
                        pr_debug("IFILE: BLOCK Discard support available,"
-                                       " disabled by default\n");
-               }
+                                " disabled by default\n");
                /*
                 * Enable write same emulation for IBLOCK and use 0xFFFF as
                 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
        if (S_ISBLK(inode->i_mode)) {
                /* The backend is block device, use discard */
                struct block_device *bdev = inode->i_bdev;
+               struct se_device *dev = cmd->se_dev;
 
-               ret = blkdev_issue_discard(bdev, lba,
-                               nolb, GFP_KERNEL, 0);
+               ret = blkdev_issue_discard(bdev,
+                                          target_to_linux_sector(dev, lba),
+                                          target_to_linux_sector(dev, nolb),
+                                          GFP_KERNEL, 0);
                if (ret < 0) {
                        pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
                                ret);
index f29c69120054463eee986c0ab6ed48ec6c504c39..2c53dcefff3e1517fcd70936a9a6b887c1fc3c42 100644
@@ -121,27 +121,11 @@ static int iblock_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
-       /*
-        * Check if the underlying struct block_device request_queue supports
-        * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
-        * in ATA and we need to set TPE=1
-        */
-       if (blk_queue_discard(q)) {
-               dev->dev_attrib.max_unmap_lba_count =
-                               q->limits.max_discard_sectors;
-
-               /*
-                * Currently hardcoded to 1 in Linux/SCSI code..
-                */
-               dev->dev_attrib.max_unmap_block_desc_count = 1;
-               dev->dev_attrib.unmap_granularity =
-                               q->limits.discard_granularity >> 9;
-               dev->dev_attrib.unmap_granularity_alignment =
-                               q->limits.discard_alignment;
-
+       if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+                                             dev->dev_attrib.hw_block_size))
                pr_debug("IBLOCK: BLOCK Discard support available,"
-                               " disabled by default\n");
-       }
+                        " disabled by default\n");
+
        /*
         * Enable write same emulation for IBLOCK and use 0xFFFF as
         * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -413,9 +397,13 @@ static sense_reason_t
 iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 {
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+       struct se_device *dev = cmd->se_dev;
        int ret;
 
-       ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+       ret = blkdev_issue_discard(bdev,
+                                  target_to_linux_sector(dev, lba),
+                                  target_to_linux_sector(dev, nolb),
+                                  GFP_KERNEL, 0);
        if (ret < 0) {
                pr_err("blkdev_issue_discard() failed: %d\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -431,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
        struct scatterlist *sg;
        struct bio *bio;
        struct bio_list list;
-       sector_t block_lba = cmd->t_task_lba;
-       sector_t sectors = sbc_get_write_same_sectors(cmd);
+       struct se_device *dev = cmd->se_dev;
+       sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+       sector_t sectors = target_to_linux_sector(dev,
+                                       sbc_get_write_same_sectors(cmd));
 
        if (cmd->prot_op) {
                pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -646,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  enum dma_data_direction data_direction)
 {
        struct se_device *dev = cmd->se_dev;
+       sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        struct iblock_req *ibr;
        struct bio *bio, *bio_start;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
-       sector_t block_lba;
        unsigned bio_cnt;
        int rw = 0;
        int i;
@@ -677,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                rw = READ;
        }
 
-       /*
-        * Convert the blocksize advertised to the initiator to the 512 byte
-        * units unconditionally used by the Linux block layer.
-        */
-       if (dev->dev_attrib.block_size == 4096)
-               block_lba = (cmd->t_task_lba << 3);
-       else if (dev->dev_attrib.block_size == 2048)
-               block_lba = (cmd->t_task_lba << 2);
-       else if (dev->dev_attrib.block_size == 1024)
-               block_lba = (cmd->t_task_lba << 1);
-       else if (dev->dev_attrib.block_size == 512)
-               block_lba = cmd->t_task_lba;
-       else {
-               pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
-                               " %u\n", dev->dev_attrib.block_size);
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       }
-
        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
index 28fb3016370faf1048bd361c1b98d2c3855483c8..88029cc6de5ea9fdceed35af249065e097f7bbca 100644
@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
 
        if (dev) {
                spin_lock_irqsave(&dev->se_tmr_lock, flags);
-               list_del(&tmr->tmr_list);
+               list_del_init(&tmr->tmr_list);
                spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
        }
 
        kfree(tmr);
 }
 
-static void core_tmr_handle_tas_abort(
-       struct se_node_acl *tmr_nacl,
-       struct se_cmd *cmd,
-       int tas)
+static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
 {
-       bool remove = true;
+       unsigned long flags;
+       bool remove = true, send_tas;
        /*
         * TASK ABORTED status (TAS) bit support
         */
-       if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       send_tas = (cmd->transport_state & CMD_T_TAS);
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+       if (send_tas) {
                remove = false;
                transport_send_task_abort(cmd);
        }
@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
        return 1;
 }
 
+static bool __target_check_io_state(struct se_cmd *se_cmd,
+                                   struct se_session *tmr_sess, int tas)
+{
+       struct se_session *sess = se_cmd->se_sess;
+
+       assert_spin_locked(&sess->sess_cmd_lock);
+       WARN_ON_ONCE(!irqs_disabled());
+       /*
+        * If command already reached CMD_T_COMPLETE state within
+        * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
+        * this se_cmd has been passed to fabric driver and will
+        * not be aborted.
+        *
+        * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
+        * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
+        * long as se_cmd->cmd_kref is still active unless zero.
+        */
+       spin_lock(&se_cmd->t_state_lock);
+       if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
+               pr_debug("Attempted to abort io tag: %llu already complete or"
+                       " fabric stop, skipping\n", se_cmd->tag);
+               spin_unlock(&se_cmd->t_state_lock);
+               return false;
+       }
+       if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+               pr_debug("Attempted to abort io tag: %llu already shutdown,"
+                       " skipping\n", se_cmd->tag);
+               spin_unlock(&se_cmd->t_state_lock);
+               return false;
+       }
+       se_cmd->transport_state |= CMD_T_ABORTED;
+
+       if ((tmr_sess != se_cmd->se_sess) && tas)
+               se_cmd->transport_state |= CMD_T_TAS;
+
+       spin_unlock(&se_cmd->t_state_lock);
+
+       return kref_get_unless_zero(&se_cmd->cmd_kref);
+}
+
 void core_tmr_abort_task(
        struct se_device *dev,
        struct se_tmr_req *tmr,
@@ -130,34 +172,22 @@ void core_tmr_abort_task(
                if (tmr->ref_task_tag != ref_tag)
                        continue;
 
-               if (!kref_get_unless_zero(&se_cmd->cmd_kref))
-                       continue;
-
                printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
                        se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
-               spin_lock(&se_cmd->t_state_lock);
-               if (se_cmd->transport_state & CMD_T_COMPLETE) {
-                       printk("ABORT_TASK: ref_tag: %llu already complete,"
-                              " skipping\n", ref_tag);
-                       spin_unlock(&se_cmd->t_state_lock);
+               if (!__target_check_io_state(se_cmd, se_sess, 0)) {
                        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
                        target_put_sess_cmd(se_cmd);
-
                        goto out;
                }
-               se_cmd->transport_state |= CMD_T_ABORTED;
-               spin_unlock(&se_cmd->t_state_lock);
-
                list_del_init(&se_cmd->se_cmd_list);
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
                cancel_work_sync(&se_cmd->work);
                transport_wait_for_tasks(se_cmd);
 
-               target_put_sess_cmd(se_cmd);
                transport_cmd_finish_abort(se_cmd, true);
+               target_put_sess_cmd(se_cmd);
 
                printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
                                " ref_tag: %llu\n", ref_tag);
@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
        struct list_head *preempt_and_abort_list)
 {
        LIST_HEAD(drain_tmr_list);
+       struct se_session *sess;
        struct se_tmr_req *tmr_p, *tmr_pp;
        struct se_cmd *cmd;
        unsigned long flags;
+       bool rc;
        /*
         * Release all pending and outgoing TMRs aside from the received
         * LUN_RESET tmr..
@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
                if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
                        continue;
 
+               sess = cmd->se_sess;
+               if (WARN_ON_ONCE(!sess))
+                       continue;
+
+               spin_lock(&sess->sess_cmd_lock);
                spin_lock(&cmd->t_state_lock);
-               if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+               if (!(cmd->transport_state & CMD_T_ACTIVE) ||
+                    (cmd->transport_state & CMD_T_FABRIC_STOP)) {
                        spin_unlock(&cmd->t_state_lock);
+                       spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
                if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
                        spin_unlock(&cmd->t_state_lock);
+                       spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
+               if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+                       spin_unlock(&cmd->t_state_lock);
+                       spin_unlock(&sess->sess_cmd_lock);
+                       continue;
+               }
+               cmd->transport_state |= CMD_T_ABORTED;
                spin_unlock(&cmd->t_state_lock);
 
+               rc = kref_get_unless_zero(&cmd->cmd_kref);
+               if (!rc) {
+                       printk("LUN_RESET TMR: kref_get_unless_zero failed, skipping\n");
+                       spin_unlock(&sess->sess_cmd_lock);
+                       continue;
+               }
+               spin_unlock(&sess->sess_cmd_lock);
+
                list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
        }
        spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
                        (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
                        tmr_p->function, tmr_p->response, cmd->t_state);
 
+               cancel_work_sync(&cmd->work);
+               transport_wait_for_tasks(cmd);
+
                transport_cmd_finish_abort(cmd, 1);
+               target_put_sess_cmd(cmd);
        }
 }
 
 static void core_tmr_drain_state_list(
        struct se_device *dev,
        struct se_cmd *prout_cmd,
-       struct se_node_acl *tmr_nacl,
+       struct se_session *tmr_sess,
        int tas,
        struct list_head *preempt_and_abort_list)
 {
        LIST_HEAD(drain_task_list);
+       struct se_session *sess;
        struct se_cmd *cmd, *next;
        unsigned long flags;
+       int rc;
 
        /*
         * Complete outstanding commands with TASK_ABORTED SAM status.
@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
                if (prout_cmd == cmd)
                        continue;
 
+               sess = cmd->se_sess;
+               if (WARN_ON_ONCE(!sess))
+                       continue;
+
+               spin_lock(&sess->sess_cmd_lock);
+               rc = __target_check_io_state(cmd, tmr_sess, tas);
+               spin_unlock(&sess->sess_cmd_lock);
+               if (!rc)
+                       continue;
+
                list_move_tail(&cmd->state_list, &drain_task_list);
                cmd->state_active = false;
        }
@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
 
        while (!list_empty(&drain_task_list)) {
                cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
-               list_del(&cmd->state_list);
+               list_del_init(&cmd->state_list);
 
                pr_debug("LUN_RESET: %s cmd: %p"
                        " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
                 * loop above, but we do it down here given that
                 * cancel_work_sync may block.
                 */
-               if (cmd->t_state == TRANSPORT_COMPLETE)
-                       cancel_work_sync(&cmd->work);
-
-               spin_lock_irqsave(&cmd->t_state_lock, flags);
-               target_stop_cmd(cmd, &flags);
-
-               cmd->transport_state |= CMD_T_ABORTED;
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+               cancel_work_sync(&cmd->work);
+               transport_wait_for_tasks(cmd);
 
-               core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
+               core_tmr_handle_tas_abort(cmd, tas);
+               target_put_sess_cmd(cmd);
        }
 }
 
@@ -334,6 +399,7 @@ int core_tmr_lun_reset(
 {
        struct se_node_acl *tmr_nacl = NULL;
        struct se_portal_group *tmr_tpg = NULL;
+       struct se_session *tmr_sess = NULL;
        int tas;
         /*
         * TASK_ABORTED status bit, this is configurable via ConfigFS
@@ -352,8 +418,9 @@ int core_tmr_lun_reset(
         * or struct se_device passthrough..
         */
        if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
-               tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
-               tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+               tmr_sess = tmr->task_cmd->se_sess;
+               tmr_nacl = tmr_sess->se_node_acl;
+               tmr_tpg = tmr_sess->se_tpg;
                if (tmr_nacl && tmr_tpg) {
                        pr_debug("LUN_RESET: TMR caller fabric: %s"
                                " initiator port %s\n",
@@ -366,7 +433,7 @@ int core_tmr_lun_reset(
                dev->transport->name, tas);
 
        core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
-       core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
+       core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
                                preempt_and_abort_list);
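
A recurring detail in these TMR hunks is the lock order: sess_cmd_lock is
taken first with irqsave, and t_state_lock nests inside with a plain
spin_lock(), which is legal only because interrupts are already disabled. A
minimal sketch of that nesting (the lock names are placeholders):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(outer_lock);     /* stands in for sess_cmd_lock */
    static DEFINE_SPINLOCK(inner_lock);     /* stands in for t_state_lock */

    static void demo_mark(unsigned int *state, unsigned int flag)
    {
            unsigned long flags;

            spin_lock_irqsave(&outer_lock, flags);  /* disables IRQs */
            spin_lock(&inner_lock);                 /* IRQs already off */
            *state |= flag;
            spin_unlock(&inner_lock);
            spin_unlock_irqrestore(&outer_lock, flags);
    }
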
 
        /*
index 4fdcee2006d1698f4b4e8689c7976ea161a34685..94f4ffac723f4263f9fb5e6afc4e32dee6ac99e2 100644
@@ -528,9 +528,6 @@ void transport_deregister_session(struct se_session *se_sess)
 }
 EXPORT_SYMBOL(transport_deregister_session);
 
-/*
- * Called with cmd->t_state_lock held.
- */
 static void target_remove_from_state_list(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
@@ -555,10 +552,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (write_pending)
-               cmd->t_state = TRANSPORT_WRITE_PENDING;
-
        if (remove_from_lists) {
                target_remove_from_state_list(cmd);
 
@@ -568,6 +561,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
                cmd->se_lun = NULL;
        }
 
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       if (write_pending)
+               cmd->t_state = TRANSPORT_WRITE_PENDING;
+
        /*
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
@@ -621,6 +618,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
+       bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+
        if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
                transport_lun_remove_cmd(cmd);
        /*
@@ -632,7 +631,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 
        if (transport_cmd_check_stop_to_fabric(cmd))
                return;
-       if (remove)
+       if (remove && ack_kref)
                transport_put_cmd(cmd);
 }
 
@@ -700,7 +699,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
         * Check for case where an explicit ABORT_TASK has been received
         * and transport_wait_for_tasks() will be waiting for completion..
         */
-       if (cmd->transport_state & CMD_T_ABORTED &&
+       if (cmd->transport_state & CMD_T_ABORTED ||
            cmd->transport_state & CMD_T_STOP) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                complete_all(&cmd->t_transport_stop_comp);
@@ -1850,19 +1849,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
        return true;
 }
 
+static int __transport_check_aborted_status(struct se_cmd *, int);
+
 void target_execute_cmd(struct se_cmd *cmd)
 {
-       /*
-        * If the received CDB has aleady been aborted stop processing it here.
-        */
-       if (transport_check_aborted_status(cmd, 1))
-               return;
-
        /*
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
+        *
+        * If the received CDB has already been aborted, stop processing it here.
         */
        spin_lock_irq(&cmd->t_state_lock);
+       if (__transport_check_aborted_status(cmd, 1)) {
+               spin_unlock_irq(&cmd->t_state_lock);
+               return;
+       }
        if (cmd->transport_state & CMD_T_STOP) {
                pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
                        __func__, __LINE__, cmd->tag);
@@ -2213,20 +2214,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 }
 
 /**
- * transport_release_cmd - free a command
- * @cmd:       command to free
+ * transport_put_cmd - release a reference to a command
+ * @cmd:       command to release
  *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
+ * This routine releases our reference to the command and frees it if possible.
  */
-static int transport_release_cmd(struct se_cmd *cmd)
+static int transport_put_cmd(struct se_cmd *cmd)
 {
        BUG_ON(!cmd->se_tfo);
-
-       if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
-               core_tmr_release_req(cmd->se_tmr_req);
-       if (cmd->t_task_cdb != cmd->__t_task_cdb)
-               kfree(cmd->t_task_cdb);
        /*
         * If this cmd has been setup with target_get_sess_cmd(), drop
         * the kref and call ->release_cmd() in kref callback.
@@ -2234,18 +2229,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
        return target_put_sess_cmd(cmd);
 }
 
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd:       command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
-       transport_free_pages(cmd);
-       return transport_release_cmd(cmd);
-}
-
 void *transport_kmap_data_sg(struct se_cmd *cmd)
 {
        struct scatterlist *sg = cmd->t_data_sg;
@@ -2441,34 +2424,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
        }
 }
 
-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+static bool
+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
+                          unsigned long *flags);
+
+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
 {
        unsigned long flags;
+
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+{
        int ret = 0;
+       bool aborted = false, tas = false;
 
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
                if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
-                        transport_wait_for_tasks(cmd);
+                       target_wait_free_cmd(cmd, &aborted, &tas);
 
-               ret = transport_release_cmd(cmd);
+               if (!aborted || tas)
+                       ret = transport_put_cmd(cmd);
        } else {
                if (wait_for_tasks)
-                       transport_wait_for_tasks(cmd);
+                       target_wait_free_cmd(cmd, &aborted, &tas);
                /*
                 * Handle WRITE failure case where transport_generic_new_cmd()
                 * has already added se_cmd to state_list, but fabric has
                 * failed command before I/O submission.
                 */
-               if (cmd->state_active) {
-                       spin_lock_irqsave(&cmd->t_state_lock, flags);
+               if (cmd->state_active)
                        target_remove_from_state_list(cmd);
-                       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               }
 
                if (cmd->se_lun)
                        transport_lun_remove_cmd(cmd);
 
-               ret = transport_put_cmd(cmd);
+               if (!aborted || tas)
+                       ret = transport_put_cmd(cmd);
+       }
+       /*
+        * If the task has been internally aborted due to TMR ABORT_TASK
+        * or LUN_RESET, target_core_tmr.c is responsible for performing
+        * the remaining calls to target_put_sess_cmd(), and not the
+        * callers of this function.
+        */
+       if (aborted) {
+               pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
+               wait_for_completion(&cmd->cmd_wait_comp);
+               cmd->se_tfo->release_cmd(cmd);
+               ret = 1;
        }
        return ret;
 }
@@ -2508,26 +2515,46 @@ out:
 }
 EXPORT_SYMBOL(target_get_sess_cmd);
 
+static void target_free_cmd_mem(struct se_cmd *cmd)
+{
+       transport_free_pages(cmd);
+
+       if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+               core_tmr_release_req(cmd->se_tmr_req);
+       if (cmd->t_task_cdb != cmd->__t_task_cdb)
+               kfree(cmd->t_task_cdb);
+}
+
 static void target_release_cmd_kref(struct kref *kref)
 {
        struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
        struct se_session *se_sess = se_cmd->se_sess;
        unsigned long flags;
+       bool fabric_stop;
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (list_empty(&se_cmd->se_cmd_list)) {
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               target_free_cmd_mem(se_cmd);
                se_cmd->se_tfo->release_cmd(se_cmd);
                return;
        }
-       if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+
+       spin_lock(&se_cmd->t_state_lock);
+       fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+       spin_unlock(&se_cmd->t_state_lock);
+
+       if (se_cmd->cmd_wait_set || fabric_stop) {
+               list_del_init(&se_cmd->se_cmd_list);
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               target_free_cmd_mem(se_cmd);
                complete(&se_cmd->cmd_wait_comp);
                return;
        }
-       list_del(&se_cmd->se_cmd_list);
+       list_del_init(&se_cmd->se_cmd_list);
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
+       target_free_cmd_mem(se_cmd);
        se_cmd->se_tfo->release_cmd(se_cmd);
 }
 
@@ -2539,6 +2566,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
        struct se_session *se_sess = se_cmd->se_sess;
 
        if (!se_sess) {
+               target_free_cmd_mem(se_cmd);
                se_cmd->se_tfo->release_cmd(se_cmd);
                return 1;
        }
@@ -2555,6 +2583,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
 {
        struct se_cmd *se_cmd;
        unsigned long flags;
+       int rc;
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (se_sess->sess_tearing_down) {
@@ -2564,8 +2593,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
        se_sess->sess_tearing_down = 1;
        list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
 
-       list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
-               se_cmd->cmd_wait_set = 1;
+       list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+               rc = kref_get_unless_zero(&se_cmd->cmd_kref);
+               if (rc) {
+                       se_cmd->cmd_wait_set = 1;
+                       spin_lock(&se_cmd->t_state_lock);
+                       se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+                       spin_unlock(&se_cmd->t_state_lock);
+               }
+       }
 
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 }
@@ -2578,15 +2614,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 {
        struct se_cmd *se_cmd, *tmp_cmd;
        unsigned long flags;
+       bool tas;
 
        list_for_each_entry_safe(se_cmd, tmp_cmd,
                                &se_sess->sess_wait_list, se_cmd_list) {
-               list_del(&se_cmd->se_cmd_list);
+               list_del_init(&se_cmd->se_cmd_list);
 
                pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
                        " %d\n", se_cmd, se_cmd->t_state,
                        se_cmd->se_tfo->get_cmd_state(se_cmd));
 
+               spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+               tas = (se_cmd->transport_state & CMD_T_TAS);
+               spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+               if (!target_put_sess_cmd(se_cmd)) {
+                       if (tas)
+                               target_put_sess_cmd(se_cmd);
+               }
+
                wait_for_completion(&se_cmd->cmd_wait_comp);
                pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
                        " fabric state: %d\n", se_cmd, se_cmd->t_state,
@@ -2608,53 +2654,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
        wait_for_completion(&lun->lun_ref_comp);
 }
 
-/**
- * transport_wait_for_tasks - wait for completion to occur
- * @cmd:       command to wait
- *
- * Called from frontend fabric context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
- */
-bool transport_wait_for_tasks(struct se_cmd *cmd)
+static bool
+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
+                          bool *aborted, bool *tas, unsigned long *flags)
+       __releases(&cmd->t_state_lock)
+       __acquires(&cmd->t_state_lock)
 {
-       unsigned long flags;
 
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       assert_spin_locked(&cmd->t_state_lock);
+       WARN_ON_ONCE(!irqs_disabled());
+
+       if (fabric_stop)
+               cmd->transport_state |= CMD_T_FABRIC_STOP;
+
+       if (cmd->transport_state & CMD_T_ABORTED)
+               *aborted = true;
+
+       if (cmd->transport_state & CMD_T_TAS)
+               *tas = true;
+
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
-           !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+           !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                return false;
-       }
 
        if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
-           !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+           !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                return false;
-       }
 
-       if (!(cmd->transport_state & CMD_T_ACTIVE)) {
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+       if (!(cmd->transport_state & CMD_T_ACTIVE))
+               return false;
+
+       if (fabric_stop && *aborted)
                return false;
-       }
 
        cmd->transport_state |= CMD_T_STOP;
 
-       pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
-               cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+       pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
+                " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
+                cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
 
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+       spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
 
        wait_for_completion(&cmd->t_transport_stop_comp);
 
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       spin_lock_irqsave(&cmd->t_state_lock, *flags);
        cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
 
-       pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
-               cmd->tag);
+       pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
+                "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
+
+       return true;
+}
 
+/**
+ * transport_wait_for_tasks - wait for completion to occur
+ * @cmd:       command to wait
+ *
+ * Called from frontend fabric context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
+ */
+bool transport_wait_for_tasks(struct se_cmd *cmd)
+{
+       unsigned long flags;
+       bool ret, aborted = false, tas = false;
+
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-       return true;
+       return ret;
 }
 EXPORT_SYMBOL(transport_wait_for_tasks);
 
@@ -2836,28 +2904,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
 }
 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
 
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+       __releases(&cmd->t_state_lock)
+       __acquires(&cmd->t_state_lock)
 {
+       assert_spin_locked(&cmd->t_state_lock);
+       WARN_ON_ONCE(!irqs_disabled());
+
        if (!(cmd->transport_state & CMD_T_ABORTED))
                return 0;
-
        /*
         * If cmd has been aborted but either no status is to be sent or it has
         * already been sent, just return
         */
-       if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
+       if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
+               if (send_status)
+                       cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
                return 1;
+       }
 
-       pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
-                cmd->t_task_cdb[0], cmd->tag);
+       pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
+               " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
 
        cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
        trace_target_cmd_complete(cmd);
+
+       spin_unlock_irq(&cmd->t_state_lock);
        cmd->se_tfo->queue_status(cmd);
+       spin_lock_irq(&cmd->t_state_lock);
 
        return 1;
 }
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+       int ret;
+
+       spin_lock_irq(&cmd->t_state_lock);
+       ret = __transport_check_aborted_status(cmd, send_status);
+       spin_unlock_irq(&cmd->t_state_lock);
+
+       return ret;
+}
 EXPORT_SYMBOL(transport_check_aborted_status);
 
 void transport_send_task_abort(struct se_cmd *cmd)
@@ -2879,11 +2968,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
         */
        if (cmd->data_direction == DMA_TO_DEVICE) {
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-                       cmd->transport_state |= CMD_T_ABORTED;
+                       spin_lock_irqsave(&cmd->t_state_lock, flags);
+                       if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
+                               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+                               goto send_abort;
+                       }
                        cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+                       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                        return;
                }
        }
+send_abort:
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 
        transport_lun_remove_cmd(cmd);
@@ -2900,8 +2995,17 @@ static void target_tmr_work(struct work_struct *work)
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
        struct se_device *dev = cmd->se_dev;
        struct se_tmr_req *tmr = cmd->se_tmr_req;
+       unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       if (cmd->transport_state & CMD_T_ABORTED) {
+               tmr->response = TMR_FUNCTION_REJECTED;
+               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+               goto check_stop;
+       }
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
        switch (tmr->function) {
        case TMR_ABORT_TASK:
                core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2934,9 +3038,17 @@ static void target_tmr_work(struct work_struct *work)
                break;
        }
 
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       if (cmd->transport_state & CMD_T_ABORTED) {
+               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+               goto check_stop;
+       }
        cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
        cmd->se_tfo->queue_tm_rsp(cmd);
 
+check_stop:
        transport_cmd_check_stop_to_fabric(cmd);
 }
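
The split of transport_check_aborted_status() into a locked __ helper lets
target_execute_cmd() perform the aborted check inside the same t_state_lock
critical section as its CMD_T_STOP check, while the helper drops the lock
around ->queue_status(), a fabric callback that must not run under a spinlock.
The __releases/__acquires annotations document that protocol for sparse. A
sketch of the drop-call-reacquire shape (names are hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_notify_locked(void (*cb)(void *), void *arg)
            __releases(&demo_lock)
            __acquires(&demo_lock)
    {
            /* Entered with demo_lock held and IRQs off. */
            spin_unlock_irq(&demo_lock);
            cb(arg);                /* may sleep or take other locks */
            spin_lock_irq(&demo_lock);
    }
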
 
index e3fbc5a5d88f166930e8633e671348614d00dd4d..6ceac4f2d4b227d52045632992568d08ea158c5b 100644
@@ -377,26 +377,28 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
  * get_load() - get load for a cpu since last updated
  * @cpufreq_device:    &struct cpufreq_cooling_device for this cpu
  * @cpu:       cpu number
+ * @cpu_idx:   index of the cpu in cpufreq_device->allowed_cpus
  *
  * Return: The average load of cpu @cpu in percentage since this
  * function was last called.
  */
-static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu)
+static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
+                   int cpu_idx)
 {
        u32 load;
        u64 now, now_idle, delta_time, delta_idle;
 
        now_idle = get_cpu_idle_time(cpu, &now, 0);
-       delta_idle = now_idle - cpufreq_device->time_in_idle[cpu];
-       delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu];
+       delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
+       delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
 
        if (delta_time <= delta_idle)
                load = 0;
        else
                load = div64_u64(100 * (delta_time - delta_idle), delta_time);
 
-       cpufreq_device->time_in_idle[cpu] = now_idle;
-       cpufreq_device->time_in_idle_timestamp[cpu] = now;
+       cpufreq_device->time_in_idle[cpu_idx] = now_idle;
+       cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
 
        return load;
 }
@@ -598,7 +600,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
                u32 load;
 
                if (cpu_online(cpu))
-                       load = get_load(cpufreq_device, cpu);
+                       load = get_load(cpufreq_device, cpu, i);
                else
                        load = 0;
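
The fix above is purely an indexing one: the time_in_idle arrays are sized by
the cpus in allowed_cpus, so they must be indexed by the position cpu_idx
rather than the global CPU number, which can point past the allocation. The
load formula itself is unchanged; a runnable userspace restatement:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Busy percentage over a window, as computed by get_load(). */
    static uint32_t window_load(uint64_t delta_time, uint64_t delta_idle)
    {
            if (delta_time <= delta_idle)
                    return 0;
            return (uint32_t)(100 * (delta_time - delta_idle) / delta_time);
    }

    int main(void)
    {
            /* A 10 ms window with 2.5 ms idle -> 75% load. */
            printf("%" PRIu32 "%%\n", window_load(10000, 2500));
            return 0;
    }
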
 
index 2f9f7086ac3dd0d7a3a71015593fffacf8ee586e..ea9366ad3e6bb285e52e368691a0d495cbb3429f 100644
@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
        next_target = instance->target;
        dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
 
+       if (!instance->initialized) {
+               if (throttle) {
+                       next_target = (cur_state + 1) >= instance->upper ?
+                                       instance->upper :
+                                       ((cur_state + 1) < instance->lower ?
+                                       instance->lower : (cur_state + 1));
+               } else {
+                       next_target = THERMAL_NO_TARGET;
+               }
+
+               return next_target;
+       }
+
        switch (trend) {
        case THERMAL_TREND_RAISING:
                if (throttle) {
@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
                dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
                                        old_target, (int)instance->target);
 
-               if (old_target == instance->target)
+               if (instance->initialized && old_target == instance->target)
                        continue;
 
                /* Activate a passive thermal instance */
@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
                        instance->target == THERMAL_NO_TARGET)
                        update_passive_instance(tz, trip_type, -1);
 
-
+               instance->initialized = true;
                instance->cdev->updated = false; /* cdev needs update */
        }
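
For an instance that has never been updated, old_target is meaningless, so the
new branch above chooses a first target directly: one state above the current
one when throttling, clamped to the instance's [lower, upper] range, and
THERMAL_NO_TARGET otherwise. The nested conditional unrolls to this runnable
check:

    #include <assert.h>

    /* First throttle target: cur_state + 1 clamped into [lower, upper]. */
    static long first_target(long cur_state, long lower, long upper)
    {
            long next = cur_state + 1;

            if (next >= upper)
                    return upper;
            if (next < lower)
                    return lower;
            return next;
    }

    int main(void)
    {
            assert(first_target(0, 2, 5) == 2);     /* raised to lower bound */
            assert(first_target(2, 2, 5) == 3);     /* plain one-state step */
            assert(first_target(9, 2, 5) == 5);     /* capped at upper bound */
            return 0;
    }
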
 
index d9e525cc9c1ce24bcd9d55dd59730f375b15d552..ba08b5521382890e66b14a54a5b67e7bc29c69fe 100644
@@ -37,6 +37,7 @@
 #include <linux/of.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
+#include <linux/suspend.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/thermal.h>
@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
 static DEFINE_MUTEX(thermal_list_lock);
 static DEFINE_MUTEX(thermal_governor_lock);
 
+static atomic_t in_suspend;
+
 static struct thermal_governor *def_governor;
 
 static struct thermal_governor *__find_governor(const char *name)
@@ -532,14 +535,31 @@ static void update_temperature(struct thermal_zone_device *tz)
        mutex_unlock(&tz->lock);
 
        trace_thermal_temperature(tz);
-       dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
-                               tz->last_temperature, tz->temperature);
+       if (tz->last_temperature == THERMAL_TEMP_INVALID)
+               dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
+                       tz->temperature);
+       else
+               dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+                       tz->last_temperature, tz->temperature);
+}
+
+static void thermal_zone_device_reset(struct thermal_zone_device *tz)
+{
+       struct thermal_instance *pos;
+
+       tz->temperature = THERMAL_TEMP_INVALID;
+       tz->passive = 0;
+       list_for_each_entry(pos, &tz->thermal_instances, tz_node)
+               pos->initialized = false;
 }
 
 void thermal_zone_device_update(struct thermal_zone_device *tz)
 {
        int count;
 
+       if (atomic_read(&in_suspend))
+               return;
+
        if (!tz->ops->get_temp)
                return;
 
@@ -1321,6 +1341,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
        if (!result) {
                list_add_tail(&dev->tz_node, &tz->thermal_instances);
                list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
+               atomic_set(&tz->need_update, 1);
        }
        mutex_unlock(&cdev->lock);
        mutex_unlock(&tz->lock);
@@ -1430,6 +1451,7 @@ __thermal_cooling_device_register(struct device_node *np,
                                  const struct thermal_cooling_device_ops *ops)
 {
        struct thermal_cooling_device *cdev;
+       struct thermal_zone_device *pos = NULL;
        int result;
 
        if (type && strlen(type) >= THERMAL_NAME_LENGTH)
@@ -1474,6 +1496,12 @@ __thermal_cooling_device_register(struct device_node *np,
        /* Update binding information for 'this' new cdev */
        bind_cdev(cdev);
 
+       mutex_lock(&thermal_list_lock);
+       list_for_each_entry(pos, &thermal_tz_list, node)
+               if (atomic_cmpxchg(&pos->need_update, 1, 0))
+                       thermal_zone_device_update(pos);
+       mutex_unlock(&thermal_list_lock);
+
        return cdev;
 }
 
@@ -1806,6 +1834,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
        tz->trips = trips;
        tz->passive_delay = passive_delay;
        tz->polling_delay = polling_delay;
+       /* A new thermal zone needs to be updated anyway. */
+       atomic_set(&tz->need_update, 1);
 
        dev_set_name(&tz->device, "thermal_zone%d", tz->id);
        result = device_register(&tz->device);
@@ -1900,7 +1930,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
 
        INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
 
-       thermal_zone_device_update(tz);
+       thermal_zone_device_reset(tz);
+       /* Update the new thermal zone and mark it as already updated. */
+       if (atomic_cmpxchg(&tz->need_update, 1, 0))
+               thermal_zone_device_update(tz);
 
        return tz;
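
need_update is consumed with atomic_cmpxchg(&tz->need_update, 1, 0), so when
several paths could trigger the deferred update (cooling-device binding, cdev
registration, zone registration), exactly one caller performs it. A minimal
sketch of this test-and-clear idiom:

    #include <linux/atomic.h>

    static atomic_t need_update = ATOMIC_INIT(0);

    static void demo_mark_dirty(void)
    {
            atomic_set(&need_update, 1);
    }

    static void demo_maybe_update(void (*update)(void))
    {
            /* The 1 -> 0 transition succeeds for exactly one caller. */
            if (atomic_cmpxchg(&need_update, 1, 0))
                    update();
    }
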
 
@@ -2140,6 +2173,36 @@ static void thermal_unregister_governors(void)
        thermal_gov_power_allocator_unregister();
 }
 
+static int thermal_pm_notify(struct notifier_block *nb,
+                               unsigned long mode, void *_unused)
+{
+       struct thermal_zone_device *tz;
+
+       switch (mode) {
+       case PM_HIBERNATION_PREPARE:
+       case PM_RESTORE_PREPARE:
+       case PM_SUSPEND_PREPARE:
+               atomic_set(&in_suspend, 1);
+               break;
+       case PM_POST_HIBERNATION:
+       case PM_POST_RESTORE:
+       case PM_POST_SUSPEND:
+               atomic_set(&in_suspend, 0);
+               list_for_each_entry(tz, &thermal_tz_list, node) {
+                       thermal_zone_device_reset(tz);
+                       thermal_zone_device_update(tz);
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static struct notifier_block thermal_pm_nb = {
+       .notifier_call = thermal_pm_notify,
+};
+
 static int __init thermal_init(void)
 {
        int result;
@@ -2160,6 +2223,11 @@ static int __init thermal_init(void)
        if (result)
                goto exit_netlink;
 
+       result = register_pm_notifier(&thermal_pm_nb);
+       if (result)
+               pr_warn("Thermal: Cannot register suspend notifier, error %d\n",
+                       result);
+
        return 0;
 
 exit_netlink:
@@ -2179,6 +2247,7 @@ error:
 
 static void __exit thermal_exit(void)
 {
+       unregister_pm_notifier(&thermal_pm_nb);
        of_thermal_destroy_zones();
        genetlink_exit();
        class_unregister(&thermal_class);
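
The new PM notifier makes the thermal core suspend-aware: zone updates are
suppressed once suspend preparation begins (sensors may already be powered
down), and every zone is reset and re-evaluated after resume. A stripped-down
sketch of registering such a notifier:

    #include <linux/notifier.h>
    #include <linux/suspend.h>

    static int demo_pm_notify(struct notifier_block *nb,
                              unsigned long mode, void *unused)
    {
            switch (mode) {
            case PM_SUSPEND_PREPARE:
                    /* quiesce: e.g. set a flag that update paths check */
                    break;
            case PM_POST_SUSPEND:
                    /* resume: clear the flag and refresh stale state */
                    break;
            }
            return 0;
    }

    static struct notifier_block demo_pm_nb = {
            .notifier_call = demo_pm_notify,
    };

    /* register_pm_notifier(&demo_pm_nb) from init,
     * unregister_pm_notifier(&demo_pm_nb) from exit. */
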
index d7ac1fccd6598a80453dc51da655e37b30b85ac7..749d41abfbabecfc8fc33f54f4bc2e5d680450f8 100644
@@ -41,6 +41,7 @@ struct thermal_instance {
        struct thermal_zone_device *tz;
        struct thermal_cooling_device *cdev;
        int trip;
+       bool initialized;
        unsigned long upper;    /* Highest cooling state for this trip point */
        unsigned long lower;    /* Lowest cooling state for this trip point */
        unsigned long target;   /* expected cooling state */
index 45f86da1d6d37b4650ddfaf216879195ca9c6542..03b6743461d15de129a2d7908e851db450111d3c 100644
@@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work)
 int ci_hdrc_otg_init(struct ci_hdrc *ci)
 {
        INIT_WORK(&ci->work, ci_otg_work);
-       ci->wq = create_singlethread_workqueue("ci_otg");
+       ci->wq = create_freezable_workqueue("ci_otg");
        if (!ci->wq) {
                dev_err(ci->dev, "can't create workqueue\n");
                return -ENODEV;
index e4c70dce3e7cead284d3e86cd69aba840d667e00..fa4e23930614a47ac14ece43e863286fadb91402 100644
@@ -1841,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
        },
 #endif
 
+       /* Samsung phone in firmware update mode */
+       { USB_DEVICE(0x04e8, 0x685d),
+       .driver_info = IGNORE_DEVICE,
+       },
+
        /* Exclude Infineon Flash Loader utility */
        { USB_DEVICE(0x058b, 0x0041),
        .driver_info = IGNORE_DEVICE,
index 36f1cb74588c5b7bff230e3ed91e42f73c3b47e8..78be201d81f4eabe0ec6d75e95cf3d76e8aef9db 100644
@@ -853,7 +853,6 @@ struct dwc3 {
        unsigned                pullups_connected:1;
        unsigned                resize_fifos:1;
        unsigned                setup_packet_pending:1;
-       unsigned                start_config_issued:1;
        unsigned                three_stage_setup:1;
        unsigned                usb3_lpm_capable:1;
 
index 5320e939e0902647edb21a5e55d7e537c1b03b33..b13912d5fa995f463a93b14c521888cee77541c5 100644
@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
        int ret;
        u32 reg;
 
-       dwc->start_config_issued = false;
        cfg = le16_to_cpu(ctrl->wValue);
 
        switch (state) {
@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
                dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
                ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
                break;
-       case USB_REQ_SET_INTERFACE:
-               dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
-               dwc->start_config_issued = false;
-               /* Fall through */
        default:
                dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
                ret = dwc3_ep0_delegate_req(dwc, ctrl);
index a58376fd65fe6d788fa817d5600696bfd431c791..69ffe6e8d77fc0421fcbbd5826e0e70ca8d99966 100644
@@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
        dep->trb_pool_dma = 0;
 }
 
+static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
+
+/**
+ * dwc3_gadget_start_config - Configure EP resources
+ * @dwc: pointer to our controller context structure
+ * @dep: endpoint that is being enabled
+ *
+ * The assignment of transfer resources cannot perfectly follow the
+ * data book due to the fact that the controller driver does not have
+ * all knowledge of the configuration in advance. It is given this
+ * information piecemeal by the composite gadget framework after every
+ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
+ * programming model in this scenario can cause errors, for two
+ * reasons:
+ *
+ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
+ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
+ * multiple interfaces.
+ *
+ * 2) The databook does not mention doing more DEPXFERCFG for a new
+ * endpoint on an alt setting (8.1.6).
+ *
+ * The following simplified method is used instead:
+ *
+ * All hardware endpoints can be assigned a transfer resource and this
+ * setting will stay persistent until either a core reset or
+ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
+ * do DEPXFERCFG for every hardware endpoint as well. We are
+ * guaranteed that there are as many transfer resources as endpoints.
+ *
+ * This function is called for each endpoint when it is being enabled
+ * but is triggered only when called for EP0-out, which always happens
+ * first, and which should only happen in one of the above conditions.
+ */
 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
 {
        struct dwc3_gadget_ep_cmd_params params;
        u32                     cmd;
+       int                     i;
+       int                     ret;
+
+       if (dep->number)
+               return 0;
 
        memset(&params, 0x00, sizeof(params));
+       cmd = DWC3_DEPCMD_DEPSTARTCFG;
 
-       if (dep->number != 1) {
-               cmd = DWC3_DEPCMD_DEPSTARTCFG;
-               /* XferRscIdx == 0 for ep0 and 2 for the remaining */
-               if (dep->number > 1) {
-                       if (dwc->start_config_issued)
-                               return 0;
-                       dwc->start_config_issued = true;
-                       cmd |= DWC3_DEPCMD_PARAM(2);
-               }
+       ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+       if (ret)
+               return ret;
 
-               return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+       for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+               struct dwc3_ep *dep = dwc->eps[i];
+
+               if (!dep)
+                       continue;
+
+               ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
                struct dwc3_trb *trb_st_hw;
                struct dwc3_trb *trb_link;
 
-               ret = dwc3_gadget_set_xfer_resource(dwc, dep);
-               if (ret)
-                       return ret;
-
                dep->endpoint.desc = desc;
                dep->comp_desc = comp_desc;
                dep->type = usb_endpoint_type(desc);
@@ -1604,8 +1642,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
        }
        dwc3_writel(dwc->regs, DWC3_DCFG, reg);
 
-       dwc->start_config_issued = false;
-
        /* Start with SuperSpeed Default */
        dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
@@ -2202,7 +2238,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
        dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
        dwc3_disconnect_gadget(dwc);
-       dwc->start_config_issued = false;
 
        dwc->gadget.speed = USB_SPEED_UNKNOWN;
        dwc->setup_packet_pending = false;
@@ -2253,7 +2288,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 
        dwc3_stop_active_transfers(dwc);
        dwc3_clear_stall_all_ep(dwc);
-       dwc->start_config_issued = false;
 
        /* Reset device address to zero */
        reg = dwc3_readl(dwc->regs, DWC3_DCFG);
index 1dd9919081f80661d4cb3eb47d3a597c595beb6f..7a76fe4c2f9ea9dc40804c04553c6476177943c0 100644 (file)
@@ -162,6 +162,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+       { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+       { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+       { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
        { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
        { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
index db86e512e0fcb3af0548eb9907b04ce68db3a8dc..348e19834b83e12e7aafafa9d46374822fe43f7f 100644 (file)
@@ -270,6 +270,7 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_UE910_V2                 0x1012
 #define TELIT_PRODUCT_LE922_USBCFG0            0x1042
 #define TELIT_PRODUCT_LE922_USBCFG3            0x1043
+#define TELIT_PRODUCT_LE922_USBCFG5            0x1045
 #define TELIT_PRODUCT_LE920                    0x1200
 #define TELIT_PRODUCT_LE910                    0x1201
 
@@ -315,6 +316,7 @@ static void option_instat_callback(struct urb *urb);
 #define TOSHIBA_PRODUCT_G450                   0x0d45
 
 #define ALINK_VENDOR_ID                                0x1e0e
+#define SIMCOM_PRODUCT_SIM7100E                        0x9001 /* Yes, ALINK_VENDOR_ID */
 #define ALINK_PRODUCT_PH300                    0x9100
 #define ALINK_PRODUCT_3GU                      0x9200
 
@@ -607,6 +609,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
        .reserved = BIT(3) | BIT(4),
 };
 
+static const struct option_blacklist_info simcom_sim7100e_blacklist = {
+       .reserved = BIT(5) | BIT(6),
+};
+
 static const struct option_blacklist_info telit_le910_blacklist = {
        .sendsetup = BIT(0),
        .reserved = BIT(1) | BIT(2),
@@ -1122,9 +1128,13 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+       { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
+         .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1176,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
                .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
                .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+               .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
                .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1645,6 +1657,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
        { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
        { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
+         .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
          .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
        },
index 9919d2a9faf278177aacf32c149fda22b00a1d83..1bc6089b90083a05e0ef3e4ff71c0ef752e3831a 100644 (file)
@@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x1199, 0x9056)},   /* Sierra Wireless Modem */
        {DEVICE_SWI(0x1199, 0x9060)},   /* Sierra Wireless Modem */
        {DEVICE_SWI(0x1199, 0x9061)},   /* Sierra Wireless Modem */
-       {DEVICE_SWI(0x1199, 0x9070)},   /* Sierra Wireless MC74xx/EM74xx */
-       {DEVICE_SWI(0x1199, 0x9071)},   /* Sierra Wireless MC74xx/EM74xx */
+       {DEVICE_SWI(0x1199, 0x9070)},   /* Sierra Wireless MC74xx */
+       {DEVICE_SWI(0x1199, 0x9071)},   /* Sierra Wireless MC74xx */
+       {DEVICE_SWI(0x1199, 0x9078)},   /* Sierra Wireless EM74xx */
+       {DEVICE_SWI(0x1199, 0x9079)},   /* Sierra Wireless EM74xx */
        {DEVICE_SWI(0x413c, 0x81a2)},   /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a3)},   /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a8)},   /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a9)},   /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81b1)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {DEVICE_SWI(0x413c, 0x81b3)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 
        /* Huawei devices */
        {DEVICE_HWI(0x03f0, 0x581d)},   /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
index 56bf6dbb93db7ac281a0af4fa01a8d212fd87670..9982cb176ce86f2a7fd46fc9e205bf94c9403624 100644 (file)
@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
                info.num_regions = VFIO_PCI_NUM_REGIONS;
                info.num_irqs = VFIO_PCI_NUM_IRQS;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct pci_dev *pdev = vdev->pdev;
@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
                        return -EINVAL;
                }
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;
@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
                else
                        info.flags |= VFIO_IRQ_INFO_NORESIZE;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
index 418cdd9ba3f4841353c5cbdddb345e9363578736..e65b142d34222dd4ba548e46c44017d2b762cf06 100644 (file)
@@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data,
                info.num_regions = vdev->num_regions;
                info.num_irqs = vdev->num_irqs;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_region_info info;
@@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data,
                info.size = vdev->regions[info.index].size;
                info.flags = vdev->regions[info.index].flags;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;
@@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data,
                info.flags = vdev->irqs[info.index].flags;
                info.count = vdev->irqs[info.index].count;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
index 59d47cb638d5d18aac6700157ecc9be918772f91..ecb826eefe0214861bf5e141b34ca74ecf804598 100644 (file)
@@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 
                info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
 
        } else if (cmd == VFIO_IOMMU_MAP_DMA) {
                struct vfio_iommu_type1_dma_map map;
@@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
                if (ret)
                        return ret;
 
-               return copy_to_user((void __user *)arg, &unmap, minsz);
+               return copy_to_user((void __user *)arg, &unmap, minsz) ?
+                       -EFAULT : 0;
        }
 
        return -ENOTTY;
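
For context on the four VFIO hunks above: copy_to_user() returns the number of bytes it failed to copy, not a negative errno, so returning its result directly from an ioctl handler leaks a positive byte count to userspace instead of an error code. A minimal sketch of the corrected pattern (the handler and struct names here are hypothetical, not from the patch):

	/* Sketch: map any partial copy to -EFAULT, return 0 on success. */
	static long foo_ioctl_get_info(struct foo_info *info,
				       void __user *arg, unsigned long minsz)
	{
		/* copy_to_user() returns the number of uncopied bytes. */
		if (copy_to_user(arg, info, minsz))
			return -EFAULT;
		return 0;
	}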
index 92f394927f241bef338a293170e4a09b9f45c647..6e92917ba77a97477f32a0426ee914c351867a6b 100644 (file)
@@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
        }
 
        if (!err) {
+               ops->cur_blink_jiffies = HZ / 5;
                info->fbcon_par = ops;
 
                if (vc)
@@ -956,6 +957,7 @@ static const char *fbcon_startup(void)
        ops->currcon = -1;
        ops->graphics = 1;
        ops->cur_rotate = -1;
+       ops->cur_blink_jiffies = HZ / 5;
        info->fbcon_par = ops;
        p->con_rotate = initial_rotation;
        set_blitting_type(vc, info);
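
For reference, HZ is the number of timer ticks per second, so cur_blink_jiffies = HZ / 5 is one fifth of a second, i.e. a 200 ms default cursor blink interval. Initializing the field in both setup paths ensures the blink timer code never sees a zero interval before the VT layer supplies its own value.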
index 7efc32945810e8fdcafa76a6328517f35d65ea3c..7d3e5d0e9aa4ff907ab5e51284b57b9492bf3891 100644 (file)
@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
         */
        if (vb->num_pfns != 0)
                tell_host(vb, vb->deflate_vq);
-       mutex_unlock(&vb->balloon_lock);
        release_pages_balloon(vb);
+       mutex_unlock(&vb->balloon_lock);
        return num_freed_pages;
 }
 
index 78f804af6c2020a9b927286e3496f6a2d8541953..2046a68ad0ba20748129d2ff338d964268051634 100644 (file)
@@ -545,6 +545,7 @@ err_enable_device:
 static void virtio_pci_remove(struct pci_dev *pci_dev)
 {
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+       struct device *dev = get_device(&vp_dev->vdev.dev);
 
        unregister_virtio_device(&vp_dev->vdev);
 
@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
                virtio_pci_modern_remove(vp_dev);
 
        pci_disable_device(pci_dev);
+       put_device(dev);
 }
 
 static struct pci_driver virtio_pci_driver = {
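
For context: unregister_virtio_device() can drop the last reference to the embedded device, whose release callback frees the containing virtio_pci_device, so the legacy/modern remove calls that follow would touch freed memory. Taking an extra reference first pins the structure until remove is done. A generic sketch of the pattern (container and helper names are hypothetical):

	/* Sketch: pin an embedded device across an unregister call that may
	 * otherwise drop the final reference and free the container. */
	struct device *dev = get_device(&obj->vdev.dev);	/* +1 ref */

	unregister_my_device(&obj->vdev);	/* may release the original ref */
	teardown_resources(obj);		/* still safe: we hold a ref */
	put_device(dev);			/* drop ours; obj may be freed now */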
index 73dafdc494aa8322e037f1d4a9ad4ec5530c5743..fb0221434f814aa49eef41f6aea7a60c86a4c817 100644 (file)
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
        /*
         * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
         * to access the BARs where the MSI-X entries reside.
+        * But VF devices are unique in that the PF needs to be checked.
         */
-       pci_read_config_word(dev, PCI_COMMAND, &cmd);
+       pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
        if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
                return -ENXIO;
 
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
        struct xen_pcibk_dev_data *dev_data = NULL;
        struct xen_pci_op *op = &pdev->op;
        int test_intx = 0;
+#ifdef CONFIG_PCI_MSI
+       unsigned int nr = 0;
+#endif
 
        *op = pdev->sh_info->op;
        barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
                        op->err = xen_pcibk_disable_msi(pdev, dev, op);
                        break;
                case XEN_PCI_OP_enable_msix:
+                       nr = op->value;
                        op->err = xen_pcibk_enable_msix(pdev, dev, op);
                        break;
                case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
        if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
                unsigned int i;
 
-               for (i = 0; i < op->value; i++)
+               for (i = 0; i < nr; i++)
                        pdev->sh_info->op.msix_entries[i].vector =
                                op->msix_entries[i].vector;
        }
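
For context: op->value carries the requested number of MSI-X entries on input, but xen_pcibk_enable_msix() can overwrite it before the copy-back loop runs, so the loop bound is snapshotted into the local nr first. The general rule for such in/out command fields is sketched below (names hypothetical):

	/* Sketch: latch an in/out field before a call that may clobber it,
	 * and bound the later loop by the latched value. */
	unsigned int nr = READ_ONCE(cmd->count);	/* snapshot the input */

	err = backend_enable(dev, cmd);		/* may rewrite cmd->count */
	if (!err)
		for (i = 0; i < nr; i++)	/* use the snapshot, never re-read */
			out[i] = cmd->entries[i];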
index ad4eb1024d1ffb7663db4e770fdd4c5c2663e821..51387d75c7bf1202556cf919042bc0b055bc6a27 100644 (file)
@@ -939,12 +939,12 @@ out:
        spin_unlock_irqrestore(&info->v2p_lock, flags);
 
 out_free:
-       mutex_lock(&tpg->tv_tpg_mutex);
-       tpg->tv_tpg_fe_count--;
-       mutex_unlock(&tpg->tv_tpg_mutex);
-
-       if (err)
+       if (err) {
+               mutex_lock(&tpg->tv_tpg_mutex);
+               tpg->tv_tpg_fe_count--;
+               mutex_unlock(&tpg->tv_tpg_mutex);
                kfree(new);
+       }
 
        return err;
 }
index 3e36e4adc4a35539e9415951118a974db2794c5a..9aba42b78253e33a2d0d1d6a06f83ba0cf122ad0 100644 (file)
@@ -328,8 +328,8 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
                list_add_tail(&work->ordered_list, &wq->ordered_list);
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
-       queue_work(wq->normal_wq, &work->normal_work);
        trace_btrfs_work_queued(work);
+       queue_work(wq->normal_wq, &work->normal_work);
 }
 
 void btrfs_queue_work(struct btrfs_workqueue *wq,
index 35489e7129a7e8de9d0232279d41d3bbd19ae1df..385b449fd7ed4969d9e408f9ac756217209b48d9 100644 (file)
@@ -1572,7 +1572,7 @@ struct btrfs_fs_info {
 
        spinlock_t delayed_iput_lock;
        struct list_head delayed_iputs;
-       struct rw_semaphore delayed_iput_sem;
+       struct mutex cleaner_delayed_iput_mutex;
 
        /* this protects tree_mod_seq_list */
        spinlock_t tree_mod_seq_lock;
index 0ddca67344948ce6f27a5a86aec9e6ffc8973628..41fb43183406c944086ca6eefaf10c7f5a35880e 100644 (file)
@@ -1582,8 +1582,23 @@ int btrfs_init_fs_root(struct btrfs_root *root)
        ret = get_anon_bdev(&root->anon_dev);
        if (ret)
                goto free_writers;
+
+       mutex_lock(&root->objectid_mutex);
+       ret = btrfs_find_highest_objectid(root,
+                                       &root->highest_objectid);
+       if (ret) {
+               mutex_unlock(&root->objectid_mutex);
+               goto free_root_dev;
+       }
+
+       ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+
+       mutex_unlock(&root->objectid_mutex);
+
        return 0;
 
+free_root_dev:
+       free_anon_bdev(root->anon_dev);
 free_writers:
        btrfs_free_subvolume_writers(root->subv_writers);
 fail:
@@ -1781,7 +1796,10 @@ static int cleaner_kthread(void *arg)
                        goto sleep;
                }
 
+               mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
                btrfs_run_delayed_iputs(root);
+               mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+
                again = btrfs_clean_one_deleted_snapshot(root);
                mutex_unlock(&root->fs_info->cleaner_mutex);
 
@@ -2541,8 +2559,8 @@ int open_ctree(struct super_block *sb,
        mutex_init(&fs_info->delete_unused_bgs_mutex);
        mutex_init(&fs_info->reloc_mutex);
        mutex_init(&fs_info->delalloc_root_mutex);
+       mutex_init(&fs_info->cleaner_delayed_iput_mutex);
        seqlock_init(&fs_info->profiles_lock);
-       init_rwsem(&fs_info->delayed_iput_sem);
 
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
        INIT_LIST_HEAD(&fs_info->space_info);
@@ -2667,6 +2685,7 @@ int open_ctree(struct super_block *sb,
        if (btrfs_check_super_csum(bh->b_data)) {
                printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
                err = -EINVAL;
+               brelse(bh);
                goto fail_alloc;
        }
 
@@ -2899,6 +2918,18 @@ retry_root_backup:
        tree_root->commit_root = btrfs_root_node(tree_root);
        btrfs_set_root_refs(&tree_root->root_item, 1);
 
+       mutex_lock(&tree_root->objectid_mutex);
+       ret = btrfs_find_highest_objectid(tree_root,
+                                       &tree_root->highest_objectid);
+       if (ret) {
+               mutex_unlock(&tree_root->objectid_mutex);
+               goto recovery_tree_root;
+       }
+
+       ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+
+       mutex_unlock(&tree_root->objectid_mutex);
+
        ret = btrfs_read_roots(fs_info, tree_root);
        if (ret)
                goto recovery_tree_root;
index c4661db2b72ae4c412adaaecb7cbc236fbe1e7cf..2368cac1115ae74726065de1376473cf7c9712f7 100644 (file)
@@ -4086,8 +4086,10 @@ commit_trans:
                    !atomic_read(&root->fs_info->open_ioctl_trans)) {
                        need_commit--;
 
-                       if (need_commit > 0)
+                       if (need_commit > 0) {
+                               btrfs_start_delalloc_roots(fs_info, 0, -1);
                                btrfs_wait_ordered_roots(fs_info, -1);
+                       }
 
                        trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans))
@@ -4100,11 +4102,12 @@ commit_trans:
                                if (ret)
                                        return ret;
                                /*
-                                * make sure that all running delayed iput are
-                                * done
+                                * The cleaner kthread might still be doing iput
+                                * operations. Wait for it to finish so that
+                                * more space is released.
                                 */
-                               down_write(&root->fs_info->delayed_iput_sem);
-                               up_write(&root->fs_info->delayed_iput_sem);
+                               mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
+                               mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
                                goto again;
                        } else {
                                btrfs_end_transaction(trans, root);
index 767a6056ac45afce29844e469f582baec8ef28cd..07573dc1614abfcce4e290850bf80149c2e5cf17 100644 (file)
@@ -515,7 +515,7 @@ out:
        return ret;
 }
 
-static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
 {
        struct btrfs_path *path;
        int ret;
@@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
        int ret;
        mutex_lock(&root->objectid_mutex);
 
-       if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
-               ret = btrfs_find_highest_objectid(root,
-                                                 &root->highest_objectid);
-               if (ret)
-                       goto out;
-       }
-
        if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
                ret = -ENOSPC;
                goto out;
index ddb347bfee232ec26543055fbf408bfb92f8028f..c8e864b2d530f88c7e65350ca557e36e6061700e 100644 (file)
@@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
                         struct btrfs_trans_handle *trans);
 
 int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
 
 #endif
index 54b5f0de623b067e4c1600ea165fd11d98fa587f..4bc9dbf29a73fbde861d9a8cab06db3876c8bf20 100644 (file)
@@ -3142,8 +3142,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
        if (empty)
                return;
 
-       down_read(&fs_info->delayed_iput_sem);
-
        spin_lock(&fs_info->delayed_iput_lock);
        list_splice_init(&fs_info->delayed_iputs, &list);
        spin_unlock(&fs_info->delayed_iput_lock);
@@ -3154,8 +3152,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
                iput(delayed->inode);
                kfree(delayed);
        }
-
-       up_read(&root->fs_info->delayed_iput_sem);
 }
 
 /*
@@ -6493,7 +6489,7 @@ out_unlock_inode:
 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                      struct dentry *dentry)
 {
-       struct btrfs_trans_handle *trans;
+       struct btrfs_trans_handle *trans = NULL;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = d_inode(old_dentry);
        u64 index;
@@ -6519,6 +6515,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        trans = btrfs_start_transaction(root, 5);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
+               trans = NULL;
                goto fail;
        }
 
@@ -6552,9 +6549,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                btrfs_log_new_name(trans, inode, NULL, parent);
        }
 
-       btrfs_end_transaction(trans, root);
        btrfs_balance_delayed_items(root);
 fail:
+       if (trans)
+               btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -8548,15 +8546,28 @@ int btrfs_readpage(struct file *file, struct page *page)
 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
 {
        struct extent_io_tree *tree;
-
+       struct inode *inode = page->mapping->host;
+       int ret;
 
        if (current->flags & PF_MEMALLOC) {
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
                return 0;
        }
+
+       /*
+        * If we are under memory pressure the VM may call this directly, so
+        * we need to make sure we have the inode referenced for the ordered
+        * extent.  If not, just return as if we didn't do anything.
+        */
+       if (!igrab(inode)) {
+               redirty_page_for_writepage(wbc, page);
+               return AOP_WRITEPAGE_ACTIVATE;
+       }
        tree = &BTRFS_I(page->mapping->host)->io_tree;
-       return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+       ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+       btrfs_add_delayed_iput(inode);
+       return ret;
 }
 
 static int btrfs_writepages(struct address_space *mapping,
@@ -9650,9 +9661,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        /*
         * 2 items for inode item and ref
         * 2 items for dir items
+        * 1 item for updating parent inode item
+        * 1 item for the inline extent item
         * 1 item for xattr if selinux is on
         */
-       trans = btrfs_start_transaction(root, 5);
+       trans = btrfs_start_transaction(root, 7);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
index 08fd3f0f34fd086d5faaa257608e0be5b8020c9b..f07d01bc4875e594b4847567a112b13733ef7252 100644 (file)
@@ -568,6 +568,10 @@ static noinline int create_subvol(struct inode *dir,
                goto fail;
        }
 
+       mutex_lock(&new_root->objectid_mutex);
+       new_root->highest_objectid = new_dirid;
+       mutex_unlock(&new_root->objectid_mutex);
+
        /*
         * insert the directory item
         */
index 7cf8509deda7c0ea6e98af02a8ab0acafbe016b2..2c849b08a91b53e0a7f1c475301a77d623cb5293 100644 (file)
@@ -310,8 +310,16 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
 
                err = btrfs_insert_fs_root(root->fs_info, root);
+               /*
+                * The root might have been inserted already, as before we look
+                * for orphan roots, log replay might have happened, which
+                * triggers a transaction commit and qgroup accounting, which
+                * in turn reads and inserts fs roots while doing backref
+                * walking.
+                */
+               if (err == -EEXIST)
+                       err = 0;
                if (err) {
-                       BUG_ON(err == -EEXIST);
                        btrfs_free_fs_root(root);
                        break;
                }
index 355a458cba1abe29efb3410a6ae93261052ba1e9..63a6152be04b4f4265b25e253eadb1d4d91b8969 100644 (file)
@@ -1469,7 +1469,21 @@ static int read_symlink(struct btrfs_root *root,
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
-       BUG_ON(ret);
+       if (ret) {
+               /*
+                * An empty symlink inode. Can happen in rare error paths when
+                * creating a symlink (the transaction committed before the inode
+                * eviction handler removed the symlink inode items, and a crash
+                * happened in between or the subvol was snapshotted in between).
+                * Print an informative message to dmesg/syslog so that the user
+                * can delete the symlink.
+                */
+               btrfs_err(root->fs_info,
+                         "Found empty symlink inode %llu at root %llu",
+                         ino, root->root_key.objectid);
+               ret = -EIO;
+               goto out;
+       }
 
        ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
                        struct btrfs_file_extent_item);
index 24154e422945167f474557887c62acaf6ed0779c..fe609b81dd1b572370a8c6f92f268e4efc0ca0e2 100644 (file)
@@ -1956,6 +1956,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
  * there are other factors that may change the result (like a new metadata
  * chunk).
  *
+ * If metadata is exhausted, f_bavail will be 0.
+ *
  * FIXME: not accurate for mixed block groups, total and free/used are ok,
  * available appears slightly larger.
  */
@@ -1967,11 +1969,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct btrfs_space_info *found;
        u64 total_used = 0;
        u64 total_free_data = 0;
+       u64 total_free_meta = 0;
        int bits = dentry->d_sb->s_blocksize_bits;
        __be32 *fsid = (__be32 *)fs_info->fsid;
        unsigned factor = 1;
        struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
        int ret;
+       u64 thresh = 0;
 
        /*
         * holding chunk_muext to avoid allocating new chunks, holding
@@ -1997,6 +2001,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
                                }
                        }
                }
+               if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
+                       total_free_meta += found->disk_total - found->disk_used;
 
                total_used += found->disk_used;
        }
@@ -2019,6 +2025,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_bavail += div_u64(total_free_data, factor);
        buf->f_bavail = buf->f_bavail >> bits;
 
+       /*
+        * We calculate the remaining metadata space minus the global reserve.
+        * If this is (supposedly) smaller than zero, there's no space. But this
+        * does not hold in practice; the exhausted state happens while there is
+        * still some positive delta. So we apply some guesswork and compare the
+        * delta to a 4M threshold.  (Practically observed delta was ~2M.)
+        *
+        * We probably cannot calculate the exact threshold value because this
+        * depends on the internal reservations requested by various
+        * operations, so an operation that consumes only a little metadata may
+        * succeed even if the Avail is zero. But this is better than the other
+        * way around.
+        */
+       thresh = 4 * 1024 * 1024;
+
+       if (total_free_meta - thresh < block_rsv->size)
+               buf->f_bavail = 0;
+
        buf->f_type = BTRFS_SUPER_MAGIC;
        buf->f_bsize = dentry->d_sb->s_blocksize;
        buf->f_namelen = BTRFS_NAME_LEN;
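
A worked example of the new cutoff: with total_free_meta = 10 MiB of free metadata space, a global reserve of block_rsv->size = 8 MiB, and thresh = 4 MiB, the check computes 10 MiB - 4 MiB = 6 MiB, which is below the reserve, so f_bavail is forced to 0 even though some raw free space remains.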
index 9e084477d32060d8fea535dbc7d345fc10473e96..9c62a6f9757aef0efe25db846c1c06a0d0a14e8b 100644 (file)
@@ -232,6 +232,7 @@ static struct btrfs_device *__alloc_device(void)
        spin_lock_init(&dev->reada_lock);
        atomic_set(&dev->reada_in_flight, 0);
        atomic_set(&dev->dev_stats_ccnt, 0);
+       btrfs_device_data_ordered_init(dev);
        INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
        INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
 
index c3cc1609025fa3a966c2d5b10f32626214a9e4ef..44b3d4280abb85c8dd8bfe86a2090659e75c0e17 100644 (file)
  * so that it will fit. We use hash_64 to convert the value to 31 bits, and
  * then add 1, to ensure that we don't end up with a 0 as the value.
  */
-#if BITS_PER_LONG == 64
 static inline ino_t
 cifs_uniqueid_to_ino_t(u64 fileid)
 {
+       if (sizeof(ino_t) < sizeof(u64))
+               return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
+
        return (ino_t)fileid;
 }
-#else
-static inline ino_t
-cifs_uniqueid_to_ino_t(u64 fileid)
-{
-       return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
-}
-#endif
 
 extern struct file_system_type cifs_fs_type;
 extern const struct address_space_operations cifs_addr_ops;
index 90b4f9f7de660a261b5f93322af59282294953fd..76fcb50295a38b63a58a0d3f656d023e02b988a8 100644 (file)
@@ -1396,11 +1396,10 @@ openRetry:
  * current bigbuf.
  */
 static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+discard_remaining_data(struct TCP_Server_Info *server)
 {
        unsigned int rfclen = get_rfc1002_length(server->smallbuf);
        int remaining = rfclen + 4 - server->total_read;
-       struct cifs_readdata *rdata = mid->callback_data;
 
        while (remaining > 0) {
                int length;
@@ -1414,10 +1413,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                remaining -= length;
        }
 
-       dequeue_mid(mid, rdata->result);
        return 0;
 }
 
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+       int length;
+       struct cifs_readdata *rdata = mid->callback_data;
+
+       length = discard_remaining_data(server);
+       dequeue_mid(mid, rdata->result);
+       return length;
+}
+
 int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
@@ -1446,6 +1455,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                return length;
        server->total_read += length;
 
+       if (server->ops->is_status_pending &&
+           server->ops->is_status_pending(buf, server, 0)) {
+               discard_remaining_data(server);
+               return -1;
+       }
+
        /* Was the SMB read successful? */
        rdata->result = server->ops->map_error(buf, false);
        if (rdata->result != 0) {
index 767555518d40ccd357b6f5540f2758bb8be6e654..373b5cd1c9136b58093c3f1d9ade8553b9d1f2ad 100644 (file)
@@ -1109,21 +1109,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
 {
        char *data_offset;
        struct create_context *cc;
-       unsigned int next = 0;
+       unsigned int next;
+       unsigned int remaining;
        char *name;
 
        data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
+       remaining = le32_to_cpu(rsp->CreateContextsLength);
        cc = (struct create_context *)data_offset;
-       do {
-               cc = (struct create_context *)((char *)cc + next);
+       while (remaining >= sizeof(struct create_context)) {
                name = le16_to_cpu(cc->NameOffset) + (char *)cc;
-               if (le16_to_cpu(cc->NameLength) != 4 ||
-                   strncmp(name, "RqLs", 4)) {
-                       next = le32_to_cpu(cc->Next);
-                       continue;
-               }
-               return server->ops->parse_lease_buf(cc, epoch);
-       } while (next != 0);
+               if (le16_to_cpu(cc->NameLength) == 4 &&
+                   strncmp(name, "RqLs", 4) == 0)
+                       return server->ops->parse_lease_buf(cc, epoch);
+
+               next = le32_to_cpu(cc->Next);
+               if (!next)
+                       break;
+               remaining -= next;
+               cc = (struct create_context *)((char *)cc + next);
+       }
 
        return 0;
 }
index 5c33aeb0f68febdd03e6f478c7949fd847e9fdcd..877bcbbd03ff549a8536bf289dac294f659645b9 100644 (file)
@@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry)
        return dentry->d_name.name != dentry->d_iname;
 }
 
-/*
- * Make sure other CPUs see the inode attached before the type is set.
- */
 static inline void __d_set_inode_and_type(struct dentry *dentry,
                                          struct inode *inode,
                                          unsigned type_flags)
@@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
        unsigned flags;
 
        dentry->d_inode = inode;
-       smp_wmb();
        flags = READ_ONCE(dentry->d_flags);
        flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
        flags |= type_flags;
        WRITE_ONCE(dentry->d_flags, flags);
 }
 
-/*
- * Ideally, we want to make sure that other CPUs see the flags cleared before
- * the inode is detached, but this is really a violation of RCU principles
- * since the ordering suggests we should always set inode before flags.
- *
- * We should instead replace or discard the entire dentry - but that sucks
- * performancewise on mass deletion/rename.
- */
 static inline void __d_clear_type_and_inode(struct dentry *dentry)
 {
        unsigned flags = READ_ONCE(dentry->d_flags);
 
        flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
        WRITE_ONCE(dentry->d_flags, flags);
-       smp_wmb();
        dentry->d_inode = NULL;
 }
 
@@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
        __releases(dentry->d_inode->i_lock)
 {
        struct inode *inode = dentry->d_inode;
+
+       raw_write_seqcount_begin(&dentry->d_seq);
        __d_clear_type_and_inode(dentry);
        hlist_del_init(&dentry->d_u.d_alias);
-       dentry_rcuwalk_invalidate(dentry);
+       raw_write_seqcount_end(&dentry->d_seq);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&inode->i_lock);
        if (!inode->i_nlink)
@@ -1757,8 +1746,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        spin_lock(&dentry->d_lock);
        if (inode)
                hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
+       raw_write_seqcount_begin(&dentry->d_seq);
        __d_set_inode_and_type(dentry, inode, add_flags);
-       dentry_rcuwalk_invalidate(dentry);
+       raw_write_seqcount_end(&dentry->d_seq);
        spin_unlock(&dentry->d_lock);
        fsnotify_d_instantiate(dentry, inode);
 }
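
For context: the smp_wmb()-based ordering removed above is replaced by making the inode/type-flags update a write-side section of dentry->d_seq, so lockless RCU-walk readers either see a consistent pair or retry. A sketch of the matching reader side (the real readers live elsewhere in fs/dcache.c and fs/namei.c; this is illustrative only):

	/* Sketch: sample d_seq, read the fields, retry if a writer ran. */
	unsigned seq;
	struct inode *inode;

	do {
		seq = raw_seqcount_begin(&dentry->d_seq);
		inode = dentry->d_inode;
		/* ... also read dentry->d_flags here ... */
	} while (read_seqcount_retry(&dentry->d_seq, seq));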
index 602e8441bc0fb6b094ec96dfb3273ff73b8b1506..01171d8a6ee94fea8ad7ff02015426ad156b3197 100644 (file)
@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
                dio->io_error = -EIO;
 
        if (dio->is_async && dio->rw == READ && dio->should_dirty) {
-               bio_check_pages_dirty(bio);     /* transfers ownership */
                err = bio->bi_error;
+               bio_check_pages_dirty(bio);     /* transfers ownership */
        } else {
                bio_for_each_segment_all(bvec, bio, i) {
                        struct page *page = bvec->bv_page;
index 90001da9abfdfe98b01a0eacdb5381425234b126..66842e55c48cdc0efdde1db8f01b564b651ef85d 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/efi.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
+#include <linux/mount.h>
 
 #include "internal.h"
 
@@ -103,9 +104,78 @@ out_free:
        return size;
 }
 
+static int
+efivarfs_ioc_getxflags(struct file *file, void __user *arg)
+{
+       struct inode *inode = file->f_mapping->host;
+       unsigned int i_flags;
+       unsigned int flags = 0;
+
+       i_flags = inode->i_flags;
+       if (i_flags & S_IMMUTABLE)
+               flags |= FS_IMMUTABLE_FL;
+
+       if (copy_to_user(arg, &flags, sizeof(flags)))
+               return -EFAULT;
+       return 0;
+}
+
+static int
+efivarfs_ioc_setxflags(struct file *file, void __user *arg)
+{
+       struct inode *inode = file->f_mapping->host;
+       unsigned int flags;
+       unsigned int i_flags = 0;
+       int error;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
+       if (copy_from_user(&flags, arg, sizeof(flags)))
+               return -EFAULT;
+
+       if (flags & ~FS_IMMUTABLE_FL)
+               return -EOPNOTSUPP;
+
+       if (!capable(CAP_LINUX_IMMUTABLE))
+               return -EPERM;
+
+       if (flags & FS_IMMUTABLE_FL)
+               i_flags |= S_IMMUTABLE;
+
+       error = mnt_want_write_file(file);
+       if (error)
+               return error;
+
+       mutex_lock(&inode->i_mutex);
+       inode_set_flags(inode, i_flags, S_IMMUTABLE);
+       mutex_unlock(&inode->i_mutex);
+
+       mnt_drop_write_file(file);
+
+       return 0;
+}
+
+long
+efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
+{
+       void __user *arg = (void __user *)p;
+
+       switch (cmd) {
+       case FS_IOC_GETFLAGS:
+               return efivarfs_ioc_getxflags(file, arg);
+       case FS_IOC_SETFLAGS:
+               return efivarfs_ioc_setxflags(file, arg);
+       }
+
+       return -ENOTTY;
+}
+
 const struct file_operations efivarfs_file_operations = {
        .open   = simple_open,
        .read   = efivarfs_file_read,
        .write  = efivarfs_file_write,
        .llseek = no_llseek,
+       .unlocked_ioctl = efivarfs_file_ioctl,
 };
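
With these handlers in place, the standard inode-flags ioctls work on efivarfs files, which is what chattr(1) uses. A hypothetical userspace sketch that clears the immutable bit before deleting a variable (equivalent to chattr -i):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	/* Sketch: make an efivarfs file mutable so it can be deleted. */
	static int clear_immutable(const char *path)
	{
		unsigned int flags;
		int ret = -1;
		int fd = open(path, O_RDONLY);

		if (fd < 0)
			return -1;
		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
			flags &= ~FS_IMMUTABLE_FL;
			ret = ioctl(fd, FS_IOC_SETFLAGS, &flags);
		}
		close(fd);
		return ret;
	}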
index 3381b9da9ee6080881720f0500b71c74ecdf6058..e2ab6d0497f2bb86dee165421487d27e4b64ba0d 100644 (file)
@@ -15,7 +15,8 @@
 #include "internal.h"
 
 struct inode *efivarfs_get_inode(struct super_block *sb,
-                               const struct inode *dir, int mode, dev_t dev)
+                               const struct inode *dir, int mode,
+                               dev_t dev, bool is_removable)
 {
        struct inode *inode = new_inode(sb);
 
@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
                inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+               inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
                switch (mode & S_IFMT) {
                case S_IFREG:
                        inode->i_fop = &efivarfs_file_operations;
@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
 static int efivarfs_create(struct inode *dir, struct dentry *dentry,
                          umode_t mode, bool excl)
 {
-       struct inode *inode;
+       struct inode *inode = NULL;
        struct efivar_entry *var;
        int namelen, i = 0, err = 0;
+       bool is_removable = false;
 
        if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
                return -EINVAL;
 
-       inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
-       if (!inode)
-               return -ENOMEM;
-
        var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
-       if (!var) {
-               err = -ENOMEM;
-               goto out;
-       }
+       if (!var)
+               return -ENOMEM;
 
        /* length of the variable name itself: remove GUID and separator */
        namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
        efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
                        &var->var.VendorGuid);
 
+       if (efivar_variable_is_removable(var->var.VendorGuid,
+                                        dentry->d_name.name, namelen))
+               is_removable = true;
+
+       inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
+       if (!inode) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        for (i = 0; i < namelen; i++)
                var->var.VariableName[i] = dentry->d_name.name[i];
 
@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
 out:
        if (err) {
                kfree(var);
-               iput(inode);
+               if (inode)
+                       iput(inode);
        }
        return err;
 }
index b5ff16addb7ce0984d14d2efe0324be884d20911..b4505188e799b9f5f50b2f792090775fd4244127 100644 (file)
@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
 extern const struct inode_operations efivarfs_dir_inode_operations;
 extern bool efivarfs_valid_name(const char *str, int len);
 extern struct inode *efivarfs_get_inode(struct super_block *sb,
-                       const struct inode *dir, int mode, dev_t dev);
+                       const struct inode *dir, int mode, dev_t dev,
+                       bool is_removable);
 
 extern struct list_head efivarfs_list;
 
index 86a2121828c312e53d64aedee9506319bbadf6c1..abb244b0602458e1ab52ba6730a7e41a02965166 100644 (file)
@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
        struct dentry *dentry, *root = sb->s_root;
        unsigned long size = 0;
        char *name;
-       int len, i;
+       int len;
        int err = -ENOMEM;
+       bool is_removable = false;
 
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
        memcpy(entry->var.VariableName, name16, name_size);
        memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
 
-       len = ucs2_strlen(entry->var.VariableName);
+       len = ucs2_utf8size(entry->var.VariableName);
 
        /* name, plus '-', plus GUID, plus NUL*/
        name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
        if (!name)
                goto fail;
 
-       for (i = 0; i < len; i++)
-               name[i] = entry->var.VariableName[i] & 0xFF;
+       ucs2_as_utf8(name, entry->var.VariableName, len);
+
+       if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
+               is_removable = true;
 
        name[len] = '-';
 
@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
 
        name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
 
-       inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
+       inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
+                                  is_removable);
        if (!inode)
                goto fail_name;
 
@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_d_op              = &efivarfs_d_ops;
        sb->s_time_gran         = 1;
 
-       inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+       inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
        if (!inode)
                return -ENOMEM;
        inode->i_op = &efivarfs_dir_inode_operations;
index ea433a7f4bca21511ba84fbfbe52f71883680661..06bda0361e7ce80dcecdf2245b4922e0af8cffd3 100644 (file)
@@ -657,6 +657,34 @@ has_zeroout:
        return retval;
 }
 
+/*
+ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
+ * we have to be careful as someone else may be manipulating b_state as well.
+ */
+static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
+{
+       unsigned long old_state;
+       unsigned long new_state;
+
+       flags &= EXT4_MAP_FLAGS;
+
+       /* Dummy buffer_head? Set non-atomically. */
+       if (!bh->b_page) {
+               bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
+               return;
+       }
+       /*
+        * Someone else may be modifying b_state. Be careful! This is ugly but
+        * once we get rid of using bh as a container for mapping information
+        * to pass to / from get_block functions, this can go away.
+        */
+       do {
+               old_state = READ_ONCE(bh->b_state);
+               new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
+       } while (unlikely(
+                cmpxchg(&bh->b_state, old_state, new_state) != old_state));
+}
+
 /* Maximum number of blocks we map for direct IO at once. */
 #define DIO_MAX_BLOCKS 4096
 
@@ -693,7 +721,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
                ext4_io_end_t *io_end = ext4_inode_aio(inode);
 
                map_bh(bh, inode->i_sb, map.m_pblk);
-               bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+               ext4_update_bh_state(bh, map.m_flags);
                if (IS_DAX(inode) && buffer_unwritten(bh)) {
                        /*
                         * dgc: I suspect unwritten conversion on ext4+DAX is
@@ -1669,7 +1697,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                return ret;
 
        map_bh(bh, inode->i_sb, map.m_pblk);
-       bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+       ext4_update_bh_state(bh, map.m_flags);
 
        if (buffer_unwritten(bh)) {
                /* A delayed write to unwritten bh should be marked
index 023f6a1f23cd034810aa26acde809cde118841f2..7a8ea13515847196c13107322900e540bfdffa23 100644 (file)
@@ -223,6 +223,9 @@ static void wb_wait_for_completion(struct backing_dev_info *bdi,
 #define WB_FRN_HIST_MAX_SLOTS  (WB_FRN_HIST_THR_SLOTS / 2 + 1)
                                        /* one round can affect upto 5 slots */
 
+static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
+static struct workqueue_struct *isw_wq;
+
 void __inode_attach_wb(struct inode *inode, struct page *page)
 {
        struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -424,6 +427,8 @@ skip_switch:
 
        iput(inode);
        kfree(isw);
+
+       atomic_dec(&isw_nr_in_flight);
 }
 
 static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
@@ -433,7 +438,7 @@ static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
 
        /* needs to grab bh-unsafe locks, bounce to work item */
        INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
-       schedule_work(&isw->work);
+       queue_work(isw_wq, &isw->work);
 }
 
 /**
@@ -469,7 +474,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 
        /* while holding I_WB_SWITCH, no one else can update the association */
        spin_lock(&inode->i_lock);
-       if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
+       if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
+           inode->i_state & (I_WB_SWITCH | I_FREEING) ||
            inode_to_wb(inode) == isw->new_wb) {
                spin_unlock(&inode->i_lock);
                goto out_free;
@@ -480,6 +486,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
        ihold(inode);
        isw->inode = inode;
 
+       atomic_inc(&isw_nr_in_flight);
+
        /*
         * In addition to synchronizing among switchers, I_WB_SWITCH tells
         * the RCU protected stat update paths to grab the mapping's
@@ -842,6 +850,33 @@ restart:
                wb_put(last_wb);
 }
 
+/**
+ * cgroup_writeback_umount - flush inode wb switches for umount
+ *
+ * This function is called when a super_block is about to be destroyed and
+ * flushes in-flight inode wb switches.  An inode wb switch goes through
+ * RCU and then workqueue, so the two need to be flushed in order to ensure
+ * that all previously scheduled switches are finished.  As wb switches are
+ * rare occurrences and synchronize_rcu() can take a while, perform
+ * flushing iff wb switches are in flight.
+ */
+void cgroup_writeback_umount(void)
+{
+       if (atomic_read(&isw_nr_in_flight)) {
+               synchronize_rcu();
+               flush_workqueue(isw_wq);
+       }
+}
+
+static int __init cgroup_writeback_init(void)
+{
+       isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
+       if (!isw_wq)
+               return -ENOMEM;
+       return 0;
+}
+fs_initcall(cgroup_writeback_init);
+
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
 static struct bdi_writeback *
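
For context: an inode wb switch travels through an RCU callback and then a work item, so flushing RCU first and the workqueue second guarantees every switch queued before the call has finished. The companion change (not part of this excerpt) is expected to invoke cgroup_writeback_umount() from the superblock shutdown path; conceptually:

	/* Sketch (assumed call site; the fs/super.c hunk is not shown here):
	 * drain in-flight wb switches before tearing down the super_block. */
	void generic_shutdown_super(struct super_block *sb)
	{
		/* ... earlier shutdown steps ... */
		cgroup_writeback_umount();
		/* ... evict inodes and release the super_block ... */
	}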
index 2ac99db3750ef7b2d2bf3e9ea9e90e69320a0d83..5a7b3229b956ea99fe57b1e88f7d8234b5fee690 100644 (file)
@@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
 
        init_special_inode(inode, mode, dev);
        err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
-       if (!err)
+       if (err)
                goto out_free;
 
        err = read_name(inode, name);
        __putname(name);
        if (err)
                goto out_put;
-       if (err)
-               goto out_put;
 
        d_instantiate(dentry, inode);
        return 0;
index ae4d5a1fa4c9b7f75ce188d8ec80e69d0a820892..bffb908acbd435fb661f31b7cc1a62a38e83cfa5 100644 (file)
@@ -375,12 +375,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
        struct inode *inode = d_inode(dentry);
        dnode_secno dno;
        int r;
-       int rep = 0;
        int err;
 
        hpfs_lock(dir->i_sb);
        hpfs_adjust_length(name, &len);
-again:
+
        err = -ENOENT;
        de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
        if (!de)
@@ -400,33 +399,9 @@ again:
                hpfs_error(dir->i_sb, "there was error when removing dirent");
                err = -EFSERROR;
                break;
-       case 2:         /* no space for deleting, try to truncate file */
-
+       case 2:         /* no space for deleting */
                err = -ENOSPC;
-               if (rep++)
-                       break;
-
-               dentry_unhash(dentry);
-               if (!d_unhashed(dentry)) {
-                       hpfs_unlock(dir->i_sb);
-                       return -ENOSPC;
-               }
-               if (generic_permission(inode, MAY_WRITE) ||
-                   !S_ISREG(inode->i_mode) ||
-                   get_write_access(inode)) {
-                       d_rehash(dentry);
-               } else {
-                       struct iattr newattrs;
-                       /*pr_info("truncating file before delete.\n");*/
-                       newattrs.ia_size = 0;
-                       newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
-                       err = notify_change(dentry, &newattrs, NULL);
-                       put_write_access(inode);
-                       if (!err)
-                               goto again;
-               }
-               hpfs_unlock(dir->i_sb);
-               return -ENOSPC;
+               break;
        default:
                drop_nlink(inode);
                err = 0;
index 3ea36554107fc7740558ac81daa0328c308b2903..8918ac905a3b1face373e3a0ad456c2d3bf6d95b 100644 (file)
@@ -2,10 +2,6 @@
        JFFS2 LOCKING DOCUMENTATION
        ---------------------------
 
-At least theoretically, JFFS2 does not require the Big Kernel Lock
-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
-code. It has its own locking, as described below.
-
 This document attempts to describe the existing locking rules for
 JFFS2. It is not expected to remain perfectly up to date, but ought to
 be fairly close.
@@ -69,6 +65,7 @@ Ordering constraints:
           any f->sem held.
        2. Never attempt to lock two file mutexes in one thread.
           No ordering rules have been made for doing so.
+       3. Never lock a page cache page with f->sem held.
 
 
        erase_completion_lock spinlock
index a3750f902adcbae8f8cabbad066f9d629b805155..c1f04947d7dcff0394656687988d7f8c6371f872 100644 (file)
@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
 
 
 static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
-                                   struct jffs2_inode_cache *ic)
+                                   struct jffs2_inode_cache *ic,
+                                   int *dir_hardlinks)
 {
        struct jffs2_full_dirent *fd;
 
@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
                        dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
                                  fd->name, fd->ino, ic->ino);
                        jffs2_mark_node_obsolete(c, fd->raw);
+                       /* Clear the ic/raw union so it doesn't cause problems later. */
+                       fd->ic = NULL;
                        continue;
                }
 
+               /* From this point, fd->raw is no longer used so we can set fd->ic */
+               fd->ic = child_ic;
+               child_ic->pino_nlink++;
+               /* If we appear (at this stage) to have hard-linked directories,
+                * set a flag to trigger a scan later */
                if (fd->type == DT_DIR) {
-                       if (child_ic->pino_nlink) {
-                               JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
-                                           fd->name, fd->ino, ic->ino);
-                               /* TODO: What do we do about it? */
-                       } else {
-                               child_ic->pino_nlink = ic->ino;
-                       }
-               } else
-                       child_ic->pino_nlink++;
+                       child_ic->flags |= INO_FLAGS_IS_DIR;
+                       if (child_ic->pino_nlink > 1)
+                               *dir_hardlinks = 1;
+               }
 
                dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
                /* Can't free scan_dents so far. We might need them in pass 2 */
@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
 */
 static int jffs2_build_filesystem(struct jffs2_sb_info *c)
 {
-       int ret;
-       int i;
+       int ret, i, dir_hardlinks = 0;
        struct jffs2_inode_cache *ic;
        struct jffs2_full_dirent *fd;
        struct jffs2_full_dirent *dead_fds = NULL;
@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
        /* Now scan the directory tree, increasing nlink according to every dirent found. */
        for_each_inode(i, c, ic) {
                if (ic->scan_dents) {
-                       jffs2_build_inode_pass1(c, ic);
+                       jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
                        cond_resched();
                }
        }
@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
        }
 
        dbg_fsbuild("pass 2a complete\n");
+
+       if (dir_hardlinks) {
+               /* If we detected directory hardlinks earlier, *hopefully*
+                * they are gone now because some of the links were from
+                * dead directories which still had some old dirents lying
+                * around and not yet garbage-collected, but which have
+                * been discarded above. So clear the pino_nlink field
+                * in each directory, so that the final scan below can
+                * print appropriate warnings. */
+               for_each_inode(i, c, ic) {
+                       if (ic->flags & INO_FLAGS_IS_DIR)
+                               ic->pino_nlink = 0;
+               }
+       }
        dbg_fsbuild("freeing temporary data structures\n");
 
        /* Finally, we can scan again and free the dirent structs */
@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
                while(ic->scan_dents) {
                        fd = ic->scan_dents;
                        ic->scan_dents = fd->next;
+                       /* We do use the pino_nlink field to count nlink of
+                        * directories during fs build, so set it to the
+                        * parent ino# now that there's (hopefully) only
+                        * one. */
+                       if (fd->type == DT_DIR) {
+                               if (!fd->ic) {
+                               if (!fd->ic) {
+                                       /* We'll have complained about it and marked the corresponding
+                                          raw node obsolete already. Just skip it. */
+                                       continue;
+                               }
+
+                               /* We *have* to have set this in jffs2_build_inode_pass1() */
+                               BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
+
+                               /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
+                                * is set. Otherwise, we know this should never trigger anyway, so
+                                * we don't do the check. And ic->pino_nlink still contains the nlink
+                                * value (which is 1). */
+                               if (dir_hardlinks && fd->ic->pino_nlink) {
+                                       JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
+                                                   fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
+                                       /* Should we unlink it from its previous parent? */
+                               }
+
+                               /* For directories, ic->pino_nlink holds that parent inode # */
+                               fd->ic->pino_nlink = ic->ino;
+                       }
                        jffs2_free_full_dirent(fd);
                }
                ic->scan_dents = NULL;
@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
 
                        /* Reduce nlink of the child. If it's now zero, stick it on the
                           dead_fds list to be cleaned up later. Else just free the fd */
-
-                       if (fd->type == DT_DIR)
-                               child_ic->pino_nlink = 0;
-                       else
-                               child_ic->pino_nlink--;
+                       child_ic->pino_nlink--;
 
                        if (!child_ic->pino_nlink) {
                                dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
index f509f62e12f6ef85e95b5c159353cba757ef4af1..3361979d728c0b5e7d1714c02e7b34428fcc658c 100644 (file)
@@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
        struct page *pg;
        struct inode *inode = mapping->host;
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
-       struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
-       struct jffs2_raw_inode ri;
-       uint32_t alloc_len = 0;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        uint32_t pageofs = index << PAGE_CACHE_SHIFT;
        int ret = 0;
 
-       jffs2_dbg(1, "%s()\n", __func__);
-
-       if (pageofs > inode->i_size) {
-               ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
-                                         ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
-               if (ret)
-                       return ret;
-       }
-
-       mutex_lock(&f->sem);
        pg = grab_cache_page_write_begin(mapping, index, flags);
-       if (!pg) {
-               if (alloc_len)
-                       jffs2_complete_reservation(c);
-               mutex_unlock(&f->sem);
+       if (!pg)
                return -ENOMEM;
-       }
        *pagep = pg;
 
-       if (alloc_len) {
+       jffs2_dbg(1, "%s()\n", __func__);
+
+       if (pageofs > inode->i_size) {
                /* Make new hole frag from old EOF to new page */
+               struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+               struct jffs2_raw_inode ri;
                struct jffs2_full_dnode *fn;
+               uint32_t alloc_len;
 
                jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
                          (unsigned int)inode->i_size, pageofs);
 
+               ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+                                         ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+               if (ret)
+                       goto out_page;
+
+               mutex_lock(&f->sem);
                memset(&ri, 0, sizeof(ri));
 
                ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                if (IS_ERR(fn)) {
                        ret = PTR_ERR(fn);
                        jffs2_complete_reservation(c);
+                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                        jffs2_mark_node_obsolete(c, fn->raw);
                        jffs2_free_full_dnode(fn);
                        jffs2_complete_reservation(c);
+                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                jffs2_complete_reservation(c);
                inode->i_size = pageofs;
+               mutex_unlock(&f->sem);
        }
 
        /*
@@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
         * case of a short-copy.
         */
        if (!PageUptodate(pg)) {
+               mutex_lock(&f->sem);
                ret = jffs2_do_readpage_nolock(inode, pg);
+               mutex_unlock(&f->sem);
                if (ret)
                        goto out_page;
        }
-       mutex_unlock(&f->sem);
        jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
        return ret;
 
 out_page:
        unlock_page(pg);
        page_cache_release(pg);
-       mutex_unlock(&f->sem);
        return ret;
 }
 
index 5a2dec2b064c945aba23acac93dcee2820019d68..95d5880a63ee10fe98ad3520b20890f247e36d0c 100644 (file)
@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
                BUG_ON(start > orig_start);
        }
 
-       /* First, use readpage() to read the appropriate page into the page cache */
-       /* Q: What happens if we actually try to GC the _same_ page for which commit_write()
-        *    triggered garbage collection in the first place?
-        * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
-        *    page OK. We'll actually write it out again in commit_write, which is a little
-        *    suboptimal, but at least we're correct.
-        */
+       /* The rules state that we must obtain the page lock *before* f->sem, so
+        * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
+        * actually going to *change* so we're safe; we only allow reading.
+        *
+        * It is important to note that jffs2_write_begin() will ensure that its
+        * page is marked Uptodate before allocating space. That means that if we
+        * end up here trying to GC the *same* page that jffs2_write_begin() is
+        * trying to write out, read_cache_page() will not deadlock. */
+       mutex_unlock(&f->sem);
        pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
+       mutex_lock(&f->sem);
 
        if (IS_ERR(pg_ptr)) {
                pr_warn("read_cache_page() returned error: %ld\n",
index fa35ff79ab358fe79c734f54ff51ebcb21ab9f7f..0637271f377012b51f6684622f3a0c604385d474 100644 (file)
@@ -194,6 +194,7 @@ struct jffs2_inode_cache {
 #define INO_STATE_CLEARING     6       /* In clear_inode() */
 
 #define INO_FLAGS_XATTR_CHECKED        0x01    /* has no duplicate xattr_ref */
+#define INO_FLAGS_IS_DIR       0x02    /* is a directory */
 
 #define RAWNODE_CLASS_INODE_CACHE      0
 #define RAWNODE_CLASS_XATTR_DATUM      1
@@ -249,7 +250,10 @@ struct jffs2_readinode_info
 
 struct jffs2_full_dirent
 {
-       struct jffs2_raw_node_ref *raw;
+       union {
+               struct jffs2_raw_node_ref *raw;
+               struct jffs2_inode_cache *ic; /* Just during part of build */
+       };
        struct jffs2_full_dirent *next;
        uint32_t version;
        uint32_t ino; /* == zero for unlink */
index 0d2b3267e2a3eb8fefbffb3cd3f58b31fe4ed04d..6333263b7bc86a975f1d9fd5487276a15991bfb4 100644 (file)
@@ -2182,7 +2182,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
                goto out;
        }
 
-again:
        error = flock_to_posix_lock(filp, file_lock, &flock);
        if (error)
                goto out;
@@ -2224,19 +2223,22 @@ again:
         * Attempt to detect a close/fcntl race and recover by
         * releasing the lock that was just acquired.
         */
-       /*
-        * we need that spin_lock here - it prevents reordering between
-        * update of i_flctx->flc_posix and check for it done in close().
-        * rcu_read_lock() wouldn't do.
-        */
-       spin_lock(&current->files->file_lock);
-       f = fcheck(fd);
-       spin_unlock(&current->files->file_lock);
-       if (!error && f != filp && flock.l_type != F_UNLCK) {
-               flock.l_type = F_UNLCK;
-               goto again;
+       if (!error && file_lock->fl_type != F_UNLCK) {
+               /*
+                * We need that spin_lock here - it prevents reordering between
+                * update of i_flctx->flc_posix and check for it done in
+                * close(). rcu_read_lock() wouldn't do.
+                */
+               spin_lock(&current->files->file_lock);
+               f = fcheck(fd);
+               spin_unlock(&current->files->file_lock);
+               if (f != filp) {
+                       file_lock->fl_type = F_UNLCK;
+                       error = do_lock_file_wait(filp, cmd, file_lock);
+                       WARN_ON_ONCE(error);
+                       error = -EBADF;
+               }
        }
-
 out:
        locks_free_lock(file_lock);
        return error;
@@ -2322,7 +2324,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
                goto out;
        }
 
-again:
        error = flock64_to_posix_lock(filp, file_lock, &flock);
        if (error)
                goto out;
@@ -2364,14 +2365,22 @@ again:
         * Attempt to detect a close/fcntl race and recover by
         * releasing the lock that was just acquired.
         */
-       spin_lock(&current->files->file_lock);
-       f = fcheck(fd);
-       spin_unlock(&current->files->file_lock);
-       if (!error && f != filp && flock.l_type != F_UNLCK) {
-               flock.l_type = F_UNLCK;
-               goto again;
+       if (!error && file_lock->fl_type != F_UNLCK) {
+               /*
+                * We need that spin_lock here - it prevents reordering between
+                * update of i_flctx->flc_posix and check for it done in
+                * close(). rcu_read_lock() wouldn't do.
+                */
+               spin_lock(&current->files->file_lock);
+               f = fcheck(fd);
+               spin_unlock(&current->files->file_lock);
+               if (f != filp) {
+                       file_lock->fl_type = F_UNLCK;
+                       error = do_lock_file_wait(filp, cmd, file_lock);
+                       WARN_ON_ONCE(error);
+                       error = -EBADF;
+               }
        }
-
 out:
        locks_free_lock(file_lock);
        return error;
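
Both locks.c hunks replace the old goto-again retry with a one-shot recovery: take the lock, then re-check that the fd still maps to the same struct file; if it does not, the task raced with close(), so the freshly acquired lock is undone and the call fails with -EBADF instead of silently converting the request into an unlock. A toy model of that check (the fd table and lock helpers are stand-ins for fcheck() and do_lock_file_wait()):

    #include <errno.h>
    #include <stdio.h>

    #define F_UNLCK 0
    #define F_WRLCK 1

    struct file { int locked; };

    static struct file *fd_table[8];    /* toy fd table */

    static struct file *lookup_fd(int fd) { return fd_table[fd]; }

    static int apply_lock(struct file *filp, int type)
    {
        filp->locked = (type != F_UNLCK);
        return 0;
    }

    /* mirrors the fixed fcntl_setlk() flow: lock first, then re-check */
    static int setlk(int fd, struct file *filp, int type)
    {
        int error = apply_lock(filp, type);

        if (!error && type != F_UNLCK && lookup_fd(fd) != filp) {
            /* fd was closed (or reused) while we slept: undo and fail */
            apply_lock(filp, F_UNLCK);
            error = -EBADF;
        }
        return error;
    }

    int main(void)
    {
        struct file f = { 0 };

        fd_table[3] = &f;
        printf("%d\n", setlk(3, &f, F_WRLCK));  /* 0: fd still valid */
        fd_table[3] = NULL;                     /* concurrent close() */
        printf("%d\n", setlk(3, &f, F_WRLCK));  /* -EBADF (-9 on Linux) */
        return 0;
    }
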
index 0c3974cd3ecd55670ccab51c596b0b141cbdd721..d8ee4da93650422917c020b40a1dc718e6ea5845 100644 (file)
@@ -1711,6 +1711,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
                return 0;
        if (!follow)
                return 0;
+       /* make sure that d_is_symlink above matches inode */
+       if (nd->flags & LOOKUP_RCU) {
+               if (read_seqcount_retry(&link->dentry->d_seq, seq))
+                       return -ECHILD;
+       }
        return pick_link(nd, link, inode, seq);
 }
 
@@ -1742,11 +1747,11 @@ static int walk_component(struct nameidata *nd, int flags)
                if (err < 0)
                        return err;
 
-               inode = d_backing_inode(path.dentry);
                seq = 0;        /* we are already out of RCU mode */
                err = -ENOENT;
                if (d_is_negative(path.dentry))
                        goto out_path_put;
+               inode = d_backing_inode(path.dentry);
        }
 
        if (flags & WALK_PUT)
@@ -3130,12 +3135,12 @@ retry_lookup:
                return error;
 
        BUG_ON(nd->flags & LOOKUP_RCU);
-       inode = d_backing_inode(path.dentry);
        seq = 0;        /* out of RCU mode, so the value doesn't matter */
        if (unlikely(d_is_negative(path.dentry))) {
                path_to_nameidata(&path, nd);
                return -ENOENT;
        }
+       inode = d_backing_inode(path.dentry);
 finish_lookup:
        if (nd->depth)
                put_link(nd);
@@ -3144,11 +3149,6 @@ finish_lookup:
        if (unlikely(error))
                return error;
 
-       if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
-               path_to_nameidata(&path, nd);
-               return -ELOOP;
-       }
-
        if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
                path_to_nameidata(&path, nd);
        } else {
@@ -3167,6 +3167,10 @@ finish_open:
                return error;
        }
        audit_inode(nd->name, nd->path.dentry, 0);
+       if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
+               error = -ELOOP;
+               goto out;
+       }
        error = -EISDIR;
        if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
                goto out;
@@ -3210,6 +3214,10 @@ opened:
                        goto exit_fput;
        }
 out:
+       if (unlikely(error > 0)) {
+               WARN_ON(1);
+               error = -EINVAL;
+       }
        if (got_write)
                mnt_drop_write(nd->path.mnt);
        path_put(&save_parent);
index f496ed721d278bb4c5f9551da2dd1f43681d29cb..98a44157353a25d9b94087ec766a20d078c2d76d 100644 (file)
@@ -2461,9 +2461,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
                dentry = d_add_unique(dentry, igrab(state->inode));
                if (dentry == NULL) {
                        dentry = opendata->dentry;
-               } else if (dentry != ctx->dentry) {
+               } else {
                        dput(ctx->dentry);
-                       ctx->dentry = dget(dentry);
+                       ctx->dentry = dentry;
                }
                nfs_set_verifier(dentry,
                                nfs_save_change_attribute(d_inode(opendata->dir)));
index 7f604727f487b3680d7f145161288656dd1aaf80..e6795c7c76a854b39441381bdce5fed147bb1a53 100644 (file)
@@ -956,6 +956,7 @@ clean_orphan:
                tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
                                update_isize, end);
                if (tmp_ret < 0) {
+                       ocfs2_inode_unlock(inode, 1);
                        ret = tmp_ret;
                        mlog_errno(ret);
                        brelse(di_bh);
index 954aeb80e202be0a40fc42eeca465c033ab1fc9a..f5f4b328f860d95aa1d1bfab87514ce3a6b57a9a 100644 (file)
@@ -415,6 +415,7 @@ void generic_shutdown_super(struct super_block *sb)
                sb->s_flags &= ~MS_ACTIVE;
 
                fsnotify_unmount_inodes(sb);
+               cgroup_writeback_umount();
 
                evict_inodes(sb);
 
index 0419485891f2a8eb6125f154f52d7b16c5fb05b4..0f1c6f315cdc5294ca63bac49efbf69a5dfee805 100644 (file)
@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
  */
 static inline cputime_t timespec_to_cputime(const struct timespec *val)
 {
-       u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+       u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
        return (__force cputime_t) ret;
 }
 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
  */
 static inline cputime_t timeval_to_cputime(const struct timeval *val)
 {
-       u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+       u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
+                       val->tv_usec * NSEC_PER_USEC;
        return (__force cputime_t) ret;
 }
 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
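
Both casts fix the same 32-bit pitfall: with a 32-bit tv_sec, tv_sec * NSEC_PER_SEC is evaluated in 32-bit arithmetic and wraps once the product passes 2^31 ns (about 2.1 seconds); widening one operand first makes the whole multiply 64-bit. A small demonstration of what the truncation loses:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    int main(void)
    {
        int32_t tv_sec = 10;    /* model a 32-bit time_t */

        int64_t wide = (int64_t)tv_sec * NSEC_PER_SEC; /* the fixed form */
        int32_t narrow = (int32_t)wide;   /* what a 32-bit multiply keeps */

        printf("wide   = %lld\n", (long long)wide);    /* 10000000000 */
        printf("narrow = %d\n", narrow);               /* 1410065408 */
        return 0;
    }
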
index 7bfb063029d83fe1374e78a174f4a638bc39bc49..461a0558bca4d8d16e81b88e0286e3fa6251ab79 100644 (file)
 
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 
+static inline bool drm_arch_can_wc_memory(void)
+{
+#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+       return false;
+#else
+       return true;
+#endif
+}
+
 #endif
index 5340099741aec8c48575b1c1056f23903e150ed6..f356f9716474a2f68e665500e5414832bcc4aa84 100644 (file)
@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
 /**
  * struct drm_dp_mst_port - MST port
  * @kref: reference count for this port.
- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
- * @guid: guid for DP 1.2 device on this port.
  * @port_num: port number
  * @input: if this port is an input port.
  * @mcs: message capability status - DP 1.2 spec.
@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
 struct drm_dp_mst_port {
        struct kref kref;
 
-       /* if dpcd 1.2 device is on this port - its GUID info */
-       bool guid_valid;
-       u8 guid[16];
-
        u8 port_num;
        bool input;
        bool mcs;
@@ -109,10 +103,12 @@ struct drm_dp_mst_port {
  * @tx_slots: transmission slots for this device.
  * @last_seqno: last sequence number used to talk to this.
  * @link_address_sent: if a link address message has been sent to this device yet.
+ * @guid: guid for DP 1.2 branch device. Ports under this branch can be
+ * identified by port number.
  *
  * This structure represents an MST branch device, there is one
- * primary branch device at the root, along with any others connected
- * to downstream ports
+ * primary branch device at the root, along with any other branches connected
+ * to downstream ports of their parent branches.
  */
 struct drm_dp_mst_branch {
        struct kref kref;
@@ -131,6 +127,9 @@ struct drm_dp_mst_branch {
        struct drm_dp_sideband_msg_tx *tx_slots[2];
        int last_seqno;
        bool link_address_sent;
+
+       /* globally unique identifier for branch devices */
+       u8 guid[16];
 };
 
 
@@ -405,11 +404,9 @@ struct drm_dp_payload {
  * @conn_base_id: DRM connector ID this mgr is connected to.
  * @down_rep_recv: msg receiver state for down replies.
  * @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, guid, dpcd.
+ * @lock: protects mst state, primary, dpcd.
  * @mst_state: if this manager is enabled for an MST capable port.
  * @mst_primary: pointer to the primary branch device.
- * @guid_valid: GUID valid for the primary branch device.
- * @guid: GUID for primary port.
  * @dpcd: cache of DPCD for primary port.
  * @pbn_div: PBN to slots divisor.
  *
@@ -431,13 +428,11 @@ struct drm_dp_mst_topology_mgr {
        struct drm_dp_sideband_msg_rx up_req_recv;
 
        /* pointer to info about the initial MST device */
-       struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+       struct mutex lock; /* protects mst_state + primary + dpcd */
 
        bool mst_state;
        struct drm_dp_mst_branch *mst_primary;
-       /* primary MST device GUID */
-       bool guid_valid;
-       u8 guid[16];
+
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
        u8 sink_count;
        int pbn_div;
@@ -450,9 +445,7 @@ struct drm_dp_mst_topology_mgr {
           the mstb tx_slots and txmsg->state once they are queued */
        struct mutex qlock;
        struct list_head tx_msg_downq;
-       struct list_head tx_msg_upq;
        bool tx_down_in_progress;
-       bool tx_up_in_progress;
 
        /* payload info + lock for it */
        struct mutex payload_lock;
index d639049a613dfdb264650f8ee938836e6ed28e10..553210c02ee0f655fd28b8ae64d370e7cee2fc70 100644 (file)
@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
 #define DRM_FIXED_ONE          (1ULL << DRM_FIXED_POINT)
 #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
 #define DRM_FIXED_DIGITS_MASK  (~DRM_FIXED_DECIMAL_MASK)
+#define DRM_FIXED_EPSILON      1LL
+#define DRM_FIXED_ALMOST_ONE   (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
 
 static inline s64 drm_int2fixp(int a)
 {
        return ((s64)a) << DRM_FIXED_POINT;
 }
 
-static inline int drm_fixp2int(int64_t a)
+static inline int drm_fixp2int(s64 a)
 {
        return ((s64)a) >> DRM_FIXED_POINT;
 }
 
-static inline unsigned drm_fixp_msbset(int64_t a)
+static inline int drm_fixp2int_ceil(s64 a)
+{
+       if (a > 0)
+               return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+       else
+               return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
+}
+
+static inline unsigned drm_fixp_msbset(s64 a)
 {
        unsigned shift, sign = (a >> 63) & 1;
 
@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
        return result;
 }
 
+static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
+{
+       s64 res;
+       bool a_neg = a < 0;
+       bool b_neg = b < 0;
+       u64 a_abs = a_neg ? -a : a;
+       u64 b_abs = b_neg ? -b : b;
+       u64 rem;
+
+       /* determine integer part */
+       u64 res_abs  = div64_u64_rem(a_abs, b_abs, &rem);
+
+       /* determine fractional part */
+       {
+               u32 i = DRM_FIXED_POINT;
+
+               do {
+                       rem <<= 1;
+                       res_abs <<= 1;
+                       if (rem >= b_abs) {
+                               res_abs |= 1;
+                               rem -= b_abs;
+                       }
+               } while (--i != 0);
+       }
+
+       /* round up LSB */
+       {
+               u64 summand = (rem << 1) >= b_abs;
+
+               res_abs += summand;
+       }
+
+       res = (s64) res_abs;
+       if (a_neg ^ b_neg)
+               res = -res;
+       return res;
+}
+
 static inline s64 drm_fixp_exp(s64 x)
 {
        s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
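
drm_fixp_from_fraction() is plain binary long division: take the integer quotient, shift the remainder through the 32 fractional bits, then round the LSB on the leftover remainder. A standalone copy of the algorithm that can be checked against known values (DRM_FIXED_POINT is 32 in this header):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FIXED_POINT 32

    static int64_t fixp_from_fraction(int64_t a, int64_t b)
    {
        bool neg = (a < 0) ^ (b < 0);
        uint64_t a_abs = a < 0 ? -a : a;
        uint64_t b_abs = b < 0 ? -b : b;
        uint64_t res = a_abs / b_abs;   /* integer part */
        uint64_t rem = a_abs % b_abs;

        /* fractional part, one bit per iteration */
        for (unsigned int i = 0; i < FIXED_POINT; i++) {
            rem <<= 1;
            res <<= 1;
            if (rem >= b_abs) {
                res |= 1;
                rem -= b_abs;
            }
        }
        res += (rem << 1) >= b_abs;     /* round the LSB */
        return neg ? -(int64_t)res : (int64_t)res;
    }

    int main(void)
    {
        /* 1/3 in 32.32 fixed point: 0x0000000055555555 */
        printf("1/3  = 0x%016llx\n",
               (unsigned long long)fixp_from_fraction(1, 3));
        /* -3/2: expect -(3LL << 31) = -6442450944 */
        printf("-3/2 = %lld\n", (long long)fixp_from_fraction(-3, 2));
        return 0;
    }
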
index d2992bfa17063a052a08a106b9105284ce4bfa4a..c1a2f345cbe6146352fff77080019eba5d0fe76e 100644 (file)
@@ -487,8 +487,8 @@ enum ata_tf_protocols {
 };
 
 enum ata_ioctls {
-       ATA_IOC_GET_IO32        = 0x309,
-       ATA_IOC_SET_IO32        = 0x324,
+       ATA_IOC_GET_IO32        = 0x309, /* HDIO_GET_32BIT */
+       ATA_IOC_SET_IO32        = 0x324, /* HDIO_SET_32BIT */
 };
 
 /* core structures */
index b9b6e046b52ea5b9f783b69274fbdd6ac8fe5d23..79cfaeef1b0d63122d645eff2f11ca16e5b15d58 100644 (file)
@@ -310,6 +310,43 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
        bio->bi_flags &= ~(1U << bit);
 }
 
+static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+{
+       *bv = bio_iovec(bio);
+}
+
+static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+{
+       struct bvec_iter iter = bio->bi_iter;
+       int idx;
+
+       if (!bio_flagged(bio, BIO_CLONED)) {
+               *bv = bio->bi_io_vec[bio->bi_vcnt - 1];
+               return;
+       }
+
+       if (unlikely(!bio_multiple_segments(bio))) {
+               *bv = bio_iovec(bio);
+               return;
+       }
+
+       bio_advance_iter(bio, &iter, iter.bi_size);
+
+       if (!iter.bi_bvec_done)
+               idx = iter.bi_idx - 1;
+       else    /* in the middle of bvec */
+               idx = iter.bi_idx;
+
+       *bv = bio->bi_io_vec[idx];
+
+       /*
+        * iter.bi_bvec_done records actual length of the last bvec
+        * if this bio ends in the middle of one io vector
+        */
+       if (iter.bi_bvec_done)
+               bv->bv_len = iter.bi_bvec_done;
+}
+
 enum bip_flags {
        BIP_BLOCK_INTEGRITY     = 1 << 0, /* block layer owns integrity data */
        BIP_MAPPED_INTEGRITY    = 1 << 1, /* ref tag has been remapped */
index c70e3588a48c723f4b7cd8536c0dabfc02871d5c..168755791ec88ac895c4cc0eb267638ba459e39e 100644 (file)
@@ -1367,6 +1367,13 @@ static inline void put_dev_sector(Sector p)
        page_cache_release(p.v);
 }
 
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+                               struct bio_vec *bprv, unsigned int offset)
+{
+       return offset ||
+               ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
 /*
  * Check if adding a bio_vec after bprv with offset would create a gap in
  * the SG list. Most drivers don't care about this, but some do.
@@ -1376,18 +1383,22 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
 {
        if (!queue_virt_boundary(q))
                return false;
-       return offset ||
-               ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+       return __bvec_gap_to_prev(q, bprv, offset);
 }
 
 static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
                         struct bio *next)
 {
-       if (!bio_has_data(prev))
-               return false;
+       if (bio_has_data(prev) && queue_virt_boundary(q)) {
+               struct bio_vec pb, nb;
+
+               bio_get_last_bvec(prev, &pb);
+               bio_get_first_bvec(next, &nb);
 
-       return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
-                               next->bi_io_vec[0].bv_offset);
+               return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+       }
+
+       return false;
 }
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
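
The rewrite routes both the merge check and bio_will_gap() through one helper whose test is simple: two segments can share an SG entry only if the next one starts at offset 0 and the previous one ends exactly on the device's virtual boundary. The arithmetic, demonstrated with a hypothetical 4K boundary mask:

    #include <stdbool.h>
    #include <stdio.h>

    struct bvec { unsigned int bv_offset, bv_len; };

    /* same test as __bvec_gap_to_prev(): next must start at offset 0 and
     * prev must end exactly on the boundary, or the pair needs an SG gap */
    static bool gap_to_prev(const struct bvec *prev, unsigned int next_offset,
                            unsigned int boundary_mask)
    {
        return next_offset ||
            ((prev->bv_offset + prev->bv_len) & boundary_mask);
    }

    int main(void)
    {
        unsigned int mask = 4096 - 1;       /* 4K virt boundary */
        struct bvec full = { 0, 4096 };     /* ends on the boundary */
        struct bvec partial = { 0, 2048 };  /* ends mid-boundary */

        printf("%d\n", gap_to_prev(&full, 0, mask));    /* 0: no gap */
        printf("%d\n", gap_to_prev(&partial, 0, mask)); /* 1: gap */
        printf("%d\n", gap_to_prev(&full, 512, mask));  /* 1: gap */
        return 0;
    }
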
index 71b1d6cdcb5d1fc53dd45fa3950e138cfac19dfe..8dbd7879fdc6b74719d9cb65b85d8c60f589bde6 100644 (file)
@@ -220,6 +220,7 @@ struct ceph_connection {
        struct ceph_entity_addr actual_peer_addr;
 
        /* message out temps */
+       struct ceph_msg_header out_hdr;
        struct ceph_msg *out_msg;        /* sending message (== tail of
                                            out_sent) */
        bool out_msg_done;
@@ -229,7 +230,6 @@ struct ceph_connection {
        int out_kvec_left;   /* kvec's left in out_kvec */
        int out_skip;        /* skip this many bytes */
        int out_kvec_bytes;  /* total bytes left */
-       bool out_kvec_is_msg; /* kvec refers to out_msg */
        int out_more;        /* there is more data after the kvecs */
        __le64 out_temp_ack; /* for writing an ack */
        struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
index 06b77f9dd3f2052ac87f970b8aa8c195b78f7ada..8e30faeab18363cf67be62e76f7ffa3fd2349046 100644 (file)
@@ -133,6 +133,12 @@ struct cgroup_subsys_state {
         */
        u64 serial_nr;
 
+       /*
+        * Incremented by online self and children.  Used to guarantee that
+        * parents are not offlined before their children.
+        */
+       atomic_t online_cnt;
+
        /* percpu_ref killing and RCU release */
        struct rcu_head rcu_head;
        struct work_struct destroy_work;
index 85a868ccb4931d374a1ee9fb4e4036bb84399561..fea160ee5803fd121d0493f622e240b4c35da480 100644 (file)
@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
        task_unlock(current);
 }
 
+extern void cpuset_post_attach_flush(void);
+
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
        return false;
 }
 
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
index d67ae119cf4eb9d3143d5019108f1d7fbf252b88..8a2e009c8a5a9503aa4c31815e1057092ec6076f 100644 (file)
@@ -409,9 +409,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
  */
 static inline unsigned __d_entry_type(const struct dentry *dentry)
 {
-       unsigned type = READ_ONCE(dentry->d_flags);
-       smp_rmb();
-       return type & DCACHE_ENTRY_TYPE;
+       return dentry->d_flags & DCACHE_ENTRY_TYPE;
 }
 
 static inline bool d_is_miss(const struct dentry *dentry)
index 569b5a866bb1e6308bbc4c0a28a2da92d106d6b5..47be3ad7d3e5bad63b48a8fa344dbea65c0a97dd 100644 (file)
@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
 struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
                                       struct list_head *head, bool remove);
 
-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+                    unsigned long data_size);
+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+                                 size_t len);
 
 extern struct work_struct efivar_work;
 void efivar_run_worker(void);
index 8fdc17b84739bdeb31a618e388a9d2238c6490f6..ae6a711dcd1ddc446413621a0a63959469110ac2 100644 (file)
@@ -630,6 +630,11 @@ struct hv_input_signal_event_buffer {
        struct hv_input_signal_event event;
 };
 
+enum hv_signal_policy {
+       HV_SIGNAL_POLICY_DEFAULT = 0,
+       HV_SIGNAL_POLICY_EXPLICIT,
+};
+
 struct vmbus_channel {
        /* Unique channel id */
        int id;
@@ -757,8 +762,21 @@ struct vmbus_channel {
         * link up channels based on their CPU affinity.
         */
        struct list_head percpu_list;
+       /*
+        * Host signaling policy: The default policy will be
+        * based on the ring buffer state. We will also support
+        * a policy where the client driver can have explicit
+        * signaling control.
+        */
+       enum hv_signal_policy  signal_policy;
 };
 
+static inline void set_channel_signal_state(struct vmbus_channel *c,
+                                           enum hv_signal_policy policy)
+{
+       c->signal_policy = policy;
+}
+
 static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
 {
        c->batched_reading = state;
index 600c1e0626a5ff6c91df3a0b2f2fcf0ef485fedf..b20a2752f934f8f427d796341ed0a433f1948487 100644 (file)
@@ -718,7 +718,7 @@ struct ata_device {
        union {
                u16             id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
                u32             gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
-       };
+       } ____cacheline_aligned;
 
        /* DEVSLP Timing Variables from Identify Device Data Log */
        u8                      devslp_timing[ATA_LOG_DEVSLP_SIZE];
index 3a19c79918e02d37c3e77ed32a88eb9f36bb16c3..b229a9961d029facd4e72e156ac3383d21f94f24 100644 (file)
@@ -302,6 +302,12 @@ struct mod_tree_node {
        struct latch_tree_node node;
 };
 
+struct mod_kallsyms {
+       Elf_Sym *symtab;
+       unsigned int num_symtab;
+       char *strtab;
+};
+
 struct module {
        enum module_state state;
 
@@ -411,14 +417,9 @@ struct module {
 #endif
 
 #ifdef CONFIG_KALLSYMS
-       /*
-        * We keep the symbol and string tables for kallsyms.
-        * The core_* fields below are temporary, loader-only (they
-        * could really be discarded after module init).
-        */
-       Elf_Sym *symtab, *core_symtab;
-       unsigned int num_symtab, core_num_syms;
-       char *strtab, *core_strtab;
+       /* Protected by RCU and/or module_mutex: use rcu_dereference() */
+       struct mod_kallsyms *kallsyms;
+       struct mod_kallsyms core_kallsyms;
 
        /* Section attributes */
        struct module_sect_attrs *sect_attrs;
index c0e961474a527058c8d1ac2aa72070c9b15e3db5..5455b660bd880d9f3df6b63e047d0df6ed24e4a5 100644 (file)
@@ -544,9 +544,7 @@ extern int  nfs_readpage_async(struct nfs_open_context *, struct inode *,
 
 static inline loff_t nfs_size_to_loff_t(__u64 size)
 {
-       if (size > (__u64) OFFSET_MAX - 1)
-               return OFFSET_MAX - 1;
-       return (loff_t) size;
+       return min_t(u64, size, OFFSET_MAX);
 }
 
 static inline ino_t
index 50777b5b1e4c3967d9f4fcc441b604fe6f0e0008..92d112aeec686680bb58236ff5869f3f13c45791 100644 (file)
@@ -15,10 +15,7 @@ struct shmem_inode_info {
        unsigned int            seals;          /* shmem seals */
        unsigned long           flags;
        unsigned long           alloced;        /* data pages alloced to file */
-       union {
-               unsigned long   swapped;        /* subtotal assigned to swap */
-               char            *symlink;       /* unswappable short symlink */
-       };
+       unsigned long           swapped;        /* subtotal assigned to swap */
        struct shared_policy    policy;         /* NUMA memory alloc policy */
        struct list_head        swaplist;       /* chain of maybes on swap */
        struct simple_xattrs    xattrs;         /* list of xattrs */
index 9147f9f34cbe460eec9f782cb226095006f07b11..75f136a22a5ebf378b3e0ddec8493effc8b72d36 100644 (file)
@@ -219,6 +219,7 @@ struct sk_buff;
 #else
 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
+extern int sysctl_max_skb_frags;
 
 typedef struct skb_frag_struct skb_frag_t;
 
index 613c29bd6baf4763e57530b794df843d9cea61ee..e13a1ace50e902ad0ae38a81f5563d67eaa4a00f 100644 (file)
@@ -43,6 +43,9 @@
 /* Default weight of a bound cooling device */
 #define THERMAL_WEIGHT_DEFAULT 0
 
+/* use a value below 0K to indicate an invalid/uninitialized temperature */
+#define THERMAL_TEMP_INVALID   -274000
+
 /* Unit conversion macros */
 #define DECI_KELVIN_TO_CELSIUS(t)      ({                      \
        long _t = (t);                                          \
@@ -167,6 +170,7 @@ struct thermal_attr {
  * @forced_passive:    If > 0, temperature at which to switch on all ACPI
  *                     processor cooling devices.  Currently only used by the
  *                     step-wise governor.
+ * @need_update:       if set to 1, thermal_zone_device_update needs to be invoked.
  * @ops:       operations this &thermal_zone_device supports
  * @tzp:       thermal zone parameters
  * @governor:  pointer to the governor for this thermal zone
@@ -194,6 +198,7 @@ struct thermal_zone_device {
        int emul_temperature;
        int passive;
        unsigned int forced_passive;
+       atomic_t need_update;
        struct thermal_zone_device_ops *ops;
        struct thermal_zone_params *tzp;
        struct thermal_governor *governor;
index 429fdfc3baf59e018d0e198062f894642af8a5bc..925730bc9fc1bb4914410779bdf5ea469ed41c18 100644 (file)
@@ -568,6 +568,8 @@ enum {
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
        FILTER_TRACE_FN,
+       FILTER_COMM,
+       FILTER_CPU,
 };
 
 extern int trace_event_raw_init(struct trace_event_call *call);
index cbb20afdbc01cc4a600574446d5b19c33a0d90c1..bb679b48f408217a422d0cd427ede3e05e2e67bd 100644 (file)
@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
 unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
 int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
 
+unsigned long ucs2_utf8size(const ucs2_char_t *src);
+unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
+                          unsigned long maxlength);
+
 #endif /* _LINUX_UCS2_STRING_H_ */
index b333c945e57117aa3d80f6ec2df47f6513c82260..d0b5ca5d4e080346e8a657c6c6aee540e47776e7 100644 (file)
@@ -198,6 +198,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 void wbc_detach_inode(struct writeback_control *wbc);
 void wbc_account_io(struct writeback_control *wbc, struct page *page,
                    size_t bytes);
+void cgroup_writeback_umount(void);
 
 /**
  * inode_attach_wb - associate an inode with its wb
@@ -301,6 +302,10 @@ static inline void wbc_account_io(struct writeback_control *wbc,
 {
 }
 
+static inline void cgroup_writeback_umount(void)
+{
+}
+
 #endif /* CONFIG_CGROUP_WRITEBACK */
 
 /*
index 2a91a0561a478393ca9e9d2f1993467cc4c5c9cb..9b4c418bebd84ae0a7debfbcc697ee92b136ec99 100644 (file)
@@ -6,8 +6,8 @@
 #include <linux/mutex.h>
 #include <net/sock.h>
 
-void unix_inflight(struct file *fp);
-void unix_notinflight(struct file *fp);
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
 void unix_gc(void);
 void wait_for_unix_gc(void);
 struct sock *unix_get_socket(struct file *filp);
index 6816f0fa5693dc275e1e4997375f29b5d99f7b64..30a56ab2ccfb05d7bc9a527ebdc09da19b3d5f7b 100644 (file)
@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
        return dst && !(dst->flags & DST_METADATA);
 }
 
+static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
+                                      const struct sk_buff *skb_b)
+{
+       const struct metadata_dst *a, *b;
+
+       if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
+               return 0;
+
+       a = (const struct metadata_dst *) skb_dst(skb_a);
+       b = (const struct metadata_dst *) skb_dst(skb_b);
+
+       if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
+               return 1;
+
+       return memcmp(&a->u.tun_info, &b->u.tun_info,
+                     sizeof(a->u.tun_info) + a->u.tun_info.options_len);
+}
+
 struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
 struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
 
index 481fe1c9044cfd8b49585139e24df16b0716debf..49dcad4fe99e0ad5de491ef0e0675a3b516aabca 100644 (file)
@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req);
 
-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
-                             struct sock *child);
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+                                     struct request_sock *req,
+                                     struct sock *child);
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout);
 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
index 877f682989b892fb1d70256bda6b33f03e34a0f5..295d291269e2c88ed4930041597a28f7c9a7a2f7 100644 (file)
@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
 
 void ip6_route_input(struct sk_buff *skb);
 
-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
-                                  struct flowi6 *fl6);
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+                                        struct flowi6 *fl6, int flags);
+
+static inline struct dst_entry *ip6_route_output(struct net *net,
+                                                const struct sock *sk,
+                                                struct flowi6 *fl6)
+{
+       return ip6_route_output_flags(net, sk, fl6, 0);
+}
+
 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags);
 
index 9f4df68105ab28eecdeb9d799a83910b73142a06..3f98233388fbe181c13063dc27f21fa70a6d5e34 100644 (file)
@@ -61,6 +61,7 @@ struct fib_nh_exception {
        struct rtable __rcu             *fnhe_rth_input;
        struct rtable __rcu             *fnhe_rth_output;
        unsigned long                   fnhe_stamp;
+       struct rcu_head                 rcu;
 };
 
 struct fnhe_hash_bucket {
index 262532d111f51e3a91a06af785b4721e8fab9d56..59fa93c01d2a16a129298498f1d56556b895acd9 100644 (file)
@@ -21,6 +21,7 @@ struct scm_creds {
 struct scm_fp_list {
        short                   count;
        short                   max;
+       struct user_struct      *user;
        struct file             *fp[SCM_MAX_FD];
 };
 
index f80e74c5ad18b22c274ecd7e75b6a23ffe7268b4..414d822bc1db778c3606c62a346227228f7319f7 100644 (file)
@@ -449,7 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
index 56cf8e485ef22101ac22b1120704664bf599eaef..28ee5c2e6bcd7b08abeda34669cc2da17f4dcf9a 100644 (file)
@@ -94,5 +94,8 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
        sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
 
 bool target_sense_desc_format(struct se_device *dev);
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+                                      struct request_queue *q, int block_size);
 
 #endif /* TARGET_CORE_BACKEND_H */
index aabf0aca017157a72dd84accb5975c87fe98e79b..689f4d2071225ab648c7e652669af57e4a9c0530 100644 (file)
@@ -138,6 +138,7 @@ enum se_cmd_flags_table {
        SCF_COMPARE_AND_WRITE           = 0x00080000,
        SCF_COMPARE_AND_WRITE_POST      = 0x00100000,
        SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
+       SCF_ACK_KREF                    = 0x00400000,
 };
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -490,6 +491,8 @@ struct se_cmd {
 #define CMD_T_DEV_ACTIVE       (1 << 7)
 #define CMD_T_REQUEST_STOP     (1 << 8)
 #define CMD_T_BUSY             (1 << 9)
+#define CMD_T_TAS              (1 << 10)
+#define CMD_T_FABRIC_STOP      (1 << 11)
        spinlock_t              t_state_lock;
        struct kref             cmd_kref;
        struct completion       t_transport_stop_comp;
index c2e5d6cb34e36ba2e409985c7d66c70347dbebc7..ebd10e6245984fb5c38b6ee13c3861b8e78ed040 100644 (file)
@@ -307,7 +307,7 @@ header-y += nfs_mount.h
 header-y += nl80211.h
 header-y += n_r3964.h
 header-y += nubus.h
-header-y += nvme.h
+header-y += nvme_ioctl.h
 header-y += nvram.h
 header-y += omap3isp.h
 header-y += omapfb.h
index d1d3e8f57de907764fe3080632062485f5639443..2e7f7ab739e41c46072a69e787bf4e44f560ae55 100644 (file)
@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
                /* adjust offset of jmps if necessary */
                if (i < pos && i + insn->off + 1 > pos)
                        insn->off += delta;
-               else if (i > pos && i + insn->off + 1 < pos)
+               else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
                        insn->off -= delta;
        }
 }
index 470f6536b9e8cfb029eedb5ecdb3c3e8c3c341be..fb1ecfd2decd907e907d1d68d74792cb6d07714e 100644 (file)
@@ -57,7 +57,7 @@
 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
 #include <linux/kthread.h>
 #include <linux/delay.h>
-
+#include <linux/cpuset.h>
 #include <linux/atomic.h>
 
 /*
@@ -2764,6 +2764,7 @@ out_unlock_rcu:
 out_unlock_threadgroup:
        percpu_up_write(&cgroup_threadgroup_rwsem);
        cgroup_kn_unlock(of->kn);
+       cpuset_post_attach_flush();
        return ret ?: nbytes;
 }
 
@@ -4783,6 +4784,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
        INIT_LIST_HEAD(&css->sibling);
        INIT_LIST_HEAD(&css->children);
        css->serial_nr = css_serial_nr_next++;
+       atomic_set(&css->online_cnt, 0);
 
        if (cgroup_parent(cgrp)) {
                css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -4805,6 +4807,10 @@ static int online_css(struct cgroup_subsys_state *css)
        if (!ret) {
                css->flags |= CSS_ONLINE;
                rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
+
+               atomic_inc(&css->online_cnt);
+               if (css->parent)
+                       atomic_inc(&css->parent->online_cnt);
        }
        return ret;
 }
@@ -5036,10 +5042,15 @@ static void css_killed_work_fn(struct work_struct *work)
                container_of(work, struct cgroup_subsys_state, destroy_work);
 
        mutex_lock(&cgroup_mutex);
-       offline_css(css);
-       mutex_unlock(&cgroup_mutex);
 
-       css_put(css);
+       do {
+               offline_css(css);
+               css_put(css);
+               /* @css can't go away while we're holding cgroup_mutex */
+               css = css->parent;
+       } while (css && atomic_dec_and_test(&css->online_cnt));
+
+       mutex_unlock(&cgroup_mutex);
 }
 
 /* css kill confirmation processing requires process context, bounce */
@@ -5048,8 +5059,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
        struct cgroup_subsys_state *css =
                container_of(ref, struct cgroup_subsys_state, refcnt);
 
-       INIT_WORK(&css->destroy_work, css_killed_work_fn);
-       queue_work(cgroup_destroy_wq, &css->destroy_work);
+       if (atomic_dec_and_test(&css->online_cnt)) {
+               INIT_WORK(&css->destroy_work, css_killed_work_fn);
+               queue_work(cgroup_destroy_wq, &css->destroy_work);
+       }
 }
 
 /**
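
The new online_cnt turns offlining into a bottom-up cascade: each css holds one count on itself and one on its parent, so a parent's kill only proceeds once every child has gone offline, and the work function then walks up releasing ancestors. A compact stdatomic model of the dec-and-test walk (it folds css_killed_ref_fn() and css_killed_work_fn() into one function for brevity):

    #include <stdatomic.h>
    #include <stdio.h>

    struct css {
        const char *name;
        struct css *parent;
        atomic_int online_cnt;
    };

    static void online(struct css *c)
    {
        atomic_fetch_add(&c->online_cnt, 1);
        if (c->parent)
            atomic_fetch_add(&c->parent->online_cnt, 1);
    }

    /* offline self, then walk up while each ancestor's count hits zero */
    static void killed(struct css *c)
    {
        if (atomic_fetch_sub(&c->online_cnt, 1) != 1)
            return;     /* children still online */
        do {
            printf("offline %s\n", c->name);
            c = c->parent;
        } while (c && atomic_fetch_sub(&c->online_cnt, 1) == 1);
    }

    int main(void)
    {
        struct css root = { "root", NULL }, child = { "child", &root };

        online(&root);  /* root: 1 */
        online(&child); /* child: 1, root: 2 */
        killed(&root);  /* nothing yet: child still holds a count */
        killed(&child); /* prints "offline child", then "offline root" */
        return 0;
    }
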
index 02a8ea5c99632907b2ae887015c59bd4ad35da9d..2ade632197d5159efb21b5b45b2de41417901a0c 100644 (file)
@@ -286,6 +286,8 @@ static struct cpuset top_cpuset = {
 static DEFINE_MUTEX(cpuset_mutex);
 static DEFINE_SPINLOCK(callback_lock);
 
+static struct workqueue_struct *cpuset_migrate_mm_wq;
+
 /*
  * CPU / memory hotplug is handled asynchronously.
  */
@@ -971,31 +973,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 }
 
 /*
- * cpuset_migrate_mm
- *
- *    Migrate memory region from one set of nodes to another.
- *
- *    Temporarilly set tasks mems_allowed to target nodes of migration,
- *    so that the migration code can allocate pages on these nodes.
- *
- *    While the mm_struct we are migrating is typically from some
- *    other task, the task_struct mems_allowed that we are hacking
- *    is for our current task, which must allocate new pages for that
- *    migrating memory region.
+ * Migrate memory region from one set of nodes to another.  This is
+ * performed asynchronously as it can be called from the process migration path
+ * holding locks involved in process management.  All mm migrations are
+ * performed in the queued order and can be waited for by flushing
+ * cpuset_migrate_mm_wq.
  */
 
+struct cpuset_migrate_mm_work {
+       struct work_struct      work;
+       struct mm_struct        *mm;
+       nodemask_t              from;
+       nodemask_t              to;
+};
+
+static void cpuset_migrate_mm_workfn(struct work_struct *work)
+{
+       struct cpuset_migrate_mm_work *mwork =
+               container_of(work, struct cpuset_migrate_mm_work, work);
+
+       /* on a wq worker, no need to worry about %current's mems_allowed */
+       do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
+       mmput(mwork->mm);
+       kfree(mwork);
+}
+
 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
                                                        const nodemask_t *to)
 {
-       struct task_struct *tsk = current;
-
-       tsk->mems_allowed = *to;
+       struct cpuset_migrate_mm_work *mwork;
 
-       do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+       mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
+       if (mwork) {
+               mwork->mm = mm;
+               mwork->from = *from;
+               mwork->to = *to;
+               INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
+               queue_work(cpuset_migrate_mm_wq, &mwork->work);
+       } else {
+               mmput(mm);
+       }
+}
 
-       rcu_read_lock();
-       guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
-       rcu_read_unlock();
+void cpuset_post_attach_flush(void)
+{
+       flush_workqueue(cpuset_migrate_mm_wq);
 }
 
 /*
@@ -1096,7 +1118,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
                mpol_rebind_mm(mm, &cs->mems_allowed);
                if (migrate)
                        cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
-               mmput(mm);
+               else
+                       mmput(mm);
        }
        css_task_iter_end(&it);
 
@@ -1541,11 +1564,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
                         * @old_mems_allowed is the right nodesets that we
                         * migrate mm from.
                         */
-                       if (is_memory_migrate(cs)) {
+                       if (is_memory_migrate(cs))
                                cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
                                                  &cpuset_attach_nodemask_to);
-                       }
-                       mmput(mm);
+                       else
+                               mmput(mm);
                }
        }
 
@@ -1710,6 +1733,7 @@ out_unlock:
        mutex_unlock(&cpuset_mutex);
        kernfs_unbreak_active_protection(of->kn);
        css_put(&cs->css);
+       flush_workqueue(cpuset_migrate_mm_wq);
        return retval ?: nbytes;
 }
 
@@ -2355,6 +2379,9 @@ void __init cpuset_init_smp(void)
        top_cpuset.effective_mems = node_states[N_MEMORY];
 
        register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
+
+       cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
+       BUG_ON(!cpuset_migrate_mm_wq);
 }
 
 /**
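
The mechanics of the new path: cpuset_migrate_mm() snapshots the from/to nodemasks into a heap-allocated work item and queues it on the ordered cpuset_migrate_mm_wq, and cpuset_post_attach_flush() later flushes that queue once the locks held during attach are gone. A single-threaded toy of the defer-and-flush shape (toy types; the kernel uses a real workqueue and drops the mm reference with mmput()):

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical stand-ins for mm_struct and nodemask_t */
    struct mm { int id; };
    typedef unsigned long nodemask_t;

    struct migrate_work {
        struct mm *mm;
        nodemask_t from, to;
        struct migrate_work *next;
    };

    static struct migrate_work *queue_head, **queue_tail = &queue_head;

    /* queue_work(): snapshot the nodemasks now, do the work at flush time */
    static void cpuset_migrate_mm(struct mm *mm, nodemask_t from, nodemask_t to)
    {
        struct migrate_work *w = calloc(1, sizeof(*w));

        if (!w)
            return;     /* the real code drops the mm reference here */
        w->mm = mm;
        w->from = from;
        w->to = to;
        *queue_tail = w;
        queue_tail = &w->next;
    }

    /* flush_workqueue(): run everything queued, in order */
    static void cpuset_post_attach_flush(void)
    {
        while (queue_head) {
            struct migrate_work *w = queue_head;

            queue_head = w->next;
            printf("migrate mm %d: %#lx -> %#lx\n",
                   w->mm->id, w->from, w->to);
            free(w);
        }
        queue_tail = &queue_head;
    }

    int main(void)
    {
        struct mm m = { 1 };

        cpuset_migrate_mm(&m, 0x3, 0xc);
        cpuset_post_attach_flush();     /* runs the deferred migration */
        return 0;
    }
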
index a302cf9a2126c8a911ff4da1a944c88c6d3b3f8a..57bff7857e879bf2301a92723ade0a968870e637 100644 (file)
@@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
        unsigned int flags = 0, irq = desc->irq_data.irq;
        struct irqaction *action = desc->action;
 
-       do {
+       /* action might have become NULL since we dropped the lock */
+       while (action) {
                irqreturn_t res;
 
                trace_irq_handler_entry(irq, action);
@@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
 
                retval |= res;
                action = action->next;
-       } while (action);
+       }
 
        add_interrupt_randomness(irq, flags);
 
index 7a4e473cea4d31b1a51505a51cb1222628decdd9..25ced161ebebb1ad58626cba853dc472db8f1caa 100644 (file)
@@ -133,8 +133,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
-       } else
+       } else {
                devres_free(ptr);
+               return ERR_PTR(-ENXIO);
+       }
 
        return addr;
 }
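
After this change devm_memremap() reports failure as ERR_PTR(-ENXIO) rather than NULL, so callers must switch from a NULL test to IS_ERR(). The encoding convention in miniature (the kernel keeps error codes in the top 4095 values of the address space):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* toy stand-in for devm_memremap()'s failure path */
    static void *fake_memremap(int fail)
    {
        static int storage;

        return fail ? ERR_PTR(-ENXIO) : (void *)&storage;
    }

    int main(void)
    {
        void *p = fake_memremap(1);

        if (IS_ERR(p))
            printf("memremap failed: %ld\n", PTR_ERR(p));   /* -6 */
        return 0;
    }
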
index 14833e6d5e372d41a02e61334b9867461e245bf4..0e5c71195f185c667f875ab54b0ac022eadd519c 100644 (file)
@@ -327,6 +327,9 @@ struct load_info {
        struct _ddebug *debug;
        unsigned int num_debug;
        bool sig_ok;
+#ifdef CONFIG_KALLSYMS
+       unsigned long mod_kallsyms_init_off;
+#endif
        struct {
                unsigned int sym, str, mod, vers, info, pcpu;
        } index;
@@ -2492,10 +2495,21 @@ static void layout_symtab(struct module *mod, struct load_info *info)
        strsect->sh_flags |= SHF_ALLOC;
        strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
                                         info->index.str) | INIT_OFFSET_MASK;
-       mod->init_size = debug_align(mod->init_size);
        pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+
+       /* We'll tack temporary mod_kallsyms on the end. */
+       mod->init_size = ALIGN(mod->init_size,
+                              __alignof__(struct mod_kallsyms));
+       info->mod_kallsyms_init_off = mod->init_size;
+       mod->init_size += sizeof(struct mod_kallsyms);
+       mod->init_size = debug_align(mod->init_size);
 }
 
+/*
+ * We use the full symtab and strtab which layout_symtab arranged to
+ * be appended to the init section.  Later we switch to the cut-down
+ * core-only ones.
+ */
 static void add_kallsyms(struct module *mod, const struct load_info *info)
 {
        unsigned int i, ndst;
@@ -2504,28 +2518,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
        char *s;
        Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
 
-       mod->symtab = (void *)symsec->sh_addr;
-       mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+       /* Set up to point into init section. */
+       mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off;
+
+       mod->kallsyms->symtab = (void *)symsec->sh_addr;
+       mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
        /* Make sure we get permanent strtab: don't use info->strtab. */
-       mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+       mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
 
        /* Set types up while we still have access to sections. */
-       for (i = 0; i < mod->num_symtab; i++)
-               mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
-
-       mod->core_symtab = dst = mod->module_core + info->symoffs;
-       mod->core_strtab = s = mod->module_core + info->stroffs;
-       src = mod->symtab;
-       for (ndst = i = 0; i < mod->num_symtab; i++) {
+       for (i = 0; i < mod->kallsyms->num_symtab; i++)
+               mod->kallsyms->symtab[i].st_info
+                       = elf_type(&mod->kallsyms->symtab[i], info);
+
+       /* Now populate the cut down core kallsyms for after init. */
+       mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs;
+       mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs;
+       src = mod->kallsyms->symtab;
+       for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
                if (i == 0 ||
                    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
                        dst[ndst] = src[i];
-                       dst[ndst++].st_name = s - mod->core_strtab;
-                       s += strlcpy(s, &mod->strtab[src[i].st_name],
+                       dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
+                       s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
                                     KSYM_NAME_LEN) + 1;
                }
        }
-       mod->core_num_syms = ndst;
+       mod->core_kallsyms.num_symtab = ndst;
 }
 #else
 static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -3274,9 +3293,8 @@ static noinline int do_init_module(struct module *mod)
        module_put(mod);
        trim_init_extable(mod);
 #ifdef CONFIG_KALLSYMS
-       mod->num_symtab = mod->core_num_syms;
-       mod->symtab = mod->core_symtab;
-       mod->strtab = mod->core_strtab;
+       /* Switch to core kallsyms now init is done: kallsyms may be walking! */
+       rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
 #endif
        mod_tree_remove_init(mod);
        unset_module_init_ro_nx(mod);
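
Collapsing the three symtab fields into one struct reached through an RCU-protected pointer is what makes the init-to-core handover safe: a reader dereferences the pointer once and gets a mutually consistent symtab/strtab/num_symtab, and the switch after init is a single pointer publish. A userspace sketch of that publish/read pattern, with C11 atomics standing in for rcu_assign_pointer()/rcu_dereference():

    #include <stdatomic.h>
    #include <stdio.h>

    struct mod_kallsyms {
        const char *const *symtab;
        unsigned int num_symtab;
    };

    static const char *init_syms[] = { "init_fn", "core_fn" };
    static const char *core_syms[] = { "core_fn" };

    static struct mod_kallsyms init_tab = { init_syms, 2 };
    static struct mod_kallsyms core_tab = { core_syms, 1 };

    /* the module's current table: readers load it once and use it whole */
    static _Atomic(struct mod_kallsyms *) kallsyms = &init_tab;

    static void dump_symbols(void)
    {
        struct mod_kallsyms *ks =
            atomic_load_explicit(&kallsyms, memory_order_consume);

        for (unsigned int i = 0; i < ks->num_symtab; i++)
            printf("%s\n", ks->symtab[i]);
    }

    int main(void)
    {
        dump_symbols();         /* full init-time table */
        /* do_init_module(): switch to the cut-down core table */
        atomic_store_explicit(&kallsyms, &core_tab, memory_order_release);
        dump_symbols();         /* core-only table */
        return 0;
    }
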
@@ -3646,9 +3664,9 @@ static inline int is_arm_mapping_symbol(const char *str)
               && (str[2] == '\0' || str[2] == '.');
 }
 
-static const char *symname(struct module *mod, unsigned int symnum)
+static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
 {
-       return mod->strtab + mod->symtab[symnum].st_name;
+       return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
 }
 
 static const char *get_ksymbol(struct module *mod,
@@ -3658,6 +3676,7 @@ static const char *get_ksymbol(struct module *mod,
 {
        unsigned int i, best = 0;
        unsigned long nextval;
+       struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
        /* At worse, next value is at end of module */
        if (within_module_init(addr, mod))
@@ -3667,32 +3686,32 @@ static const char *get_ksymbol(struct module *mod,
 
        /* Scan for closest preceding symbol, and next symbol. (ELF
           starts real symbols at 1). */
-       for (i = 1; i < mod->num_symtab; i++) {
-               if (mod->symtab[i].st_shndx == SHN_UNDEF)
+       for (i = 1; i < kallsyms->num_symtab; i++) {
+               if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
                        continue;
 
                /* We ignore unnamed symbols: they're uninformative
                 * and inserted at a whim. */
-               if (*symname(mod, i) == '\0'
-                   || is_arm_mapping_symbol(symname(mod, i)))
+               if (*symname(kallsyms, i) == '\0'
+                   || is_arm_mapping_symbol(symname(kallsyms, i)))
                        continue;
 
-               if (mod->symtab[i].st_value <= addr
-                   && mod->symtab[i].st_value > mod->symtab[best].st_value)
+               if (kallsyms->symtab[i].st_value <= addr
+                   && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
                        best = i;
-               if (mod->symtab[i].st_value > addr
-                   && mod->symtab[i].st_value < nextval)
-                       nextval = mod->symtab[i].st_value;
+               if (kallsyms->symtab[i].st_value > addr
+                   && kallsyms->symtab[i].st_value < nextval)
+                       nextval = kallsyms->symtab[i].st_value;
        }
 
        if (!best)
                return NULL;
 
        if (size)
-               *size = nextval - mod->symtab[best].st_value;
+               *size = nextval - kallsyms->symtab[best].st_value;
        if (offset)
-               *offset = addr - mod->symtab[best].st_value;
-       return symname(mod, best);
+               *offset = addr - kallsyms->symtab[best].st_value;
+       return symname(kallsyms, best);
 }
 
 /* For kallsyms to ask for address resolution.  NULL means not found.  Careful
@@ -3782,18 +3801,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               struct mod_kallsyms *kallsyms;
+
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
-               if (symnum < mod->num_symtab) {
-                       *value = mod->symtab[symnum].st_value;
-                       *type = mod->symtab[symnum].st_info;
-                       strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN);
+               kallsyms = rcu_dereference_sched(mod->kallsyms);
+               if (symnum < kallsyms->num_symtab) {
+                       *value = kallsyms->symtab[symnum].st_value;
+                       *type = kallsyms->symtab[symnum].st_info;
+                       strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
                        strlcpy(module_name, mod->name, MODULE_NAME_LEN);
                        *exported = is_exported(name, *value, mod);
                        preempt_enable();
                        return 0;
                }
-               symnum -= mod->num_symtab;
+               symnum -= kallsyms->num_symtab;
        }
        preempt_enable();
        return -ERANGE;
@@ -3802,11 +3824,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 static unsigned long mod_find_symname(struct module *mod, const char *name)
 {
        unsigned int i;
+       struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
-       for (i = 0; i < mod->num_symtab; i++)
-               if (strcmp(name, symname(mod, i)) == 0 &&
-                   mod->symtab[i].st_info != 'U')
-                       return mod->symtab[i].st_value;
+       for (i = 0; i < kallsyms->num_symtab; i++)
+               if (strcmp(name, symname(kallsyms, i)) == 0 &&
+                   kallsyms->symtab[i].st_info != 'U')
+                       return kallsyms->symtab[i].st_value;
        return 0;
 }
 
@@ -3845,11 +3868,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
        module_assert_mutex();
 
        list_for_each_entry(mod, &modules, list) {
+               /* We hold module_mutex: no need for rcu_dereference_sched */
+               struct mod_kallsyms *kallsyms = mod->kallsyms;
+
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
-               for (i = 0; i < mod->num_symtab; i++) {
-                       ret = fn(data, symname(mod, i),
-                                mod, mod->symtab[i].st_value);
+               for (i = 0; i < kallsyms->num_symtab; i++) {
+                       ret = fn(data, symname(kallsyms, i),
+                                mod, kallsyms->symtab[i].st_value);
                        if (ret != 0)
                                return ret;
                }
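
The hunks above consolidate a module's symtab, strtab and num_symtab into a
single mod_kallsyms structure, so the post-init switch to the trimmed core
table becomes one pointer publication: a concurrent kallsyms walker sees
either the full table or the core table, never a stale count paired with a
new array. A minimal userspace sketch of that publish/dereference pattern,
with C11 atomics standing in for rcu_assign_pointer()/rcu_dereference_sched()
(struct mod_syms and all names below are invented for illustration):

#include <stdatomic.h>
#include <stdio.h>

struct mod_syms {
        const char **names;
        unsigned int num;
};

static const char *init_names[] = { "init_fn", "core_fn_a", "core_fn_b" };
static const char *core_names[] = { "core_fn_a", "core_fn_b" };

static struct mod_syms full_syms = { init_names, 3 };
static struct mod_syms core_syms = { core_names, 2 };

/* counterpart of mod->kallsyms: one pointer, dereferenced once per lookup */
static _Atomic(struct mod_syms *) cur_syms = &full_syms;

static void dump_syms(void)
{
        /* acquire pairs with the release store in publish_core_syms() */
        struct mod_syms *s = atomic_load_explicit(&cur_syms,
                                                  memory_order_acquire);
        unsigned int i;

        for (i = 0; i < s->num; i++)
                printf("  %s\n", s->names[i]);
}

static void publish_core_syms(void)
{
        /* like rcu_assign_pointer(): publish fully initialised data */
        atomic_store_explicit(&cur_syms, &core_syms, memory_order_release);
}

int main(void)
{
        dump_syms();            /* full init-time table */
        publish_core_syms();    /* init done: swap in the trimmed table */
        dump_syms();            /* only the core symbols remain visible */
        return 0;
}

In the kernel the old table lives in the module's init region, which is
freed shortly afterwards; readers therefore run under preempt_disable() and
use rcu_dereference_sched() rather than an acquire load.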
index f150dbbe6f62d31aa07b3fbf3f07a8807fa40991..249b1eb1e6e1381c6963296db8c408f99a01b3b5 100644 (file)
@@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
                if (!conflict)
                        break;
                if (conflict != parent) {
-                       parent = conflict;
-                       if (!(conflict->flags & IORESOURCE_BUSY))
+                       if (!(conflict->flags & IORESOURCE_BUSY)) {
+                               parent = conflict;
                                continue;
+                       }
                }
                if (conflict->flags & flags & IORESOURCE_MUXED) {
                        add_wait_queue(&muxed_resource_wait, &wait);
index 580ac2d4024ffbdb29960ccdd86424fa730b1e78..15a1795bbba17d1140e3220c41d48dd26ac43c95 100644 (file)
@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
                put_seccomp_filter(thread);
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);
+
+               /*
+                * Don't let an unprivileged task work around
+                * the no_new_privs restriction by creating
+                * a thread that sets it up, enters seccomp,
+                * then dies.
+                */
+               if (task_no_new_privs(caller))
+                       task_set_no_new_privs(thread);
+
                /*
                 * Opt the other thread into seccomp if needed.
                 * As threads are considered to be trust-realm
                 * equivalent (see ptrace_may_access), it is safe to
                 * allow one thread to transition the other.
                 */
-               if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
-                       /*
-                        * Don't let an unprivileged task work around
-                        * the no_new_privs restriction by creating
-                        * a thread that sets it up, enters seccomp,
-                        * then dies.
-                        */
-                       if (task_no_new_privs(caller))
-                               task_set_no_new_privs(thread);
-
+               if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
                        seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
-               }
        }
 }
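
The reordering above is the point of the fix: no_new_privs has to propagate
to every sibling unconditionally, not only to siblings still in
SECCOMP_MODE_DISABLED, otherwise a thread that already entered filter mode
earlier would escape the restriction. A condensed sketch of the rule, using
plain structs instead of task_struct (names invented):

#include <stdbool.h>
#include <stdio.h>

struct task { bool no_new_privs; bool filtered; };

static void sync_one(const struct task *caller, struct task *thread)
{
        /* unconditional: even an already-filtered sibling must inherit it */
        if (caller->no_new_privs)
                thread->no_new_privs = true;

        /* opt the sibling into filter mode only if it isn't in one yet */
        if (!thread->filtered)
                thread->filtered = true;
}

int main(void)
{
        struct task caller = { .no_new_privs = true,  .filtered = true };
        struct task sib    = { .no_new_privs = false, .filtered = true };

        sync_one(&caller, &sib);   /* the old code skipped the nnp copy here */
        printf("sibling no_new_privs: %s\n",
               sib.no_new_privs ? "set" : "unset");
        return 0;
}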
 
index ce033c7aa2e8f8a179ae4a031c176e1a358a100d..9cff0ab82b635d33d2936697d909724d07767e69 100644 (file)
@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
 static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
 {
        struct posix_clock *clk = get_posix_clock(fp);
-       int result = 0;
+       unsigned int result = 0;
 
        if (!clk)
-               return -ENODEV;
+               return POLLERR;
 
        if (clk->ops.poll)
                result = clk->ops.poll(clk, fp, wait);
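
The type change above is the real fix: a poll handler returns an unsigned
event mask, not an errno, so the old "return -ENODEV" survived the implicit
conversion as a mask with almost every bit set. A tiny userspace
illustration of what callers actually received (assumes 32-bit unsigned int):

#include <errno.h>
#include <poll.h>
#include <stdio.h>

static unsigned int broken_poll(void)
{
        return -ENODEV;         /* implicitly converted to 0xffffffed */
}

int main(void)
{
        unsigned int mask = broken_poll();

        printf("mask = %#x\n", mask);
        printf("POLLIN set?  %s\n", (mask & POLLIN) ? "yes" : "no");
        printf("POLLOUT set? %s\n", (mask & POLLOUT) ? "yes" : "no");
        /* nearly every event bit is set; POLLERR alone is the honest answer */
        return 0;
}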
index 7c7ec45159834a1b25576fbed037c9951f3c076f..22c57e191a234f1f5a6cde4121eb30d7cb6ec9a4 100644 (file)
@@ -977,9 +977,9 @@ static void tick_nohz_switch_to_nohz(void)
        /* Get the next period */
        next = tick_init_jiffy_update();
 
-       hrtimer_forward_now(&ts->sched_timer, tick_period);
        hrtimer_set_expires(&ts->sched_timer, next);
-       tick_program_event(next, 1);
+       hrtimer_forward_now(&ts->sched_timer, tick_period);
+       tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
        tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
 
index d563c19603029bc6c57ac55154f71ed027320276..99188ee5d9d0908df81d3acbadbc6bd1af44ef0f 100644 (file)
@@ -305,8 +305,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
 
        delta = timekeeping_get_delta(tkr);
 
-       nsec = delta * tkr->mult + tkr->xtime_nsec;
-       nsec >>= tkr->shift;
+       nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
 
        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + arch_gettimeoffset();
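
The two-line merge above keeps the whole conversion in one 64-bit
expression; the arithmetic itself is the standard clocksource fixed-point
step, cycles to nanoseconds as (delta * mult + frac) >> shift. A standalone
rendering of that step (the 10 MHz rate and the shift value are made up for
illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 10 MHz clocksource: 100 ns per cycle, encoded as mult / 2^shift */
        uint32_t shift = 24;
        uint32_t mult = 100u << shift;          /* 100 * 2^24 fits in u32 */

        uint64_t delta = 12345;                 /* cycles since last update */
        uint64_t xtime_nsec = 0;                /* pre-shifted fractional ns */

        uint64_t nsec = (delta * mult + xtime_nsec) >> shift;

        printf("%llu cycles -> %llu ns\n",
               (unsigned long long)delta,
               (unsigned long long)nsec);       /* 12345 -> 1234500 */
        return 0;
}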
index 4f6ef6912e00173040867f6a68c52f010bbfd21d..d202d991edae5e433f3f0b8ccb864222a65a2b40 100644 (file)
@@ -97,16 +97,16 @@ trace_find_event_field(struct trace_event_call *call, char *name)
        struct ftrace_event_field *field;
        struct list_head *head;
 
-       field = __find_event_field(&ftrace_generic_fields, name);
+       head = trace_get_fields(call);
+       field = __find_event_field(head, name);
        if (field)
                return field;
 
-       field = __find_event_field(&ftrace_common_fields, name);
+       field = __find_event_field(&ftrace_generic_fields, name);
        if (field)
                return field;
 
-       head = trace_get_fields(call);
-       return __find_event_field(head, name);
+       return __find_event_field(&ftrace_common_fields, name);
 }
 
 static int __trace_define_field(struct list_head *head, const char *type,
@@ -171,8 +171,10 @@ static int trace_define_generic_fields(void)
 {
        int ret;
 
-       __generic_field(int, cpu, FILTER_OTHER);
-       __generic_field(char *, comm, FILTER_PTR_STRING);
+       __generic_field(int, CPU, FILTER_CPU);
+       __generic_field(int, cpu, FILTER_CPU);
+       __generic_field(char *, COMM, FILTER_COMM);
+       __generic_field(char *, comm, FILTER_COMM);
 
        return ret;
 }
@@ -869,7 +871,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
-               if (call->class && call->class->reg)
+               if (call->class && call->class->reg &&
+                   !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                        return file;
        }
 
index f93a219b18daaa92ed314ad7ac4ba8e0ed3c4c09..6816302542b28158a4756373f5b3a57ed1a907fa 100644 (file)
@@ -1043,13 +1043,14 @@ static int init_pred(struct filter_parse_state *ps,
                return -EINVAL;
        }
 
-       if (is_string_field(field)) {
+       if (field->filter_type == FILTER_COMM) {
+               filter_build_regex(pred);
+               fn = filter_pred_comm;
+               pred->regex.field_len = TASK_COMM_LEN;
+       } else if (is_string_field(field)) {
                filter_build_regex(pred);
 
-               if (!strcmp(field->name, "comm")) {
-                       fn = filter_pred_comm;
-                       pred->regex.field_len = TASK_COMM_LEN;
-               } else if (field->filter_type == FILTER_STATIC_STRING) {
+               if (field->filter_type == FILTER_STATIC_STRING) {
                        fn = filter_pred_string;
                        pred->regex.field_len = field->size;
                } else if (field->filter_type == FILTER_DYN_STRING)
@@ -1072,7 +1073,7 @@ static int init_pred(struct filter_parse_state *ps,
                }
                pred->val = val;
 
-               if (!strcmp(field->name, "cpu"))
+               if (field->filter_type == FILTER_CPU)
                        fn = filter_pred_cpu;
                else
                        fn = select_comparison_fn(pred->op, field->size,
index c579dbab2e36ab20dd94a5f753a480df17ae28ff..450c21fd0e6e4289bcf1d2d3b6ba78d68d992df5 100644 (file)
@@ -568,6 +568,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
                                                  int node)
 {
        assert_rcu_or_wq_mutex_or_pool_mutex(wq);
+
+       /*
+        * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
+        * delayed item is pending.  The plan is to keep CPU -> NODE
+        * mapping valid and stable across CPU on/offlines.  Once that
+        * happens, this workaround can be removed.
+        */
+       if (unlikely(node == NUMA_NO_NODE))
+               return wq->dfl_pwq;
+
        return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 }
 
@@ -1458,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        timer_stats_timer_set_start_info(&dwork->timer);
 
        dwork->wq = wq;
-       /* timer isn't guaranteed to run in this cpu, record earlier */
-       if (cpu == WORK_CPU_UNBOUND)
-               cpu = raw_smp_processor_id();
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
-       add_timer_on(timer, cpu);
+       if (unlikely(cpu != WORK_CPU_UNBOUND))
+               add_timer_on(timer, cpu);
+       else
+               add_timer(timer);
 }
 
 /**
index f0df318104e7272ef97641421c938069a047a85e..1a48744253d71ef1f928ac25c0844d327245abbb 100644 (file)
@@ -210,9 +210,11 @@ config RANDOM32_SELFTEST
 # compression support is select'ed if needed
 #
 config 842_COMPRESS
+       select CRC32
        tristate
 
 config 842_DECOMPRESS
+       select CRC32
        tristate
 
 config ZLIB_INFLATE
index 6f500ef2301d893c9b2c737747fc355974ff4a19..f0b323abb4c64a566700d9d265f4451174847eaa 100644 (file)
@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
         }
 }
 EXPORT_SYMBOL(ucs2_strncmp);
+
+unsigned long
+ucs2_utf8size(const ucs2_char_t *src)
+{
+       unsigned long i;
+       unsigned long j = 0;
+
+       for (i = 0; i < ucs2_strlen(src); i++) {
+               u16 c = src[i];
+
+               if (c >= 0x800)
+                       j += 3;
+               else if (c >= 0x80)
+                       j += 2;
+               else
+                       j += 1;
+       }
+
+       return j;
+}
+EXPORT_SYMBOL(ucs2_utf8size);
+
+/*
+ * Copy at most maxlength bytes of whole UTF-8 characters to dest from the
+ * UCS-2 string src.
+ *
+ * The return value is the number of bytes written, not including the
+ * final NUL byte.
+ */
+unsigned long
+ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
+{
+       unsigned int i;
+       unsigned long j = 0;
+       unsigned long limit = ucs2_strnlen(src, maxlength);
+
+       for (i = 0; maxlength && i < limit; i++) {
+               u16 c = src[i];
+
+               if (c >= 0x800) {
+                       if (maxlength < 3)
+                               break;
+                       maxlength -= 3;
+                       dest[j++] = 0xe0 | (c & 0xf000) >> 12;
+                       dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
+                       dest[j++] = 0x80 | (c & 0x003f);
+               } else if (c >= 0x80) {
+                       if (maxlength < 2)
+                               break;
+                       maxlength -= 2;
+                       dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
+                       dest[j++] = 0x80 | (c & 0x03f);
+               } else {
+                       maxlength -= 1;
+                       dest[j++] = c & 0x7f;
+               }
+       }
+       if (maxlength)
+               dest[j] = '\0';
+       return j;
+}
+EXPORT_SYMBOL(ucs2_as_utf8);
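
The same packing rules are easy to exercise outside the kernel: code points
below U+0080 take one byte, below U+0800 two, and everything else
representable in UCS-2 three (UCS-2 tops out at U+FFFF, so four-byte
sequences never arise). A small userspace sketch mirroring ucs2_utf8size():

#include <stdint.h>
#include <stdio.h>

static unsigned long utf8size(const uint16_t *src)
{
        unsigned long n = 0;

        for (; *src; src++)
                n += *src >= 0x800 ? 3 : *src >= 0x80 ? 2 : 1;
        return n;
}

int main(void)
{
        /* "a", U+00E9 (e-acute), U+20AC (euro sign) */
        const uint16_t s[] = { 0x0061, 0x00e9, 0x20ac, 0 };

        printf("UTF-8 size: %lu bytes\n", utf8size(s));   /* 1 + 2 + 3 = 6 */
        return 0;
}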
index d3116be5a00fa51646b5a0b45683a138a4ed3f7c..300117f1a08f35c27c531d55ed0a08a1a9b81e50 100644 (file)
@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
        bool dequeued_page;
 
        dequeued_page = false;
+       spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
                /*
                 * Block others from accessing the 'page' while we get around
@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
                                continue;
                        }
 #endif
-                       spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                        balloon_page_delete(page);
                        __count_vm_event(BALLOON_DEFLATE);
-                       spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                        unlock_page(page);
                        dequeued_page = true;
                        break;
                }
        }
+       spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 
        if (!dequeued_page) {
                /*
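
The widening of pages_lock above is the whole fix: list_for_each_entry_safe()
only tolerates removals made by the iterating thread itself, so the lock has
to cover the entire traversal, not just the unlink. A lock-scope sketch with
a pthread mutex standing in for the irqsave spinlock (node layout and names
are invented):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct node { int busy; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static struct node *dequeue_idle(void)
{
        struct node **pp, *n;

        pthread_mutex_lock(&list_lock);         /* held across the whole walk */
        for (pp = &head; (n = *pp) != NULL; pp = &n->next) {
                if (!n->busy) {
                        *pp = n->next;          /* unlink under the lock */
                        break;
                }
        }
        pthread_mutex_unlock(&list_lock);
        return n;
}

int main(void)
{
        struct node b = { .busy = 1, .next = NULL };
        struct node a = { .busy = 0, .next = &b };

        head = &a;
        printf("dequeued: %s\n", dequeue_idle() == &a ? "the idle node" : "?");
        return 0;
}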
index c387430f06c319d52794152b7e100a66a10c9f68..b80bf4746b67ac0de41505165ef9c147e93e44ef 100644 (file)
@@ -3399,8 +3399,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(pmd_none(*pmd)) &&
            unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
-       /* if an huge pmd materialized from under us just retry later */
-       if (unlikely(pmd_trans_huge(*pmd)))
+       /*
+        * If a huge pmd materialized under us just retry later.  Use
+        * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
+        * didn't become pmd_trans_huge under us and then back to pmd_none, as
+        * a result of MADV_DONTNEED running immediately after a huge pmd fault
+        * in a different thread of this mm, in turn leading to a misleading
+        * pmd_trans_huge() retval.  All we have to ensure is that it is a
+        * regular pmd that we can walk with pte_offset_map() and we can do that
+        * through an atomic read in C, which is what pmd_trans_unstable()
+        * provides.
+        */
+       if (unlikely(pmd_trans_unstable(pmd)))
                return 0;
        /*
         * A regular pmd is established and it can't morph into a huge pmd
index 7890d0bb5e23c3db75cc682878a2c5ec89b0e513..6d17e0ab42d40f3253df41bbecc1e0d85acb204b 100644 (file)
@@ -1578,7 +1578,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
                                         (GFP_HIGHUSER_MOVABLE |
                                          __GFP_THISNODE | __GFP_NOMEMALLOC |
                                          __GFP_NORETRY | __GFP_NOWARN) &
-                                        ~(__GFP_IO | __GFP_FS), 0);
+                                        ~__GFP_RECLAIM, 0);
 
        return newpage;
 }
index 2afcdbbdb68506096d5c6fad584a8425709d10aa..ea5a70cfc1d828bb09cc236c91835fd090c3e875 100644 (file)
@@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode)
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
-       } else
-               kfree(info->symlink);
+       }
 
        simple_xattrs_free(&info->xattrs);
        WARN_ON(inode->i_blocks);
@@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
        info = SHMEM_I(inode);
        inode->i_size = len-1;
        if (len <= SHORT_SYMLINK_LEN) {
-               info->symlink = kmemdup(symname, len, GFP_KERNEL);
-               if (!info->symlink) {
+               inode->i_link = kmemdup(symname, len, GFP_KERNEL);
+               if (!inode->i_link) {
                        iput(inode);
                        return -ENOMEM;
                }
                inode->i_op = &shmem_short_symlink_operations;
-               inode->i_link = info->symlink;
        } else {
                error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
                if (error) {
@@ -3083,6 +3081,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
 static void shmem_destroy_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
+       kfree(inode->i_link);
        kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
 }
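
Moving the kfree() into the RCU callback works for both eviction paths
because kfree(NULL) is a no-op, provided i_link starts out NULL for
non-symlink inodes. The callback recovers the inode from the embedded
rcu_head with container_of(); here is the idiom in isolation, with the macro
reproduced in its usual form and trimmed stand-in structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };

struct inode {
        long i_ino;
        struct rcu_head i_rcu;
};

static void destroy_cb(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        printf("freeing inode %ld\n", inode->i_ino);
}

int main(void)
{
        struct inode ino = { .i_ino = 42 };

        destroy_cb(&ino.i_rcu);   /* normally invoked after a grace period */
        return 0;
}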
 
index 9e9cca3689a08ab7c9038b9bab4c61d7214004af..795ddd8b2f77567381a3405839cae239871e18ad 100644 (file)
@@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
        /* check that it's our buffer */
        if (lowpan_is_ipv6(*skb_network_header(skb))) {
+               /* Pull off the 1-byte 6LoWPAN header. */
+               skb_pull(skb, 1);
+
                /* Copy the packet so that the IPv6 header is
                 * properly aligned.
                 */
@@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;
+               local_skb->dev = dev;
 
                skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
 
@@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
                if (!local_skb)
                        goto drop;
 
+               local_skb->dev = dev;
+
                ret = iphc_decompress(local_skb, dev, chan);
                if (ret < 0) {
                        kfree_skb(local_skb);
@@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;
-               local_skb->dev = dev;
 
                if (give_skb_to_upper(local_skb, dev)
                                != NET_RX_SUCCESS) {
index 85b82f7adbd2dd96ec7163a6d3bc3c155dc2518e..24e9410923d04c16b6570cfdf61b1010f9c50049 100644 (file)
@@ -722,8 +722,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
        if (hci_update_random_address(req, false, &own_addr_type))
                return;
 
+       /* Set window to be the same value as the interval to enable
+        * continuous scanning.
+        */
        cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
-       cp.scan_window = cpu_to_le16(hdev->le_scan_window);
+       cp.scan_window = cp.scan_interval;
+
        bacpy(&cp.peer_addr, &conn->dst);
        cp.peer_addr_type = conn->dst_type;
        cp.own_address_type = own_addr_type;
index 981f8a202c27d2298a8a9459fa2a671cb3b3197a..02778c5bc1497b3db1b82bdba3b81247bc272a15 100644 (file)
@@ -175,21 +175,29 @@ static u8 update_white_list(struct hci_request *req)
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
-               struct hci_cp_le_del_from_white_list cp;
+               /* If the device is neither in pend_le_conns nor
+                * pend_le_reports then remove it from the whitelist.
+                */
+               if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
+                                              &b->bdaddr, b->bdaddr_type) &&
+                   !hci_pend_le_action_lookup(&hdev->pend_le_reports,
+                                              &b->bdaddr, b->bdaddr_type)) {
+                       struct hci_cp_le_del_from_white_list cp;
+
+                       cp.bdaddr_type = b->bdaddr_type;
+                       bacpy(&cp.bdaddr, &b->bdaddr);
 
-               if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
-                                             &b->bdaddr, b->bdaddr_type) ||
-                   hci_pend_le_action_lookup(&hdev->pend_le_reports,
-                                             &b->bdaddr, b->bdaddr_type)) {
-                       white_list_entries++;
+                       hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
+                                   sizeof(cp), &cp);
                        continue;
                }
 
-               cp.bdaddr_type = b->bdaddr_type;
-               bacpy(&cp.bdaddr, &b->bdaddr);
+               if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
+                       /* White list can not be used with RPAs */
+                       return 0x00;
+               }
 
-               hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
-                           sizeof(cp), &cp);
+               white_list_entries++;
        }
 
        /* Since all no longer valid white list entries have been
index ffed8a1d4f27634866c93d22b4ceb059b956cc91..4b175df35184b08b5695d966f66b129148c6c011 100644 (file)
@@ -1072,22 +1072,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
                        hcon->dst_type = smp->remote_irk->addr_type;
                        queue_work(hdev->workqueue, &conn->id_addr_update_work);
                }
-
-               /* When receiving an identity resolving key for
-                * a remote device that does not use a resolvable
-                * private address, just remove the key so that
-                * it is possible to use the controller white
-                * list for scanning.
-                *
-                * Userspace will have been told to not store
-                * this key at this point. So it is safe to
-                * just remove it.
-                */
-               if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
-                       list_del_rcu(&smp->remote_irk->list);
-                       kfree_rcu(smp->remote_irk, rcu);
-                       smp->remote_irk = NULL;
-               }
        }
 
        if (smp->csrk) {
index a1abe4936fe15299b7643b8944023050c5f938c1..3addc05b9a16676b07ca13bc6faf0cba62b3acfd 100644 (file)
@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
        .notifier_call = br_device_event
 };
 
+/* called with RTNL */
 static int br_switchdev_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
 {
@@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused,
        struct switchdev_notifier_fdb_info *fdb_info;
        int err = NOTIFY_DONE;
 
-       rtnl_lock();
        p = br_port_get_rtnl(dev);
        if (!p)
                goto out;
@@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused,
        }
 
 out:
-       rtnl_unlock();
        return err;
 }
 
index 9981039ef4ffcca29081b83a5e06a81ce6871f20..63ae5dd24fc597277ac384cb06b27ea16c6db25b 100644 (file)
@@ -672,6 +672,8 @@ static void reset_connection(struct ceph_connection *con)
        }
        con->in_seq = 0;
        con->in_seq_acked = 0;
+
+       con->out_skip = 0;
 }
 
 /*
@@ -771,6 +773,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
 
 static void con_out_kvec_reset(struct ceph_connection *con)
 {
+       BUG_ON(con->out_skip);
+
        con->out_kvec_left = 0;
        con->out_kvec_bytes = 0;
        con->out_kvec_cur = &con->out_kvec[0];
@@ -779,9 +783,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
 static void con_out_kvec_add(struct ceph_connection *con,
                                size_t size, void *data)
 {
-       int index;
+       int index = con->out_kvec_left;
 
-       index = con->out_kvec_left;
+       BUG_ON(con->out_skip);
        BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
 
        con->out_kvec[index].iov_len = size;
@@ -790,6 +794,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
        con->out_kvec_bytes += size;
 }
 
+/*
+ * Chop off a kvec from the end.  Return residual number of bytes for
+ * that kvec, i.e. how many bytes would have been written if the kvec
+ * hadn't been nuked.
+ */
+static int con_out_kvec_skip(struct ceph_connection *con)
+{
+       int off = con->out_kvec_cur - con->out_kvec;
+       int skip = 0;
+
+       if (con->out_kvec_bytes > 0) {
+               skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
+               BUG_ON(con->out_kvec_bytes < skip);
+               BUG_ON(!con->out_kvec_left);
+               con->out_kvec_bytes -= skip;
+               con->out_kvec_left--;
+       }
+
+       return skip;
+}
+
 #ifdef CONFIG_BLOCK
 
 /*
@@ -1175,6 +1200,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
        return new_piece;
 }
 
+static size_t sizeof_footer(struct ceph_connection *con)
+{
+       return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
+           sizeof(struct ceph_msg_footer) :
+           sizeof(struct ceph_msg_footer_old);
+}
+
 static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
 {
        BUG_ON(!msg);
@@ -1197,7 +1229,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
        m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
 
        dout("prepare_write_message_footer %p\n", con);
-       con->out_kvec_is_msg = true;
        con->out_kvec[v].iov_base = &m->footer;
        if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
                if (con->ops->sign_message)
@@ -1225,7 +1256,6 @@ static void prepare_write_message(struct ceph_connection *con)
        u32 crc;
 
        con_out_kvec_reset(con);
-       con->out_kvec_is_msg = true;
        con->out_msg_done = false;
 
        /* Sneak an ack in there first?  If we can get it into the same
@@ -1265,18 +1295,19 @@ static void prepare_write_message(struct ceph_connection *con)
 
        /* tag + hdr + front + middle */
        con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
-       con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
+       con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
        con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
 
        if (m->middle)
                con_out_kvec_add(con, m->middle->vec.iov_len,
                        m->middle->vec.iov_base);
 
-       /* fill in crc (except data pages), footer */
+       /* fill in hdr crc and finalize hdr */
        crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
        con->out_msg->hdr.crc = cpu_to_le32(crc);
-       con->out_msg->footer.flags = 0;
+       memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
 
+       /* fill in front and middle crc, footer */
        crc = crc32c(0, m->front.iov_base, m->front.iov_len);
        con->out_msg->footer.front_crc = cpu_to_le32(crc);
        if (m->middle) {
@@ -1288,6 +1319,7 @@ static void prepare_write_message(struct ceph_connection *con)
        dout("%s front_crc %u middle_crc %u\n", __func__,
             le32_to_cpu(con->out_msg->footer.front_crc),
             le32_to_cpu(con->out_msg->footer.middle_crc));
+       con->out_msg->footer.flags = 0;
 
        /* is there a data payload? */
        con->out_msg->footer.data_crc = 0;
@@ -1492,7 +1524,6 @@ static int write_partial_kvec(struct ceph_connection *con)
                }
        }
        con->out_kvec_left = 0;
-       con->out_kvec_is_msg = false;
        ret = 1;
 out:
        dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
@@ -1584,6 +1615,7 @@ static int write_partial_skip(struct ceph_connection *con)
 {
        int ret;
 
+       dout("%s %p %d left\n", __func__, con, con->out_skip);
        while (con->out_skip > 0) {
                size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
 
@@ -2313,9 +2345,9 @@ static int read_partial_message(struct ceph_connection *con)
                        ceph_pr_addr(&con->peer_addr.in_addr),
                        seq, con->in_seq + 1);
                con->in_base_pos = -front_len - middle_len - data_len -
-                       sizeof(m->footer);
+                       sizeof_footer(con);
                con->in_tag = CEPH_MSGR_TAG_READY;
-               return 0;
+               return 1;
        } else if ((s64)seq - (s64)con->in_seq > 1) {
                pr_err("read_partial_message bad seq %lld expected %lld\n",
                       seq, con->in_seq + 1);
@@ -2338,10 +2370,10 @@ static int read_partial_message(struct ceph_connection *con)
                        /* skip this message */
                        dout("alloc_msg said skip message\n");
                        con->in_base_pos = -front_len - middle_len - data_len -
-                               sizeof(m->footer);
+                               sizeof_footer(con);
                        con->in_tag = CEPH_MSGR_TAG_READY;
                        con->in_seq++;
-                       return 0;
+                       return 1;
                }
 
                BUG_ON(!con->in_msg);
@@ -2506,13 +2538,13 @@ more:
 
 more_kvec:
        /* kvec data queued? */
-       if (con->out_skip) {
-               ret = write_partial_skip(con);
+       if (con->out_kvec_left) {
+               ret = write_partial_kvec(con);
                if (ret <= 0)
                        goto out;
        }
-       if (con->out_kvec_left) {
-               ret = write_partial_kvec(con);
+       if (con->out_skip) {
+               ret = write_partial_skip(con);
                if (ret <= 0)
                        goto out;
        }
@@ -3050,16 +3082,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
                ceph_msg_put(msg);
        }
        if (con->out_msg == msg) {
-               dout("%s %p msg %p - was sending\n", __func__, con, msg);
-               con->out_msg = NULL;
-               if (con->out_kvec_is_msg) {
-                       con->out_skip = con->out_kvec_bytes;
-                       con->out_kvec_is_msg = false;
+               BUG_ON(con->out_skip);
+               /* footer */
+               if (con->out_msg_done) {
+                       con->out_skip += con_out_kvec_skip(con);
+               } else {
+                       BUG_ON(!msg->data_length);
+                       if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
+                               con->out_skip += sizeof(msg->footer);
+                       else
+                               con->out_skip += sizeof(msg->old_footer);
                }
+               /* data, middle, front */
+               if (msg->data_length)
+                       con->out_skip += msg->cursor.total_resid;
+               if (msg->middle)
+                       con->out_skip += con_out_kvec_skip(con);
+               con->out_skip += con_out_kvec_skip(con);
+
+               dout("%s %p msg %p - was sending, will write %d skip %d\n",
+                    __func__, con, msg, con->out_kvec_bytes, con->out_skip);
                msg->hdr.seq = 0;
-
+               con->out_msg = NULL;
                ceph_msg_put(msg);
        }
+
        mutex_unlock(&con->mutex);
 }
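
Most of the messenger churn above exists so ceph_msg_revoke() can account
precisely for bytes already queued: con_out_kvec_skip() chops the last
queued kvec and reports its size, which revoke accumulates into out_skip for
write_partial_skip() to burn through later. The bookkeeping in isolation, as
a userspace sketch over struct iovec (the out_queue type is invented for
this example):

#include <stdio.h>
#include <sys/uio.h>

struct out_queue {
        struct iovec vec[8];
        int left;               /* number of queued iovecs */
        int bytes;              /* total queued bytes */
};

static int out_queue_skip(struct out_queue *q)
{
        int skip = 0;

        if (q->bytes > 0) {
                skip = (int)q->vec[q->left - 1].iov_len; /* residual bytes */
                q->bytes -= skip;
                q->left--;
        }
        return skip;
}

int main(void)
{
        static char hdr[16], payload[64];
        struct out_queue q = { .left = 0, .bytes = 0 };

        q.vec[q.left] = (struct iovec){ hdr, sizeof(hdr) };
        q.left++;
        q.bytes += sizeof(hdr);
        q.vec[q.left] = (struct iovec){ payload, sizeof(payload) };
        q.left++;
        q.bytes += sizeof(payload);

        int skipped = out_queue_skip(&q);       /* drops the payload kvec */
        printf("skipped %d bytes, %d still queued\n", skipped, q.bytes);
        return 0;
}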
 
index f8f235930d887adf1df94314b18c719638328df0..a28e47ff1b1b3496f753ae1aaf0ede6032a75b7b 100644 (file)
@@ -2843,8 +2843,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (!req) {
-               pr_warn("%s osd%d tid %llu unknown, skipping\n",
-                       __func__, osd->o_osd, tid);
+               dout("%s osd%d tid %llu unknown, skipping\n", __func__,
+                    osd->o_osd, tid);
                m = NULL;
                *skip = 1;
                goto out;
index 7f00f24397700647d9e2be6ddd90482d65388971..9efbdb3ff78a0098b83f60e0c626da933035e983 100644 (file)
@@ -4145,6 +4145,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
 
                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= p->vlan_tci ^ skb->vlan_tci;
+               diffs |= skb_metadata_dst_cmp(p, skb);
                if (maclen == ETH_HLEN)
                        diffs |= compare_ether_header(skb_mac_header(p),
                                                      skb_mac_header(skb));
@@ -4342,10 +4343,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
                break;
 
        case GRO_MERGED_FREE:
-               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
+                       skb_dst_drop(skb);
                        kmem_cache_free(skbuff_head_cache, skb);
-               else
+               } else {
                        __kfree_skb(skb);
+               }
                break;
 
        case GRO_HELD:
@@ -7125,8 +7128,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
        setup(dev);
 
-       if (!dev->tx_queue_len)
+       if (!dev->tx_queue_len) {
                dev->priv_flags |= IFF_NO_QUEUE;
+               dev->tx_queue_len = 1;
+       }
 
        dev->num_tx_queues = txqs;
        dev->real_num_tx_queues = txqs;
index d79699c9d1b9eb9f250254e360b2d5a7b4ff6e34..12e7003320107dbb2b86f4fa1cfd20a8d59e8ed5 100644 (file)
@@ -208,7 +208,6 @@ ip:
        case htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
-               __be32 flow_label;
 
 ipv6:
                iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
@@ -230,8 +229,12 @@ ipv6:
                        key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                }
 
-               flow_label = ip6_flowlabel(iph);
-               if (flow_label) {
+               if ((dissector_uses_key(flow_dissector,
+                                       FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
+                    (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
+                   ip6_flowlabel(iph)) {
+                       __be32 flow_label = ip6_flowlabel(iph);
+
                        if (dissector_uses_key(flow_dissector,
                                               FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
                                key_tags = skb_flow_dissector_target(flow_dissector,
@@ -396,6 +399,13 @@ ip_proto_again:
                                goto out_bad;
                        proto = eth->h_proto;
                        nhoff += sizeof(*eth);
+
+                       /* Cap headers that we access via pointers at the
+                        * end of the Ethernet header as our maximum alignment
+                        * at that point is only 2 bytes.
+                        */
+                       if (NET_IP_ALIGN)
+                               hlen = nhoff;
                }
 
                key_control->flags |= FLOW_DIS_ENCAPSULATION;
index 8a1741b14302bd0cecdc265848feba8222400d17..dce0acb929f169774c93c42e0a97c8df656ad342 100644 (file)
@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                *fplp = fpl;
                fpl->count = 0;
                fpl->max = SCM_MAX_FD;
+               fpl->user = NULL;
        }
        fpp = &fpl->fp[fpl->count];
 
@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                *fpp++ = file;
                fpl->count++;
        }
+
+       if (!fpl->user)
+               fpl->user = get_uid(current_user());
+
        return num;
 }
 
@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
                scm->fp = NULL;
                for (i=fpl->count-1; i>=0; i--)
                        fput(fpl->fp[i]);
+               free_uid(fpl->user);
                kfree(fpl);
        }
 }
@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
                for (i = 0; i < fpl->count; i++)
                        get_file(fpl->fp[i]);
                new_fpl->max = new_fpl->count;
+               new_fpl->user = get_uid(fpl->user);
        }
        return new_fpl;
 }
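
The new fpl->user field follows the standard get/put ownership rule: each
scm_fp_list owns exactly one uid reference, scm_fp_dup() takes another for
the copy, and each __scm_destroy() drops one. The shape of the rule reduced
to a plain counter (the kernel's get_uid()/free_uid() are atomic; this
sketch is not):

#include <assert.h>
#include <stdio.h>

struct user_struct { int refcount; };

static struct user_struct *get_uid(struct user_struct *u)
{
        u->refcount++;
        return u;
}

static void free_uid(struct user_struct *u)
{
        assert(u->refcount > 0);
        if (--u->refcount == 0)
                printf("last reference dropped\n");
}

int main(void)
{
        struct user_struct current_user = { .refcount = 1 };
        struct user_struct *a = get_uid(&current_user);  /* scm_fp_copy  */
        struct user_struct *b = get_uid(a);              /* scm_fp_dup   */

        free_uid(a);            /* __scm_destroy of the original list  */
        free_uid(b);            /* __scm_destroy of the duplicate list */
        printf("remaining refs: %d\n", current_user.refcount);   /* 1 */
        return 0;
}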
index b2df375ec9c2173a8132b8efa1c3062f0510284b..5bf88f58bee7405ff65f80487a64339b92a91bcb 100644 (file)
@@ -79,6 +79,8 @@
 
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
+EXPORT_SYMBOL(sysctl_max_skb_frags);
 
 /**
  *     skb_panic - private function for out-of-line support
index 95b6139d710c46825d1e43f825188d81fcb70f60..a6beb7b6ae556dff501413d6661d9c4655502e36 100644 (file)
@@ -26,6 +26,7 @@ static int zero = 0;
 static int one = 1;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
+static int max_skb_frags = MAX_SKB_FRAGS;
 
 static int net_msg_warn;       /* Unused, but still a sysctl */
 
@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "max_skb_frags",
+               .data           = &sysctl_max_skb_frags,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+               .extra2         = &max_skb_frags,
+       },
        { }
 };
 
index 5684e14932bd47e97b9d547307bfc50230e7be7d..902d606324a04ff81adbdc301a1ed58baa3f61f9 100644 (file)
@@ -824,26 +824,26 @@ lookup:
 
        if (sk->sk_state == DCCP_NEW_SYN_RECV) {
                struct request_sock *req = inet_reqsk(sk);
-               struct sock *nsk = NULL;
+               struct sock *nsk;
 
                sk = req->rsk_listener;
-               if (likely(sk->sk_state == DCCP_LISTEN)) {
-                       nsk = dccp_check_req(sk, skb, req);
-               } else {
+               if (unlikely(sk->sk_state != DCCP_LISTEN)) {
                        inet_csk_reqsk_queue_drop_and_put(sk, req);
                        goto lookup;
                }
+               sock_hold(sk);
+               nsk = dccp_check_req(sk, skb, req);
                if (!nsk) {
                        reqsk_put(req);
-                       goto discard_it;
+                       goto discard_and_relse;
                }
                if (nsk == sk) {
-                       sock_hold(sk);
                        reqsk_put(req);
                } else if (dccp_child_process(sk, nsk, skb)) {
                        dccp_v4_ctl_send_reset(sk, skb);
-                       goto discard_it;
+                       goto discard_and_relse;
                } else {
+                       sock_put(sk);
                        return 0;
                }
        }
index 9c6d0508e63a2ab7f13105cd91ea8c027c4a7557..b8608b71a66d34894722c17121754c403c74d946 100644 (file)
@@ -691,26 +691,26 @@ lookup:
 
        if (sk->sk_state == DCCP_NEW_SYN_RECV) {
                struct request_sock *req = inet_reqsk(sk);
-               struct sock *nsk = NULL;
+               struct sock *nsk;
 
                sk = req->rsk_listener;
-               if (likely(sk->sk_state == DCCP_LISTEN)) {
-                       nsk = dccp_check_req(sk, skb, req);
-               } else {
+               if (unlikely(sk->sk_state != DCCP_LISTEN)) {
                        inet_csk_reqsk_queue_drop_and_put(sk, req);
                        goto lookup;
                }
+               sock_hold(sk);
+               nsk = dccp_check_req(sk, skb, req);
                if (!nsk) {
                        reqsk_put(req);
-                       goto discard_it;
+                       goto discard_and_relse;
                }
                if (nsk == sk) {
-                       sock_hold(sk);
                        reqsk_put(req);
                } else if (dccp_child_process(sk, nsk, skb)) {
                        dccp_v6_ctl_send_reset(sk, skb);
-                       goto discard_it;
+                       goto discard_and_relse;
                } else {
+                       sock_put(sk);
                        return 0;
                }
        }
index cebd9d31e65a4a7539cab0bef71887736bc188f7..f6303b17546b3535d035b7f662cf43e2b2d59280 100644 (file)
@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
        if (err < 0)
                goto errout;
 
-       err = EINVAL;
+       err = -EINVAL;
        if (!tb[NETCONFA_IFINDEX])
                goto errout;
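
The one-character change above is the classic errno sign bug: in-kernel
convention is to return negative errno values, and callers test err < 0, so
a positive EINVAL reads as success. Reduced to a userspace check:

#include <errno.h>
#include <stdio.h>

static const char *verdict(int err)
{
        return err < 0 ? "error" : "success";
}

int main(void)
{
        printf("err =  EINVAL -> %s\n", verdict(EINVAL));    /* "success" (!) */
        printf("err = -EINVAL -> %s\n", verdict(-EINVAL));   /* "error"       */
        return 0;
}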
 
index 46b9c887bede0568ec378d3b809dda8fa463a166..64148914803a8443ecc0de2a45c141ae72cc0258 100644 (file)
@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
        reqsk_put(req);
 }
 
-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
-                             struct sock *child)
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+                                     struct request_sock *req,
+                                     struct sock *child)
 {
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 
        spin_lock(&queue->rskq_lock);
        if (unlikely(sk->sk_state != TCP_LISTEN)) {
                inet_child_forget(sk, req, child);
+               child = NULL;
        } else {
                req->sk = child;
                req->dl_next = NULL;
@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
                sk_acceptq_added(sk);
        }
        spin_unlock(&queue->rskq_lock);
+       return child;
 }
 EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
 
@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
        if (own_req) {
                inet_csk_reqsk_queue_drop(sk, req);
                reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
-               inet_csk_reqsk_queue_add(sk, req, child);
-               /* Warning: caller must not call reqsk_put(req);
-                * child stole last reference on it.
-                */
-               return child;
+               if (inet_csk_reqsk_queue_add(sk, req, child))
+                       return child;
        }
        /* Too bad, another child took ownership of the request, undo. */
        bh_unlock_sock(child);
index 1fe55ae817818abf9f10fa4c06a222476d3d3c45..b8a0607dab967fb337f1beb059664b7533780f98 100644 (file)
@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
        struct ipq *qp;
 
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+       skb_orphan(skb);
 
        /* Lookup (or create) queue header */
        qp = ip_find(net, ip_hdr(skb), user, vif);
index 5f73a7c03e27d334c771f144825c4a2f718d71ba..a50124260f5a4aaa98a3e4a582dbcbdbc236e370 100644 (file)
@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
                switch (cmsg->cmsg_type) {
                case IP_RETOPTS:
                        err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
+
+                       /* Our caller is responsible for freeing ipc->opt */
                        err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
                                             err < 40 ? err : 40);
                        if (err)
index 6fb869f646bf7a15b2f012cc1982c2a9f3d5935f..a04dee536b8ef8b5f4c1a085563d5d37b6fa118b 100644 (file)
@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
 {
        int err;
 
-       skb_orphan(skb);
-
        local_bh_disable();
        err = ip_defrag(net, skb, user);
        local_bh_enable();
index e89094ab5ddb8ce2b6eb2d78a9a9046b42287bd5..aa67e0e64b69dad791056706e986b4d0f15bbb88 100644 (file)
@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        if (msg->msg_controllen) {
                err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
-               if (err)
+               if (unlikely(err)) {
+                       kfree(ipc.opt);
                        return err;
+               }
                if (ipc.opt)
                        free = 1;
        }
index bc35f1842512bef8e4d87e76542d7bf11f8946fa..7113bae4e6a0c02726e0e11c33415b6779b7d04b 100644 (file)
@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        if (msg->msg_controllen) {
                err = ip_cmsg_send(net, msg, &ipc, false);
-               if (err)
+               if (unlikely(err)) {
+                       kfree(ipc.opt);
                        goto out;
+               }
                if (ipc.opt)
                        free = 1;
        }
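
Both hunks above plug the same leak: ip_cmsg_send() may allocate ipc.opt via
ip_options_get() and then fail on a later control message, and per the
comment added in ip_sockglue.c earlier the caller owns that allocation on
every path, error paths included. The contract in miniature, with an
invented parse_opts() helper:

#include <stdlib.h>
#include <string.h>

/* may allocate into *optp and still fail afterwards (invented helper) */
static int parse_opts(char **optp, int fail_late)
{
        *optp = malloc(40);
        if (!*optp)
                return -1;
        memset(*optp, 0, 40);
        return fail_late ? -1 : 0;      /* failure after the allocation */
}

int main(void)
{
        char *opt = NULL;

        if (parse_opts(&opt, 1)) {
                free(opt);              /* the fix: free on the error path */
                return 1;
        }
        /* ... use opt ... */
        free(opt);
        return 0;
}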
index 85f184e429c63c8c426f58c779dc3544cf2e2a54..02c62299d717b9f6c38a5227e3b3ae376e0015b6 100644 (file)
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly  = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly                = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly      = 256;
 
+static int ip_rt_gc_timeout __read_mostly      = RT_GC_TIMEOUT;
 /*
  *     Interface to generic destination cache.
  */
@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
                                struct fib_nh *nh = &FIB_RES_NH(res);
 
                                update_or_create_fnhe(nh, fl4->daddr, new_gw,
-                                                     0, 0);
+                                               0, jiffies + ip_rt_gc_timeout);
                        }
                        if (kill_route)
                                rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
 #endif
 }
 
+static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+{
+       struct fnhe_hash_bucket *hash;
+       struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+       u32 hval = fnhe_hashfun(daddr);
+
+       spin_lock_bh(&fnhe_lock);
+
+       hash = rcu_dereference_protected(nh->nh_exceptions,
+                                        lockdep_is_held(&fnhe_lock));
+       hash += hval;
+
+       fnhe_p = &hash->chain;
+       fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+       while (fnhe) {
+               if (fnhe->fnhe_daddr == daddr) {
+                       rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+                               fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+                       fnhe_flush_routes(fnhe);
+                       kfree_rcu(fnhe, rcu);
+                       break;
+               }
+               fnhe_p = &fnhe->fnhe_next;
+               fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+                                                lockdep_is_held(&fnhe_lock));
+       }
+
+       spin_unlock_bh(&fnhe_lock);
+}
+
 /* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
                           const struct fib_result *res,
@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
 
        fnhe = find_exception(&FIB_RES_NH(*res), daddr);
        if (do_cache) {
-               if (fnhe)
+               if (fnhe) {
                        rth = rcu_dereference(fnhe->fnhe_rth_input);
-               else
-                       rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+                       if (rth && rth->dst.expires &&
+                           time_after(jiffies, rth->dst.expires)) {
+                               ip_del_fnhe(&FIB_RES_NH(*res), daddr);
+                               fnhe = NULL;
+                       } else {
+                               goto rt_cache;
+                       }
+               }
+
+               rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
 
+rt_cache:
                if (rt_cache_valid(rth)) {
                        skb_dst_set_noref(skb, &rth->dst);
                        goto out;
@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
                struct fib_nh *nh = &FIB_RES_NH(*res);
 
                fnhe = find_exception(nh, fl4->daddr);
-               if (fnhe)
+               if (fnhe) {
                        prth = &fnhe->fnhe_rth_output;
-               else {
-                       if (unlikely(fl4->flowi4_flags &
-                                    FLOWI_FLAG_KNOWN_NH &&
-                                    !(nh->nh_gw &&
-                                      nh->nh_scope == RT_SCOPE_LINK))) {
-                               do_cache = false;
-                               goto add;
+                       rth = rcu_dereference(*prth);
+                       if (rth && rth->dst.expires &&
+                           time_after(jiffies, rth->dst.expires)) {
+                               ip_del_fnhe(nh, fl4->daddr);
+                               fnhe = NULL;
+                       } else {
+                               goto rt_cache;
                        }
-                       prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
                }
+
+               if (unlikely(fl4->flowi4_flags &
+                            FLOWI_FLAG_KNOWN_NH &&
+                            !(nh->nh_gw &&
+                              nh->nh_scope == RT_SCOPE_LINK))) {
+                       do_cache = false;
+                       goto add;
+               }
+               prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
                rth = rcu_dereference(*prth);
+
+rt_cache:
                if (rt_cache_valid(rth)) {
                        dst_hold(&rth->dst);
                        return rth;
@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
 }
 
 #ifdef CONFIG_SYSCTL
-static int ip_rt_gc_timeout __read_mostly      = RT_GC_TIMEOUT;
 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_gc_elasticity __read_mostly   = 8;
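
ip_del_fnhe() above uses the pointer-to-pointer unlink idiom: walking with
the address of the previous next-pointer slot lets the matching entry be
spliced out whether it sits at the head or mid-chain, with no special case.
Stripped of the RCU annotations and locking, the idiom looks like this:

#include <stdio.h>
#include <stdlib.h>

struct fnhe { unsigned int daddr; struct fnhe *next; };

static void del_fnhe(struct fnhe **chain, unsigned int daddr)
{
        struct fnhe **pp = chain, *f;

        while ((f = *pp) != NULL) {
                if (f->daddr == daddr) {
                        *pp = f->next;  /* splice out, head or middle alike */
                        free(f);
                        return;
                }
                pp = &f->next;
        }
}

int main(void)
{
        struct fnhe *head = NULL;
        unsigned int addrs[] = { 1, 2, 3 };
        int i;

        for (i = 0; i < 3; i++) {
                struct fnhe *f = malloc(sizeof(*f));

                if (!f)
                        return 1;
                f->daddr = addrs[i];
                f->next = head;
                head = f;
        }
        del_fnhe(&head, 2);
        for (struct fnhe *f = head; f; f = f->next)
                printf("%u\n", f->daddr);       /* prints 3, then 1 */
        return 0;
}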
index c82cca18c90fbd67c2daf71c6769ee5fef21d2a9..036a76ba2ac293bcf6e33655cbc7508e74f7cf8f 100644 (file)
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
+#include <asm/unaligned.h>
 #include <net/busy_poll.h>
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
@@ -938,7 +939,7 @@ new_segment:
 
                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
-               if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+               if (!can_coalesce && i >= sysctl_max_skb_frags) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
@@ -1211,7 +1212,7 @@ new_segment:
 
                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
-                               if (i == MAX_SKB_FRAGS || !sg) {
+                               if (i == sysctl_max_skb_frags || !sg) {
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                }
@@ -2637,6 +2638,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;
        unsigned int start;
+       u64 rate64;
        u32 rate;
 
        memset(info, 0, sizeof(*info));
@@ -2702,15 +2704,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        info->tcpi_total_retrans = tp->total_retrans;
 
        rate = READ_ONCE(sk->sk_pacing_rate);
-       info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_pacing_rate);
 
        rate = READ_ONCE(sk->sk_max_pacing_rate);
-       info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_max_pacing_rate);
 
        do {
                start = u64_stats_fetch_begin_irq(&tp->syncp);
-               info->tcpi_bytes_acked = tp->bytes_acked;
-               info->tcpi_bytes_received = tp->bytes_received;
+               put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
+               put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
        } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
        info->tcpi_segs_out = tp->segs_out;
        info->tcpi_segs_in = tp->segs_in;
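
The put_unaligned() conversions above exist because the u64 fields of the
UAPI struct tcp_info may land on 4-byte boundaries for 32-bit userland, and
a plain 64-bit store to such a slot can fault on strict-alignment
architectures. The portable userspace equivalent is a memcpy-backed store
(put_unaligned_u64() here is a local stand-in, not the kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_unaligned_u64(uint64_t val, void *p)
{
        memcpy(p, &val, sizeof(val));   /* byte-wise, alignment-safe */
}

int main(void)
{
        unsigned char buf[12];
        uint64_t rate64 = ~0ULL, back;

        put_unaligned_u64(rate64, buf + 4);     /* deliberately misaligned */

        memcpy(&back, buf + 4, sizeof(back));
        printf("round-trip ok: %s\n", back == rate64 ? "yes" : "no");
        return 0;
}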
index d8841a2f15691fd80c46fbaa62e2431e9ca16003..8c7e63163e9258ae328f02271100055274339969 100644 (file)
@@ -312,7 +312,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
 
 
 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 {
        struct request_sock *req = inet_reqsk(sk);
        struct net *net = sock_net(sk);
@@ -324,7 +324,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
 
        if (seq != tcp_rsk(req)->snt_isn) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-       } else {
+       } else if (abort) {
                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
@@ -384,7 +384,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        }
        seq = ntohl(th->seq);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
-               return tcp_req_err(sk, seq);
+               return tcp_req_err(sk, seq,
+                                 type == ICMP_PARAMETERPROB ||
+                                 type == ICMP_TIME_EXCEEDED ||
+                                 (type == ICMP_DEST_UNREACH &&
+                                  (code == ICMP_NET_UNREACH ||
+                                   code == ICMP_HOST_UNREACH)));
 
        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
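Reviewer note: the hunk above stops aborting a TCP_NEW_SYN_RECV request socket on every ICMP error; only a few error classes are now treated as fatal. A userspace restatement of that condition, compiled against the glibc ICMP constants (the helper name is illustrative):

#include <netinet/ip_icmp.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the condition added above: only these ICMP errors are fatal
 * for a request socket; anything else is a soft error and the
 * handshake keeps retrying. */
static bool icmp_fatal_for_reqsk(int type, int code)
{
        return type == ICMP_PARAMETERPROB ||
               type == ICMP_TIME_EXCEEDED ||
               (type == ICMP_DEST_UNREACH &&
                (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH));
}

int main(void)
{
        printf("%d\n", icmp_fatal_for_reqsk(ICMP_DEST_UNREACH,
                                            ICMP_HOST_UNREACH));     /* 1 */
        printf("%d\n", icmp_fatal_for_reqsk(ICMP_SOURCE_QUENCH, 0)); /* 0 */
        return 0;
}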
@@ -705,7 +710,8 @@ release_sk1:
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct net *net,
+                           struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
@@ -720,7 +726,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                        ];
        } rep;
        struct ip_reply_arg arg;
-       struct net *net = dev_net(skb_dst(skb)->dev);
 
        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));
@@ -782,7 +787,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-       tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+       tcp_v4_send_ack(sock_net(sk), skb,
+                       tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
@@ -801,8 +807,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
-       tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
-                       tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+       u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
+                                            tcp_sk(sk)->snd_nxt;
+
+       tcp_v4_send_ack(sock_net(sk), skb, seq,
                        tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
                        tcp_time_stamp,
                        req->ts_recent,
@@ -1586,28 +1594,30 @@ process:
 
        if (sk->sk_state == TCP_NEW_SYN_RECV) {
                struct request_sock *req = inet_reqsk(sk);
-               struct sock *nsk = NULL;
+               struct sock *nsk;
 
                sk = req->rsk_listener;
-               if (tcp_v4_inbound_md5_hash(sk, skb))
-                       goto discard_and_relse;
-               if (likely(sk->sk_state == TCP_LISTEN)) {
-                       nsk = tcp_check_req(sk, skb, req, false);
-               } else {
+               if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
+                       reqsk_put(req);
+                       goto discard_it;
+               }
+               if (unlikely(sk->sk_state != TCP_LISTEN)) {
                        inet_csk_reqsk_queue_drop_and_put(sk, req);
                        goto lookup;
                }
+               sock_hold(sk);
+               nsk = tcp_check_req(sk, skb, req, false);
                if (!nsk) {
                        reqsk_put(req);
-                       goto discard_it;
+                       goto discard_and_relse;
                }
                if (nsk == sk) {
-                       sock_hold(sk);
                        reqsk_put(req);
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v4_send_reset(nsk, skb);
-                       goto discard_it;
+                       goto discard_and_relse;
                } else {
+                       sock_put(sk);
                        return 0;
                }
        }
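Reviewer note: the reordering above takes sock_hold() on the listener before tcp_check_req() can publish a child socket, and balances it with sock_put() or discard_and_relse on every exit path. A toy analogue of that reference-counting discipline (names are mine):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int refs;
};

static void hold(struct obj *o)
{
        atomic_fetch_add(&o->refs, 1);
}

static void put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refs, 1) == 1)
                free(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->refs, 1);
        hold(o);        /* pin before the call that may hand o out ... */
        /* ... do work that may publish o to another context ... */
        put(o);         /* ... and drop on every exit path */
        put(o);         /* final reference */
        return 0;
}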
index c43890848641948b1e9c55244614ac9a48756753..7f8ab46adf616474221805b33dd94b237be48329 100644 (file)
@@ -966,8 +966,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        if (msg->msg_controllen) {
                err = ip_cmsg_send(sock_net(sk), msg, &ipc,
                                   sk->sk_family == AF_INET6);
-               if (err)
+               if (unlikely(err)) {
+                       kfree(ipc.opt);
                        return err;
+               }
                if (ipc.opt)
                        free = 1;
                connected = 0;
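Reviewer note: the udp_sendmsg hunk above plugs a leak: ip_cmsg_send() may allocate ipc.opt before failing, so the caller must free it on the error path. A userspace sketch of the same ownership contract (all names illustrative):

#include <stdlib.h>
#include <string.h>

static int parse_opt(const char *src, char **opt)
{
        *opt = strdup(src);     /* allocation happens first ... */
        if (!*opt)
                return -1;
        if (src[0] == '\0')     /* ... then validation may fail, */
                return -1;      /* leaving *opt owned by the caller */
        return 0;
}

int send_with_opt(const char *src)
{
        char *opt = NULL;

        if (parse_opt(src, &opt)) {
                free(opt);      /* mirrors the added kfree(ipc.opt) */
                return -1;
        }
        /* ... use opt ... */
        free(opt);
        return 0;
}

int main(void)
{
        return send_with_opt("example");
}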
index 1f21087accab258e5be6cc104f8b8ced13fedd6d..e8d3da0817d353d5d854e9c61e30a65a34f0e4c6 100644 (file)
@@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
        if (err < 0)
                goto errout;
 
-       err = EINVAL;
+       err = -EINVAL;
        if (!tb[NETCONFA_IFINDEX])
                goto errout;
 
@@ -3506,6 +3506,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 {
        struct inet6_dev *idev = ifp->idev;
        struct net_device *dev = idev->dev;
+       bool notify = false;
 
        addrconf_join_solict(dev, &ifp->addr);
 
@@ -3551,7 +3552,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
                        /* Because optimistic nodes can use this address,
                         * notify listeners. If DAD fails, RTM_DELADDR is sent.
                         */
-                       ipv6_ifa_notify(RTM_NEWADDR, ifp);
+                       notify = true;
                }
        }
 
@@ -3559,6 +3560,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 out:
        spin_unlock(&ifp->lock);
        read_unlock_bh(&idev->lock);
+       if (notify)
+               ipv6_ifa_notify(RTM_NEWADDR, ifp);
 }
 
 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
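Reviewer note: the addrconf change above records the decision under the spinlock and fires ipv6_ifa_notify() only after ifp->lock and idev->lock are released, since the notifier path may sleep or re-take those locks. A minimal pthread sketch of the same shape (names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool optimistic;

static void notify_listeners(void)
{
        /* stand-in for ipv6_ifa_notify(); must run without state_lock */
        puts("RTM_NEWADDR");
}

void dad_begin(void)
{
        bool notify = false;

        pthread_mutex_lock(&state_lock);
        if (optimistic)
                notify = true;          /* just remember the decision */
        pthread_mutex_unlock(&state_lock);

        if (notify)
                notify_listeners();     /* deferred until unlocked */
}

int main(void)
{
        optimistic = true;
        dad_begin();
        return 0;
}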
index 517c55b01ba84b55a0004ce9505d14c4a3951cfc..428162155280ca2af782ea9fd9fa26e0d1666d89 100644 (file)
@@ -162,6 +162,9 @@ ipv4_connected:
        fl6.fl6_dport = inet->inet_dport;
        fl6.fl6_sport = inet->inet_sport;
 
+       if (!fl6.flowi6_oif)
+               fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
        if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
                fl6.flowi6_oif = np->mcast_oif;
 
index 1f9ebe3cbb4ac042edd0b05754d1fc03cdfe73cd..dc2db4f7b182c4ebc1a8a51487a3d2e893955df9 100644 (file)
@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                }
                spin_lock_bh(&ip6_sk_fl_lock);
                for (sflp = &np->ipv6_fl_list;
-                    (sfl = rcu_dereference(*sflp)) != NULL;
+                    (sfl = rcu_dereference_protected(*sflp,
+                                                     lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
                     sflp = &sfl->next) {
                        if (sfl->fl->label == freq.flr_label) {
                                if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
                                        np->flow_label &= ~IPV6_FLOWLABEL_MASK;
-                               *sflp = rcu_dereference(sfl->next);
+                               *sflp = sfl->next;
                                spin_unlock_bh(&ip6_sk_fl_lock);
                                fl_release(sfl->fl);
                                kfree_rcu(sfl, rcu);
index 6473889f1736f2e6b6dadc9a44367d17f4dc0b23..31144c486c521d98fca1e9099eb9801ae3e31854 100644 (file)
@@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
        struct rt6_info *rt;
 #endif
        int err;
+       int flags = 0;
 
        /* The correct way to handle this would be to do
         * ip6_route_get_saddr, and then ip6_route_output; however,
@@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
                        dst_release(*dst);
                        *dst = NULL;
                }
+
+               if (fl6->flowi6_oif)
+                       flags |= RT6_LOOKUP_F_IFACE;
        }
 
        if (!*dst)
-               *dst = ip6_route_output(net, sk, fl6);
+               *dst = ip6_route_output_flags(net, sk, fl6, flags);
 
        err = (*dst)->error;
        if (err)
index 826e6aa44f8d42c9c2e815ee134a4f58f1f29c80..3f164d3aaee2eaa7aa246d6c100ef405fd3016e0 100644 (file)
@@ -1174,11 +1174,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
        return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
 }
 
-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
-                                   struct flowi6 *fl6)
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+                                        struct flowi6 *fl6, int flags)
 {
        struct dst_entry *dst;
-       int flags = 0;
        bool any_src;
 
        dst = l3mdev_rt6_dst_by_oif(net, fl6);
@@ -1199,7 +1198,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
 
        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
 }
-EXPORT_SYMBOL(ip6_route_output);
+EXPORT_SYMBOL_GPL(ip6_route_output_flags);
 
 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 {
index bd100b47c7171ce1d85ea5a437c13081a60cd29c..b8d405623f4f07f8f50fdb590655019cb1cadd8a 100644 (file)
@@ -328,6 +328,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct tcp_sock *tp;
        __u32 seq, snd_una;
        struct sock *sk;
+       bool fatal;
        int err;
 
        sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -346,8 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                return;
        }
        seq = ntohl(th->seq);
+       fatal = icmpv6_err_convert(type, code, &err);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
-               return tcp_req_err(sk, seq);
+               return tcp_req_err(sk, seq, fatal);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -401,7 +403,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                goto out;
        }
 
-       icmpv6_err_convert(type, code, &err);
 
        /* Might be for a request_sock */
        switch (sk->sk_state) {
@@ -1387,7 +1388,7 @@ process:
 
        if (sk->sk_state == TCP_NEW_SYN_RECV) {
                struct request_sock *req = inet_reqsk(sk);
-               struct sock *nsk = NULL;
+               struct sock *nsk;
 
                sk = req->rsk_listener;
                tcp_v6_fill_cb(skb, hdr, th);
@@ -1395,24 +1396,24 @@ process:
                        reqsk_put(req);
                        goto discard_it;
                }
-               if (likely(sk->sk_state == TCP_LISTEN)) {
-                       nsk = tcp_check_req(sk, skb, req, false);
-               } else {
+               if (unlikely(sk->sk_state != TCP_LISTEN)) {
                        inet_csk_reqsk_queue_drop_and_put(sk, req);
                        goto lookup;
                }
+               sock_hold(sk);
+               nsk = tcp_check_req(sk, skb, req, false);
                if (!nsk) {
                        reqsk_put(req);
-                       goto discard_it;
+                       goto discard_and_relse;
                }
                if (nsk == sk) {
-                       sock_hold(sk);
                        reqsk_put(req);
                        tcp_v6_restore_cb(skb);
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v6_send_reset(nsk, skb);
-                       goto discard_it;
+                       goto discard_and_relse;
                } else {
+                       sock_put(sk);
                        return 0;
                }
        }
index 435608c4306d4afccf690eda945d4cb7eb962c6b..20ab7b2ec4632a3f9b271f05cb71aceff764505c 100644 (file)
@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;
 
+       if (addr_len < sizeof(struct sockaddr_iucv))
+               return -EINVAL;
+
        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
index f93c5be612a7cb43611708ebb66c25b4db0d27cf..2caaa84ce92dac811c7813ebbb233c9596d03ca0 100644 (file)
@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
        ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
                                  NLM_F_ACK, tunnel, cmd);
 
-       if (ret >= 0)
-               return genlmsg_multicast_allns(family, msg, 0,  0, GFP_ATOMIC);
+       if (ret >= 0) {
+               ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+               /* We don't care if no one is listening */
+               if (ret == -ESRCH)
+                       ret = 0;
+               return ret;
+       }
 
        nlmsg_free(msg);
 
@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
        ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
                                   NLM_F_ACK, session, cmd);
 
-       if (ret >= 0)
-               return genlmsg_multicast_allns(family, msg, 0,  0, GFP_ATOMIC);
+       if (ret >= 0) {
+               ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+               /* We don't care if no one is listening */
+               if (ret == -ESRCH)
+                       ret = 0;
+               return ret;
+       }
 
        nlmsg_free(msg);
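Reviewer note: both l2tp hunks above stop propagating -ESRCH from genlmsg_multicast_allns(), because a notification with no subscribers is not a failure the caller should see. Sketch of that error-mapping, with a stand-in for the multicast call:

#include <errno.h>
#include <stdio.h>

static int multicast_event(void)
{
        return -ESRCH;          /* stand-in: nobody is listening */
}

int notify(void)
{
        int ret = multicast_event();

        if (ret == -ESRCH)      /* we don't care if no one listens */
                ret = 0;
        return ret;
}

int main(void)
{
        printf("%d\n", notify());       /* prints 0 */
        return 0;
}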
 
index 337bb5d7800384b8acd52664951be429668e3f3c..6a12b0f5cac871e7906b7fee7a5da868b21d25c7 100644 (file)
@@ -1732,7 +1732,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
                if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
                        continue;
                sdata->u.ibss.last_scan_completed = jiffies;
-               ieee80211_queue_work(&local->hw, &sdata->work);
        }
        mutex_unlock(&local->iflist_mtx);
 }
index fa28500f28fd9fd8d452602b0407741589f12e36..6f85b6ab8e51015ab9a0520822fc4daff6af92b0 100644 (file)
@@ -1370,17 +1370,6 @@ out:
        sdata_unlock(sdata);
 }
 
-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
-{
-       struct ieee80211_sub_if_data *sdata;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(sdata, &local->interfaces, list)
-               if (ieee80211_vif_is_mesh(&sdata->vif) &&
-                   ieee80211_sdata_running(sdata))
-                       ieee80211_queue_work(&local->hw, &sdata->work);
-       rcu_read_unlock();
-}
 
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
 {
index a1596344c3ba1c5fe590767f3c0483310d63121a..4a8019f79fb2cba035a316b0a8d56a7e8f25d614 100644 (file)
@@ -362,14 +362,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
        return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
 }
 
-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
-
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
 void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_stop(void);
 #else
-static inline void
-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
 static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
 { return false; }
 static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
index 3aa04344942bfc06c75341abf841646745c58c7f..83097c3832d129228226dbdcc3e2f2d8af188671 100644 (file)
@@ -4003,8 +4003,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
                if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
                        ieee80211_queue_work(&sdata->local->hw,
                                             &sdata->u.mgd.monitor_work);
-               /* and do all the other regular work too */
-               ieee80211_queue_work(&sdata->local->hw, &sdata->work);
        }
 }
 
index a413e52f7691418116d928f684a0fc27b308c2d6..acbe182b75d1510473ba73ba664b63122f895195 100644 (file)
@@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
        bool was_scanning = local->scanning;
        struct cfg80211_scan_request *scan_req;
        struct ieee80211_sub_if_data *scan_sdata;
+       struct ieee80211_sub_if_data *sdata;
 
        lockdep_assert_held(&local->mtx);
 
@@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
 
        ieee80211_mlme_notify_scan_completed(local);
        ieee80211_ibss_notify_scan_completed(local);
-       ieee80211_mesh_notify_scan_completed(local);
+
+       /* Requeue all the work that might have been ignored while
+        * the scan was in progress; if there was none this will
+        * just be a no-op for the particular interface.
+        */
+       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+               if (ieee80211_sdata_running(sdata))
+                       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+       }
+
        if (was_scanning)
                ieee80211_start_next_roc(local);
 }
index 1605691d94144aee0fc50ffb17be05eca2b59675..d933cb89efac18651174ea34ae2cb3d6f23a1701 100644 (file)
@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
        int err;
        struct vxlan_config conf = {
                .no_share = true,
-               .flags = VXLAN_F_COLLECT_METADATA,
+               .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
        };
 
        if (!options) {
index f53bf3b6558b094b6e1b379568620157b4ffae17..cf5b69ab18294bb2fa36a00f2a19dbf02d0f3045 100644 (file)
@@ -1095,17 +1095,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
        return res;
 }
 
-static bool rfkill_readable(struct rfkill_data *data)
-{
-       bool r;
-
-       mutex_lock(&data->mtx);
-       r = !list_empty(&data->events);
-       mutex_unlock(&data->mtx);
-
-       return r;
-}
-
 static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                               size_t count, loff_t *pos)
 {
@@ -1122,8 +1111,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                        goto out;
                }
                mutex_unlock(&data->mtx);
+               /* since we re-check and it just compares pointers,
+                * using !list_empty() without locking isn't a problem
+                */
                ret = wait_event_interruptible(data->read_wait,
-                                              rfkill_readable(data));
+                                              !list_empty(&data->events));
                mutex_lock(&data->mtx);
 
                if (ret)
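Reviewer note: the rfkill hunk can drop the lock around the wait because the predicate is only a pointer comparison and is re-checked with the mutex held on the next loop iteration; a stale read just costs one extra pass. A userspace analogue of that re-check discipline (pthread condition variables make the re-check mandatory anyway, due to spurious wakeups):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool have_events;

void consume_event(void)
{
        pthread_mutex_lock(&mtx);
        while (!have_events)            /* re-check after every wake */
                pthread_cond_wait(&cond, &mtx);
        have_events = false;            /* consume under the lock */
        pthread_mutex_unlock(&mtx);
}

int main(void)
{
        have_events = true;     /* pre-arm so the demo doesn't block */
        consume_event();
        return 0;
}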
index b5c2cf2aa6d4bc00e5c4bac2b3326db3d785c8be..af1acf0098666f3ffa8bc5bd168525f550478fef 100644 (file)
@@ -1852,6 +1852,7 @@ reset:
        }
 
        tp = old_tp;
+       protocol = tc_skb_protocol(skb);
        goto reclassify;
 #endif
 }
index 3d9ea9a48289af7324d2c96d82938dd978634ce5..8b4ff315695ed1c9a9a24defab6911f787ae7201 100644 (file)
@@ -60,6 +60,8 @@
 #include <net/inet_common.h>
 #include <net/inet_ecn.h>
 
+#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
+
 /* Global data structures. */
 struct sctp_globals sctp_globals __read_mostly;
 
@@ -1352,6 +1354,8 @@ static __init int sctp_init(void)
        unsigned long limit;
        int max_share;
        int order;
+       int num_entries;
+       int max_entry_order;
 
        sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
 
@@ -1404,14 +1408,24 @@ static __init int sctp_init(void)
 
        /* Size and allocate the association hash table.
         * The methodology is similar to that of the tcp hash tables,
+        * though not identical.  Start by computing a goal size.
         */
        if (totalram_pages >= (128 * 1024))
                goal = totalram_pages >> (22 - PAGE_SHIFT);
        else
                goal = totalram_pages >> (24 - PAGE_SHIFT);
 
-       for (order = 0; (1UL << order) < goal; order++)
-               ;
+       /* Then compute the page order for said goal */
+       order = get_order(goal);
+
+       /* Now compute the required page order for the maximum sized table we
+        * want to create
+        */
+       max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
+                                   sizeof(struct sctp_bind_hashbucket));
+
+       /* Limit the page order by that maximum hash table size */
+       order = min(order, max_entry_order);
 
        do {
                sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
@@ -1445,20 +1459,35 @@ static __init int sctp_init(void)
                INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
        }
 
-       /* Allocate and initialize the SCTP port hash table.  */
+       /* Allocate and initialize the SCTP port hash table.
+        * Note that order is initialized to start at the maximum-sized
+        * table we want to support.  If we can't get that many pages,
+        * reduce the order and try again.
+        */
        do {
-               sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
-                                       sizeof(struct sctp_bind_hashbucket);
-               if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
-                       continue;
                sctp_port_hashtable = (struct sctp_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
        } while (!sctp_port_hashtable && --order > 0);
+
        if (!sctp_port_hashtable) {
                pr_err("Failed bind hash alloc\n");
                status = -ENOMEM;
                goto err_bhash_alloc;
        }
+
+       /* Now compute the number of entries that will fit in the
+        * port hash space we allocated
+        */
+       num_entries = (1UL << order) * PAGE_SIZE /
+                     sizeof(struct sctp_bind_hashbucket);
+
+       /* And finish by rounding it down to the nearest power of two.
+        * This wastes some memory, of course, but it's needed because
+        * the hash function operates on the assumption that the number
+        * of entries is a power of two.
+        */
+       sctp_port_hashsize = rounddown_pow_of_two(num_entries);
+
        for (i = 0; i < sctp_port_hashsize; i++) {
                spin_lock_init(&sctp_port_hashtable[i].lock);
                INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
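Reviewer note: the sizing logic above computes how many buckets fit in the pages actually obtained, then rounds down to a power of two so a mask-based hash touches every bucket and only valid ones. A userspace sketch of the arithmetic (PAGE_SIZE and the bucket size are demo assumptions):

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumption for the demo */

/* Mirrors rounddown_pow_of_two() in the hunk above (for n >= 1). */
static unsigned long rounddown_pow2(unsigned long n)
{
        unsigned long p = 1;

        while (p <= n / 2)
                p *= 2;
        return p;
}

int main(void)
{
        unsigned long order = 3;                /* pages = 1 << order */
        unsigned long bucket_size = 24;         /* stand-in struct size */
        unsigned long entries = (1UL << order) * PAGE_SIZE / bucket_size;

        printf("%lu entries -> %lu usable buckets\n",
               entries, rounddown_pow2(entries));
        return 0;
}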
index ef1d90fdc773d2ca4104c9dabc96d1058e99629a..be1489fc3234bd2ed0281d50f7e11a2a205d38d1 100644 (file)
@@ -5542,6 +5542,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
        struct sctp_hmac_algo_param *hmacs;
        __u16 data_len = 0;
        u32 num_idents;
+       int i;
 
        if (!ep->auth_enable)
                return -EACCES;
@@ -5559,8 +5560,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
                return -EFAULT;
        if (put_user(num_idents, &p->shmac_num_idents))
                return -EFAULT;
-       if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
-               return -EFAULT;
+       for (i = 0; i < num_idents; i++) {
+               __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
+
+               if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
+                       return -EFAULT;
+       }
        return 0;
 }
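Reviewer note: the HMAC ids live in network byte order inside the association parameter, so the old bulk copy_to_user() leaked big-endian values to userspace on little-endian hosts; the hunk above converts one entry at a time. The same per-element conversion in plain userspace C:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t wire[3] = { htons(1), htons(3), htons(2) };
        uint16_t host[3];
        int i;

        for (i = 0; i < 3; i++)
                host[i] = ntohs(wire[i]);       /* convert, then copy out */
        printf("%u %u %u\n", host[0], host[1], host[2]);  /* 1 3 2 */
        return 0;
}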
 
@@ -6640,6 +6645,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
 
                        if (cmsgs->srinfo->sinfo_flags &
                            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+                             SCTP_SACK_IMMEDIATELY |
                              SCTP_ABORT | SCTP_EOF))
                                return -EINVAL;
                        break;
@@ -6663,6 +6669,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
 
                        if (cmsgs->sinfo->snd_flags &
                            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+                             SCTP_SACK_IMMEDIATELY |
                              SCTP_ABORT | SCTP_EOF))
                                return -EINVAL;
                        break;
index 5e4f815c2b34d22fba11ac2443e9d66bc27c779c..21e20353178e05e4940512141ac50fc41d5f4954 100644 (file)
@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
-               while (len < bufsize) {
+               while (len < bufsize - 1) {
                        int h, l;
 
                        h = hex_to_bin(bp[0]);
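Reviewer note: the qword_get bound above is an off-by-one fix: the loop must decode at most bufsize - 1 bytes so the terminating NUL written afterwards stays inside the buffer. A self-contained sketch of the fixed bound (helper names are mine; assumes bufsize >= 1):

#include <stdio.h>

static int hex_nibble(char c)
{
        if (c >= '0' && c <= '9')
                return c - '0';
        if (c >= 'a' && c <= 'f')
                return c - 'a' + 10;
        return -1;
}

static int decode_hex(const char *bp, char *dest, int bufsize)
{
        int len = 0;

        while (len < bufsize - 1) {     /* reserve room for the NUL */
                int h = hex_nibble(bp[0]);
                int l;

                if (h < 0)
                        break;
                l = hex_nibble(bp[1]);
                if (l < 0)
                        break;
                dest[len++] = (char)((h << 4) | l);
                bp += 2;
        }
        dest[len] = '\0';
        return len;
}

int main(void)
{
        char out[4];

        /* 3 decoded bytes plus the NUL exactly fill the 4-byte buffer */
        printf("%d %s\n", decode_hex("414243", out, sizeof(out)), out);
        return 0;
}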
index f34e535e93bdf780d7093c84e32a6288029d5442..d5d7132ac847307ac076a9fd3320d1ea1425398d 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/workqueue.h>
 #include <linux/if_vlan.h>
+#include <linux/rtnetlink.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
 
@@ -565,7 +566,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
 }
 EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
 
-static DEFINE_MUTEX(switchdev_mutex);
 static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
 
 /**
@@ -580,9 +580,9 @@ int register_switchdev_notifier(struct notifier_block *nb)
 {
        int err;
 
-       mutex_lock(&switchdev_mutex);
+       rtnl_lock();
        err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
-       mutex_unlock(&switchdev_mutex);
+       rtnl_unlock();
        return err;
 }
 EXPORT_SYMBOL_GPL(register_switchdev_notifier);
@@ -598,9 +598,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb)
 {
        int err;
 
-       mutex_lock(&switchdev_mutex);
+       rtnl_lock();
        err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
-       mutex_unlock(&switchdev_mutex);
+       rtnl_unlock();
        return err;
 }
 EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
@@ -614,16 +614,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
  *     Call all network notifier blocks. This should be called by driver
  *     when it needs to propagate hardware event.
  *     Return values are same as for atomic_notifier_call_chain().
+ *     rtnl_lock must be held.
  */
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                             struct switchdev_notifier_info *info)
 {
        int err;
 
+       ASSERT_RTNL();
+
        info->dev = dev;
-       mutex_lock(&switchdev_mutex);
        err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
-       mutex_unlock(&switchdev_mutex);
        return err;
 }
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
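Reviewer note: the switchdev hunks replace a private mutex with the global rtnl lock and turn the locking rule into a checked precondition via ASSERT_RTNL(), so the raw notifier chain is always walked under the same lock as the rest of the control path. A userspace sketch of that assert-the-precondition idiom (illustrative names, not a kernel API):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int cfg_lock_held;

static void cfg_lock_take(void)
{
        pthread_mutex_lock(&cfg_lock);
        cfg_lock_held = 1;
}

static void cfg_lock_drop(void)
{
        cfg_lock_held = 0;
        pthread_mutex_unlock(&cfg_lock);
}

static void call_notifiers(void)
{
        assert(cfg_lock_held);  /* mirrors ASSERT_RTNL() */
        /* ... walk the raw, otherwise unprotected notifier chain ... */
}

int main(void)
{
        cfg_lock_take();
        call_notifiers();
        cfg_lock_drop();
        return 0;
}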
index 9dc239dfe19211f2a6367f9581bf79527f73af31..92e367a0a5cecc32da7779eedea2e0fc417273b0 100644 (file)
@@ -399,8 +399,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
 
        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
                          NLM_F_MULTI, TIPC_NL_LINK_GET);
-       if (!hdr)
+       if (!hdr) {
+               tipc_bcast_unlock(net);
                return -EMSGSIZE;
+       }
 
        attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
        if (!attrs)
index 20cddec0a43c7227a6e3aff860286ec04f9cd423..3926b561f87311b691d6d7ab3ce5cae8ed4b340d 100644 (file)
@@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
        skb_queue_head_init(&n_ptr->bc_entry.inputq1);
        __skb_queue_head_init(&n_ptr->bc_entry.arrvq);
        skb_queue_head_init(&n_ptr->bc_entry.inputq2);
-       hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
-       list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
-               if (n_ptr->addr < temp_node->addr)
-                       break;
-       }
-       list_add_tail_rcu(&n_ptr->list, &temp_node->list);
        n_ptr->state = SELF_DOWN_PEER_LEAVING;
        n_ptr->signature = INVALID_NODE_SIG;
        n_ptr->active_links[0] = INVALID_BEARER_ID;
@@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
        tipc_node_get(n_ptr);
        setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
        n_ptr->keepalive_intv = U32_MAX;
+       hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
+       list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+               if (n_ptr->addr < temp_node->addr)
+                       break;
+       }
+       list_add_tail_rcu(&n_ptr->list, &temp_node->list);
 exit:
        spin_unlock_bh(&tn->node_list_lock);
        return n_ptr;
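Reviewer note: the tipc hunks move hlist_add_head_rcu() and the list insertion to after the node is fully initialized, so RCU readers can never find a half-constructed node. The same publish-after-init pattern in a standalone C11 sketch (names are mine):

#include <stdatomic.h>
#include <stdlib.h>

struct node {
        int state;
};

static _Atomic(struct node *) published;

struct node *create_node(void)
{
        struct node *n = calloc(1, sizeof(*n));

        if (!n)
                return NULL;
        n->state = 42;                          /* init everything first */
        atomic_store_explicit(&published, n,
                              memory_order_release); /* publish last */
        return n;
}

int main(void)
{
        /* single-threaded demo; a real reader would load with acquire
         * order and could run concurrently with create_node() */
        struct node *n = create_node();

        free(n);
        return 0;
}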
index 350cca33ee0a64b08ce6135aa130011263eba136..69ee2eeef968851192035cb166410f1f37e7722f 100644 (file)
@@ -289,15 +289,14 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
                                struct sockaddr_tipc *addr, void *usr_data,
                                void *buf, size_t len)
 {
-       struct tipc_subscriber *subscriber = usr_data;
+       struct tipc_subscriber *subscrb = usr_data;
        struct tipc_subscription *sub = NULL;
        struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
-       if (sub)
-               tipc_nametbl_subscribe(sub);
-       else
-               tipc_conn_terminate(tn->topsrv, subscriber->conid);
+       if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
+               return tipc_conn_terminate(tn->topsrv, subscrb->conid);
+
+       tipc_nametbl_subscribe(sub);
 }
 
 /* Handle one request to establish a new subscriber */
index e3f85bc8b135d8feb13d60432e5f6cdb1064fbd6..898a53a562b8dff761fbf108888fe5ae28818341 100644 (file)
@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
        UNIXCB(skb).fp = NULL;
 
        for (i = scm->fp->count-1; i >= 0; i--)
-               unix_notinflight(scm->fp->fp[i]);
+               unix_notinflight(scm->fp->user, scm->fp->fp[i]);
 }
 
 static void unix_destruct_scm(struct sk_buff *skb)
@@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
                return -ENOMEM;
 
        for (i = scm->fp->count - 1; i >= 0; i--)
-               unix_inflight(scm->fp->fp[i]);
+               unix_inflight(scm->fp->user, scm->fp->fp[i]);
        return max_level;
 }
 
@@ -1781,7 +1781,12 @@ restart_locked:
                        goto out_unlock;
        }
 
-       if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+       /* other == sk && unix_peer(other) != sk if
+        * - unix_peer(sk) == NULL, destination address bound to sk
+        * - unix_peer(sk) == sk by time of get but disconnected before lock
+        */
+       if (other != sk &&
+           unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
                if (timeo) {
                        timeo = unix_wait_for_peer(other, timeo);
 
@@ -2270,13 +2275,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
        size_t size = state->size;
        unsigned int last_len;
 
-       err = -EINVAL;
-       if (sk->sk_state != TCP_ESTABLISHED)
+       if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+               err = -EINVAL;
                goto out;
+       }
 
-       err = -EOPNOTSUPP;
-       if (flags & MSG_OOB)
+       if (unlikely(flags & MSG_OOB)) {
+               err = -EOPNOTSUPP;
                goto out;
+       }
 
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
        timeo = sock_rcvtimeo(sk, noblock);
@@ -2322,9 +2329,11 @@ again:
                                goto unlock;
 
                        unix_state_unlock(sk);
-                       err = -EAGAIN;
-                       if (!timeo)
+                       if (!timeo) {
+                               err = -EAGAIN;
                                break;
+                       }
+
                        mutex_unlock(&u->readlock);
 
                        timeo = unix_stream_data_wait(sk, timeo, last,
@@ -2332,6 +2341,7 @@ again:
 
                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
+                               scm_destroy(&scm);
                                goto out;
                        }
 
index c512f64d528766f940b30236bf3e3c8fa2047661..4d9679701a6df5113df3e24a1c9b86e2a63b3710 100644 (file)
@@ -220,7 +220,7 @@ done:
        return skb->len;
 }
 
-static struct sock *unix_lookup_by_ino(int ino)
+static struct sock *unix_lookup_by_ino(unsigned int ino)
 {
        int i;
        struct sock *sk;
index 8fcdc2283af50c5caecd409b79cae6028ca2c836..6a0d48525fcf9a71f54bb43495b200b300f5341e 100644 (file)
@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
  * descriptor if it is for an AF_UNIX socket.
  */
 
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
 
@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
                }
                unix_tot_inflight++;
        }
-       fp->f_cred->user->unix_inflight++;
+       user->unix_inflight++;
        spin_unlock(&unix_gc_lock);
 }
 
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
 
@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
                        list_del_init(&u->link);
                unix_tot_inflight--;
        }
-       fp->f_cred->user->unix_inflight--;
+       user->unix_inflight--;
        spin_unlock(&unix_gc_lock);
 }
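Reviewer note: the signature change above makes the accounted user explicit: the charge and the matching uncharge must hit the same account, rather than re-deriving it from file->f_cred, which can name a different user by the time the fd stops being in flight. A toy sketch of that contract (all names illustrative):

#include <stdio.h>

struct user_acct {
        const char *name;
        long unix_inflight;
};

static void charge(struct user_acct *u)
{
        u->unix_inflight++;
}

static void uncharge(struct user_acct *u)
{
        u->unix_inflight--;
}

int main(void)
{
        struct user_acct sender = { "sender", 0 };

        charge(&sender);
        uncharge(&sender);      /* same account on both sides */
        printf("%s: %ld\n", sender.name, sender.unix_inflight);
        return 0;
}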
 
index dacf71a43ad41f678e3a0574c84321bacd0ff30e..ba6c34ea5429535cd303b1aa1d6ebd6ae30dd912 100755 (executable)
@@ -62,7 +62,7 @@ vmlinux_link()
                        -Wl,--start-group                                    \
                                 ${KBUILD_VMLINUX_MAIN}                      \
                        -Wl,--end-group                                      \
-                       -lutil -lrt ${1}
+                       -lutil -lrt -lpthread ${1}
                rm -f linux
        fi
 }
index ff81026f6ddbae7de368a1ae4bfbfeb1a66a9ae6..7c57c7fcf5a2cdec1ffb03a6e471f2b5511d0bd3 100644 (file)
@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
  */
 static inline unsigned int smk_ptrace_mode(unsigned int mode)
 {
-       switch (mode) {
-       case PTRACE_MODE_READ:
-               return MAY_READ;
-       case PTRACE_MODE_ATTACH:
+       if (mode & PTRACE_MODE_ATTACH)
                return MAY_READWRITE;
-       }
+       if (mode & PTRACE_MODE_READ)
+               return MAY_READ;
 
        return 0;
 }
index d3c19c970a06bf35789cd8444971e7421a00c8ca..cb6ed10816d49ac963a65816a92ca738a7fb42d0 100644 (file)
@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
        int rc = 0;
 
        /* require ptrace target be a child of ptracer on attach */
-       if (mode == PTRACE_MODE_ATTACH) {
+       if (mode & PTRACE_MODE_ATTACH) {
                switch (ptrace_scope) {
                case YAMA_SCOPE_DISABLED:
                        /* No additional restrictions. */
@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
                }
        }
 
-       if (rc) {
+       if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
                printk_ratelimited(KERN_NOTICE
                        "ptrace of pid %d was attempted by: %s (pid %d)\n",
                        child->pid, current->comm, current->pid);
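Reviewer note: both the smack and yama hunks above switch from equality tests on the ptrace mode to bitmask tests, because the mode argument now carries extra flag bits (such as the NOAUDIT bit checked above) that make `mode == PTRACE_MODE_ATTACH` silently false. A tiny demonstration with illustrative flag values (not the kernel's actual constants):

#include <stdio.h>

#define MODE_READ      0x01
#define MODE_ATTACH    0x02
#define MODE_NOAUDIT   0x04

int main(void)
{
        unsigned int mode = MODE_ATTACH | MODE_NOAUDIT;

        /* Equality skips the policy once any extra flag bit is set;
         * the mask test keeps working. */
        printf("==: %d\n", mode == MODE_ATTACH);        /* 0: missed */
        printf("& : %d\n", !!(mode & MODE_ATTACH));     /* 1: caught */
        return 0;
}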
index b9c0910fb8c4ead97559c7f9c1737e76b2d5cf7f..0608f216f3592fc597c85c2c92594b891e499647 100644 (file)
@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
         unsigned char reserved[128];
 };
 
+#ifdef CONFIG_X86_X32
+/* x32 has a different alignment for 64bit values from ia32 */
+struct snd_ctl_elem_value_x32 {
+       struct snd_ctl_elem_id id;
+       unsigned int indirect;  /* bit-field causes misalignment */
+       union {
+               s32 integer[128];
+               unsigned char data[512];
+               s64 integer64[64];
+       } value;
+       unsigned char reserved[128];
+};
+#endif /* CONFIG_X86_X32 */
 
 /* get the value type and count of the control */
 static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
 
 static int copy_ctl_value_from_user(struct snd_card *card,
                                    struct snd_ctl_elem_value *data,
-                                   struct snd_ctl_elem_value32 __user *data32,
+                                   void __user *userdata,
+                                   void __user *valuep,
                                    int *typep, int *countp)
 {
+       struct snd_ctl_elem_value32 __user *data32 = userdata;
        int i, type, size;
        int uninitialized_var(count);
        unsigned int indirect;
@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
        if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
            type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
                for (i = 0; i < count; i++) {
+                       s32 __user *intp = valuep;
                        int val;
-                       if (get_user(val, &data32->value.integer[i]))
+                       if (get_user(val, &intp[i]))
                                return -EFAULT;
                        data->value.integer.value[i] = val;
                }
@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
                        dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
                        return -EINVAL;
                }
-               if (copy_from_user(data->value.bytes.data,
-                                  data32->value.data, size))
+               if (copy_from_user(data->value.bytes.data, valuep, size))
                        return -EFAULT;
        }
 
@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
 }
 
 /* restore the value to 32bit */
-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
+static int copy_ctl_value_to_user(void __user *userdata,
+                                 void __user *valuep,
                                  struct snd_ctl_elem_value *data,
                                  int type, int count)
 {
@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
        if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
            type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
                for (i = 0; i < count; i++) {
+                       s32 __user *intp = valuep;
                        int val;
                        val = data->value.integer.value[i];
-                       if (put_user(val, &data32->value.integer[i]))
+                       if (put_user(val, &intp[i]))
                                return -EFAULT;
                }
        } else {
                size = get_elem_size(type, count);
-               if (copy_to_user(data32->value.data,
-                                data->value.bytes.data, size))
+               if (copy_to_user(valuep, data->value.bytes.data, size))
                        return -EFAULT;
        }
        return 0;
 }
 
-static int snd_ctl_elem_read_user_compat(struct snd_card *card, 
-                                        struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_read_user(struct snd_card *card,
+                             void __user *userdata, void __user *valuep)
 {
        struct snd_ctl_elem_value *data;
        int err, type, count;
@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
        if (data == NULL)
                return -ENOMEM;
 
-       if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+       err = copy_ctl_value_from_user(card, data, userdata, valuep,
+                                      &type, &count);
+       if (err < 0)
                goto error;
 
        snd_power_lock(card);
@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
                err = snd_ctl_elem_read(card, data);
        snd_power_unlock(card);
        if (err >= 0)
-               err = copy_ctl_value_to_user(data32, data, type, count);
+               err = copy_ctl_value_to_user(userdata, valuep, data,
+                                            type, count);
  error:
        kfree(data);
        return err;
 }
 
-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
-                                         struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_write_user(struct snd_ctl_file *file,
+                              void __user *userdata, void __user *valuep)
 {
        struct snd_ctl_elem_value *data;
        struct snd_card *card = file->card;
@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
        if (data == NULL)
                return -ENOMEM;
 
-       if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+       err = copy_ctl_value_from_user(card, data, userdata, valuep,
+                                      &type, &count);
+       if (err < 0)
                goto error;
 
        snd_power_lock(card);
@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
                err = snd_ctl_elem_write(card, file, data);
        snd_power_unlock(card);
        if (err >= 0)
-               err = copy_ctl_value_to_user(data32, data, type, count);
+               err = copy_ctl_value_to_user(userdata, valuep, data,
+                                            type, count);
  error:
        kfree(data);
        return err;
 }
 
+static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+                                        struct snd_ctl_elem_value32 __user *data32)
+{
+       return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+                                         struct snd_ctl_elem_value32 __user *data32)
+{
+       return ctl_elem_write_user(file, data32, &data32->value);
+}
+
+#ifdef CONFIG_X86_X32
+static int snd_ctl_elem_read_user_x32(struct snd_card *card,
+                                     struct snd_ctl_elem_value_x32 __user *data32)
+{
+       return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
+                                      struct snd_ctl_elem_value_x32 __user *data32)
+{
+       return ctl_elem_write_user(file, data32, &data32->value);
+}
+#endif /* CONFIG_X86_X32 */
+
 /* add or replace a user control */
 static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
                                   struct snd_ctl_elem_info32 __user *data32,
@@ -393,6 +441,10 @@ enum {
        SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
        SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
        SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
+#ifdef CONFIG_X86_X32
+       SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
+       SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
+#endif /* CONFIG_X86_X32 */
 };
 
 static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
                return snd_ctl_elem_add_compat(ctl, argp, 0);
        case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
                return snd_ctl_elem_add_compat(ctl, argp, 1);
+#ifdef CONFIG_X86_X32
+       case SNDRV_CTL_IOCTL_ELEM_READ_X32:
+               return snd_ctl_elem_read_user_x32(ctl->card, argp);
+       case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
+               return snd_ctl_elem_write_user_x32(ctl, argp);
+#endif /* CONFIG_X86_X32 */
        }
 
        down_read(&snd_ioctl_rwsem);
index 9630e9f72b7ba2ad77f50a516ecc559c0c200975..1f64ab0c2a95a291638406e62daa15ad6a172db3 100644 (file)
@@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream
        return err;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has the same struct as x86-64 for snd_pcm_channel_info */
+static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
+                                    struct snd_pcm_channel_info __user *src);
+#define snd_pcm_ioctl_channel_info_x32(s, p)   \
+       snd_pcm_channel_info_user(s, p)
+#endif /* CONFIG_X86_X32 */
+
 struct snd_pcm_status32 {
        s32 state;
        struct compat_timespec trigger_tstamp;
@@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
        return err;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_pcm_status_x32 {
+       s32 state;
+       u32 rsvd; /* alignment */
+       struct timespec trigger_tstamp;
+       struct timespec tstamp;
+       u32 appl_ptr;
+       u32 hw_ptr;
+       s32 delay;
+       u32 avail;
+       u32 avail_max;
+       u32 overrange;
+       s32 suspended_state;
+       u32 audio_tstamp_data;
+       struct timespec audio_tstamp;
+       struct timespec driver_tstamp;
+       u32 audio_tstamp_accuracy;
+       unsigned char reserved[52-2*sizeof(struct timespec)];
+} __packed;
+
+#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
+
+static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream,
+                                  struct snd_pcm_status_x32 __user *src,
+                                  bool ext)
+{
+       struct snd_pcm_status status;
+       int err;
+
+       memset(&status, 0, sizeof(status));
+       /*
+        * With the extended ioctl, parameters are read/write:
+        * fetch audio_tstamp_data from userspace and ignore the
+        * rest of the status structure.
+        */
+       if (ext && get_user(status.audio_tstamp_data,
+                               (u32 __user *)(&src->audio_tstamp_data)))
+               return -EFAULT;
+       err = snd_pcm_status(substream, &status);
+       if (err < 0)
+               return err;
+
+       if (clear_user(src, sizeof(*src)))
+               return -EFAULT;
+       if (put_user(status.state, &src->state) ||
+           put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
+           put_timespec(&status.tstamp, &src->tstamp) ||
+           put_user(status.appl_ptr, &src->appl_ptr) ||
+           put_user(status.hw_ptr, &src->hw_ptr) ||
+           put_user(status.delay, &src->delay) ||
+           put_user(status.avail, &src->avail) ||
+           put_user(status.avail_max, &src->avail_max) ||
+           put_user(status.overrange, &src->overrange) ||
+           put_user(status.suspended_state, &src->suspended_state) ||
+           put_user(status.audio_tstamp_data, &src->audio_tstamp_data) ||
+           put_timespec(&status.audio_tstamp, &src->audio_tstamp) ||
+           put_timespec(&status.driver_tstamp, &src->driver_tstamp) ||
+           put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy))
+               return -EFAULT;
+
+       return err;
+}
+#endif /* CONFIG_X86_X32 */
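Reviewer note: the x32 compat structs above carry explicit rsvd/pad members because x32 gives 64-bit members natural 8-byte alignment, so any u32 preceding a 64-bit timespec needs visible padding to keep the kernel and userspace views identical. A standalone layout check of that idea (demo struct names are mine, not the ALSA definitions):

#include <stddef.h>
#include <stdint.h>

struct ts64 {
        int64_t tv_sec;
        int64_t tv_nsec;
};

struct status_x32_demo {
        int32_t state;
        uint32_t rsvd;          /* explicit, not compiler-inserted */
        struct ts64 trigger_tstamp;
};

_Static_assert(offsetof(struct status_x32_demo, trigger_tstamp) == 8,
               "u32 padding must land the timespec at offset 8");

int main(void)
{
        return 0;
}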
+
 /* both for HW_PARAMS and HW_REFINE */
 static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
                                          int refine, 
@@ -469,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
        return 0;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_pcm_mmap_status_x32 {
+       s32 state;
+       s32 pad1;
+       u32 hw_ptr;
+       u32 pad2; /* alignment */
+       struct timespec tstamp;
+       s32 suspended_state;
+       struct timespec audio_tstamp;
+} __packed;
+
+struct snd_pcm_mmap_control_x32 {
+       u32 appl_ptr;
+       u32 avail_min;
+};
+
+struct snd_pcm_sync_ptr_x32 {
+       u32 flags;
+       u32 rsvd; /* alignment */
+       union {
+               struct snd_pcm_mmap_status_x32 status;
+               unsigned char reserved[64];
+       } s;
+       union {
+               struct snd_pcm_mmap_control_x32 control;
+               unsigned char reserved[64];
+       } c;
+} __packed;
+
+static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
+                                     struct snd_pcm_sync_ptr_x32 __user *src)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       volatile struct snd_pcm_mmap_status *status;
+       volatile struct snd_pcm_mmap_control *control;
+       u32 sflags;
+       struct snd_pcm_mmap_control scontrol;
+       struct snd_pcm_mmap_status sstatus;
+       snd_pcm_uframes_t boundary;
+       int err;
+
+       if (snd_BUG_ON(!runtime))
+               return -EINVAL;
+
+       if (get_user(sflags, &src->flags) ||
+           get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+           get_user(scontrol.avail_min, &src->c.control.avail_min))
+               return -EFAULT;
+       if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+               err = snd_pcm_hwsync(substream);
+               if (err < 0)
+                       return err;
+       }
+       status = runtime->status;
+       control = runtime->control;
+       boundary = recalculate_boundary(runtime);
+       if (!boundary)
+               boundary = 0x7fffffff;
+       snd_pcm_stream_lock_irq(substream);
+       /* FIXME: we should consider the boundary for the sync from app */
+       if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
+               control->appl_ptr = scontrol.appl_ptr;
+       else
+               scontrol.appl_ptr = control->appl_ptr % boundary;
+       if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+               control->avail_min = scontrol.avail_min;
+       else
+               scontrol.avail_min = control->avail_min;
+       sstatus.state = status->state;
+       sstatus.hw_ptr = status->hw_ptr % boundary;
+       sstatus.tstamp = status->tstamp;
+       sstatus.suspended_state = status->suspended_state;
+       sstatus.audio_tstamp = status->audio_tstamp;
+       snd_pcm_stream_unlock_irq(substream);
+       if (put_user(sstatus.state, &src->s.status.state) ||
+           put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
+           put_timespec(&sstatus.tstamp, &src->s.status.tstamp) ||
+           put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
+           put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) ||
+           put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+           put_user(scontrol.avail_min, &src->c.control.avail_min))
+               return -EFAULT;
+
+       return 0;
+}
+#endif /* CONFIG_X86_X32 */
 
 /*
  */
@@ -487,7 +647,12 @@ enum {
        SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
        SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
        SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
-
+#ifdef CONFIG_X86_X32
+       SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info),
+       SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32),
+       SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 0x24, struct snd_pcm_status_x32),
+       SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32),
+#endif /* CONFIG_X86_X32 */
 };
 
 static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -559,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
                return snd_pcm_ioctl_rewind_compat(substream, argp);
        case SNDRV_PCM_IOCTL_FORWARD32:
                return snd_pcm_ioctl_forward_compat(substream, argp);
+#ifdef CONFIG_X86_X32
+       case SNDRV_PCM_IOCTL_STATUS_X32:
+               return snd_pcm_status_user_x32(substream, argp, false);
+       case SNDRV_PCM_IOCTL_STATUS_EXT_X32:
+               return snd_pcm_status_user_x32(substream, argp, true);
+       case SNDRV_PCM_IOCTL_SYNC_PTR_X32:
+               return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
+       case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
+               return snd_pcm_ioctl_channel_info_x32(substream, argp);
+#endif /* CONFIG_X86_X32 */
        }
 
        return -ENOIOCTLCMD;
index 5268c1f58c25b7e143b5713a45dac36220558cc5..09a89094dcf72b3c8bf7ded51a8f723f41f8bdc6 100644 (file)
@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
        return 0;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_rawmidi_status_x32 {
+       s32 stream;
+       u32 rsvd; /* alignment */
+       struct timespec tstamp;
+       u32 avail;
+       u32 xruns;
+       unsigned char reserved[16];
+} __attribute__((packed));
+
+#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
+
+static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
+                                       struct snd_rawmidi_status_x32 __user *src)
+{
+       int err;
+       struct snd_rawmidi_status status;
+
+       if (rfile->output == NULL)
+               return -EINVAL;
+       if (get_user(status.stream, &src->stream))
+               return -EFAULT;
+
+       switch (status.stream) {
+       case SNDRV_RAWMIDI_STREAM_OUTPUT:
+               err = snd_rawmidi_output_status(rfile->output, &status);
+               break;
+       case SNDRV_RAWMIDI_STREAM_INPUT:
+               err = snd_rawmidi_input_status(rfile->input, &status);
+               break;
+       default:
+               return -EINVAL;
+       }
+       if (err < 0)
+               return err;
+
+       if (put_timespec(&status.tstamp, &src->tstamp) ||
+           put_user(status.avail, &src->avail) ||
+           put_user(status.xruns, &src->xruns))
+               return -EFAULT;
+
+       return 0;
+}
+#endif /* CONFIG_X86_X32 */
+
 enum {
        SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
        SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
+#ifdef CONFIG_X86_X32
+       SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
+#endif /* CONFIG_X86_X32 */
 };
 
 static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
                return snd_rawmidi_ioctl_params_compat(rfile, argp);
        case SNDRV_RAWMIDI_IOCTL_STATUS32:
                return snd_rawmidi_ioctl_status_compat(rfile, argp);
+#ifdef CONFIG_X86_X32
+       case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
+               return snd_rawmidi_ioctl_status_x32(rfile, argp);
+#endif /* CONFIG_X86_X32 */
        }
        return -ENOIOCTLCMD;
 }
index 7354b8bed86099072227fba427a355acf868cba8..cb23899100eedb4d2bd03399a1ab97975ff08745 100644 (file)
@@ -148,8 +148,6 @@ odev_release(struct inode *inode, struct file *file)
        if ((dp = file->private_data) == NULL)
                return 0;
 
-       snd_seq_oss_drain_write(dp);
-
        mutex_lock(&register_mutex);
        snd_seq_oss_release(dp);
        mutex_unlock(&register_mutex);
index b43924325249c01b0cdd11fc1164572a50369f98..d7b4d016b547584ed337c6d0e29502ad1b12c5bf 100644 (file)
@@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
 unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
 
 void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
 
 /* */
 void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
index 6779e82b46dd7060bd982137ca75cfcdbbfa989e..92c96a95a903abd82ace8256b77c2c3bb99ff620 100644 (file)
@@ -435,22 +435,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
 }
 
 
-/*
- * Wait until the queue is empty (if we don't have nonblock)
- */
-void
-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
-{
-       if (! dp->timer->running)
-               return;
-       if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
-           dp->writeq) {
-               while (snd_seq_oss_writeq_sync(dp->writeq))
-                       ;
-       }
-}
-
-
 /*
  * reset sequencer devices
  */
index e05802ae6e1b1e240a187a43c9f5308fcf7145c0..2e908225d754cfd4f4d23aa40abab01785351456 100644 (file)
@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
                                        struct snd_timer_status32 __user *_status)
 {
        struct snd_timer_user *tu;
-       struct snd_timer_status status;
+       struct snd_timer_status32 status;
        
        tu = file->private_data;
        if (snd_BUG_ON(!tu->timeri))
                return -ENXIO;
        memset(&status, 0, sizeof(status));
-       status.tstamp = tu->tstamp;
+       status.tstamp.tv_sec = tu->tstamp.tv_sec;
+       status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
        status.resolution = snd_timer_resolution(tu->timeri);
        status.lost = tu->timeri->lost;
        status.overrun = tu->overrun;
@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
        return 0;
 }
 
+#ifdef CONFIG_X86_X32
+/* X32 ABI has the same struct as x86-64 */
+#define snd_timer_user_status_x32(file, s) \
+       snd_timer_user_status(file, s)
+#endif /* CONFIG_X86_X32 */
+
 /*
  */
 
 enum {
        SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
        SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
+#ifdef CONFIG_X86_X32
+       SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
+#endif /* CONFIG_X86_X32 */
 };
 
 static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
                return snd_timer_user_info_compat(file, argp);
        case SNDRV_TIMER_IOCTL_STATUS32:
                return snd_timer_user_status_compat(file, argp);
+#ifdef CONFIG_X86_X32
+       case SNDRV_TIMER_IOCTL_STATUS_X32:
+               return snd_timer_user_status_x32(file, argp);
+#endif /* CONFIG_X86_X32 */
        }
        return -ENOIOCTLCMD;
 }
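
The two timer fixes above share a theme. The compat handler used to fill a native snd_timer_status, whose struct timespec is 16 bytes on a 64-bit kernel, then copy it to a user buffer laid out for the 32-bit struct, which shifts every field after the timestamp; building a snd_timer_status32 locally and assigning tv_sec/tv_nsec one by one keeps the layouts in step. Conversely, X32's timespec already is the 64-bit layout, which is why STATUS_X32 can simply alias the native handler. A sketch of the offset skew (layouts modeled loosely on the ALSA structs, treat them as illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct timespec32 { int tv_sec, tv_nsec; };

    struct timer_status32 {                 /* what 32-bit userspace expects */
            struct timespec32 tstamp;
            unsigned int resolution, lost, overrun, queue;
            unsigned char reserved[64];
    };

    struct timer_status64 {                 /* what the 64-bit kernel filled */
            struct { long tv_sec, tv_nsec; } tstamp;
            unsigned int resolution, lost, overrun, queue;
            unsigned char reserved[64];
    };

    int main(void)
    {
            printf("resolution offset: %zu (32-bit) vs %zu (64-bit)\n",
                   offsetof(struct timer_status32, resolution),
                   offsetof(struct timer_status64, resolution));
            printf("struct size: %zu vs %zu\n",
                   sizeof(struct timer_status32),
                   sizeof(struct timer_status64));
            return 0;
    }

On x86-64 this prints offsets 8 vs. 16: copying the native struct wholesale would land every post-timestamp field in the wrong place and overrun the smaller user buffer.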
index 2c13298e80b7477a4bef0646319af55f118831e2..2ff692dd2c5f79ef577aa3be3b6df5326e3d8470 100644 (file)
@@ -357,7 +357,10 @@ enum {
                                        ((pci)->device == 0x0d0c) || \
                                        ((pci)->device == 0x160c))
 
-#define IS_BROXTON(pci)        ((pci)->device == 0x5a98)
+#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
 
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
@@ -534,13 +537,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
                snd_hdac_set_codec_wakeup(bus, true);
-       if (IS_BROXTON(pci)) {
+       if (IS_SKL_PLUS(pci)) {
                pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
                val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
                pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
        }
        azx_init_chip(chip, full_reset);
-       if (IS_BROXTON(pci)) {
+       if (IS_SKL_PLUS(pci)) {
                pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
                val = val | INTEL_HDA_CGCTL_MISCBDCGE;
                pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
@@ -549,7 +552,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
                snd_hdac_set_codec_wakeup(bus, false);
 
        /* reduce dma latency to avoid noise */
-       if (IS_BROXTON(pci))
+       if (IS_BXT(pci))
                bxt_reduce_dma_latency(chip);
 }
 
@@ -971,11 +974,6 @@ static int azx_resume(struct device *dev)
 /* put codec down to D3 at hibernation for Intel SKL+;
  * otherwise BIOS may still access the codec and screw up the driver
  */
-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
-
 static int azx_freeze_noirq(struct device *dev)
 {
        struct pci_dev *pci = to_pci_dev(dev);
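
The MISCBDCGE clock-gating workaround, previously gated on a Broxton-only check, now covers the whole Skylake family: the IS_SKL/IS_SKL_LP/IS_BXT predicates move above hda_intel_init_chip() (and are deleted from their old spot next to azx_freeze_noirq()) so that IS_SKL_PLUS() is visible there, while the DMA-latency tweak stays BXT-specific. A toy version of that split, reusing the device IDs visible in the hunks above:

    #include <stdio.h>

    struct pci_id { unsigned short vendor, device; };

    #define IS_SKL(p)      ((p)->vendor == 0x8086 && (p)->device == 0xa170)
    #define IS_SKL_LP(p)   ((p)->vendor == 0x8086 && (p)->device == 0x9d70)
    #define IS_BXT(p)      ((p)->vendor == 0x8086 && (p)->device == 0x5a98)
    #define IS_SKL_PLUS(p) (IS_SKL(p) || IS_SKL_LP(p) || IS_BXT(p))

    int main(void)
    {
            struct pci_id skl = { 0x8086, 0xa170 }, bxt = { 0x8086, 0x5a98 };

            /* SKL gets the CGCTL workaround but not the latency tweak. */
            printf("SKL: cgctl=%d latency=%d\n", IS_SKL_PLUS(&skl), IS_BXT(&skl));
            printf("BXT: cgctl=%d latency=%d\n", IS_SKL_PLUS(&bxt), IS_BXT(&bxt));
            return 0;
    }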
index efd4980cffb8a0273228ac7570315f0f1aa1dbf9..c2430b36e1cef101d460c4ca651a9d1b6228735b 100644 (file)
@@ -4749,6 +4749,7 @@ enum {
        ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
        ALC293_FIXUP_LENOVO_SPK_NOISE,
        ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
+       ALC255_FIXUP_DELL_SPK_NOISE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5368,6 +5369,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
        },
+       [ALC255_FIXUP_DELL_SPK_NOISE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_disable_aamix,
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5379,6 +5386,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+       SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
@@ -5410,6 +5418,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
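
The new ALC255_FIXUP_DELL_SPK_NOISE entry shows how these fixups compose: it runs alc_fixup_disable_aamix and, because .chained is set, hands off to ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, so the Inspiron 3162 quirk inherits the mic setup as well. A hypothetical miniature of that chain walk (not the driver's actual machinery):

    #include <stdio.h>

    enum { FIX_MIC_NO_PRESENCE, FIX_DELL_SPK_NOISE, NFIX };

    struct fixup {
            const char *action;
            int chained;
            int chain_id;
    };

    static const struct fixup table[NFIX] = {
            [FIX_MIC_NO_PRESENCE] = { "set up Dell headset mic", 0, 0 },
            [FIX_DELL_SPK_NOISE]  = { "disable aamix path",      1, FIX_MIC_NO_PRESENCE },
    };

    static void apply_fixup(int id)
    {
            for (;;) {
                    printf("applying: %s\n", table[id].action);
                    if (!table[id].chained)
                            break;
                    id = table[id].chain_id;
            }
    }

    int main(void)
    {
            apply_fixup(FIX_DELL_SPK_NOISE);    /* runs both entries */
            return 0;
    }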
index 2875b4f6d8c9e6a792638547d4b6850fa620a857..7c8941b8b2defd9b21b8159f1cef333659ce423d 100644 (file)
@@ -2879,7 +2879,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
 {
        struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
 
-       ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
+       ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
        return 0;
 }
 
@@ -2891,7 +2891,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
 
        if (!snd_hdsp_use_is_exclusive(hdsp))
                return -EBUSY;
-       val = ucontrol->value.enumerated.item[0];
+       val = ucontrol->value.integer.value[0];
        spin_lock_irq(&hdsp->lock);
        if (val != hdsp_dds_offset(hdsp))
                change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
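
The DDS offset is an integer control, so its payload lives in the value.integer.value (long) arm of the snd_ctl_elem_value union, not in value.enumerated.item (unsigned int). Reading through the wrong arm happens to "work" on 64-bit little-endian, where the int aliases the low half of the long, but it picks the wrong half elsewhere and mangles negative offsets. A small aliasing demo (union shape modeled loosely on ALSA's):

    #include <stdio.h>

    union payload {
            long value[4];          /* integer controls */
            unsigned int item[8];   /* enumerated controls */
    };

    int main(void)
    {
            union payload v = { .value = { -3 } };  /* e.g. a -3 Hz DDS offset */

            printf("via value[0]: %ld\n", v.value[0]);
            printf("via item[0]:  %u\n", v.item[0]);
            return 0;
    }

On a 64-bit little-endian machine the second line prints 4294967293, the truncated two's-complement bits; a big-endian machine would read the wrong half of the long entirely.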
index 8bc8016c173d6a6005e33222df1381272bb7819e..a4a999a0317eaefc30eeb2265fd38b784c4afd8a 100644 (file)
@@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
 {
        u64 n;
 
+       if (snd_BUG_ON(rate <= 0))
+               return;
+
        if (rate >= 112000)
                rate /= 4;
        else if (rate >= 56000)
@@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
                } else {
                        /* slave mode, return external sample rate */
                        rate = hdspm_external_sample_rate(hdspm);
+                       if (!rate)
+                               rate = hdspm->system_sample_rate;
                }
        }
 
@@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
                                            ucontrol)
 {
        struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+       int rate = ucontrol->value.integer.value[0];
 
-       hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
+       if (rate < 27000 || rate > 207000)
+               return -EINVAL;
+       hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
        return 0;
 }
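
hdspm_set_dds_value() divides a fixed numerator by the speed-folded rate, so a zero rate would mean a divide-by-zero, which the new snd_BUG_ON() guards; the bounds added to the put handler keep user input inside the 27-207 kHz window before it ever reaches the divider, and the !rate fallback avoids reporting 0 when no external clock is detected. A hedged sketch of the guarded computation (the numerator below is a stand-in constant, not the driver's):

    #include <stdio.h>

    #define DDS_NUMERATOR 100000000000000ULL   /* stand-in value */

    static int set_dds(int rate)
    {
            unsigned long long n;

            if (rate < 27000 || rate > 207000)  /* bound like the put handler */
                    return -1;
            if (rate >= 112000)                 /* fold quad speed ... */
                    rate /= 4;
            else if (rate >= 56000)             /* ... and double speed */
                    rate /= 2;
            n = DDS_NUMERATOR / rate;           /* safe: rate is now >= 27000 */
            printf("rate %6d -> divider %llu\n", rate, n);
            return 0;
    }

    int main(void)
    {
            set_dds(44100);
            set_dds(192000);
            printf("rate 0 rejected: %d\n", set_dds(0));
            return 0;
    }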
 
@@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
 {
        struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
 
-       ucontrol->value.enumerated.item[0] = hdspm->tco->term;
+       ucontrol->value.integer.value[0] = hdspm->tco->term;
 
        return 0;
 }
@@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
 {
        struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
 
-       if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
-               hdspm->tco->term = ucontrol->value.enumerated.item[0];
+       if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
+               hdspm->tco->term = ucontrol->value.integer.value[0];
 
                hdspm_tco_write(hdspm);
 
index 4f6ce1cac8e20ef8b504073a6dfcc6236c0cf90c..c458d60d50300823e03e39109b715c94bf44cbbf 100644 (file)
@@ -1124,6 +1124,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
        case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
        case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+       case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
        case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
index 96234b638249e6f7c2ecb1c4820b62a7abefdd4c..5d51d6ff08e6a603173ad071b7651cc0fbce241d 100644 (file)
@@ -254,7 +254,7 @@ int main(int argc, char *argv[])
                        syslog(LOG_ERR, "Illegal op:%d\n", op);
                }
                vss_msg->error = error;
-               len = write(vss_fd, &error, sizeof(struct hv_vss_msg));
+               len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
                if (len != sizeof(struct hv_vss_msg)) {
                        syslog(LOG_ERR, "write failed; error: %d %s", errno,
                               strerror(errno));
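
The old call handed write() the address of error, a lone int, while asking for sizeof(struct hv_vss_msg) bytes, so the daemon shipped whatever stack bytes happened to follow that int; passing vss_msg makes the pointer and the length describe the same object. The general rule, in miniature:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    struct msg {
            int error;
            char payload[60];
    };

    int main(void)
    {
            struct msg m = { .error = 0 };
            ssize_t len;

            memset(m.payload, 'x', sizeof(m.payload));

            /* Correct: the count covers exactly the object passed. */
            len = write(STDOUT_FILENO, &m, sizeof(m));
            if (len != (ssize_t)sizeof(m))
                    perror("write");

            /* The buggy shape was write(fd, &m.error, sizeof(m)):
             * 64 bytes copied starting at a 4-byte int, i.e. an
             * out-of-bounds read of the surrounding stack. */
            return 0;
    }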
index 2d9d8306dbd3f97f3d1f9734ace17b25d9ed583a..4a3a72cb5805e256871abda351792ba23b2d30cd 100644 (file)
@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
        int i, ret;
 
        aggr->val = aggr->ena = aggr->run = 0;
-       init_stats(ps->res_stats);
 
        if (counter->per_pkg)
                zero_per_pkg(counter);
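
res_stats is the counter's running aggregate; calling init_stats() on every perf_stat_process_counter() invocation wiped it, so anything computed across intervals or repeated runs only ever saw the latest round. Dropping the reset lets the statistics accumulate. A sketch using the same running-update shape as perf's update_stats():

    #include <stdio.h>

    struct stats { double n, mean, M2; };

    static void update_stats(struct stats *s, double val)
    {
            double delta;

            s->n += 1.0;
            delta = val - s->mean;
            s->mean += delta / s->n;
            s->M2 += delta * (val - s->mean);
    }

    int main(void)
    {
            struct stats s = { 0 };
            double batch[2][3] = { { 10, 11, 12 }, { 100, 101, 102 } };

            for (int b = 0; b < 2; b++) {
                    /* no re-init between batches: both contribute */
                    for (int i = 0; i < 3; i++)
                            update_stats(&s, batch[b][i]);
                    printf("after batch %d: mean %.1f over %.0f values\n",
                           b, s.mean, s.n);
            }
            return 0;
    }

Re-initializing between batches would report a mean of 101.0 after the second batch instead of the true overall 56.0.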
index 77edcdcc016bbe9e891dbee8ce27c5824caaae63..057278448515a455a17ff2b48af2bf0902c2e7fb 100755 (executable)
@@ -88,7 +88,11 @@ test_delete()
                exit 1
        fi
 
-       rm $file
+       rm $file 2>/dev/null
+       if [ $? -ne 0 ]; then
+               chattr -i $file
+               rm $file
+       fi
 
        if [ -e $file ]; then
                echo "$file couldn't be deleted" >&2
@@ -111,6 +115,7 @@ test_zero_size_delete()
                exit 1
        fi
 
+       chattr -i $file
        printf "$attrs" > $file
 
        if [ -e $file ]; then
@@ -141,7 +146,11 @@ test_valid_filenames()
                        echo "$file could not be created" >&2
                        ret=1
                else
-                       rm $file
+                       rm $file 2>/dev/null
+                       if [ $? -ne 0 ]; then
+                               chattr -i $file
+                               rm $file
+                       fi
                fi
        done
 
@@ -174,7 +183,11 @@ test_invalid_filenames()
 
                if [ -e $file ]; then
                        echo "Creating $file should have failed" >&2
-                       rm $file
+                       rm $file 2>/dev/null
+                       if [ $? -ne 0 ]; then
+                               chattr -i $file
+                               rm $file
+                       fi
                        ret=1
                fi
        done
index 8c0764407b3c349431a6b78ca595e450e2661066..4af74f7330365e668d9e1379f6894f12d974374d 100644 (file)
@@ -1,10 +1,68 @@
+#include <errno.h>
 #include <stdio.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <sys/ioctl.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
+#include <linux/fs.h>
+
+static int set_immutable(const char *path, int immutable)
+{
+       unsigned int flags;
+       int fd;
+       int rc;
+       int error;
+
+       fd = open(path, O_RDONLY);
+       if (fd < 0)
+               return fd;
+
+       rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+       if (rc < 0) {
+               error = errno;
+               close(fd);
+               errno = error;
+               return rc;
+       }
+
+       if (immutable)
+               flags |= FS_IMMUTABLE_FL;
+       else
+               flags &= ~FS_IMMUTABLE_FL;
+
+       rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
+       error = errno;
+       close(fd);
+       errno = error;
+       return rc;
+}
+
+static int get_immutable(const char *path)
+{
+       unsigned int flags;
+       int fd;
+       int rc;
+       int error;
+
+       fd = open(path, O_RDONLY);
+       if (fd < 0)
+               return fd;
+
+       rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+       if (rc < 0) {
+               error = errno;
+               close(fd);
+               errno = error;
+               return rc;
+       }
+       close(fd);
+       if (flags & FS_IMMUTABLE_FL)
+               return 1;
+       return 0;
+}
 
 int main(int argc, char **argv)
 {
@@ -27,7 +85,7 @@ int main(int argc, char **argv)
        buf[4] = 0;
 
        /* create a test variable */
-       fd = open(path, O_WRONLY | O_CREAT);
+       fd = open(path, O_WRONLY | O_CREAT, 0600);
        if (fd < 0) {
                perror("open(O_WRONLY)");
                return EXIT_FAILURE;
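
open(2) with O_CREAT consumes a third, mode argument; the old two-argument call left the new file's permission bits to whatever garbage sat in the vararg slot. The corrected shape, on a hypothetical scratch path rather than the efivarfs one:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/tmp/create-mode-demo";      /* hypothetical */
            int fd = open(path, O_WRONLY | O_CREAT, 0600);   /* explicit mode */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            close(fd);
            unlink(path);
            return 0;
    }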
@@ -41,6 +99,18 @@ int main(int argc, char **argv)
 
        close(fd);
 
+       rc = get_immutable(path);
+       if (rc < 0) {
+               perror("ioctl(FS_IOC_GETFLAGS)");
+               return EXIT_FAILURE;
+       } else if (rc) {
+               rc = set_immutable(path, 0);
+               if (rc < 0) {
+                       perror("ioctl(FS_IOC_SETFLAGS)");
+                       return EXIT_FAILURE;
+               }
+       }
+
        fd = open(path, O_RDONLY);
        if (fd < 0) {
                perror("open");
index 7a2f449bd85d02d7f6571fa4f5b3091df81a04d9..5d10f104f3ebd0994533c1c26813b5e6ebf90376 100644 (file)
@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-
-       int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
+       int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
+       int sz = nr_longs * sizeof(unsigned long);
        vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
        vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
        vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
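
The shared-interrupt bitmaps are manipulated by bitmap helpers that read and write whole unsigned longs, but the old size was the bit count divided by 8, rounded down to bytes; whenever the shared-IRQ count is not a multiple of BITS_PER_LONG the buffers come up short (for fewer than eight shared IRQs the old formula even yields zero bytes) and the helpers scribble past the allocation. BITS_TO_LONGS() rounds up instead. A worked comparison (VGIC_NR_PRIVATE_IRQS is 32 in the kernel headers):

    #include <stdio.h>

    #define VGIC_NR_PRIVATE_IRQS 32
    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            int counts[] = { 36, 64, 96, 99, 160 };

            for (int i = 0; i < 5; i++) {
                    int bits = counts[i] - VGIC_NR_PRIVATE_IRQS;
                    size_t old_sz = bits / 8;  /* rounds down to bytes */
                    size_t new_sz = BITS_TO_LONGS(bits) * sizeof(unsigned long);

                    printf("%3d shared bits: old %2zu bytes, fixed %2zu bytes\n",
                           bits, old_sz, new_sz);
            }
            return 0;
    }

On a 64-bit build this prints, for example, 0 vs. 8 bytes for 4 shared bits and 8 vs. 16 bytes for 67, the exact under-allocations the fix closes.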
index 77d42be6970ed88cd5812ecea855db267a333468..4f70d12e392d3dcf6064c11e58ad7c8f798419ad 100644 (file)
@@ -173,7 +173,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
         * do alloc nowait since if we are going to sleep anyway we
         * may as well sleep faulting in page
         */
-       work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+       work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return 0;
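
This allocation sits on a fault path that must not sleep, and failure is already handled: returning 0 simply makes the vCPU take the page fault synchronously. Since that failure is expected and benign under memory pressure, __GFP_NOWARN stops the allocator from dumping a warning and backtrace each time. A userspace analogue of the "fail quietly, fall back" shape:

    #include <stdio.h>
    #include <stdlib.h>

    /* Returns 1 if the work was queued, 0 to tell the caller to take
     * the slow, synchronous path. No message on failure: the fallback
     * is part of normal operation. */
    static int queue_async_work(size_t sz)
    {
            void *work = calloc(1, sz);

            if (!work)
                    return 0;
            /* ... would hand `work` to a worker thread here ... */
            free(work);
            return 1;
    }

    int main(void)
    {
            if (queue_async_work(128))
                    puts("queued asynchronously");
            else
                    puts("falling back to synchronous handling");
            return 0;
    }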