Merge branch 'flock' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Oct 2010 01:13:34 +0000 (18:13 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 28 Oct 2010 01:13:34 +0000 (18:13 -0700)
* 'flock' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl:
  locks: turn lock_flocks into a spinlock
  fasync: re-organize fasync entry insertion to allow it under a spinlock
  locks/nfsd: allocate file lock outside of spinlock
  lockd: fix nlmsvc_notify_blocked locking
  lockd: push lock_flocks down
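
The common thread in these commits is that file-lock handling no longer hides
behind the BKL: lock_flocks() becomes a spinlock, so nothing that can sleep
(such as a GFP_KERNEL allocation) may run inside the critical section.  A
minimal sketch of the resulting pattern, assuming the lock_flocks()/
unlock_flocks() helpers and the locks_alloc_lock() allocator from this branch:

        struct file_lock *fl = locks_alloc_lock();      /* may sleep */

        if (fl == NULL)
                return -ENOMEM;

        lock_flocks();          /* spins; the BKL is gone from this path */
        /* ... link fl into the inode's lock list ... */
        unlock_flocks();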

754 files changed:
Documentation/DocBook/device-drivers.tmpl
Documentation/DocBook/kernel-api.tmpl
Documentation/filesystems/Locking
Documentation/filesystems/nfs/idmapper.txt
Documentation/filesystems/proc.txt
Documentation/filesystems/sharedsubtree.txt
Documentation/misc-devices/apds990x.txt [new file with mode: 0644]
Documentation/misc-devices/bh1770glc.txt [new file with mode: 0644]
Documentation/sysrq.txt
Documentation/timers/hpet_example.c
Documentation/trace/postprocess/trace-vmscan-postprocess.pl
Documentation/vm/highmem.txt [new file with mode: 0644]
MAINTAINERS
arch/alpha/Kconfig
arch/alpha/include/asm/core_mcpcia.h
arch/alpha/include/asm/core_t2.h
arch/alpha/include/asm/pgtable.h
arch/alpha/kernel/core_t2.c
arch/alpha/kernel/machvec_impl.h
arch/arm/include/asm/highmem.h
arch/arm/include/asm/pgtable.h
arch/arm/mach-ep93xx/clock.c
arch/arm/mm/fault-armv.c
arch/arm/mm/highmem.c
arch/arm/mm/pgd.c
arch/avr32/include/asm/pgtable.h
arch/blackfin/include/asm/entry.h
arch/cris/include/asm/pgtable.h
arch/frv/include/asm/highmem.h
arch/frv/include/asm/pgtable.h
arch/frv/mb93090-mb00/pci-dma.c
arch/frv/mm/cache-page.c
arch/frv/mm/highmem.c
arch/ia64/include/asm/pgtable.h
arch/m32r/include/asm/pgtable.h
arch/m68k/include/asm/entry_mm.h
arch/m68k/include/asm/entry_no.h
arch/m68k/include/asm/motorola_pgtable.h
arch/m68k/include/asm/sun3_pgtable.h
arch/microblaze/include/asm/pgtable.h
arch/mips/include/asm/highmem.h
arch/mips/include/asm/pgtable-32.h
arch/mips/include/asm/pgtable-64.h
arch/mips/mm/highmem.c
arch/mn10300/include/asm/highmem.h
arch/mn10300/include/asm/pgtable.h
arch/parisc/include/asm/pgtable.h
arch/powerpc/include/asm/highmem.h
arch/powerpc/include/asm/pgtable-ppc32.h
arch/powerpc/include/asm/pgtable-ppc64.h
arch/powerpc/kernel/vio.c
arch/powerpc/mm/highmem.c
arch/s390/include/asm/pgtable.h
arch/score/include/asm/pgtable.h
arch/sh/include/asm/pgtable_32.h
arch/sh/include/asm/pgtable_64.h
arch/sparc/include/asm/highmem.h
arch/sparc/include/asm/pgtable_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/mm/highmem.c
arch/tile/Kconfig
arch/tile/Makefile
arch/tile/include/arch/sim.h [new file with mode: 0644]
arch/tile/include/arch/sim_def.h
arch/tile/include/arch/spr_def.h
arch/tile/include/arch/spr_def_32.h
arch/tile/include/asm/backtrace.h
arch/tile/include/asm/compat.h
arch/tile/include/asm/highmem.h
arch/tile/include/asm/irqflags.h
arch/tile/include/asm/mman.h
arch/tile/include/asm/page.h
arch/tile/include/asm/pgtable.h
arch/tile/include/asm/processor.h
arch/tile/include/asm/ptrace.h
arch/tile/include/asm/syscalls.h
arch/tile/include/asm/system.h
arch/tile/include/asm/traps.h
arch/tile/include/hv/hypervisor.h
arch/tile/kernel/backtrace.c
arch/tile/kernel/compat.c
arch/tile/kernel/compat_signal.c
arch/tile/kernel/entry.S
arch/tile/kernel/head_32.S
arch/tile/kernel/intvec_32.S
arch/tile/kernel/irq.c
arch/tile/kernel/messaging.c
arch/tile/kernel/process.c
arch/tile/kernel/ptrace.c
arch/tile/kernel/regs_32.S
arch/tile/kernel/setup.c
arch/tile/kernel/signal.c
arch/tile/kernel/single_step.c
arch/tile/kernel/smp.c
arch/tile/kernel/stack.c
arch/tile/kernel/sys.c
arch/tile/kernel/traps.c
arch/tile/kvm/Kconfig [new file with mode: 0644]
arch/tile/lib/Makefile
arch/tile/lib/atomic_32.c
arch/tile/lib/exports.c
arch/tile/lib/memcpy_32.S
arch/tile/lib/memmove.c [new file with mode: 0644]
arch/tile/lib/memmove_32.c [deleted file]
arch/tile/lib/memset_32.c
arch/tile/lib/strlen_32.c
arch/tile/mm/fault.c
arch/tile/mm/highmem.c
arch/tile/mm/homecache.c
arch/tile/mm/init.c
arch/um/Kconfig.um
arch/um/defconfig
arch/um/include/asm/dma-mapping.h [deleted file]
arch/um/include/asm/pgtable.h
arch/um/include/asm/system.h
arch/um/kernel/dyn.lds.S
arch/um/kernel/irq.c
arch/um/kernel/uml.lds.S
arch/um/os-Linux/time.c
arch/x86/include/asm/highmem.h
arch/x86/include/asm/iomap.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/asm/xen/page.h
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/crash_dump_32.c
arch/x86/kernel/hpet.c
arch/x86/kernel/smpboot.c
arch/x86/mm/fault.c
arch/x86/mm/highmem_32.c
arch/x86/mm/iomap_32.c
arch/x86/xen/Kconfig
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/mmu.h
arch/x86/xen/setup.c
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/pgtable.h
crypto/async_tx/async_memcpy.c
crypto/blkcipher.c
drivers/acpi/Kconfig
drivers/acpi/ac.c
drivers/acpi/acpica/Makefile
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/achware.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acmacros.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dswexec.c
drivers/acpi/acpica/evevent.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evrgnini.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/evxfregn.c
drivers/acpi/acpica/exfldio.c
drivers/acpi/acpica/exmutex.c
drivers/acpi/acpica/exprep.c
drivers/acpi/acpica/exregion.c
drivers/acpi/acpica/hwpci.c [new file with mode: 0644]
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/acpica/nsutils.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/uteval.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/utids.c
drivers/acpi/acpica/utinit.c
drivers/acpi/acpica/utmath.c
drivers/acpi/acpica/utmisc.c
drivers/acpi/acpica/utmutex.c
drivers/acpi/acpica/utosi.c [new file with mode: 0644]
drivers/acpi/acpica/utxface.c
drivers/acpi/acpica/utxferror.c [new file with mode: 0644]
drivers/acpi/battery.c
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/dock.c
drivers/acpi/ec.c
drivers/acpi/fan.c
drivers/acpi/osl.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_link.c
drivers/acpi/pci_root.c
drivers/acpi/power.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_thermal.c
drivers/acpi/processor_throttling.c
drivers/acpi/sbs.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/sleep.h
drivers/acpi/thermal.c
drivers/acpi/video.c
drivers/base/node.c
drivers/base/power/runtime.c
drivers/block/loop.c
drivers/char/agp/Makefile
drivers/char/agp/agp.h
drivers/char/agp/amd-k7-agp.c
drivers/char/agp/backend.c
drivers/char/agp/generic.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/char/agp/intel-gtt.c
drivers/char/hpet.c
drivers/char/hvc_tile.c
drivers/char/hvc_xen.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mem.c
drivers/crypto/hifn_795x.c
drivers/gpio/pca953x.c
drivers/gpu/Makefile
drivers/gpu/drm/Makefile
drivers/gpu/drm/drm_agpsupport.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_drawable.c [deleted file]
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_memory.c
drivers/gpu/drm/drm_proc.c
drivers/gpu/drm/drm_scatter.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i810/i810_drv.c
drivers/gpu/drm/i830/i830_drv.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo_ch7017.c
drivers/gpu/drm/i915/dvo_ch7xxx.c
drivers/gpu/drm/i915/dvo_ivch.c
drivers/gpu/drm/i915/dvo_sil164.c
drivers/gpu/drm/i915/dvo_tfp410.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_debug.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_opregion.c [deleted file]
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_acpi.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_opregion.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/mga/mga_drv.c
drivers/gpu/drm/nouveau/Kconfig
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_calc.c
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_dma.c
drivers/gpu/drm/nouveau/nouveau_dma.h
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_encoder.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_grctx.h
drivers/gpu/drm/nouveau/nouveau_hw.c
drivers/gpu/drm/nouveau/nouveau_i2c.c
drivers/gpu/drm/nouveau/nouveau_i2c.h
drivers/gpu/drm/nouveau/nouveau_irq.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_perf.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_pm.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_pm.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_ramht.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_ramht.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_reg.h
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nouveau_temp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_volt.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv04_crtc.c
drivers/gpu/drm/nouveau/nv04_dac.c
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv04_fbcon.c
drivers/gpu/drm/nouveau/nv04_fifo.c
drivers/gpu/drm/nouveau/nv04_instmem.c
drivers/gpu/drm/nouveau/nv04_pm.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv04_tv.c
drivers/gpu/drm/nouveau/nv10_fifo.c
drivers/gpu/drm/nouveau/nv10_graph.c
drivers/gpu/drm/nouveau/nv17_tv.c
drivers/gpu/drm/nouveau/nv17_tv.h
drivers/gpu/drm/nouveau/nv17_tv_modes.c
drivers/gpu/drm/nouveau/nv20_graph.c
drivers/gpu/drm/nouveau/nv40_fifo.c
drivers/gpu/drm/nouveau/nv40_graph.c
drivers/gpu/drm/nouveau/nv40_grctx.c
drivers/gpu/drm/nouveau/nv50_crtc.c
drivers/gpu/drm/nouveau/nv50_cursor.c
drivers/gpu/drm/nouveau/nv50_dac.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_fb.c
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nv50_fifo.c
drivers/gpu/drm/nouveau/nv50_graph.c
drivers/gpu/drm/nouveau/nv50_grctx.c
drivers/gpu/drm/nouveau/nv50_instmem.c
drivers/gpu/drm/nouveau/nv50_pm.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nv50_sor.c
drivers/gpu/drm/nouveau/nva3_pm.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvc0_fifo.c
drivers/gpu/drm/nouveau/nvc0_instmem.c
drivers/gpu/drm/nouveau/nvreg.h
drivers/gpu/drm/r128/r128_drv.c
drivers/gpu/drm/radeon/Makefile
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_blit_kms.c [new file with mode: 0644]
drivers/gpu/drm/radeon/evergreen_blit_shaders.c [new file with mode: 0644]
drivers/gpu/drm/radeon/evergreen_blit_shaders.h [new file with mode: 0644]
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/reg_srcs/evergreen
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/savage/savage_drv.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/tdfx/tdfx_drv.c
drivers/gpu/drm/ttm/Makefile
drivers/gpu/drm/ttm/ttm_agp_backend.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_manager.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/via/via_drv.c
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/stub/Kconfig [new file with mode: 0644]
drivers/gpu/stub/Makefile [new file with mode: 0644]
drivers/gpu/stub/poulsbo.c [new file with mode: 0644]
drivers/idle/intel_idle.c
drivers/infiniband/core/agent.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/multicast.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/ud_header.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/amso1100/Kbuild
drivers/infiniband/hw/amso1100/c2_intr.c
drivers/infiniband/hw/cxgb3/Makefile
drivers/infiniband/hw/cxgb3/cxio_hal.c
drivers/infiniband/hw/cxgb3/cxio_wr.h
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb3/iwch_ev.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb3/iwch_qp.c
drivers/infiniband/hw/cxgb3/iwch_user.h
drivers/infiniband/hw/cxgb4/Makefile
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/resource.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/cxgb4/user.h
drivers/infiniband/hw/ehca/ehca_mrmw.c
drivers/infiniband/hw/ipath/Makefile
drivers/infiniband/hw/ipath/ipath_fs.c
drivers/infiniband/hw/mlx4/ah.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mthca/mthca_qp.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/qib/qib.h
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_pcie.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/hw/qib/qib_uc.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srp/ib_srp.h
drivers/macintosh/windfarm_pm121.c
drivers/md/dm-snap-persistent.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/ad525x_dpot-i2c.c
drivers/misc/ad525x_dpot-spi.c
drivers/misc/ad525x_dpot.c
drivers/misc/ad525x_dpot.h
drivers/misc/apds9802als.c [new file with mode: 0644]
drivers/misc/apds990x.c [new file with mode: 0644]
drivers/misc/bh1770glc.c [new file with mode: 0644]
drivers/misc/ibmasm/ibmasmfs.c
drivers/misc/isl29020.c [new file with mode: 0644]
drivers/misc/lkdtm.c
drivers/misc/phantom.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/net/mlx4/en_main.c
drivers/net/mlx4/en_netdev.c
drivers/net/mlx4/en_port.c
drivers/net/mlx4/en_port.h
drivers/net/mlx4/fw.c
drivers/net/mlx4/intf.c
drivers/net/mlx4/main.c
drivers/net/mlx4/mlx4_en.h
drivers/net/mlx4/port.c
drivers/oprofile/oprofilefs.c
drivers/pnp/base.h
drivers/pnp/core.c
drivers/pnp/driver.c
drivers/pnp/pnpacpi/core.c
drivers/pnp/resource.c
drivers/staging/pohmelfs/inode.c
drivers/usb/core/inode.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/inode.c
drivers/video/Kconfig
drivers/video/au1200fb.c
drivers/xen/events.c
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenfs/Makefile
drivers/xen/xenfs/privcmd.c [new file with mode: 0644]
drivers/xen/xenfs/super.c
drivers/xen/xenfs/xenfs.h
drivers/xen/xenfs/xenstored.c [new file with mode: 0644]
firmware/ihex2fw.c
fs/9p/vfs_inode.c
fs/Kconfig
fs/affs/file.c
fs/affs/inode.c
fs/afs/dir.c
fs/afs/write.c
fs/aio.c
fs/anon_inodes.c
fs/autofs4/inode.c
fs/bfs/dir.c
fs/binfmt_misc.c
fs/block_dev.c
fs/btrfs/inode.c
fs/buffer.c
fs/ceph/addr.c
fs/cifs/file.c
fs/coda/dir.c
fs/configfs/inode.c
fs/dcache.c
fs/debugfs/inode.c
fs/direct-io.c
fs/exec.c
fs/exofs/file.c
fs/exofs/namei.c
fs/exportfs/expfs.c
fs/ext2/dir.c
fs/ext2/ext2.h
fs/ext2/inode.c
fs/ext2/namei.c
fs/ext2/super.c
fs/ext2/xattr.c
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/file_table.c
fs/freevxfs/vxfs_inode.c
fs/fs-writeback.c
fs/fuse/control.c
fs/fuse/dev.c
fs/gfs2/aops.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/gfs2/ops_inode.c
fs/gfs2/super.c
fs/hfs/hfs_fs.h
fs/hfs/inode.c
fs/hfs/mdb.c
fs/hfs/super.c
fs/hfsplus/dir.c
fs/hfsplus/inode.c
fs/hostfs/hostfs.h
fs/hostfs/hostfs_kern.c
fs/hostfs/hostfs_user.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/internal.h
fs/isofs/inode.c
fs/jffs2/dir.c
fs/jfs/jfs_imap.c
fs/jfs/jfs_txnmgr.c
fs/jfs/namei.c
fs/libfs.c
fs/locks.c
fs/logfs/dir.c
fs/minix/namei.c
fs/namei.c
fs/namespace.c
fs/nfs/dir.c
fs/nfs/getroot.c
fs/nfs/nfsroot.c
fs/nfs/write.c
fs/nfsd/vfs.c
fs/nilfs2/namei.c
fs/nilfs2/segment.c
fs/notify/fsnotify.c
fs/notify/inode_mark.c
fs/ntfs/super.c
fs/ocfs2/aops.c
fs/ocfs2/aops.h
fs/ocfs2/dlmfs/dlmfs.c
fs/ocfs2/file.c
fs/ocfs2/namei.c
fs/pipe.c
fs/proc/Kconfig
fs/proc/base.c
fs/proc/proc_sysctl.c
fs/ramfs/inode.c
fs/read_write.c
fs/reiserfs/inode.c
fs/reiserfs/ioctl.c
fs/reiserfs/namei.c
fs/reiserfs/xattr.c
fs/seq_file.c
fs/smbfs/dir.c
fs/smbfs/inode.c
fs/smbfs/proc.c
fs/super.c
fs/sysv/namei.c
fs/ubifs/dir.c
fs/udf/namei.c
fs/ufs/namei.c
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/xfs_inode.h
include/acpi/acpi_bus.h
include/acpi/acpi_drivers.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/actypes.h
include/acpi/platform/acenv.h
include/acpi/platform/acgcc.h
include/acpi/platform/aclinux.h
include/asm-generic/vmlinux.lds.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/drm/i915_drm.h
include/drm/intel-gtt.h [new file with mode: 0644]
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/vmwgfx_drm.h
include/linux/acpi.h
include/linux/backing-dev.h
include/linux/buffer_head.h
include/linux/completion.h
include/linux/fs.h
include/linux/gfp.h
include/linux/highmem.h
include/linux/i2c/apds990x.h [new file with mode: 0644]
include/linux/i2c/bh1770glc.h [new file with mode: 0644]
include/linux/idr.h
include/linux/io-mapping.h
include/linux/kernel.h
include/linux/kfifo.h
include/linux/list.h
include/linux/math64.h
include/linux/memory_hotplug.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/driver.h
include/linux/mlx4/qp.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/moduleparam.h
include/linux/pageblock-flags.h
include/linux/pagemap.h
include/linux/ratelimit.h
include/linux/reiserfs_fs.h
include/linux/rmap.h
include/linux/sched.h
include/linux/sfi.h
include/linux/swap.h
include/linux/types.h
include/linux/vmalloc.h
include/linux/workqueue.h
include/linux/writeback.h
include/rdma/ib_addr.h
include/rdma/ib_pack.h
include/rdma/ib_user_verbs.h
include/rdma/ib_verbs.h
include/scsi/srp.h
include/trace/events/ext4.h
include/trace/events/vmscan.h
include/trace/events/writeback.h
include/xen/Kbuild
include/xen/interface/memory.h
include/xen/privcmd.h [new file with mode: 0644]
include/xen/xen-ops.h
init/do_mounts.c
init/do_mounts_md.c
init/do_mounts_rd.c
init/initramfs.c
init/noinitramfs.c
ipc/mqueue.c
kernel/cgroup.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/kexec.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/printk.c
kernel/stop_machine.c
kernel/sysctl.c
kernel/user.c
kernel/wait.c
kernel/workqueue.c
lib/Kconfig.debug
lib/bitmap.c
lib/div64.c
lib/idr.c
lib/list_sort.c
lib/parser.c
lib/percpu_counter.c
lib/vsprintf.c
mm/backing-dev.c
mm/dmapool.c
mm/filemap.c
mm/highmem.c
mm/hugetlb.c
mm/internal.h
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mremap.c
mm/nommu.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/page_isolation.c
mm/rmap.c
mm/shmem.c
mm/slab.c
mm/swapfile.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
net/socket.c
net/sunrpc/rpc_pipe.c
net/unix/af_unix.c
scripts/checkpatch.pl
scripts/get_maintainer.pl
security/apparmor/path.c
security/inode.c
security/selinux/selinuxfs.c
security/tomoyo/realpath.c

index feca0758391e145a8fb76e5a7059ce7f057e79ab..22edcbb9ddaf7602d8599c89e8d0a643edc4d439 100644 (file)
      <sect1><title>Delaying, scheduling, and timer routines</title>
 !Iinclude/linux/sched.h
 !Ekernel/sched.c
+!Iinclude/linux/completion.h
 !Ekernel/timer.c
+     </sect1>
+     <sect1><title>Wait queues and Wake events</title>
+!Iinclude/linux/wait.h
+!Ekernel/wait.c
      </sect1>
      <sect1><title>High-resolution timers</title>
 !Iinclude/linux/ktime.h
index 6b4e07f28b695893f2a48924cffbb06994b9750b..7160652a8736fc5f2978f720206ec3221570fd51 100644 (file)
@@ -93,6 +93,12 @@ X!Ilib/string.c
 !Elib/crc32.c
 !Elib/crc-ccitt.c
      </sect1>
+
+     <sect1 id="idr"><title>idr/ida Functions</title>
+!Pinclude/linux/idr.h idr sync
+!Plib/idr.c IDA description
+!Elib/idr.c
+     </sect1>
   </chapter>
 
   <chapter id="mm">
index 2db4283efa8dbf99e0e6576edc8d0a054d3b299d..8a817f656f0a808dffb344c6a46bab960cb8a0ae 100644 (file)
@@ -349,21 +349,36 @@ call this method upon the IO completion.
 
 --------------------------- block_device_operations -----------------------
 prototypes:
-       int (*open) (struct inode *, struct file *);
-       int (*release) (struct inode *, struct file *);
-       int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
+       int (*open) (struct block_device *, fmode_t);
+       int (*release) (struct gendisk *, fmode_t);
+       int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+       int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+       int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
        int (*media_changed) (struct gendisk *);
+       void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
+       int (*getgeo)(struct block_device *, struct hd_geometry *);
+       void (*swap_slot_free_notify) (struct block_device *, unsigned long);
 
 locking rules:
-                       BKL     bd_sem
-open:                  yes     yes
-release:               yes     yes
-ioctl:                 yes     no
+                       BKL     bd_mutex
+open:                  no      yes
+release:               no      yes
+ioctl:                 no      no
+compat_ioctl:          no      no
+direct_access:         no      no
 media_changed:         no      no
+unlock_native_capacity:        no      no
 revalidate_disk:       no      no
+getgeo:                        no      no
+swap_slot_free_notify: no      no      (see below)
+
+media_changed, unlock_native_capacity and revalidate_disk are called only from
+check_disk_change().
+
+swap_slot_free_notify is called with swap_lock and sometimes the page lock
+held.
 
-The last two are called only from check_disk_change().
 
 --------------------------- file_operations -------------------------------
 prototypes:
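
As an illustration of the table above, a minimal sketch of a hypothetical
driver's block_device_operations using the new BKL-free prototypes (all
"sketch_*" names are invented for the example):

        #include <linux/module.h>
        #include <linux/blkdev.h>

        static int sketch_open(struct block_device *bdev, fmode_t mode)
        {
                /* entered with bd_mutex held; the BKL is no longer taken */
                return 0;
        }

        static int sketch_release(struct gendisk *disk, fmode_t mode)
        {
                return 0;       /* also runs under bd_mutex, per the table */
        }

        static const struct block_device_operations sketch_ops = {
                .owner   = THIS_MODULE,
                .open    = sketch_open,
                .release = sketch_release,
        };
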
index c3852041a21f5dc6fddee3024e53e2308ce45c06..b9b4192ea8b588ebd2d5be15767e69b93c8a7d0a 100644 (file)
@@ -6,7 +6,7 @@ Id mapper is used by NFS to translate user and group ids into names, and to
 translate user and group names into ids.  Part of this translation involves
 performing an upcall to userspace to request the information.  Id mapper will
 use request-key to perform this upcall and cache the result.  The program
-/usr/sbin/nfs.upcall should be called by request-key, and will perform the
+/usr/sbin/nfs.idmap should be called by request-key, and will perform the
 translation and initialize a key with the resulting information.
 
  NFS_USE_NEW_IDMAPPER must be selected when configuring the kernel to use this
@@ -20,12 +20,12 @@ direct the upcall.  The following line should be added:
 
 #OP    TYPE    DESCRIPTION     CALLOUT INFO    PROGRAM ARG1 ARG2 ARG3 ...
 #======        ======= =============== =============== ===============================
-create id_resolver     *       *               /usr/sbin/nfs.upcall %k %d 600
+create id_resolver     *       *               /usr/sbin/nfs.idmap %k %d 600
 
-This will direct all id_resolver requests to the program /usr/sbin/nfs.upcall.
+This will direct all id_resolver requests to the program /usr/sbin/nfs.idmap.
 The last parameter, 600, defines how many seconds into the future the key will
-expire.  This parameter is optional for /usr/sbin/nfs.upcall.  When the timeout
-is not specified, nfs.upcall will default to 600 seconds.
+expire.  This parameter is optional for /usr/sbin/nfs.idmap.  When the timeout
+is not specified, nfs.idmap will default to 600 seconds.
 
 id mapper uses for key descriptions:
          uid:  Find the UID for the given user
@@ -39,29 +39,29 @@ would edit your request-key.conf so it look similar to this:
 
 #OP    TYPE    DESCRIPTION     CALLOUT INFO    PROGRAM ARG1 ARG2 ARG3 ...
 #======        ======= =============== =============== ===============================
-create id_resolver     uid:*   *               /some/other/program  %k %d 600
-create id_resolver     *       *               /usr/sbin/nfs.upcall %k %d 600
+create id_resolver     uid:*   *               /some/other/program %k %d 600
+create id_resolver     *       *               /usr/sbin/nfs.idmap %k %d 600
 
 Notice that the new line was added above the line for the generic program.
 request-key will find the first matching line and corresponding program.  In
 this case, /some/other/program will handle all uid lookups and
-/usr/sbin/nfs.upcall will handle gid, user, and group lookups.
+/usr/sbin/nfs.idmap will handle gid, user, and group lookups.
 
 See <file:Documentation/keys-request-keys.txt> for more information about the
 request-key function.
 
 
-==========
-nfs.upcall
-==========
-nfs.upcall is designed to be called by request-key, and should not be run "by
+=========
+nfs.idmap
+=========
+nfs.idmap is designed to be called by request-key, and should not be run "by
 hand".  This program takes two arguments, a serialized key and a key
 description.  The serialized key is first converted into a key_serial_t, and
 then passed as an argument to keyctl_instantiate (both are part of keyutils.h).
 
-The actual lookups are performed by functions found in nfsidmap.h.  nfs.upcall
+The actual lookups are performed by functions found in nfsidmap.h.  nfs.idmap
 determines the correct function to call by looking at the first part of the
 description string.  For example, a uid lookup description will appear as
 "uid:user@domain".
 
-nfs.upcall will return 0 if the key was instantiated, and non-zero otherwise.
+nfs.idmap will return 0 if the key was instantiated, and non-zero otherwise.
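
For illustration, a minimal user-space sketch of the callout flow described
above (a hypothetical stand-in for nfs.idmap; only key_serial_t,
keyctl_instantiate() and KEY_SPEC_REQUESTOR_KEYRING from keyutils.h are
assumed, linked with -lkeyutils):

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <keyutils.h>

        /* argv[1]: serialized key, argv[2]: description, e.g. "uid:user@domain" */
        int main(int argc, char **argv)
        {
                key_serial_t key;
                const char *payload = "65534";  /* hypothetical resolved id */

                if (argc != 3)
                        return 1;

                key = (key_serial_t)strtol(argv[1], NULL, 10);

                /* a real resolver would dispatch on the "uid:"/"gid:" prefix
                   of argv[2] and look the name up via the nfsidmap.h helpers */
                if (keyctl_instantiate(key, payload, strlen(payload),
                                       KEY_SPEC_REQUESTOR_KEYRING) < 0) {
                        perror("keyctl_instantiate");
                        return 1;
                }
                return 0;
        }
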
index a6aca87408830b12aa0302dad05da5fa79244617..a563b74c7aef400ccfcd8e5e0a10cd03b94523f1 100644 (file)
@@ -374,13 +374,13 @@ Swap:                  0 kB
 KernelPageSize:        4 kB
 MMUPageSize:           4 kB
 
-The first  of these lines shows  the same information  as is displayed for the
-mapping in /proc/PID/maps.  The remaining lines show  the size of the mapping,
-the amount of the mapping that is currently resident in RAM, the "proportional
-set size” (divide each shared page by the number of processes sharing it), the
-number of clean and dirty shared pages in the mapping, and the number of clean
-and dirty private pages in the mapping.  The "Referenced" indicates the amount
-of memory currently marked as referenced or accessed.
+The first of these lines shows the same information as is displayed for the
+mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
+(size), the amount of the mapping that is currently resident in RAM (RSS), the
+process' proportional share of this mapping (PSS), the number of clean and
+dirty shared pages in the mapping, and the number of clean and dirty private
+pages in the mapping.  The "Referenced" indicates the amount of memory
+currently marked as referenced or accessed.
 
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
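
As a worked example of the PSS accounting described above: a mapping with
100 kB of private pages plus 400 kB shared equally among four processes
contributes 100 + 400/4 = 200 kB to each of those processes' PSS.
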
index fc0e39af43c32a42343f97daed5a5d285f844863..4ede421c9687a71d33027b95118c90dc743da0fe 100644 (file)
@@ -62,10 +62,10 @@ replicas continue to be exactly same.
        # mount /dev/sd0  /tmp/a
 
        #ls /tmp/a
-       t1 t2 t2
+       t1 t2 t3
 
        #ls /mnt/a
-       t1 t2 t2
+       t1 t2 t3
 
        Note that the mount has propagated to the mount at /mnt as well.
 
diff --git a/Documentation/misc-devices/apds990x.txt b/Documentation/misc-devices/apds990x.txt
new file mode 100644 (file)
index 0000000..d5408ca
--- /dev/null
@@ -0,0 +1,111 @@
+Kernel driver apds990x
+======================
+
+Supported chips:
+Avago APDS990X
+
+Data sheet:
+Not freely available
+
+Author:
+Samu Onkalo <samu.p.onkalo@nokia.com>
+
+Description
+-----------
+
+APDS990x is a combined ambient light and proximity sensor. The ALS and
+proximity functions are highly connected: the ALS measurement path must be
+running while the proximity function is enabled.
+
+ALS produces raw measurement values for two channels: Clear channel
+(infrared + visible light) and IR only. However, threshold comparisons happen
+using clear channel only. The lux value and the threshold level on the HW
+may vary considerably depending on the spectrum of the light source.
+
+The driver makes the necessary conversions in both directions so that the
+user handles only lux values. The lux value is calculated using information
+from both channels. The HW threshold level is calculated from the given lux
+value to match the current type of lighting. Occasionally the inaccuracy of
+these estimates leads to a spurious interrupt, but that does no harm.
+
+The ALS has 4 different gain steps, and the driver automatically selects a
+suitable one. After each measurement, the reliability of the result is
+estimated and a new measurement is triggered if necessary.
+
+Platform data can provide tuned values for the conversion formulas if they
+are known; otherwise the plain sensor default values are used.
+
+The proximity side is a little simpler: no complex conversions are needed,
+and it produces directly usable values.
+
+The driver controls the chip's operational state using the pm_runtime
+framework, and the voltage regulators are controlled based on that state.
+
+SYSFS
+-----
+
+
+chip_id
+       RO - shows detected chip type and version
+
+power_state
+       RW - enable / disable chip. Uses counting logic
+            1 enables the chip
+            0 disables the chip
+lux0_input
+       RO - measured lux value
+            sysfs_notify called when threshold interrupt occurs
+
+lux0_sensor_range
+       RO - lux0_input max value. Never actually reached, since the sensor
+            tends to saturate well before that. The real max value varies
+            depending on the light spectrum etc.
+
+lux0_rate
+       RW - measurement rate in Hz
+
+lux0_rate_avail
+       RO - supported measurement rates
+
+lux0_calibscale
+       RW - calibration value. Set to neutral value by default.
+            Output results are multiplied with calibscale / calibscale_default
+            value.
+
+lux0_calibscale_default
+       RO - neutral calibration value
+
+lux0_thresh_above_value
+       RW - HI level threshold value. All results above the value
+            trigger an interrupt. 65535 (i.e. sensor_range) disables the above
+            interrupt.
+
+lux0_thresh_below_value
+       RW - LO level threshold value. All results below the value
+            trigger an interrupt. 0 disables the below interrupt.
+
+prox0_raw
+       RO - measured proximity value
+            sysfs_notify called when threshold interrupt occurs
+
+prox0_sensor_range
+       RO - prox0_raw max value (1023)
+
+prox0_raw_en
+       RW - enable / disable proximity - uses counting logic
+            1 enables the proximity
+            0 disables the proximity
+
+prox0_reporting_mode
+       RW - trigger / periodic. In "trigger" mode the driver reports one of
+            two possible values: 0 or the prox0_sensor_range value. 0 means
+            no proximity, 1023 means proximity. This causes a minimal number
+            of interrupts. In "periodic" mode the driver reports all values
+            above prox0_thresh_above. This causes more interrupts, but it
+            can give a _rough_ estimate of the distance.
+
+prox0_reporting_mode_avail
+       RO - accepted values to prox0_reporting_mode (trigger, periodic)
+
+prox0_thresh_above_value
+       RW - threshold level which triggers proximity events.
diff --git a/Documentation/misc-devices/bh1770glc.txt b/Documentation/misc-devices/bh1770glc.txt
new file mode 100644 (file)
index 0000000..7d64c01
--- /dev/null
@@ -0,0 +1,116 @@
+Kernel driver bh1770glc
+=======================
+
+Supported chips:
+ROHM BH1770GLC
+OSRAM SFH7770
+
+Data sheet:
+Not freely available
+
+Author:
+Samu Onkalo <samu.p.onkalo@nokia.com>
+
+Description
+-----------
+BH1770GLC and SFH7770 are combined ambient light and proximity sensors.
+The ALS and proximity parts operate on their own, but they share a common
+I2C interface and interrupt logic. In principle each can run independently,
+but the ALS results are used to estimate the reliability of the proximity
+sensor.
+
+ALS produces 16 bit lux values. The chip contains interrupt logic to produce
+low and high threshold interrupts.
+
+The proximity part contains an IR-LED driver for up to 3 IR LEDs. The chip
+measures the amount of reflected IR light and produces an 8-bit proximity
+result. The driver supports only one channel and uses the ALS results to
+estimate the reliability of the proximity results; thus the ALS is always
+running while proximity detection is needed.
+
+The driver uses threshold interrupts to avoid polling the values. A
+proximity-low interrupt doesn't exist in the chip, so it is simulated using
+delayed work: as long as proximity threshold-above interrupts keep arriving,
+the delayed work is pushed forward. When the proximity level drops below the
+threshold value, there are no more interrupts and the delayed work finally
+runs; this is treated as a "no proximity" indication.
+
+The chip state is controlled via the runtime pm framework when enabled in
+the config.
+
+The calibscale factor is used to hide differences between chips. By default
+it is set to the neutral value, meaning a factor of 1.00. To get proper
+values, a calibrated light source is needed as a reference; the calibscale
+factor is then set so that the measurement produces roughly the expected lux
+value.
+
+SYSFS
+-----
+
+chip_id
+       RO - shows detected chip type and version
+
+power_state
+       RW - enable / disable chip. Uses counting logic
+            1 enables the chip
+            0 disables the chip
+
+lux0_input
+       RO - measured lux value
+            sysfs_notify called when threshold interrupt occurs
+
+lux0_sensor_range
+       RO - lux0_input max value
+
+lux0_rate
+       RW - measurement rate in Hz
+
+lux0_rate_avail
+       RO - supported measurement rates
+
+lux0_thresh_above_value
+       RW - HI level threshold value. All results above the value
+            trigger an interrupt. 65535 (i.e. sensor_range) disables the above
+            interrupt.
+
+lux0_thresh_below_value
+       RW - LO level threshold value. All results below the value
+            trigger an interrupt. 0 disables the below interrupt.
+
+lux0_calibscale
+       RW - calibration value. Set to neutral value by default.
+            Output results are multiplied with calibscale / calibscale_default
+            value.
+
+lux0_calibscale_default
+       RO - neutral calibration value
+
+prox0_raw
+       RO - measured proximity value
+            sysfs_notify called when threshold interrupt occurs
+
+prox0_sensor_range
+       RO - prox0_raw max value
+
+prox0_raw_en
+       RW - enable / disable proximity - uses counting logic
+            1 enables the proximity
+            0 disables the proximity
+
+prox0_thresh_above_count
+       RW - number of proximity interrupts needed before triggering the event
+
+prox0_rate_above
+       RW - Measurement rate (in Hz) when the level is above threshold
+            i.e. when proximity on has been reported.
+
+prox0_rate_below
+       RW - Measurement rate (in Hz) when the level is below threshold
+            i.e. when proximity off has been reported.
+
+prox0_rate_avail
+       RO - Supported proximity measurement rates in Hz
+
+prox0_thresh_above0_value
+       RW - threshold level which triggers proximity events.
+            Filtered by persistence filter (prox0_thresh_above_count)
+
+prox0_thresh_above1_value
+       RW - threshold level which triggers an event immediately
index 5c17196c8fe9172f5c2ae7364c1b9d7966d6cb73..312e3754e8c5599b00c644d64be5b9907bb64ada 100644 (file)
@@ -75,7 +75,7 @@ On all -  write a character to /proc/sysrq-trigger.  e.g.:
 
 'f'    - Will call oom_kill to kill a memory hog process.
 
-'g'    - Used by kgdb on ppc and sh platforms.
+'g'    - Used by kgdb (kernel debugger)
 
 'h'     - Will display help (actually any other key than those listed
           here will display help. but 'h' is easy to remember :-)
@@ -110,12 +110,15 @@ On all -  write a character to /proc/sysrq-trigger.  e.g.:
 
 'u'     - Will attempt to remount all mounted filesystems read-only.
 
-'v'    - Dumps Voyager SMP processor info to your console.
+'v'    - Forcefully restores framebuffer console
+'v'    - Causes ETM buffer dump [ARM-specific]
 
 'w'    - Dumps tasks that are in uninterruptable (blocked) state.
 
 'x'    - Used by xmon interface on ppc/powerpc platforms.
 
+'y'    - Show global CPU Registers [SPARC-64 specific]
+
 'z'    - Dump the ftrace buffer
 
 '0'-'9' - Sets the console log level, controlling which kernel messages
index 4bfafb7bc4c5852b2796fa15572a753bcb717a6e..9a3e7012c1900be12f2967c3a71dffb9a71c772d 100644 (file)
@@ -97,6 +97,33 @@ hpet_open_close(int argc, const char **argv)
 void
 hpet_info(int argc, const char **argv)
 {
+       struct hpet_info        info;
+       int                     fd;
+
+       if (argc != 1) {
+               fprintf(stderr, "hpet_info: device-name\n");
+               return;
+       }
+
+       fd = open(argv[0], O_RDONLY);
+       if (fd < 0) {
+               fprintf(stderr, "hpet_info: open of %s failed\n", argv[0]);
+               return;
+       }
+
+       if (ioctl(fd, HPET_INFO, &info) < 0) {
+               fprintf(stderr, "hpet_info: failed to get info\n");
+               goto out;
+       }
+
+       fprintf(stderr, "hpet_info: hi_irqfreq 0x%lx hi_flags 0x%lx ",
+               info.hi_ireqfreq, info.hi_flags);
+       fprintf(stderr, "hi_hpet %d hi_timer %d\n",
+               info.hi_hpet, info.hi_timer);
+
+out:
+       close(fd);
+       return;
 }
 
 void
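
A usage sketch, assuming the example program's command/device-name argument
convention implied by the error message above:

        ./hpet_example info /dev/hpet
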
index 1b55146d1c8d9b5ef8eee34028c6af11ff41a24c..b3e73ddb1567902fdeb9d65e1a661db9bca7780d 100644 (file)
@@ -46,7 +46,7 @@ use constant HIGH_KSWAPD_LATENCY              => 20;
 use constant HIGH_KSWAPD_REWAKEUP              => 21;
 use constant HIGH_NR_SCANNED                   => 22;
 use constant HIGH_NR_TAKEN                     => 23;
-use constant HIGH_NR_RECLAIM                   => 24;
+use constant HIGH_NR_RECLAIMED                 => 24;
 use constant HIGH_NR_CONTIG_DIRTY              => 25;
 
 my %perprocesspid;
@@ -58,11 +58,13 @@ my $opt_read_procstat;
 my $total_wakeup_kswapd;
 my ($total_direct_reclaim, $total_direct_nr_scanned);
 my ($total_direct_latency, $total_kswapd_latency);
+my ($total_direct_nr_reclaimed);
 my ($total_direct_writepage_file_sync, $total_direct_writepage_file_async);
 my ($total_direct_writepage_anon_sync, $total_direct_writepage_anon_async);
 my ($total_kswapd_nr_scanned, $total_kswapd_wake);
 my ($total_kswapd_writepage_file_sync, $total_kswapd_writepage_file_async);
 my ($total_kswapd_writepage_anon_sync, $total_kswapd_writepage_anon_async);
+my ($total_kswapd_nr_reclaimed);
 
 # Catch sigint and exit on request
 my $sigint_report = 0;
@@ -104,7 +106,7 @@ my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
 my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
 my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
 my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) contig_taken=([0-9]*) contig_dirty=([0-9]*) contig_failed=([0-9]*)';
-my $regex_lru_shrink_inactive_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*)';
+my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
 my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
 my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
 
@@ -203,8 +205,8 @@ $regex_lru_shrink_inactive = generate_traceevent_regex(
                        "vmscan/mm_vmscan_lru_shrink_inactive",
                        $regex_lru_shrink_inactive_default,
                        "nid", "zid",
-                       "lru",
-                       "nr_scanned", "nr_reclaimed", "priority");
+                       "nr_scanned", "nr_reclaimed", "priority",
+                       "flags");
 $regex_lru_shrink_active = generate_traceevent_regex(
                        "vmscan/mm_vmscan_lru_shrink_active",
                        $regex_lru_shrink_active_default,
@@ -375,6 +377,16 @@ EVENT_PROCESS:
                        my $nr_contig_dirty = $7;
                        $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
                        $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
+               } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
+                       $details = $5;
+                       if ($details !~ /$regex_lru_shrink_inactive/o) {
+                               print "WARNING: Failed to parse mm_vmscan_lru_shrink_inactive as expected\n";
+                               print "         $details\n";
+                               print "         $regex_lru_shrink_inactive/o\n";
+                               next;
+                       }
+                       my $nr_reclaimed = $4;
+                       $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED} += $nr_reclaimed;
                } elsif ($tracepoint eq "mm_vmscan_writepage") {
                        $details = $5;
                        if ($details !~ /$regex_writepage/o) {
@@ -464,8 +476,8 @@ sub dump_stats {
 
        # Print out process activity
        printf("\n");
-       printf("%-" . $max_strlen . "s %8s %10s   %8s   %8s %8s %8s %8s\n", "Process", "Direct",  "Wokeup", "Pages",   "Pages",   "Pages",     "Time");
-       printf("%-" . $max_strlen . "s %8s %10s   %8s   %8s %8s %8s %8s\n", "details", "Rclms",   "Kswapd", "Scanned", "Sync-IO", "ASync-IO",  "Stalled");
+       printf("%-" . $max_strlen . "s %8s %10s   %8s %8s  %8s %8s %8s %8s\n", "Process", "Direct",  "Wokeup", "Pages",   "Pages",   "Pages",   "Pages",     "Time");
+       printf("%-" . $max_strlen . "s %8s %10s   %8s %8s  %8s %8s %8s %8s\n", "details", "Rclms",   "Kswapd", "Scanned", "Rclmed",  "Sync-IO", "ASync-IO",  "Stalled");
        foreach $process_pid (keys %stats) {
 
                if (!$stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN}) {
@@ -475,6 +487,7 @@ sub dump_stats {
                $total_direct_reclaim += $stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN};
                $total_wakeup_kswapd += $stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
                $total_direct_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
+               $total_direct_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
                $total_direct_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
                $total_direct_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
                $total_direct_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
@@ -489,11 +502,12 @@ sub dump_stats {
                        $index++;
                }
 
-               printf("%-" . $max_strlen . "s %8d %10d   %8u   %8u %8u %8.3f",
+               printf("%-" . $max_strlen . "s %8d %10d   %8u %8u  %8u %8u %8.3f",
                        $process_pid,
                        $stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN},
                        $stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD},
                        $stats{$process_pid}->{HIGH_NR_SCANNED},
+                       $stats{$process_pid}->{HIGH_NR_RECLAIMED},
                        $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
                        $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC},
                        $this_reclaim_delay / 1000);
@@ -529,8 +543,8 @@ sub dump_stats {
 
        # Print out kswapd activity
        printf("\n");
-       printf("%-" . $max_strlen . "s %8s %10s   %8s   %8s %8s %8s\n", "Kswapd",   "Kswapd",  "Order",     "Pages",   "Pages",  "Pages");
-       printf("%-" . $max_strlen . "s %8s %10s   %8s   %8s %8s %8s\n", "Instance", "Wakeups", "Re-wakeup", "Scanned", "Sync-IO", "ASync-IO");
+       printf("%-" . $max_strlen . "s %8s %10s   %8s   %8s %8s %8s\n", "Kswapd",   "Kswapd",  "Order",     "Pages",   "Pages",   "Pages",  "Pages");
+       printf("%-" . $max_strlen . "s %8s %10s   %8s   %8s %8s %8s\n", "Instance", "Wakeups", "Re-wakeup", "Scanned", "Rclmed",  "Sync-IO", "ASync-IO");
        foreach $process_pid (keys %stats) {
 
                if (!$stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE}) {
@@ -539,16 +553,18 @@ sub dump_stats {
 
                $total_kswapd_wake += $stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE};
                $total_kswapd_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
+               $total_kswapd_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
                $total_kswapd_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
                $total_kswapd_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
                $total_kswapd_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
                $total_kswapd_writepage_anon_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC};
 
-               printf("%-" . $max_strlen . "s %8d %10d   %8u   %8i %8u",
+               printf("%-" . $max_strlen . "s %8d %10d   %8u %8u  %8i %8u",
                        $process_pid,
                        $stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE},
                        $stats{$process_pid}->{HIGH_KSWAPD_REWAKEUP},
                        $stats{$process_pid}->{HIGH_NR_SCANNED},
+                       $stats{$process_pid}->{HIGH_NR_RECLAIMED},
                        $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
                        $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC});
 
@@ -579,6 +595,7 @@ sub dump_stats {
        print "\nSummary\n";
        print "Direct reclaims:                         $total_direct_reclaim\n";
        print "Direct reclaim pages scanned:            $total_direct_nr_scanned\n";
+       print "Direct reclaim pages reclaimed:          $total_direct_nr_reclaimed\n";
        print "Direct reclaim write file sync I/O:      $total_direct_writepage_file_sync\n";
        print "Direct reclaim write anon sync I/O:      $total_direct_writepage_anon_sync\n";
        print "Direct reclaim write file async I/O:     $total_direct_writepage_file_async\n";
@@ -588,6 +605,7 @@ sub dump_stats {
        print "\n";
        print "Kswapd wakeups:                          $total_kswapd_wake\n";
        print "Kswapd pages scanned:                    $total_kswapd_nr_scanned\n";
+       print "Kswapd pages reclaimed:                  $total_kswapd_nr_reclaimed\n";
        print "Kswapd reclaim write file sync I/O:      $total_kswapd_writepage_file_sync\n";
        print "Kswapd reclaim write anon sync I/O:      $total_kswapd_writepage_anon_sync\n";
        print "Kswapd reclaim write file async I/O:     $total_kswapd_writepage_file_async\n";
@@ -612,6 +630,7 @@ sub aggregate_perprocesspid() {
                $perprocess{$process}->{MM_VMSCAN_WAKEUP_KSWAPD} += $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
                $perprocess{$process}->{HIGH_KSWAPD_REWAKEUP} += $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP};
                $perprocess{$process}->{HIGH_NR_SCANNED} += $perprocesspid{$process_pid}->{HIGH_NR_SCANNED};
+               $perprocess{$process}->{HIGH_NR_RECLAIMED} += $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED};
                $perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
                $perprocess{$process}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
                $perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
diff --git a/Documentation/vm/highmem.txt b/Documentation/vm/highmem.txt
new file mode 100644 (file)
index 0000000..4324d24
--- /dev/null
@@ -0,0 +1,162 @@
+
+                            ====================
+                            HIGH MEMORY HANDLING
+                            ====================
+
+By: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+Contents:
+
+ (*) What is high memory?
+
+ (*) Temporary virtual mappings.
+
+ (*) Using kmap_atomic.
+
+ (*) Cost of temporary mappings.
+
+ (*) i386 PAE.
+
+
+====================
+WHAT IS HIGH MEMORY?
+====================
+
+High memory (highmem) is used when the size of physical memory approaches or
+exceeds the maximum size of virtual memory.  At that point it becomes
+impossible for the kernel to keep all of the available physical memory mapped
+at all times.  This means the kernel needs to start using temporary mappings of
+the pieces of physical memory that it wants to access.
+
+The part of (physical) memory not covered by a permanent mapping is what we
+refer to as 'highmem'.  There are various architecture dependent constraints on
+where exactly that border lies.
+
+In the i386 arch, for example, we choose to map the kernel into every process's
+VM space so that we don't have to pay the full TLB invalidation costs for
+kernel entry/exit.  This means the available virtual memory space (4GiB on
+i386) has to be divided between user and kernel space.
+
+The traditional split for architectures using this approach is 3:1, 3GiB for
+userspace and the top 1GiB for kernel space:
+
+               +--------+ 0xffffffff
+               | Kernel |
+               +--------+ 0xc0000000
+               |        |
+               | User   |
+               |        |
+               +--------+ 0x00000000
+
+This means that the kernel can at most map 1GiB of physical memory at any one
+time, but because we need virtual address space for other things - including
+temporary maps to access the rest of the physical memory - the actual direct
+map will typically be less (usually around ~896MiB).
+
+Other architectures that have mm context tagged TLBs can have separate kernel
+and user maps.  Some hardware (like some ARMs), however, has limited virtual
+space when it uses mm context tags.
+
+
+==========================
+TEMPORARY VIRTUAL MAPPINGS
+==========================
+
+The kernel contains several ways of creating temporary mappings:
+
+ (*) vmap().  This can be used to make a long duration mapping of multiple
+     physical pages into a contiguous virtual space.  It needs global
+     synchronization to unmap.
+
+ (*) kmap().  This permits a short duration mapping of a single page.  It needs
+     global synchronization, but is amortized somewhat.  It is also prone to
+     deadlocks when used in a nested fashion, and so it is not recommended for
+     new code.
+
+ (*) kmap_atomic().  This permits a very short duration mapping of a single
+     page.  Since the mapping is restricted to the CPU that issued it, it
+     performs well, but the issuing task is therefore required to stay on that
+     CPU until it has finished, lest some other task displace its mappings.
+
+     kmap_atomic() may also be used by interrupt contexts, since it does not
+     sleep and the caller may not sleep until after kunmap_atomic() is called.
+
+     It may be assumed that k[un]map_atomic() won't fail.
+
+
+=================
+USING KMAP_ATOMIC
+=================
+
+When and where to use kmap_atomic() is straightforward.  It is used when code
+wants to access the contents of a page that might be allocated from high memory
+(see __GFP_HIGHMEM), for example a page in the pagecache.  The API has two
+functions, and they can be used in a manner similar to the following:
+
+       /* Find the page of interest. */
+       struct page *page = find_get_page(mapping, offset);
+
+       /* Gain access to the contents of that page. */
+       void *vaddr = kmap_atomic(page);
+
+       /* Do something to the contents of that page. */
+       memset(vaddr, 0, PAGE_SIZE);
+
+       /* Unmap that page. */
+       kunmap_atomic(vaddr);
+
+Note that the kunmap_atomic() call takes the result of the kmap_atomic() call,
+not its argument.
+
+If you need to map two pages because you want to copy from one page to
+another, you need to keep the kmap_atomic calls strictly nested, like:
+
+       vaddr1 = kmap_atomic(page1);
+       vaddr2 = kmap_atomic(page2);
+
+       memcpy(vaddr1, vaddr2, PAGE_SIZE);
+
+       kunmap_atomic(vaddr2);
+       kunmap_atomic(vaddr1);
+
+
+==========================
+COST OF TEMPORARY MAPPINGS
+==========================
+
+The cost of creating temporary mappings can be quite high.  The arch has to
+manipulate the kernel's page tables, the data TLB and/or the MMU's registers.
+
+If CONFIG_HIGHMEM is not set, then the kernel will try and create a mapping
+simply with a bit of arithmetic that will convert the page struct address into
+a pointer to the page contents rather than juggling mappings about.  In such a
+case, the unmap operation may be a null operation.
+
+If CONFIG_MMU is not set, then there can be no temporary mappings and no
+highmem.  In such a case, the arithmetic approach will also be used.
+
+
+========
+i386 PAE
+========
+
+The i386 arch, under some circumstances, will permit you to stick up to 64GiB
+of RAM into your 32-bit machine.  This has a number of consequences:
+
+ (*) Linux needs a page-frame structure for each page in the system and the
+     page-frames need to live in the permanent mapping, which means:
+
+ (*) you can have 896M/sizeof(struct page) page-frames at most; with struct
+     page being 32 bytes that would end up being something on the order of 112G
+     worth of pages (the arithmetic is worked through after this list); the
+     kernel, however, needs to store more than just page-frames in that
+     memory...
+
+ (*) PAE makes your page tables larger - which slows the system down as more
+     data has to be traversed in TLB fills and the like.  One advantage is
+     that PAE has more PTE bits and can provide advanced features like NX and
+     PAT.
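+
+Working through the arithmetic behind the 112G figure above (assuming 4KiB
+pages):
+
+       896MiB / 32 bytes per struct page  =  28M page frames
+       28M page frames * 4KiB per page    = 112GiB of describable RAM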
+
+The general recommendation is that you don't use more than 8GiB on a 32-bit
+machine - although more might work for you and your workload, you're pretty
+much on your own - don't expect kernel developers to really care much if things
+come apart.
index 146b8a068a4ea60813f2e6ce8fa1df0bb80bade0..debde0128cd0f661937d759923d2dd0790cc2fac 100644 (file)
@@ -243,21 +243,6 @@ F: drivers/pnp/pnpacpi/
 F:     include/linux/acpi.h
 F:     include/acpi/
 
-ACPI BATTERY DRIVERS
-M:     Alexey Starikovskiy <astarikovskiy@suse.de>
-L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
-S:     Supported
-F:     drivers/acpi/battery.c
-F:     drivers/acpi/*sbs*
-
-ACPI EC DRIVER
-M:     Alexey Starikovskiy <astarikovskiy@suse.de>
-L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
-S:     Supported
-F:     drivers/acpi/ec.c
-
 ACPI FAN DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
@@ -657,7 +642,7 @@ ARM/FARADAY FA526 PORT
 M:     Hans Ulli Kroll <ulli.kroll@googlemail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-T:     git://git.berlios.de/gemini-board
+T:     git git://git.berlios.de/gemini-board
 F:     arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
@@ -672,7 +657,7 @@ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 M:     Sascha Hauer <kernel@pengutronix.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-T:     git://git.pengutronix.de/git/imx/linux-2.6.git
+T:     git git://git.pengutronix.de/git/imx/linux-2.6.git
 F:     arch/arm/mach-mx*/
 F:     arch/arm/plat-mxc/
 
@@ -710,8 +695,7 @@ ARM/INCOME PXA270 SUPPORT
 M:     Marek Vasut <marek.vasut@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     arch/arm/mach-pxa/income.c
-F:     arch/arm/mach-pxa/include/mach-pxa/income.h
+F:     arch/arm/mach-pxa/colibri-pxa270-income.c
 
 ARM/INTEL IOP32X ARM ARCHITECTURE
 M:     Lennert Buytenhek <kernel@wantstofly.org>
@@ -758,13 +742,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-ixp4xx/
 
-ARM/INTEL RESEARCH IMOTE 2 MACHINE SUPPORT
-M:     Jonathan Cameron <jic23@cam.ac.uk>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Maintained
-F:     arch/arm/mach-pxa/imote2.c
-
-ARM/INTEL RESEARCH STARGATE 2 MACHINE SUPPORT
+ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT
 M:     Jonathan Cameron <jic23@cam.ac.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -929,40 +907,20 @@ W:        http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/mach-s3c2410/
 
-ARM/S3C2440 ARM ARCHITECTURE
+ARM/S3C244x ARM ARCHITECTURE
 M:     Ben Dooks <ben-linux@fluff.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/mach-s3c2440/
-
-ARM/S3C2442 ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2442/
-
-ARM/S3C2443 ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
 F:     arch/arm/mach-s3c2443/
 
-ARM/S3C6400 ARM ARCHITECTURE
+ARM/S3C64xx ARM ARCHITECTURE
 M:     Ben Dooks <ben-linux@fluff.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.fluff.org/ben/linux/
 S:     Maintained
-F:     arch/arm/mach-s3c6400/
-
-ARM/S3C6410 ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c6410/
+F:     arch/arm/mach-s3c64xx/
 
 ARM/S5P ARM ARCHITECTURES
 M:     Kukjin Kim <kgene.kim@samsung.com>
@@ -2102,6 +2060,15 @@ S:       Maintained
 F:     drivers/gpu/drm/
 F:     include/drm/
 
+INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
+M:     Chris Wilson <chris@chris-wilson.co.uk>
+L:     intel-gfx@lists.freedesktop.org
+L:     dri-devel@lists.freedesktop.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
+S:     Supported
+F:     drivers/gpu/drm/i915
+F:     include/drm/i915*
+
 DSCC4 DRIVER
 M:     Francois Romieu <romieu@fr.zoreil.com>
 L:     netdev@vger.kernel.org
@@ -3015,7 +2982,7 @@ M:        Roland Dreier <rolandd@cisco.com>
 M:     Sean Hefty <sean.hefty@intel.com>
 M:     Hal Rosenstock <hal.rosenstock@gmail.com>
 L:     linux-rdma@vger.kernel.org
-W:     http://www.openib.org/
+W:     http://www.openfabrics.org/
 Q:     http://patchwork.kernel.org/project/linux-rdma/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
 S:     Supported
@@ -3867,7 +3834,7 @@ F:        drivers/net/wireless/mwl8k.c
 MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
 M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Odd Fixes
-F: drivers/mmc/host/mvsdio.*
+F:     drivers/mmc/host/mvsdio.*
 
 MARVELL YUKON / SYSKONNECT DRIVER
 M:     Mirko Lindner <mlindner@syskonnect.de>
@@ -4958,7 +4925,7 @@ RCUTORTURE MODULE
 M:     Josh Triplett <josh@freedesktop.org>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 S:     Supported
-T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/rcutorture.c
 
@@ -4983,7 +4950,7 @@ M:        Dipankar Sarma <dipankar@in.ibm.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:     http://www.rdrop.com/users/paulmck/rclock/
 S:     Supported
-T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/
 F:     include/linux/rcu*
 F:     include/linux/srcu*
@@ -5173,6 +5140,16 @@ W:       http://www.kernel.dk
 S:     Maintained
 F:     drivers/scsi/sr*
 
+SCSI RDMA PROTOCOL (SRP) INITIATOR
+M:     David Dillow <dillowda@ornl.gov>
+L:     linux-rdma@vger.kernel.org
+S:     Supported
+W:     http://www.openfabrics.org
+Q:     http://patchwork.kernel.org/project/linux-rdma/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dad/srp-initiator.git
+F:     drivers/infiniband/ulp/srp/
+F:     include/scsi/srp.h
+
 SCSI SG DRIVER
 M:     Doug Gilbert <dgilbert@interlog.com>
 L:     linux-scsi@vger.kernel.org
@@ -6141,13 +6118,6 @@ L:       linux-usb@vger.kernel.org
 S:     Maintained
 F:     drivers/usb/serial/option.c
 
-USB OV511 DRIVER
-M:     Mark McClelland <mmcclell@bigfoot.com>
-L:     linux-usb@vger.kernel.org
-W:     http://alpha.dyndns.org/ov511/
-S:     Maintained
-F:     drivers/media/video/ov511.*
-
 USB PEGASUS DRIVER
 M:     Petko Manolov <petkan@users.sourceforge.net>
 L:     linux-usb@vger.kernel.org
@@ -6308,16 +6278,6 @@ S:       Supported
 F:     drivers/usb/host/xhci*
 F:     drivers/usb/host/pci-quirks*
 
-USB ZC0301 DRIVER
-M:     Luca Risolia <luca.risolia@studio.unibo.it>
-L:     linux-usb@vger.kernel.org
-L:     linux-media@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
-W:     http://www.linux-projects.org
-S:     Maintained
-F:     Documentation/video4linux/zc0301.txt
-F:     drivers/media/video/zc0301/
-
 USB ZD1201 DRIVER
 L:     linux-wireless@vger.kernel.org
 W:     http://linux-lc100020.sourceforge.net
index d04ccd73af45f6487f442a0dba26921db884e218..28f93a6c0fde9169b451abc1837d7661bd95152b 100644 (file)
@@ -55,6 +55,9 @@ config ZONE_DMA
        bool
        default y
 
+config ARCH_DMA_ADDR_T_64BIT
+       def_bool y
+
 config NEED_DMA_MAP_STATE
        def_bool y
 
index 21ac53383b37eca3cea4e4ff3ac9b0c6d7989d67..9f67a056b46181e36753575698ea40d6944e6fd2 100644 (file)
@@ -247,7 +247,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
 #define vip    volatile int __force *
 #define vuip   volatile unsigned int __force *
 
-#ifdef MCPCIA_ONE_HAE_WINDOW
+#ifndef MCPCIA_ONE_HAE_WINDOW
 #define MCPCIA_FROB_MMIO                                               \
        if (__mcpcia_is_mmio(hose)) {                                   \
                set_hae(hose & 0xffffffff);                             \
index 471c07292e0b88f881393036b46ea3594f106b50..91b46801b290420b067966382d1cd0be19d1d11b 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef __ALPHA_T2__H__
 #define __ALPHA_T2__H__
 
+/* Fit everything into one 128MB HAE window. */
+#define T2_ONE_HAE_WINDOW 1
+
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <asm/compiler.h>
@@ -19,7 +22,7 @@
  *
  */
 
-#define T2_MEM_R1_MASK 0x07ffffff  /* Mem sparse region 1 mask is 26 bits */
+#define T2_MEM_R1_MASK 0x07ffffff  /* Mem sparse region 1 mask is 27 bits */
 
 /* GAMMA-SABLE is a SABLE with EV5-based CPUs */
 /* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
@@ -85,7 +88,9 @@
 #define T2_DIR                 (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
 #define T2_ICE                 (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
 
+#ifndef T2_ONE_HAE_WINDOW
 #define T2_HAE_ADDRESS         T2_HAE_1
+#endif
 
 /*  T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
  3.8fff.ffff
@@ -429,13 +434,15 @@ extern inline void t2_outl(u32 b, unsigned long addr)
  *
  */
 
+#ifdef T2_ONE_HAE_WINDOW
+#define t2_set_hae
+#else
 #define t2_set_hae { \
-       msb = addr  >> 27; \
+       unsigned long msb = addr >> 27; \
        addr &= T2_MEM_R1_MASK; \
        set_hae(msb); \
 }
-
-extern raw_spinlock_t t2_hae_lock;
+#endif
 
 /*
  * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -446,28 +453,22 @@ extern raw_spinlock_t t2_hae_lock;
 __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-       unsigned long result, msb;
-       unsigned long flags;
-       raw_spin_lock_irqsave(&t2_hae_lock, flags);
+       unsigned long result;
 
        t2_set_hae;
 
        result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
-       raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
        return __kernel_extbl(result, addr & 3);
 }
 
 __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-       unsigned long result, msb;
-       unsigned long flags;
-       raw_spin_lock_irqsave(&t2_hae_lock, flags);
+       unsigned long result;
 
        t2_set_hae;
 
        result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
-       raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
        return __kernel_extwl(result, addr & 3);
 }
 
@@ -478,59 +479,47 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-       unsigned long result, msb;
-       unsigned long flags;
-       raw_spin_lock_irqsave(&t2_hae_lock, flags);
+       unsigned long result;
 
        t2_set_hae;
 
        result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
-       raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
        return result & 0xffffffffUL;
 }
 
 __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-       unsigned long r0, r1, work, msb;
-       unsigned long flags;
-       raw_spin_lock_irqsave(&t2_hae_lock, flags);
+       unsigned long r0, r1, work;
 
        t2_set_hae;
 
        work = (addr << 5) + T2_SPARSE_MEM + 0x18;
        r0 = *(vuip)(work);
        r1 = *(vuip)(work + (4 << 5));
-       raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
        return r1 << 32 | r0;
 }
 
 __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-       unsigned long msb, w;
-       unsigned long flags;
-       raw_spin_lock_irqsave(&t2_hae_lock, flags);
+       unsigned long w;
 
        t2_set_hae;
 
        w = __kernel_insbl(b, addr & 3);
        *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
-       raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-       unsigned long msb, w;
-       unsigned long flags;
-       raw_spin_lock_irqsave(&t2_hae_lock, flags);
+       unsigned long w;
 
        t2_set_hae;
 
        w = __kernel_inswl(b, addr & 3);
        *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
-       raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 /*
@@ -540,29 +529,22 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-       unsigned long msb;
-       unsigned long flags;
-       raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
        t2_set_hae;
 
        *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
-       raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-       unsigned long msb, work;
-       unsigned long flags;
-       raw_spin_lock_irqsave(&t2_hae_lock, flags);
+       unsigned long work;
 
        t2_set_hae;
 
        work = (addr << 5) + T2_SPARSE_MEM + 0x18;
        *(vuip)work = b;
        *(vuip)(work + (4 << 5)) = b >> 32;
-       raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
index 71a243294142a41f6a80c0a4dc1e7149aa338dcf..de98a732683d868b29a3175a5cccd6e22c23f1c8 100644 (file)
@@ -318,9 +318,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 }
 
 #define pte_offset_map(dir,addr)       pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr)        pte_offset_kernel((dir),(addr))
 #define pte_unmap(pte)                 do { } while (0)
-#define pte_unmap_nested(pte)          do { } while (0)
 
 extern pgd_t swapper_pg_dir[1024];
 
index e6d90568b65d62a378525dfaf0af8fe3366d160f..2f770e99428961f6233c8d7742f705180455bdb2 100644 (file)
@@ -74,8 +74,6 @@
 # define DBG(args)
 #endif
 
-DEFINE_RAW_SPINLOCK(t2_hae_lock);
-
 static volatile unsigned int t2_mcheck_any_expected;
 static volatile unsigned int t2_mcheck_last_taken;
 
@@ -406,6 +404,7 @@ void __init
 t2_init_arch(void)
 {
        struct pci_controller *hose;
+       struct resource *hae_mem;
        unsigned long temp;
        unsigned int i;
 
@@ -433,7 +432,13 @@ t2_init_arch(void)
         */
        pci_isa_hose = hose = alloc_pci_controller();
        hose->io_space = &ioport_resource;
-       hose->mem_space = &iomem_resource;
+       hae_mem = alloc_resource();
+       hae_mem->start = 0;
+       hae_mem->end = T2_MEM_R1_MASK;
+       hae_mem->name = pci_hae0_name;
+       if (request_resource(&iomem_resource, hae_mem) < 0)
+               printk(KERN_ERR "Failed to request HAE_MEM\n");
+       hose->mem_space = hae_mem;
        hose->index = 0;
 
        hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
index 512685f78097a0ed5406e1e2226641cc007545c8..7fa62488bd16791f77b0218451629d749c2e5351 100644 (file)
@@ -25,6 +25,9 @@
 #ifdef MCPCIA_ONE_HAE_WINDOW
 #define MCPCIA_HAE_ADDRESS     (&alpha_mv.hae_cache)
 #endif
+#ifdef T2_ONE_HAE_WINDOW
+#define T2_HAE_ADDRESS         (&alpha_mv.hae_cache)
+#endif
 
 /* Only a few systems don't define IACK_SC, handling all interrupts through
    the SRM console.  But splitting out that one case from IO() below
index 5aff581266024831e359ce30658dd191403714cd..1fc684e70ab6a6c9915ad5fbd302066ccb050a29 100644 (file)
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
 #endif
 
index a9672e8406a3cdb5efd45f69b0cbe10e47196ec2..b155414192daea110acf0f890a6f3d139ceef182 100644 (file)
@@ -263,17 +263,15 @@ extern struct page *empty_zero_page;
 #define pte_page(pte)          (pfn_to_page(pte_pfn(pte)))
 #define pte_offset_kernel(dir,addr)    (pmd_page_vaddr(*(dir)) + __pte_index(addr))
 
-#define pte_offset_map(dir,addr)       (__pte_map(dir, KM_PTE0) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr)        (__pte_map(dir, KM_PTE1) + __pte_index(addr))
-#define pte_unmap(pte)                 __pte_unmap(pte, KM_PTE0)
-#define pte_unmap_nested(pte)          __pte_unmap(pte, KM_PTE1)
+#define pte_offset_map(dir,addr)       (__pte_map(dir) + __pte_index(addr))
+#define pte_unmap(pte)                 __pte_unmap(pte)
 
 #ifndef CONFIG_HIGHPTE
-#define __pte_map(dir,km)      pmd_page_vaddr(*(dir))
-#define __pte_unmap(pte,km)    do { } while (0)
+#define __pte_map(dir)         pmd_page_vaddr(*(dir))
+#define __pte_unmap(pte)       do { } while (0)
 #else
-#define __pte_map(dir,km)      ((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
-#define __pte_unmap(pte,km)    kunmap_atomic((pte - PTRS_PER_PTE), km)
+#define __pte_map(dir)         ((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
+#define __pte_unmap(pte)       kunmap_atomic((pte - PTRS_PER_PTE))
 #endif
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
index 4566bd1c8660b3fe7ac0cff28473746fd3d34c82..ef06c66a6f1630cbab1ec79487382dc8704f4049 100644 (file)
@@ -358,8 +358,7 @@ static int calc_clk_div(struct clk *clk, unsigned long rate,
        int i, found = 0, __div = 0, __pdiv = 0;
 
        /* Don't exceed the maximum rate */
-       max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
-                      clk_xtali.rate / 4);
+       max_rate = max3(clk_pll1.rate / 4, clk_pll2.rate / 4, clk_xtali.rate / 4);
        rate = min(rate, max_rate);
 
        /*
index 8440d952ba6dd1266628678f016bdf5f41a3002a..c493d7244d3d99bee236469940fbed42c10ce476 100644 (file)
@@ -89,13 +89,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
         * open-code the spin-locking.
         */
        ptl = pte_lockptr(vma->vm_mm, pmd);
-       pte = pte_offset_map_nested(pmd, address);
+       pte = pte_offset_map(pmd, address);
        spin_lock(ptl);
 
        ret = do_adjust_pte(vma, address, pfn, pte);
 
        spin_unlock(ptl);
-       pte_unmap_nested(pte);
+       pte_unmap(pte);
 
        return ret;
 }
index 1fbdb55bfd1bd34a480ea0e45b7f3a39a2934c6a..c00f119babbfe59a5d51a87fd5ba13f1e4c0f190 100644 (file)
@@ -36,18 +36,17 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;
+       int type;
 
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * There is no cache coherency issue when non VIVT, so force the
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
        if (kmap)
                return kmap;
 
+       type = kmap_atomic_idx_push();
+
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
        return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+       int idx, type;
 
        if (kvaddr >= (void *)FIXADDR_START) {
+               type = kmap_atomic_idx_pop();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
        }
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       unsigned int idx;
        unsigned long vaddr;
+       int idx, type;
 
        pagefault_disable();
 
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
index be5f58e153bf180d4df1da5037f26652cc7ca2cb..69bbfc6645a673853ed6f8171e08e3a4f9f22585 100644 (file)
@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
                        goto no_pte;
 
                init_pmd = pmd_offset(init_pgd, 0);
-               init_pte = pte_offset_map_nested(init_pmd, 0);
+               init_pte = pte_offset_map(init_pmd, 0);
                set_pte_ext(new_pte, *init_pte, 0);
-               pte_unmap_nested(init_pte);
+               pte_unmap(init_pte);
                pte_unmap(new_pte);
        }
 
index a9ae30c41e746d53e25281d3c8132c5aeba9f09d..6fbfea61f7bb7315d7c19f15db0df6ae8f8bd697 100644 (file)
@@ -319,9 +319,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address)                                        \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte)         do { } while (0)
-#define pte_unmap_nested(pte)  do { } while (0)
 
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
index a6886f6e48194027f529a0380feb618eb1f2abcd..4104d5783e2c1a4ad80b633f123e7c1dc219c459 100644 (file)
 #define        LFLUSH_I_AND_D  0x00000808
 #define        LSIGTRAP        5
 
-/* process bits for task_struct.flags */
-#define        PF_TRACESYS_OFF 3
-#define        PF_TRACESYS_BIT 5
-#define        PF_PTRACED_OFF  3
-#define        PF_PTRACED_BIT  4
-#define        PF_DTRACE_OFF   1
-#define        PF_DTRACE_BIT   5
-
 /*
  * NOTE!  The single-stepping code assumes that all interrupt handlers
  * start by saving SYSCFG on the stack with their first instruction.
index f63d6fccbc6caf18906fa06c99d72e44e3b17792..9eaae217b21b755298809150cca419904bd5c944 100644 (file)
@@ -248,10 +248,8 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre
        ((pte_t *) pmd_page_vaddr(*(dir)) +  __pte_offset(address))
 #define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #define pte_pfn(x)             ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
 #define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
index cb4c317eaecc8db55074398fa8d593a2f37a9da3..a8d6565d415d611a030d658a8ce2270be0f8eff6 100644 (file)
@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
        (void *) damlr;                                                                           \
 })
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
 {
        unsigned long paddr;
 
        pagefault_disable();
-       debug_kmap_atomic(type);
        paddr = page_to_phys(page);
 
        switch (type) {
@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
         case 1:                return __kmap_atomic_primary(1, paddr, 3);
         case 2:                return __kmap_atomic_primary(2, paddr, 4);
         case 3:                return __kmap_atomic_primary(3, paddr, 5);
-        case 4:                return __kmap_atomic_primary(4, paddr, 6);
-        case 5:                return __kmap_atomic_primary(5, paddr, 7);
-        case 6:                return __kmap_atomic_primary(6, paddr, 8);
-        case 7:                return __kmap_atomic_primary(7, paddr, 9);
-        case 8:                return __kmap_atomic_primary(8, paddr, 10);
-
-       case 9 ... 9 + NR_TLB_LINES - 1:
-               return __kmap_atomic_secondary(type - 9, paddr);
 
        default:
                BUG();
@@ -152,22 +143,13 @@ do {                                                                      \
        asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");   \
 } while(0)
 
-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 {
        switch (type) {
         case 0:                __kunmap_atomic_primary(0, 2);  break;
         case 1:                __kunmap_atomic_primary(1, 3);  break;
         case 2:                __kunmap_atomic_primary(2, 4);  break;
         case 3:                __kunmap_atomic_primary(3, 5);  break;
-        case 4:                __kunmap_atomic_primary(4, 6);  break;
-        case 5:                __kunmap_atomic_primary(5, 7);  break;
-        case 6:                __kunmap_atomic_primary(6, 8);  break;
-        case 7:                __kunmap_atomic_primary(7, 9);  break;
-        case 8:                __kunmap_atomic_primary(8, 10); break;
-
-       case 9 ... 9 + NR_TLB_LINES - 1:
-               __kunmap_atomic_secondary(type - 9, kvaddr);
-               break;
 
        default:
                BUG();
@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
        pagefault_enable();
 }
 
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index c18b0d32e63655ed7dc7d26b5ce0c44c5f990c36..6bc241e4b4f8212b386758586693d58e32e3a426 100644 (file)
@@ -451,17 +451,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-       ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-       ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+       ((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /*
index 85d110b71cf735166a31d3d2872e46fc597bbc6d..41098a3803a22db4fa787d4ab5129dfe1da8456d 100644 (file)
@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        dampr2 = __get_DAMPR(2);
 
        for (i = 0; i < nents; i++) {
-               vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+               vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
 
                frv_dcache_writeback((unsigned long) vaddr,
                                     (unsigned long) vaddr + PAGE_SIZE);
 
        }
 
-       kunmap_atomic(vaddr, __KM_CACHE);
+       kunmap_atomic_primary(vaddr, __KM_CACHE);
        if (dampr2) {
                __set_DAMPR(2, dampr2);
                __set_IAMPR(2, dampr2);
index 0261cbe153b5ecf4ff427a6a4d806b280a6a3930..b24ade27a0f09210002916bf0c164aed73777abc 100644 (file)
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
 
        dampr2 = __get_DAMPR(2);
 
-       vaddr = kmap_atomic(page, __KM_CACHE);
+       vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
        frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
 
-       kunmap_atomic(vaddr, __KM_CACHE);
+       kunmap_atomic_primary(vaddr, __KM_CACHE);
 
        if (dampr2) {
                __set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
        dampr2 = __get_DAMPR(2);
 
-       vaddr = kmap_atomic(page, __KM_CACHE);
+       vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
        start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
        frv_cache_wback_inv(start, start + len);
 
-       kunmap_atomic(vaddr, __KM_CACHE);
+       kunmap_atomic_primary(vaddr, __KM_CACHE);
 
        if (dampr2) {
                __set_DAMPR(2, dampr2);
index eadd07658075c3bc942ead2ee3d4d77f0b0803ec..61088dcc159432ad6063d60aee452d7866a2ef4f 100644 (file)
@@ -36,3 +36,53 @@ struct page *kmap_atomic_to_page(void *ptr)
 {
        return virt_to_page(ptr);
 }
+
+void *__kmap_atomic(struct page *page)
+{
+       unsigned long paddr;
+       int type;
+
+       pagefault_disable();
+       type = kmap_atomic_idx_push();
+       paddr = page_to_phys(page);
+
+       switch (type) {
+       /*
+        * The first 4 primary maps are reserved for architecture code
+        */
+       case 0:         return __kmap_atomic_primary(4, paddr, 6);
+       case 1:         return __kmap_atomic_primary(5, paddr, 7);
+       case 2:         return __kmap_atomic_primary(6, paddr, 8);
+       case 3:         return __kmap_atomic_primary(7, paddr, 9);
+       case 4:         return __kmap_atomic_primary(8, paddr, 10);
+
+       case 5 ... 5 + NR_TLB_LINES - 1:
+               return __kmap_atomic_secondary(type - 5, paddr);
+
+       default:
+               BUG();
+               return NULL;
+       }
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+       int type = kmap_atomic_idx_pop();
+       switch (type) {
+       case 0:         __kunmap_atomic_primary(4, 6);  break;
+       case 1:         __kunmap_atomic_primary(5, 7);  break;
+       case 2:         __kunmap_atomic_primary(6, 8);  break;
+       case 3:         __kunmap_atomic_primary(7, 9);  break;
+       case 4:         __kunmap_atomic_primary(8, 10); break;
+
+       case 5 ... 5 + NR_TLB_LINES - 1:
+               __kunmap_atomic_secondary(type - 5, kvaddr);
+               break;
+
+       default:
+               BUG();
+       }
+       pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
index c3286f42e501477722bdec7373112e92efc8a40d..1a97af31ef176e71d8138250c03c52b7ab0adf3f 100644 (file)
@@ -406,9 +406,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
 #define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir,addr)    ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr)       pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr)        pte_offset_map(dir, addr)
 #define pte_unmap(pte)                 do { } while (0)
-#define pte_unmap_nested(pte)          do { } while (0)
 
 /* atomic versions of the some PTE manipulations: */
 
index e6359c566b504f50ae462cc9c917b2126603052d..8a28cfea27297694aa12328e579dd6f44a385d3e 100644 (file)
@@ -332,9 +332,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
        ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address)   \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address)    pte_offset_map(dir, address)
 #define pte_unmap(pte)         do { } while (0)
-#define pte_unmap_nested(pte)  do { } while (0)
 
 /* Encode and de-code a swap entry */
 #define __swp_type(x)                  (((x).val >> 2) & 0x1f)
index e41fea399bfe1c94c03e4a0fcab662ee7a67fbcf..73b8c8fbed9cdbad94c505db43e98a3972d72d5d 100644 (file)
 
 LFLUSH_I_AND_D = 0x00000808
 
-/* process bits for task_struct.ptrace */
-PT_TRACESYS_OFF = 3
-PT_TRACESYS_BIT = 1
-PT_PTRACED_OFF = 3
-PT_PTRACED_BIT = 0
-PT_DTRACE_OFF = 3
-PT_DTRACE_BIT = 2
-
 #define SAVE_ALL_INT save_all_int
 #define SAVE_ALL_SYS save_all_sys
 #define RESTORE_ALL restore_all
index 80e41492aa2a0750018620d73c1387a36bbe265a..26be277394f9fc647931f2145e67173d648c20f2 100644 (file)
 
 #ifdef __ASSEMBLY__
 
-/* process bits for task_struct.flags */
-PF_TRACESYS_OFF = 3
-PF_TRACESYS_BIT = 5
-PF_PTRACED_OFF = 3
-PF_PTRACED_BIT = 4
-PF_DTRACE_OFF = 1
-PF_DTRACE_BIT = 5
-
-LENOSYS = 38
-
 #define SWITCH_STACK_SIZE (6*4+4)      /* Includes return address */
 
 /*
index 8e9a8a754dde0007377d1f59cfc0ed121f56f248..45bd3f589bf0d62baeac34692aa7b6582cd3ae9f 100644 (file)
@@ -221,9 +221,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
 }
 
 #define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
 #define pte_unmap(pte)         ((void)0)
-#define pte_unmap_nested(pte)  ((void)0)
 
 /*
  * Allocate and free page tables. The xxx_kernel() versions are
index f847ec732d62d290a11ba2c464c2185a8d040f89..cf5fad9b525041f20518bb5e776d728e911af007 100644 (file)
@@ -219,9 +219,7 @@ static inline pte_t pgoff_to_pte(unsigned off)
 #define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
 /* FIXME: should we bother with kmap() here? */
 #define pte_offset_map(pmd, address) ((pte_t *)kmap(pmd_page(*pmd)) + pte_index(address))
-#define pte_offset_map_nested(pmd, address) pte_offset_map(pmd, address)
 #define pte_unmap(pte) kunmap(pte)
-#define pte_unmap_nested(pte) kunmap(pte)
 
 /* Macros to (de)construct the fake PTEs representing swap pages. */
 #define __swp_type(x)          ((x).val & 0x7F)
index d4f421672d3b95c17f9891749928aad310780866..cae268c22ba283513f967461a0e3132be739a6b9 100644 (file)
@@ -504,12 +504,9 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
 #define pte_offset_kernel(dir, addr)   \
        ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)              \
-       ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr)       \
-       ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+       ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
 
-#define pte_unmap(pte)         kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)  kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap(pte)         kunmap_atomic(pte)
 
 /* Encode and decode a nonlinear file mapping entry */
 #define PTE_FILE_MAX_BITS      29
index 75753ca73bfd1c9e0d6ad82681ef15e4f2783897..77e644082a3b84547e0375241026ae1430f57ad4 100644 (file)
@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
 extern void * kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap                   __kmap
-#define kunmap                 __kunmap
-#define kmap_atomic            __kmap_atomic
-#define kunmap_atomic_notypecheck              __kunmap_atomic_notypecheck
-#define kmap_atomic_to_page    __kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()    flush_cache_all()
 
index ae90412556d026fc58d5a97bc9e2761c76080e49..8a153d2fa62af5aa1d3666f232e850e985e2b0ef 100644 (file)
@@ -154,10 +154,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 
 #define pte_offset_map(dir, address)                                    \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)                             \
-       ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
index 1be4b0fa30da7dc30d269c8057acb1f659d650c9..f00896087ddab34b861ac6770e8166d9f2fa764a 100644 (file)
@@ -257,10 +257,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address)                                   \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)                            \
-       ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 /*
  * Initialize a new pgd / pmd table with invalid pointers.
index 6a2b1bf9ef112571ef771e1ba039ec0992d18311..1e69b1fb4b85c16e1a1c5f07bc6f0e054e5196cc 100644 (file)
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
 
 unsigned long highstart_pfn, highend_pfn;
 
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
        void *addr;
 
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
 
        return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,47 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+       int type;
 
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }
 
-       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+       type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+       {
+               int idx = type + KM_TYPE_NR * smp_processor_id();
 
-       /*
-        * force other mappings to Oops if they'll try to access
-        * this pte without first remap it
-        */
-       pte_clear(&init_mm, vaddr, kmap_pte-idx);
-       local_flush_tlb_one(vaddr);
-#endif
+               BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
+               /*
+                * force other mappings to Oops if they'll try to access
+                * this pte without first remap it
+                */
+               pte_clear(&init_mm, vaddr, kmap_pte-idx);
+               local_flush_tlb_one(vaddr);
+       }
+#endif
        pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
        pagefault_disable();
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
        return (void*) vaddr;
 }
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;
index b0b187a29b8862a9a99e1c98404c90bb3eb74225..f577ba2268caadc554d2cb34c5163394a83f901f 100644 (file)
@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
+       pagefault_disable();
        if (page < highmem_start_page)
                return page_address(page);
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #if HIGHMEM_DEBUG
@@ -91,26 +92,35 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
        return vaddr;
 }
 
-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
 {
-#if HIGHMEM_DEBUG
-       enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+       int type;
 
-       if (vaddr < FIXADDR_START) /* FIXME */
+       if (vaddr < FIXADDR_START) { /* FIXME */
+               pagefault_enable();
                return;
+       }
 
-       if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
-               BUG();
+       type = kmap_atomic_idx_pop();
 
-       /*
-        * force other mappings to Oops if they'll try to access
-        * this pte without first remap it
-        */
-       pte_clear(kmap_pte - idx);
-       __flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+       {
+               unsigned int idx;
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+               if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+                       BUG();
+
+               /*
+                * force other mappings to Oops if they'll try to access
+                * this pte without first remap it
+                */
+               pte_clear(kmap_pte - idx);
+               __flush_tlb_one(vaddr);
+       }
 #endif
+       pagefault_enable();
 }
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_HIGHMEM_H */
index 16d88577f3e08510b543452ac322d6cd7e970e77..b049a8bd157774fd9c3f20d208913d04a8761c70 100644 (file)
@@ -457,9 +457,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
 
 #define pte_offset_map(dir, address) \
        ((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte)         do {} while (0)
-#define pte_unmap_nested(pte)  do {} while (0)
 
 /*
  * The MN10300 has external MMU info in the form of a TLB: this is adapted from
index 01c15035e783d47ec944f7d47b30ee90202cd9a2..865f37a8a88167306d6469e2bce9c11880022146 100644 (file)
@@ -397,9 +397,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(pmd, address) \
        ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 #define pte_unmap(pte)                 do { } while (0)
 #define pte_unmap_nested(pte)          do { } while (0)
index d10d64a4be38c2c5725a853498007406e211f817..dbc264010d0b9f5b5182d56c02e8730c2025242f 100644 (file)
@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table;
 
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, enum km_type type,
-                             pgprot_t prot);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);
 
 static inline void *kmap(struct page *page)
 {
@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page)
        kunmap_high(page);
 }
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *__kmap_atomic(struct page *page)
 {
-       return kmap_atomic_prot(page, type, kmap_prot);
+       return kmap_atomic_prot(page, kmap_prot);
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
index a7db96f2b5c38dfb4257974aace3739aa11361c7..47edde8c3556221b7371b225f32e1065b4b2fdb8 100644 (file)
@@ -308,12 +308,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 #define pte_offset_kernel(dir, addr)   \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)              \
-       ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr)       \
-       ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
-
-#define pte_unmap(pte)         kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)  kunmap_atomic(pte, KM_PTE1)
+       ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)         kunmap_atomic(pte)
 
 /*
  * Encode and decode a swap entry.
index 49865045d56f2559377a62c3938cb9656f76f360..2b09cd522d333d120ad97e1870bb63f12304e81b 100644 (file)
   (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)        pte_offset_kernel((dir), (addr))
 #define pte_unmap(pte)                 do { } while(0)
-#define pte_unmap_nested(pte)          do { } while(0)
 
 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
index d692989a4318273c06fc1fdc024448afd15d322d..441d2a722f0655b540b993c422c7f433585e1060 100644 (file)
@@ -238,9 +238,7 @@ static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
         * memory in this pool does not change.
         */
        if (spare_needed && reserve_freed) {
-               tmp = min(spare_needed, min(reserve_freed,
-                                           (viodev->cmo.entitled -
-                                            VIO_CMO_MIN_ENT)));
+               tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
 
                vio_cmo.spare += tmp;
                viodev->cmo.entitled -= tmp;
index 857d4173f9c69358209eaf8793b6b64f4f0cfd89..b0848b462bbceb6678a071889cc774e40f77cafd 100644 (file)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-       unsigned int idx;
        unsigned long vaddr;
+       int idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -52,26 +52,33 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+       int type;
 
        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
                pagefault_enable();
                return;
        }
 
-       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+       type = kmap_atomic_idx_pop();
 
-       /*
-        * force other mappings to Oops if they'll try to access
-        * this pte without first remap it
-        */
-       pte_clear(&init_mm, vaddr, kmap_pte-idx);
-       local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       {
+               unsigned int idx;
+
+               idx = type + KM_TYPE_NR * smp_processor_id();
+               BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+               /*
+                * force other mappings to Oops if they'll try to access
+                * this pte without first remap it
+                */
+               pte_clear(&init_mm, vaddr, kmap_pte-idx);
+               local_flush_tlb_page(NULL, vaddr);
+       }
 #endif
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
index 986dc9476c219127604995d210fc28514e736f99..02ace3491c51cca92625f2a39f30fe9aa9741ddb 100644 (file)
@@ -1094,9 +1094,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 /*
  * 31 bit swap entry format:
index ccf38f06c57d78110e14cd21a0e3984c3c52b52d..2fd469807683fadfe15449e2474ec1f35f2fb921 100644 (file)
@@ -88,10 +88,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #define pte_offset_map(dir, address)   \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)    \
-       ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))
 
 /*
  * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,
index e172d696e52bbfaa9e21e6017f343cea7e32d3fd..69fdfbf14ea5b00e5dc8ed29cd65b30333a1ee82 100644 (file)
@@ -429,10 +429,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address)           pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address)    pte_offset_kernel(dir, address)
-
 #define pte_unmap(pte)         do { } while (0)
-#define pte_unmap_nested(pte)  do { } while (0)
 
 #ifdef CONFIG_X2TLB
 #define pte_ERROR(e) \
index 0ee46776dad6d9c7032a45856710e357d9fb90d1..10a48111226dbd319315105ed48709d6b3189721 100644 (file)
@@ -84,9 +84,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
                ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
 
 #define pte_offset_map(dir,addr)       pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr)        pte_offset_kernel(dir, addr)
 #define pte_unmap(pte)         do { } while (0)
-#define pte_unmap_nested(pte)  do { } while (0)
 
 #ifndef __ASSEMBLY__
 #define IOBASE_VADDR   0xff000000
index ec23b0a87b98fa1ea3038cb1e8b0141da05ec991..3d7afbb7f4bbd0ba4022151a3e09ef6e9ad645f8 100644 (file)
@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page)
        kunmap_high(page);
 }
 
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);
 
 #define flush_cache_kmaps()    flush_cache_all()
index 0ece77f477536b773a9bd310515008c3b152055c..303bd4dc82927b3bf85371c01200054f94e101bb 100644 (file)
@@ -304,10 +304,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
  * and sun4c is guaranteed to have no highmem anyway.
  */
 #define pte_offset_map(d, a)           pte_offset_kernel(d,a)
-#define pte_offset_map_nested(d, a)    pte_offset_kernel(d,a)
-
 #define pte_unmap(pte)         do{}while(0)
-#define pte_unmap_nested(pte)  do{}while(0)
 
 /* Certain architectures need to do special things when pte's
  * within a page table are directly modified.  Thus, the following
index f5b5fa76c02dbb2eb385220a5222c191f1bf2a8e..f8dddb7045bbb2d16744e09b4200969178c2eb89 100644 (file)
@@ -652,9 +652,7 @@ static inline int pte_special(pte_t pte)
         ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 #define pte_offset_kernel              pte_index
 #define pte_offset_map                 pte_index
-#define pte_offset_map_nested          pte_index
 #define pte_unmap(pte)                 do { } while (0)
-#define pte_unmap_nested(pte)          do { } while (0)
 
 /* Actual page table PTE updates.  */
 extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
index e139e9cbf5f7a2aaa94c7d8d2f561e67b1fa0951..5e50c09b7dcea796aceea1b88fbd3c0338955abf 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-       unsigned long idx;
        unsigned long vaddr;
+       long idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 
@@ -63,44 +63,50 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
        return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+       int type;
 
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }
 
-       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+       type = kmap_atomic_idx_pop();
 
-/* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+       {
+               unsigned long idx;
+
+               idx = type + KM_TYPE_NR * smp_processor_id();
+               BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+               /* XXX Fix - Anton */
 #if 0
-       __flush_cache_one(vaddr);
+               __flush_cache_one(vaddr);
 #else
-       flush_cache_all();
+               flush_cache_all();
 #endif
 
-       /*
-        * force other mappings to Oops if they'll try to access
-        * this pte without first remap it
-        */
-       pte_clear(&init_mm, vaddr, kmap_pte-idx);
-/* XXX Fix - Anton */
+               /*
+                * force other mappings to Oops if they try to access
+                * this pte without first remapping it
+                */
+               pte_clear(&init_mm, vaddr, kmap_pte-idx);
+               /* XXX Fix - Anton */
 #if 0
-       __flush_tlb_one(vaddr);
+               __flush_tlb_one(vaddr);
 #else
-       flush_tlb_all();
+               flush_tlb_all();
 #endif
+       }
 #endif
-
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
 struct page *kmap_atomic_to_page(void *ptr)
index 1eb308cb711ab1171779d52d8014041c923fc8a3..89cfee07efa98fc9dc78e42c0f6dc3cb36a651e2 100644 (file)
@@ -96,6 +96,7 @@ config HVC_TILE
 
 config TILE
        def_bool y
+       select HAVE_KVM if !TILEGX
        select GENERIC_FIND_FIRST_BIT
        select GENERIC_FIND_NEXT_BIT
        select USE_GENERIC_SMP_HELPERS
@@ -236,9 +237,9 @@ choice
          If you are not absolutely sure what you are doing, leave this
          option alone!
 
-       config VMSPLIT_375G
+       config VMSPLIT_3_75G
                bool "3.75G/0.25G user/kernel split (no kernel networking)"
-       config VMSPLIT_35G
+       config VMSPLIT_3_5G
                bool "3.5G/0.5G user/kernel split"
        config VMSPLIT_3G
                bool "3G/1G user/kernel split"
@@ -252,8 +253,8 @@ endchoice
 
 config PAGE_OFFSET
        hex
-       default 0xF0000000 if VMSPLIT_375G
-       default 0xE0000000 if VMSPLIT_35G
+       default 0xF0000000 if VMSPLIT_3_75G
+       default 0xE0000000 if VMSPLIT_3_5G
        default 0xB0000000 if VMSPLIT_3G_OPT
        default 0x80000000 if VMSPLIT_2G
        default 0x40000000 if VMSPLIT_1G
@@ -314,6 +315,15 @@ config HARDWALL
        bool "Hardwall support to allow access to user dynamic network"
        default y
 
+config KERNEL_PL
+       int "Processor protection level for kernel"
+       range 1 2
+       default "1"
+       ---help---
+         This setting determines the processor protection level the
+         kernel will be built to run at.  Generally you should use
+         the default value here.
+
 endmenu  # Tilera-specific configuration
 
 menu "Bus options"
@@ -354,3 +364,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/tile/kvm/Kconfig"
index fd8f6bb5facecafb70c223f24638cb43138458d3..17acce70569b01aaf7de554a66c0f7e6e8a47157 100644 (file)
@@ -26,8 +26,9 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
   endif
 endif
 
-
+ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
 KBUILD_CFLAGS   += $(CONFIG_DEBUG_EXTRA_FLAGS)
+endif
 
 LIBGCC_PATH     := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
@@ -49,6 +50,20 @@ head-y               := arch/tile/kernel/head_$(BITS).o
 libs-y         += arch/tile/lib/
 libs-y         += $(LIBGCC_PATH)
 
-
 # See arch/tile/Kbuild for content of core part of the kernel
 core-y         += arch/tile/
+
+core-$(CONFIG_KVM) += arch/tile/kvm/
+
+ifdef TILERA_ROOT
+INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
+endif
+
+install:
+       install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
+       install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
+       install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+
+define archhelp
+       echo '  install         - install kernel into $(INSTALL_PATH)'
+endef
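+
+# Usage sketch: with TILERA_ROOT set (or INSTALL_PATH given explicitly),
+# "make install" copies vmlinux, .config and System.map into
+# $(INSTALL_PATH), each suffixed with the kernel release string.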
diff --git a/arch/tile/include/arch/sim.h b/arch/tile/include/arch/sim.h
new file mode 100644 (file)
index 0000000..74b7c16
--- /dev/null
@@ -0,0 +1,619 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/**
+ * @file
+ *
+ * Provides an API for controlling the simulator at runtime.
+ */
+
+/**
+ * @addtogroup arch_sim
+ * @{
+ *
+ * An API for controlling the simulator at runtime.
+ *
+ * The simulator's behavior can be modified while it is running.
+ * For example, human-readable trace output can be enabled and disabled
+ * around code of interest.
+ *
+ * There are two ways to modify simulator behavior:
+ * programmatically, by calling various sim_* functions, and
+ * interactively, by entering commands like "sim set functional true"
+ * at the tile-monitor prompt.  Typing "sim help" at that prompt provides
+ * a list of interactive commands.
+ *
+ * All interactive commands can also be executed programmatically by
+ * passing a string to the sim_command function.
+ */
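+
+/*
+ * Usage sketch: the two control paths are equivalent; for example,
+ * disassembly tracing can be enabled either programmatically with
+ * sim_set_tracing(SIM_TRACE_DISASM), or interactively by typing
+ * "sim trace disasm" at the tile-monitor prompt.
+ */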
+
+#ifndef __ARCH_SIM_H__
+#define __ARCH_SIM_H__
+
+#include <arch/sim_def.h>
+#include <arch/abi.h>
+
+#ifndef __ASSEMBLER__
+
+#include <arch/spr_def.h>
+
+
+/**
+ * Return true if the current program is running under a simulator,
+ * rather than on real hardware.  If running on hardware, other "sim_xxx()"
+ * calls have no useful effect.
+ */
+static inline int
+sim_is_simulator(void)
+{
+  return __insn_mfspr(SPR_SIM_CONTROL) != 0;
+}
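+
+/*
+ * Usage sketch (the string is arbitrary): guard simulator-only work
+ * so it is skipped on real hardware:
+ *
+ *   if (sim_is_simulator())
+ *     sim_print_string("entering hot loop");
+ */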
+
+
+/**
+ * Checkpoint the simulator state to a checkpoint file.
+ *
+ * The checkpoint file name is either the default or the name specified
+ * on the command line with "--checkpoint-file".
+ */
+static __inline void
+sim_checkpoint(void)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_CHECKPOINT);
+}
+
+
+/**
+ * Report whether or not various kinds of simulator tracing are enabled.
+ *
+ * @return The bitwise OR of these values:
+ *
+ * SIM_TRACE_CYCLES (--trace-cycles),
+ * SIM_TRACE_ROUTER (--trace-router),
+ * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
+ * SIM_TRACE_DISASM (--trace-disasm),
+ * SIM_TRACE_STALL_INFO (--trace-stall-info)
+ * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
+ * SIM_TRACE_L2_CACHE (--trace-l2)
+ * SIM_TRACE_LINES (--trace-lines)
+ */
+static __inline unsigned int
+sim_get_tracing(void)
+{
+  return __insn_mfspr(SPR_SIM_CONTROL) & SIM_TRACE_FLAG_MASK;
+}
+
+
+/**
+ * Turn on or off different kinds of simulator tracing.
+ *
+ * @param mask Either one of these special values:
+ *
+ * SIM_TRACE_NONE (turns off tracing),
+ * SIM_TRACE_ALL (turns on all possible tracing).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_TRACE_CYCLES (--trace-cycles),
+ * SIM_TRACE_ROUTER (--trace-router),
+ * SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
+ * SIM_TRACE_DISASM (--trace-disasm),
+ * SIM_TRACE_STALL_INFO (--trace-stall-info)
+ * SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
+ * SIM_TRACE_L2_CACHE (--trace-l2)
+ * SIM_TRACE_LINES (--trace-lines)
+ */
+static __inline void
+sim_set_tracing(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_TRACE_SPR_ARG(mask));
+}
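+
+/*
+ * Usage sketch (region_of_interest() is a placeholder): enable tracing
+ * only around the code of interest, then restore the previous flags:
+ *
+ *   unsigned int old_mask = sim_get_tracing();
+ *   sim_set_tracing(SIM_TRACE_CYCLES | SIM_TRACE_DISASM);
+ *   region_of_interest();
+ *   sim_set_tracing(old_mask);
+ */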
+
+
+/**
+ * Request dumping of different kinds of simulator state.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_DUMP_ALL (dump all known state)
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_DUMP_REGS (the register file),
+ * SIM_DUMP_SPRS (the SPRs),
+ * SIM_DUMP_ITLB (the iTLB),
+ * SIM_DUMP_DTLB (the dTLB),
+ * SIM_DUMP_L1I (the L1 I-cache),
+ * SIM_DUMP_L1D (the L1 D-cache),
+ * SIM_DUMP_L2 (the L2 cache),
+ * SIM_DUMP_SNREGS (the switch register file),
+ * SIM_DUMP_SNITLB (the switch iTLB),
+ * SIM_DUMP_SNL1I (the switch L1 I-cache),
+ * SIM_DUMP_BACKTRACE (the current backtrace)
+ */
+static __inline void
+sim_dump(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_DUMP_SPR_ARG(mask));
+}
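+
+/*
+ * Usage sketch: dump the registers and the current backtrace at a
+ * point of interest:
+ *
+ *   sim_dump(SIM_DUMP_REGS | SIM_DUMP_BACKTRACE);
+ */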
+
+
+/**
+ * Print a string to the simulator stdout.
+ *
+ * @param str The string to be written; a newline is automatically added.
+ */
+static __inline void
+sim_print_string(const char* str)
+{
+  int i;
+  for (i = 0; str[i] != 0; i++)
+  {
+    __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+                 (str[i] << _SIM_CONTROL_OPERATOR_BITS));
+  }
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+               (SIM_PUTC_FLUSH_STRING << _SIM_CONTROL_OPERATOR_BITS));
+}
+
+
+/**
+ * Execute a simulator command string.
+ *
+ * Type 'sim help' at the tile-monitor prompt to learn what commands
+ * are available.  Note the use of the tile-monitor "sim" command to
+ * pass commands to the simulator.
+ *
+ * The argument to sim_command() does not include the leading "sim"
+ * prefix used at the tile-monitor prompt; for example, you might call
+ * sim_command("trace disasm").
+ */
+static __inline void
+sim_command(const char* str)
+{
+  int c;
+  do
+  {
+    c = *str++;
+    __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_COMMAND |
+                 (c << _SIM_CONTROL_OPERATOR_BITS));
+  }
+  while (c);
+}
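+
+/*
+ * Usage sketch: this call is equivalent to typing "sim set functional
+ * true" at the tile-monitor prompt:
+ *
+ *   sim_command("set functional true");
+ */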
+
+
+
+#ifndef __DOXYGEN__
+
+/**
+ * The underlying implementation of "_sim_syscall()".
+ *
+ * We use extra "and" instructions to ensure that all the values
+ * we are passing to the simulator are actually valid in the registers
+ * (i.e. returned from memory) prior to the write to the SIM_CONTROL SPR.
+ */
+static __inline int _sim_syscall0(int val)
+{
+  long result;
+  __asm__ __volatile__ ("mtspr SIM_CONTROL, r0"
+                        : "=R00" (result) : "R00" (val));
+  return result;
+}
+
+static __inline int _sim_syscall1(int val, long arg1)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result) : "R00" (val), "R01" (arg1));
+  return result;
+}
+
+static __inline int _sim_syscall2(int val, long arg1, long arg2)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result)
+                        : "R00" (val), "R01" (arg1), "R02" (arg2));
+  return result;
+}
+
+/* Note that _sim_syscall3() and higher are technically at risk of
+   receiving an interrupt right before the mtspr bundle, in which case
+   the register values for arguments 3 and up may still be in flight
+   to the core from a stack frame reload. */
+
+static __inline int _sim_syscall3(int val, long arg1, long arg2, long arg3)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r3, r3 };"
+                        "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result)
+                        : "R00" (val), "R01" (arg1), "R02" (arg2),
+                          "R03" (arg3));
+  return result;
+}
+
+static __inline int _sim_syscall4(int val, long arg1, long arg2, long arg3,
+                                  long arg4)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r3, r4 };"
+                        "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result)
+                        : "R00" (val), "R01" (arg1), "R02" (arg2),
+                          "R03" (arg3), "R04" (arg4));
+  return result;
+}
+
+static __inline int _sim_syscall5(int val, long arg1, long arg2, long arg3,
+                                  long arg4, long arg5)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r3, r4; and zero, r5, r5 };"
+                        "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result)
+                        : "R00" (val), "R01" (arg1), "R02" (arg2),
+                          "R03" (arg3), "R04" (arg4), "R05" (arg5));
+  return result;
+}
+
+
+/**
+ * Make a special syscall to the simulator itself, if running under
+ * simulation. This is used as the implementation of other functions
+ * and should not be used outside this file.
+ *
+ * @param syscall_num The simulator syscall number.
+ * @param nr The number of additional arguments provided.
+ *
+ * @return Varies by syscall.
+ */
+#define _sim_syscall(syscall_num, nr, args...) \
+  _sim_syscall##nr( \
+    ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, args)
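+
+/*
+ * Expansion sketch: a call such as
+ *
+ *   _sim_syscall(SIM_SYSCALL_QUERY_WATCHPOINT, 1, pid)
+ *
+ * becomes
+ *
+ *   _sim_syscall1((SIM_SYSCALL_QUERY_WATCHPOINT <<
+ *                  _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, pid);
+ */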
+
+
+/* Values for the "access_mask" parameters below. */
+#define SIM_WATCHPOINT_READ    1
+#define SIM_WATCHPOINT_WRITE   2
+#define SIM_WATCHPOINT_EXECUTE 4
+
+
+static __inline int
+sim_add_watchpoint(unsigned int process_id,
+                   unsigned long address,
+                   unsigned long size,
+                   unsigned int access_mask,
+                   unsigned long user_data)
+{
+  return _sim_syscall(SIM_SYSCALL_ADD_WATCHPOINT, 5, process_id,
+                     address, size, access_mask, user_data);
+}
+
+
+static __inline int
+sim_remove_watchpoint(unsigned int process_id,
+                      unsigned long address,
+                      unsigned long size,
+                      unsigned int access_mask,
+                      unsigned long user_data)
+{
+  return _sim_syscall(SIM_SYSCALL_REMOVE_WATCHPOINT, 5, process_id,
+                     address, size, access_mask, user_data);
+}
+
+
+/**
+ * Return value from sim_query_watchpoint.
+ */
+struct SimQueryWatchpointStatus
+{
+  /**
+   * 0 if a watchpoint fired, 1 if no watchpoint fired, or -1 for
+   * error (meaning a bad process_id).
+   */
+  int syscall_status;
+
+  /**
+   * The address of the watchpoint that fired (this is the address
+   * passed to sim_add_watchpoint, not an address within that range
+   * that actually triggered the watchpoint).
+   */
+  unsigned long address;
+
+  /** The arbitrary user_data installed by sim_add_watchpoint. */
+  unsigned long user_data;
+};
+
+
+static __inline struct SimQueryWatchpointStatus
+sim_query_watchpoint(unsigned int process_id)
+{
+  struct SimQueryWatchpointStatus status;
+  long val = SIM_CONTROL_SYSCALL |
+    (SIM_SYSCALL_QUERY_WATCHPOINT << _SIM_CONTROL_OPERATOR_BITS);
+  __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (status.syscall_status),
+                          "=R01" (status.address),
+                          "=R02" (status.user_data)
+                        : "R00" (val), "R01" (process_id));
+  return status;
+}
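+
+/*
+ * Usage sketch (pid, addr, len and handle_hit() are placeholders):
+ * watch a buffer for writes, then ask which watchpoint fired:
+ *
+ *   sim_add_watchpoint(pid, addr, len, SIM_WATCHPOINT_WRITE, 0x1234);
+ *   ...
+ *   struct SimQueryWatchpointStatus s = sim_query_watchpoint(pid);
+ *   if (s.syscall_status == 0)
+ *     handle_hit(s.address, s.user_data);
+ */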
+
+
+/* On the simulator, confirm lines have been evicted everywhere. */
+static __inline void
+sim_validate_lines_evicted(unsigned long long pa, unsigned long length)
+{
+#ifdef __LP64__
+  _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 2, pa, length);
+#else
+  _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 4,
+               0 /* dummy */, (long)(pa), (long)(pa >> 32), length);
+#endif
+}
+
+
+#endif /* !__DOXYGEN__ */
+
+
+
+
+/**
+ * Modify the shaping parameters of a shim.
+ *
+ * @param shim The shim to modify. One of:
+ *   SIM_CONTROL_SHAPING_GBE_0
+ *   SIM_CONTROL_SHAPING_GBE_1
+ *   SIM_CONTROL_SHAPING_GBE_2
+ *   SIM_CONTROL_SHAPING_GBE_3
+ *   SIM_CONTROL_SHAPING_XGBE_0
+ *   SIM_CONTROL_SHAPING_XGBE_1
+ *
+ * @param type The type of shaping. This should be the same type of
+ * shaping that is already in place on the shim. One of:
+ *   SIM_CONTROL_SHAPING_MULTIPLIER
+ *   SIM_CONTROL_SHAPING_PPS
+ *   SIM_CONTROL_SHAPING_BPS
+ *
+ * @param units The magnitude of the rate. One of:
+ *   SIM_CONTROL_SHAPING_UNITS_SINGLE
+ *   SIM_CONTROL_SHAPING_UNITS_KILO
+ *   SIM_CONTROL_SHAPING_UNITS_MEGA
+ *   SIM_CONTROL_SHAPING_UNITS_GIGA
+ *
+ * @param rate The rate to which to change it. This must fit in
+ * SIM_CONTROL_SHAPING_RATE_BITS bits or a warning is issued and
+ * the shaping is not changed.
+ *
+ * @return 0 if no problems were detected in the arguments to sim_set_shaping
+ * or 1 if problems were detected (for example, rate does not fit in 17 bits).
+ */
+static __inline int
+sim_set_shaping(unsigned shim,
+                unsigned type,
+                unsigned units,
+                unsigned rate)
+{
+  if ((rate & ~((1 << SIM_CONTROL_SHAPING_RATE_BITS) - 1)) != 0)
+    return 1;
+
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_SHAPING_SPR_ARG(shim, type, units, rate));
+  return 0;
+}
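+
+/*
+ * Usage sketch (assumes BPS shaping is already in place on the shim):
+ * cap gbe 0 at 100 megabits per second; the rate value 100 easily
+ * fits in SIM_CONTROL_SHAPING_RATE_BITS:
+ *
+ *   sim_set_shaping(SIM_CONTROL_SHAPING_GBE_0,
+ *                   SIM_CONTROL_SHAPING_BPS,
+ *                   SIM_CONTROL_SHAPING_UNITS_MEGA, 100);
+ */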
+
+#ifdef __tilegx__
+
+/** Enable a set of mPIPE links.  Pass a -1 link_mask to enable all links. */
+static __inline void
+sim_enable_mpipe_links(unsigned mpipe, unsigned long link_mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL,
+               (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
+                (mpipe << 8) | (1 << 16) | ((uint_reg_t)link_mask << 32)));
+}
+
+/** Disable a set of mPIPE links.  Pass a -1 link_mask to disable all links. */
+static __inline void
+sim_disable_mpipe_links(unsigned mpipe, unsigned long link_mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL,
+               (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
+                (mpipe << 8) | (0 << 16) | ((uint_reg_t)link_mask << 32)));
+}
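+
+/*
+ * Usage sketch: enable every link on mPIPE shim 0:
+ *
+ *   sim_enable_mpipe_links(0, -1);
+ */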
+
+#endif /* __tilegx__ */
+
+
+/*
+ * An API for changing "functional" mode.
+ */
+
+#ifndef __DOXYGEN__
+
+#define sim_enable_functional() \
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_ENABLE_FUNCTIONAL)
+
+#define sim_disable_functional() \
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_DISABLE_FUNCTIONAL)
+
+#endif /* __DOXYGEN__ */
+
+
+/*
+ * Profiler support.
+ */
+
+/**
+ * Turn profiling on for the current task.
+ *
+ * Note that this has no effect if run in an environment without
+ * profiling support (so the proper flags to the simulator must
+ * be supplied).
+ */
+static __inline void
+sim_profiler_enable(void)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_ENABLE);
+}
+
+
+/** Turn profiling off for the current task. */
+static __inline void
+sim_profiler_disable(void)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_DISABLE);
+}
+
+
+/**
+ * Turn profiling on or off for the current task.
+ *
+ * @param enabled If true, turns on profiling. If false, turns it off.
+ *
+ * Note that this has no effect if run in an environment without
+ * profiling support (so the proper flags to the simulator must
+ * be supplied).
+ */
+static __inline void
+sim_profiler_set_enabled(int enabled)
+{
+  int val =
+    enabled ? SIM_CONTROL_PROFILER_ENABLE : SIM_CONTROL_PROFILER_DISABLE;
+  __insn_mtspr(SPR_SIM_CONTROL, val);
+}
+
+
+/**
+ * Return true if and only if profiling is currently enabled
+ * for the current task.
+ *
+ * This returns false, even after sim_profiler_enable() has been
+ * called, if the current execution environment does not support
+ * profiling.
+ */
+static __inline int
+sim_profiler_is_enabled(void)
+{
+  return ((__insn_mfspr(SPR_SIM_CONTROL) & SIM_PROFILER_ENABLED_MASK) != 0);
+}
+
+
+/**
+ * Reset profiling counters to zero for the current task.
+ *
+ * Resetting can be done while profiling is enabled.  It does not affect
+ * the chip-wide profiling counters.
+ */
+static __inline void
+sim_profiler_clear(void)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_CLEAR);
+}
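+
+/*
+ * Usage sketch (region_of_interest() is a placeholder): profile a
+ * single region by clearing the counters first:
+ *
+ *   sim_profiler_clear();
+ *   sim_profiler_enable();
+ *   region_of_interest();
+ *   sim_profiler_disable();
+ */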
+
+
+/**
+ * Enable specified chip-level profiling counters.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_CHIP_ALL (enables all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_CHIP_MEMCTL (enable all memory controllers)
+ * SIM_CHIP_XAUI (enable all XAUI controllers)
+ * SIM_CHIP_MPIPE (enable all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_enable(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask));
+}
+
+
+/**
+ * Disable specified chip-level profiling counters.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_CHIP_ALL (disables all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_CHIP_MEMCTL (disable all memory controllers)
+ * SIM_CHIP_XAUI (disable all XAUI controllers)
+ * SIM_CHIP_MPIPE (disable all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_disable(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask));
+}
+
+
+/**
+ * Reset specified chip-level profiling counters to zero.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ * SIM_CHIP_ALL (clears all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ * SIM_CHIP_MEMCTL (clear all memory controllers)
+ * SIM_CHIP_XAUI (clear all XAUI controllers)
+ * SIM_CHIP_MPIPE (clear all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_clear(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask));
+}
+
+
+/*
+ * Event support.
+ */
+
+#ifndef __DOXYGEN__
+
+static __inline void
+sim_event_begin(unsigned int x)
+{
+#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
+  __insn_mtspr(SPR_EVENT_BEGIN, x);
+#endif
+}
+
+static __inline void
+sim_event_end(unsigned int x)
+{
+#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
+  __insn_mtspr(SPR_EVENT_END, x);
+#endif
+}
+
+#endif /* !__DOXYGEN__ */
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* !__ARCH_SIM_H__ */
+
+/** @} */
index 6418fbde063e8a996c95c7004c8fd06a6147a54a..7a17082c3773edb6ae7fc4ef537935a2ceefd4d2 100644 (file)
-// Copyright 2010 Tilera Corporation. All Rights Reserved.
-//
-//   This program is free software; you can redistribute it and/or
-//   modify it under the terms of the GNU General Public License
-//   as published by the Free Software Foundation, version 2.
-//
-//   This program is distributed in the hope that it will be useful, but
-//   WITHOUT ANY WARRANTY; without even the implied warranty of
-//   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
-//   NON INFRINGEMENT.  See the GNU General Public License for
-//   more details.
-
-//! @file
-//!
-//! Some low-level simulator definitions.
-//!
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/**
+ * @file
+ *
+ * Some low-level simulator definitions.
+ */
 
 #ifndef __ARCH_SIM_DEF_H__
 #define __ARCH_SIM_DEF_H__
 
 
-//! Internal: the low bits of the SIM_CONTROL_* SPR values specify
-//! the operation to perform, and the remaining bits are
-//! an operation-specific parameter (often unused).
-//!
+/**
+ * Internal: the low bits of the SIM_CONTROL_* SPR values specify
+ * the operation to perform, and the remaining bits are
+ * an operation-specific parameter (often unused).
+ */
 #define _SIM_CONTROL_OPERATOR_BITS 8
 
 
-//== Values which can be written to SPR_SIM_CONTROL.
+/*
+ * Values which can be written to SPR_SIM_CONTROL.
+ */
 
-//! If written to SPR_SIM_CONTROL, stops profiling.
-//!
+/** If written to SPR_SIM_CONTROL, stops profiling. */
 #define SIM_CONTROL_PROFILER_DISABLE 0
 
-//! If written to SPR_SIM_CONTROL, starts profiling.
-//!
+/** If written to SPR_SIM_CONTROL, starts profiling. */
 #define SIM_CONTROL_PROFILER_ENABLE 1
 
-//! If written to SPR_SIM_CONTROL, clears profiling counters.
-//!
+/** If written to SPR_SIM_CONTROL, clears profiling counters. */
 #define SIM_CONTROL_PROFILER_CLEAR 2
 
-//! If written to SPR_SIM_CONTROL, checkpoints the simulator.
-//!
+/** If written to SPR_SIM_CONTROL, checkpoints the simulator. */
 #define SIM_CONTROL_CHECKPOINT 3
 
-//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
-//! sets the tracing mask to the given mask. See "sim_set_tracing()".
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
+ * sets the tracing mask to the given mask. See "sim_set_tracing()".
+ */
 #define SIM_CONTROL_SET_TRACING 4
 
-//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
-//! dumps the requested items of machine state to the log.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
+ * dumps the requested items of machine state to the log.
+ */
 #define SIM_CONTROL_DUMP 5
 
-//! If written to SPR_SIM_CONTROL, clears chip-level profiling counters.
-//!
+/** If written to SPR_SIM_CONTROL, clears chip-level profiling counters. */
 #define SIM_CONTROL_PROFILER_CHIP_CLEAR 6
 
-//! If written to SPR_SIM_CONTROL, disables chip-level profiling.
-//!
+/** If written to SPR_SIM_CONTROL, disables chip-level profiling. */
 #define SIM_CONTROL_PROFILER_CHIP_DISABLE 7
 
-//! If written to SPR_SIM_CONTROL, enables chip-level profiling.
-//!
+/** If written to SPR_SIM_CONTROL, enables chip-level profiling. */
 #define SIM_CONTROL_PROFILER_CHIP_ENABLE 8
 
-//! If written to SPR_SIM_CONTROL, enables chip-level functional mode
-//!
+/** If written to SPR_SIM_CONTROL, enables chip-level functional mode. */
 #define SIM_CONTROL_ENABLE_FUNCTIONAL 9
 
-//! If written to SPR_SIM_CONTROL, disables chip-level functional mode.
-//!
+/** If written to SPR_SIM_CONTROL, disables chip-level functional mode. */
 #define SIM_CONTROL_DISABLE_FUNCTIONAL 10
 
-//! If written to SPR_SIM_CONTROL, enables chip-level functional mode.
-//! All tiles must perform this write for functional mode to be enabled.
-//! Ignored in naked boot mode unless --functional is specified.
-//! WARNING: Only the hypervisor startup code should use this!
-//!
+/**
+ * If written to SPR_SIM_CONTROL, enables chip-level functional mode.
+ * All tiles must perform this write for functional mode to be enabled.
+ * Ignored in naked boot mode unless --functional is specified.
+ * WARNING: Only the hypervisor startup code should use this!
+ */
 #define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! writes a string directly to the simulator output.  Written to once for
-//! each character in the string, plus a final NUL.  Instead of NUL,
-//! you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
-//!
-// ISSUE: Document the meaning of "newline", and the handling of NUL.
-//
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * writes a string directly to the simulator output.  Written to once for
+ * each character in the string, plus a final NUL.  Instead of NUL,
+ * you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
+ */
+/* ISSUE: Document the meaning of "newline", and the handling of NUL. */
 #define SIM_CONTROL_PUTC 12
 
-//! If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
-//! this core.  This is intended to be used before a loop that will
-//! invalidate the cache by loading new data and evicting all current data.
-//! Generally speaking, this API should only be used by system code.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
+ * this core.  This is intended to be used before a loop that will
+ * invalidate the cache by loading new data and evicting all current data.
+ * Generally speaking, this API should only be used by system code.
+ */
 #define SIM_CONTROL_GRINDER_CLEAR 13
 
-//! If written to SPR_SIM_CONTROL, shuts down the simulator.
-//!
+/** If written to SPR_SIM_CONTROL, shuts down the simulator. */
 #define SIM_CONTROL_SHUTDOWN 14
 
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that a fork syscall just created the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that a fork syscall just created the given process.
+ */
 #define SIM_CONTROL_OS_FORK 15
 
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that an exit syscall was just executed by the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that an exit syscall was just executed by the given process.
+ */
 #define SIM_CONTROL_OS_EXIT 16
 
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! indicates that the OS just switched to the given process.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * indicates that the OS just switched to the given process.
+ */
 #define SIM_CONTROL_OS_SWITCH 17
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that an exec syscall was just executed. Written to once for
-//! each character in the executable name, plus a final NUL.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that an exec syscall was just executed. Written to once for
+ * each character in the executable name, plus a final NUL.
+ */
 #define SIM_CONTROL_OS_EXEC 18
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that an interpreter (PT_INTERP) was loaded.  Written to once
-//! for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
-//! hex load address starting with "0x", and "PATH" is the executable name.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that an interpreter (PT_INTERP) was loaded.  Written to once
+ * for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
+ * hex load address starting with "0x", and "PATH" is the executable name.
+ */
 #define SIM_CONTROL_OS_INTERP 19
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that a dll was loaded.  Written to once for each character
-//! in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
-//! address starting with "0x", and "PATH" is the executable name.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that a dll was loaded.  Written to once for each character
+ * in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
+ * address starting with "0x", and "PATH" is the executable name.
+ */
 #define SIM_CONTROL_DLOPEN 20
 
-//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
-//! indicates that a dll was unloaded.  Written to once for each character
-//! in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
-//! address starting with "0x".
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * indicates that a dll was unloaded.  Written to once for each character
+ * in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
+ * address starting with "0x".
+ */
 #define SIM_CONTROL_DLCLOSE 21
 
-//! If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
-//! indicates whether to allow data reads to remotely-cached
-//! dirty cache lines to be cached locally without grinder warnings or
-//! assertions (used by Linux kernel fast memcpy).
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
+ * indicates whether to allow data reads of remotely-cached
+ * dirty cache lines to be cached locally without grinder warnings or
+ * assertions (used by Linux kernel fast memcpy).
+ */
 #define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22
 
-//! If written to SPR_SIM_CONTROL, enables memory tracing.
-//!
+/** If written to SPR_SIM_CONTROL, enables memory tracing. */
 #define SIM_CONTROL_ENABLE_MEM_LOGGING 23
 
-//! If written to SPR_SIM_CONTROL, disables memory tracing.
-//!
+/** If written to SPR_SIM_CONTROL, disables memory tracing. */
 #define SIM_CONTROL_DISABLE_MEM_LOGGING 24
 
-//! If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
-//! the gbe or xgbe shims. Must specify the shim id, the type, the units, and
-//! the rate, as defined in SIM_SHAPING_SPR_ARG.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
+ * the gbe or xgbe shims. Must specify the shim id, the type, the units, and
+ * the rate, as defined in SIM_SHAPING_SPR_ARG.
+ */
 #define SIM_CONTROL_SHAPING 25
 
-//! If written to SPR_SIM_CONTROL, combined with character (shifted by 8),
-//! requests that a simulator command be executed.  Written to once for each
-//! character in the command, plus a final NUL.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
+ * requests that a simulator command be executed.  Written to once for each
+ * character in the command, plus a final NUL.
+ */
 #define SIM_CONTROL_COMMAND 26
 
-//! If written to SPR_SIM_CONTROL, indicates that the simulated system
-//! is panicking, to allow debugging via --debug-on-panic.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, indicates that the simulated system
+ * is panicking, to allow debugging via --debug-on-panic.
+ */
 #define SIM_CONTROL_PANIC 27
 
-//! If written to SPR_SIM_CONTROL, triggers a simulator syscall.
-//! See "sim_syscall()" for more info.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, triggers a simulator syscall.
+ * See "sim_syscall()" for more info.
+ */
 #define SIM_CONTROL_SYSCALL 32
 
-//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
-//! provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
-//! use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
-//!
+/**
+ * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
+ * provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
+ * use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
+ */
 #define SIM_CONTROL_OS_FORK_PARENT 33
 
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8), clears the pending magic data section.  The cleared
-//! pending magic data section and any subsequently appended magic bytes
-//! will only take effect when the classifier blast programmer is run.
+/**
+ * If written to SPR_SIM_CONTROL, combined with an mPIPE shim number
+ * (shifted by 8), clears the pending magic data section.  The cleared
+ * pending magic data section and any subsequently appended magic bytes
+ * will only take effect when the classifier blast programmer is run.
+ */
 #define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34
 
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8) and a byte of data (shifted by 16), appends that byte
-//! to the shim's pending magic data section.  The pending magic data
-//! section takes effect when the classifier blast programmer is run.
+/**
+ * If written to SPR_SIM_CONTROL, combined with an mPIPE shim number
+ * (shifted by 8) and a byte of data (shifted by 16), appends that byte
+ * to the shim's pending magic data section.  The pending magic data
+ * section takes effect when the classifier blast programmer is run.
+ */
 #define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35
 
-//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
-//! (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
-//! mask of links (shifted by 32), enable or disable the corresponding
-//! mPIPE links.
+/**
+ * If written to SPR_SIM_CONTROL, combined with an mPIPE shim number
+ * (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
+ * mask of links (shifted by 32), enable or disable the corresponding
+ * mPIPE links.
+ */
 #define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36
 
-//== Syscall numbers for use with "sim_syscall()".
 
-//! Syscall number for sim_add_watchpoint().
-//!
+/*
+ * Syscall numbers for use with "sim_syscall()".
+ */
+
+/** Syscall number for sim_add_watchpoint(). */
 #define SIM_SYSCALL_ADD_WATCHPOINT 2
 
-//! Syscall number for sim_remove_watchpoint().
-//!
+/** Syscall number for sim_remove_watchpoint(). */
 #define SIM_SYSCALL_REMOVE_WATCHPOINT 3
 
-//! Syscall number for sim_query_watchpoint().
-//!
+/** Syscall number for sim_query_watchpoint(). */
 #define SIM_SYSCALL_QUERY_WATCHPOINT 4
 
-//! Syscall number that asserts that the cache lines whose 64-bit PA
-//! is passed as the second argument to sim_syscall(), and over a
-//! range passed as the third argument, are no longer in cache.
-//! The simulator raises an error if this is not the case.
-//!
+/**
+ * Syscall number that asserts that the cache lines starting at the
+ * 64-bit PA passed as the second argument to sim_syscall(), and
+ * extending over the range passed as the third argument, are no
+ * longer in cache.
+ * The simulator raises an error if this is not the case.
+ */
 #define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5
 
 
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
+ */
 
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
 
-//! Enable --trace-cycle when passed to simulator_set_tracing().
-//!
+/** Enable --trace-cycles when passed to sim_set_tracing(). */
 #define SIM_TRACE_CYCLES          0x01
 
-//! Enable --trace-router when passed to simulator_set_tracing().
-//!
+/** Enable --trace-router when passed to sim_set_tracing(). */
 #define SIM_TRACE_ROUTER          0x02
 
-//! Enable --trace-register-writes when passed to simulator_set_tracing().
-//!
+/** Enable --trace-register-writes when passed to sim_set_tracing(). */
 #define SIM_TRACE_REGISTER_WRITES 0x04
 
-//! Enable --trace-disasm when passed to simulator_set_tracing().
-//!
+/** Enable --trace-disasm when passed to sim_set_tracing(). */
 #define SIM_TRACE_DISASM          0x08
 
-//! Enable --trace-stall-info when passed to simulator_set_tracing().
-//!
+/** Enable --trace-stall-info when passed to sim_set_tracing(). */
 #define SIM_TRACE_STALL_INFO      0x10
 
-//! Enable --trace-memory-controller when passed to simulator_set_tracing().
-//!
+/** Enable --trace-memory-controller when passed to sim_set_tracing(). */
 #define SIM_TRACE_MEMORY_CONTROLLER 0x20
 
-//! Enable --trace-l2 when passed to simulator_set_tracing().
-//!
+/** Enable --trace-l2 when passed to sim_set_tracing(). */
 #define SIM_TRACE_L2_CACHE 0x40
 
-//! Enable --trace-lines when passed to simulator_set_tracing().
-//!
+/** Enable --trace-lines when passed to sim_set_tracing(). */
 #define SIM_TRACE_LINES 0x80
 
-//! Turn off all tracing when passed to simulator_set_tracing().
-//!
+/** Turn off all tracing when passed to sim_set_tracing(). */
 #define SIM_TRACE_NONE 0
 
-//! Turn on all tracing when passed to simulator_set_tracing().
-//!
+/** Turn on all tracing when passed to sim_set_tracing(). */
 #define SIM_TRACE_ALL (-1)
 
-//! @}
+/** @} */
 
-//! Computes the value to write to SPR_SIM_CONTROL to set tracing flags.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to set tracing flags. */
 #define SIM_TRACE_SPR_ARG(mask) \
   (SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
 
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
+ */
 
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
 
-//! Dump the general-purpose registers.
-//!
+/** Dump the general-purpose registers. */
 #define SIM_DUMP_REGS          0x001
 
-//! Dump the SPRs.
-//!
+/** Dump the SPRs. */
 #define SIM_DUMP_SPRS          0x002
 
-//! Dump the ITLB.
-//!
+/** Dump the ITLB. */
 #define SIM_DUMP_ITLB          0x004
 
-//! Dump the DTLB.
-//!
+/** Dump the DTLB. */
 #define SIM_DUMP_DTLB          0x008
 
-//! Dump the L1 I-cache.
-//!
+/** Dump the L1 I-cache. */
 #define SIM_DUMP_L1I           0x010
 
-//! Dump the L1 D-cache.
-//!
+/** Dump the L1 D-cache. */
 #define SIM_DUMP_L1D           0x020
 
-//! Dump the L2 cache.
-//!
+/** Dump the L2 cache. */
 #define SIM_DUMP_L2            0x040
 
-//! Dump the switch registers.
-//!
+/** Dump the switch registers. */
 #define SIM_DUMP_SNREGS        0x080
 
-//! Dump the switch ITLB.
-//!
+/** Dump the switch ITLB. */
 #define SIM_DUMP_SNITLB        0x100
 
-//! Dump the switch L1 I-cache.
-//!
+/** Dump the switch L1 I-cache. */
 #define SIM_DUMP_SNL1I         0x200
 
-//! Dump the current backtrace.
-//!
+/** Dump the current backtrace. */
 #define SIM_DUMP_BACKTRACE     0x400
 
-//! Only dump valid lines in caches.
-//!
+/** Only dump valid lines in caches. */
 #define SIM_DUMP_VALID_LINES   0x800
 
-//! Dump everything that is dumpable.
-//!
+/** Dump everything that is dumpable. */
 #define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES)
 
-// @}
+/** @} */
 
-//! Computes the value to write to SPR_SIM_CONTROL to dump machine state.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to dump machine state. */
 #define SIM_DUMP_SPR_ARG(mask) \
   (SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
 
-//== Bit masks which can be shifted by 8, combined with
-//== SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
+/*
+ * Bit masks which can be shifted by 8, combined with
+ * SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
+ */
 
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
 
-//! Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
 #define SIM_CHIP_MEMCTL        0x001
 
-//! Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
 #define SIM_CHIP_XAUI          0x002
 
-//! Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
 #define SIM_CHIP_PCIE          0x004
 
-//! Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
 #define SIM_CHIP_MPIPE         0x008
 
-//! Reference all chip devices.
-//!
+/** Use with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
+#define SIM_CHIP_TRIO          0x010
+
+/** Reference all chip devices. */
 #define SIM_CHIP_ALL (-1)
 
-//! @}
+/** @} */
 
-//! Computes the value to write to SPR_SIM_CONTROL to clear chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. */
 #define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \
   (SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
-//! Computes the value to write to SPR_SIM_CONTROL to disable chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to disable chip statistics. */
 #define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \
   (SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
-//! Computes the value to write to SPR_SIM_CONTROL to enable chip statistics.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. */
 #define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \
   (SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
 
 
 
-// Shim bitrate controls.
+/* Shim bitrate controls. */
 
-//! The number of bits used to store the shim id.
-//!
+/** The number of bits used to store the shim id. */
 #define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3
 
-//! @addtogroup arch_sim
-//! @{
+/**
+ * @addtogroup arch_sim
+ * @{
+ */
 
-//! Change the gbe 0 bitrate.
-//!
+/** Change the gbe 0 bitrate. */
 #define SIM_CONTROL_SHAPING_GBE_0 0x0
 
-//! Change the gbe 1 bitrate.
-//!
+/** Change the gbe 1 bitrate. */
 #define SIM_CONTROL_SHAPING_GBE_1 0x1
 
-//! Change the gbe 2 bitrate.
-//!
+/** Change the gbe 2 bitrate. */
 #define SIM_CONTROL_SHAPING_GBE_2 0x2
 
-//! Change the gbe 3 bitrate.
-//!
+/** Change the gbe 3 bitrate. */
 #define SIM_CONTROL_SHAPING_GBE_3 0x3
 
-//! Change the xgbe 0 bitrate.
-//!
+/** Change the xgbe 0 bitrate. */
 #define SIM_CONTROL_SHAPING_XGBE_0 0x4
 
-//! Change the xgbe 1 bitrate.
-//!
+/** Change the xgbe 1 bitrate. */
 #define SIM_CONTROL_SHAPING_XGBE_1 0x5
 
-//! The type of shaping to do.
-//!
+/** The type of shaping to do. */
 #define SIM_CONTROL_SHAPING_TYPE_BITS 2
 
-//! Control the multiplier.
-//!
+/** Control the multiplier. */
 #define SIM_CONTROL_SHAPING_MULTIPLIER 0
 
-//! Control the PPS.
-//!
+/** Control the PPS. */
 #define SIM_CONTROL_SHAPING_PPS 1
 
-//! Control the BPS.
-//!
+/** Control the BPS. */
 #define SIM_CONTROL_SHAPING_BPS 2
 
-//! The number of bits for the units for the shaping parameter.
-//!
+/** The number of bits for the units for the shaping parameter. */
 #define SIM_CONTROL_SHAPING_UNITS_BITS 2
 
-//! Provide a number in single units.
-//!
+/** Provide a number in single units. */
 #define SIM_CONTROL_SHAPING_UNITS_SINGLE 0
 
-//! Provide a number in kilo units.
-//!
+/** Provide a number in kilo units. */
 #define SIM_CONTROL_SHAPING_UNITS_KILO 1
 
-//! Provide a number in mega units.
-//!
+/** Provide a number in mega units. */
 #define SIM_CONTROL_SHAPING_UNITS_MEGA 2
 
-//! Provide a number in giga units.
-//!
+/** Provide a number in giga units. */
 #define SIM_CONTROL_SHAPING_UNITS_GIGA 3
 
-// @}
+/** @} */
 
-//! How many bits are available for the rate.
-//!
+/** How many bits are available for the rate. */
 #define SIM_CONTROL_SHAPING_RATE_BITS \
   (32 - (_SIM_CONTROL_OPERATOR_BITS + \
          SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
          SIM_CONTROL_SHAPING_TYPE_BITS + \
          SIM_CONTROL_SHAPING_UNITS_BITS))
 
-//! Computes the value to write to SPR_SIM_CONTROL to change a bitrate.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to change a bitrate. */
 #define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \
   (SIM_CONTROL_SHAPING | \
    ((shim) | \
                SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS)
 
 
-//== Values returned when reading SPR_SIM_CONTROL.
-// ISSUE: These names should share a longer common prefix.
+/*
+ * Values returned when reading SPR_SIM_CONTROL.
+ * ISSUE: These names should share a longer common prefix.
+ */
 
-//! When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
-//! (SIM_TRACE_xxx values).
-//!
+/**
+ * When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
+ * (SIM_TRACE_xxx values).
+ */
 #define SIM_TRACE_FLAG_MASK 0xFFFF
 
-//! When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled.
-//!
+/** When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled. */
 #define SIM_PROFILER_ENABLED_MASK 0x10000
 
 
-//== Special arguments for "SIM_CONTROL_PUTC".
+/*
+ * Special arguments for "SIM_CONTROL_PUTC".
+ */
 
-//! Flag value for forcing a PUTC string-flush, including
-//! coordinate/cycle prefix and newline.
-//!
+/**
+ * Flag value for forcing a PUTC string-flush, including
+ * coordinate/cycle prefix and newline.
+ */
 #define SIM_PUTC_FLUSH_STRING 0x100
 
-//! Flag value for forcing a PUTC binary-data-flush, which skips the
-//! prefix and does not append a newline.
-//!
+/**
+ * Flag value for forcing a PUTC binary-data-flush, which skips the
+ * prefix and does not append a newline.
+ */
 #define SIM_PUTC_FLUSH_BINARY 0x101
 
 
-#endif //__ARCH_SIM_DEF_H__
+#endif /* __ARCH_SIM_DEF_H__ */
index c8fdbd9a45e67b53c4c9da654613e844d9902807..442fcba0d12266db8ed8febd68c62a101a7b3ab6 100644 (file)
  *   more details.
  */
 
+/*
+ * In addition to including the proper base SPR definition file, depending
+ * on machine architecture, this file defines several macros which allow
+ * kernel code to use protection-level dependent SPRs without worrying
+ * about which PL it's running at.  In these macros, the PL that the SPR
+ * or interrupt number applies to is replaced by K.
+ */
+
+#if CONFIG_KERNEL_PL != 1 && CONFIG_KERNEL_PL != 2
+#error CONFIG_KERNEL_PL must be 1 or 2
+#endif
+
+/* Concatenate 4 strings. */
+#define __concat4(a, b, c, d) a ## b ## c ## d
+#define _concat4(a, b, c, d)  __concat4(a, b, c, d)
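+
+/*
+ * Expansion sketch: with CONFIG_KERNEL_PL set to 1, a definition such
+ * as SPR_SYSTEM_SAVE_K_0 below becomes
+ * _concat4(SPR_SYSTEM_SAVE_, 1, _0,), i.e. SPR_SYSTEM_SAVE_1_0.
+ */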
+
 #ifdef __tilegx__
 #include <arch/spr_def_64.h>
+
+/* TILE-Gx dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K \
+       _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_SET_K \
+       _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_RESET_K \
+       _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_VECTOR_BASE_K \
+       _concat4(SPR_INTERRUPT_VECTOR_BASE_, CONFIG_KERNEL_PL,,)
+
+#define SPR_IPI_MASK_K \
+       _concat4(SPR_IPI_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_RESET_K \
+       _concat4(SPR_IPI_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+       _concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_K \
+       _concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_RESET_K \
+       _concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
+#define INT_IPI_K \
+       _concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
+
+#define SPR_SINGLE_STEP_CONTROL_K \
+       _concat4(SPR_SINGLE_STEP_CONTROL_, CONFIG_KERNEL_PL,,)
+#define SPR_SINGLE_STEP_EN_K_K \
+       _concat4(SPR_SINGLE_STEP_EN_, CONFIG_KERNEL_PL, _, CONFIG_KERNEL_PL)
+#define INT_SINGLE_STEP_K \
+       _concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
+
 #else
 #include <arch/spr_def_32.h>
+
+/* TILEPro dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K_0 \
+       _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_K_1 \
+       _concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_SET_K_0 \
+       _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_SET_K_1 \
+       _concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_RESET_K_0 \
+       _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_RESET_K_1 \
+       _concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _1,)
+
 #endif
+
+/* Generic protection-level dependent SPRs. */
+
+#define SPR_SYSTEM_SAVE_K_0 \
+       _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)
+#define SPR_SYSTEM_SAVE_K_1 \
+       _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,)
+#define SPR_SYSTEM_SAVE_K_2 \
+       _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2,)
+#define SPR_SYSTEM_SAVE_K_3 \
+       _concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _3,)
+#define SPR_EX_CONTEXT_K_0 \
+       _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,)
+#define SPR_EX_CONTEXT_K_1 \
+       _concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTCTRL_K_STATUS \
+       _concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
+#define INT_INTCTRL_K \
+       _concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
index b4fc06864df652ba9a3c1c9461b008016bff04cd..bbc1f4c924eebe0c9d0183bf5119468297e430a1 100644 (file)
 #define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
 #define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
 #define SPR_EX_CONTEXT_1_1__ICS_MASK  0x4
+#define SPR_EX_CONTEXT_2_0 0x4605
+#define SPR_EX_CONTEXT_2_1 0x4606
+#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
+#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
+#define SPR_EX_CONTEXT_2_1__PL_MASK  0x3
+#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
+#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
+#define SPR_EX_CONTEXT_2_1__ICS_MASK  0x4
 #define SPR_FAIL 0x4e09
 #define SPR_INTCTRL_0_STATUS 0x4a07
 #define SPR_INTCTRL_1_STATUS 0x4807
+#define SPR_INTCTRL_2_STATUS 0x4607
 #define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a
 #define SPR_INTERRUPT_MASK_0_0 0x4a08
 #define SPR_INTERRUPT_MASK_0_1 0x4a09
 #define SPR_INTERRUPT_MASK_1_0 0x4809
 #define SPR_INTERRUPT_MASK_1_1 0x480a
+#define SPR_INTERRUPT_MASK_2_0 0x4608
+#define SPR_INTERRUPT_MASK_2_1 0x4609
 #define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a
 #define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b
 #define SPR_INTERRUPT_MASK_RESET_1_0 0x480b
 #define SPR_INTERRUPT_MASK_RESET_1_1 0x480c
+#define SPR_INTERRUPT_MASK_RESET_2_0 0x460a
+#define SPR_INTERRUPT_MASK_RESET_2_1 0x460b
 #define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c
 #define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d
 #define SPR_INTERRUPT_MASK_SET_1_0 0x480d
 #define SPR_INTERRUPT_MASK_SET_1_1 0x480e
+#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
+#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
 #define SPR_MPL_DMA_CPL_SET_0 0x5800
 #define SPR_MPL_DMA_CPL_SET_1 0x5801
+#define SPR_MPL_DMA_CPL_SET_2 0x5802
 #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
 #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
+#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
 #define SPR_MPL_INTCTRL_0_SET_0 0x4a00
 #define SPR_MPL_INTCTRL_0_SET_1 0x4a01
+#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
 #define SPR_MPL_INTCTRL_1_SET_0 0x4800
 #define SPR_MPL_INTCTRL_1_SET_1 0x4801
+#define SPR_MPL_INTCTRL_1_SET_2 0x4802
+#define SPR_MPL_INTCTRL_2_SET_0 0x4600
+#define SPR_MPL_INTCTRL_2_SET_1 0x4601
+#define SPR_MPL_INTCTRL_2_SET_2 0x4602
 #define SPR_MPL_SN_ACCESS_SET_0 0x0800
 #define SPR_MPL_SN_ACCESS_SET_1 0x0801
+#define SPR_MPL_SN_ACCESS_SET_2 0x0802
 #define SPR_MPL_SN_CPL_SET_0 0x5a00
 #define SPR_MPL_SN_CPL_SET_1 0x5a01
+#define SPR_MPL_SN_CPL_SET_2 0x5a02
 #define SPR_MPL_SN_FIREWALL_SET_0 0x2c00
 #define SPR_MPL_SN_FIREWALL_SET_1 0x2c01
+#define SPR_MPL_SN_FIREWALL_SET_2 0x2c02
 #define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
 #define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
+#define SPR_MPL_SN_NOTIFY_SET_2 0x2a02
 #define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
 #define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
+#define SPR_MPL_UDN_ACCESS_SET_2 0x0c02
 #define SPR_MPL_UDN_AVAIL_SET_0 0x4000
 #define SPR_MPL_UDN_AVAIL_SET_1 0x4001
+#define SPR_MPL_UDN_AVAIL_SET_2 0x4002
 #define SPR_MPL_UDN_CA_SET_0 0x3c00
 #define SPR_MPL_UDN_CA_SET_1 0x3c01
+#define SPR_MPL_UDN_CA_SET_2 0x3c02
 #define SPR_MPL_UDN_COMPLETE_SET_0 0x1400
 #define SPR_MPL_UDN_COMPLETE_SET_1 0x1401
+#define SPR_MPL_UDN_COMPLETE_SET_2 0x1402
 #define SPR_MPL_UDN_FIREWALL_SET_0 0x3000
 #define SPR_MPL_UDN_FIREWALL_SET_1 0x3001
+#define SPR_MPL_UDN_FIREWALL_SET_2 0x3002
 #define SPR_MPL_UDN_REFILL_SET_0 0x1000
 #define SPR_MPL_UDN_REFILL_SET_1 0x1001
+#define SPR_MPL_UDN_REFILL_SET_2 0x1002
 #define SPR_MPL_UDN_TIMER_SET_0 0x3600
 #define SPR_MPL_UDN_TIMER_SET_1 0x3601
+#define SPR_MPL_UDN_TIMER_SET_2 0x3602
 #define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
 #define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
+#define SPR_MPL_WORLD_ACCESS_SET_2 0x4e02
 #define SPR_PASS 0x4e0b
 #define SPR_PERF_COUNT_0 0x4205
 #define SPR_PERF_COUNT_1 0x4206
 #define SPR_PERF_COUNT_CTL 0x4207
+#define SPR_PERF_COUNT_DN_CTL 0x4210
 #define SPR_PERF_COUNT_STS 0x4208
 #define SPR_PROC_STATUS 0x4f00
 #define SPR_SIM_CONTROL 0x4e0c
 #define SPR_SYSTEM_SAVE_1_1 0x4901
 #define SPR_SYSTEM_SAVE_1_2 0x4902
 #define SPR_SYSTEM_SAVE_1_3 0x4903
+#define SPR_SYSTEM_SAVE_2_0 0x4700
+#define SPR_SYSTEM_SAVE_2_1 0x4701
+#define SPR_SYSTEM_SAVE_2_2 0x4702
+#define SPR_SYSTEM_SAVE_2_3 0x4703
 #define SPR_TILE_COORD 0x4c17
 #define SPR_TILE_RTF_HWM 0x4e10
 #define SPR_TILE_TIMER_CONTROL 0x3205
index 758ca4619d50017bffdec27c460edc2a95bfd38e..f18887d823999f8a7778defa2196ab10b586b81d 100644 (file)
@@ -146,7 +146,10 @@ enum {
 
        CALLER_SP_IN_R52_BASE = 4,
 
-       CALLER_SP_OFFSET_BASE = 8
+       CALLER_SP_OFFSET_BASE = 8,
+
+       /* Marks the entry point of certain functions. */
+       ENTRY_POINT_INFO_OP = 16
 };
 
 
index 8b60ec8b2d194f6e352df18eba598e3a61ef52f3..c3ae570c0a5d9ef209970d6d00a388a3128e550a 100644 (file)
@@ -216,15 +216,16 @@ struct compat_siginfo;
 struct compat_sigaltstack;
 long compat_sys_execve(const char __user *path,
                       const compat_uptr_t __user *argv,
-                      const compat_uptr_t __user *envp);
+                      const compat_uptr_t __user *envp, struct pt_regs *);
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
                             struct compat_sigaction __user *oact,
                             size_t sigsetsize);
 long compat_sys_rt_sigqueueinfo(int pid, int sig,
                                struct compat_siginfo __user *uinfo);
-long compat_sys_rt_sigreturn(void);
+long compat_sys_rt_sigreturn(struct pt_regs *);
 long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-                           struct compat_sigaltstack __user *uoss_ptr);
+                           struct compat_sigaltstack __user *uoss_ptr,
+                           struct pt_regs *);
 long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
 long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
 long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
@@ -255,4 +256,12 @@ long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 /* Tilera Linux syscalls that don't have "compat" versions. */
 #define compat_sys_flush_cache sys_flush_cache
 
+/* These are the intvec_64.S trampolines. */
+long _compat_sys_execve(const char __user *path,
+                       const compat_uptr_t __user *argv,
+                       const compat_uptr_t __user *envp);
+long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+                           struct compat_sigaltstack __user *uoss_ptr);
+long _compat_sys_rt_sigreturn(void);
+
 #endif /* _ASM_TILE_COMPAT_H */
index d155db6fa9bd9e64858858e01637e3ba420700aa..e0f7ee186721cf554dc142efdfc58350eaf6a7df 100644 (file)
@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
 void kmap_atomic_fix_kpte(struct page *page, int finished);
 
 #define flush_cache_kmaps()    do { } while (0)
index a11d4837ee4d6526137858e8fdd6226deb0a95d0..641e4ff3d805aa2014d6f6df0948c80fba580947 100644 (file)
        int __n = (n); \
        int __mask = 1 << (__n & 0x1f); \
        if (__n < 32) \
-               __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
+               __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
        else \
-               __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
+               __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_reset(n) do { \
        int __n = (n); \
        int __mask = 1 << (__n & 0x1f); \
        if (__n < 32) \
-               __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
+               __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
        else \
-               __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
+               __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_check(n) ({ \
        int __n = (n); \
        (((__n < 32) ? \
-        __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
-        __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
+        __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
+        __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
          >> (__n & 0x1f)) & 1; \
 })
 #define interrupt_mask_set_mask(mask) do { \
        unsigned long long __m = (mask); \
-       __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
-       __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
+       __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
+       __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #define interrupt_mask_reset_mask(mask) do { \
        unsigned long long __m = (mask); \
-       __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
-       __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
+       __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
+       __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #else
 #define interrupt_mask_set(n) \
-       __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
+       __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
 #define interrupt_mask_reset(n) \
-       __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
+       __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
 #define interrupt_mask_check(n) \
-       ((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
+       ((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
 #define interrupt_mask_set_mask(mask) \
-       __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
+       __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
-       __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
+       __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
 #endif
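
On the 32-bit chips the 64-bit interrupt mask is split across a pair of SPRs, so the macros above select the _K_0 or _K_1 word by interrupt number; tilegx gets by with a single 64-bit SPR. A minimal usage sketch, assuming INT_TILE_TIMER purely as an illustrative interrupt number:

    /* Mask the tile timer interrupt, verify, then unmask it again. */
    static inline void timer_mask_demo(void)
    {
            interrupt_mask_set(INT_TILE_TIMER);     /* set bit: masked */
            BUG_ON(!interrupt_mask_check(INT_TILE_TIMER));
            interrupt_mask_reset(INT_TILE_TIMER);   /* clear bit: unmasked */
    }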
 
 /*
  * The set of interrupts we want active if irqs are enabled.
  * Note that in particular, the tile timer interrupt comes and goes
  * from this set, since we have no other way to turn off the timer.
- * Likewise, INTCTRL_1 is removed and re-added during device
+ * Likewise, INTCTRL_K is removed and re-added during device
  * interrupts, as is the hardwall UDN_FIREWALL interrupt.
  * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
  * is always claimed as an "active interrupt" so we can query that bit
@@ -170,14 +170,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
 #define IRQS_DISABLED(tmp)                                     \
-       mfspr   tmp, INTERRUPT_MASK_1;                          \
+       mfspr   tmp, SPR_INTERRUPT_MASK_K;                      \
        andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)                   \
-       moveli reg, hw2_last(interrupts_enabled_mask); \
-       shl16insli reg, reg, hw1(interrupts_enabled_mask); \
-       shl16insli reg, reg, hw0(interrupts_enabled_mask); \
+       moveli reg, hw2_last(interrupts_enabled_mask);          \
+       shl16insli reg, reg, hw1(interrupts_enabled_mask);      \
+       shl16insli reg, reg, hw0(interrupts_enabled_mask);      \
        add     reg, reg, tp
 
 /* Disable interrupts. */
@@ -185,18 +185,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
        moveli  tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS);      \
        shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS);  \
        shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS);  \
-       mtspr   INTERRUPT_MASK_SET_1, tmp0
+       mtspr   SPR_INTERRUPT_MASK_SET_K, tmp0
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp)                                   \
        movei   tmp, -1;                                        \
-       mtspr   INTERRUPT_MASK_SET_1, tmp
+       mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1)                                 \
        GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);                  \
        ld      tmp0, tmp0;                                     \
-       mtspr   INTERRUPT_MASK_RESET_1, tmp0
+       mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
 
@@ -210,14 +210,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
  * (making the original code's write of the "high" mask word idempotent).
  */
 #define IRQS_DISABLED(tmp)                                     \
-       mfspr   tmp, INTERRUPT_MASK_1_0;                        \
+       mfspr   tmp, SPR_INTERRUPT_MASK_K_0;                    \
        shri    tmp, tmp, INT_MEM_ERROR;                        \
        andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)                   \
-       moveli  reg, lo16(interrupts_enabled_mask);     \
-       auli    reg, reg, ha16(interrupts_enabled_mask);\
+       moveli  reg, lo16(interrupts_enabled_mask);             \
+       auli    reg, reg, ha16(interrupts_enabled_mask);        \
        add     reg, reg, tp
 
 /* Disable interrupts. */
@@ -227,16 +227,16 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
         moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS)           \
        };                                                      \
        {                                                       \
-        mtspr  INTERRUPT_MASK_SET_1_0, tmp0;                   \
+        mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0;               \
         auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS)     \
        };                                                      \
-       mtspr   INTERRUPT_MASK_SET_1_1, tmp1
+       mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp)                                   \
        movei   tmp, -1;                                        \
-       mtspr   INTERRUPT_MASK_SET_1_0, tmp;                    \
-       mtspr   INTERRUPT_MASK_SET_1_1, tmp
+       mtspr   SPR_INTERRUPT_MASK_SET_K_0, tmp;                \
+       mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1)                                 \
@@ -246,8 +246,8 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
         addi   tmp1, tmp0, 4                                   \
        };                                                      \
        lw      tmp1, tmp1;                                     \
-       mtspr   INTERRUPT_MASK_RESET_1_0, tmp0;                 \
-       mtspr   INTERRUPT_MASK_RESET_1_1, tmp1
+       mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0;             \
+       mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
 /*
index 4c6811e3e8dc09a88bf97881dfa50d17ef426acb..81b8fc348d63d9caea21b2edd5e94cfe999313dd 100644 (file)
@@ -23,6 +23,7 @@
 #define MAP_POPULATE   0x0040          /* populate (prefault) pagetables */
 #define MAP_NONBLOCK   0x0080          /* do not block on IO */
 #define MAP_GROWSDOWN  0x0100          /* stack-like segment */
+#define MAP_STACK      MAP_GROWSDOWN   /* provide convenience alias */
 #define MAP_LOCKED     0x0200          /* pages are locked */
 #define MAP_NORESERVE  0x0400          /* don't check for reservations */
 #define MAP_DENYWRITE  0x0800          /* ETXTBSY */
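
MAP_STACK is added as a plain alias so portable callers (thread libraries pass it when allocating stacks) compile unchanged; on tile it maps to MAP_GROWSDOWN rather than being a separate hint bit. A hedged userspace sketch:

    #include <sys/mman.h>

    /* Allocate a 64KB thread stack; MAP_STACK is the portable spelling. */
    void *stack = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);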
index 7d90641cf18d8e58a1a6e39d4ef83292a316388d..7979a45430d3bc46aa2d2fc39c8e65772706f0dc 100644 (file)
@@ -199,17 +199,17 @@ static inline __attribute_const__ int get_order(unsigned long size)
  * If you want more physical memory than this then see the CONFIG_HIGHMEM
  * option in the kernel configuration.
  *
- * The top two 16MB chunks in the table below (VIRT and HV) are
- * unavailable to Linux.  Since the kernel interrupt vectors must live
- * at 0xfd000000, we map all of the bottom of RAM at this address with
- * a huge page table entry to minimize its ITLB footprint (as well as
- * at PAGE_OFFSET).  The last architected requirement is that user
- * interrupt vectors live at 0xfc000000, so we make that range of
- * memory available to user processes.  The remaining regions are sized
- * as shown; after the first four addresses, we show "typical" values,
- * since the actual addresses depend on kernel #defines.
+ * The top 16MB chunk in the table below is unavailable to Linux.  Since
+ * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000
+ * (depending on whether the kernel is at PL2 or PL1), we map all of the
+ * bottom of RAM at this address with a huge page table entry to minimize
+ * its ITLB footprint (as well as at PAGE_OFFSET).  The last architected
+ * requirement is that user interrupt vectors live at 0xfc000000, so we
+ * make that range of memory available to user processes.  The remaining
+ * regions are sized as shown; the first four addresses use the PL1
+ * values, and after that, we show "typical" values, since the actual
+ * addresses depend on kernel #defines.
  *
- * MEM_VIRT_INTRPT                 0xff000000
  * MEM_HV_INTRPT                   0xfe000000
  * MEM_SV_INTRPT (kernel code)     0xfd000000
  * MEM_USER_INTRPT (user vector)   0xfc000000
@@ -221,9 +221,14 @@ static inline __attribute_const__ int get_order(unsigned long size)
  */
 
 #define MEM_USER_INTRPT                _AC(0xfc000000, UL)
+#if CONFIG_KERNEL_PL == 1
 #define MEM_SV_INTRPT          _AC(0xfd000000, UL)
 #define MEM_HV_INTRPT          _AC(0xfe000000, UL)
-#define MEM_VIRT_INTRPT                _AC(0xff000000, UL)
+#else
+#define MEM_GUEST_INTRPT       _AC(0xfd000000, UL)
+#define MEM_SV_INTRPT          _AC(0xfe000000, UL)
+#define MEM_HV_INTRPT          _AC(0xff000000, UL)
+#endif
 
 #define INTRPT_SIZE            0x4000
 
index b3367379d53754cd7540ff29987e591481767480..dc4ccdd855bca293070ea8bff94a699821d5e01b 100644 (file)
@@ -347,15 +347,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
 #define pte_offset_map(dir, address) \
        _pte_offset_map(dir, address, KM_PTE0)
-#define pte_offset_map_nested(dir, address) \
-       _pte_offset_map(dir, address, KM_PTE1)
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a non-executable kernel PTE and flush it from the TLB. */
index ccd5f84256886c7526edd78bc58ab621f14ef2ac..1747ff3946b2ef50dea6e9591730238f3c720d08 100644 (file)
@@ -328,18 +328,21 @@ extern int kdata_huge;
  * Note that assembly code assumes that USER_PL is zero.
  */
 #define USER_PL 0
-#define KERNEL_PL 1
+#if CONFIG_KERNEL_PL == 2
+#define GUEST_PL 1
+#endif
+#define KERNEL_PL CONFIG_KERNEL_PL
 
-/* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */
+/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
 #define CPU_LOG_MASK_VALUE 12
 #define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
 #if CONFIG_NR_CPUS > CPU_MASK_VALUE
 # error Too many cpus!
 #endif
 #define raw_smp_processor_id() \
-       ((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE)
+       ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
 #define get_current_ksp0() \
-       (__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE)
+       (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
 #define next_current_ksp0(task) ({ \
        unsigned long __ksp0 = task_ksp0(task); \
        int __cpu = raw_smp_processor_id(); \
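
These macros rely on ksp0 being stack-aligned, which frees its low CPU_LOG_MASK_VALUE (12) bits to carry the cpu number; one mfspr then yields both values. A hedged sketch of the unpacking:

    unsigned long packed = __insn_mfspr(SPR_SYSTEM_SAVE_K_0);
    int cpu = (int)packed & CPU_MASK_VALUE;          /* low 12 bits */
    unsigned long ksp0 = packed & ~CPU_MASK_VALUE;   /* aligned stack top */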
index 4a02bb07397993a2eb025ae7cd79caf618ae03d6..ac6d343129d3f92dd2b7148d72571c0562fffdee 100644 (file)
@@ -62,8 +62,8 @@ struct pt_regs {
        pt_reg_t lr;            /* aliases regs[TREG_LR] */
 
        /* Saved special registers. */
-       pt_reg_t pc;            /* stored in EX_CONTEXT_1_0 */
-       pt_reg_t ex1;           /* stored in EX_CONTEXT_1_1 (PL and ICS bit) */
+       pt_reg_t pc;            /* stored in EX_CONTEXT_K_0 */
+       pt_reg_t ex1;           /* stored in EX_CONTEXT_K_1 (PL and ICS bit) */
        pt_reg_t faultnum;      /* fault number (INT_SWINT_1 for syscall) */
        pt_reg_t orig_r0;       /* r0 at syscall entry, else zero */
        pt_reg_t flags;         /* flags (see below) */
index ce99ffefeacff351c5523d574581e1092d57ca06..3b5507c31eae592a850c8c8ff8851bb14b49682a 100644 (file)
@@ -32,8 +32,9 @@ extern void *compat_sys_call_table[];
 
 /*
  * Note that by convention, any syscall which requires the current
- * register set takes an additional "struct pt_regs *" pointer; the
- * sys_xxx() function just adds the pointer and tail-calls to _sys_xxx().
+ * register set takes an additional "struct pt_regs *" pointer; a
+ * _sys_xxx() trampoline in intvec*.S just sets up the pointer and
+ * jumps to sys_xxx().
  */
 
 /* kernel/sys.c */
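
Under the new convention the C body is written directly against pt_regs while the asm stub stays trivial. A hedged sketch with illustrative names (sys_foo is not part of this patch); the PTREGS_SYSCALL macro later in this diff generates exactly such a _sys_foo stub, loading the pt_regs pointer into the next free argument register:

    /* The _sys_foo trampoline sets r1 = pt_regs and jumps here. */
    long sys_foo(unsigned long arg, struct pt_regs *regs)
    {
            return regs->regs[0] + arg;  /* full saved state available */
    }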
@@ -43,66 +44,17 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
 int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
                       u32 len_lo, u32 len_hi, int advice);
 long sys_flush_cache(void);
-long sys_mmap2(unsigned long addr, unsigned long len,
-              unsigned long prot, unsigned long flags,
-              unsigned long fd, unsigned long pgoff);
-#ifdef __tilegx__
-long sys_mmap(unsigned long addr, unsigned long len,
-             unsigned long prot, unsigned long flags,
-             unsigned long fd, off_t pgoff);
+#ifndef __tilegx__  /* No mmap() in the 32-bit kernel. */
+#define sys_mmap sys_mmap
 #endif
 
-/* kernel/process.c */
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
-              void __user *parent_tid, void __user *child_tid);
-long _sys_clone(unsigned long clone_flags, unsigned long newsp,
-               void __user *parent_tid, void __user *child_tid,
-               struct pt_regs *regs);
-long sys_fork(void);
-long _sys_fork(struct pt_regs *regs);
-long sys_vfork(void);
-long _sys_vfork(struct pt_regs *regs);
-long sys_execve(const char __user *filename,
-               const char __user *const __user *argv,
-               const char __user *const __user *envp);
-long _sys_execve(const char __user *filename,
-                const char __user *const __user *argv,
-                const char __user *const __user *envp, struct pt_regs *regs);
-
-/* kernel/signal.c */
-long sys_sigaltstack(const stack_t __user *, stack_t __user *);
-long _sys_sigaltstack(const stack_t __user *, stack_t __user *,
-                     struct pt_regs *);
-long sys_rt_sigreturn(void);
-long _sys_rt_sigreturn(struct pt_regs *regs);
-
-/* platform-independent functions */
-long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
-long sys_rt_sigaction(int sig, const struct sigaction __user *act,
-                     struct sigaction __user *oact, size_t sigsetsize);
-
 #ifndef __tilegx__
 /* mm/fault.c */
-int sys_cmpxchg_badaddr(unsigned long address);
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+long sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+long _sys_cmpxchg_badaddr(unsigned long address);
 #endif
 
 #ifdef CONFIG_COMPAT
-long compat_sys_execve(const char __user *path,
-                      const compat_uptr_t __user *argv,
-                      const compat_uptr_t __user *envp);
-long _compat_sys_execve(const char __user *path,
-                       const compat_uptr_t __user *argv,
-                       const compat_uptr_t __user *envp,
-                       struct pt_regs *regs);
-long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-                           struct compat_sigaltstack __user *uoss_ptr);
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-                            struct compat_sigaltstack __user *uoss_ptr,
-                            struct pt_regs *regs);
-long compat_sys_rt_sigreturn(void);
-long _compat_sys_rt_sigreturn(struct pt_regs *regs);
-
 /* These four are not defined for 64-bit, but serve as "compat" syscalls. */
 long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
 long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
@@ -110,4 +62,15 @@ long sys_truncate64(const char __user *path, loff_t length);
 long sys_ftruncate64(unsigned int fd, loff_t length);
 #endif
 
+/* These are the intvec*.S trampolines. */
+long _sys_sigaltstack(const stack_t __user *, stack_t __user *);
+long _sys_rt_sigreturn(void);
+long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+               void __user *parent_tid, void __user *child_tid);
+long _sys_execve(const char __user *filename,
+                const char __user *const __user *argv,
+                const char __user *const __user *envp);
+
+#include <asm-generic/syscalls.h>
+
 #endif /* _ASM_TILE_SYSCALLS_H */
index f749be327ce0c503fa80227fb49f1821da6ccab3..5388850deeb231a64b212bb4ddb3c93f419f8167 100644 (file)
 #define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
 #endif
 
+#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
+int __mb_incoherent(void);  /* Helper routine for mb_incoherent(). */
+#endif
+
 /* Fence to guarantee visibility of stores to incoherent memory. */
 static inline void
 mb_incoherent(void)
@@ -97,7 +101,6 @@ mb_incoherent(void)
 
 #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
        {
-               int __mb_incoherent(void);
 #if CHIP_HAS_TILE_WRITE_PENDING()
                const unsigned long WRITE_TIMEOUT_CYCLES = 400;
                unsigned long start = get_cycles_low();
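
The constants above bound a poll loop: the store fence spins until the write-pending indication clears or roughly 400 cycles elapse. A hedged reconstruction of that loop (SPR_TILE_WRITE_PENDING is assumed from the CHIP_HAS_TILE_WRITE_PENDING() guard):

    /* Wait for pending incoherent writes to drain, with a cycle cap. */
    while (__insn_mfspr(SPR_TILE_WRITE_PENDING) != 0 &&
           (get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES)
            ;  /* spin */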
@@ -161,7 +164,7 @@ extern struct task_struct *_switch_to(struct task_struct *prev,
 /* Helper function for _switch_to(). */
 extern struct task_struct *__switch_to(struct task_struct *prev,
                                       struct task_struct *next,
-                                      unsigned long new_system_save_1_0);
+                                      unsigned long new_system_save_k_0);
 
 /* Address that switched-away from tasks are at. */
 extern unsigned long get_switch_to_pc(void);
@@ -214,13 +217,6 @@ int hardwall_deactivate(struct task_struct *task);
 } while (0)
 #endif
 
-/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
-extern int _sim_syscall(int syscall_num, ...);
-#define sim_syscall(syscall_num, ...) \
-       _sim_syscall(SIM_CONTROL_SYSCALL + \
-               ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
-               ## __VA_ARGS__)
-
 /*
  * Kernel threads can check to see if they need to migrate their
  * stack whenever they return from a context switch; for user
index 432a9c15c8a2bff86d997703fd5212ed3cc02e26..d06e35f572010cc1a5f4de4e78740039b785f89d 100644 (file)
@@ -59,4 +59,8 @@ void do_hardwall_trap(struct pt_regs *, int fault_num);
 void do_breakpoint(struct pt_regs *, int fault_num);
 
 
+#ifdef __tilegx__
+void gx_singlestep_handle(struct pt_regs *, int fault_num);
+#endif
+
 #endif /* _ASM_TILE_SYSCALLS_H */
index 9bd303a141b260026ce5327020f62142a9b0c07b..f672544cd4f94ed2ebdfbdb9af2cd8edbb021f3a 100644 (file)
@@ -1003,37 +1003,37 @@ int hv_console_write(HV_VirtAddr bytes, int len);
  *  when these occur in a client's interrupt critical section, they must
  *  be delivered through the downcall mechanism.
  *
- *  A downcall is initially delivered to the client as an INTCTRL_1
- *  interrupt.  Upon entry to the INTCTRL_1 vector, the client must
- *  immediately invoke the hv_downcall_dispatch service.  This service
- *  will not return; instead it will cause one of the client's actual
- *  downcall-handling interrupt vectors to be entered.  The EX_CONTEXT
- *  registers in the client will be set so that when the client irets,
- *  it will return to the code which was interrupted by the INTCTRL_1
- *  interrupt.
- *
- *  Under some circumstances, the firing of INTCTRL_1 can race with
+ *  A downcall is initially delivered to the client as an INTCTRL_CL
+ *  interrupt, where CL is the client's PL.  Upon entry to the INTCTRL_CL
+ *  vector, the client must immediately invoke the hv_downcall_dispatch
+ *  service.  This service will not return; instead it will cause one of
+ *  the client's actual downcall-handling interrupt vectors to be entered.
+ *  The EX_CONTEXT registers in the client will be set so that when the
+ *  client irets, it will return to the code which was interrupted by the
+ *  INTCTRL_CL interrupt.
+ *
+ *  Under some circumstances, the firing of INTCTRL_CL can race with
  *  the lowering of a device interrupt.  In such a case, the
  *  hv_downcall_dispatch service may issue an iret instruction instead
  *  of entering one of the client's actual downcall-handling interrupt
  *  vectors.  This will return execution to the location that was
- *  interrupted by INTCTRL_1.
+ *  interrupted by INTCTRL_CL.
  *
  *  Any saving of registers should be done by the actual handling
- *  vectors; no registers should be changed by the INTCTRL_1 handler.
+ *  vectors; no registers should be changed by the INTCTRL_CL handler.
  *  In particular, the client should not use a jal instruction to invoke
  *  the hv_downcall_dispatch service, as that would overwrite the client's
  *  lr register.  Note that the hv_downcall_dispatch service may overwrite
  *  one or more of the client's system save registers.
  *
- *  The client must not modify the INTCTRL_1_STATUS SPR.  The hypervisor
+ *  The client must not modify the INTCTRL_CL_STATUS SPR.  The hypervisor
  *  will set this register to cause a downcall to happen, and will clear
  *  it when no further downcalls are pending.
  *
- *  When a downcall vector is entered, the INTCTRL_1 interrupt will be
+ *  When a downcall vector is entered, the INTCTRL_CL interrupt will be
  *  masked.  When the client is done processing a downcall, and is ready
  *  to accept another, it must unmask this interrupt; if more downcalls
- *  are pending, this will cause the INTCTRL_1 vector to be reentered.
+ *  are pending, this will cause the INTCTRL_CL vector to be reentered.
  *  Currently the following interrupt vectors can be entered through a
  *  downcall:
  *
index d3c41c1ff6bd9dae73908276cd7a41b1c70696a6..55a6a74974b4dad8c973484bcf2dc1934c34b309 100644 (file)
@@ -369,6 +369,10 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
                                        /* Weird; reserved value, ignore it. */
                                        continue;
                                }
+                               if (info_operand & ENTRY_POINT_INFO_OP) {
+                                       /* This info op is ignored by the backtracer. */
+                                       continue;
+                               }
 
                                /* Skip info ops which are not in the
                                 * "one_ago" mode we want right now.
index b1e06d0415555e5437ccd26d9c1ec8990582851a..77739cdd9462dacf2a9188a0a0418c014e6379f9 100644 (file)
@@ -154,8 +154,14 @@ long tile_compat_sys_msgrcv(int msqid,
 #define compat_sys_fstat64 sys_newfstat
 #define compat_sys_fstatat64 sys_newfstatat
 
-/* Pass full 64-bit values through ptrace. */
-#define compat_sys_ptrace tile_compat_sys_ptrace
+/* The native sys_ptrace dynamically handles compat binaries. */
+#define compat_sys_ptrace sys_ptrace
+
+/* Call the trampolines to manage pt_regs where necessary. */
+#define compat_sys_execve _compat_sys_execve
+#define compat_sys_sigaltstack _compat_sys_sigaltstack
+#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
+#define sys_clone _sys_clone
 
 /*
  * Note that we can't include <linux/unistd.h> here since the header
index 9c710db43f1329c8b850f4a058e295df171c314e..fb64b99959d42dd796c6d16c4b471cc8d9769a50 100644 (file)
@@ -256,9 +256,9 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
        return err;
 }
 
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-                            struct compat_sigaltstack __user *uoss_ptr,
-                            struct pt_regs *regs)
+long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+                           struct compat_sigaltstack __user *uoss_ptr,
+                           struct pt_regs *regs)
 {
        stack_t uss, uoss;
        int ret;
@@ -291,7 +291,7 @@ long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
        return ret;
 }
 
-long _compat_sys_rt_sigreturn(struct pt_regs *regs)
+long compat_sys_rt_sigreturn(struct pt_regs *regs)
 {
        struct compat_rt_sigframe __user *frame =
                (struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
@@ -312,7 +312,7 @@ long _compat_sys_rt_sigreturn(struct pt_regs *regs)
        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
                goto badframe;
 
-       if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
+       if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
                goto badframe;
 
        return r0;
index 3d01383b1b0e3c8c6e805570599eb0b0d6fa211f..fd8dc42abdcb15c07e231f57205045f735c815c0 100644 (file)
@@ -15,7 +15,9 @@
 #include <linux/linkage.h>
 #include <linux/unistd.h>
 #include <asm/irqflags.h>
+#include <asm/processor.h>
 #include <arch/abi.h>
+#include <arch/spr_def.h>
 
 #ifdef __tilegx__
 #define bnzt bnezt
@@ -25,28 +27,6 @@ STD_ENTRY(current_text_addr)
        { move r0, lr; jrp lr }
        STD_ENDPROC(current_text_addr)
 
-STD_ENTRY(_sim_syscall)
-       /*
-        * Wait for r0-r9 to be ready (and lr on the off chance we
-        * want the syscall to locate its caller), then make a magic
-        * simulator syscall.
-        *
-        * We carefully stall until the registers are readable in case they
-        * are the target of a slow load, etc. so that tile-sim will
-        * definitely be able to read all of them inside the magic syscall.
-        *
-        * Technically this is wrong for r3-r9 and lr, since an interrupt
-        * could come in and restore the registers with a slow load right
-        * before executing the mtspr. We may need to modify tile-sim to
-        * explicitly stall for this case, but we do not yet have
-        * a way to implement such a stall.
-        */
-       { and zero, lr, r9 ; and zero, r8, r7 }
-       { and zero, r6, r5 ; and zero, r4, r3 }
-       { and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
-       { jrp lr }
-       STD_ENDPROC(_sim_syscall)
-
 /*
  * Implement execve().  The i386 code has a note that forking from kernel
  * space results in no copy on write until the execve, so we should be
@@ -102,7 +82,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
 STD_ENTRY(cpu_idle_on_new_stack)
        {
         move sp, r1
-        mtspr SYSTEM_SAVE_1_0, r2
+        mtspr SPR_SYSTEM_SAVE_K_0, r2
        }
        jal free_thread_info
        j cpu_idle
@@ -124,15 +104,15 @@ STD_ENTRY(smp_nap)
 STD_ENTRY(_cpu_idle)
        {
         lnk r0
-        movei r1, 1
+        movei r1, KERNEL_PL
        }
        {
         addli r0, r0, _cpu_idle_nap - .
         mtspr INTERRUPT_CRITICAL_SECTION, r1
        }
-       IRQ_ENABLE(r2, r3)         /* unmask, but still with ICS set */
-       mtspr EX_CONTEXT_1_1, r1   /* PL1, ICS clear */
-       mtspr EX_CONTEXT_1_0, r0
+       IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
+       mtspr SPR_EX_CONTEXT_K_1, r1   /* Kernel PL, ICS clear */
+       mtspr SPR_EX_CONTEXT_K_0, r0
        iret
        .global _cpu_idle_nap
 _cpu_idle_nap:
index 2b4f6c09170179d85884d13574670a47e5220ef2..90e7c4435693d1848917b267a28a8770a8d5c64b 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/asm-offsets.h>
 #include <hv/hypervisor.h>
 #include <arch/chip.h>
+#include <arch/spr_def.h>
 
 /*
  * This module contains the entry code for kernel images. It performs the
@@ -76,7 +77,7 @@ ENTRY(_start)
        }
 1:
 
-       /* Get our processor number and save it away in SAVE_1_0. */
+       /* Get our processor number and save it away in SAVE_K_0. */
        jal hv_inquire_topology
        mulll_uu r4, r1, r2        /* r1 == y, r2 == width */
        add r4, r4, r0             /* r0 == x, so r4 == cpu == y*width + x */
@@ -124,7 +125,7 @@ ENTRY(_start)
        lw r0, r0
        lw sp, r1
        or r4, sp, r4
-       mtspr SYSTEM_SAVE_1_0, r4  /* save ksp0 + cpu */
+       mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
        addi sp, sp, -STACK_TOP_DELTA
        {
          move lr, zero   /* stop backtraces in the called function */
index 8f58bdff20d7f7dd9b77d16a6901eb9db82f056b..f5821626247fee9891d47eca123b26440f6c025c 100644 (file)
@@ -32,8 +32,8 @@
 # error "No support for kernel preemption currently"
 #endif
 
-#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
-# error INT_INTCTRL_1 coded to set high interrupt mask
+#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
+# error INT_INTCTRL_K coded to set high interrupt mask
 #endif
 
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
@@ -132,8 +132,8 @@ intvec_\vecname:
 
        /* Temporarily save a register so we have somewhere to work. */
 
-       mtspr   SYSTEM_SAVE_1_1, r0
-       mfspr   r0, EX_CONTEXT_1_1
+       mtspr   SPR_SYSTEM_SAVE_K_1, r0
+       mfspr   r0, SPR_EX_CONTEXT_K_1
 
        /* The cmpxchg code clears sp to force us to reset it here on fault. */
        {
@@ -167,18 +167,18 @@ intvec_\vecname:
         * The page_fault handler may be downcalled directly by the
         * hypervisor even when Linux is running and has ICS set.
         *
-        * In this case the contents of EX_CONTEXT_1_1 reflect the
+        * In this case the contents of EX_CONTEXT_K_1 reflect the
         * previous fault and can't be relied on to choose whether or
         * not to reinitialize the stack pointer.  So we add a test
-        * to see whether SYSTEM_SAVE_1_2 has the high bit set,
+        * to see whether SYSTEM_SAVE_K_2 has the high bit set,
         * and if so we don't reinitialize sp, since we must be coming
         * from Linux.  (In fact the precise case is !(val & ~1),
         * but any Linux PC has to have the high bit set.)
         *
-        * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
+        * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
         * any path that turns into a downcall to one of our TLB handlers.
         */
-       mfspr   r0, SYSTEM_SAVE_1_2
+       mfspr   r0, SPR_SYSTEM_SAVE_K_2
        {
         blz    r0, 0f    /* high bit in S_S_1_2 is for a PC to use */
         move   r0, sp
@@ -187,12 +187,12 @@ intvec_\vecname:
 
 2:
        /*
-        * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
+        * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
         * the current stack top in the higher bits.  So we recover
         * our stack top by just masking off the low bits, then
         * point sp at the top aligned address on the actual stack page.
         */
-       mfspr   r0, SYSTEM_SAVE_1_0
+       mfspr   r0, SPR_SYSTEM_SAVE_K_0
        mm      r0, r0, zero, LOG2_THREAD_SIZE, 31
 
 0:
@@ -254,7 +254,7 @@ intvec_\vecname:
         sw     sp, r3
         addli  sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
        }
-       mfspr   r0, EX_CONTEXT_1_0
+       mfspr   r0, SPR_EX_CONTEXT_K_0
        .ifc \processing,handle_syscall
        /*
         * Bump the saved PC by one bundle so that when we return, we won't
@@ -267,7 +267,7 @@ intvec_\vecname:
         sw     sp, r0
         addli  sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
        }
-       mfspr   r0, EX_CONTEXT_1_1
+       mfspr   r0, SPR_EX_CONTEXT_K_1
        {
         sw     sp, r0
         addi   sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
@@ -289,7 +289,7 @@ intvec_\vecname:
         .endif
         addli  sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
        }
-       mfspr   r0, SYSTEM_SAVE_1_1    /* Original r0 */
+       mfspr   r0, SPR_SYSTEM_SAVE_K_1    /* Original r0 */
        {
         sw     sp, r0
         addi   sp, sp, -PTREGS_OFFSET_REG(0) - 4
@@ -309,12 +309,12 @@ intvec_\vecname:
         * See discussion below at "finish_interrupt_save".
         */
        .ifc \c_routine, do_page_fault
-       mfspr   r2, SYSTEM_SAVE_1_3   /* address of page fault */
-       mfspr   r3, SYSTEM_SAVE_1_2   /* info about page fault */
+       mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
+       mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
        .else
        .ifc \vecnum, INT_DOUBLE_FAULT
        {
-        mfspr  r2, SYSTEM_SAVE_1_2   /* double fault info from HV */
+        mfspr  r2, SPR_SYSTEM_SAVE_K_2   /* double fault info from HV */
         movei  r3, 0
        }
        .else
@@ -467,7 +467,7 @@ intvec_\vecname:
        /* Load tp with our per-cpu offset. */
 #ifdef CONFIG_SMP
        {
-        mfspr  r20, SYSTEM_SAVE_1_0
+        mfspr  r20, SPR_SYSTEM_SAVE_K_0
         moveli r21, lo16(__per_cpu_offset)
        }
        {
@@ -487,7 +487,7 @@ intvec_\vecname:
         * We load flags in r32 here so we can jump to .Lrestore_regs
         * directly after do_page_fault_ics() if necessary.
         */
-       mfspr   r32, EX_CONTEXT_1_1
+       mfspr   r32, SPR_EX_CONTEXT_K_1
        {
         andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
         PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
@@ -957,11 +957,11 @@ STD_ENTRY(interrupt_return)
        pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
        pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
        {
-        mtspr  EX_CONTEXT_1_0, r21
+        mtspr  SPR_EX_CONTEXT_K_0, r21
         move   r5, zero
        }
        {
-        mtspr  EX_CONTEXT_1_1, lr
+        mtspr  SPR_EX_CONTEXT_K_1, lr
         andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
        }
 
@@ -1020,7 +1020,7 @@ STD_ENTRY(interrupt_return)
 
        /* Set r1 to errno if we are returning an error, otherwise zero. */
        {
-        moveli r29, 1024
+        moveli r29, 4096
         sub    r1, zero, r0
        }
        slt_u   r29, r1, r29
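
The 1024 to 4096 bump widens the error window to the usual MAX_ERRNO convention. In C terms the negate-and-compare pair above is, as a hedged sketch:

    unsigned long neg = -(unsigned long)r0;   /* two's-complement negate */
    long r1 = (neg < 4096) ? -r0 : 0;         /* errno if error, else 0 */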
@@ -1199,7 +1199,7 @@ STD_ENTRY(interrupt_return)
        STD_ENDPROC(interrupt_return)
 
        /*
-        * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
+        * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
         * before returning, so we can properly get more downcalls.
         */
        .pushsection .text.handle_interrupt_downcall,"ax"
@@ -1208,11 +1208,11 @@ handle_interrupt_downcall:
        check_single_stepping normal, .Ldispatch_downcall
 .Ldispatch_downcall:
 
-       /* Clear INTCTRL_1 from the set of interrupts we ever enable. */
+       /* Clear INTCTRL_K from the set of interrupts we ever enable. */
        GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
        {
         addi   r30, r30, 4
-        movei  r31, INT_MASK(INT_INTCTRL_1)
+        movei  r31, INT_MASK(INT_INTCTRL_K)
        }
        {
         lw     r20, r30
@@ -1227,7 +1227,7 @@ handle_interrupt_downcall:
        }
        FEEDBACK_REENTER(handle_interrupt_downcall)
 
-       /* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
+       /* Allow INTCTRL_K to be enabled next time we enable interrupts. */
        lw      r20, r30
        or      r20, r20, r31
        sw      r30, r20
@@ -1472,7 +1472,12 @@ handle_ill:
        lw      r26, r24
        sw      r28, r26
 
-       /* Clear TIF_SINGLESTEP */
+       /*
+        * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
+        * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
+        * need to clear it here and can't really impose on all other arches.
+        * So what's another write between friends?
+        */
        GET_THREAD_INFO(r0)
 
        addi    r1, r0, THREAD_INFO_FLAGS_OFFSET
@@ -1509,7 +1514,7 @@ handle_ill:
 /* Various stub interrupt handlers and syscall handlers */
 
 STD_ENTRY_LOCAL(_kernel_double_fault)
-       mfspr   r1, EX_CONTEXT_1_0
+       mfspr   r1, SPR_EX_CONTEXT_K_0
        move    r2, lr
        move    r3, sp
        move    r4, r52
@@ -1518,34 +1523,29 @@ STD_ENTRY_LOCAL(_kernel_double_fault)
        STD_ENDPROC(_kernel_double_fault)
 
 STD_ENTRY_LOCAL(bad_intr)
-       mfspr   r2, EX_CONTEXT_1_0
+       mfspr   r2, SPR_EX_CONTEXT_K_0
        panic   "Unhandled interrupt %#x: PC %#lx"
        STD_ENDPROC(bad_intr)
 
 /* Put address of pt_regs in reg and jump. */
 #define PTREGS_SYSCALL(x, reg)                          \
-       STD_ENTRY(x);                                   \
+       STD_ENTRY(_##x);                                \
        {                                               \
         PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
-        j      _##x                                    \
+        j      x                                       \
        };                                              \
-       STD_ENDPROC(x)
+       STD_ENDPROC(_##x)
 
 PTREGS_SYSCALL(sys_execve, r3)
 PTREGS_SYSCALL(sys_sigaltstack, r2)
 PTREGS_SYSCALL(sys_rt_sigreturn, r0)
+PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
 
-/* Save additional callee-saves to pt_regs, put address in reg and jump. */
-#define PTREGS_SYSCALL_ALL_REGS(x, reg)                 \
-       STD_ENTRY(x);                                   \
-       push_extra_callee_saves reg;                    \
-       j       _##x;                                   \
-       STD_ENDPROC(x)
-
-PTREGS_SYSCALL_ALL_REGS(sys_fork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_clone, r4)
-PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
+/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
+STD_ENTRY(_sys_clone)
+       push_extra_callee_saves r4
+       j       sys_clone
+       STD_ENDPROC(_sys_clone)
 
 /*
  * This entrypoint is taken for the cmpxchg and atomic_update fast
@@ -1558,12 +1558,14 @@ PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
  * to be available to it on entry.  It does not modify any callee-save
  * registers (including "lr").  It does not check what PL it is being
  * called at, so you'd better not call it other than at PL0.
+ * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
+ * it ever becomes necessary to use more registers, be aware.
  *
  * It does not use the stack, but since it might be re-interrupted by
  * a page fault which would assume the stack was valid, it does
  * save/restore the stack pointer and zero it out to make sure it gets reset.
  * Since we always keep interrupts disabled, the hypervisor won't
- * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them
+ * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
  * (other than to advance the PC on return).
  *
  * We have to manually validate the user vs kernel address range
@@ -1769,7 +1771,7 @@ ENTRY(sys_cmpxchg)
        /* Do slow mtspr here so the following "mf" waits less. */
        {
         move   sp, r27
-        mtspr  EX_CONTEXT_1_0, r28
+        mtspr  SPR_EX_CONTEXT_K_0, r28
        }
        mf
 
@@ -1788,7 +1790,7 @@ ENTRY(sys_cmpxchg)
        }
        {
         move   sp, r27
-        mtspr  EX_CONTEXT_1_0, r28
+        mtspr  SPR_EX_CONTEXT_K_0, r28
        }
        iret
 
@@ -1816,7 +1818,7 @@ ENTRY(sys_cmpxchg)
 #endif
 
        /* Issue the slow SPR here while the tns result is in flight. */
-       mfspr   r28, EX_CONTEXT_1_0
+       mfspr   r28, SPR_EX_CONTEXT_K_0
 
        {
         addi   r28, r28, 8    /* return to the instruction after the swint1 */
@@ -1904,7 +1906,7 @@ ENTRY(sys_cmpxchg)
 .Lcmpxchg64_mismatch:
        {
         move   sp, r27
-        mtspr  EX_CONTEXT_1_0, r28
+        mtspr  SPR_EX_CONTEXT_K_0, r28
        }
        mf
        {
@@ -1985,8 +1987,13 @@ int_unalign:
        int_hand     INT_PERF_COUNT, PERF_COUNT, \
                     op_handle_perf_interrupt, handle_nmi
        int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
+#if CONFIG_KERNEL_PL == 2
+       dc_dispatch  INT_INTCTRL_2, INTCTRL_2
+       int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
+#else
        int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
        dc_dispatch  INT_INTCTRL_1, INTCTRL_1
+#endif
        int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
        int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
                     hv_message_intr, handle_interrupt_downcall
index 9a27d563fc30968ce3efb46ac11ce241e10d8cc0..e63917687e998907824a62b1783ffef599418818 100644 (file)
@@ -61,9 +61,9 @@ static DEFINE_SPINLOCK(available_irqs_lock);
 
 #if CHIP_HAS_IPI()
 /* Use SPRs to manipulate device interrupts. */
-#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
-#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
-#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
+#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
+#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
+#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
 #else
 /* Use HV to manipulate device interrupts. */
 #define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
@@ -89,16 +89,16 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
         * masked by a previous interrupt.  Then, mask out the ones
         * we're going to handle.
         */
-       unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
-       original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
-       __insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
+       unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
+       original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
+       __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
 #else
        /*
         * Hypervisor performs the equivalent of the Gx code above and
         * then puts the pending interrupt mask into a system save reg
         * for us to find.
         */
-       original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
+       original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
 #endif
        remaining_irqs = original_irqs;
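
After capturing original_irqs, the handler walks the pending set bit by bit. A hedged sketch of that dispatch (mirroring, not quoting, the loop that follows in the real function):

    /* Peel one interrupt at a time and hand it to the generic IRQ layer. */
    while (remaining_irqs) {
            unsigned long irq = __ffs(remaining_irqs);
            remaining_irqs &= ~(1UL << irq);
            generic_handle_irq(irq);
    }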
 
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
        /* Enable interrupt delivery. */
        unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-       raw_local_irq_unmask(INT_IPI_1);
+       raw_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
index 6d23ed271d10f0b87280e26660451e320f6a6ff4..997e3933f72679718583dae5a82c72c4d2f9fbdb 100644 (file)
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
                panic("hv_register_message_state: error %d", rc);
 
        /* Make sure downcall interrupts will be enabled. */
-       raw_local_irq_unmask(INT_INTCTRL_1);
+       raw_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
index 84c29111756c2212f02cd5bc841691c1cc0a0fd9..8430f45daea6bce566cc0980db85f0d0b213af72 100644 (file)
@@ -214,9 +214,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
        /*
         * Copy the callee-saved registers from the passed pt_regs struct
         * into the context-switch callee-saved registers area.
-        * We have to restore the callee-saved registers since we may
-        * be cloning a userspace task with userspace register state,
-        * and we won't be unwinding the same kernel frames to restore them.
+        * This way, when we start the interrupt-return sequence, the
+        * callee-saved registers are already live in registers, which
+        * is the state the compiler expects as we take the normal
+        * return-from-interrupt path after calling C code.
         * Zero out the C ABI save area to mark the top of the stack.
         */
        ksp = (unsigned long) childregs;
@@ -304,15 +305,25 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 /* Allow user processes to access the DMA SPRs */
 void grant_dma_mpls(void)
 {
+#if CONFIG_KERNEL_PL == 2
+       __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
+       __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#else
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
+#endif
 }
 
 /* Forbid user processes from accessing the DMA SPRs */
 void restrict_dma_mpls(void)
 {
+#if CONFIG_KERNEL_PL == 2
+       __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
+       __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
+#else
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#endif
 }
 
 /* Pause the DMA engine, then save off its state registers. */
@@ -523,19 +534,14 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
         * Switch kernel SP, PC, and callee-saved registers.
         * In the context of the new task, return the old task pointer
         * (i.e. the task that actually called __switch_to).
-        * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
+        * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
         */
        return __switch_to(prev, next, next_current_ksp0(next));
 }
 
-long _sys_fork(struct pt_regs *regs)
-{
-       return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
-}
-
-long _sys_clone(unsigned long clone_flags, unsigned long newsp,
-               void __user *parent_tidptr, void __user *child_tidptr,
-               struct pt_regs *regs)
+SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
+               void __user *, parent_tidptr, void __user *, child_tidptr,
+               struct pt_regs *, regs)
 {
        if (!newsp)
                newsp = regs->sp;
@@ -543,18 +549,13 @@ long _sys_clone(unsigned long clone_flags, unsigned long newsp,
                       parent_tidptr, child_tidptr);
 }
 
-long _sys_vfork(struct pt_regs *regs)
-{
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
-                      regs, 0, NULL, NULL);
-}
-
 /*
  * sys_execve() executes a new program.
  */
-long _sys_execve(const char __user *path,
-                const char __user *const __user *argv,
-                const char __user *const __user *envp, struct pt_regs *regs)
+SYSCALL_DEFINE4(execve, const char __user *, path,
+               const char __user *const __user *, argv,
+               const char __user *const __user *, envp,
+               struct pt_regs *, regs)
 {
        long error;
        char *filename;
@@ -570,9 +571,10 @@ out:
 }
 
 #ifdef CONFIG_COMPAT
-long _compat_sys_execve(const char __user *path,
-                       const compat_uptr_t __user *argv,
-                       const compat_uptr_t __user *envp, struct pt_regs *regs)
+long compat_sys_execve(const char __user *path,
+                      const compat_uptr_t __user *argv,
+                      const compat_uptr_t __user *envp,
+                      struct pt_regs *regs)
 {
        long error;
        char *filename;
index 7161bd03d2fdbe8d057587f20c4adf3ffffcb61a..5b20c2874d51e84da57c151f1244a42e386952bd 100644 (file)
@@ -31,25 +31,6 @@ void user_disable_single_step(struct task_struct *child)
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 }
 
-/*
- * This routine will put a word on the process's privileged stack.
- */
-static void putreg(struct task_struct *task,
-                  unsigned long addr, unsigned long value)
-{
-       unsigned int regno = addr / sizeof(unsigned long);
-       struct pt_regs *childregs = task_pt_regs(task);
-       childregs->regs[regno] = value;
-       childregs->flags |= PT_FLAGS_RESTORE_REGS;
-}
-
-static unsigned long getreg(struct task_struct *task, unsigned long addr)
-{
-       unsigned int regno = addr / sizeof(unsigned long);
-       struct pt_regs *childregs = task_pt_regs(task);
-       return childregs->regs[regno];
-}
-
 /*
  * Called by kernel/ptrace.c when detaching.
  */
@@ -66,59 +47,72 @@ void ptrace_disable(struct task_struct *child)
 
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
-       unsigned long __user *datap;
+       unsigned long __user *datap = (long __user __force *)data;
        unsigned long tmp;
        int i;
        long ret = -EIO;
-
-#ifdef CONFIG_COMPAT
-       if (task_thread_info(current)->status & TS_COMPAT)
-               data = (u32)data;
-       if (task_thread_info(child)->status & TS_COMPAT)
-               addr = (u32)addr;
-#endif
-       datap = (unsigned long __user __force *)data;
+       unsigned long *childregs;
+       char *childreg;
 
        switch (request) {
 
        case PTRACE_PEEKUSR:  /* Read register from pt_regs. */
-               if (addr & (sizeof(data)-1))
-                       break;
                if (addr < 0 || addr >= PTREGS_SIZE)
                        break;
-               tmp = getreg(child, addr);   /* Read register */
-               ret = put_user(tmp, datap);
+               childreg = (char *)task_pt_regs(child) + addr;
+#ifdef CONFIG_COMPAT
+               if (is_compat_task()) {
+                       if (addr & (sizeof(compat_long_t)-1))
+                               break;
+                       ret = put_user(*(compat_long_t *)childreg,
+                                      (compat_long_t __user *)datap);
+               } else
+#endif
+               {
+                       if (addr & (sizeof(long)-1))
+                               break;
+                       ret = put_user(*(long *)childreg, datap);
+               }
                break;
 
        case PTRACE_POKEUSR:  /* Write register in pt_regs. */
-               if (addr & (sizeof(data)-1))
-                       break;
                if (addr < 0 || addr >= PTREGS_SIZE)
                        break;
-               putreg(child, addr, data);   /* Write register */
+               childreg = (char *)task_pt_regs(child) + addr;
+#ifdef CONFIG_COMPAT
+               if (is_compat_task()) {
+                       if (addr & (sizeof(compat_long_t)-1))
+                               break;
+                       *(compat_long_t *)childreg = data;
+               } else
+#endif
+               {
+                       if (addr & (sizeof(long)-1))
+                               break;
+                       *(long *)childreg = data;
+               }
                ret = 0;
                break;
 
        case PTRACE_GETREGS:  /* Get all registers from the child. */
                if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
                        break;
-               for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
-                       ret = __put_user(getreg(child, i), datap);
+               childregs = (long *)task_pt_regs(child);
+               for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
+                       ret = __put_user(childregs[i], &datap[i]);
                        if (ret != 0)
                                break;
-                       datap++;
                }
                break;
 
        case PTRACE_SETREGS:  /* Set all registers in the child. */
                if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
                        break;
-               for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
-                       ret = __get_user(tmp, datap);
+               childregs = (long *)task_pt_regs(child);
+               for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
+                       ret = __get_user(childregs[i], &datap[i]);
                        if (ret != 0)
                                break;
-                       putreg(child, i, tmp);
-                       datap++;
                }
                break;
 
index e88d6e122783d94f46aadaf8618d99383486ec58..caa13101c26474f352c4e60a00666d1ccefcb6de 100644 (file)
@@ -85,7 +85,7 @@ STD_ENTRY_SECTION(__switch_to, .sched.text)
        {
          /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
          move sp, r13
-         mtspr SYSTEM_SAVE_1_0, r2
+         mtspr SPR_SYSTEM_SAVE_K_0, r2
        }
        FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
 .L__switch_to_pc:
index e7d54c73d5c125e3ccc220b8552e4c012a8aeae3..f3a50e74f9a4a0768058d81564c9da9c625938a5 100644 (file)
@@ -187,11 +187,11 @@ early_param("vmalloc", parse_vmalloc);
 
 #ifdef CONFIG_HIGHMEM
 /*
- * Determine for each controller where its lowmem is mapped and how
- * much of it is mapped there.  On controller zero, the first few
- * megabytes are mapped at 0xfd000000 as code, so in principle we
- * could start our data mappings higher up, but for now we don't
- * bother, to avoid additional confusion.
+ * Determine for each controller where its lowmem is mapped and how much of
+ * it is mapped there.  On controller zero, the first few megabytes are
+ * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * start our data mappings higher up, but for now we don't bother, to avoid
+ * additional confusion.
  *
  * One question is whether, on systems with more than 768 Mb and
  * controllers of different sizes, to map in a proportionate amount of
@@ -311,7 +311,7 @@ static void __init setup_memory(void)
 #endif
 
        /* We are using a char to hold the cpu_2_node[] mapping */
-       BUG_ON(MAX_NUMNODES > 127);
+       BUILD_BUG_ON(MAX_NUMNODES > 127);
 
        /* Discover the ranges of memory available to us */
        for (i = 0; ; ++i) {
@@ -876,6 +876,9 @@ void __cpuinit setup_cpu(int boot)
 #if CHIP_HAS_SN_PROC()
        raw_local_irq_unmask(INT_SNITLB_MISS);
 #endif
+#ifdef __tilegx__
+       raw_local_irq_unmask(INT_SINGLE_STEP_K);
+#endif
 
        /*
         * Allow user access to many generic SPRs, like the cycle
@@ -893,11 +896,12 @@ void __cpuinit setup_cpu(int boot)
 #endif
 
        /*
-        * Set the MPL for interrupt control 0 to user level.
-        * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
-        * as well as the PL 0 interrupt mask.
+        * Set the MPL for interrupt control 0 & 1 to the corresponding
+        * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
+        * SPRs, as well as the interrupt mask.
         */
        __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
+       __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
 
        /* Initialize IRQ support for this cpu. */
        setup_irq_regs();
@@ -1033,7 +1037,7 @@ static void __init validate_va(void)
         * In addition, make sure we CAN'T use the end of memory, since
         * we use the last chunk of each pgd for the pgd_list.
         */
-       int i, fc_fd_ok = 0;
+       int i, user_kernel_ok = 0;
        unsigned long max_va = 0;
        unsigned long list_va =
                ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
@@ -1044,13 +1048,13 @@ static void __init validate_va(void)
                        break;
                if (range.start <= MEM_USER_INTRPT &&
                    range.start + range.size >= MEM_HV_INTRPT)
-                       fc_fd_ok = 1;
+                       user_kernel_ok = 1;
                if (range.start == 0)
                        max_va = range.size;
                BUG_ON(range.start + range.size > list_va);
        }
-       if (!fc_fd_ok)
-               early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n");
+       if (!user_kernel_ok)
+               early_panic("Hypervisor not configured for user/kernel VAs\n");
        if (max_va == 0)
                early_panic("Hypervisor not configured for low VAs\n");
        if (max_va < KERNEL_HIGH_VADDR)
@@ -1334,6 +1338,10 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
        pte_t *pte;
 
        BUG_ON(pgd_addr_invalid(addr));
+       if (addr < VMALLOC_START || addr >= VMALLOC_END)
+               panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
+                     " try increasing CONFIG_VMALLOC_RESERVE\n",
+                     addr, VMALLOC_START, VMALLOC_END);
 
        pgd = swapper_pg_dir + pgd_index(addr);
        pud = pud_offset(pgd, addr);
index ce183aa1492c7abfaadfca95d54ec76bddc11455..fb28e85ae3aea3698c7d2cd5da564bb0a5f7fb0d 100644
@@ -41,8 +41,8 @@
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 
-long _sys_sigaltstack(const stack_t __user *uss,
-                     stack_t __user *uoss, struct pt_regs *regs)
+SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
+               stack_t __user *, uoss, struct pt_regs *, regs)
 {
        return do_sigaltstack(uss, uoss, regs->sp);
 }
@@ -78,7 +78,7 @@ int restore_sigcontext(struct pt_regs *regs,
 }
 
 /* sigreturn() returns long since it restores r0 in the interrupted code. */
-long _sys_rt_sigreturn(struct pt_regs *regs)
+SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
 {
        struct rt_sigframe __user *frame =
                (struct rt_sigframe __user *)(regs->sp);
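The SYSCALL_DEFINEn conversions above replace the open-coded _sys_* entry
points so that argument marshalling and tracing hooks are generated
uniformly.  Schematically, and simplified -- the real macro goes through
__SYSCALL_DEFINEx and emits extra metadata -- the first hunk boils down to:

asmlinkage long sys_sigaltstack(const stack_t __user *uss,
				stack_t __user *uoss,
				struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->sp);
}
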
index 5ec4b9c651f26a72e6ef39616827b6f29d2b0d4d..1eb3b39e36c70e01ad65e325496081157524fc40 100644
@@ -15,7 +15,7 @@
  * Derived from iLib's single-stepping code.
  */
 
-#ifndef __tilegx__   /* No support for single-step yet. */
+#ifndef __tilegx__   /* Hardware support for single step unavailable. */
 
 /* These functions are only used on the TILE platform */
 #include <linux/slab.h>
@@ -660,4 +660,75 @@ void single_step_once(struct pt_regs *regs)
                regs->pc += 8;
 }
 
+#else
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <arch/spr_def.h>
+
+static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
+
+
+/*
+ * Called directly on the occasion of an interrupt.
+ *
+ * If the process doesn't have single step set, then we use this as an
+ * opportunity to turn single step off.
+ *
+ * It has been mentioned that we could conditionally turn off single stepping
+ * on each entry into the kernel and rely on single_step_once to turn it
+ * on for the processes that matter (as we already do), but this
+ * implementation is somewhat more efficient in that we muck with registers
+ * once on a bum interrupt rather than on every entry into the kernel.
+ *
+ * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
+ * so we have to run through this process again before we can say that an
+ * instruction has executed.
+ *
+ * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
+ * it changes the PC.  If it hasn't changed, then we know that the interrupt
+ * wasn't generated by swint and we'll need to run this process again before
+ * we can say an instruction has executed.
+ *
+ * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
+ * on with our lives.
+ */
+
+void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
+{
+       unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+       struct thread_info *info = (void *)current_thread_info();
+       int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
+       unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+       if (is_single_step == 0) {
+               __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
+
+       } else if ((*ss_pc != regs->pc) ||
+                  (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
+
+               ptrace_notify(SIGTRAP);
+               control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+               control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+               __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+       }
+}
+
+
+/*
+ * Called from need_singlestep.  Set up the control registers and the enable
+ * register, then return.
+ */
+
+void single_step_once(struct pt_regs *regs)
+{
+       unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+       unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+       *ss_pc = regs->pc;
+       control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+       control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+       __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+       __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
+}
+
 #endif /* !__tilegx__ */
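Both paths -- the software emulation kept for TILEPro and the hardware
single step added here for TILE-Gx -- are driven by the TIF_SINGLESTEP
flag that the generic ptrace layer sets.  A minimal tracer that exercises
it end to end (standard ptrace API; userspace sketch):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status, steps = 0;

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}
	waitpid(pid, &status, 0);	/* child stops at exec */
	while (WIFSTOPPED(status)) {
		if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0)
			break;
		waitpid(pid, &status, 0); /* one SIGTRAP per instruction */
		steps++;
	}
	printf("stepped %d instructions\n", steps);
	return 0;
}
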
index 1cb5ec79de043ad4cdf9ebf244e5d9356c767bf1..75255d90aff309669b110dff32f2254ede9daa27 100644
@@ -212,7 +212,7 @@ void __init ipi_init(void)
 
                tile.x = cpu_x(cpu);
                tile.y = cpu_y(cpu);
-               if (hv_get_ipi_pte(tile, 1, &pte) != 0)
+               if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
                        panic("Failed to initialize IPI for cpu %d\n", cpu);
 
                offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
index ea2e0ce28380a2d3fa59391cfc02cb4ca0022f1e..0d54106be3d66b33f089cc499c0b33d82a3635b6 100644
 #include <arch/abi.h>
 #include <arch/interrupts.h>
 
+#define KBT_ONGOING    0  /* Backtrace still ongoing */
+#define KBT_DONE       1  /* Backtrace cleanly completed */
+#define KBT_RUNNING    2  /* Can't run backtrace on a running task */
+#define KBT_LOOP       3  /* Backtrace entered a loop */
 
 /* Is address on the specified kernel stack? */
 static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
@@ -207,11 +211,11 @@ static int KBacktraceIterator_next_item_inclusive(
        for (;;) {
                do {
                        if (!KBacktraceIterator_is_sigreturn(kbt))
-                               return 1;
+                               return KBT_ONGOING;
                } while (backtrace_next(&kbt->it));
 
                if (!KBacktraceIterator_restart(kbt))
-                       return 0;
+                       return KBT_DONE;
        }
 }
 
@@ -264,7 +268,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
        kbt->pgtable = NULL;
        kbt->verbose = 0;   /* override in caller if desired */
        kbt->profile = 0;   /* override in caller if desired */
-       kbt->end = 0;
+       kbt->end = KBT_ONGOING;
        kbt->new_context = 0;
        if (is_current) {
                HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
@@ -290,7 +294,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
        if (regs == NULL) {
                if (is_current || t->state == TASK_RUNNING) {
                        /* Can't do this; we need registers */
-                       kbt->end = 1;
+                       kbt->end = KBT_RUNNING;
                        return;
                }
                pc = get_switch_to_pc();
@@ -305,26 +309,29 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
        }
 
        backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
-       kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+       kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
 }
 EXPORT_SYMBOL(KBacktraceIterator_init);
 
 int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
 {
-       return kbt->end;
+       return kbt->end != KBT_ONGOING;
 }
 EXPORT_SYMBOL(KBacktraceIterator_end);
 
 void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
 {
+       VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp;
        kbt->new_context = 0;
-       if (!backtrace_next(&kbt->it) &&
-           !KBacktraceIterator_restart(kbt)) {
-                       kbt->end = 1;
-                       return;
-               }
-
-       kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+       if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
+               kbt->end = KBT_DONE;
+               return;
+       }
+       kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
+       if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
+               /* Trapped in a loop; give up. */
+               kbt->end = KBT_LOOP;
+       }
 }
 EXPORT_SYMBOL(KBacktraceIterator_next);
 
@@ -387,6 +394,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
                        break;
                }
        }
+       if (kbt->end == KBT_LOOP)
+               pr_err("Stack dump stopped; next frame identical to this one\n");
        if (headers)
                pr_err("Stack dump complete\n");
 }
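The KBT_LOOP case added above catches an unwinder that stops making
progress by comparing (pc, sp) across one iteration.  The same guard in a
self-contained toy walker -- the frame type and chain here are invented
for illustration and are not the kernel's:

#include <stdio.h>

struct frame {
	unsigned long pc, sp;
	const struct frame *next;	/* toy stand-in for real unwinding */
};

/* Toy unwinder step; returns 0 at the natural end of the chain. */
static int frame_next(const struct frame **f)
{
	if (!(*f)->next)
		return 0;
	*f = (*f)->next;
	return 1;
}

int main(void)
{
	struct frame f2 = { 0x2000, 0xbf00, 0 };
	struct frame f1 = { 0x1000, 0xbf80, &f2 };
	const struct frame *cur = &f1;

	f2.next = &f2;			/* corrupt frame pointing at itself */
	for (;;) {
		unsigned long old_pc = cur->pc, old_sp = cur->sp;

		printf("pc %#lx sp %#lx\n", cur->pc, cur->sp);
		if (!frame_next(&cur)) {
			puts("backtrace complete");	  /* KBT_DONE */
			break;
		}
		if (cur->pc == old_pc && cur->sp == old_sp) {
			puts("loop detected; giving up"); /* KBT_LOOP */
			break;
		}
	}
	return 0;
}
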
index f0f87eab8c3982f087e6db3a71327ba283659f5e..7e764669a0222755db93373089539e18ade539d8 100644
@@ -110,6 +110,15 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
 #define sys_sync_file_range sys_sync_file_range2
 #endif
 
+/* Call the trampolines to manage pt_regs where necessary. */
+#define sys_execve _sys_execve
+#define sys_sigaltstack _sys_sigaltstack
+#define sys_rt_sigreturn _sys_rt_sigreturn
+#define sys_clone _sys_clone
+#ifndef __tilegx__
+#define sys_cmpxchg_badaddr _sys_cmpxchg_badaddr
+#endif
+
 /*
  * Note that we can't include <linux/unistd.h> here since the header
  * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
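Redefining sys_foo before the table is generated makes the generated
entries point at the pt_regs-aware trampolines.  Simplified, the pattern
these #defines feed into looks like this (a sketch of the usual
syscall-table idiom; details elided):

#define __SYSCALL(nr, call) [nr] = (call),

void *sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd.h>
};
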
index 0f362dc2c57f6f68d5c32e90b65c32f5b28d8f7d..5474fc2e77e8456b3d6e745b8defc3129e08eb85 100644
@@ -260,7 +260,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
                address = regs->pc;
                break;
        case INT_UNALIGN_DATA:
-#ifndef __tilegx__  /* FIXME: GX: no single-step yet */
+#ifndef __tilegx__  /* Emulated support for single step debugging */
                if (unaligned_fixup >= 0) {
                        struct single_step_state *state =
                                current_thread_info()->step_state;
@@ -278,7 +278,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
        case INT_DOUBLE_FAULT:
                /*
                 * For double fault, "reason" is actually passed as
-                * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so
+                * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
                 * we can provide the original fault number rather than
                 * the uninteresting "INT_DOUBLE_FAULT" so the user can
                 * learn what actually struck while PL0 ICS was set.
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
new file mode 100644
index 0000000..b88f9c0
--- /dev/null
@@ -0,0 +1,38 @@
+#
+# KVM configuration
+#
+
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       ---help---
+         Say Y here to get to see options for using your Linux host to run
+         other operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and
+         disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       tristate "Kernel-based Virtual Machine (KVM) support"
+       depends on HAVE_KVM && MODULES && EXPERIMENTAL
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       ---help---
+         Support hosting paravirtualized guest machines.
+
+         This module provides access to the hardware capabilities through
+         a character device node named /dev/kvm.
+
+         To compile this as a module, choose M here: the module
+         will be called kvm.
+
+         If unsure, say N.
+
+source drivers/vhost/Kconfig
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
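With this Kconfig in place the option appears under a new "Virtualization"
menu; a configuration that builds KVM as a module would contain something
like the following fragment (illustrative, not taken from a real defconfig):

CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
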
index 746dc81ed3c4fa3110386852c34b5a8597c082dd..93122d5b1558dad80205f1b09fdfbd19dcfc8a97 100644
@@ -3,8 +3,8 @@
 #
 
 lib-y = cacheflush.o checksum.o cpumask.o delay.o \
-       mb_incoherent.o uaccess.o \
-       memcpy_$(BITS).o memchr_$(BITS).o memmove_$(BITS).o memset_$(BITS).o \
+       mb_incoherent.o uaccess.o memmove.o \
+       memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
        strchr_$(BITS).o strlen_$(BITS).o
 
 ifeq ($(CONFIG_TILEGX),y)
index 8040b42a8eea4b4172271b344aa978b7cc587d55..7a5cc706ab62cf19dbbcae51fdb5b2a4ea04680a 100644
@@ -300,7 +300,7 @@ void __init __init_atomic_per_cpu(void)
 #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
        /* Validate power-of-two and "bigger than cpus" assumption */
-       BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
+       BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
        BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
 
        /*
@@ -314,17 +314,17 @@ void __init __init_atomic_per_cpu(void)
        BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);
 
        /* The locks must all fit on one page. */
-       BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
+       BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
 
        /*
         * We use the page offset of the atomic value's address as
         * an index into atomic_locks, excluding the low 3 bits.
         * That should not produce more indices than ATOMIC_HASH_SIZE.
         */
-       BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
+       BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
        /* The futex code makes this assumption, so we validate it here. */
-       BUG_ON(sizeof(atomic_t) != sizeof(int));
+       BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 }
index ce5dbf56578f6a9ee303f484ea54bbdba06c40f2..1509c5597653c0c7e6731ed071477574aad09720 100644
@@ -45,6 +45,9 @@ EXPORT_SYMBOL(__copy_from_user_zeroing);
 EXPORT_SYMBOL(__copy_in_user_inatomic);
 #endif
 
+/* arch/tile/lib/mb_incoherent.S */
+EXPORT_SYMBOL(__mb_incoherent);
+
 /* hypervisor glue */
 #include <hv/hypervisor.h>
 EXPORT_SYMBOL(hv_dev_open);
index 30c3b7ebb55d54a7017657034e30f74c00c506e0..2a419a6122db78e4b45c532de3b5ef6d24cc1cad 100644
  *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  *   NON INFRINGEMENT.  See the GNU General Public License for
  *   more details.
- *
- * This file shares the implementation of the userspace memcpy and
- * the kernel's memcpy, copy_to_user and copy_from_user.
  */
 
 #include <arch/chip.h>
 
 
+/*
+ * This file shares the implementation of the userspace memcpy and
+ * the kernel's memcpy, copy_to_user and copy_from_user.
+ */
+
 #include <linux/linkage.h>
 
 /* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */
@@ -53,9 +55,9 @@
  */
 ENTRY(__copy_from_user_inatomic)
 .type __copy_from_user_inatomic, @function
-        FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+       FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
          .text.memcpy_common, \
-          .Lend_memcpy_common - __copy_from_user_inatomic)
+         .Lend_memcpy_common - __copy_from_user_inatomic)
        { movei r29, IS_COPY_FROM_USER; j memcpy_common }
        .size __copy_from_user_inatomic, . - __copy_from_user_inatomic
 
@@ -64,7 +66,7 @@ ENTRY(__copy_from_user_inatomic)
  */
 ENTRY(__copy_from_user_zeroing)
 .type __copy_from_user_zeroing, @function
-        FEEDBACK_REENTER(__copy_from_user_inatomic)
+       FEEDBACK_REENTER(__copy_from_user_inatomic)
        { movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
        .size __copy_from_user_zeroing, . - __copy_from_user_zeroing
 
@@ -74,13 +76,13 @@ ENTRY(__copy_from_user_zeroing)
  */
 ENTRY(__copy_to_user_inatomic)
 .type __copy_to_user_inatomic, @function
-        FEEDBACK_REENTER(__copy_from_user_inatomic)
+       FEEDBACK_REENTER(__copy_from_user_inatomic)
        { movei r29, IS_COPY_TO_USER; j memcpy_common }
        .size __copy_to_user_inatomic, . - __copy_to_user_inatomic
 
 ENTRY(memcpy)
 .type memcpy, @function
-        FEEDBACK_REENTER(__copy_from_user_inatomic)
+       FEEDBACK_REENTER(__copy_from_user_inatomic)
        { movei r29, IS_MEMCPY }
        .size memcpy, . - memcpy
        /* Fall through */
@@ -157,35 +159,35 @@ EX:       { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
        { addi r3, r1, 60; andi r9, r9, -64 }
 
 #if CHIP_HAS_WH64()
-        /* No need to prefetch dst, we'll just do the wh64
-         * right before we copy a line.
+       /* No need to prefetch dst, we'll just do the wh64
+        * right before we copy a line.
         */
 #endif
 
 EX:    { lw r5, r3; addi r3, r3, 64; movei r4, 1 }
-        /* Intentionally stall for a few cycles to leave L2 cache alone. */
-        { bnzt zero, .; move r27, lr }
+       /* Intentionally stall for a few cycles to leave L2 cache alone. */
+       { bnzt zero, .; move r27, lr }
 EX:    { lw r6, r3; addi r3, r3, 64 }
-        /* Intentionally stall for a few cycles to leave L2 cache alone. */
-        { bnzt zero, . }
+       /* Intentionally stall for a few cycles to leave L2 cache alone. */
+       { bnzt zero, . }
 EX:    { lw r7, r3; addi r3, r3, 64 }
 #if !CHIP_HAS_WH64()
-        /* Prefetch the dest */
-        /* Intentionally stall for a few cycles to leave L2 cache alone. */
-        { bnzt zero, . }
-        /* Use a real load to cause a TLB miss if necessary.  We aren't using
-         * r28, so this should be fine.
-         */
+       /* Prefetch the dest */
+       /* Intentionally stall for a few cycles to leave L2 cache alone. */
+       { bnzt zero, . }
+       /* Use a real load to cause a TLB miss if necessary.  We aren't using
+        * r28, so this should be fine.
+        */
 EX:    { lw r28, r9; addi r9, r9, 64 }
-        /* Intentionally stall for a few cycles to leave L2 cache alone. */
-        { bnzt zero, . }
-        { prefetch r9; addi r9, r9, 64 }
-        /* Intentionally stall for a few cycles to leave L2 cache alone. */
-        { bnzt zero, . }
-        { prefetch r9; addi r9, r9, 64 }
+       /* Intentionally stall for a few cycles to leave L2 cache alone. */
+       { bnzt zero, . }
+       { prefetch r9; addi r9, r9, 64 }
+       /* Intentionally stall for a few cycles to leave L2 cache alone. */
+       { bnzt zero, . }
+       { prefetch r9; addi r9, r9, 64 }
 #endif
-        /* Intentionally stall for a few cycles to leave L2 cache alone. */
-        { bz zero, .Lbig_loop2 }
+       /* Intentionally stall for a few cycles to leave L2 cache alone. */
+       { bz zero, .Lbig_loop2 }
 
        /* On entry to this loop:
         * - r0 points to the start of dst line 0
@@ -197,7 +199,7 @@ EX: { lw r28, r9; addi r9, r9, 64 }
         *   to some "safe" recently loaded address.
         * - r5 contains *(r1 + 60)       [i.e. last word of source line 0]
         * - r6 contains *(r1 + 64 + 60)  [i.e. last word of source line 1]
-         * - r9 contains ((r0 + 63) & -64)
+        * - r9 contains ((r0 + 63) & -64)
         *     [start of next dst cache line.]
         */
 
@@ -208,137 +210,137 @@ EX:     { lw r28, r9; addi r9, r9, 64 }
        /* Copy line 0, first stalling until r5 is ready. */
 EX:    { move r12, r5; lw r16, r1 }
        { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
-        /* Prefetch several lines ahead. */
+       /* Prefetch several lines ahead. */
 EX:    { lw r5, r3; addi r3, r3, 64 }
-        { jal .Lcopy_line }
+       { jal .Lcopy_line }
 
        /* Copy line 1, first stalling until r6 is ready. */
 EX:    { move r12, r6; lw r16, r1 }
        { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
-        /* Prefetch several lines ahead. */
+       /* Prefetch several lines ahead. */
 EX:    { lw r6, r3; addi r3, r3, 64 }
        { jal .Lcopy_line }
 
        /* Copy line 2, first stalling until r7 is ready. */
 EX:    { move r12, r7; lw r16, r1 }
        { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
-        /* Prefetch several lines ahead. */
+       /* Prefetch several lines ahead. */
 EX:    { lw r7, r3; addi r3, r3, 64 }
-        /* Use up a caches-busy cycle by jumping back to the top of the
-         * loop. Might as well get it out of the way now.
-         */
-        { j .Lbig_loop }
+       /* Use up a caches-busy cycle by jumping back to the top of the
+        * loop. Might as well get it out of the way now.
+        */
+       { j .Lbig_loop }
 
 
        /* On entry:
         * - r0 points to the destination line.
         * - r1 points to the source line.
-         * - r3 is the next prefetch address.
+        * - r3 is the next prefetch address.
         * - r9 holds the last address used for wh64.
         * - r12 = WORD_15
-         * - r16 = WORD_0.
-         * - r17 == r1 + 16.
-         * - r27 holds saved lr to restore.
+        * - r16 = WORD_0.
+        * - r17 == r1 + 16.
+        * - r27 holds saved lr to restore.
         *
         * On exit:
         * - r0 is incremented by 64.
         * - r1 is incremented by 64, unless that would point to a word
-         *   beyond the end of the source array, in which case it is redirected
-         *   to point to an arbitrary word already in the cache.
+        *   beyond the end of the source array, in which case it is redirected
+        *   to point to an arbitrary word already in the cache.
         * - r2 is decremented by 64.
-         * - r3 is unchanged, unless it points to a word beyond the
-         *   end of the source array, in which case it is redirected
-         *   to point to an arbitrary word already in the cache.
-         *   Redirecting is OK since if we are that close to the end
-         *   of the array we will not come back to this subroutine
-         *   and use the contents of the prefetched address.
+        * - r3 is unchanged, unless it points to a word beyond the
+        *   end of the source array, in which case it is redirected
+        *   to point to an arbitrary word already in the cache.
+        *   Redirecting is OK since if we are that close to the end
+        *   of the array we will not come back to this subroutine
+        *   and use the contents of the prefetched address.
         * - r4 is nonzero iff r2 >= 64.
-         * - r9 is incremented by 64, unless it points beyond the
-         *   end of the last full destination cache line, in which
-         *   case it is redirected to a "safe address" that can be
-         *   clobbered (sp - 64)
+        * - r9 is incremented by 64, unless it points beyond the
+        *   end of the last full destination cache line, in which
+        *   case it is redirected to a "safe address" that can be
+        *   clobbered (sp - 64)
         * - lr contains the value in r27.
         */
 
 /* r26 unused */
 
 .Lcopy_line:
-        /* TODO: when r3 goes past the end, we would like to redirect it
-         * to prefetch the last partial cache line (if any) just once, for the
-         * benefit of the final cleanup loop. But we don't want to
-         * prefetch that line more than once, or subsequent prefetches
-         * will go into the RTF. But then .Lbig_loop should unconditionally
-         * branch to top of loop to execute final prefetch, and its
-         * nop should become a conditional branch.
-         */
-
-        /* We need two non-memory cycles here to cover the resources
-         * used by the loads initiated by the caller.
-         */
-        { add r15, r1, r2 }
+       /* TODO: when r3 goes past the end, we would like to redirect it
+        * to prefetch the last partial cache line (if any) just once, for the
+        * benefit of the final cleanup loop. But we don't want to
+        * prefetch that line more than once, or subsequent prefetches
+        * will go into the RTF. But then .Lbig_loop should unconditionally
+        * branch to top of loop to execute final prefetch, and its
+        * nop should become a conditional branch.
+        */
+
+       /* We need two non-memory cycles here to cover the resources
+        * used by the loads initiated by the caller.
+        */
+       { add r15, r1, r2 }
 .Lcopy_line2:
-        { slt_u r13, r3, r15; addi r17, r1, 16 }
+       { slt_u r13, r3, r15; addi r17, r1, 16 }
 
-        /* NOTE: this will stall for one cycle as L1 is busy. */
+       /* NOTE: this will stall for one cycle as L1 is busy. */
 
-        /* Fill second L1D line. */
+       /* Fill second L1D line. */
 EX:    { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */
 
 #if CHIP_HAS_WH64()
-        /* Prepare destination line for writing. */
+       /* Prepare destination line for writing. */
 EX:    { wh64 r9; addi r9, r9, 64 }
 #else
-        /* Prefetch dest line */
+       /* Prefetch dest line */
        { prefetch r9; addi r9, r9, 64 }
 #endif
-        /* Load seven words that are L1D hits to cover wh64 L2 usage. */
+       /* Load seven words that are L1D hits to cover wh64 L2 usage. */
 
-        /* Load the three remaining words from the last L1D line, which
-         * we know has already filled the L1D.
-         */
+       /* Load the three remaining words from the last L1D line, which
+        * we know has already filled the L1D.
+        */
 EX:    { lw r4, r1;  addi r1, r1, 4;   addi r20, r1, 16 }   /* r4 = WORD_12 */
 EX:    { lw r8, r1;  addi r1, r1, 4;   slt_u r13, r20, r15 }/* r8 = WORD_13 */
 EX:    { lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 }  /* r11 = WORD_14 */
 
-        /* Load the three remaining words from the first L1D line, first
-         * stalling until it has filled by "looking at" r16.
-         */
+       /* Load the three remaining words from the first L1D line, first
+        * stalling until it has filled by "looking at" r16.
+        */
 EX:    { lw r13, r1; addi r1, r1, 4; move zero, r16 }   /* r13 = WORD_1 */
 EX:    { lw r14, r1; addi r1, r1, 4 }                   /* r14 = WORD_2 */
 EX:    { lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */
 
-        /* Load second word from the second L1D line, first
-         * stalling until it has filled by "looking at" r17.
-         */
+       /* Load second word from the second L1D line, first
+        * stalling until it has filled by "looking at" r17.
+        */
 EX:    { lw r19, r1; addi r1, r1, 4; move zero, r17 }  /* r19 = WORD_5 */
 
-        /* Store last word to the destination line, potentially dirtying it
-         * for the first time, which keeps the L2 busy for two cycles.
-         */
+       /* Store last word to the destination line, potentially dirtying it
+        * for the first time, which keeps the L2 busy for two cycles.
+        */
 EX:    { sw r10, r12 }                                 /* store(WORD_15) */
 
-        /* Use two L1D hits to cover the sw L2 access above. */
+       /* Use two L1D hits to cover the sw L2 access above. */
 EX:    { lw r10, r1; addi r1, r1, 4 }                  /* r10 = WORD_6 */
 EX:    { lw r12, r1; addi r1, r1, 4 }                  /* r12 = WORD_7 */
 
-        /* Fill third L1D line. */
+       /* Fill third L1D line. */
 EX:    { lw r18, r1; addi r1, r1, 4 }                  /* r18 = WORD_8 */
 
-        /* Store first L1D line. */
+       /* Store first L1D line. */
 EX:    { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
 EX:    { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
 EX:    { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
 #if CHIP_HAS_WH64()
 EX:    { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
 #else
-        /* Back up the r9 to a cache line we are already storing to
+       /* Back up the r9 to a cache line we are already storing to
         * if it gets past the end of the dest vector.  Strictly speaking,
         * we don't need to back up to the start of a cache line, but it's free
         * and tidy, so why not?
-         */
+        */
 EX:    { sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */
 #endif
-        /* Store second L1D line. */
+       /* Store second L1D line. */
 EX:    { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
 EX:    { sw r0, r19; addi r0, r0, 4 }                  /* store(WORD_5) */
 EX:    { sw r0, r10; addi r0, r0, 4 }                  /* store(WORD_6) */
@@ -348,30 +350,30 @@ EX:       { lw r13, r1; addi r1, r1, 4; move zero, r18 }  /* r13 = WORD_9 */
 EX:    { lw r14, r1; addi r1, r1, 4 }                  /* r14 = WORD_10 */
 EX:    { lw r15, r1; move r1, r20   }                  /* r15 = WORD_11 */
 
-        /* Store third L1D line. */
+       /* Store third L1D line. */
 EX:    { sw r0, r18; addi r0, r0, 4 }                  /* store(WORD_8) */
 EX:    { sw r0, r13; addi r0, r0, 4 }                  /* store(WORD_9) */
 EX:    { sw r0, r14; addi r0, r0, 4 }                  /* store(WORD_10) */
 EX:    { sw r0, r15; addi r0, r0, 4 }                  /* store(WORD_11) */
 
-        /* Store rest of fourth L1D line. */
+       /* Store rest of fourth L1D line. */
 EX:    { sw r0, r4;  addi r0, r0, 4 }                  /* store(WORD_12) */
-        {
+       {
 EX:    sw r0, r8                                       /* store(WORD_13) */
-        addi r0, r0, 4
+       addi r0, r0, 4
        /* Will r2 be > 64 after we subtract 64 below? */
-        shri r4, r2, 7
-        }
-        {
+       shri r4, r2, 7
+       }
+       {
 EX:    sw r0, r11                                      /* store(WORD_14) */
-        addi r0, r0, 8
-        /* Record 64 bytes successfully copied. */
-        addi r2, r2, -64
-        }
+       addi r0, r0, 8
+       /* Record 64 bytes successfully copied. */
+       addi r2, r2, -64
+       }
 
        { jrp lr; move lr, r27 }
 
-        /* Convey to the backtrace library that the stack frame is size
+       /* Convey to the backtrace library that the stack frame is size
         * zero, and the real return address is on the stack rather than
         * in 'lr'.
         */
diff --git a/arch/tile/lib/memmove.c b/arch/tile/lib/memmove.c
new file mode 100644
index 0000000..fd615ae
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+void *memmove(void *dest, const void *src, size_t n)
+{
+       if ((const char *)src >= (char *)dest + n
+           || (char *)dest >= (const char *)src + n) {
+               /* We found no overlap, so let memcpy do all the heavy
+                * lifting (prefetching, etc.)
+                */
+               return memcpy(dest, src, n);
+       }
+
+       if (n != 0) {
+               const uint8_t *in;
+               uint8_t x;
+               uint8_t *out;
+               int stride;
+
+               if (src < dest) {
+                       /* copy backwards */
+                       in = (const uint8_t *)src + n - 1;
+                       out = (uint8_t *)dest + n - 1;
+                       stride = -1;
+               } else {
+                       /* copy forwards */
+                       in = (const uint8_t *)src;
+                       out = (uint8_t *)dest;
+                       stride = 1;
+               }
+
+               /* Manually software-pipeline this loop. */
+               x = *in;
+               in += stride;
+
+               while (--n != 0) {
+                       *out = x;
+                       out += stride;
+                       x = *in;
+                       in += stride;
+               }
+
+               *out = x;
+       }
+
+       return dest;
+}
+EXPORT_SYMBOL(memmove);
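The no-overlap test above lets memcpy handle the fast case; otherwise the
copy direction is chosen so a source byte is never clobbered before it is
read.  A quick userspace demonstration of the case that forces the
backward copy:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "abcdef";

	/* dst overlaps src from the right, so memmove copies backwards. */
	memmove(buf + 2, buf, 4);
	printf("%s\n", buf);	/* prints "ababcd" */
	return 0;
}
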
diff --git a/arch/tile/lib/memmove_32.c b/arch/tile/lib/memmove_32.c
deleted file mode 100644
index fd615ae..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- *   This program is free software; you can redistribute it and/or
- *   modify it under the terms of the GNU General Public License
- *   as published by the Free Software Foundation, version 2.
- *
- *   This program is distributed in the hope that it will be useful, but
- *   WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *   NON INFRINGEMENT.  See the GNU General Public License for
- *   more details.
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/module.h>
-
-void *memmove(void *dest, const void *src, size_t n)
-{
-       if ((const char *)src >= (char *)dest + n
-           || (char *)dest >= (const char *)src + n) {
-               /* We found no overlap, so let memcpy do all the heavy
-                * lifting (prefetching, etc.)
-                */
-               return memcpy(dest, src, n);
-       }
-
-       if (n != 0) {
-               const uint8_t *in;
-               uint8_t x;
-               uint8_t *out;
-               int stride;
-
-               if (src < dest) {
-                       /* copy backwards */
-                       in = (const uint8_t *)src + n - 1;
-                       out = (uint8_t *)dest + n - 1;
-                       stride = -1;
-               } else {
-                       /* copy forwards */
-                       in = (const uint8_t *)src;
-                       out = (uint8_t *)dest;
-                       stride = 1;
-               }
-
-               /* Manually software-pipeline this loop. */
-               x = *in;
-               in += stride;
-
-               while (--n != 0) {
-                       *out = x;
-                       out += stride;
-                       x = *in;
-                       in += stride;
-               }
-
-               *out = x;
-       }
-
-       return dest;
-}
-EXPORT_SYMBOL(memmove);
index d014c1fbcbc28816af4925b72d121c1d201edf0a..57dbb3a5bff86a59e71054100c4ac1c885402a7b 100644
@@ -18,6 +18,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 
+#undef memset
 
 void *memset(void *s, int c, size_t n)
 {
index f26f88e11e4a8d9885ba10eec773b4181a5739a8..4974292a553498bc789dae5e4e938e95e311051c 100644
@@ -16,6 +16,8 @@
 #include <linux/string.h>
 #include <linux/module.h>
 
+#undef strlen
+
 size_t strlen(const char *s)
 {
        /* Get an aligned pointer. */
index 704f3e8a43850c5d32a0eda83bb33190b72d07b3..f295b4ac941de22b760651f5ac019361e00802d2 100644
@@ -66,10 +66,10 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
 #ifndef __tilegx__
 /*
  * Synthesize the fault a PL0 process would get by doing a word-load of
- * an unaligned address or a high kernel address.  Called indirectly
- * from sys_cmpxchg() in kernel/intvec.S.
+ * an unaligned address or a high kernel address.
  */
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
+SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
+               struct pt_regs *, regs)
 {
        if (address >= PAGE_OFFSET)
                force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
@@ -563,10 +563,10 @@ do_sigbus:
 /*
  * When we take an ITLB or DTLB fault or access violation in the
  * supervisor while the critical section bit is set, the hypervisor is
- * reluctant to write new values into the EX_CONTEXT_1_x registers,
+ * reluctant to write new values into the EX_CONTEXT_K_x registers,
  * since that might indicate we have not yet squirreled the SPR
  * contents away and can thus safely take a recursive interrupt.
- * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
+ * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
  *
  * Note that this routine is called before homecache_tlb_defer_enter(),
  * which means that we can properly unlock any atomics that might
@@ -610,7 +610,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
         * fault.  We didn't set up a kernel stack on initial entry to
         * sys_cmpxchg, but instead had one set up by the fault, which
         * (because sys_cmpxchg never releases ICS) came to us via the
-        * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
+        * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
         * still referencing the original user code.  We release the
         * atomic lock and rewrite pt_regs so that it appears that we
         * came from user-space directly, and after we finish the
index 12ab137e7d4f837f36c95a7ebd3b82caa5a30869..8ef6595e162c1348ca65aa21e349bd2d47b81b42 100644
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-       static unsigned warn_count = 10;
-
-       if (unlikely(warn_count == 0))
-               return;
-
-       if (unlikely(in_interrupt())) {
-               if (in_irq()) {
-                       if (type != KM_IRQ0 && type != KM_IRQ1 &&
-                           type != KM_BIO_SRC_IRQ &&
-                           /* type != KM_BIO_DST_IRQ && */
-                           type != KM_BOUNCE_READ) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               } else if (!irqs_disabled()) {  /* softirq */
-                       if (type != KM_IRQ0 && type != KM_IRQ1 &&
-                           type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-                           type != KM_SKB_SUNRPC_DATA &&
-                           type != KM_SKB_DATA_SOFTIRQ &&
-                           type != KM_BOUNCE_READ) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               }
-       }
-
-       if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-           type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-               if (!irqs_disabled()) {
-                       WARN_ON(1);
-                       warn_count--;
-               }
-       } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-               if (irq_count() == 0 && !irqs_disabled()) {
-                       WARN_ON(1);
-                       warn_count--;
-               }
-       }
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
  * When holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
        pte_t *pte;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic_prot(type);
-
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        pte = kmap_get_pte(vaddr);
@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
        /* PAGE_NONE is a magic value that tells us to check immutability. */
        return kmap_atomic_prot(page, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-       /*
-        * Force other mappings to Oops if they try to access this pte without
-        * first remapping it.  Keeping stale mappings around is a bad idea.
-        */
-       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+       if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+           vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                pte_t *pte = kmap_get_pte(vaddr);
                pte_t pteval = *pte;
+               int idx, type;
+
+               type = kmap_atomic_idx_pop();
+               idx = type + KM_TYPE_NR*smp_processor_id();
+
+               /*
+                * Force other mappings to Oops if they try to access this pte
+                * without first remapping it.  Keeping stale mappings around
+                * is a bad idea.
+                */
                BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
                kmap_atomic_unregister(pte_page(pteval), vaddr);
                kpte_clear_flush(pte, vaddr);
@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
        arch_flush_lazy_mmu_mode();
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".
  * Currently we don't support this, though this may change in the future.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       return kmap_atomic(pfn_to_page(pfn), type);
+       return kmap_atomic(pfn_to_page(pfn));
 }
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-       return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+       return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }
 
 struct page *kmap_atomic_to_page(void *ptr)
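With the km_type argument gone, the kmap slot is managed by the
kmap_atomic_idx_push()/kmap_atomic_idx_pop() pair, which imposes a LIFO
discipline on nested mappings.  A sketch of conforming in-kernel usage
(copy_page_atomic is a hypothetical helper, not an existing API):

static void copy_page_atomic(struct page *dst, struct page *src)
{
	char *vdst = kmap_atomic(dst);
	char *vsrc = kmap_atomic(src);

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vsrc);	/* unmap in reverse order of mapping */
	kunmap_atomic(vdst);
}
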
index fb3b4a55cec4cd24b29bcf4c6220f0f4c33281cb..d78df3a6ee15e0d30cd2207d1f64df88de2ec590 100644
@@ -37,6 +37,8 @@
 #include <asm/pgalloc.h>
 #include <asm/homecache.h>
 
+#include <arch/sim.h>
+
 #include "migrate.h"
 
 
@@ -217,13 +219,6 @@ static unsigned long cache_flush_length(unsigned long length)
        return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
 }
 
-/* On the simulator, confirm lines have been evicted everywhere. */
-static void validate_lines_evicted(unsigned long pfn, size_t length)
-{
-       sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
-                   (HV_PhysAddr)pfn << PAGE_SHIFT, length);
-}
-
 /* Flush a page out of whatever cache(s) it is in. */
 void homecache_flush_cache(struct page *page, int order)
 {
@@ -234,7 +229,7 @@ void homecache_flush_cache(struct page *page, int order)
 
        homecache_mask(page, pages, &home_mask);
        flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
-       validate_lines_evicted(pfn, pages * PAGE_SIZE);
+       sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
 }
 
 
index d89c9eacd162c40a93bc37482a4ed55a4f3d681e..78e1982cb6c9dc0385e258b81bfc233f23baaaac 100644
@@ -1060,7 +1060,7 @@ void free_initmem(void)
 
        /*
         * Free the pages mapped from 0xc0000000 that correspond to code
-        * pages from 0xfd000000 that we won't use again after init.
+        * pages from MEM_SV_INTRPT that we won't use again after init.
         */
        free_init_pages("unused kernel text",
                        (unsigned long)_sinittext - text_delta,
index ec2b8da1aba483acb7e80b20c2c14041c81f4e7c..50d6aa20c3538637ae7d8e939d64717acbf53fd7 100644
@@ -120,6 +120,9 @@ config SMP
 
          If you don't know what to do, say N.
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+       def_bool y
+
 config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
@@ -147,3 +150,6 @@ config KERNEL_STACK_ORDER
          This option determines the size of UML kernel stacks.  They will
          be 1 << order pages.  The default is OK unless you're running Valgrind
          on UML, in which case, set this to 3.
+
+config NO_DMA
+       def_bool y
index 6bd456f96f90f6a345466f20cb0fc64b84a1c082..564f3de65b4ae129675fc82c56feebb635fc9279 100644
@@ -566,7 +566,6 @@ CONFIG_CRC32=m
 # CONFIG_CRC7 is not set
 # CONFIG_LIBCRC32C is not set
 CONFIG_PLIST=y
-CONFIG_HAS_DMA=y
 
 #
 # SCSI device support
diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h
deleted file mode 100644
index 1f469e8..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <asm/scatterlist.h>
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-       BUG();
-       return(0);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-       BUG();
-       return(0);
-}
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                  gfp_t flag)
-{
-       BUG();
-       return((void *) 0);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-                 dma_addr_t dma_handle)
-{
-       BUG();
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG();
-       return(0);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction direction)
-{
-       BUG();
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-            unsigned long offset, size_t size,
-            enum dma_data_direction direction)
-{
-       BUG();
-       return(0);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG();
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-          enum dma_data_direction direction)
-{
-       BUG();
-       return(0);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-            enum dma_data_direction direction)
-{
-       BUG();
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-               enum dma_data_direction direction)
-{
-       BUG();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-           enum dma_data_direction direction)
-{
-       BUG();
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-              enum dma_data_direction direction)
-{
-       BUG();
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_handle)
-{
-       BUG();
-       return 0;
-}
-
-#endif
index a9f7251b4a8dcadd347d7a8d71043e5b312ff673..41474fb5eee70d3f89474ae13c59cbaef10b0f37 100644
@@ -338,9 +338,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
        ((pte_t *) pmd_page_vaddr(*(dir)) +  pte_index(address))
 #define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
index 93af1cf0907ddafdca68e8884e5dc99a2ed0ea0b..68a90ecd14501aaaeb4a832364c58c89c1697d59 100644
@@ -8,23 +8,38 @@ extern int set_signals(int enable);
 extern void block_signals(void);
 extern void unblock_signals(void);
 
-#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
-                                    (flags) = get_signals(); } while(0)
-#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
-                                     set_signals(flags); } while(0)
-
-#define local_irq_save(flags) do { local_save_flags(flags); \
-                                   local_irq_disable(); } while(0)
-
-#define local_irq_enable() unblock_signals()
-#define local_irq_disable() block_signals()
-
-#define irqs_disabled()                 \
-({                                      \
-        unsigned long flags;            \
-        local_save_flags(flags);        \
-        (flags == 0);                   \
-})
+static inline unsigned long arch_local_save_flags(void)
+{
+       return get_signals();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       set_signals(flags);
+}
+
+static inline void arch_local_irq_enable(void)
+{
+       unblock_signals();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+       block_signals();
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+       flags = arch_local_save_flags();
+       arch_local_irq_disable();
+       return flags;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+       return arch_local_save_flags() == 0;
+}
 
 extern void *_switch_to(void *prev, void *next, void *last);
 #define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
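These arch_local_* inlines are what the generic irqflags layer consumes;
roughly, the wrappers it builds on top of them look like this (simplified
from include/linux/irqflags.h, with the irq-tracing hooks omitted):

#define local_irq_save(flags)				\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = arch_local_irq_save();		\
	} while (0)

#define local_irq_restore(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		arch_local_irq_restore(flags);		\
	} while (0)
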
index 69268014dd8e966860f7faa323b394961b6f6428..a3cab6d3ae020ccbb1701742e3e85c5c8d2af161 100644
@@ -50,8 +50,18 @@ SECTIONS
   .rela.got       : { *(.rela.got) }
   .rel.bss        : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
   .rela.bss       : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
-  .rel.plt        : { *(.rel.plt) }
-  .rela.plt       : { *(.rela.plt) }
+  .rel.plt : {
+       *(.rel.plt)
+       PROVIDE_HIDDEN(__rel_iplt_start = .);
+       *(.rel.iplt)
+       PROVIDE_HIDDEN(__rel_iplt_end = .);
+  }
+  .rela.plt : {
+       *(.rela.plt)
+       PROVIDE_HIDDEN(__rela_iplt_start = .);
+       *(.rela.iplt)
+       PROVIDE_HIDDEN(__rela_iplt_end = .);
+  }
   .init           : {
     KEEP (*(.init))
   } =0x90909090
index a746e3037a5bc896462affb2c62ab493a625cc58..3f0ac9e0c96639b1d1a346be1686d3f6374e5479 100644
@@ -334,7 +334,7 @@ unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
        irq_enter();
-       __do_IRQ(irq);
+       generic_handle_irq(irq);
        irq_exit();
        set_irq_regs(old_regs);
        return 1;
@@ -391,17 +391,10 @@ void __init init_IRQ(void)
 {
        int i;
 
-       irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
-       irq_desc[TIMER_IRQ].action = NULL;
-       irq_desc[TIMER_IRQ].depth = 1;
-       irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
-       enable_irq(TIMER_IRQ);
+       set_irq_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
+
        for (i = 1; i < NR_IRQS; i++) {
-               irq_desc[i].status = IRQ_DISABLED;
-               irq_desc[i].action = NULL;
-               irq_desc[i].depth = 1;
-               irq_desc[i].chip = &normal_irq_type;
-               enable_irq(i);
+               set_irq_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
        }
 }
 
index ec63785506714f8059c9c7a31cf44d4875466698..fbd99402d4d26219443f3d46c3b355b3c137790e 100644
@@ -22,7 +22,7 @@ SECTIONS
   _text = .;
   _stext = .;
   __init_begin = .;
-  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_TEXT_SECTION(0)
   . = ALIGN(PAGE_SIZE);
 
   .text      :
@@ -43,6 +43,23 @@ SECTIONS
        __syscall_stub_end = .;
   }
 
+  /*
+   * These are needed even in a static link, even if they wind up being empty.
+   * Newer glibc needs these __rel{,a}_iplt_{start,end} symbols.
+   */
+  .rel.plt : {
+       *(.rel.plt)
+       PROVIDE_HIDDEN(__rel_iplt_start = .);
+       *(.rel.iplt)
+       PROVIDE_HIDDEN(__rel_iplt_end = .);
+  }
+  .rela.plt : {
+       *(.rela.plt)
+       PROVIDE_HIDDEN(__rela_iplt_start = .);
+       *(.rela.iplt)
+       PROVIDE_HIDDEN(__rela_iplt_end = .);
+  }
+
   #include "asm/common.lds.S"
 
   init.data : { INIT_DATA }
index dec5678fc17f7b65ef5b1113d8f6246f910bab79..6e3359d6a8394c0d2f811a28a24f41e673e8793c 100644
@@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv)
 long long disable_timer(void)
 {
        struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } });
-       int remain, max = UM_NSEC_PER_SEC / UM_HZ;
+       long long remain, max = UM_NSEC_PER_SEC / UM_HZ;
 
        if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0)
                printk(UM_KERN_ERR "disable_timer - setitimer failed, "
index 8caac76ac324b39c647fc3762b25920b06fe6a08..3bd04022fd0c51acf29cc64c0720df69e08a245f 100644
@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()    do { } while (0)
index c4191b3b7056c6ad16563375f529d1480668a26e..363e33eb6ec18c5f1fbe0df10113361daaca6505 100644
 #include <asm/tlbflush.h>
 
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
index 8abde9ec90bffba165cd28cf250b89519507f22e..0c92113c4cb6d63a82cc48f10b2d0b9343eeb1ee 100644
@@ -49,24 +49,14 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 #endif
 
 #if defined(CONFIG_HIGHPTE)
-#define __KM_PTE                       \
-       (in_nmi() ? KM_NMI_PTE :        \
-        in_irq() ? KM_IRQ_PTE :        \
-        KM_PTE0)
 #define pte_offset_map(dir, address)                                   \
-       ((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) +             \
+       ((pte_t *)kmap_atomic(pmd_page(*(dir))) +               \
         pte_index((address)))
-#define pte_offset_map_nested(dir, address)                            \
-       ((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) +              \
-        pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic((pte))
 #else
 #define pte_offset_map(dir, address)                                   \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a kernel PTE and flush it from the TLB */
index f96ac9bedf75db0ca247326ab580301832761d82..f86da20347f27b24fcc5fab4dc4712c4b71584c0 100644 (file)
@@ -127,9 +127,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 
 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) ((void)(pte))/* NOP */
-#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */
 
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
 
index 7fda040a76cd7cae4e9caa3bbc9cf37661694993..a3c28ae4025b2c9a9422fa492faac821b7f221d3 100644 (file)
@@ -200,6 +200,23 @@ extern struct { char _entry[32]; } hypercall_page[];
        (type)__res;                                                    \
 })
 
+static inline long
+privcmd_call(unsigned call,
+            unsigned long a1, unsigned long a2,
+            unsigned long a3, unsigned long a4,
+            unsigned long a5)
+{
+       __HYPERCALL_DECLS;
+       __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
+       asm volatile("call *%[call]"
+                    : __HYPERCALL_5PARAM
+                    : [call] "a" (&hypercall_page[call])
+                    : __HYPERCALL_CLOBBER5);
+
+       return (long)__res;
+}
+
 static inline int
 HYPERVISOR_set_trap_table(struct trap_info *table)
 {
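
privcmd_call is a generic five-argument hypercall trampoline: unlike the compile-time HYPERVISOR_* wrappers below it, the hypercall number is a runtime value, which is what an ioctl-driven path needs. A hedged usage sketch, with constants taken from the Xen interface headers:

        /* ask the hypervisor for its version; illustrative only */
        long ver = privcmd_call(__HYPERVISOR_xen_version,
                                XENVER_version, 0, 0, 0, 0);
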
index bf5f7d32bd08d4a6dc253a624fe9111a73327c75..dd8c1414b3d57540d374fee9d923947aaa57ceb1 100644 (file)
@@ -37,14 +37,21 @@ typedef struct xpaddr {
 
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
-extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
+       unsigned long mfn;
+
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;
 
-       return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
+       mfn = get_phys_to_machine(pfn);
+
+       if (mfn != INVALID_P2M_ENTRY)
+               mfn &= ~FOREIGN_FRAME_BIT;
+
+       return mfn;
 }
 
 static inline int phys_to_machine_mapping_valid(unsigned long pfn)
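
The change above matters because INVALID_P2M_ENTRY is ~0UL, which has FOREIGN_FRAME_BIT (the top bit) set; the old unconditional mask turned the sentinel into a plausible-looking mfn. With the sentinel preserved, callers can test for it. A short sketch of the intended caller pattern (the error value is illustrative):

        unsigned long mfn = pfn_to_mfn(pfn);
        if (mfn == INVALID_P2M_ENTRY)
                return -ENXIO;  /* no machine frame backs this pfn */
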
@@ -159,6 +166,7 @@ static inline pte_t __pte_ma(pteval_t x)
 
 #define pgd_val_ma(x)  ((x).pgd)
 
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
 
 xmaddr_t arbitrary_virt_to_machine(void *address);
 unsigned long arbitrary_virt_to_mfn(void *vaddr);
index cd8da247dda14c8f638ba1b8df2a7b53aacd1212..a2baafb2fe6d38cddaf3e62dc1a8dd9feb18baf3 100644 (file)
@@ -701,6 +701,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
                per_cpu(acfreq_data, policy->cpu) = NULL;
                acpi_processor_unregister_performance(data->acpi_data,
                                                      policy->cpu);
+               kfree(data->freq_table);
                kfree(data);
        }
 
index 12cd823c8d038008f8abe77785b4bb475dd3cf20..17ad0336621135a2310f3e9a2e5d9bf3d2f668ba 100644 (file)
@@ -327,6 +327,6 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
        l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
 
-       l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+       l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
index fe73c1844a9a5b7c9a0c902bef0b9f993207acee..c1e8c7a5116493568e06ba353dd4150d6c027d9a 100644 (file)
@@ -49,7 +49,6 @@ static unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 {
        unsigned long offset, addr = (unsigned long)from;
-       int type = in_nmi() ? KM_NMI : KM_IRQ0;
        unsigned long size, len = 0;
        struct page *page;
        void *map;
@@ -63,9 +62,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
                offset = addr & (PAGE_SIZE - 1);
                size = min(PAGE_SIZE - offset, n - len);
 
-               map = kmap_atomic(page, type);
+               map = kmap_atomic(page);
                memcpy(to, map+offset, size);
-               kunmap_atomic(map, type);
+               kunmap_atomic(map);
                put_page(page);
 
                len  += size;
index 67414550c3ccf25b3d5a3094ba42b5d0f158610d..d5cd13945d5a48a30cc0c61c26824aa2dad38b5b 100644 (file)
@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        if (!is_crashed_pfn_valid(pfn))
                return -EFAULT;
 
-       vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+       vaddr = kmap_atomic_pfn(pfn);
 
        if (!userbuf) {
                memcpy(buf, (vaddr + offset), csize);
index aff0b3c2750929aff9f36ccf946f4a7ae31b1afa..ae03cab4352e8535946a835787c83e5c8563079b 100644 (file)
@@ -713,7 +713,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
        switch (action & 0xf) {
        case CPU_ONLINE:
-               INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
+               INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
                init_completion(&work.complete);
                /* FIXME: add schedule_work_on() */
                schedule_delayed_work_on(cpu, &work.work, 0);
index 6af118511b4a56fde1ee915162d2013c61e3d7b2..6c7faecd9e4aba5fa26919714a696f215d7d97d3 100644 (file)
@@ -747,7 +747,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
                .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
 
-       INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
+       INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
        alternatives_smp_switch(1);
 
index 852b319edbdcfc524bb1c5d719b53036c13adf2c..7d90ceb882a41ec55f0d8aea7b2c40cd806afd81 100644 (file)
@@ -919,9 +919,9 @@ spurious_fault(unsigned long error_code, unsigned long address)
 int show_unhandled_signals = 1;
 
 static inline int
-access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+access_error(unsigned long error_code, struct vm_area_struct *vma)
 {
-       if (write) {
+       if (error_code & PF_WRITE) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        return 1;
@@ -956,8 +956,10 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
        struct task_struct *tsk;
        unsigned long address;
        struct mm_struct *mm;
-       int write;
        int fault;
+       int write = error_code & PF_WRITE;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+                                       (write ? FAULT_FLAG_WRITE : 0);
 
        tsk = current;
        mm = tsk->mm;
@@ -1068,6 +1070,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
                        bad_area_nosemaphore(regs, error_code, address);
                        return;
                }
+retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
@@ -1111,9 +1114,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
         * we can handle it..
         */
 good_area:
-       write = error_code & PF_WRITE;
-
-       if (unlikely(access_error(error_code, write, vma))) {
+       if (unlikely(access_error(error_code, vma))) {
                bad_area_access_error(regs, error_code, address);
                return;
        }
@@ -1123,21 +1124,34 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault:
         */
-       fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(mm, vma, address, flags);
 
        if (unlikely(fault & VM_FAULT_ERROR)) {
                mm_fault_error(regs, error_code, address, fault);
                return;
        }
 
-       if (fault & VM_FAULT_MAJOR) {
-               tsk->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-                                    regs, address);
-       } else {
-               tsk->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-                                    regs, address);
+       /*
+        * Major/minor page fault accounting is only done on the
+        * initial attempt. If we go through a retry, it is extremely
+        * likely that the page will be found in page cache at that point.
+        */
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               if (fault & VM_FAULT_MAJOR) {
+                       tsk->maj_flt++;
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                                     regs, address);
+               } else {
+                       tsk->min_flt++;
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                                     regs, address);
+               }
+               if (fault & VM_FAULT_RETRY) {
+                       /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+                        * of starvation. */
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       goto retry;
+               }
        }
 
        check_v8086_mode(regs, address, tsk);
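
The retry protocol above generalizes as: the first handle_mm_fault pass runs with FAULT_FLAG_ALLOW_RETRY and may drop mmap_sem to wait for page I/O; the second pass clears the flag so it must complete, bounding the loop at one retry. A schematic of the flow (per-arch error decoding and the access checks, which rerun on retry, are elided):

        static int fault_once_with_retry(struct mm_struct *mm,
                                         unsigned long address, int write)
        {
                unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
                                     (write ? FAULT_FLAG_WRITE : 0);
                int fault;
        retry:
                down_read(&mm->mmap_sem);
                /* find_vma() and the checks run again after a retry */
                fault = handle_mm_fault(mm, find_vma(mm, address),
                                        address, flags);
                if (fault & VM_FAULT_RETRY) {
                        /* handle_mm_fault already dropped mmap_sem */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        goto retry;
                }
                up_read(&mm->mmap_sem);
                return fault;
        }
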
index 5e8fa12ef861738aae4e91ec8b165f6d2e58a55b..d723e369003cb3658eb60f4e7eed1a94a6a4afda 100644 (file)
@@ -9,6 +9,7 @@ void *kmap(struct page *page)
                return page_address(page);
        return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void kunmap(struct page *page)
 {
@@ -18,6 +19,7 @@ void kunmap(struct page *page)
                return;
        kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -27,10 +29,10 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
-
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -47,44 +48,56 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 
        return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void *__kmap_atomic(struct page *page)
+{
+       return kmap_atomic_prot(page, kmap_prot);
+}
+EXPORT_SYMBOL(__kmap_atomic);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       return kmap_atomic_prot(page, type, kmap_prot);
+       return kmap_atomic_prot_pfn(pfn, kmap_prot);
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-       /*
-        * Force other mappings to Oops if they'll try to access this pte
-        * without first remap it.  Keeping stale mappings around is a bad idea
-        * also, in case the page changes cacheability attributes or becomes
-        * a protected page in a hypervisor.
-        */
-       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+
+       if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+           vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+               int idx, type;
+
+               type = kmap_atomic_idx_pop();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+               WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+               /*
+                * Force other mappings to Oops if they try to access this
+                * pte without first remapping it.  Keeping stale mappings around
+                * is a bad idea also, in case the page changes cacheability
+                * attributes or becomes a protected page in a hypervisor.
+                */
                kpte_clear_flush(kmap_pte-idx, vaddr);
-       else {
+       }
 #ifdef CONFIG_DEBUG_HIGHMEM
+       else {
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
        }
+#endif
 
        pagefault_enable();
 }
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
-       return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL(__kunmap_atomic);
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
@@ -98,12 +111,6 @@ struct page *kmap_atomic_to_page(void *ptr)
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
 }
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);
 
 void __init set_highmem_pages_init(void)
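
kmap_atomic_idx_push/pop replace debug_kmap_atomic plus the caller-chosen km_type with a per-cpu stack of fixmap indices. The helpers behave roughly like this sketch (hypothetical names; the real helpers live behind <linux/highmem.h>):

        static DEFINE_PER_CPU(int, sketch_kmap_idx);

        static inline int sketch_idx_push(void)
        {
                int idx = __get_cpu_var(sketch_kmap_idx)++;
                BUG_ON(idx >= KM_TYPE_NR);      /* per-cpu stack overflow */
                return idx;                     /* slot for this mapping */
        }

        static inline int sketch_idx_pop(void)
        {
                int idx = --__get_cpu_var(sketch_kmap_idx);
                BUG_ON(idx < 0);                /* unbalanced kunmap_atomic */
                return idx;                     /* slot being released */
        }
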
index 72fc70cf6184c756b1157f272b0d5e2b7bcc0609..75a3d7f24a2cc1d635c4820b89a1fd4873698ca9 100644 (file)
@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);
 
-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
 {
        io_free_memtype(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
        pagefault_disable();
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 }
 
 /*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
  */
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
        /*
         * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
@@ -86,24 +85,33 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;
 
-       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-       /*
-        * Force other mappings to Oops if they'll try to access this pte
-        * without first remap it.  Keeping stale mappings around is a bad idea
-        * also, in case the page changes cacheability attributes or becomes
-        * a protected page in a hypervisor.
-        */
-       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+       if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+           vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+               int idx, type;
+
+               type = kmap_atomic_idx_pop();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+               WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+               /*
+                * Force other mappings to Oops if they try to access this
+                * pte without first remapping it.  Keeping stale mappings around
+                * is a bad idea also, in case the page changes cacheability
+                * attributes or becomes a protected page in a hypervisor.
+                */
                kpte_clear_flush(kmap_pte-idx, vaddr);
+       }
 
        pagefault_enable();
 }
index 68128a1b401a30a9316b0a9cfa35369c88f9e81a..90a7f5ad6916418e621ce1a7d3422125d0aacd5c 100644 (file)
@@ -19,15 +19,12 @@ config XEN_PVHVM
        depends on X86_LOCAL_APIC
 
 config XEN_MAX_DOMAIN_MEMORY
-       int "Maximum allowed size of a domain in gigabytes"
-       default 8 if X86_32
-       default 32 if X86_64
+       int
+       default 128
        depends on XEN
        help
-         The pseudo-physical to machine address array is sized
-         according to the maximum possible memory size of a Xen
-         domain.  This array uses 1 page per gigabyte, so there's no
-         need to be too stingy here.
+         This only affects the sizing of some bss arrays, the unused
+         portions of which are freed.
 
 config XEN_SAVE_RESTORE
        bool
index 63b83ceebd1a0984403af50aaefe29331d5f3132..44ab12dc2a12ee57a2f966d420e2c4cc46e0b704 100644 (file)
@@ -136,9 +136,6 @@ static void xen_vcpu_setup(int cpu)
        info.mfn = arbitrary_virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);
 
-       printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
-              cpu, vcpup, info.mfn, info.offset);
-
        /* Check to see if the hypervisor will put the vcpu_info
           structure where we want it, which allows direct access via
           a percpu-variable. */
@@ -152,9 +149,6 @@ static void xen_vcpu_setup(int cpu)
                /* This cpu is using the registered vcpu info, even if
                   later ones fail to. */
                per_cpu(xen_vcpu, cpu) = vcpup;
-
-               printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
-                      cpu, vcpup);
        }
 }
 
@@ -836,6 +830,11 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
                   Xen console noise. */
                break;
 
+       case MSR_IA32_CR_PAT:
+               if (smp_processor_id() == 0)
+                       xen_set_pat(((u64)high << 32) | low);
+               break;
+
        default:
                ret = native_write_msr_safe(msr, low, high);
        }
@@ -874,8 +873,6 @@ void xen_setup_vcpu_info_placement(void)
        /* xen_vcpu_setup managed to place the vcpu_info within the
           percpu area for all cpus, so make use of it */
        if (have_vcpu_info_placement) {
-               printk(KERN_INFO "Xen: using vcpu_info placement\n");
-
                pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
                pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
                pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
@@ -1189,6 +1186,9 @@ asmlinkage void __init xen_start_kernel(void)
        xen_raw_console_write("mapping kernel into physical memory\n");
        pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
 
+       /* Allocate and initialize top and mid mfn levels for p2m structure */
+       xen_build_mfn_list_list();
+
        init_mm.pgd = pgd;
 
        /* keep using Xen gdt for now; no urgent need to change it */
index f72d18c692217d67c45e57c16ea8585904097d1f..9631c90907ebd67b5522cac2c224cf445cfe7856 100644 (file)
@@ -57,6 +57,7 @@
 #include <asm/linkage.h>
 #include <asm/page.h>
 #include <asm/init.h>
+#include <asm/pat.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -140,7 +141,8 @@ static inline void check_zero(void)
  * large enough to allocate page table pages to allocate the rest.
  * Each page can map 2MB.
  */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+#define LEVEL1_IDENT_ENTRIES   (PTRS_PER_PTE * 4)
+static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
 
 #ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
@@ -171,49 +173,182 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3);   /* actual vcpu cr3 */
  */
 #define USER_LIMIT     ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
 
+/*
+ * Xen leaves the responsibility for maintaining p2m mappings to the
+ * guests themselves, but it must also access and update the p2m array
+ * during suspend/resume when all the pages are reallocated.
+ *
+ * The p2m table is logically a flat array, but we implement it as a
+ * three-level tree to allow the address space to be sparse.
+ *
+ *                               Xen
+ *                                |
+ *     p2m_top              p2m_top_mfn
+ *       /  \                   /   \
+ * p2m_mid p2m_mid     p2m_mid_mfn p2m_mid_mfn
+ *    / \      / \         /           /
+ *  p2m p2m p2m p2m p2m p2m p2m ...
+ *
+ * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
+ *
+ * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
+ * maximum representable pseudo-physical address space is:
+ *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
+ *
+ * P2M_PER_PAGE depends on the architecture, as a mfn is always
+ * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
+ * 512 and 1024 entries respectively. 
+ */
+
+unsigned long xen_max_p2m_pfn __read_mostly;
 
-#define P2M_ENTRIES_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))
-#define TOP_ENTRIES            (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
+#define P2M_PER_PAGE           (PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE       (PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE       (PAGE_SIZE / sizeof(unsigned long **))
 
-/* Placeholder for holes in the address space */
-static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
-               { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+#define MAX_P2M_PFN            (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
 
- /* Array of pointers to pages containing p2m entries */
-static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
-               { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
+/* Placeholders for holes in the address space */
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
 
-/* Arrays of p2m arrays expressed in mfns used for save/restore */
-static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
 
-static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
-       __page_aligned_bss;
+RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
-       BUG_ON(pfn >= MAX_DOMAIN_PAGES);
-       return pfn / P2M_ENTRIES_PER_PAGE;
+       BUG_ON(pfn >= MAX_P2M_PFN);
+       return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
+}
+
+static inline unsigned p2m_mid_index(unsigned long pfn)
+{
+       return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
 }
 
 static inline unsigned p2m_index(unsigned long pfn)
 {
-       return pfn % P2M_ENTRIES_PER_PAGE;
+       return pfn % P2M_PER_PAGE;
+}
+
+static void p2m_top_init(unsigned long ***top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = p2m_mid_missing;
+}
+
+static void p2m_top_mfn_init(unsigned long *top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+}
+
+static void p2m_top_mfn_p_init(unsigned long **top)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+               top[i] = p2m_mid_missing_mfn;
+}
+
+static void p2m_mid_init(unsigned long **mid)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               mid[i] = p2m_missing;
+}
+
+static void p2m_mid_mfn_init(unsigned long *mid)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               mid[i] = virt_to_mfn(p2m_missing);
 }
 
-/* Build the parallel p2m_top_mfn structures */
+static void p2m_init(unsigned long *p2m)
+{
+       unsigned i;
+
+       for (i = 0; i < P2M_MID_PER_PAGE; i++)
+               p2m[i] = INVALID_P2M_ENTRY;
+}
+
+/*
+ * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
+ *
+ * This is called both at boot time, and after resuming from suspend:
+ * - At boot time we're called very early, and must use extend_brk()
+ *   to allocate memory.
+ *
+ * - After resume we're called from within stop_machine, but the mfn
+ *   tree should already be completely allocated.
+ */
 void xen_build_mfn_list_list(void)
 {
-       unsigned pfn, idx;
+       unsigned long pfn;
 
-       for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
-               unsigned topidx = p2m_top_index(pfn);
+       /* Pre-initialize p2m_top_mfn to be completely missing */
+       if (p2m_top_mfn == NULL) {
+               p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_mid_mfn_init(p2m_mid_missing_mfn);
+
+               p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_top_mfn_p_init(p2m_top_mfn_p);
 
-               p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
+               p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+               p2m_top_mfn_init(p2m_top_mfn);
+       } else {
+               /* Reinitialise; mfns all change after migration */
+               p2m_mid_mfn_init(p2m_mid_missing_mfn);
        }
 
-       for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
-               unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
-               p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
+       for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
+               unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx = p2m_mid_index(pfn);
+               unsigned long **mid;
+               unsigned long *mid_mfn_p;
+
+               mid = p2m_top[topidx];
+               mid_mfn_p = p2m_top_mfn_p[topidx];
+
+               /* Don't bother allocating any mfn mid levels if
+                * they're just missing, just update the stored mfn,
+                * since all could have changed over a migrate.
+                */
+               if (mid == p2m_mid_missing) {
+                       BUG_ON(mididx);
+                       BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+                       p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
+                       pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
+                       continue;
+               }
+
+               if (mid_mfn_p == p2m_mid_missing_mfn) {
+                       /*
+                        * XXX boot-time only!  We should never find
+                        * missing parts of the mfn tree after
+                        * boot.  extend_brk() will BUG if we call
+                        * it too late.
+                        */
+                       mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+                       p2m_mid_mfn_init(mid_mfn_p);
+
+                       p2m_top_mfn_p[topidx] = mid_mfn_p;
+               }
+
+               p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+               mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
        }
 }
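
To make the three-level indexing concrete, a worked example on x86-64, where a 4096-byte page holds 512 8-byte pointers, so P2M_PER_PAGE == P2M_MID_PER_PAGE == P2M_TOP_PER_PAGE == 512:

        unsigned long pfn = 0x12345;            /* 74565 */
        unsigned topidx = pfn / (512 * 512);    /* 0 */
        unsigned mididx = (pfn / 512) % 512;    /* 145 */
        unsigned idx    = pfn % 512;            /* 325 */
        /* mfn = p2m_top[topidx][mididx][idx] */

The maximum pseudo-physical space is then 512^3 pages (512GB with 4K pages), while sparse regions cost only the shared p2m_missing/p2m_mid_missing placeholder pages.
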
 
@@ -222,8 +357,8 @@ void xen_setup_mfn_list_list(void)
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-               virt_to_mfn(p2m_top_mfn_list);
-       HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
+               virt_to_mfn(p2m_top_mfn);
+       HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
 }
 
 /* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -231,98 +366,176 @@ void __init xen_build_dynamic_phys_to_machine(void)
 {
        unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
-       unsigned pfn;
+       unsigned long pfn;
+
+       xen_max_p2m_pfn = max_pfn;
 
-       for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+       p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_init(p2m_missing);
+
+       p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_mid_init(p2m_mid_missing);
+
+       p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+       p2m_top_init(p2m_top);
+
+       /*
+        * The domain builder gives us a pre-constructed p2m array in
+        * mfn_list for all the pages initially given to us, so we just
+        * need to graft that into our tree structure.
+        */
+       for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx = p2m_mid_index(pfn);
 
-               p2m_top[topidx] = &mfn_list[pfn];
-       }
+               if (p2m_top[topidx] == p2m_mid_missing) {
+                       unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+                       p2m_mid_init(mid);
+
+                       p2m_top[topidx] = mid;
+               }
 
-       xen_build_mfn_list_list();
+               p2m_top[topidx][mididx] = &mfn_list[pfn];
+       }
 }
 
 unsigned long get_phys_to_machine(unsigned long pfn)
 {
-       unsigned topidx, idx;
+       unsigned topidx, mididx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES))
+       if (unlikely(pfn >= MAX_P2M_PFN))
                return INVALID_P2M_ENTRY;
 
        topidx = p2m_top_index(pfn);
+       mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
-       return p2m_top[topidx][idx];
+
+       return p2m_top[topidx][mididx][idx];
 }
 EXPORT_SYMBOL_GPL(get_phys_to_machine);
 
-/* install a  new p2m_top page */
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
+static void *alloc_p2m_page(void)
 {
-       unsigned topidx = p2m_top_index(pfn);
-       unsigned long **pfnp, *mfnp;
-       unsigned i;
+       return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
 
-       pfnp = &p2m_top[topidx];
-       mfnp = &p2m_top_mfn[topidx];
+static void free_p2m_page(void *p)
+{
+       free_page((unsigned long)p);
+}
 
-       for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
-               p[i] = INVALID_P2M_ENTRY;
+/* 
+ * Fully allocate the p2m structure for a given pfn.  We need to check
+ * that both the top and mid levels are allocated, and make sure the
+ * parallel mfn tree is kept in sync.  We may race with other cpus, so
+ * the new pages are installed with cmpxchg; if we lose the race then
+ * simply free the page we allocated and use the one that's there.
+ */
+static bool alloc_p2m(unsigned long pfn)
+{
+       unsigned topidx, mididx;
+       unsigned long ***top_p, **mid;
+       unsigned long *top_mfn_p, *mid_mfn;
 
-       if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
-               *mfnp = virt_to_mfn(p);
-               return true;
+       topidx = p2m_top_index(pfn);
+       mididx = p2m_mid_index(pfn);
+
+       top_p = &p2m_top[topidx];
+       mid = *top_p;
+
+       if (mid == p2m_mid_missing) {
+               /* Mid level is missing, allocate a new one */
+               mid = alloc_p2m_page();
+               if (!mid)
+                       return false;
+
+               p2m_mid_init(mid);
+
+               if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+                       free_p2m_page(mid);
        }
 
-       return false;
-}
+       top_mfn_p = &p2m_top_mfn[topidx];
+       mid_mfn = p2m_top_mfn_p[topidx];
 
-static void alloc_p2m(unsigned long pfn)
-{
-       unsigned long *p;
+       BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
+
+       if (mid_mfn == p2m_mid_missing_mfn) {
+               /* Separately check the mid mfn level */
+               unsigned long missing_mfn;
+               unsigned long mid_mfn_mfn;
+
+               mid_mfn = alloc_p2m_page();
+               if (!mid_mfn)
+                       return false;
+
+               p2m_mid_mfn_init(mid_mfn);
+
+               missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+               mid_mfn_mfn = virt_to_mfn(mid_mfn);
+               if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+                       free_p2m_page(mid_mfn);
+               else
+                       p2m_top_mfn_p[topidx] = mid_mfn;
+       }
+
+       if (p2m_top[topidx][mididx] == p2m_missing) {
+               /* p2m leaf page is missing */
+               unsigned long *p2m;
+
+               p2m = alloc_p2m_page();
+               if (!p2m)
+                       return false;
 
-       p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
-       BUG_ON(p == NULL);
+               p2m_init(p2m);
+
+               if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+                       free_p2m_page(p2m);
+               else
+                       mid_mfn[mididx] = virt_to_mfn(p2m);
+       }
 
-       if (!install_p2mtop_page(pfn, p))
-               free_page((unsigned long)p);
+       return true;
 }
 
 /* Try to install p2m mapping; fail if intermediate bits missing */
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
-       unsigned topidx, idx;
+       unsigned topidx, mididx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
+       if (unlikely(pfn >= MAX_P2M_PFN)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return true;
        }
 
        topidx = p2m_top_index(pfn);
-       if (p2m_top[topidx] == p2m_missing) {
-               if (mfn == INVALID_P2M_ENTRY)
-                       return true;
-               return false;
-       }
-
+       mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);
-       p2m_top[topidx][idx] = mfn;
+
+       if (p2m_top[topidx][mididx] == p2m_missing)
+               return mfn == INVALID_P2M_ENTRY;
+
+       p2m_top[topidx][mididx][idx] = mfn;
 
        return true;
 }
 
-void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-               return;
+               return true;
        }
 
        if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
-               alloc_p2m(pfn);
+               if (!alloc_p2m(pfn))
+                       return false;
 
                if (!__set_phys_to_machine(pfn, mfn))
-                       BUG();
+                       return false;
        }
+
+       return true;
 }
 
 unsigned long arbitrary_virt_to_mfn(void *vaddr)
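
alloc_p2m repeats one lock-free idiom at each level: allocate a page off to the side, initialize it, publish it with cmpxchg against the shared "missing" placeholder, and free the local copy if another cpu won the race. Distilled into a sketch (install_mid_sketch is a hypothetical helper, not part of the patch):

        static bool install_mid_sketch(unsigned long ***slot)
        {
                unsigned long **mid = alloc_p2m_page();

                if (!mid)
                        return false;
                p2m_mid_init(mid);      /* point every entry at p2m_missing */

                /* publish; if someone beat us, theirs is equivalent */
                if (cmpxchg(slot, p2m_mid_missing, mid) != p2m_mid_missing)
                        free_p2m_page(mid);
                return true;
        }
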
@@ -399,7 +612,7 @@ static bool xen_iomap_pte(pte_t pte)
        return pte_flags(pte) & _PAGE_IOMAP;
 }
 
-static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 {
        struct multicall_space mcs;
        struct mmu_update *u;
@@ -411,10 +624,16 @@ static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
        u->ptr = arbitrary_virt_to_machine(ptep).maddr;
        u->val = pte_val_ma(pteval);
 
-       MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
+       MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
 
        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
+EXPORT_SYMBOL_GPL(xen_set_domain_pte);
+
+static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+{
+       xen_set_domain_pte(ptep, pteval, DOMID_IO);
+}
 
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
@@ -561,7 +780,20 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
        if (val & _PAGE_PRESENT) {
                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & PTE_FLAGS_MASK;
-               val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+               unsigned long mfn = pfn_to_mfn(pfn);
+
+               /*
+                * If there's no mfn for the pfn, then just create an
+                * empty non-present pte.  Unfortunately this loses
+                * information about the original pfn, so
+                * pte_mfn_to_pfn is asymmetric.
+                */
+               if (unlikely(mfn == INVALID_P2M_ENTRY)) {
+                       mfn = 0;
+                       flags = 0;
+               }
+
+               val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
        }
 
        return val;
@@ -583,10 +815,18 @@ static pteval_t iomap_pte(pteval_t val)
 
 pteval_t xen_pte_val(pte_t pte)
 {
-       if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
-               return pte.pte;
+       pteval_t pteval = pte.pte;
+
+       /* If this is a WC pte, convert back from Xen WC to Linux WC */
+       if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
+               WARN_ON(!pat_enabled);
+               pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
+       }
 
-       return pte_mfn_to_pfn(pte.pte);
+       if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
+               return pteval;
+
+       return pte_mfn_to_pfn(pteval);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
@@ -596,10 +836,48 @@ pgdval_t xen_pgd_val(pgd_t pgd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
 
+/*
+ * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
+ * are reserved for now, to correspond to the Intel-reserved PAT
+ * types.
+ *
+ * We expect Linux's PAT set as follows:
+ *
+ * Idx  PTE flags        Linux    Xen    Default
+ * 0                     WB       WB     WB
+ * 1            PWT      WC       WT     WT
+ * 2        PCD          UC-      UC-    UC-
+ * 3        PCD PWT      UC       UC     UC
+ * 4    PAT              WB       WC     WB
+ * 5    PAT     PWT      WC       WP     WT
+ * 6    PAT PCD          UC-      UC     UC-
+ * 7    PAT PCD PWT      UC       UC     UC
+ */
+
+void xen_set_pat(u64 pat)
+{
+       /* We expect Linux to use a PAT setting of
+        * UC UC- WC WB (ignoring the PAT flag) */
+       WARN_ON(pat != 0x0007010600070106ull);
+}
+
 pte_t xen_make_pte(pteval_t pte)
 {
        phys_addr_t addr = (pte & PTE_PFN_MASK);
 
+       /* If Linux is trying to set a WC pte, then map to the Xen WC.
+        * If _PAGE_PAT is set, then it probably means it is really
+        * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
+        * things work out OK...
+        *
+        * (We should never see kernel mappings with _PAGE_PSE set,
+        * but we could see hugetlbfs mappings, I think.).
+        */
+       if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
+               if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
+                       pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
+       }
+
        /*
         * Unprivileged domains are allowed to do IOMAPpings for
         * PCI passthrough, but not map ISA space.  The ISA
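
The PAT dance above can be summarized numerically: Linux encodes WC as PWT alone (PAT index 1), but Xen's fixed table keeps WT there and parks WC at index 4 (_PAGE_PAT alone), so ptes are rewritten on the way into the hypervisor and back out. A round-trip sketch, assuming pat_enabled:

        pteval_t v = _PAGE_PWT;                         /* Linux: WC, idx 1 */
        v = (v & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; /* Xen:   WC, idx 4 */
        v = (v & ~_PAGE_PAT) | _PAGE_PWT;               /* back to Linux WC */
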
@@ -1712,6 +1990,9 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
        unsigned ident_pte;
        unsigned long pfn;
 
+       level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
+                                     PAGE_SIZE);
+
        ident_pte = 0;
        pfn = 0;
        for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
@@ -1722,7 +2003,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
                        pte_page = m2v(pmd[pmdidx].pmd);
                else {
                        /* Check for free pte pages */
-                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+                       if (ident_pte == LEVEL1_IDENT_ENTRIES)
                                break;
 
                        pte_page = &level1_ident_pgt[ident_pte];
@@ -1837,13 +2118,15 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
        return pgd;
 }
 #else  /* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
 
 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
                                         unsigned long max_pfn)
 {
        pmd_t *kernel_pmd;
 
+       level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+
        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
                                  xen_start_info->nr_pt_frames * PAGE_SIZE +
                                  512*1024);
@@ -2269,6 +2552,72 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+       unsigned long mfn;
+       pgprot_t prot;
+       struct mmu_update *mmu_update;
+};
+
+static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+                                unsigned long addr, void *data)
+{
+       struct remap_data *rmd = data;
+       pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
+
+       rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+       rmd->mmu_update->val = pte_val_ma(pte);
+       rmd->mmu_update++;
+
+       return 0;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+                              unsigned long addr,
+                              unsigned long mfn, int nr,
+                              pgprot_t prot, unsigned domid)
+{
+       struct remap_data rmd;
+       struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+       int batch;
+       unsigned long range;
+       int err = 0;
+
+       prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
+
+       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+
+       rmd.mfn = mfn;
+       rmd.prot = prot;
+
+       while (nr) {
+               batch = min(REMAP_BATCH_SIZE, nr);
+               range = (unsigned long)batch << PAGE_SHIFT;
+
+               rmd.mmu_update = mmu_update;
+               err = apply_to_page_range(vma->vm_mm, addr, range,
+                                         remap_area_mfn_pte_fn, &rmd);
+               if (err)
+                       goto out;
+
+               err = -EFAULT;
+               if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+                       goto out;
+
+               nr -= batch;
+               addr += range;
+       }
+
+       err = 0;
+out:
+
+       flush_tlb_all();
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct dentry *d_mmu_debug;
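
xen_remap_domain_mfn_range batches REMAP_BATCH_SIZE pte updates per mmu_update hypercall rather than trapping on every pte write. A hedged sketch of the expected caller, a privcmd-style mmap path mapping another domain's frames into a userspace vma (local variable names are illustrative):

        int err = xen_remap_domain_mfn_range(vma, vma->vm_start,
                                             first_mfn, nr_frames,
                                             vma->vm_page_prot, domid);
        if (err)
                return err;
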
index fa938c4aa2f72940e56c439f01d4e14f6aa499ba..537bb9aab777ab00b446d1cec9daded81a3ed71f 100644 (file)
@@ -12,7 +12,6 @@ enum pt_level {
 
 
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p);
 
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
index 9729c903404b1633a0eaed2b06bd0cfadabf9aa7..105db2501050483901a3ba9be6b37310b4152481 100644 (file)
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
+#include <xen/xen.h>
 #include <xen/page.h>
 #include <xen/interface/callback.h>
+#include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/interface/memory.h>
 #include <xen/features.h>
@@ -34,6 +36,39 @@ extern void xen_sysenter_target(void);
 extern void xen_syscall_target(void);
 extern void xen_syscall32_target(void);
 
+/* Amount of extra memory space we add to the e820 ranges */
+phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+
+/* 
+ * The maximum amount of extra memory compared to the base size.  The
+ * main scaling factor is the size of struct page.  At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ * 
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO                (10)
+
+static __init void xen_add_extra_mem(unsigned long pages)
+{
+       u64 size = (u64)pages * PAGE_SIZE;
+       u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
+
+       if (!pages)
+               return;
+
+       e820_add_region(extra_start, size, E820_RAM);
+       sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+
+       memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+
+       xen_extra_mem_size += size;
+
+       xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+}
+
 static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
                                              phys_addr_t end_addr)
 {
@@ -105,16 +140,65 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
-
 char * __init xen_memory_setup(void)
 {
+       static struct e820entry map[E820MAX] __initdata;
+
        unsigned long max_pfn = xen_start_info->nr_pages;
+       unsigned long long mem_end;
+       int rc;
+       struct xen_memory_map memmap;
+       unsigned long extra_pages = 0;
+       unsigned long extra_limit;
+       int i;
+       int op;
 
        max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
+       mem_end = PFN_PHYS(max_pfn);
+
+       memmap.nr_entries = E820MAX;
+       set_xen_guest_handle(memmap.buffer, map);
+
+       op = xen_initial_domain() ?
+               XENMEM_machine_memory_map :
+               XENMEM_memory_map;
+       rc = HYPERVISOR_memory_op(op, &memmap);
+       if (rc == -ENOSYS) {
+               memmap.nr_entries = 1;
+               map[0].addr = 0ULL;
+               map[0].size = mem_end;
+               /* 8MB slack (to balance backend allocations). */
+               map[0].size += 8ULL << 20;
+               map[0].type = E820_RAM;
+               rc = 0;
+       }
+       BUG_ON(rc);
 
        e820.nr_map = 0;
+       xen_extra_mem_start = mem_end;
+       for (i = 0; i < memmap.nr_entries; i++) {
+               unsigned long long end = map[i].addr + map[i].size;
+
+               if (map[i].type == E820_RAM) {
+                       if (map[i].addr < mem_end && end > mem_end) {
+                               /* Truncate region to max_mem. */
+                               u64 delta = end - mem_end;
+
+                               map[i].size -= delta;
+                               extra_pages += PFN_DOWN(delta);
+
+                               end = mem_end;
+                       }
+               }
 
-       e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
+               if (end > xen_extra_mem_start)
+                       xen_extra_mem_start = end;
+
+               /* If region is non-RAM or below mem_end, add what remains */
+               if ((map[i].type != E820_RAM || map[i].addr < mem_end) &&
+                   map[i].size > 0)
+                       e820_add_region(map[i].addr, map[i].size, map[i].type);
+       }
 
        /*
         * Even though this is normal, usable memory under Xen, reserve
@@ -136,7 +220,29 @@ char * __init xen_memory_setup(void)
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
-       xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+       extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+
+       /*
+        * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+        * factor the base size.  On non-highmem systems, the base
+        * size is the full initial memory allocation; on highmem it
+        * is limited to the max size of lowmem, so that it doesn't
+        * get completely filled.
+        *
+        * In principle there could be a problem in lowmem systems if
+        * the initial memory is also very large with respect to
+        * lowmem, but we won't try to deal with that here.
+        */
+       extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+                         max_pfn + extra_pages);
+
+       if (extra_limit >= max_pfn)
+               extra_pages = extra_limit - max_pfn;
+       else
+               extra_pages = 0;
+
+       if (!xen_initial_domain())
+               xen_add_extra_mem(extra_pages);
 
        return "Xen";
 }
index 7c8ab86163e9142b1c49c6c1ddad13ff203b7a59..64044747348efb3535461a67610621d9067767e3 100644 (file)
@@ -30,6 +30,9 @@ void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
+extern unsigned long xen_max_p2m_pfn;
+
+void xen_set_pat(u64);
 
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
index 76bf35554117d8044de73e9b3e1138e562379c0c..b03c043ce75b1243f18f080bbb06750913853a03 100644 (file)
@@ -324,10 +324,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #define pte_offset_kernel(dir,addr)                                    \
        ((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr)       pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr)        pte_offset_kernel((dir),(addr))
-
 #define pte_unmap(pte)         do { } while (0)
-#define pte_unmap_nested(pte)  do { } while (0)
 
 
 /*
index 0ec1fb69d4eacc2a310504caa9109186f9e89370..518c22bd9562ee20cb040c63d7f9eb6a62b4d5c0 100644 (file)
@@ -83,8 +83,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
                memcpy(dest_buf, src_buf, len);
 
-               kunmap_atomic(dest_buf, KM_USER0);
                kunmap_atomic(src_buf, KM_USER1);
+               kunmap_atomic(dest_buf, KM_USER0);
 
                async_tx_sync_epilog(submit);
        }
index 90d26c91f4e9e183b2a41363318344e3d646c20a..7a7219266e3cc343d1a9a63a492a9cd38e876db5 100644 (file)
@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
-               blkcipher_unmap_src(walk);
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
+               blkcipher_unmap_src(walk);
        }
 
        scatterwalk_advance(&walk->in, n);
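
Both hunks above reorder unmaps for the same reason: once kunmap_atomic stops taking a km_type and pops a per-cpu stack instead, mappings must be released in reverse order of acquisition. The required discipline, sketched with the new one-argument form:

        void *src = kmap_atomic(src_page);      /* pushed first */
        void *dst = kmap_atomic(dst_page);      /* pushed second */

        memcpy(dst, src, len);

        kunmap_atomic(dst);     /* pop in LIFO order: dst first */
        kunmap_atomic(src);
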
index 88681aca88c581891399e18b056d9a6df94a9e01..3f3489c5ca8c3ade9d27adc657ea4cce27cc86f2 100644 (file)
@@ -9,7 +9,6 @@ menuconfig ACPI
        depends on PCI
        depends on PM
        select PNP
-       select CPU_IDLE
        default y
        help
          Advanced Configuration and Power Interface (ACPI) support for 
@@ -66,7 +65,6 @@ config ACPI_PROCFS
 config ACPI_PROCFS_POWER
        bool "Deprecated power /proc/acpi directories"
        depends on PROC_FS
-       default y
        help
          For backwards compatibility, this option allows
           deprecated power /proc/acpi/ directories to exist, even when
@@ -90,13 +88,6 @@ config ACPI_POWER_METER
          To compile this driver as a module, choose M here:
          the module will be called power-meter.
 
-config ACPI_SYSFS_POWER
-       bool "Future power /sys interface"
-       select POWER_SUPPLY
-       default y
-       help
-         Say N to disable power /sys interface
-
 config ACPI_EC_DEBUGFS
        tristate "EC read/write access through /sys/kernel/debug/ec"
        default n
@@ -136,6 +127,7 @@ config ACPI_PROC_EVENT
 config ACPI_AC
        tristate "AC Adapter"
        depends on X86
+       select POWER_SUPPLY
        default y
        help
          This driver supports the AC Adapter object, which indicates
@@ -148,6 +140,7 @@ config ACPI_AC
 config ACPI_BATTERY
        tristate "Battery"
        depends on X86
+       select POWER_SUPPLY
        default y
        help
          This driver adds support for battery information through
@@ -206,6 +199,7 @@ config ACPI_DOCK
 config ACPI_PROCESSOR
        tristate "Processor"
        select THERMAL
+       select CPU_IDLE
        default y
        help
          This driver installs ACPI as the idle handler for Linux and uses
@@ -364,6 +358,7 @@ config ACPI_HOTPLUG_MEMORY
 config ACPI_SBS
        tristate "Smart Battery System"
        depends on X86
+       select POWER_SUPPLY
        help
          This driver supports the Smart Battery System, another
          type of access to battery information, found on some laptops.
index 56205a0b85dfc60eb3dc85def0a7432639ea0142..ba9afeaa23acfb676021c86432fd89b45f8ba5cf 100644 (file)
@@ -32,9 +32,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
 #include <linux/power_supply.h>
-#endif
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
@@ -86,9 +84,7 @@ static struct acpi_driver acpi_ac_driver = {
 };
 
 struct acpi_ac {
-#ifdef CONFIG_ACPI_SYSFS_POWER
        struct power_supply charger;
-#endif
        struct acpi_device * device;
        unsigned long long state;
 };
@@ -104,7 +100,6 @@ static const struct file_operations acpi_ac_fops = {
        .release = single_release,
 };
 #endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
 static int get_ac_property(struct power_supply *psy,
                           enum power_supply_property psp,
                           union power_supply_propval *val)
@@ -123,7 +118,6 @@ static int get_ac_property(struct power_supply *psy,
 static enum power_supply_property ac_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
 };
-#endif
 /* --------------------------------------------------------------------------
                                AC Adapter Management
    -------------------------------------------------------------------------- */
@@ -247,9 +241,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
                                                  dev_name(&device->dev), event,
                                                  (u32) ac->state);
                acpi_notifier_call_chain(device, event, (u32) ac->state);
-#ifdef CONFIG_ACPI_SYSFS_POWER
                kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
-#endif
        }
 
        return;
@@ -282,14 +274,12 @@ static int acpi_ac_add(struct acpi_device *device)
 #endif
        if (result)
                goto end;
-#ifdef CONFIG_ACPI_SYSFS_POWER
        ac->charger.name = acpi_device_bid(device);
        ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
        ac->charger.get_property = get_ac_property;
        power_supply_register(&ac->device->dev, &ac->charger);
-#endif
 
        printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
               acpi_device_name(device), acpi_device_bid(device),
@@ -316,10 +306,8 @@ static int acpi_ac_resume(struct acpi_device *device)
        old_state = ac->state;
        if (acpi_ac_get_state(ac))
                return 0;
-#ifdef CONFIG_ACPI_SYSFS_POWER
        if (old_state != ac->state)
                kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
-#endif
        return 0;
 }
 
@@ -333,10 +321,8 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
 
        ac = acpi_driver_data(device);
 
-#ifdef CONFIG_ACPI_SYSFS_POWER
        if (ac->charger.dev)
                power_supply_unregister(&ac->charger);
-#endif
 #ifdef CONFIG_ACPI_PROCFS_POWER
        acpi_ac_remove_fs(device);
 #endif
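
With the CONFIG_ACPI_SYSFS_POWER guards gone, ac.c registers its power_supply unconditionally, which is what the new "select POWER_SUPPLY" lines in the Kconfig hunk above guarantee at build time. A minimal sketch of the same registration pattern, assuming the power_supply API of this era (power_supply_register() taking the embedded struct directly); the adapter name and helpers are hypothetical:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/power_supply.h>

    /* Hypothetical adapter; mirrors the acpi_ac pattern above. */
    struct my_adapter {
            struct power_supply charger;
            int online;
    };

    static enum power_supply_property my_props[] = {
            POWER_SUPPLY_PROP_ONLINE,
    };

    static int my_get_property(struct power_supply *psy,
                               enum power_supply_property psp,
                               union power_supply_propval *val)
    {
            struct my_adapter *adap =
                    container_of(psy, struct my_adapter, charger);

            if (psp != POWER_SUPPLY_PROP_ONLINE)
                    return -EINVAL;
            val->intval = adap->online;
            return 0;
    }

    static int my_register(struct device *dev, struct my_adapter *adap)
    {
            adap->charger.name = "my_ac";   /* hypothetical name */
            adap->charger.type = POWER_SUPPLY_TYPE_MAINS;
            adap->charger.properties = my_props;
            adap->charger.num_properties = ARRAY_SIZE(my_props);
            adap->charger.get_property = my_get_property;
            return power_supply_register(dev, &adap->charger);
    }
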
index d93cc06f4bf81a356ceb33f6a279796d9464f7e6..a7e1d1aa4107c29d454e774ce912e60b7ace4ce5 100644 (file)
@@ -21,7 +21,7 @@ acpi-y += exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
         excreate.o  exmisc.o   exoparg2.o  exregion.o  exstore.o   exutils.o \
         exdump.o    exmutex.o  exoparg3.o  exresnte.o  exstoren.o  exdebug.o
 
-acpi-y += hwacpi.o  hwgpe.o  hwregs.o  hwsleep.o hwxface.o hwvalid.o
+acpi-y += hwacpi.o  hwgpe.o  hwregs.o  hwsleep.o hwxface.o hwvalid.o hwpci.o
 
 acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
 
@@ -44,4 +44,5 @@ acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
 
 acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
                utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
-               utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o
+               utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
+               utosi.o utxferror.o
index 48faf3eba9fb60cf0806d2516a69d1150b6883f0..72e9d5eb083cf09c3b9d0c55850f9c362b175fa0 100644 (file)
@@ -105,6 +105,8 @@ void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg);
 acpi_status
 acpi_db_display_objects(char *obj_type_arg, char *display_count_arg);
 
+void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
+
 acpi_status acpi_db_find_name_in_namespace(char *name_arg);
 
 void acpi_db_set_scope(char *name);
index 36867cd70eacf3f9495325c71200bc688d705510..a6f99cc37a19947ece8c6ebb322687320e6d2853 100644 (file)
@@ -105,8 +105,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
                         struct acpi_gpe_block_info **return_gpe_block);
 
 acpi_status
-acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
-                            struct acpi_gpe_block_info *gpe_block);
+acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+                            struct acpi_gpe_block_info *gpe_block,
+                            void *ignored);
 
 acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
 
index 1d192142c69114ea65ecec63c98209190aa2eb83..ad88fcae4eb9859be0c4ecf8d2780ab42dbf621e 100644 (file)
@@ -132,6 +132,7 @@ struct acpi_table_fadt acpi_gbl_FADT;
 u32 acpi_current_gpe_count;
 u32 acpi_gbl_trace_flags;
 acpi_name acpi_gbl_trace_method_name;
+u8 acpi_gbl_system_awake_and_running;
 
 #endif
 
@@ -187,6 +188,10 @@ ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
 ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
 ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
 
+/* Mutex for _OSI support */
+
+ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
+
 /* Reader/Writer lock is used for namespace walk and dynamic table unload */
 
 ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
@@ -255,6 +260,7 @@ ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
 ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
 ACPI_EXTERN void *acpi_gbl_table_handler_context;
 ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
+ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
 
 /* Owner ID support */
 
@@ -273,8 +279,8 @@ ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
 ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
 ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
 ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
 ACPI_EXTERN u8 acpi_gbl_osi_data;
+ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
 
 #ifndef DEFINE_ACPI_GLOBALS
 
@@ -364,6 +370,7 @@ ACPI_EXTERN struct acpi_fixed_event_handler
 ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
 ACPI_EXTERN struct acpi_gpe_block_info
 *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+ACPI_EXTERN u8 acpi_all_gpes_initialized;
 
 /*****************************************************************************
  *
index 120b3af565968cf0b68d2af967cd20ff90c1ca40..167470ad2d21756874592a51617a499f10e84907 100644 (file)
@@ -121,6 +121,13 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                                 struct acpi_gpe_block_info *gpe_block,
                                 void *context);
 
+/*
+ * hwpci - PCI configuration support
+ */
+acpi_status
+acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
+                     acpi_handle root_pci_device, acpi_handle pci_region);
+
 #ifdef ACPI_FUTURE_USAGE
 /*
  * hwtimer - ACPI Timer prototypes
index 7dad9160f20998112cc302d0a239c9fa27fcd429..2ceb0c05b2d7d158a8f5489ac21fe445f4a9980f 100644 (file)
@@ -413,6 +413,7 @@ struct acpi_handler_info {
        void *context;          /* Context to be passed to handler */
        struct acpi_namespace_node *method_node;        /* Method node for this GPE level (saved) */
        u8 orig_flags;          /* Original misc info about this GPE */
+       u8 orig_enabled;        /* Set if the GPE was originally enabled */
 };
 
 union acpi_gpe_dispatch_info {
@@ -457,6 +458,7 @@ struct acpi_gpe_block_info {
        u32 register_count;     /* Number of register pairs in block */
        u16 gpe_count;          /* Number of individual GPEs in block */
        u8 block_base_number;   /* Base GPE number for this block */
+       u8 initialized;         /* If set, the GPE block has been initialized */
 };
 
 /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
@@ -473,7 +475,6 @@ struct acpi_gpe_walk_info {
        struct acpi_gpe_block_info *gpe_block;
        u16 count;
        acpi_owner_id owner_id;
-       u8 enable_this_gpe;
        u8 execute_by_owner_id;
 };
 
@@ -854,7 +855,7 @@ struct acpi_bit_register_info {
        ACPI_BITMASK_POWER_BUTTON_STATUS   | \
        ACPI_BITMASK_SLEEP_BUTTON_STATUS   | \
        ACPI_BITMASK_RT_CLOCK_STATUS       | \
-       ACPI_BITMASK_PCIEXP_WAKE_DISABLE   | \
+       ACPI_BITMASK_PCIEXP_WAKE_STATUS    | \
        ACPI_BITMASK_WAKE_STATUS)
 
 #define ACPI_BITMASK_TIMER_ENABLE               0x0001
@@ -909,15 +910,21 @@ struct acpi_bit_register_info {
 #define ACPI_OSI_WIN_VISTA              0x07
 #define ACPI_OSI_WINSRV_2008            0x08
 #define ACPI_OSI_WIN_VISTA_SP1          0x09
-#define ACPI_OSI_WIN_7                  0x0A
+#define ACPI_OSI_WIN_VISTA_SP2          0x0A
+#define ACPI_OSI_WIN_7                  0x0B
 
 #define ACPI_ALWAYS_ILLEGAL             0x00
 
 struct acpi_interface_info {
        char *name;
+       struct acpi_interface_info *next;
+       u8 flags;
        u8 value;
 };
 
+#define ACPI_OSI_INVALID                0x01
+#define ACPI_OSI_DYNAMIC                0x02
+
 struct acpi_port_info {
        char *name;
        u16 start;
@@ -997,7 +1004,7 @@ struct acpi_port_info {
 struct acpi_db_method_info {
        acpi_handle main_thread_gate;
        acpi_handle thread_complete_gate;
-       u32 *threads;
+       acpi_thread_id *threads;
        u32 num_threads;
        u32 num_created;
        u32 num_completed;
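
The acglobal.h and aclocal.h hunks above turn the _OSI string table into a mutable singly linked list (acpi_gbl_supported_interfaces, guarded by acpi_gbl_osi_mutex) whose nodes carry the new next and flags members. A rough sketch of the install/lookup operations the new utosi.c is expected to provide (names simplified, locking elided; treat the helper names as stand-ins):

    #include <linux/string.h>
    #include <linux/types.h>

    /* Simplified stand-in for struct acpi_interface_info. */
    struct iface {
            char *name;
            struct iface *next;
            u8 flags;       /* e.g. ACPI_OSI_INVALID, ACPI_OSI_DYNAMIC */
            u8 value;
    };

    static struct iface *iface_list;    /* acpi_gbl_supported_interfaces */

    /* Head insert; caller is assumed to hold the OSI mutex. */
    static void iface_install(struct iface *new)
    {
            new->next = iface_list;
            iface_list = new;
    }

    static struct iface *iface_get(const char *name)
    {
            struct iface *i;

            for (i = iface_list; i; i = i->next)
                    if (!strcmp(i->name, name))
                            return i;
            return NULL;
    }
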
index 9894929a2abbc8320f31133f3f68a5f516cc3721..8d5c9e0a495f8ab231101e051f824f52e7d5480a 100644 (file)
  * the plist contains a set of parens to allow variable-length lists.
  * These macros are used for both the debug and non-debug versions of the code.
  */
-#define ACPI_ERROR_NAMESPACE(s, e)      acpi_ns_report_error (AE_INFO, s, e);
-#define ACPI_ERROR_METHOD(s, n, p, e)   acpi_ns_report_method_error (AE_INFO, s, n, p, e);
+#define ACPI_ERROR_NAMESPACE(s, e)      acpi_ut_namespace_error (AE_INFO, s, e);
+#define ACPI_ERROR_METHOD(s, n, p, e)   acpi_ut_method_error (AE_INFO, s, n, p, e);
 #define ACPI_WARN_PREDEFINED(plist)     acpi_ut_predefined_warning plist
 #define ACPI_INFO_PREDEFINED(plist)     acpi_ut_predefined_info plist
 
index 9f60ff002203b91907ed0fa2b30dc3634ec0bb66..d44d3bc5b847363959fcf64e8d1751916d001c83 100644 (file)
@@ -338,18 +338,6 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
 
 u32 acpi_ns_local(acpi_object_type type);
 
-void
-acpi_ns_report_error(const char *module_name,
-                    u32 line_number,
-                    const char *internal_name, acpi_status lookup_status);
-
-void
-acpi_ns_report_method_error(const char *module_name,
-                           u32 line_number,
-                           const char *message,
-                           struct acpi_namespace_node *node,
-                           const char *path, acpi_status lookup_status);
-
 void
 acpi_ns_print_node_pathname(struct acpi_namespace_node *node, const char *msg);
 
index 54857fa87aafa3d5c157a7dff812df0d77108c4c..bdbfaf22bd14e0492cd07c9c35eb6f781a9fd54b 100644 (file)
@@ -248,7 +248,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
        u32                             base_byte_offset;   /* Byte offset within containing object */\
        u32                             value;              /* Value to store into the Bank or Index register */\
        u8                              start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
-       u8                              access_bit_width;       /* Read/Write size in bits (8-64) */
+
 
 struct acpi_object_field_common {      /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
        ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
index 35df755251ce0522907b4dd619f065e073c236e3..72e4183c193777add40ddff637eaa87806330296 100644 (file)
@@ -312,8 +312,6 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list);
 /*
  * uteval - object evaluation
  */
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
-
 acpi_status
 acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
                        char *path,
@@ -394,6 +392,21 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size);
 acpi_status
 acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
 
+/*
+ * utosi - Support for the _OSI predefined control method
+ */
+acpi_status acpi_ut_initialize_interfaces(void);
+
+void acpi_ut_interface_terminate(void);
+
+acpi_status acpi_ut_install_interface(acpi_string interface_name);
+
+acpi_status acpi_ut_remove_interface(acpi_string interface_name);
+
+struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name);
+
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
+
 /*
  * utstate - Generic state creation/cache routines
  */
@@ -473,17 +486,6 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position);
 
 acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer);
 
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_warning(const char *module_name,
-                          u32 line_number,
-                          char *pathname,
-                          u8 node_flags, const char *format, ...);
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_info(const char *module_name,
-                       u32 line_number,
-                       char *pathname, u8 node_flags, const char *format, ...);
-
 /* Values for Base above (16=Hex, 10=Decimal) */
 
 #define ACPI_ANY_BASE        0
@@ -574,6 +576,32 @@ acpi_status
 acpi_ut_create_list(char *list_name,
                    u16 object_size, struct acpi_memory_list **return_cache);
 
-#endif
+#endif                         /* ACPI_DBG_TRACK_ALLOCATIONS */
+
+/*
+ * utxferror - various error/warning output functions
+ */
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_warning(const char *module_name,
+                          u32 line_number,
+                          char *pathname,
+                          u8 node_flags, const char *format, ...);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_info(const char *module_name,
+                       u32 line_number,
+                       char *pathname, u8 node_flags, const char *format, ...);
+
+void
+acpi_ut_namespace_error(const char *module_name,
+                       u32 line_number,
+                       const char *internal_name, acpi_status lookup_status);
+
+void
+acpi_ut_method_error(const char *module_name,
+                    u32 line_number,
+                    const char *message,
+                    struct acpi_namespace_node *node,
+                    const char *path, acpi_status lookup_status);
 
 #endif                         /* _ACUTILS_H */
index 64750ee96e2012047742d2e5e20acb89110c6bbb..d94dd8974b55c5fb03845a52f64ec430c70a6ec1 100644 (file)
@@ -573,7 +573,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 
                                acpi_os_release_mutex(method_desc->method.
                                                      mutex->mutex.os_mutex);
-                               method_desc->method.mutex->mutex.thread_id = NULL;
+                               method_desc->method.mutex->mutex.thread_id = 0;
                        }
                }
 
index d555b374e314a881b463083c890563a658257f4d..6b0b5d08d97acf12d35016c96ce0eaaaa0126627 100644 (file)
@@ -300,10 +300,25 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
                         * we must enter this object into the namespace.  The created
                         * object is temporary and will be deleted upon completion of
                         * the execution of this method.
+                        *
+                        * Note 10/2010: Except for the Scope() op. This opcode does
+                        * not actually create a new object, it refers to an existing
+                        * object. However, for Scope(), we do indeed want to
+                        * open a new scope.
                         */
-                       status = acpi_ds_load2_begin_op(walk_state, NULL);
+                       if (op->common.aml_opcode != AML_SCOPE_OP) {
+                               status =
+                                   acpi_ds_load2_begin_op(walk_state, NULL);
+                       } else {
+                               status =
+                                   acpi_ds_scope_stack_push(op->named.node,
+                                                            op->named.node->
+                                                            type, walk_state);
+                               if (ACPI_FAILURE(status)) {
+                                       return_ACPI_STATUS(status);
+                               }
+                       }
                }
-
                break;
 
        case AML_CLASS_EXECUTE:
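
The comment in the hunk above is the key point: ASL such as Scope (\_SB.PCI0) { Name (X, 1) } refers to the existing PCI0 node rather than defining a new object, so during execution only the scope needs to be pushed; names created in the body then resolve under \_SB.PCI0. A toy model of the push, not the interpreter's real types:

    /* Toy scope stack: push an existing node so later name creation
     * resolves relative to it; popped when the Scope() body completes. */
    struct scope_entry {
            const void *node;       /* stands in for acpi_namespace_node */
            int type;
            struct scope_entry *next;
    };

    static struct scope_entry *scope_stack;

    static int scope_push(struct scope_entry *entry,
                          const void *node, int type)
    {
            entry->node = node;
            entry->type = type;
            entry->next = scope_stack;
            scope_stack = entry;
            return 0;
    }

    static void scope_pop(void)
    {
            if (scope_stack)
                    scope_stack = scope_stack->next;
    }
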
index 303618889da0a73a65a5d8ff8c79600cc90e6254..c61c3039c31ac3b951203640c555bb807f81c4b0 100644 (file)
@@ -93,47 +93,6 @@ acpi_status acpi_ev_initialize_events(void)
        return_ACPI_STATUS(status);
 }
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_install_fadt_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
- *              (0 and 1). The HW must be fully initialized at this point,
- *              including global lock support.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_install_fadt_gpes(void)
-{
-       acpi_status status;
-
-       ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
-
-       /* Namespace must be locked */
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-       if (ACPI_FAILURE(status)) {
-               return (status);
-       }
-
-       /* FADT GPE Block 0 */
-
-       (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
-                                          acpi_gbl_gpe_fadt_blocks[0]);
-
-       /* FADT GPE Block 1 */
-
-       (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
-                                          acpi_gbl_gpe_fadt_blocks[1]);
-
-       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-       return_ACPI_STATUS(AE_OK);
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_install_xrupt_handlers
index 85445fb5844e503fe1a7f9282afafd3bb7cd4740..020add3eee1c65bfa5d7e03b3d1fee5a074558c2 100644 (file)
@@ -363,6 +363,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
        gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
        gpe_block->register_count = register_count;
        gpe_block->block_base_number = gpe_block_base_number;
+       gpe_block->initialized = FALSE;
 
        ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
                    sizeof(struct acpi_generic_address));
@@ -385,11 +386,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
                return_ACPI_STATUS(status);
        }
 
+       acpi_all_gpes_initialized = FALSE;
+
 	/* Find all GPE methods (_Lxx or _Exx) for this block */
 
        walk_info.gpe_block = gpe_block;
        walk_info.gpe_device = gpe_device;
-       walk_info.enable_this_gpe = FALSE;
        walk_info.execute_by_owner_id = FALSE;
 
        status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
@@ -434,35 +436,34 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
  ******************************************************************************/
 
 acpi_status
-acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
-                            struct acpi_gpe_block_info *gpe_block)
+acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+                            struct acpi_gpe_block_info *gpe_block,
+                            void *ignored)
 {
        acpi_status status;
        struct acpi_gpe_event_info *gpe_event_info;
        u32 gpe_enabled_count;
        u32 gpe_index;
-       u32 gpe_number;
        u32 i;
        u32 j;
 
        ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
 
-       /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
-
-       if (!gpe_block) {
+       /*
+        * Ignore a null GPE block (e.g., if no GPE block 1 exists) and
+        * GPE blocks that have been initialized already.
+        */
+       if (!gpe_block || gpe_block->initialized) {
                return_ACPI_STATUS(AE_OK);
        }
 
        /*
-        * Enable all GPEs that have a corresponding method.  Any other GPEs
-        * within this block must be enabled via the acpi_enable_gpe interface.
+        * Enable all GPEs that have a corresponding method and have the
+        * ACPI_GPE_CAN_WAKE flag unset.  Any other GPEs within this block must
+        * be enabled via the acpi_enable_gpe() interface.
         */
        gpe_enabled_count = 0;
 
-       if (gpe_device == acpi_gbl_fadt_gpe_device) {
-               gpe_device = NULL;
-       }
-
        for (i = 0; i < gpe_block->register_count; i++) {
                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
 
@@ -470,27 +471,19 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
 
                        gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
                        gpe_event_info = &gpe_block->event_info[gpe_index];
-                       gpe_number = gpe_index + gpe_block->block_base_number;
 
                        /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
 
-                       if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
+                       if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)
+                           || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
                                continue;
                        }
 
-                       /*
-                        * If the GPE has already been enabled for runtime
-                        * signaling, make sure it remains enabled, but do not
-                        * increment its reference counter.
-                        */
-                       status = gpe_event_info->runtime_count ?
-                               acpi_ev_enable_gpe(gpe_event_info) :
-                               acpi_enable_gpe(gpe_device, gpe_number);
-
+                       status = acpi_raw_enable_gpe(gpe_event_info);
                        if (ACPI_FAILURE(status)) {
                                ACPI_EXCEPTION((AE_INFO, status,
-                                               "Could not enable GPE 0x%02X",
-                                               gpe_number));
+                                       "Could not enable GPE 0x%02X",
+                                       gpe_index + gpe_block->block_base_number));
                                continue;
                        }
 
@@ -504,5 +497,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
                                  gpe_enabled_count));
        }
 
+       gpe_block->initialized = TRUE;
+
        return_ACPI_STATUS(AE_OK);
 }
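
acpi_ev_initialize_gpe_block() now takes the (xrupt_info, gpe_block, context) triple so it can be handed to the GPE-list walker instead of being called once per block, and the new initialized flag makes repeated walks idempotent. A sketch of that callback-driven shape with generic stand-in types (the real walker is acpi_ev_walk_gpe_list(), used by acpi_update_gpes() below):

    typedef int (*gpe_callback)(void *xrupt_info, void *gpe_block,
                                void *context);

    struct gpe_block { int initialized; struct gpe_block *next; };

    static int init_gpe_block(void *xrupt_info, void *block, void *ignored)
    {
            struct gpe_block *b = block;

            if (!b || b->initialized)       /* skip blocks already done */
                    return 0;
            /* ... enable runtime GPEs that have _Lxx/_Exx methods ... */
            b->initialized = 1;
            return 0;
    }

    static int walk_gpe_list(struct gpe_block *head, gpe_callback cb,
                             void *ctx)
    {
            struct gpe_block *b;
            int status;

            for (b = head; b; b = b->next) {
                    status = cb(NULL, b, ctx);
                    if (status)
                            return status;
            }
            return 0;
    }
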
index 3084c5de1bba1b2a22c8ca2324a885ad6838b058..2c7def95f721c6d08596c9ce852a3ac4a1c54e38 100644 (file)
@@ -210,8 +210,7 @@ acpi_status acpi_ev_gpe_initialize(void)
  *
  * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
  *              result of a Load() or load_table() operation. If new GPE
- *              methods have been installed, register the new methods and
- *              enable and runtime GPEs that are associated with them.
+ *              methods have been installed, register the new methods.
  *
  ******************************************************************************/
 
@@ -239,7 +238,6 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
        walk_info.owner_id = table_owner_id;
        walk_info.execute_by_owner_id = TRUE;
        walk_info.count = 0;
-       walk_info.enable_this_gpe = TRUE;
 
        /* Walk the interrupt level descriptor list */
 
@@ -301,8 +299,6 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
  *
 * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
  *    with that owner.
- * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
- *    method is immediately enabled (Used for Load/load_table operators)
  *
  ******************************************************************************/
 
@@ -315,8 +311,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
        struct acpi_gpe_walk_info *walk_info =
            ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
        struct acpi_gpe_event_info *gpe_event_info;
-       struct acpi_namespace_node *gpe_device;
-       acpi_status status;
        u32 gpe_number;
        char name[ACPI_NAME_SIZE + 1];
        u8 type;
@@ -421,29 +415,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
        gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
        gpe_event_info->dispatch.method_node = method_node;
 
-       /*
-        * Enable this GPE if requested. This only happens when during the
-        * execution of a Load or load_table operator. We have found a new
-        * GPE method and want to immediately enable the GPE if it is a
-        * runtime GPE.
-        */
-       if (walk_info->enable_this_gpe) {
-
-               walk_info->count++;
-               gpe_device = walk_info->gpe_device;
-
-               if (gpe_device == acpi_gbl_fadt_gpe_device) {
-                       gpe_device = NULL;
-               }
-
-               status = acpi_enable_gpe(gpe_device, gpe_number);
-               if (ACPI_FAILURE(status)) {
-                       ACPI_EXCEPTION((AE_INFO, status,
-                                       "Could not enable GPE 0x%02X",
-                                       gpe_number));
-               }
-       }
-
        ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
                          "Registered GPE method %s as GPE number 0x%.2X\n",
                          name, gpe_number));
index df0aea9a8cfdce1c861dd2baa2e77b94e9660e3a..fcaed9fb44ff1957dee8864d3d4feba13ec3f8ba 100644 (file)
@@ -553,7 +553,7 @@ acpi_status acpi_ev_release_global_lock(void)
        acpi_gbl_global_lock_acquired = FALSE;
 
        /* Release the local GL mutex */
-       acpi_ev_global_lock_thread_id = NULL;
+       acpi_ev_global_lock_thread_id = 0;
        acpi_ev_global_lock_acquired = 0;
        acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
        return_ACPI_STATUS(status);
index f40d271bf5688fa6401f542d21c42228fa6268ea..0b47a6dc92902f5333d3fc668597992a2bee24a1 100644 (file)
@@ -289,8 +289,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
        }
 
        /*
-        * Get the PCI device and function numbers from the _ADR object contained
-        * in the parent's scope.
+        * Get the PCI device and function numbers from the _ADR object
+        * contained in the parent's scope.
         */
        status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR,
                                                 pci_device_node, &pci_value);
@@ -320,9 +320,15 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
                pci_id->bus = ACPI_LOWORD(pci_value);
        }
 
-       /* Complete this device's pci_id */
+       /* Complete/update the PCI ID for this device */
 
-       acpi_os_derive_pci_id(pci_root_node, region_obj->region.node, &pci_id);
+       status =
+           acpi_hw_derive_pci_id(pci_id, pci_root_node,
+                                 region_obj->region.node);
+       if (ACPI_FAILURE(status)) {
+               ACPI_FREE(pci_id);
+               return_ACPI_STATUS(status);
+       }
 
        *region_context = pci_id;
        return_ACPI_STATUS(AE_OK);
index 14e48add32fa0ede706a120b87d3a885e8ecbf0f..36af222cac654d3e2de0cb9c4ed7dc45fb4a0a49 100644 (file)
@@ -726,15 +726,16 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
                        (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
 
        /*
-        * If the GPE is associated with a method and it cannot wake up the
-        * system from sleep states, it was enabled automatically during
-        * initialization, so it has to be disabled now to avoid spurious
-        * execution of the handler.
+        * If the GPE is associated with a method, it might have been enabled
+        * automatically during initialization, in which case it has to be
+        * disabled now to avoid spurious execution of the handler.
         */
 
        if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
-           && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE))
+           && gpe_event_info->runtime_count) {
+               handler->orig_enabled = 1;
                (void)acpi_raw_disable_gpe(gpe_event_info);
+       }
 
        /* Install the handler */
 
@@ -837,13 +838,13 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
        gpe_event_info->flags |= handler->orig_flags;
 
        /*
-        * If the GPE was previously associated with a method and it cannot wake
-        * up the system from sleep states, it should be enabled at this point
-        * to restore the post-initialization configuration.
+        * If the GPE was previously associated with a method and it was
+        * enabled, it should be enabled at this point to restore the
+        * post-initialization configuration.
         */
 
        if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
-           && !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE))
+           && handler->orig_enabled)
                (void)acpi_raw_enable_gpe(gpe_event_info);
 
        /* Now we can free the handler object */
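
The install/remove pair above now records whether a method GPE was actually enabled (orig_enabled) instead of guessing from ACPI_GPE_CAN_WAKE, so removal restores exactly the pre-install state. The save/restore pattern reduced to its essentials, with stand-in types (the real calls are acpi_raw_disable_gpe()/acpi_raw_enable_gpe()):

    #define DISPATCH_METHOD 0x02  /* stand-in for ACPI_GPE_DISPATCH_METHOD */

    struct gpe_event { unsigned flags; unsigned runtime_count; };
    struct handler_info { unsigned orig_flags; unsigned orig_enabled; };

    static void handler_install(struct gpe_event *ev, struct handler_info *h)
    {
            h->orig_flags = ev->flags & DISPATCH_METHOD;
            h->orig_enabled = 0;
            if ((h->orig_flags & DISPATCH_METHOD) && ev->runtime_count) {
                    h->orig_enabled = 1;    /* remember that we disabled it */
                    /* acpi_raw_disable_gpe(ev); */
            }
    }

    static void handler_remove(struct gpe_event *ev, struct handler_info *h)
    {
            ev->flags |= h->orig_flags;
            if ((h->orig_flags & DISPATCH_METHOD) && h->orig_enabled) {
                    /* acpi_raw_enable_gpe(ev); */
            }
    }
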
index 304825528d4803d22e025b9fb6d2220b10851d0b..a1dabe3fd8ae771c7f8e676d907e4ad4cfba4ba3 100644 (file)
@@ -379,21 +379,12 @@ acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
        /* Ensure that we have a valid GPE number */
 
        gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-       if (!gpe_event_info) {
+       if (gpe_event_info) {
+               gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+       } else {
                status = AE_BAD_PARAMETER;
-               goto unlock_and_exit;
-       }
-
-       if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
-               goto unlock_and_exit;
        }
 
-       gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
-       if (gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) {
-               (void)acpi_raw_disable_gpe(gpe_event_info);
-       }
-
-unlock_and_exit:
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        return_ACPI_STATUS(status);
 }
@@ -651,7 +642,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
                       struct acpi_generic_address *gpe_block_address,
                       u32 register_count, u32 interrupt_number)
 {
-       acpi_status status;
+       acpi_status status = AE_OK;
        union acpi_operand_object *obj_desc;
        struct acpi_namespace_node *node;
        struct acpi_gpe_block_info *gpe_block;
@@ -715,10 +706,6 @@ acpi_install_gpe_block(acpi_handle gpe_device,
 
        obj_desc->device.gpe_block = gpe_block;
 
-       /* Enable the runtime GPEs in the new block */
-
-       status = acpi_ev_initialize_gpe_block(node, gpe_block);
-
       unlock_and_exit:
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
        return_ACPI_STATUS(status);
@@ -924,3 +911,43 @@ acpi_status acpi_enable_all_runtime_gpes(void)
 
        return_ACPI_STATUS(status);
 }
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_update_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and
+ *              are not pointed to by any device _PRW methods indicating that
+ *              these GPEs are generally intended for system or device wakeup
+ *              (such GPEs have to be enabled directly when the devices whose
+ *              _PRW methods point to them are set up for wakeup signaling).
+ *
+ ******************************************************************************/
+
+acpi_status acpi_update_gpes(void)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_update_gpes);
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       } else if (acpi_all_gpes_initialized) {
+               goto unlock;
+       }
+
+       status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
+       if (ACPI_SUCCESS(status)) {
+               acpi_all_gpes_initialized = TRUE;
+       }
+
+unlock:
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+       return_ACPI_STATUS(status);
+}
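
The new acpi_update_gpes() gives the OS a single call that enables every runtime GPE with an _Lxx/_Exx method while leaving _PRW wake GPEs alone. A hedged sketch of the call order the OSPM side is expected to follow (prototypes assumed visible via <acpi/acpi.h>; error handling omitted):

    #include <acpi/acpi.h>

    static void os_acpi_gpe_init(acpi_handle gpe_device, u32 wake_gpe)
    {
            /*
             * 1. While scanning _PRW methods, mark wake-capable GPEs so
             *    the bulk enable below skips them.
             */
            acpi_gpe_can_wake(gpe_device, wake_gpe);

            /* 2. Enable all remaining GPEs that have _Lxx/_Exx methods. */
            acpi_update_gpes();

            /*
             * 3. Wake GPEs are enabled one by one, only when their device
             *    is actually armed for wakeup.
             */
            acpi_enable_gpe(gpe_device, wake_gpe);
    }
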
index 541cbc1544d5d92a5afe113ddb083bc7481b1534..ce9314f79451ecf5a42e02486475a9bfeca54987 100644 (file)
@@ -64,6 +64,12 @@ ACPI_MODULE_NAME("evxfregn")
  *
  * DESCRIPTION: Install a handler for all op_regions of a given space_id.
  *
+ * NOTE: This function should only be called after acpi_enable_subsystem has
+ * been called. This is because any _REG methods associated with the Space ID
+ * are executed here, and these methods can only be safely executed after
+ * the default handlers have been installed and the hardware has been
+ * initialized (via acpi_enable_subsystem.)
+ *
  ******************************************************************************/
 acpi_status
 acpi_install_address_space_handler(acpi_handle device,
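
The NOTE added above pins down an ordering requirement: because installing an address space handler executes the matching _REG methods, it must come after acpi_enable_subsystem(). A sketch of the safe sequence (flags and error checks elided; the handler/setup callbacks are the caller's):

    #include <acpi/acpi.h>

    static acpi_status install_ec_handler(acpi_handle ec_device,
                                          acpi_adr_space_handler handler,
                                          acpi_adr_space_setup setup)
    {
            /* Default handlers and hardware must be initialized first ... */
            acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);
            acpi_initialize_objects(ACPI_FULL_INITIALIZATION);

            /* ... so the _REG methods executed here can run safely. */
            return acpi_install_address_space_handler(ec_device,
                                                      ACPI_ADR_SPACE_EC,
                                                      handler, setup, NULL);
    }
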
index 047217303a4b748bf091a9a88454f37fc86e9400..38293fd3e088bd008fd5f9855ff72b15766bdb35 100644 (file)
@@ -119,8 +119,8 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
        }
 
        /*
-        * Exit now for SMBus or IPMI address space, it has a non-linear address space
-        * and the request cannot be directly validated
+        * Exit now for SMBus or IPMI address space, it has a non-linear
+        * address space and the request cannot be directly validated
         */
        if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
            rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
@@ -147,8 +147,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
         * (Region length is specified in bytes)
         */
        if (rgn_desc->region.length <
-           (obj_desc->common_field.base_byte_offset +
-            field_datum_byte_offset +
+           (obj_desc->common_field.base_byte_offset + field_datum_byte_offset +
             obj_desc->common_field.access_byte_width)) {
                if (acpi_gbl_enable_interpreter_slack) {
                        /*
@@ -680,6 +679,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
        u32 buffer_tail_bits;
        u32 datum_count;
        u32 field_datum_count;
+       u32 access_bit_width;
        u32 i;
 
        ACPI_FUNCTION_TRACE(ex_extract_from_field);
@@ -694,16 +694,36 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
 
                return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
        }
+
        ACPI_MEMSET(buffer, 0, buffer_length);
+       access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
+
+       /* Handle the simple case here */
+
+       if ((obj_desc->common_field.start_field_bit_offset == 0) &&
+           (obj_desc->common_field.bit_length == access_bit_width)) {
+               status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
+               return_ACPI_STATUS(status);
+       }
+
+/* TBD: Move to common setup code */
+
+       /* Field algorithm is limited to sizeof(u64), truncate if needed */
+
+       if (obj_desc->common_field.access_byte_width > sizeof(u64)) {
+               obj_desc->common_field.access_byte_width = sizeof(u64);
+               access_bit_width = sizeof(u64) * 8;
+       }
 
        /* Compute the number of datums (access width data items) */
 
-       datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
-                                      obj_desc->common_field.access_bit_width);
+       datum_count =
+           ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
+                            access_bit_width);
+
        field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
                                             obj_desc->common_field.
                                             start_field_bit_offset,
-                                            obj_desc->common_field.
                                             access_bit_width);
 
        /* Priming read from the field */
@@ -738,12 +758,11 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
                 * This avoids the differences in behavior between different compilers
                 * concerning shift values larger than the target data width.
                 */
-               if ((obj_desc->common_field.access_bit_width -
-                    obj_desc->common_field.start_field_bit_offset) <
+               if (access_bit_width -
+                   obj_desc->common_field.start_field_bit_offset <
                    ACPI_INTEGER_BIT_SIZE) {
                        merged_datum |=
-                           raw_datum << (obj_desc->common_field.
-                                         access_bit_width -
+                           raw_datum << (access_bit_width -
                                          obj_desc->common_field.
                                          start_field_bit_offset);
                }
@@ -765,8 +784,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
 
        /* Mask off any extra bits in the last datum */
 
-       buffer_tail_bits = obj_desc->common_field.bit_length %
-           obj_desc->common_field.access_bit_width;
+       buffer_tail_bits = obj_desc->common_field.bit_length % access_bit_width;
        if (buffer_tail_bits) {
                merged_datum &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
        }
@@ -798,6 +816,7 @@ acpi_status
 acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
                          void *buffer, u32 buffer_length)
 {
+       void *new_buffer;
        acpi_status status;
        u64 mask;
        u64 width_mask;
@@ -808,9 +827,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
        u32 buffer_tail_bits;
        u32 datum_count;
        u32 field_datum_count;
-       u32 i;
+       u32 access_bit_width;
        u32 required_length;
-       void *new_buffer;
+       u32 i;
 
        ACPI_FUNCTION_TRACE(ex_insert_into_field);
 
@@ -844,17 +863,24 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
                buffer_length = required_length;
        }
 
+/* TBD: Move to common setup code */
+
+       /* Algo is limited to sizeof(u64), so cut the access_byte_width */
+       if (obj_desc->common_field.access_byte_width > sizeof(u64)) {
+               obj_desc->common_field.access_byte_width = sizeof(u64);
+       }
+
+       access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
+
        /*
         * Create the bitmasks used for bit insertion.
         * Note: This if/else is used to bypass compiler differences with the
         * shift operator
         */
-       if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) {
+       if (access_bit_width == ACPI_INTEGER_BIT_SIZE) {
                width_mask = ACPI_UINT64_MAX;
        } else {
-               width_mask =
-                   ACPI_MASK_BITS_ABOVE(obj_desc->common_field.
-                                        access_bit_width);
+               width_mask = ACPI_MASK_BITS_ABOVE(access_bit_width);
        }
 
        mask = width_mask &
@@ -863,12 +889,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
        /* Compute the number of datums (access width data items) */
 
        datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
-                                      obj_desc->common_field.access_bit_width);
+                                      access_bit_width);
 
        field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
                                             obj_desc->common_field.
                                             start_field_bit_offset,
-                                            obj_desc->common_field.
                                             access_bit_width);
 
        /* Get initial Datum from the input buffer */
@@ -905,12 +930,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
                 * This avoids the differences in behavior between different compilers
                 * concerning shift values larger than the target data width.
                 */
-               if ((obj_desc->common_field.access_bit_width -
+               if ((access_bit_width -
                     obj_desc->common_field.start_field_bit_offset) <
                    ACPI_INTEGER_BIT_SIZE) {
                        merged_datum =
-                           raw_datum >> (obj_desc->common_field.
-                                         access_bit_width -
+                           raw_datum >> (access_bit_width -
                                          obj_desc->common_field.
                                          start_field_bit_offset);
                } else {
@@ -929,6 +953,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
                ACPI_MEMCPY(&raw_datum, ((char *)buffer) + buffer_offset,
                            ACPI_MIN(obj_desc->common_field.access_byte_width,
                                     buffer_length - buffer_offset));
+
                merged_datum |=
                    raw_datum << obj_desc->common_field.start_field_bit_offset;
        }
@@ -937,7 +962,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
 
        buffer_tail_bits = (obj_desc->common_field.bit_length +
                            obj_desc->common_field.start_field_bit_offset) %
-           obj_desc->common_field.access_bit_width;
+           access_bit_width;
        if (buffer_tail_bits) {
                mask &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
        }
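
With access_bit_width gone from the object, exfldio.c recomputes it per call as ACPI_MUL_8(access_byte_width), capped at 64 bits, and derives both datum counts from it. The arithmetic, worked through standalone (macros expanded; the field geometry values are illustrative):

    #include <stdio.h>

    #define MUL_8(x)          ((x) << 3)                /* ACPI_MUL_8 */
    #define ROUND_UP_TO(v, b) (((v) + ((b) - 1)) / (b)) /* ACPI_ROUND_UP_TO */

    int main(void)
    {
            unsigned access_byte_width = 2;             /* WordAcc field */
            unsigned bit_length = 28, start_offset = 6; /* illustrative */
            unsigned access_bit_width = MUL_8(access_byte_width); /* 16 */

            /* Datums needed to fill the caller's buffer: ceil(28/16) = 2 */
            unsigned datum_count =
                ROUND_UP_TO(bit_length, access_bit_width);

            /* Datums actually spanned in the region: ceil((28+6)/16) = 3 */
            unsigned field_datum_count =
                ROUND_UP_TO(bit_length + start_offset, access_bit_width);

            printf("datums: %u, spanned: %u\n",
                   datum_count, field_datum_count);
            return 0;
    }
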
index f73be97043c0e564b8cab1a56dca2f00697ce807..6af14e43f8394be3f6657994eb42eda9e92fd067 100644 (file)
@@ -336,7 +336,7 @@ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
 
        /* Clear mutex info */
 
-       obj_desc->mutex.thread_id = NULL;
+       obj_desc->mutex.thread_id = 0;
        return_ACPI_STATUS(status);
 }
 
@@ -393,10 +393,10 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
        if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
            (obj_desc != acpi_gbl_global_lock_mutex)) {
                ACPI_ERROR((AE_INFO,
-                           "Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
-                           ACPI_CAST_PTR(void, walk_state->thread->thread_id),
+                           "Thread %u cannot release Mutex [%4.4s] acquired by thread %u",
+                           (u32)walk_state->thread->thread_id,
                            acpi_ut_get_node_name(obj_desc->mutex.node),
-                           ACPI_CAST_PTR(void, owner_thread->thread_id)));
+                           (u32)owner_thread->thread_id));
                return_ACPI_STATUS(AE_AML_NOT_OWNER);
        }
 
@@ -488,7 +488,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
                /* Mark mutex unowned */
 
                obj_desc->mutex.owner_thread = NULL;
-               obj_desc->mutex.thread_id = NULL;
+               obj_desc->mutex.thread_id = 0;
 
                /* Update Thread sync_level (Last mutex is the important one) */
 
index 98a331d2249b97394e2e0f006c615bdc08d33010..7aae29f73d3f2dfbc5438ff874e05a7ff7855f5d 100644 (file)
@@ -355,12 +355,10 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
                return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
        }
 
-       /* Setup width (access granularity) fields */
+       /* Setup width (access granularity) fields (values are: 1, 2, 4, 8) */
 
        obj_desc->common_field.access_byte_width = (u8)
-           ACPI_DIV_8(access_bit_width);       /* 1,  2,  4,  8 */
-
-       obj_desc->common_field.access_bit_width = (u8) access_bit_width;
+           ACPI_DIV_8(access_bit_width);
 
        /*
         * base_byte_offset is the address of the start of the field within the
@@ -405,8 +403,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
 {
        union acpi_operand_object *obj_desc;
        union acpi_operand_object *second_desc = NULL;
-       u32 type;
        acpi_status status;
+       u32 access_byte_width;
+       u32 type;
 
        ACPI_FUNCTION_TRACE(ex_prep_field_value);
 
@@ -421,8 +420,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
                type = acpi_ns_get_type(info->region_node);
                if (type != ACPI_TYPE_REGION) {
                        ACPI_ERROR((AE_INFO,
-                                   "Needed Region, found type 0x%X (%s)",
-                                   type, acpi_ut_get_type_name(type)));
+                                   "Needed Region, found type 0x%X (%s)", type,
+                                   acpi_ut_get_type_name(type)));
 
                        return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
                }
@@ -438,7 +437,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
        /* Initialize areas of the object that are common to all fields */
 
        obj_desc->common_field.node = info->field_node;
-       status = acpi_ex_prep_common_field_object(obj_desc, info->field_flags,
+       status = acpi_ex_prep_common_field_object(obj_desc,
+                                                 info->field_flags,
                                                  info->attribute,
                                                  info->field_bit_position,
                                                  info->field_bit_length);
@@ -455,26 +455,25 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
                obj_desc->field.region_obj =
                    acpi_ns_get_attached_object(info->region_node);
 
-               /* An additional reference for the container */
+               /* Allow full data read from EC address space */
 
-               acpi_ut_add_reference(obj_desc->field.region_obj);
+               if ((obj_desc->field.region_obj->region.space_id ==
+                    ACPI_ADR_SPACE_EC)
+                   && (obj_desc->common_field.bit_length > 8)) {
+                       access_byte_width =
+                           ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.
+                                                       bit_length);
+
+                       /* Maximum byte width supported is 255 */
 
-               /* allow full data read from EC address space */
-               if (obj_desc->field.region_obj->region.space_id ==
-                       ACPI_ADR_SPACE_EC) {
-                       if (obj_desc->common_field.bit_length > 8) {
-                               unsigned width =
-                                       ACPI_ROUND_BITS_UP_TO_BYTES(
-                                       obj_desc->common_field.bit_length);
-                               // access_bit_width is u8, don't overflow it
-                               if (width > 8)
-                                       width = 8;
+                       if (access_byte_width < 256) {
                                obj_desc->common_field.access_byte_width =
-                                                       width;
-                               obj_desc->common_field.access_bit_width =
-                                                       8 * width;
+                                   (u8)access_byte_width;
                        }
                }
+               /* An additional reference for the container */
+
+               acpi_ut_add_reference(obj_desc->field.region_obj);
 
                ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
                                  "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
index 8819d2ac5aee73b0eb8a4bf205b9c1efa7abb5ab..de17e10da0ed9c5593e3e1d1af8bfd0d9dd81ee4 100644 (file)
@@ -353,7 +353,6 @@ acpi_ex_pci_config_space_handler(u32 function,
        acpi_status status = AE_OK;
        struct acpi_pci_id *pci_id;
        u16 pci_register;
-       u32 value32;
 
        ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
 
@@ -381,8 +380,7 @@ acpi_ex_pci_config_space_handler(u32 function,
        case ACPI_READ:
 
                status = acpi_os_read_pci_configuration(pci_id, pci_register,
-                                                       &value32, bit_width);
-               *value = value32;
+                                                       value, bit_width);
                break;
 
        case ACPI_WRITE:
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
new file mode 100644 (file)
index 0000000..ad21c7d
--- /dev/null
@@ -0,0 +1,412 @@
+/*******************************************************************************
+ *
+ * Module Name: hwpci - Obtain PCI bus, device, and function numbers
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("hwpci")
+
+/* PCI configuration space values */
+#define PCI_CFG_HEADER_TYPE_REG             0x0E
+#define PCI_CFG_PRIMARY_BUS_NUMBER_REG      0x18
+#define PCI_CFG_SECONDARY_BUS_NUMBER_REG    0x19
+/* PCI header values */
+#define PCI_HEADER_TYPE_MASK                0x7F
+#define PCI_TYPE_BRIDGE                     0x01
+#define PCI_TYPE_CARDBUS_BRIDGE             0x02
+typedef struct acpi_pci_device {
+       acpi_handle device;
+       struct acpi_pci_device *next;
+
+} acpi_pci_device;
+
+/* Local prototypes */
+
+static acpi_status
+acpi_hw_build_pci_list(acpi_handle root_pci_device,
+                      acpi_handle pci_region,
+                      struct acpi_pci_device **return_list_head);
+
+static acpi_status
+acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
+                        struct acpi_pci_device *list_head);
+
+static void acpi_hw_delete_pci_list(struct acpi_pci_device *list_head);
+
+static acpi_status
+acpi_hw_get_pci_device_info(struct acpi_pci_id *pci_id,
+                           acpi_handle pci_device,
+                           u16 *bus_number, u8 *is_bridge);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_derive_pci_id
+ *
+ * PARAMETERS:  pci_id              - Initial values for the PCI ID. May be
+ *                                    modified by this function.
+ *              root_pci_device     - A handle to a PCI device object. This
+ *                                    object must be a PCI Root Bridge having a
+ *                                    _HID value of either PNP0A03 or PNP0A08
+ *              pci_region          - A handle to a PCI configuration space
+ *                                    Operation Region being initialized
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function derives a full PCI ID for a PCI device,
+ *              consisting of a Segment number, Bus number, Device number,
+ *              and function code.
+ *
+ *              The PCI hardware dynamically configures PCI bus numbers
+ *              depending on the bus topology discovered during system
+ *              initialization. This function is invoked during configuration
+ *              of a PCI_Config Operation Region in order to (possibly) update
+ *              the Bus/Device/Function numbers in the pci_id with the actual
+ *              values as determined by the hardware and operating system
+ *              configuration.
+ *
+ *              The pci_id parameter is initially populated during the Operation
+ *              Region initialization. This function is then called, and will
+ *              make any necessary modifications to the Bus, Device, or
+ *              Function number PCI ID subfields as appropriate for the
+ *              current hardware and OS configuration.
+ *
+ * NOTE:        Created 08/2010. Replaces the previous OSL acpi_os_derive_pci_id
+ *              interface since this feature is OS-independent. This module
+ *              specifically avoids any use of recursion by building a local
+ *              temporary device list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
+                     acpi_handle root_pci_device, acpi_handle pci_region)
+{
+       acpi_status status;
+       struct acpi_pci_device *list_head = NULL;
+
+       ACPI_FUNCTION_TRACE(hw_derive_pci_id);
+
+       if (!pci_id) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       /* Build a list of PCI devices, from pci_region up to root_pci_device */
+
+       status =
+           acpi_hw_build_pci_list(root_pci_device, pci_region, &list_head);
+       if (ACPI_SUCCESS(status)) {
+
+               /* Walk the list, updating the PCI device/function/bus numbers */
+
+               status = acpi_hw_process_pci_list(pci_id, list_head);
+       }
+
+       /* Always delete the list */
+
+       acpi_hw_delete_pci_list(list_head);
+       return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_build_pci_list
+ *
+ * PARAMETERS:  root_pci_device     - A handle to a PCI device object. This
+ *                                    object is guaranteed to be a PCI Root
+ *                                    Bridge having a _HID value of either
+ *                                    PNP0A03 or PNP0A08
+ *              pci_region          - A handle to the PCI configuration space
+ *                                    Operation Region
+ *              return_list_head    - Where the PCI device list is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Builds a list of devices from the input PCI region up to the
+ *              Root PCI device for this namespace subtree.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_build_pci_list(acpi_handle root_pci_device,
+                      acpi_handle pci_region,
+                      struct acpi_pci_device **return_list_head)
+{
+       acpi_handle current_device;
+       acpi_handle parent_device;
+       acpi_status status;
+       struct acpi_pci_device *list_element;
+       struct acpi_pci_device *list_head = NULL;
+
+       /*
+        * Ascend namespace branch until the root_pci_device is reached, building
+        * a list of device nodes. Loop will exit when either the PCI device is
+        * found, or the root of the namespace is reached.
+        */
+       current_device = pci_region;
+       while (1) {
+               status = acpi_get_parent(current_device, &parent_device);
+               if (ACPI_FAILURE(status)) {
+                       return (status);
+               }
+
+               /* Finished when we reach the PCI root device (PNP0A03 or PNP0A08) */
+
+               if (parent_device == root_pci_device) {
+                       *return_list_head = list_head;
+                       return (AE_OK);
+               }
+
+               list_element = ACPI_ALLOCATE(sizeof(struct acpi_pci_device));
+               if (!list_element) {
+                       return (AE_NO_MEMORY);
+               }
+
+               /* Put new element at the head of the list */
+
+               list_element->next = list_head;
+               list_element->device = parent_device;
+               list_head = list_element;
+
+               current_device = parent_device;
+       }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_process_pci_list
+ *
+ * PARAMETERS:  pci_id              - Initial values for the PCI ID. May be
+ *                                    modified by this function.
+ *              list_head           - Device list created by
+ *                                    acpi_hw_build_pci_list
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Walk downward through the PCI device list, getting the device
+ *              info for each, via the PCI configuration space and updating
+ *              the PCI ID as necessary. Deletes the list during traversal.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
+                        struct acpi_pci_device *list_head)
+{
+       acpi_status status = AE_OK;
+       struct acpi_pci_device *info;
+       u16 bus_number;
+       u8 is_bridge = TRUE;
+
+       ACPI_FUNCTION_NAME(hw_process_pci_list);
+
+       ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+                         "Input PciId:  Seg %4.4X Bus %4.4X Dev %4.4X Func %4.4X\n",
+                         pci_id->segment, pci_id->bus, pci_id->device,
+                         pci_id->function));
+
+       bus_number = pci_id->bus;
+
+       /*
+        * Descend down the namespace tree, collecting PCI device, function,
+        * and bus numbers. bus_number is only important for PCI bridges.
+        * Algorithm: As we descend the tree, use the last valid PCI device,
+        * function, and bus numbers that are discovered, and assign them
+        * to the PCI ID for the target device.
+        */
+       info = list_head;
+       while (info) {
+               status = acpi_hw_get_pci_device_info(pci_id, info->device,
+                                                    &bus_number, &is_bridge);
+               if (ACPI_FAILURE(status)) {
+                       return_ACPI_STATUS(status);
+               }
+
+               info = info->next;
+       }
+
+       ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+                         "Output PciId: Seg %4.4X Bus %4.4X Dev %4.4X Func %4.4X "
+                         "Status %X BusNumber %X IsBridge %X\n",
+                         pci_id->segment, pci_id->bus, pci_id->device,
+                         pci_id->function, status, bus_number, is_bridge));
+
+       return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_delete_pci_list
+ *
+ * PARAMETERS:  list_head           - Device list created by
+ *                                    acpi_hw_build_pci_list
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Free the entire PCI list.
+ *
+ ******************************************************************************/
+
+static void acpi_hw_delete_pci_list(struct acpi_pci_device *list_head)
+{
+       struct acpi_pci_device *next;
+       struct acpi_pci_device *previous;
+
+       next = list_head;
+       while (next) {
+               previous = next;
+               next = previous->next;
+               ACPI_FREE(previous);
+       }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_get_pci_device_info
+ *
+ * PARAMETERS:  pci_id              - Initial values for the PCI ID. May be
+ *                                    modified by this function.
+ *              pci_device          - Handle for the PCI device object
+ *              bus_number          - Where a PCI bridge bus number is returned
+ *              is_bridge           - Return value, indicates if this PCI
+ *                                    device is a PCI bridge
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get the device info for a single PCI device object. Get the
+ *              _ADR (contains PCI device and function numbers), and for PCI
+ *              bridge devices, get the bus number from PCI configuration
+ *              space.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_get_pci_device_info(struct acpi_pci_id *pci_id,
+                           acpi_handle pci_device,
+                           u16 *bus_number, u8 *is_bridge)
+{
+       acpi_status status;
+       acpi_object_type object_type;
+       u64 return_value;
+       u64 pci_value;
+
+       /* We only care about objects of type Device */
+
+       status = acpi_get_type(pci_device, &object_type);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       if (object_type != ACPI_TYPE_DEVICE) {
+               return (AE_OK);
+       }
+
+       /* We need an _ADR. Ignore device if not present */
+
+       status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR,
+                                                pci_device, &return_value);
+       if (ACPI_FAILURE(status)) {
+               return (AE_OK);
+       }
+
+       /*
+        * From _ADR, get the PCI Device and Function and
+        * update the PCI ID.
+        */
+       pci_id->device = ACPI_HIWORD(ACPI_LODWORD(return_value));
+       pci_id->function = ACPI_LOWORD(ACPI_LODWORD(return_value));
+
+       /*
+        * If the previous device was a bridge, use the previous
+        * device bus number
+        */
+       if (*is_bridge) {
+               pci_id->bus = *bus_number;
+       }
+
+       /*
+        * Get the bus numbers from PCI Config space:
+        *
+        * First, get the PCI header_type
+        */
+       *is_bridge = FALSE;
+       status = acpi_os_read_pci_configuration(pci_id,
+                                               PCI_CFG_HEADER_TYPE_REG,
+                                               &pci_value, 8);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       /* We only care about bridges (1=pci_bridge, 2=card_bus_bridge) */
+
+       pci_value &= PCI_HEADER_TYPE_MASK;
+
+       if ((pci_value != PCI_TYPE_BRIDGE) &&
+           (pci_value != PCI_TYPE_CARDBUS_BRIDGE)) {
+               return (AE_OK);
+       }
+
+       /* Bridge: Get the Primary bus_number */
+
+       status = acpi_os_read_pci_configuration(pci_id,
+                                               PCI_CFG_PRIMARY_BUS_NUMBER_REG,
+                                               &pci_value, 8);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       *is_bridge = TRUE;
+       pci_id->bus = (u16)pci_value;
+
+       /* Bridge: Get the Secondary bus_number */
+
+       status = acpi_os_read_pci_configuration(pci_id,
+                                               PCI_CFG_SECONDARY_BUS_NUMBER_REG,
+                                               &pci_value, 8);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       *bus_number = (u16)pci_value;
+       return (AE_OK);
+}
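
The _ADR decoding above follows the ACPI specification: for a PCI device, the
low dword of _ADR packs the device number into the upper 16 bits and the
function number into the lower 16 bits. A standalone illustration with an
invented sample value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t adr = 0x001C0002;  /* hypothetical _ADR: device 0x1C, function 2 */

        /* Equivalent to ACPI_HIWORD(ACPI_LODWORD(adr)) and
         * ACPI_LOWORD(ACPI_LODWORD(adr)) above */
        unsigned device   = (unsigned)((adr >> 16) & 0xFFFF);
        unsigned function = (unsigned)(adr & 0xFFFF);

        printf("device 0x%02X, function %u\n", device, function);
        return 0;
    }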
index 4009498fbabd038520fdb47756cd263386f0b980..4ef9f43ea9262273ed06686eae439b9675f5d10d 100644 (file)
@@ -73,10 +73,18 @@ static acpi_status
 acpi_ns_repair_ALR(struct acpi_predefined_data *data,
                   union acpi_operand_object **return_object_ptr);
 
+static acpi_status
+acpi_ns_repair_CID(struct acpi_predefined_data *data,
+                  union acpi_operand_object **return_object_ptr);
+
 static acpi_status
 acpi_ns_repair_FDE(struct acpi_predefined_data *data,
                   union acpi_operand_object **return_object_ptr);
 
+static acpi_status
+acpi_ns_repair_HID(struct acpi_predefined_data *data,
+                  union acpi_operand_object **return_object_ptr);
+
 static acpi_status
 acpi_ns_repair_PSS(struct acpi_predefined_data *data,
                   union acpi_operand_object **return_object_ptr);
@@ -108,8 +116,10 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
  * As necessary:
  *
  * _ALR: Sort the list ascending by ambient_illuminance
+ * _CID: Strings: uppercase all, remove any leading asterisk
  * _FDE: Convert Buffer of BYTEs to a Buffer of DWORDs
  * _GTM: Convert Buffer of BYTEs to a Buffer of DWORDs
+ * _HID: Strings: uppercase all, remove any leading asterisk
  * _PSS: Sort the list descending by Power
  * _TSS: Sort the list descending by Power
  *
@@ -122,8 +132,10 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
  */
 static const struct acpi_repair_info acpi_ns_repairable_names[] = {
        {"_ALR", acpi_ns_repair_ALR},
+       {"_CID", acpi_ns_repair_CID},
        {"_FDE", acpi_ns_repair_FDE},
        {"_GTM", acpi_ns_repair_FDE},   /* _GTM has same repair as _FDE */
+       {"_HID", acpi_ns_repair_HID},
        {"_PSS", acpi_ns_repair_PSS},
        {"_TSS", acpi_ns_repair_TSS},
        {{0, 0, 0, 0}, NULL}    /* Table terminator */
@@ -319,6 +331,157 @@ acpi_ns_repair_FDE(struct acpi_predefined_data *data,
        return (AE_OK);
 }
 
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_repair_CID
+ *
+ * PARAMETERS:  Data                - Pointer to validation data structure
+ *              return_object_ptr   - Pointer to the object returned from the
+ *                                    evaluation of a method or object
+ *
+ * RETURN:      Status. AE_OK if object is OK or was repaired successfully
+ *
+ * DESCRIPTION: Repair for the _CID object. If a string, ensure that all
+ *              letters are uppercase and that there is no leading asterisk.
+ *              If a Package, ensure the same for all string elements.
+ *
+ *****************************************************************************/
+
+static acpi_status
+acpi_ns_repair_CID(struct acpi_predefined_data *data,
+                  union acpi_operand_object **return_object_ptr)
+{
+       acpi_status status;
+       union acpi_operand_object *return_object = *return_object_ptr;
+       union acpi_operand_object **element_ptr;
+       union acpi_operand_object *original_element;
+       u16 original_ref_count;
+       u32 i;
+
+       /* Check for _CID as a simple string */
+
+       if (return_object->common.type == ACPI_TYPE_STRING) {
+               status = acpi_ns_repair_HID(data, return_object_ptr);
+               return (status);
+       }
+
+       /* Exit if not a Package */
+
+       if (return_object->common.type != ACPI_TYPE_PACKAGE) {
+               return (AE_OK);
+       }
+
+       /* Examine each element of the _CID package */
+
+       element_ptr = return_object->package.elements;
+       for (i = 0; i < return_object->package.count; i++) {
+               original_element = *element_ptr;
+               original_ref_count = original_element->common.reference_count;
+
+               status = acpi_ns_repair_HID(data, element_ptr);
+               if (ACPI_FAILURE(status)) {
+                       return (status);
+               }
+
+               /* Take care with reference counts */
+
+               if (original_element != *element_ptr) {
+
+                       /* Element was replaced */
+
+                       (*element_ptr)->common.reference_count =
+                           original_ref_count;
+
+                       acpi_ut_remove_reference(original_element);
+               }
+
+               element_ptr++;
+       }
+
+       return (AE_OK);
+}
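
The reference-count handling above matters because the repair may replace a
package element with a freshly created object. The same idea reduced to a
generic sketch; the refcount type and free logic are hypothetical, not
ACPICA's:

    #include <stdlib.h>

    struct obj { unsigned refcount; };

    /* Swap a refcounted element in place: the replacement inherits the
     * outstanding count, then our own reference to the original is dropped. */
    static void replace_element(struct obj **slot, struct obj *repaired)
    {
        struct obj *original = *slot;

        repaired->refcount = original->refcount;
        *slot = repaired;
        if (--original->refcount == 0)
            free(original);
    }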
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_repair_HID
+ *
+ * PARAMETERS:  Data                - Pointer to validation data structure
+ *              return_object_ptr   - Pointer to the object returned from the
+ *                                    evaluation of a method or object
+ *
+ * RETURN:      Status. AE_OK if object is OK or was repaired successfully
+ *
+ * DESCRIPTION: Repair for the _HID object. If a string, ensure that all
+ *              letters are uppercase and that there is no leading asterisk.
+ *
+ *****************************************************************************/
+
+static acpi_status
+acpi_ns_repair_HID(struct acpi_predefined_data *data,
+                  union acpi_operand_object **return_object_ptr)
+{
+       union acpi_operand_object *return_object = *return_object_ptr;
+       union acpi_operand_object *new_string;
+       char *source;
+       char *dest;
+
+       ACPI_FUNCTION_NAME(ns_repair_HID);
+
+       /* We only care about string _HID objects (not integers) */
+
+       if (return_object->common.type != ACPI_TYPE_STRING) {
+               return (AE_OK);
+       }
+
+       if (return_object->string.length == 0) {
+               ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+                                     "Invalid zero-length _HID or _CID string"));
+
+               /* Return AE_OK anyway, let driver handle it */
+
+               data->flags |= ACPI_OBJECT_REPAIRED;
+               return (AE_OK);
+       }
+
+       /* It is simplest to always create a new string object */
+
+       new_string = acpi_ut_create_string_object(return_object->string.length);
+       if (!new_string) {
+               return (AE_NO_MEMORY);
+       }
+
+       /*
+        * Remove a leading asterisk if present. For some unknown reason, there
+        * are many machines in the field that contain IDs like this.
+        *
+        * Examples: "*PNP0C03", "*ACPI0003"
+        */
+       source = return_object->string.pointer;
+       if (*source == '*') {
+               source++;
+               new_string->string.length--;
+
+               ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+                                 "%s: Removed invalid leading asterisk\n",
+                                 data->pathname));
+       }
+
+       /*
+        * Copy and uppercase the string. From the ACPI specification:
+        *
+        * A valid PNP ID must be of the form "AAA####" where A is an uppercase
+        * letter and # is a hex digit. A valid ACPI ID must be of the form
+        * "ACPI####" where # is a hex digit.
+        */
+       for (dest = new_string->string.pointer; *source; dest++, source++) {
+               *dest = (char)ACPI_TOUPPER(*source);
+       }
+
+       acpi_ut_remove_reference(return_object);
+       *return_object_ptr = new_string;
+       return (AE_OK);
+}
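
The transform itself is small: uppercase every character and drop a single
leading asterisk. A minimal sketch of just that step, with the object
creation and reference handling of the real code omitted:

    #include <ctype.h>
    #include <stdio.h>

    static void repair_id(char *dest, const char *src)
    {
        if (*src == '*')                    /* e.g. "*PNP0C03" seen in the field */
            src++;
        for (; *src; src++)
            *dest++ = (char)toupper((unsigned char)*src);
        *dest = '\0';
    }

    int main(void)
    {
        char fixed[16];

        repair_id(fixed, "*pnp0c03");
        printf("%s\n", fixed);              /* prints PNP0C03 */
        return 0;
    }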
+
 /******************************************************************************
  *
  * FUNCTION:    acpi_ns_repair_TSS
index e1add3491b04a7def5879b00c4f1e6ab1e6b5353..a7d6ad9c111b6b71fe52f4d5870e0b620efaf356 100644 (file)
@@ -58,104 +58,6 @@ static u8 acpi_ns_valid_path_separator(char sep);
 acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
 #endif
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_report_error
- *
- * PARAMETERS:  module_name         - Caller's module name (for error output)
- *              line_number         - Caller's line number (for error output)
- *              internal_name       - Name or path of the namespace node
- *              lookup_status       - Exception code from NS lookup
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print warning message with full pathname
- *
- ******************************************************************************/
-
-void
-acpi_ns_report_error(const char *module_name,
-                    u32 line_number,
-                    const char *internal_name, acpi_status lookup_status)
-{
-       acpi_status status;
-       u32 bad_name;
-       char *name = NULL;
-
-       acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-
-       if (lookup_status == AE_BAD_CHARACTER) {
-
-               /* There is a non-ascii character in the name */
-
-               ACPI_MOVE_32_TO_32(&bad_name,
-                                  ACPI_CAST_PTR(u32, internal_name));
-               acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
-       } else {
-               /* Convert path to external format */
-
-               status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
-                                                 internal_name, NULL, &name);
-
-               /* Print target name */
-
-               if (ACPI_SUCCESS(status)) {
-                       acpi_os_printf("[%s]", name);
-               } else {
-                       acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
-               }
-
-               if (name) {
-                       ACPI_FREE(name);
-               }
-       }
-
-       acpi_os_printf(" Namespace lookup failure, %s\n",
-                      acpi_format_exception(lookup_status));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_report_method_error
- *
- * PARAMETERS:  module_name         - Caller's module name (for error output)
- *              line_number         - Caller's line number (for error output)
- *              Message             - Error message to use on failure
- *              prefix_node         - Prefix relative to the path
- *              Path                - Path to the node (optional)
- *              method_status       - Execution status
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print warning message with full pathname
- *
- ******************************************************************************/
-
-void
-acpi_ns_report_method_error(const char *module_name,
-                           u32 line_number,
-                           const char *message,
-                           struct acpi_namespace_node *prefix_node,
-                           const char *path, acpi_status method_status)
-{
-       acpi_status status;
-       struct acpi_namespace_node *node = prefix_node;
-
-       acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-
-       if (path) {
-               status =
-                   acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
-                                    &node);
-               if (ACPI_FAILURE(status)) {
-                       acpi_os_printf("[Could not get node by pathname]");
-               }
-       }
-
-       acpi_ns_print_node_pathname(node, message);
-       acpi_os_printf(", %s\n", acpi_format_exception(method_status));
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_print_node_pathname
index 1728cb9bf600f020609a4bb2491f61508fd694c9..d2ff4325c4270a6c34e063bccfdf020175ae72b6 100644 (file)
@@ -49,7 +49,7 @@
 ACPI_MODULE_NAME("tbfadt")
 
 /* Local prototypes */
-static inline void
+static ACPI_INLINE void
 acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
                             u8 space_id, u8 byte_width, u64 address);
 
@@ -181,7 +181,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
  *
  ******************************************************************************/
 
-static inline void
+static ACPI_INLINE void
 acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
                             u8 space_id, u8 byte_width, u64 address)
 {
index 983510640059a5bf24feb6e9d85620e918223ee3..f21c486929a54a96fcf8bbcf5a6d34049b00bbf5 100644 (file)
@@ -179,9 +179,8 @@ acpi_debug_print(u32 requested_debug_level,
        if (thread_id != acpi_gbl_prev_thread_id) {
                if (ACPI_LV_THREADS & acpi_dbg_level) {
                        acpi_os_printf
-                           ("\n**** Context Switch from TID %p to TID %p ****\n\n",
-                            ACPI_CAST_PTR(void, acpi_gbl_prev_thread_id),
-                            ACPI_CAST_PTR(void, thread_id));
+                           ("\n**** Context Switch from TID %u to TID %u ****\n\n",
+                            (u32)acpi_gbl_prev_thread_id, (u32)thread_id);
                }
 
                acpi_gbl_prev_thread_id = thread_id;
@@ -194,7 +193,7 @@ acpi_debug_print(u32 requested_debug_level,
        acpi_os_printf("%8s-%04ld ", module_name, line_number);
 
        if (ACPI_LV_THREADS & acpi_dbg_level) {
-               acpi_os_printf("[%p] ", ACPI_CAST_PTR(void, thread_id));
+               acpi_os_printf("[%u] ", (u32)thread_id);
        }
 
        acpi_os_printf("[%02ld] %-22.22s: ",
index 6dfdeb65349058c3fda58ec860236a90ac505665..22f59ef604e06b840f95c0d16ffa048614091071 100644 (file)
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("uteval")
 
-/*
- * Strings supported by the _OSI predefined (internal) method.
- *
- * March 2009: Removed "Linux" as this host no longer wants to respond true
- * for this string. Basically, the only safe OS strings are windows-related
- * and in many or most cases represent the only test path within the
- * BIOS-provided ASL code.
- *
- * The second element of each entry is used to track the newest version of
- * Windows that the BIOS has requested.
- */
-static struct acpi_interface_info acpi_interfaces_supported[] = {
-       /* Operating System Vendor Strings */
-
-       {"Windows 2000", ACPI_OSI_WIN_2000},    /* Windows 2000 */
-       {"Windows 2001", ACPI_OSI_WIN_XP},      /* Windows XP */
-       {"Windows 2001 SP1", ACPI_OSI_WIN_XP_SP1},      /* Windows XP SP1 */
-       {"Windows 2001.1", ACPI_OSI_WINSRV_2003},       /* Windows Server 2003 */
-       {"Windows 2001 SP2", ACPI_OSI_WIN_XP_SP2},      /* Windows XP SP2 */
-       {"Windows 2001.1 SP1", ACPI_OSI_WINSRV_2003_SP1},       /* Windows Server 2003 SP1 - Added 03/2006 */
-       {"Windows 2006", ACPI_OSI_WIN_VISTA},   /* Windows Vista - Added 03/2006 */
-       {"Windows 2006.1", ACPI_OSI_WINSRV_2008},       /* Windows Server 2008 - Added 09/2009 */
-       {"Windows 2006 SP1", ACPI_OSI_WIN_VISTA_SP1},   /* Windows Vista SP1 - Added 09/2009 */
-       {"Windows 2009", ACPI_OSI_WIN_7},       /* Windows 7 and Server 2008 R2 - Added 09/2009 */
-
-       /* Feature Group Strings */
-
-       {"Extended Address Space Descriptor", 0}
-
-       /*
-        * All "optional" feature group strings (features that are implemented
-        * by the host) should be implemented in the host version of
-        * acpi_os_validate_interface and should not be added here.
-        */
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_osi_implementation
- *
- * PARAMETERS:  walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Implementation of the _OSI predefined control method
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
-{
-       acpi_status status;
-       union acpi_operand_object *string_desc;
-       union acpi_operand_object *return_desc;
-       u32 return_value;
-       u32 i;
-
-       ACPI_FUNCTION_TRACE(ut_osi_implementation);
-
-       /* Validate the string input argument */
-
-       string_desc = walk_state->arguments[0].object;
-       if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
-               return_ACPI_STATUS(AE_TYPE);
-       }
-
-       /* Create a return object */
-
-       return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-       if (!return_desc) {
-               return_ACPI_STATUS(AE_NO_MEMORY);
-       }
-
-       /* Default return value is 0, NOT SUPPORTED */
-
-       return_value = 0;
-
-       /* Compare input string to static table of supported interfaces */
-
-       for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
-               if (!ACPI_STRCMP(string_desc->string.pointer,
-                                acpi_interfaces_supported[i].name)) {
-                       /*
-                        * The interface is supported.
-                        * Update the osi_data if necessary. We keep track of the latest
-                        * version of Windows that has been requested by the BIOS.
-                        */
-                       if (acpi_interfaces_supported[i].value >
-                           acpi_gbl_osi_data) {
-                               acpi_gbl_osi_data =
-                                   acpi_interfaces_supported[i].value;
-                       }
-
-                       return_value = ACPI_UINT32_MAX;
-                       goto exit;
-               }
-       }
-
-       /*
-        * Did not match the string in the static table, call the host OSL to
-        * check for a match with one of the optional strings (such as
-        * "Module Device", "3.0 Thermal Model", etc.)
-        */
-       status = acpi_os_validate_interface(string_desc->string.pointer);
-       if (ACPI_SUCCESS(status)) {
-
-               /* The interface is supported */
-
-               return_value = ACPI_UINT32_MAX;
-       }
-
-exit:
-       ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INFO,
-               "ACPI: BIOS _OSI(%s) is %ssupported\n",
-               string_desc->string.pointer, return_value == 0 ? "not " : ""));
-
-       /* Complete the return value */
-
-       return_desc->integer.value = return_value;
-       walk_state->return_desc = return_desc;
-       return_ACPI_STATUS (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_osi_invalidate
- *
- * PARAMETERS:  interface_string
- *
- * RETURN:      Status
- *
- * DESCRIPTION: invalidate string in pre-defiend _OSI string list
- *
- ******************************************************************************/
-
-acpi_status acpi_osi_invalidate(char *interface)
-{
-       int i;
-
-       for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
-               if (!ACPI_STRCMP(interface, acpi_interfaces_supported[i].name)) {
-                       *acpi_interfaces_supported[i].name = '\0';
-                       return AE_OK;
-               }
-       }
-       return AE_NOT_FOUND;
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_evaluate_object
index 0558747579efd862ec3aee833fc742943275bc87..e87bc6760be6e01e1b0d5ed52912a2194a7246f3 100644 (file)
@@ -154,14 +154,16 @@ ACPI_EXPORT_SYMBOL(acpi_format_exception)
  * 1) _SB_ is defined to be a device to allow \_SB_._INI to be run
  *    during the initialization sequence.
  * 2) _TZ_ is defined to be a thermal zone in order to allow ASL code to
- *    perform a Notify() operation on it.
+ *    perform a Notify() operation on it. 09/2010: Changed to type Device.
+ *    This still allows notifies, but does not confuse host code that
+ *    searches for valid thermal_zone objects.
  */
 const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
        {"_GPE", ACPI_TYPE_LOCAL_SCOPE, NULL},
        {"_PR_", ACPI_TYPE_LOCAL_SCOPE, NULL},
        {"_SB_", ACPI_TYPE_DEVICE, NULL},
        {"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
-       {"_TZ_", ACPI_TYPE_THERMAL, NULL},
+       {"_TZ_", ACPI_TYPE_DEVICE, NULL},
        {"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
        {"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
        {"_GL_", ACPI_TYPE_MUTEX, (char *)1},
@@ -766,6 +768,7 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_gpe_fadt_blocks[0] = NULL;
        acpi_gbl_gpe_fadt_blocks[1] = NULL;
        acpi_current_gpe_count = 0;
+       acpi_all_gpes_initialized = FALSE;
 
        /* Global handlers */
 
@@ -774,6 +777,7 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_exception_handler = NULL;
        acpi_gbl_init_handler = NULL;
        acpi_gbl_table_handler = NULL;
+       acpi_gbl_interface_handler = NULL;
 
        /* Global Lock support */
 
@@ -800,6 +804,7 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
        acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
        acpi_gbl_osi_data = 0;
+       acpi_gbl_osi_mutex = NULL;
 
        /* Hardware oriented */
 
index 1397fadd0d4b14807b00650732fee18970e5328b..d2906328535df5903b887ae3c744473e27061e64 100644 (file)
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utids")
 
-/* Local prototypes */
-static void acpi_ut_copy_id_string(char *destination, char *source);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_id_string
- *
- * PARAMETERS:  Destination         - Where to copy the string
- *              Source              - Source string
- *
- * RETURN:      None
- *
- * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods.
- *              Performs removal of a leading asterisk if present -- workaround
- *              for a known issue on a bunch of machines.
- *
- ******************************************************************************/
-
-static void acpi_ut_copy_id_string(char *destination, char *source)
-{
-
-       /*
-        * Workaround for ID strings that have a leading asterisk. This construct
-        * is not allowed by the ACPI specification  (ID strings must be
-        * alphanumeric), but enough existing machines have this embedded in their
-        * ID strings that the following code is useful.
-        */
-       if (*source == '*') {
-               source++;
-       }
-
-       /* Do the actual copy */
-
-       ACPI_STRCPY(destination, source);
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_execute_HID
@@ -101,7 +65,6 @@ static void acpi_ut_copy_id_string(char *destination, char *source)
  *              NOTE: Internal function, no parameter validation
  *
  ******************************************************************************/
-
 acpi_status
 acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
                    struct acpica_device_id **return_id)
@@ -147,7 +110,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
        if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
                acpi_ex_eisa_id_to_string(hid->string, obj_desc->integer.value);
        } else {
-               acpi_ut_copy_id_string(hid->string, obj_desc->string.pointer);
+               ACPI_STRCPY(hid->string, obj_desc->string.pointer);
        }
 
        hid->length = length;
@@ -224,7 +187,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
        if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
                acpi_ex_integer_to_string(uid->string, obj_desc->integer.value);
        } else {
-               acpi_ut_copy_id_string(uid->string, obj_desc->string.pointer);
+               ACPI_STRCPY(uid->string, obj_desc->string.pointer);
        }
 
        uid->length = length;
@@ -357,8 +320,8 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
 
                        /* Copy the String CID from the returned object */
 
-                       acpi_ut_copy_id_string(next_id_string,
-                                              cid_objects[i]->string.pointer);
+                       ACPI_STRCPY(next_id_string,
+                                   cid_objects[i]->string.pointer);
                        length = cid_objects[i]->string.length + 1;
                }
 
index a39c93dac7193794e3a063e89a6432db9ff9cc67..c1b1c803ea9b62f8072a4af3d6ea8c8342aa1a91 100644 (file)
@@ -117,6 +117,10 @@ void acpi_ut_subsystem_shutdown(void)
        /* Close the acpi_event Handling */
 
        acpi_ev_terminate();
+
+       /* Delete any dynamic _OSI interfaces */
+
+       acpi_ut_interface_terminate();
 #endif
 
        /* Close the Namespace */
index 35059a14eb72c87eaa86e6345a477822ece004ec..49cf7b7fd816ca8b453b7f6c2e6b736d98b611a3 100644 (file)
 ACPI_MODULE_NAME("utmath")
 
 /*
- * Support for double-precision integer divide.  This code is included here
- * in order to support kernel environments where the double-precision math
- * library is not available.
+ * Optional support for 64-bit double-precision integer divide. This code
+ * is configurable and is implemented in order to support 32-bit kernel
+ * environments where a 64-bit double-precision math library is not available.
+ *
+ * Support for a more normal 64-bit divide/modulo (with check for a divide-
+ * by-zero) appears after this optional section of code.
  */
 #ifndef ACPI_USE_NATIVE_DIVIDE
+/* Structures used only for 64-bit divide */
+typedef struct uint64_struct {
+       u32 lo;
+       u32 hi;
+
+} uint64_struct;
+
+typedef union uint64_overlay {
+       u64 full;
+       struct uint64_struct part;
+
+} uint64_overlay;
+
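
The overlay union lets the portable divide treat a 64-bit operand as two
32-bit halves. A standalone sketch of the trick; note that the lo/hi member
order shown here matches little-endian byte order, so on a big-endian target
the halves would read swapped:

    #include <stdint.h>
    #include <stdio.h>

    typedef union {
        uint64_t full;
        struct { uint32_t lo; uint32_t hi; } part;  /* little-endian order */
    } u64_overlay;

    int main(void)
    {
        u64_overlay v;

        v.full = 0x1122334455667788ULL;
        printf("hi=0x%08X lo=0x%08X\n",
               (unsigned)v.part.hi, (unsigned)v.part.lo);
        return 0;
    }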
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_short_divide
@@ -69,6 +85,7 @@ ACPI_MODULE_NAME("utmath")
  *              32-bit remainder.
  *
  ******************************************************************************/
+
 acpi_status
 acpi_ut_short_divide(u64 dividend,
                     u32 divisor, u64 *out_quotient, u32 *out_remainder)
index e8d0724ee4032a25f5fb5a4ab3035aed00bf59cc..c7d0e05ef5a4ebccdf6586249136b0b4ac5c6ca9 100644 (file)
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utmisc")
 
-/*
- * Common suffix for messages
- */
-#define ACPI_COMMON_MSG_SUFFIX \
-       acpi_os_printf(" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_validate_exception
@@ -1044,160 +1039,3 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
 
        return_ACPI_STATUS(AE_AML_INTERNAL);
 }
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_error, acpi_exception, acpi_warning, acpi_info
- *
- * PARAMETERS:  module_name         - Caller's module name (for error output)
- *              line_number         - Caller's line number (for error output)
- *              Format              - Printf format string + additional args
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print message with module/line/version info
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_error(const char *module_name, u32 line_number, const char *format, ...)
-{
-       va_list args;
-
-       acpi_os_printf("ACPI Error: ");
-
-       va_start(args, format);
-       acpi_os_vprintf(format, args);
-       ACPI_COMMON_MSG_SUFFIX;
-       va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_exception(const char *module_name,
-              u32 line_number, acpi_status status, const char *format, ...)
-{
-       va_list args;
-
-       acpi_os_printf("ACPI Exception: %s, ", acpi_format_exception(status));
-
-       va_start(args, format);
-       acpi_os_vprintf(format, args);
-       ACPI_COMMON_MSG_SUFFIX;
-       va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
-{
-       va_list args;
-
-       acpi_os_printf("ACPI Warning: ");
-
-       va_start(args, format);
-       acpi_os_vprintf(format, args);
-       ACPI_COMMON_MSG_SUFFIX;
-       va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_info(const char *module_name, u32 line_number, const char *format, ...)
-{
-       va_list args;
-
-       acpi_os_printf("ACPI: ");
-
-       va_start(args, format);
-       acpi_os_vprintf(format, args);
-       acpi_os_printf("\n");
-       va_end(args);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_error)
-ACPI_EXPORT_SYMBOL(acpi_exception)
-ACPI_EXPORT_SYMBOL(acpi_warning)
-ACPI_EXPORT_SYMBOL(acpi_info)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_predefined_warning
- *
- * PARAMETERS:  module_name     - Caller's module name (for error output)
- *              line_number     - Caller's line number (for error output)
- *              Pathname        - Full pathname to the node
- *              node_flags      - From Namespace node for the method/object
- *              Format          - Printf format string + additional args
- *
- * RETURN:      None
- *
- * DESCRIPTION: Warnings for the predefined validation module. Messages are
- *              only emitted the first time a problem with a particular
- *              method/object is detected. This prevents a flood of error
- *              messages for methods that are repeatedly evaluated.
- *
-******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_warning(const char *module_name,
-                          u32 line_number,
-                          char *pathname,
-                          u8 node_flags, const char *format, ...)
-{
-       va_list args;
-
-       /*
-        * Warning messages for this method/object will be disabled after the
-        * first time a validation fails or an object is successfully repaired.
-        */
-       if (node_flags & ANOBJ_EVALUATED) {
-               return;
-       }
-
-       acpi_os_printf("ACPI Warning for %s: ", pathname);
-
-       va_start(args, format);
-       acpi_os_vprintf(format, args);
-       ACPI_COMMON_MSG_SUFFIX;
-       va_end(args);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_predefined_info
- *
- * PARAMETERS:  module_name     - Caller's module name (for error output)
- *              line_number     - Caller's line number (for error output)
- *              Pathname        - Full pathname to the node
- *              node_flags      - From Namespace node for the method/object
- *              Format          - Printf format string + additional args
- *
- * RETURN:      None
- *
- * DESCRIPTION: Info messages for the predefined validation module. Messages
- *              are only emitted the first time a problem with a particular
- *              method/object is detected. This prevents a flood of
- *              messages for methods that are repeatedly evaluated.
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_predefined_info(const char *module_name,
-                       u32 line_number,
-                       char *pathname, u8 node_flags, const char *format, ...)
-{
-       va_list args;
-
-       /*
-        * Warning messages for this method/object will be disabled after the
-        * first time a validation fails or an object is successfully repaired.
-        */
-       if (node_flags & ANOBJ_EVALUATED) {
-               return;
-       }
-
-       acpi_os_printf("ACPI Info for %s: ", pathname);
-
-       va_start(args, format);
-       acpi_os_vprintf(format, args);
-       ACPI_COMMON_MSG_SUFFIX;
-       va_end(args);
-}
index f5cca3a1300c60c1d3fae972389031d27782e1a1..d9efa495b4330049a1fa57fde968938c6cb18f0b 100644 (file)
@@ -86,6 +86,12 @@ acpi_status acpi_ut_mutex_initialize(void)
        spin_lock_init(acpi_gbl_gpe_lock);
        spin_lock_init(acpi_gbl_hardware_lock);
 
+       /* Mutex for _OSI support */
+       status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
        /* Create the reader/writer lock for namespace access */
 
        status = acpi_ut_create_rw_lock(&acpi_gbl_namespace_rw_lock);
@@ -117,6 +123,8 @@ void acpi_ut_mutex_terminate(void)
                acpi_ut_delete_mutex(i);
        }
 
+       acpi_os_delete_mutex(acpi_gbl_osi_mutex);
+
        /* Delete the spinlocks */
 
        acpi_os_delete_lock(acpi_gbl_gpe_lock);
@@ -220,18 +228,17 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
                        if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
                                if (i == mutex_id) {
                                        ACPI_ERROR((AE_INFO,
-                                                   "Mutex [%s] already acquired by this thread [%p]",
+                                                   "Mutex [%s] already acquired by this thread [%u]",
                                                    acpi_ut_get_mutex_name
                                                    (mutex_id),
-                                                   ACPI_CAST_PTR(void,
-                                                                 this_thread_id)));
+                                                   (u32)this_thread_id));
 
                                        return (AE_ALREADY_ACQUIRED);
                                }
 
                                ACPI_ERROR((AE_INFO,
-                                           "Invalid acquire order: Thread %p owns [%s], wants [%s]",
-                                           ACPI_CAST_PTR(void, this_thread_id),
+                                           "Invalid acquire order: Thread %u owns [%s], wants [%s]",
+                                           (u32)this_thread_id,
                                            acpi_ut_get_mutex_name(i),
                                            acpi_ut_get_mutex_name(mutex_id)));
 
@@ -242,24 +249,24 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
 #endif
 
        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-                         "Thread %p attempting to acquire Mutex [%s]\n",
-                         ACPI_CAST_PTR(void, this_thread_id),
+                         "Thread %u attempting to acquire Mutex [%s]\n",
+                         (u32)this_thread_id,
                          acpi_ut_get_mutex_name(mutex_id)));
 
        status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
                                       ACPI_WAIT_FOREVER);
        if (ACPI_SUCCESS(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-                                 "Thread %p acquired Mutex [%s]\n",
-                                 ACPI_CAST_PTR(void, this_thread_id),
+                                 "Thread %u acquired Mutex [%s]\n",
+                                 (u32)this_thread_id,
                                  acpi_ut_get_mutex_name(mutex_id)));
 
                acpi_gbl_mutex_info[mutex_id].use_count++;
                acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
        } else {
                ACPI_EXCEPTION((AE_INFO, status,
-                               "Thread %p could not acquire Mutex [0x%X]",
-                               ACPI_CAST_PTR(void, this_thread_id), mutex_id));
+                               "Thread %u could not acquire Mutex [0x%X]",
+                               (u32)this_thread_id, mutex_id));
        }
 
        return (status);
@@ -279,10 +286,14 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
 
 acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
 {
+       acpi_thread_id this_thread_id;
+
        ACPI_FUNCTION_NAME(ut_release_mutex);
 
-       ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %p releasing Mutex [%s]\n",
-                         ACPI_CAST_PTR(void, acpi_os_get_thread_id()),
+       this_thread_id = acpi_os_get_thread_id();
+
+       ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
+                         (u32)this_thread_id,
                          acpi_ut_get_mutex_name(mutex_id)));
 
        if (mutex_id > ACPI_MAX_MUTEX) {
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
new file mode 100644 (file)
index 0000000..18c59a8
--- /dev/null
@@ -0,0 +1,380 @@
+/******************************************************************************
+ *
+ * Module Name: utosi - Support for the _OSI predefined control method
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utosi")
+
+/*
+ * Strings supported by the _OSI predefined control method (which is
+ * implemented internally within this module).
+ *
+ * March 2009: Removed "Linux" as this host no longer wants to respond true
+ * for this string. Basically, the only safe OS strings are windows-related
+ * and in many or most cases represent the only test path within the
+ * BIOS-provided ASL code.
+ *
+ * The last element of each entry is used to track the newest version of
+ * Windows that the BIOS has requested.
+ */
+static struct acpi_interface_info acpi_default_supported_interfaces[] = {
+       /* Operating System Vendor Strings */
+
+       {"Windows 2000", NULL, 0, ACPI_OSI_WIN_2000},   /* Windows 2000 */
+       {"Windows 2001", NULL, 0, ACPI_OSI_WIN_XP},     /* Windows XP */
+       {"Windows 2001 SP1", NULL, 0, ACPI_OSI_WIN_XP_SP1},     /* Windows XP SP1 */
+       {"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003},      /* Windows Server 2003 */
+       {"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2},     /* Windows XP SP2 */
+       {"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1},      /* Windows Server 2003 SP1 - Added 03/2006 */
+       {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA},  /* Windows Vista - Added 03/2006 */
+       {"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008},      /* Windows Server 2008 - Added 09/2009 */
+       {"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1},  /* Windows Vista SP1 - Added 09/2009 */
+       {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2},  /* Windows Vista SP2 - Added 09/2010 */
+       {"Windows 2009", NULL, 0, ACPI_OSI_WIN_7},      /* Windows 7 and Server 2008 R2 - Added 09/2009 */
+
+       /* Feature Group Strings */
+
+       {"Extended Address Space Descriptor", NULL, 0, 0}
+
+       /*
+        * All "optional" feature group strings (features that are implemented
+        * by the host) should be dynamically added by the host via
+        * acpi_install_interface and should not be manually added here.
+        *
+        * Examples of optional feature group strings:
+        *
+        * "Module Device"
+        * "Processor Device"
+        * "3.0 Thermal Model"
+        * "3.0 _SCP Extensions"
+        * "Processor Aggregator Device"
+        */
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_initialize_interfaces
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize the global _OSI supported interfaces list
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_initialize_interfaces(void)
+{
+       u32 i;
+
+       (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+       acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;
+
+       /* Link the static list of supported interfaces */
+
+       for (i = 0;
+            i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
+            i++) {
+               acpi_default_supported_interfaces[i].next =
+                   &acpi_default_supported_interfaces[(acpi_size) i + 1];
+       }
+
+       acpi_os_release_mutex(acpi_gbl_osi_mutex);
+       return (AE_OK);
+}
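
Chaining a statically initialized array into a singly linked list, as above,
works for any table whose last entry leaves next as NULL. A minimal sketch
with invented types:

    #include <stddef.h>

    struct iface { const char *name; struct iface *next; };

    static struct iface table[] = {
        { "Windows 2000", NULL },
        { "Windows 2009", NULL },
        { "Extended Address Space Descriptor", NULL },  /* last: next stays NULL */
    };

    static void link_table(void)
    {
        size_t i;

        for (i = 0; i < (sizeof(table) / sizeof(table[0])) - 1; i++)
            table[i].next = &table[i + 1];
    }

    int main(void)
    {
        link_table();
        return table[0].next == &table[1] ? 0 : 1;
    }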
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_interface_terminate
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete all interfaces in the global list. Sets
+ *              acpi_gbl_supported_interfaces to NULL.
+ *
+ ******************************************************************************/
+
+void acpi_ut_interface_terminate(void)
+{
+       struct acpi_interface_info *next_interface;
+
+       (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+       next_interface = acpi_gbl_supported_interfaces;
+
+       while (next_interface) {
+               acpi_gbl_supported_interfaces = next_interface->next;
+
+               /* Only interfaces added at runtime can be freed */
+
+               if (next_interface->flags & ACPI_OSI_DYNAMIC) {
+                       ACPI_FREE(next_interface->name);
+                       ACPI_FREE(next_interface);
+               }
+
+               next_interface = acpi_gbl_supported_interfaces;
+       }
+
+       acpi_os_release_mutex(acpi_gbl_osi_mutex);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_install_interface
+ *
+ * PARAMETERS:  interface_name      - The interface to install
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install the interface into the global interface list.
+ *              Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_install_interface(acpi_string interface_name)
+{
+       struct acpi_interface_info *interface_info;
+
+       /* Allocate info block and space for the name string */
+
+       interface_info =
+           ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_interface_info));
+       if (!interface_info) {
+               return (AE_NO_MEMORY);
+       }
+
+       interface_info->name =
+           ACPI_ALLOCATE_ZEROED(ACPI_STRLEN(interface_name) + 1);
+       if (!interface_info->name) {
+               ACPI_FREE(interface_info);
+               return (AE_NO_MEMORY);
+       }
+
+       /* Initialize new info and insert at the head of the global list */
+
+       ACPI_STRCPY(interface_info->name, interface_name);
+       interface_info->flags = ACPI_OSI_DYNAMIC;
+       interface_info->next = acpi_gbl_supported_interfaces;
+
+       acpi_gbl_supported_interfaces = interface_info;
+       return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_remove_interface
+ *
+ * PARAMETERS:  interface_name      - The interface to remove
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove the interface from the global interface list.
+ *              Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_remove_interface(acpi_string interface_name)
+{
+       struct acpi_interface_info *previous_interface;
+       struct acpi_interface_info *next_interface;
+
+       previous_interface = next_interface = acpi_gbl_supported_interfaces;
+       while (next_interface) {
+               if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+
+                       /* Found: name is in either the static list or was added at runtime */
+
+                       if (next_interface->flags & ACPI_OSI_DYNAMIC) {
+
+                               /* Interface was added dynamically, remove and free it */
+
+                               if (previous_interface == next_interface) {
+                                       acpi_gbl_supported_interfaces =
+                                           next_interface->next;
+                               } else {
+                                       previous_interface->next =
+                                           next_interface->next;
+                               }
+
+                               ACPI_FREE(next_interface->name);
+                               ACPI_FREE(next_interface);
+                       } else {
+                               /*
+                                * Interface is in static list. If marked invalid, then it
+                                * does not actually exist. Else, mark it invalid.
+                                */
+                               if (next_interface->flags & ACPI_OSI_INVALID) {
+                                       return (AE_NOT_EXIST);
+                               }
+
+                               next_interface->flags |= ACPI_OSI_INVALID;
+                       }
+
+                       return (AE_OK);
+               }
+
+               previous_interface = next_interface;
+               next_interface = next_interface->next;
+       }
+
+       /* Interface was not found */
+
+       return (AE_NOT_EXIST);
+}
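
Removal above distinguishes dynamic entries, which are unlinked and freed,
from static ones, which are only flagged invalid because their storage cannot
be returned to the allocator. A compact sketch of that flag-or-free choice,
with invented types and flags:

    #include <stdlib.h>
    #include <string.h>

    #define IF_DYNAMIC  0x01
    #define IF_INVALID  0x02

    struct iface { char *name; unsigned char flags; struct iface *next; };

    static int remove_iface(struct iface **head, const char *name)
    {
        struct iface *prev = NULL;
        struct iface *cur = *head;

        for (; cur; prev = cur, cur = cur->next) {
            if (strcmp(cur->name, name))
                continue;
            if (cur->flags & IF_DYNAMIC) {      /* unlink and free */
                if (prev)
                    prev->next = cur->next;
                else
                    *head = cur->next;
                free(cur->name);
                free(cur);
            } else {                            /* static entry: flag only */
                if (cur->flags & IF_INVALID)
                    return -1;                  /* already removed */
                cur->flags |= IF_INVALID;
            }
            return 0;
        }
        return -1;                              /* not found */
    }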
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_interface
+ *
+ * PARAMETERS:  interface_name      - The interface to find
+ *
+ * RETURN:      struct acpi_interface_info if found. NULL if not found.
+ *
+ * DESCRIPTION: Search for the specified interface name in the global list.
+ *              Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
+{
+       struct acpi_interface_info *next_interface;
+
+       next_interface = acpi_gbl_supported_interfaces;
+       while (next_interface) {
+               if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+                       return (next_interface);
+               }
+
+               next_interface = next_interface->next;
+       }
+
+       return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_osi_implementation
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Implementation of the _OSI predefined control method. When
+ *              an invocation of _OSI is encountered in the system AML,
+ *              control is transferred to this function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
+{
+       union acpi_operand_object *string_desc;
+       union acpi_operand_object *return_desc;
+       struct acpi_interface_info *interface_info;
+       acpi_interface_handler interface_handler;
+       u32 return_value;
+
+       ACPI_FUNCTION_TRACE(ut_osi_implementation);
+
+       /* Validate the string input argument (from the AML caller) */
+
+       string_desc = walk_state->arguments[0].object;
+       if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
+               return_ACPI_STATUS(AE_TYPE);
+       }
+
+       /* Create a return object */
+
+       return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+       if (!return_desc) {
+               return_ACPI_STATUS(AE_NO_MEMORY);
+       }
+
+       /* Default return value is 0, NOT SUPPORTED */
+
+       return_value = 0;
+       (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+       /* Lookup the interface in the global _OSI list */
+
+       interface_info = acpi_ut_get_interface(string_desc->string.pointer);
+       if (interface_info && !(interface_info->flags & ACPI_OSI_INVALID)) {
+               /*
+                * The interface is supported.
+                * Update the osi_data if necessary. We keep track of the latest
+                * version of Windows that has been requested by the BIOS.
+                */
+               if (interface_info->value > acpi_gbl_osi_data) {
+                       acpi_gbl_osi_data = interface_info->value;
+               }
+
+               return_value = ACPI_UINT32_MAX;
+       }
+
+       acpi_os_release_mutex(acpi_gbl_osi_mutex);
+
+       /*
+        * Invoke an optional _OSI interface handler. The host OS may wish
+        * to do some interface-specific handling. For example, warn about
+        * certain interfaces or override the true/false support value.
+        */
+       interface_handler = acpi_gbl_interface_handler;
+       if (interface_handler) {
+               return_value =
+                   interface_handler(string_desc->string.pointer,
+                                     return_value);
+       }
+
+       ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
+                             "ACPI: BIOS _OSI(\"%s\") is %ssupported\n",
+                             string_desc->string.pointer,
+                             return_value == 0 ? "not " : ""));
+
+       /* Complete the return object */
+
+       return_desc->integer.value = return_value;
+       walk_state->return_desc = return_desc;
+       return_ACPI_STATUS(AE_OK);
+}
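Concretely, the return convention is the one BIOS AML tests for: if the
requested string (for example the conventional "Windows 2006" entry) is in the
global list and not marked invalid, the method returns an integer object of
0xFFFFFFFF (ACPI_UINT32_MAX); an unknown string yields 0. The example string is
illustrative; the actual list contents come from acpi_ut_initialize_interfaces()
plus any host additions.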
index 7f8cefcb2b32da538adb9a2fb73e4dbae645924e..1f484c9a6888934bd82949a86e93339c033a13a1 100644 (file)
@@ -110,6 +110,15 @@ acpi_status __init acpi_initialize_subsystem(void)
                return_ACPI_STATUS(status);
        }
 
+       /* Initialize the global OSI interfaces list with the static names */
+
+       status = acpi_ut_initialize_interfaces();
+       if (ACPI_FAILURE(status)) {
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "During OSI interfaces initialization"));
+               return_ACPI_STATUS(status);
+       }
+
        /* If configured, initialize the AML debugger */
 
        ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
@@ -289,19 +298,6 @@ acpi_status acpi_initialize_objects(u32 flags)
                }
        }
 
-       /*
-        * Complete the GPE initialization for the GPE blocks defined in the FADT
-        * (GPE block 0 and 1).
-        *
-        * NOTE: Currently, there seems to be no need to run the _REG methods
-        * before enabling the GPEs.
-        */
-       if (!(flags & ACPI_NO_EVENT_INIT)) {
-               status = acpi_ev_install_fadt_gpes();
-               if (ACPI_FAILURE(status))
-                       return (status);
-       }
-
        /*
         * Empty the caches (delete the cached objects) on the assumption that
         * the table load filled them up more than they will be at runtime --
@@ -506,6 +502,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
 
 ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
 #endif                         /*  ACPI_FUTURE_USAGE  */
+
 /*****************************************************************************
  *
  * FUNCTION:    acpi_purge_cached_objects
@@ -529,4 +526,117 @@ acpi_status acpi_purge_cached_objects(void)
 }
 
 ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
-#endif
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_install_interface
+ *
+ * PARAMETERS:  interface_name      - The interface to install
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install an _OSI interface into the global list
+ *
+ ****************************************************************************/
+acpi_status acpi_install_interface(acpi_string interface_name)
+{
+       acpi_status status;
+       struct acpi_interface_info *interface_info;
+
+       /* Parameter validation */
+
+       if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+       /* Check if the interface name is already in the global list */
+
+       interface_info = acpi_ut_get_interface(interface_name);
+       if (interface_info) {
+               /*
+                * The interface already exists in the list. This is OK if the
+                * interface has been marked invalid -- just clear the bit.
+                */
+               if (interface_info->flags & ACPI_OSI_INVALID) {
+                       interface_info->flags &= ~ACPI_OSI_INVALID;
+                       status = AE_OK;
+               } else {
+                       status = AE_ALREADY_EXISTS;
+               }
+       } else {
+               /* New interface name, install into the global list */
+
+               status = acpi_ut_install_interface(interface_name);
+       }
+
+       acpi_os_release_mutex(acpi_gbl_osi_mutex);
+       return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_interface)
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_remove_interface
+ *
+ * PARAMETERS:  interface_name      - The interface to remove
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove an _OSI interface from the global list
+ *
+ ****************************************************************************/
+acpi_status acpi_remove_interface(acpi_string interface_name)
+{
+       acpi_status status;
+
+       /* Parameter validation */
+
+       if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+       status = acpi_ut_remove_interface(interface_name);
+
+       acpi_os_release_mutex(acpi_gbl_osi_mutex);
+       return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_interface)
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_install_interface_handler
+ *
+ * PARAMETERS:  Handler             - The _OSI interface handler to install
+ *                                    NULL means "remove existing handler"
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for the predefined _OSI ACPI method,
+ *              invoked during execution of the internal implementation of
+ *              _OSI. A NULL handler simply removes any existing handler.
+ *
+ ****************************************************************************/
+acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
+{
+       acpi_status status = AE_OK;
+
+       (void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+
+       if (handler && acpi_gbl_interface_handler) {
+               status = AE_ALREADY_EXISTS;
+       } else {
+               acpi_gbl_interface_handler = handler;
+       }
+
+       acpi_os_release_mutex(acpi_gbl_osi_mutex);
+       return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
+#endif                         /* !ACPI_ASL_COMPILER */
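Taken together, acpi_install_interface(), acpi_remove_interface() and
acpi_install_interface_handler() let the host extend or filter the _OSI
interface list. Below is a minimal sketch of how a platform driver might use
them; the "VendorSpecificExtensions" string and the logging policy are
illustrative assumptions, not part of this patch:

	#include <acpi/acpi.h>
	#include <linux/kernel.h>

	/* Illustrative handler: log each _OSI query, pass the verdict through. */
	static u32 example_osi_handler(acpi_string interface, u32 supported)
	{
		pr_info("AML queried _OSI(\"%s\"): %ssupported\n",
			interface, supported ? "" : "not ");
		return supported;	/* a handler may also override this value */
	}

	static void example_osi_setup(void)
	{
		/* Advertise a hypothetical vendor interface string. */
		if (ACPI_FAILURE(acpi_install_interface("VendorSpecificExtensions")))
			pr_warn("could not install _OSI string\n");

		/* Only one handler may be registered at a time; a second
		 * attempt fails with AE_ALREADY_EXISTS. */
		if (ACPI_FAILURE(acpi_install_interface_handler(example_osi_handler)))
			pr_warn("an _OSI handler is already registered\n");
	}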
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
new file mode 100644 (file)
index 0000000..6f12e31
--- /dev/null
@@ -0,0 +1,415 @@
+/*******************************************************************************
+ *
+ * Module Name: utxferror - Various error/warning output functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utxferror")
+
+/*
+ * This module is used for the in-kernel ACPICA as well as the ACPICA
+ * tools/applications.
+ *
+ * For the i_aSL compiler case, the output is redirected to stderr so that
+ * any of the various ACPI errors and warnings do not appear in the output
+ * files, for either the compiler or disassembler portions of the tool.
+ */
+#ifdef ACPI_ASL_COMPILER
+#include <stdio.h>
+extern FILE *acpi_gbl_output_file;
+
+#define ACPI_MSG_REDIRECT_BEGIN \
+       FILE                            *output_file = acpi_gbl_output_file; \
+       acpi_os_redirect_output (stderr);
+
+#define ACPI_MSG_REDIRECT_END \
+       acpi_os_redirect_output (output_file);
+
+#else
+/*
+ * non-i_aSL case - no redirection, nothing to do
+ */
+#define ACPI_MSG_REDIRECT_BEGIN
+#define ACPI_MSG_REDIRECT_END
+#endif
+/*
+ * Common message prefixes
+ */
+#define ACPI_MSG_ERROR          "ACPI Error: "
+#define ACPI_MSG_EXCEPTION      "ACPI Exception: "
+#define ACPI_MSG_WARNING        "ACPI Warning: "
+#define ACPI_MSG_INFO           "ACPI: "
+/*
+ * Common message suffix
+ */
+#define ACPI_MSG_SUFFIX \
+       acpi_os_printf (" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_error
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              Format              - Printf format string + additional args
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print "ACPI Error" message with module/line/version info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_error(const char *module_name, u32 line_number, const char *format, ...)
+{
+       va_list arg_list;
+
+       ACPI_MSG_REDIRECT_BEGIN;
+       acpi_os_printf(ACPI_MSG_ERROR);
+
+       va_start(arg_list, format);
+       acpi_os_vprintf(format, arg_list);
+       ACPI_MSG_SUFFIX;
+       va_end(arg_list);
+
+       ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_error)
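For reference, a sketch of the resulting output format; the module name, line
number, and version value shown are assumed for illustration (in-kernel callers
normally go through the ACPI_ERROR() wrapper, which supplies the module/line
pair via AE_INFO):

	acpi_error("example", 42, "Object %s not found", "\\_SB.FOO");
	/* prints: ACPI Error: Object \_SB.FOO not found (20101013/example-42) */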
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_exception
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              Status              - Status to be formatted
+ *              Format              - Printf format string + additional args
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
+ *              and decoded acpi_status.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_exception(const char *module_name,
+              u32 line_number, acpi_status status, const char *format, ...)
+{
+       va_list arg_list;
+
+       ACPI_MSG_REDIRECT_BEGIN;
+       acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+                      acpi_format_exception(status));
+
+       va_start(arg_list, format);
+       acpi_os_vprintf(format, arg_list);
+       ACPI_MSG_SUFFIX;
+       va_end(arg_list);
+
+       ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_exception)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_warning
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              Format              - Printf format string + additional args
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print "ACPI Warning" message with module/line/version info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
+{
+       va_list arg_list;
+
+       ACPI_MSG_REDIRECT_BEGIN;
+       acpi_os_printf(ACPI_MSG_WARNING);
+
+       va_start(arg_list, format);
+       acpi_os_vprintf(format, arg_list);
+       ACPI_MSG_SUFFIX;
+       va_end(arg_list);
+
+       ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_warning)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_info
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              Format              - Printf format string + additional args
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print generic "ACPI:" information message. There is no
+ *              module/line/version info in order to keep the message simple.
+ *
+ * TBD: module_name and line_number args are not needed, should be removed.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_info(const char *module_name, u32 line_number, const char *format, ...)
+{
+       va_list arg_list;
+
+       ACPI_MSG_REDIRECT_BEGIN;
+       acpi_os_printf(ACPI_MSG_INFO);
+
+       va_start(arg_list, format);
+       acpi_os_vprintf(format, arg_list);
+       acpi_os_printf("\n");
+       va_end(arg_list);
+
+       ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_info)
+
+/*
+ * The remainder of this module contains internal error functions that may
+ * be configured out.
+ */
+#if !defined (ACPI_NO_ERROR_MESSAGES) && !defined (ACPI_BIN_APP)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_predefined_warning
+ *
+ * PARAMETERS:  module_name     - Caller's module name (for error output)
+ *              line_number     - Caller's line number (for error output)
+ *              Pathname        - Full pathname to the node
+ *              node_flags      - From Namespace node for the method/object
+ *              Format          - Printf format string + additional args
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Warnings for the predefined validation module. Messages are
+ *              only emitted the first time a problem with a particular
+ *              method/object is detected. This prevents a flood of error
+ *              messages for methods that are repeatedly evaluated.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_warning(const char *module_name,
+                          u32 line_number,
+                          char *pathname,
+                          u8 node_flags, const char *format, ...)
+{
+       va_list arg_list;
+
+       /*
+        * Warning messages for this method/object will be disabled after the
+        * first time a validation fails or an object is successfully repaired.
+        */
+       if (node_flags & ANOBJ_EVALUATED) {
+               return;
+       }
+
+       acpi_os_printf(ACPI_MSG_WARNING "For %s: ", pathname);
+
+       va_start(arg_list, format);
+       acpi_os_vprintf(format, arg_list);
+       ACPI_MSG_SUFFIX;
+       va_end(arg_list);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_predefined_info
+ *
+ * PARAMETERS:  module_name     - Caller's module name (for error output)
+ *              line_number     - Caller's line number (for error output)
+ *              Pathname        - Full pathname to the node
+ *              node_flags      - From Namespace node for the method/object
+ *              Format          - Printf format string + additional args
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Info messages for the predefined validation module. Messages
+ *              are only emitted the first time a problem with a particular
+ *              method/object is detected. This prevents a flood of
+ *              messages for methods that are repeatedly evaluated.
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_info(const char *module_name,
+                       u32 line_number,
+                       char *pathname, u8 node_flags, const char *format, ...)
+{
+       va_list arg_list;
+
+       /*
+        * Info messages for this method/object will be disabled after the
+        * first time a validation fails or an object is successfully repaired.
+        */
+       if (node_flags & ANOBJ_EVALUATED) {
+               return;
+       }
+
+       acpi_os_printf(ACPI_MSG_INFO "For %s: ", pathname);
+
+       va_start(arg_list, format);
+       acpi_os_vprintf(format, arg_list);
+       ACPI_MSG_SUFFIX;
+       va_end(arg_list);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_namespace_error
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              internal_name       - Name or path of the namespace node
+ *              lookup_status       - Exception code from NS lookup
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print error message with the full pathname for the NS node.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_namespace_error(const char *module_name,
+                       u32 line_number,
+                       const char *internal_name, acpi_status lookup_status)
+{
+       acpi_status status;
+       u32 bad_name;
+       char *name = NULL;
+
+       ACPI_MSG_REDIRECT_BEGIN;
+       acpi_os_printf(ACPI_MSG_ERROR);
+
+       if (lookup_status == AE_BAD_CHARACTER) {
+
+               /* There is a non-ASCII character in the name */
+
+               ACPI_MOVE_32_TO_32(&bad_name,
+                                  ACPI_CAST_PTR(u32, internal_name));
+               acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
+       } else {
+               /* Convert path to external format */
+
+               status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
+                                                 internal_name, NULL, &name);
+
+               /* Print target name */
+
+               if (ACPI_SUCCESS(status)) {
+                       acpi_os_printf("[%s]", name);
+               } else {
+                       acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
+               }
+
+               if (name) {
+                       ACPI_FREE(name);
+               }
+       }
+
+       acpi_os_printf(" Namespace lookup failure, %s",
+                      acpi_format_exception(lookup_status));
+
+       ACPI_MSG_SUFFIX;
+       ACPI_MSG_REDIRECT_END;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_method_error
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              Message             - Error message to use on failure
+ *              prefix_node         - Prefix relative to the path
+ *              Path                - Path to the node (optional)
+ *              method_status       - Execution status
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print error message with the full pathname for the method.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_method_error(const char *module_name,
+                    u32 line_number,
+                    const char *message,
+                    struct acpi_namespace_node *prefix_node,
+                    const char *path, acpi_status method_status)
+{
+       acpi_status status;
+       struct acpi_namespace_node *node = prefix_node;
+
+       ACPI_MSG_REDIRECT_BEGIN;
+       acpi_os_printf(ACPI_MSG_ERROR);
+
+       if (path) {
+               status =
+                   acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
+                                    &node);
+               if (ACPI_FAILURE(status)) {
+                       acpi_os_printf("[Could not get node by pathname]");
+               }
+       }
+
+       acpi_ns_print_node_pathname(node, message);
+       acpi_os_printf(", %s", acpi_format_exception(method_status));
+
+       ACPI_MSG_SUFFIX;
+       ACPI_MSG_REDIRECT_END;
+}
+
+#endif                         /* ACPI_NO_ERROR_MESSAGES */
index 98417201e9ce3881257354e7c2a360ee019a4e39..95649d373071ac93d14bbbb478db1ab4a8cdb801 100644 (file)
 
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
-
-#ifdef CONFIG_ACPI_SYSFS_POWER
 #include <linux/power_supply.h>
-#endif
 
 #define PREFIX "ACPI: "
 
@@ -98,13 +95,12 @@ enum {
         * due to bad math.
         */
        ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
+       ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
 };
 
 struct acpi_battery {
        struct mutex lock;
-#ifdef CONFIG_ACPI_SYSFS_POWER
        struct power_supply bat;
-#endif
        struct acpi_device *device;
        unsigned long update_time;
        int rate_now;
@@ -141,7 +137,6 @@ inline int acpi_battery_present(struct acpi_battery *battery)
        return battery->device->status.battery_present;
 }
 
-#ifdef CONFIG_ACPI_SYSFS_POWER
 static int acpi_battery_technology(struct acpi_battery *battery)
 {
        if (!strcasecmp("NiCd", battery->type))
@@ -186,6 +181,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
                                     enum power_supply_property psp,
                                     union power_supply_propval *val)
 {
+       int ret = 0;
        struct acpi_battery *battery = to_acpi_battery(psy);
 
        if (acpi_battery_present(battery)) {
@@ -214,26 +210,44 @@ static int acpi_battery_get_property(struct power_supply *psy,
                val->intval = battery->cycle_count;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
-               val->intval = battery->design_voltage * 1000;
+               if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
+                       ret = -ENODEV;
+               else
+                       val->intval = battery->design_voltage * 1000;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               val->intval = battery->voltage_now * 1000;
+               if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
+                       ret = -ENODEV;
+               else
+                       val->intval = battery->voltage_now * 1000;
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
        case POWER_SUPPLY_PROP_POWER_NOW:
-               val->intval = battery->rate_now * 1000;
+               if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+                       ret = -ENODEV;
+               else
+                       val->intval = battery->rate_now * 1000;
                break;
        case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
        case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
-               val->intval = battery->design_capacity * 1000;
+               if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+                       ret = -ENODEV;
+               else
+                       val->intval = battery->design_capacity * 1000;
                break;
        case POWER_SUPPLY_PROP_CHARGE_FULL:
        case POWER_SUPPLY_PROP_ENERGY_FULL:
-               val->intval = battery->full_charge_capacity * 1000;
+               if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+                       ret = -ENODEV;
+               else
+                       val->intval = battery->full_charge_capacity * 1000;
                break;
        case POWER_SUPPLY_PROP_CHARGE_NOW:
        case POWER_SUPPLY_PROP_ENERGY_NOW:
-               val->intval = battery->capacity_now * 1000;
+               if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
+                       ret = -ENODEV;
+               else
+                       val->intval = battery->capacity_now * 1000;
                break;
        case POWER_SUPPLY_PROP_MODEL_NAME:
                val->strval = battery->model_number;
@@ -245,9 +259,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
                val->strval = battery->serial_number;
                break;
        default:
-               return -EINVAL;
+               ret = -EINVAL;
        }
-       return 0;
+       return ret;
 }
 
 static enum power_supply_property charge_battery_props[] = {
@@ -281,7 +295,6 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_MANUFACTURER,
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
-#endif
 
 #ifdef CONFIG_ACPI_PROCFS_POWER
 inline char *acpi_battery_units(struct acpi_battery *battery)
@@ -412,6 +425,8 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
                result = extract_package(battery, buffer.pointer,
                                info_offsets, ARRAY_SIZE(info_offsets));
        kfree(buffer.pointer);
+       if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+               battery->full_charge_capacity = battery->design_capacity;
        return result;
 }
 
@@ -448,6 +463,10 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
            battery->rate_now != -1)
                battery->rate_now = abs((s16)battery->rate_now);
 
+       if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
+           && battery->capacity_now >= 0 && battery->capacity_now <= 100)
+               battery->capacity_now = (battery->capacity_now *
+                               battery->full_charge_capacity) / 100;
        return result;
 }
 
@@ -492,7 +511,6 @@ static int acpi_battery_init_alarm(struct acpi_battery *battery)
        return acpi_battery_set_alarm(battery);
 }
 
-#ifdef CONFIG_ACPI_SYSFS_POWER
 static ssize_t acpi_battery_alarm_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
@@ -552,7 +570,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
        power_supply_unregister(&battery->bat);
        battery->bat.dev = NULL;
 }
-#endif
 
 static void acpi_battery_quirks(struct acpi_battery *battery)
 {
@@ -561,6 +578,33 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
        }
 }
 
+/*
+ * According to the ACPI spec, some kinds of primary batteries can
+ * report the percentage of remaining capacity directly to the OS.
+ * In this case, the battery reports Last Full Charged Capacity == 100
+ * and BatteryPresentRate == 0xFFFFFFFF.
+ *
+ * Some batteries have been found to report percentage remaining
+ * capacity even though they are rechargeable:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=15979
+ *
+ * Handle these batteries correctly so that they do not break userspace.
+ */
+static void acpi_battery_quirks2(struct acpi_battery *battery)
+{
+       if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+               return;
+
+       if (battery->full_charge_capacity == 100 &&
+           battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN &&
+           battery->capacity_now >= 0 && battery->capacity_now <= 100) {
+               set_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags);
+               battery->full_charge_capacity = battery->design_capacity;
+               battery->capacity_now = (battery->capacity_now *
+                               battery->full_charge_capacity) / 100;
+       }
+}
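A worked example of the conversion (values assumed for illustration): for a
battery with design_capacity == 4400 whose firmware reports
full_charge_capacity == 100, rate_now == ACPI_BATTERY_VALUE_UNKNOWN and
capacity_now == 50, the quirk sets full_charge_capacity to 4400 and rescales
capacity_now to (50 * 4400) / 100 == 2200, so userspace sees consistent
capacity units.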
+
 static int acpi_battery_update(struct acpi_battery *battery)
 {
        int result, old_present = acpi_battery_present(battery);
@@ -568,9 +612,7 @@ static int acpi_battery_update(struct acpi_battery *battery)
        if (result)
                return result;
        if (!acpi_battery_present(battery)) {
-#ifdef CONFIG_ACPI_SYSFS_POWER
                sysfs_remove_battery(battery);
-#endif
                battery->update_time = 0;
                return 0;
        }
@@ -582,11 +624,11 @@ static int acpi_battery_update(struct acpi_battery *battery)
                acpi_battery_quirks(battery);
                acpi_battery_init_alarm(battery);
        }
-#ifdef CONFIG_ACPI_SYSFS_POWER
        if (!battery->bat.dev)
                sysfs_add_battery(battery);
-#endif
-       return acpi_battery_get_state(battery);
+       result = acpi_battery_get_state(battery);
+       acpi_battery_quirks2(battery);
+       return result;
 }
 
 /* --------------------------------------------------------------------------
@@ -867,26 +909,20 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
 static void acpi_battery_notify(struct acpi_device *device, u32 event)
 {
        struct acpi_battery *battery = acpi_driver_data(device);
-#ifdef CONFIG_ACPI_SYSFS_POWER
        struct device *old;
-#endif
 
        if (!battery)
                return;
-#ifdef CONFIG_ACPI_SYSFS_POWER
        old = battery->bat.dev;
-#endif
        acpi_battery_update(battery);
        acpi_bus_generate_proc_event(device, event,
                                     acpi_battery_present(battery));
        acpi_bus_generate_netlink_event(device->pnp.device_class,
                                        dev_name(&device->dev), event,
                                        acpi_battery_present(battery));
-#ifdef CONFIG_ACPI_SYSFS_POWER
        /* acpi_battery_update could remove power_supply object */
        if (old && battery->bat.dev)
                power_supply_changed(&battery->bat);
-#endif
 }
 
 static int acpi_battery_add(struct acpi_device *device)
@@ -934,9 +970,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
 #ifdef CONFIG_ACPI_PROCFS_POWER
        acpi_battery_remove_fs(device);
 #endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
        sysfs_remove_battery(battery);
-#endif
        mutex_destroy(&battery->lock);
        kfree(battery);
        return 0;
index 310e3b9749cbbacdabb3c03d288a6b42874aa41b..d68bd61072bb797e3e7f0413d6b97a9650f99222 100644 (file)
@@ -935,6 +935,12 @@ static int __init acpi_bus_init(void)
                goto error1;
        }
 
+       /*
+        * _PDC control method may load dynamic SSDT tables,
+        * and we need to install the table handler before that.
+        */
+       acpi_sysfs_init();
+
        acpi_early_processor_set_pdc();
 
        /*
@@ -1026,7 +1032,6 @@ static int __init acpi_init(void)
        acpi_scan_init();
        acpi_ec_init();
        acpi_power_init();
-       acpi_sysfs_init();
        acpi_debugfs_init();
        acpi_sleep_proc_init();
        acpi_wakeup_device_init();
index 1575a9b51f1d9e1dfb0e16efb65bf31f60bcb506..71ef9cd0735f2fadd8c76d67cee4029bc72b1ce1 100644 (file)
@@ -338,7 +338,8 @@ static int acpi_button_add(struct acpi_device *device)
 {
        struct acpi_button *button;
        struct input_dev *input;
-       char *hid, *name, *class;
+       const char *hid = acpi_device_hid(device);
+       char *name, *class;
        int error;
 
        button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
@@ -353,7 +354,6 @@ static int acpi_button_add(struct acpi_device *device)
                goto err_free_button;
        }
 
-       hid = acpi_device_hid(device);
        name = acpi_device_name(device);
        class = acpi_device_class(device);
 
index 3fe29e992be8aa2cd324f02696388862319327e2..81514a4918cc11ee1313b234aec2daa6d6b0765b 100644 (file)
@@ -725,6 +725,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
                        complete_dock(ds);
                        dock_event(ds, event, DOCK_EVENT);
                        dock_lock(ds, 1);
+                       acpi_update_gpes();
                        break;
                }
                if (dock_present(ds) || dock_in_progress(ds))
@@ -929,7 +930,7 @@ static struct attribute_group dock_attribute_group = {
  * allocated and initialize a new dock station device.  Find all devices
  * that are on the dock station, and register for dock event notifications.
  */
-static int dock_add(acpi_handle handle)
+static int __init dock_add(acpi_handle handle)
 {
        int ret, id;
        struct dock_station ds, *dock_station;
@@ -1023,7 +1024,7 @@ static int dock_remove(struct dock_station *ds)
  *
  * This is called by acpi_walk_namespace to look for dock stations.
  */
-static acpi_status
+static __init acpi_status
 find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
        if (is_dock(handle))
@@ -1032,7 +1033,7 @@ find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
        return AE_OK;
 }
 
-static acpi_status
+static __init acpi_status
 find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
        /* If bay is a dock, it's already handled */
index f31291ba94d092ec3a9cc9b6a51fc0383ee1d56a..372ff80b7b0c93f88f6f2ea9e100c0fdd2aaf6bf 100644 (file)
@@ -83,6 +83,11 @@ enum {
        EC_FLAGS_BLOCKED,               /* Transactions are blocked */
 };
 
+/* ec.c is compiled in the acpi namespace, so this appears as acpi.ec_delay */
+static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+module_param(ec_delay, uint, 0644);
+MODULE_PARM_DESC(ec_delay, "Timeout (ms) to wait for an EC command to complete");
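For example (illustrative value), the timeout can be raised to one second with
acpi.ec_delay=1000 on the kernel command line; since the parameter mode is
0644, it can also be adjusted at runtime via
/sys/module/acpi/parameters/ec_delay.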
+
 /* If we find an EC via the ECDT, we need to keep a ptr to its context */
 /* External interfaces use first EC only, so remember */
 typedef int (*acpi_ec_query_func) (void *data);
@@ -210,7 +215,7 @@ static int ec_poll(struct acpi_ec *ec)
        int repeat = 2; /* number of command restarts */
        while (repeat--) {
                unsigned long delay = jiffies +
-                       msecs_to_jiffies(ACPI_EC_DELAY);
+                       msecs_to_jiffies(ec_delay);
                do {
                        /* don't sleep with disabled interrupts */
                        if (EC_FLAGS_MSI || irqs_disabled()) {
@@ -265,7 +270,7 @@ static int ec_check_ibf0(struct acpi_ec *ec)
 
 static int ec_wait_ibf0(struct acpi_ec *ec)
 {
-       unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+       unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
        /* interrupt wait manually if GPE mode is not active */
        while (time_before(jiffies, delay))
                if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
index d94d2953c9740f34675eeb576e00e74ee621bfd2..60049080c86985755109074a26a260e660ae7a72 100644 (file)
@@ -27,8 +27,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
 #include <asm/uaccess.h>
 #include <linux/thermal.h>
 #include <acpi/acpi_bus.h>
@@ -118,122 +116,6 @@ static struct thermal_cooling_device_ops fan_cooling_ops = {
        .set_cur_state = fan_set_cur_state,
 };
 
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc)
-   -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-
-static struct proc_dir_entry *acpi_fan_dir;
-
-static int acpi_fan_read_state(struct seq_file *seq, void *offset)
-{
-       struct acpi_device *device = seq->private;
-       int state = 0;
-
-
-       if (device) {
-               if (acpi_bus_get_power(device->handle, &state))
-                       seq_printf(seq, "status:                  ERROR\n");
-               else
-                       seq_printf(seq, "status:                  %s\n",
-                                  !state ? "on" : "off");
-       }
-       return 0;
-}
-
-static int acpi_fan_state_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_fan_read_state, PDE(inode)->data);
-}
-
-static ssize_t
-acpi_fan_write_state(struct file *file, const char __user * buffer,
-                    size_t count, loff_t * ppos)
-{
-       int result = 0;
-       struct seq_file *m = file->private_data;
-       struct acpi_device *device = m->private;
-       char state_string[3] = { '\0' };
-
-       if (count > sizeof(state_string) - 1)
-               return -EINVAL;
-
-       if (copy_from_user(state_string, buffer, count))
-               return -EFAULT;
-
-       state_string[count] = '\0';
-       if ((state_string[0] < '0') || (state_string[0] > '3'))
-               return -EINVAL;
-       if (state_string[1] == '\n')
-               state_string[1] = '\0';
-       if (state_string[1] != '\0')
-               return -EINVAL;
-
-       result = acpi_bus_set_power(device->handle,
-                                   simple_strtoul(state_string, NULL, 0));
-       if (result)
-               return result;
-
-       return count;
-}
-
-static const struct file_operations acpi_fan_state_ops = {
-       .open = acpi_fan_state_open_fs,
-       .read = seq_read,
-       .write = acpi_fan_write_state,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static int acpi_fan_add_fs(struct acpi_device *device)
-{
-       struct proc_dir_entry *entry = NULL;
-
-
-       if (!device)
-               return -EINVAL;
-
-       if (!acpi_device_dir(device)) {
-               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-                                                    acpi_fan_dir);
-               if (!acpi_device_dir(device))
-                       return -ENODEV;
-       }
-
-       /* 'status' [R/W] */
-       entry = proc_create_data(ACPI_FAN_FILE_STATE,
-                                S_IFREG | S_IRUGO | S_IWUSR,
-                                acpi_device_dir(device),
-                                &acpi_fan_state_ops,
-                                device);
-       if (!entry)
-               return -ENODEV;
-       return 0;
-}
-
-static int acpi_fan_remove_fs(struct acpi_device *device)
-{
-
-       if (acpi_device_dir(device)) {
-               remove_proc_entry(ACPI_FAN_FILE_STATE, acpi_device_dir(device));
-               remove_proc_entry(acpi_device_bid(device), acpi_fan_dir);
-               acpi_device_dir(device) = NULL;
-       }
-
-       return 0;
-}
-#else
-static int acpi_fan_add_fs(struct acpi_device *device)
-{
-       return 0;
-}
-
-static int acpi_fan_remove_fs(struct acpi_device *device)
-{
-       return 0;
-}
-#endif
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -284,10 +166,6 @@ static int acpi_fan_add(struct acpi_device *device)
                dev_err(&device->dev, "Failed to create sysfs link "
                        "'device'\n");
 
-       result = acpi_fan_add_fs(device);
-       if (result)
-               goto end;
-
        printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
               acpi_device_name(device), acpi_device_bid(device),
               !device->power.state ? "on" : "off");
@@ -303,7 +181,6 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
        if (!device || !cdev)
                return -EINVAL;
 
-       acpi_fan_remove_fs(device);
        sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
        sysfs_remove_link(&cdev->device.kobj, "device");
        thermal_cooling_device_unregister(cdev);
@@ -347,19 +224,9 @@ static int __init acpi_fan_init(void)
 {
        int result = 0;
 
-#ifdef CONFIG_ACPI_PROCFS
-       acpi_fan_dir = proc_mkdir(ACPI_FAN_CLASS, acpi_root_dir);
-       if (!acpi_fan_dir)
-               return -ENODEV;
-#endif
-
        result = acpi_bus_register_driver(&acpi_fan_driver);
-       if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS
-               remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
-#endif
+       if (result < 0)
                return -ENODEV;
-       }
 
        return 0;
 }
@@ -369,10 +236,6 @@ static void __exit acpi_fan_exit(void)
 
        acpi_bus_unregister_driver(&acpi_fan_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
-       remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
-#endif
-
        return;
 }
 
index 65b25a303b86e495d3f539e94d6a912ae3252325..966feddf6b1ba29457fde294223018ad23666e39 100644 (file)
@@ -95,8 +95,25 @@ struct acpi_res_list {
 static LIST_HEAD(resource_list_head);
 static DEFINE_SPINLOCK(acpi_res_lock);
 
+/*
+ * This list of permanent mappings is for memory that may be accessed from
+ * interrupt context, where we can't do the ioremap().
+ */
+struct acpi_ioremap {
+       struct list_head list;
+       void __iomem *virt;
+       acpi_physical_address phys;
+       acpi_size size;
+       struct kref ref;
+};
+
+static LIST_HEAD(acpi_ioremaps);
+static DEFINE_SPINLOCK(acpi_ioremap_lock);
+
 #define        OSI_STRING_LENGTH_MAX 64        /* arbitrary */
-static char osi_additional_string[OSI_STRING_LENGTH_MAX];
+static char osi_setup_string[OSI_STRING_LENGTH_MAX];
+
+static void __init acpi_osi_setup_late(void);
 
 /*
  * The story of _OSI(Linux)
@@ -138,6 +155,20 @@ static struct osi_linux {
        unsigned int    known:1;
 } osi_linux = { 0, 0, 0, 0};
 
+static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+{
+       if (!strcmp("Linux", interface)) {
+
+               printk(KERN_NOTICE FW_BUG PREFIX
+                       "BIOS _OSI(Linux) query %s%s\n",
+                       osi_linux.enable ? "honored" : "ignored",
+                       osi_linux.cmdline ? " via cmdline" :
+                       osi_linux.dmi ? " via DMI" : "");
+       }
+
+       return supported;
+}
+
 static void __init acpi_request_region (struct acpi_generic_address *addr,
        unsigned int length, char *desc)
 {
@@ -185,36 +216,6 @@ static int __init acpi_reserve_resources(void)
 }
 device_initcall(acpi_reserve_resources);
 
-acpi_status __init acpi_os_initialize(void)
-{
-       return AE_OK;
-}
-
-acpi_status acpi_os_initialize1(void)
-{
-       kacpid_wq = create_workqueue("kacpid");
-       kacpi_notify_wq = create_workqueue("kacpi_notify");
-       kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
-       BUG_ON(!kacpid_wq);
-       BUG_ON(!kacpi_notify_wq);
-       BUG_ON(!kacpi_hotplug_wq);
-       return AE_OK;
-}
-
-acpi_status acpi_os_terminate(void)
-{
-       if (acpi_irq_handler) {
-               acpi_os_remove_interrupt_handler(acpi_irq_irq,
-                                                acpi_irq_handler);
-       }
-
-       destroy_workqueue(kacpid_wq);
-       destroy_workqueue(kacpi_notify_wq);
-       destroy_workqueue(kacpi_hotplug_wq);
-
-       return AE_OK;
-}
-
 void acpi_os_printf(const char *fmt, ...)
 {
        va_list args;
@@ -260,29 +261,135 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
        }
 }
 
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup(acpi_physical_address phys, acpi_size size)
+{
+       struct acpi_ioremap *map;
+
+       list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+               if (map->phys <= phys &&
+                   phys + size <= map->phys + map->size)
+                       return map;
+
+       return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static void __iomem *
+acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
+{
+       struct acpi_ioremap *map;
+
+       map = acpi_map_lookup(phys, size);
+       if (map)
+               return map->virt + (phys - map->phys);
+
+       return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static struct acpi_ioremap *
+acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
+{
+       struct acpi_ioremap *map;
+
+       list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+               if (map->virt <= virt &&
+                   virt + size <= map->virt + map->size)
+                       return map;
+
+       return NULL;
+}
+
 void __iomem *__init_refok
 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
+       struct acpi_ioremap *map, *tmp_map;
+       unsigned long flags, pg_sz;
+       void __iomem *virt;
+       phys_addr_t pg_off;
+
        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return NULL;
        }
-       if (acpi_gbl_permanent_mmap)
-               /*
-               * ioremap checks to ensure this is in reserved space
-               */
-               return ioremap((unsigned long)phys, size);
-       else
+
+       if (!acpi_gbl_permanent_mmap)
                return __acpi_map_table((unsigned long)phys, size);
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map)
+               return NULL;
+
+       pg_off = round_down(phys, PAGE_SIZE);
+       pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
+       virt = ioremap(pg_off, pg_sz);
+       if (!virt) {
+               kfree(map);
+               return NULL;
+       }
+
+       INIT_LIST_HEAD(&map->list);
+       map->virt = virt;
+       map->phys = pg_off;
+       map->size = pg_sz;
+       kref_init(&map->ref);
+
+       spin_lock_irqsave(&acpi_ioremap_lock, flags);
+       /* Check if page has already been mapped. */
+       tmp_map = acpi_map_lookup(phys, size);
+       if (tmp_map) {
+               kref_get(&tmp_map->ref);
+               spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+               iounmap(map->virt);
+               kfree(map);
+               return tmp_map->virt + (phys - tmp_map->phys);
+       }
+       list_add_tail_rcu(&map->list, &acpi_ioremaps);
+       spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+
+       return map->virt + (phys - map->phys);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
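To make the page-rounding arithmetic concrete (addresses assumed for
illustration, PAGE_SIZE == 4096): a request for phys == 0x1234 with size == 8
yields pg_off == 0x1000 and pg_sz == 0x1000, and the caller gets back
map->virt + 0x234. A later request that falls inside an existing mapping only
takes a kref on that entry instead of creating a second mapping.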
 
+static void acpi_kref_del_iomap(struct kref *ref)
+{
+       struct acpi_ioremap *map;
+
+       map = container_of(ref, struct acpi_ioremap, ref);
+       list_del_rcu(&map->list);
+}
+
 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 {
-       if (acpi_gbl_permanent_mmap)
-               iounmap(virt);
-       else
+       struct acpi_ioremap *map;
+       unsigned long flags;
+       int del;
+
+       if (!acpi_gbl_permanent_mmap) {
                __acpi_unmap_table(virt, size);
+               return;
+       }
+
+       spin_lock_irqsave(&acpi_ioremap_lock, flags);
+       map = acpi_map_lookup_virt(virt, size);
+       if (!map) {
+               spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+               printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
+               dump_stack();
+               return;
+       }
+
+       del = kref_put(&map->ref, acpi_kref_del_iomap);
+       spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+
+       if (!del)
+               return;
+
+       synchronize_rcu();
+       iounmap(map->virt);
+       kfree(map);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
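Note the teardown ordering here: the interrupt-safe read/write paths below look
up mappings under rcu_read_lock(), so the final reference drop must call
synchronize_rcu() after list_del_rcu() and before iounmap(), guaranteeing that
no reader can still dereference map->virt when the mapping is destroyed.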
 
@@ -292,6 +399,44 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
                __acpi_unmap_table(virt, size);
 }
 
+int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+{
+       void __iomem *virt;
+
+       if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+               return 0;
+
+       if (!addr->address || !addr->bit_width)
+               return -EINVAL;
+
+       virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
+       if (!virt)
+               return -EIO;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
+
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+{
+       void __iomem *virt;
+       unsigned long flags;
+       acpi_size size = addr->bit_width / 8;
+
+       if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+               return;
+
+       if (!addr->address || !addr->bit_width)
+               return;
+
+       spin_lock_irqsave(&acpi_ioremap_lock, flags);
+       virt = acpi_map_vaddr_lookup(addr->address, size);
+       spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+
+       acpi_os_unmap_memory(virt, size);
+}
+EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
+
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
@@ -495,8 +640,15 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
 {
        u32 dummy;
        void __iomem *virt_addr;
-
-       virt_addr = ioremap(phys_addr, width);
+       int size = width / 8, unmap = 0;
+
+       rcu_read_lock();
+       virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+       rcu_read_unlock();
+       if (!virt_addr) {
+               virt_addr = ioremap(phys_addr, size);
+               unmap = 1;
+       }
        if (!value)
                value = &dummy;
 
@@ -514,7 +666,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
                BUG();
        }
 
-       iounmap(virt_addr);
+       if (unmap)
+               iounmap(virt_addr);
 
        return AE_OK;
 }
@@ -523,8 +676,15 @@ acpi_status
 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 {
        void __iomem *virt_addr;
-
-       virt_addr = ioremap(phys_addr, width);
+       int size = width / 8, unmap = 0;
+
+       rcu_read_lock();
+       virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+       rcu_read_unlock();
+       if (!virt_addr) {
+               virt_addr = ioremap(phys_addr, size);
+               unmap = 1;
+       }
 
        switch (width) {
        case 8:
@@ -540,16 +700,18 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
                BUG();
        }
 
-       iounmap(virt_addr);
+       if (unmap)
+               iounmap(virt_addr);
 
        return AE_OK;
 }
 
 acpi_status
 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
-                              u32 *value, u32 width)
+                              u64 *value, u32 width)
 {
        int result, size;
+       u32 value32;
 
        if (!value)
                return AE_BAD_PARAMETER;
@@ -570,7 +732,8 @@ acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
 
        result = raw_pci_read(pci_id->segment, pci_id->bus,
                                PCI_DEVFN(pci_id->device, pci_id->function),
-                               reg, size, value);
+                               reg, size, &value32);
+       *value = value32;
 
        return (result ? AE_ERROR : AE_OK);
 }
@@ -602,74 +765,6 @@ acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
        return (result ? AE_ERROR : AE_OK);
 }
 
-/* TODO: Change code to take advantage of driver model more */
-static void acpi_os_derive_pci_id_2(acpi_handle rhandle,       /* upper bound  */
-                                   acpi_handle chandle,        /* current node */
-                                   struct acpi_pci_id **id,
-                                   int *is_bridge, u8 * bus_number)
-{
-       acpi_handle handle;
-       struct acpi_pci_id *pci_id = *id;
-       acpi_status status;
-       unsigned long long temp;
-       acpi_object_type type;
-
-       acpi_get_parent(chandle, &handle);
-       if (handle != rhandle) {
-               acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
-                                       bus_number);
-
-               status = acpi_get_type(handle, &type);
-               if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
-                       return;
-
-               status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
-                                         &temp);
-               if (ACPI_SUCCESS(status)) {
-                       u32 val;
-                       pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
-                       pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
-
-                       if (*is_bridge)
-                               pci_id->bus = *bus_number;
-
-                       /* any nicer way to get bus number of bridge ? */
-                       status =
-                           acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
-                                                          8);
-                       if (ACPI_SUCCESS(status)
-                           && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
-                               status =
-                                   acpi_os_read_pci_configuration(pci_id, 0x18,
-                                                                  &val, 8);
-                               if (!ACPI_SUCCESS(status)) {
-                                       /* Certainly broken...  FIX ME */
-                                       return;
-                               }
-                               *is_bridge = 1;
-                               pci_id->bus = val;
-                               status =
-                                   acpi_os_read_pci_configuration(pci_id, 0x19,
-                                                                  &val, 8);
-                               if (ACPI_SUCCESS(status)) {
-                                       *bus_number = val;
-                               }
-                       } else
-                               *is_bridge = 0;
-               }
-       }
-}
-
-void acpi_os_derive_pci_id(acpi_handle rhandle,        /* upper bound  */
-                          acpi_handle chandle, /* current node */
-                          struct acpi_pci_id **id)
-{
-       int is_bridge = 1;
-       u8 bus_number = (*id)->bus;
-
-       acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
-}
-
 static void acpi_os_execute_deferred(struct work_struct *work)
 {
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
@@ -779,16 +874,6 @@ void acpi_os_wait_events_complete(void *context)
 
 EXPORT_SYMBOL(acpi_os_wait_events_complete);
 
-/*
- * Allocate the memory for a spinlock and initialize it.
- */
-acpi_status acpi_os_create_lock(acpi_spinlock * handle)
-{
-       spin_lock_init(*handle);
-
-       return AE_OK;
-}
-
 /*
  * Deallocate the memory for a spinlock.
  */
@@ -977,6 +1062,12 @@ static void __init set_osi_linux(unsigned int enable)
                printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
                        enable ? "Add": "Delet");
        }
+
+       if (osi_linux.enable)
+               acpi_osi_setup("Linux");
+       else
+               acpi_osi_setup("!Linux");
+
        return;
 }
 
@@ -1011,21 +1102,33 @@ void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
  * string starting with '!' disables that string
  * otherwise string is added to list, augmenting built-in strings
  */
-int __init acpi_osi_setup(char *str)
+static void __init acpi_osi_setup_late(void)
 {
-       if (str == NULL || *str == '\0') {
-               printk(KERN_INFO PREFIX "_OSI method disabled\n");
-               acpi_gbl_create_osi_method = FALSE;
-       } else if (!strcmp("!Linux", str)) {
+       char *str = osi_setup_string;
+
+       if (*str == '\0')
+               return;
+
+       if (!strcmp("!Linux", str)) {
                acpi_cmdline_osi_linux(0);      /* !enable */
        } else if (*str == '!') {
-               if (acpi_osi_invalidate(++str) == AE_OK)
+               if (acpi_remove_interface(++str) == AE_OK)
                        printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
        } else if (!strcmp("Linux", str)) {
                acpi_cmdline_osi_linux(1);      /* enable */
-       } else if (*osi_additional_string == '\0') {
-               strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
-               printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
+       } else {
+               if (acpi_install_interface(str) == AE_OK)
+                       printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
+       }
+}
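In practice these strings arrive via the acpi_osi= boot parameter (examples
illustrative): acpi_osi=Linux answers _OSI(Linux) as supported,
acpi_osi=!Linux explicitly ignores it, acpi_osi="!Windows 2006" deletes a
built-in interface string, acpi_osi=FooOS adds a custom (hypothetical) one, and
an empty acpi_osi= disables the _OSI method entirely, as handled in
acpi_osi_setup() below.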
+
+int __init acpi_osi_setup(char *str)
+{
+       if (str == NULL || *str == '\0') {
+               printk(KERN_INFO PREFIX "_OSI method disabled\n");
+               acpi_gbl_create_osi_method = FALSE;
+       } else {
+               strncpy(osi_setup_string, str, OSI_STRING_LENGTH_MAX);
        }
 
        return 1;
@@ -1152,21 +1255,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 }
 EXPORT_SYMBOL(acpi_check_region);
 
-int acpi_check_mem_region(resource_size_t start, resource_size_t n,
-                     const char *name)
-{
-       struct resource res = {
-               .start = start,
-               .end   = start + n - 1,
-               .name  = name,
-               .flags = IORESOURCE_MEM,
-       };
-
-       return acpi_check_resource_conflict(&res);
-
-}
-EXPORT_SYMBOL(acpi_check_mem_region);
-
 /*
  * Let drivers know whether the resource checks are effective
  */
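
acpi_check_mem_region() is dropped, presumably for lack of users; it was a thin wrapper, and a caller can open-code the same check against acpi_check_resource_conflict() exactly as the removed body did. A standalone sketch with stand-in types:

#include <stdio.h>

#define IORESOURCE_MEM 0x200      /* stand-in for the kernel's flag */

struct resource {
        unsigned long start, end, flags;
        const char *name;
};

/* Models acpi_check_resource_conflict(): the real function compares
 * the range against ACPI operation regions. */
static int check_resource_conflict(const struct resource *res)
{
        printf("checking %s: %#lx-%#lx\n", res->name, res->start, res->end);
        return 0;
}

/* The removed helper, open-coded at a call site. */
static int check_mem_region(unsigned long start, unsigned long n,
                            const char *name)
{
        struct resource res = {
                .start = start,
                .end   = start + n - 1,
                .flags = IORESOURCE_MEM,
                .name  = name,
        };
        return check_resource_conflict(&res);
}

int main(void)
{
        return check_mem_region(0xfed40000, 0x5000, "example");
}
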
@@ -1282,38 +1370,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
        return (AE_OK);
 }
 
-/******************************************************************************
- *
- * FUNCTION:    acpi_os_validate_interface
- *
- * PARAMETERS:  interface           - Requested interface to be validated
- *
- * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
- *
- * DESCRIPTION: Match an interface string to the interfaces supported by the
- *              host. Strings originate from an AML call to the _OSI method.
- *
- *****************************************************************************/
-
-acpi_status
-acpi_os_validate_interface (char *interface)
-{
-       if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
-               return AE_OK;
-       if (!strcmp("Linux", interface)) {
-
-               printk(KERN_NOTICE PREFIX
-                       "BIOS _OSI(Linux) query %s%s\n",
-                       osi_linux.enable ? "honored" : "ignored",
-                       osi_linux.cmdline ? " via cmdline" :
-                       osi_linux.dmi ? " via DMI" : "");
-
-               if (osi_linux.enable)
-                       return AE_OK;
-       }
-       return AE_SUPPORT;
-}
-
 static inline int acpi_res_list_add(struct acpi_res_list *res)
 {
        struct acpi_res_list *res_list_elem;
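
With acpi_os_validate_interface() gone, _OSI matching is no longer open-coded here: extra strings are registered with acpi_install_interface() and retired with acpi_remove_interface() (both used above), and acpi_os_initialize1() below installs acpi_osi_handler() as the query callback. Underneath, an _OSI query reduces to a string-table lookup; a trivial standalone illustration (the table holds two real _OSI strings as examples, not the kernel's actual default set):

#include <stdio.h>
#include <string.h>

static const char *supported[] = { "Windows 2001", "Windows 2006" };

static int interface_supported(const char *iface)
{
        size_t i;

        for (i = 0; i < sizeof(supported) / sizeof(supported[0]); i++)
                if (!strcmp(supported[i], iface))
                        return 1;
        return 0;
}

int main(void)
{
        printf("Linux supported: %d\n", interface_supported("Linux"));
        return 0;
}
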
@@ -1462,5 +1518,46 @@ acpi_os_validate_address (
        }
        return AE_OK;
 }
-
 #endif
+
+acpi_status __init acpi_os_initialize(void)
+{
+       acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+       acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
+       acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
+       acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+
+       return AE_OK;
+}
+
+acpi_status acpi_os_initialize1(void)
+{
+       kacpid_wq = create_workqueue("kacpid");
+       kacpi_notify_wq = create_workqueue("kacpi_notify");
+       kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
+       BUG_ON(!kacpid_wq);
+       BUG_ON(!kacpi_notify_wq);
+       BUG_ON(!kacpi_hotplug_wq);
+       acpi_install_interface_handler(acpi_osi_handler);
+       acpi_osi_setup_late();
+       return AE_OK;
+}
+
+acpi_status acpi_os_terminate(void)
+{
+       if (acpi_irq_handler) {
+               acpi_os_remove_interrupt_handler(acpi_irq_irq,
+                                                acpi_irq_handler);
+       }
+
+       acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
+       acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
+       acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
+       acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+
+       destroy_workqueue(kacpid_wq);
+       destroy_workqueue(kacpi_notify_wq);
+       destroy_workqueue(kacpi_hotplug_wq);
+
+       return AE_OK;
+}
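
The new acpi_os_initialize()/acpi_os_initialize1()/acpi_os_terminate() trio makes the ordering explicit: the FADT PM-event and GPE blocks are mapped first, the work queues come up in a later phase (BUG_ON on failure, since boot cannot continue without them), and teardown unmaps in the reverse of acquisition order. The reverse-release discipline in isolation, sketched for code that unwinds rather than BUG()s:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative resources standing in for the generic-address mappings. */
static void *map(const char *name)
{
        printf("map   %s\n", name);
        return malloc(1);
}

static void unmap(const char *name, void *p)
{
        printf("unmap %s\n", name);
        free(p);
}

int main(void)
{
        void *a = map("pm1a");
        void *b = map("pm1b");

        /* ... use the mappings ... */

        unmap("pm1b", b);   /* release in reverse: later resources may */
        unmap("pm1a", a);   /* depend on earlier ones until the end    */
        return 0;
}
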
index e4804fb05e233d8dae18aacce47835a06dca16ec..f907cfbfa13c89cc0d173161b14e4f176d62296f 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/proc_fs.h>
 #include <linux/spinlock.h>
 #include <linux/pm.h>
 #include <linux/pci.h>
index 8d47a5846aebc192479d943b0a4826030b5a4344..9ff80a7e9f6ada0aef1c57bea5e3f6b142037de5 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/proc_fs.h>
 #include <linux/spinlock.h>
 #include <linux/pm.h>
 #include <linux/pci.h>
index 3ba8d1f44a73ed3b234e810f5316f6531e4d6718..96668ad096227add7ca952fd66cb9cf8c80bf20f 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/proc_fs.h>
 #include <linux/spinlock.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
index 844c155aeb0faa558b789fa7a3b680971df8f536..67dedeed144cf82c8e13859e7222b976d0eebb2e 100644 (file)
@@ -80,18 +80,13 @@ static struct acpi_driver acpi_power_driver = {
                },
 };
 
-struct acpi_power_reference {
-       struct list_head node;
-       struct acpi_device *device;
-};
-
 struct acpi_power_resource {
        struct acpi_device * device;
        acpi_bus_id name;
        u32 system_level;
        u32 order;
+       unsigned int ref_count;
        struct mutex resource_lock;
-       struct list_head reference;
 };
 
 static struct list_head acpi_power_resource_list;
@@ -184,101 +179,89 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
        return result;
 }
 
-static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
+static int __acpi_power_on(struct acpi_power_resource *resource)
 {
-       int result = 0;
-       int found = 0;
        acpi_status status = AE_OK;
-       struct acpi_power_resource *resource = NULL;
-       struct list_head *node, *next;
-       struct acpi_power_reference *ref;
 
+       status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       /* Update the power resource's _device_ power state */
+       resource->device->power.state = ACPI_STATE_D0;
+
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
+                         resource->name));
+
+       return 0;
+}
+
+static int acpi_power_on(acpi_handle handle)
+{
+       int result = 0;
+       struct acpi_power_resource *resource = NULL;
 
        result = acpi_power_get_context(handle, &resource);
        if (result)
                return result;
 
        mutex_lock(&resource->resource_lock);
-       list_for_each_safe(node, next, &resource->reference) {
-               ref = container_of(node, struct acpi_power_reference, node);
-               if (dev->handle == ref->device->handle) {
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already referenced by resource [%s]\n",
-                                 dev->pnp.bus_id, resource->name));
-                       found = 1;
-                       break;
-               }
-       }
 
-       if (!found) {
-               ref = kmalloc(sizeof (struct acpi_power_reference),
-                   irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
-               if (!ref) {
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "kmalloc() failed\n"));
-                       mutex_unlock(&resource->resource_lock);
-                       return -ENOMEM;
-               }
-               list_add_tail(&ref->node, &resource->reference);
-               ref->device = dev;
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] added to resource [%s] references\n",
-                         dev->pnp.bus_id, resource->name));
+       if (resource->ref_count++) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                 "Power resource [%s] already on\n",
+                                 resource->name));
+       } else {
+               result = __acpi_power_on(resource);
        }
-       mutex_unlock(&resource->resource_lock);
 
-       status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
-
-       /* Update the power resource's _device_ power state */
-       resource->device->power.state = ACPI_STATE_D0;
+       mutex_unlock(&resource->resource_lock);
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned on\n",
-                         resource->name));
        return 0;
 }
 
-static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
+static int acpi_power_off_device(acpi_handle handle)
 {
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_power_resource *resource = NULL;
-       struct list_head *node, *next;
-       struct acpi_power_reference *ref;
 
        result = acpi_power_get_context(handle, &resource);
        if (result)
                return result;
 
        mutex_lock(&resource->resource_lock);
-       list_for_each_safe(node, next, &resource->reference) {
-               ref = container_of(node, struct acpi_power_reference, node);
-               if (dev->handle == ref->device->handle) {
-                       list_del(&ref->node);
-                       kfree(ref);
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] removed from resource [%s] references\n",
-                           dev->pnp.bus_id, resource->name));
-                       break;
-               }
+
+       if (!resource->ref_count) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                 "Power resource [%s] already off\n",
+                                 resource->name));
+               goto unlock;
        }
 
-       if (!list_empty(&resource->reference)) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cannot turn resource [%s] off - resource is in use\n",
-                   resource->name));
-               mutex_unlock(&resource->resource_lock);
-               return 0;
+       if (--resource->ref_count) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                 "Power resource [%s] still in use\n",
+                                 resource->name));
+               goto unlock;
        }
-       mutex_unlock(&resource->resource_lock);
 
        status = acpi_evaluate_object(resource->device->handle, "_OFF", NULL, NULL);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
+       if (ACPI_FAILURE(status)) {
+               result = -ENODEV;
+       } else {
+               /* Update the power resource's _device_ power state */
+               resource->device->power.state = ACPI_STATE_D3;
 
-       /* Update the power resource's _device_ power state */
-       resource->device->power.state = ACPI_STATE_D3;
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                 "Power resource [%s] turned off\n",
+                                 resource->name));
+       }
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned off\n",
-                         resource->name));
+ unlock:
+       mutex_unlock(&resource->resource_lock);
 
-       return 0;
+       return result;
 }
 
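
The per-device reference lists give way to a plain counter guarded by resource_lock: the 0-to-1 transition evaluates _ON, the 1-to-0 transition evaluates _OFF, anything in between only moves ref_count, and the evaluation now happens with the lock held rather than after dropping it. One wrinkle as merged: acpi_power_on() still ends in return 0, so a failure from __acpi_power_on() is not propagated to the caller. A compilable miniature of the counting scheme (names illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

struct power_res {
        pthread_mutex_t lock;     /* cf. resource_lock */
        unsigned int ref_count;
};

static void res_on(struct power_res *r)
{
        pthread_mutex_lock(&r->lock);
        if (!r->ref_count++)
                printf("_ON evaluated\n");   /* first user powers it up */
        pthread_mutex_unlock(&r->lock);
}

static void res_off(struct power_res *r)
{
        pthread_mutex_lock(&r->lock);
        if (r->ref_count && !--r->ref_count)
                printf("_OFF evaluated\n");  /* last user powers it down */
        pthread_mutex_unlock(&r->lock);
}

int main(void)
{
        struct power_res r = { PTHREAD_MUTEX_INITIALIZER, 0 };

        res_on(&r);  res_on(&r);    /* second on: counter only */
        res_off(&r); res_off(&r);   /* second off: _OFF runs   */
        return 0;
}
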
 /**
@@ -364,7 +347,7 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
 
        /* Open power resource */
        for (i = 0; i < dev->wakeup.resources.count; i++) {
-               int ret = acpi_power_on(dev->wakeup.resources.handles[i], dev);
+               int ret = acpi_power_on(dev->wakeup.resources.handles[i]);
                if (ret) {
                        printk(KERN_ERR PREFIX "Transition power state\n");
                        dev->wakeup.flags.valid = 0;
@@ -420,7 +403,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
        /* Close power resource */
        for (i = 0; i < dev->wakeup.resources.count; i++) {
                int ret = acpi_power_off_device(
-                               dev->wakeup.resources.handles[i], dev);
+                               dev->wakeup.resources.handles[i]);
                if (ret) {
                        printk(KERN_ERR PREFIX "Transition power state\n");
                        dev->wakeup.flags.valid = 0;
@@ -500,7 +483,7 @@ int acpi_power_transition(struct acpi_device *device, int state)
         * (e.g. so the device doesn't lose power while transitioning).
         */
        for (i = 0; i < tl->count; i++) {
-               result = acpi_power_on(tl->handles[i], device);
+               result = acpi_power_on(tl->handles[i]);
                if (result)
                        goto end;
        }
@@ -513,7 +496,7 @@ int acpi_power_transition(struct acpi_device *device, int state)
         * Then we dereference all power resources used in the current list.
         */
        for (i = 0; i < cl->count; i++) {
-               result = acpi_power_off_device(cl->handles[i], device);
+               result = acpi_power_off_device(cl->handles[i]);
                if (result)
                        goto end;
        }
@@ -551,7 +534,6 @@ static int acpi_power_add(struct acpi_device *device)
 
        resource->device = device;
        mutex_init(&resource->resource_lock);
-       INIT_LIST_HEAD(&resource->reference);
        strcpy(resource->name, device->pnp.bus_id);
        strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
@@ -594,22 +576,14 @@ static int acpi_power_add(struct acpi_device *device)
 
 static int acpi_power_remove(struct acpi_device *device, int type)
 {
-       struct acpi_power_resource *resource = NULL;
-       struct list_head *node, *next;
+       struct acpi_power_resource *resource;
 
-
-       if (!device || !acpi_driver_data(device))
+       if (!device)
                return -EINVAL;
 
        resource = acpi_driver_data(device);
-
-       mutex_lock(&resource->resource_lock);
-       list_for_each_safe(node, next, &resource->reference) {
-               struct acpi_power_reference *ref = container_of(node, struct acpi_power_reference, node);
-               list_del(&ref->node);
-               kfree(ref);
-       }
-       mutex_unlock(&resource->resource_lock);
+       if (!resource)
+               return -EINVAL;
 
        kfree(resource);
 
@@ -619,29 +593,28 @@ static int acpi_power_remove(struct acpi_device *device, int type)
 static int acpi_power_resume(struct acpi_device *device)
 {
        int result = 0, state;
-       struct acpi_power_resource *resource = NULL;
-       struct acpi_power_reference *ref;
+       struct acpi_power_resource *resource;
 
-       if (!device || !acpi_driver_data(device))
+       if (!device)
                return -EINVAL;
 
        resource = acpi_driver_data(device);
+       if (!resource)
+               return -EINVAL;
+
+       mutex_lock(&resource->resource_lock);
 
        result = acpi_power_get_state(device->handle, &state);
        if (result)
-               return result;
+               goto unlock;
 
-       mutex_lock(&resource->resource_lock);
-       if (state == ACPI_POWER_RESOURCE_STATE_OFF &&
-           !list_empty(&resource->reference)) {
-               ref = container_of(resource->reference.next, struct acpi_power_reference, node);
-               mutex_unlock(&resource->resource_lock);
-               result = acpi_power_on(device->handle, ref->device);
-               return result;
-       }
+       if (state == ACPI_POWER_RESOURCE_STATE_OFF && resource->ref_count)
+               result = __acpi_power_on(resource);
 
+ unlock:
        mutex_unlock(&resource->resource_lock);
-       return 0;
+
+       return result;
 }
 
 int __init acpi_power_init(void)
index 347eb21b235302d44d0c5e2d768ce3a8870142f2..85e48047d7b0fd8f549fca988f96ed0f1770a9ae 100644 (file)
 #include <linux/pm.h>
 #include <linux/cpufreq.h>
 #include <linux/cpu.h>
+#ifdef CONFIG_ACPI_PROCFS
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#endif
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/cpuidle.h>
@@ -244,6 +246,7 @@ static int acpi_processor_errata(struct acpi_processor *pr)
        return result;
 }
 
+#ifdef CONFIG_ACPI_PROCFS
 static struct proc_dir_entry *acpi_processor_dir = NULL;
 
 static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
@@ -280,7 +283,16 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
 
        return 0;
 }
-
+#else
+static inline int acpi_processor_add_fs(struct acpi_device *device)
+{
+       return 0;
+}
+static inline int acpi_processor_remove_fs(struct acpi_device *device)
+{
+       return 0;
+}
+#endif
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
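
processor_driver.c now builds its proc interface only under CONFIG_ACPI_PROCFS, with inline stubs of the same signatures when the option is off, so the add/remove call sites need no guards of their own (the proc_mkdir()/remove_proc_entry() calls in init/exit still carry explicit #ifdefs, as the following hunks show). The stub idiom in standalone form:

#include <stdio.h>

#define CONFIG_FEATURE 0   /* flip to 1 to compile the real bodies */

#if CONFIG_FEATURE
static int feature_add(void)    { puts("added");   return 0; }
static int feature_remove(void) { puts("removed"); return 0; }
#else
/* Same signatures, zero cost: callers stay #ifdef-free. */
static inline int feature_add(void)    { return 0; }
static inline int feature_remove(void) { return 0; }
#endif

int main(void)
{
        return feature_add() || feature_remove();
}
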
@@ -842,9 +854,11 @@ static int __init acpi_processor_init(void)
 
        memset(&errata, 0, sizeof(errata));
 
+#ifdef CONFIG_ACPI_PROCFS
        acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
        if (!acpi_processor_dir)
                return -ENOMEM;
+#endif
 
        if (!cpuidle_register_driver(&acpi_idle_driver)) {
                printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
@@ -871,7 +885,9 @@ static int __init acpi_processor_init(void)
 out_cpuidle:
        cpuidle_unregister_driver(&acpi_idle_driver);
 
+#ifdef CONFIG_ACPI_PROCFS
        remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
 
        return result;
 }
@@ -891,7 +907,9 @@ static void __exit acpi_processor_exit(void)
 
        cpuidle_unregister_driver(&acpi_idle_driver);
 
+#ifdef CONFIG_ACPI_PROCFS
        remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
 
        return;
 }
@@ -899,6 +917,4 @@ static void __exit acpi_processor_exit(void)
 module_init(acpi_processor_init);
 module_exit(acpi_processor_exit);
 
-EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
-
 MODULE_ALIAS("processor");
index f4428e82b352940f7b9f8687c3c242b213d48421..dcb38f8ddfda09142f5962cfcfe38b9aa9fc5570 100644 (file)
@@ -64,7 +64,6 @@
 #define ACPI_PROCESSOR_CLASS            "processor"
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_idle");
-#define ACPI_PROCESSOR_FILE_POWER      "power"
 #define PM_TIMER_TICK_NS               (1000000000ULL/PM_TIMER_FREQUENCY)
 #define C2_OVERHEAD                    1       /* 1us */
 #define C3_OVERHEAD                    1       /* 1us */
@@ -1013,7 +1012,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
                strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
                state->exit_latency = cx->latency;
                state->target_residency = cx->latency * latency_factor;
-               state->power_usage = cx->power;
 
                state->flags = 0;
                switch (cx->type) {
index 953b25fb986968ad47be94d0b8ea000347e4468e..fde49b9b1d99ed04eaf4192473ff7ce3bb913fd6 100644 (file)
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_thermal");
 
-/* --------------------------------------------------------------------------
-                                 Limit Interface
-   -------------------------------------------------------------------------- */
-static int acpi_processor_apply_limit(struct acpi_processor *pr)
-{
-       int result = 0;
-       u16 px = 0;
-       u16 tx = 0;
-
-
-       if (!pr)
-               return -EINVAL;
-
-       if (!pr->flags.limit)
-               return -ENODEV;
-
-       if (pr->flags.throttling) {
-               if (pr->limit.user.tx > tx)
-                       tx = pr->limit.user.tx;
-               if (pr->limit.thermal.tx > tx)
-                       tx = pr->limit.thermal.tx;
-
-               result = acpi_processor_set_throttling(pr, tx, false);
-               if (result)
-                       goto end;
-       }
-
-       pr->limit.state.px = px;
-       pr->limit.state.tx = tx;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                         "Processor [%d] limit set to (P%d:T%d)\n", pr->id,
-                         pr->limit.state.px, pr->limit.state.tx));
-
-      end:
-       if (result)
-               printk(KERN_ERR PREFIX "Unable to set limit\n");
-
-       return result;
-}
-
 #ifdef CONFIG_CPU_FREQ
 
 /* If a passive cooling situation is detected, primarily CPUfreq is used, as it
@@ -107,36 +66,6 @@ static int cpu_has_cpufreq(unsigned int cpu)
        return 1;
 }
 
-static int acpi_thermal_cpufreq_increase(unsigned int cpu)
-{
-       if (!cpu_has_cpufreq(cpu))
-               return -ENODEV;
-
-       if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
-               CPUFREQ_THERMAL_MAX_STEP) {
-               per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
-               cpufreq_update_policy(cpu);
-               return 0;
-       }
-
-       return -ERANGE;
-}
-
-static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
-{
-       if (!cpu_has_cpufreq(cpu))
-               return -ENODEV;
-
-       if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
-               (CPUFREQ_THERMAL_MIN_STEP + 1))
-               per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
-       else
-               per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
-       cpufreq_update_policy(cpu);
-       /* We reached max freq again and can leave passive mode */
-       return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
-}
-
 static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
                                         unsigned long event, void *data)
 {
@@ -238,113 +167,6 @@ static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
 
 #endif
 
-int acpi_processor_set_thermal_limit(acpi_handle handle, int type)
-{
-       int result = 0;
-       struct acpi_processor *pr = NULL;
-       struct acpi_device *device = NULL;
-       int tx = 0, max_tx_px = 0;
-
-
-       if ((type < ACPI_PROCESSOR_LIMIT_NONE)
-           || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
-               return -EINVAL;
-
-       result = acpi_bus_get_device(handle, &device);
-       if (result)
-               return result;
-
-       pr = acpi_driver_data(device);
-       if (!pr)
-               return -ENODEV;
-
-       /* Thermal limits are always relative to the current Px/Tx state. */
-       if (pr->flags.throttling)
-               pr->limit.thermal.tx = pr->throttling.state;
-
-       /*
-        * Our default policy is to only use throttling at the lowest
-        * performance state.
-        */
-
-       tx = pr->limit.thermal.tx;
-
-       switch (type) {
-
-       case ACPI_PROCESSOR_LIMIT_NONE:
-               do {
-                       result = acpi_thermal_cpufreq_decrease(pr->id);
-               } while (!result);
-               tx = 0;
-               break;
-
-       case ACPI_PROCESSOR_LIMIT_INCREMENT:
-               /* if going up: P-states first, T-states later */
-
-               result = acpi_thermal_cpufreq_increase(pr->id);
-               if (!result)
-                       goto end;
-               else if (result == -ERANGE)
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                         "At maximum performance state\n"));
-
-               if (pr->flags.throttling) {
-                       if (tx == (pr->throttling.state_count - 1))
-                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                                 "At maximum throttling state\n"));
-                       else
-                               tx++;
-               }
-               break;
-
-       case ACPI_PROCESSOR_LIMIT_DECREMENT:
-               /* if going down: T-states first, P-states later */
-
-               if (pr->flags.throttling) {
-                       if (tx == 0) {
-                               max_tx_px = 1;
-                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                                 "At minimum throttling state\n"));
-                       } else {
-                               tx--;
-                               goto end;
-                       }
-               }
-
-               result = acpi_thermal_cpufreq_decrease(pr->id);
-               if (result) {
-                       /*
-                        * We only could get -ERANGE, 1 or 0.
-                        * In the first two cases we reached max freq again.
-                        */
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                         "At minimum performance state\n"));
-                       max_tx_px = 1;
-               } else
-                       max_tx_px = 0;
-
-               break;
-       }
-
-      end:
-       if (pr->flags.throttling) {
-               pr->limit.thermal.px = 0;
-               pr->limit.thermal.tx = tx;
-
-               result = acpi_processor_apply_limit(pr);
-               if (result)
-                       printk(KERN_ERR PREFIX "Unable to set thermal limit\n");
-
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
-                                 pr->limit.thermal.px, pr->limit.thermal.tx));
-       } else
-               result = 0;
-       if (max_tx_px)
-               return 1;
-       else
-               return result;
-}
-
 int acpi_processor_get_limit_info(struct acpi_processor *pr)
 {
 
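
This deletes the passive-cooling limit engine outright: acpi_processor_apply_limit(), the cpufreq step helpers, and the exported acpi_processor_set_thermal_limit() (its EXPORT_SYMBOL left processor_driver.c above), presumably ceding that job to the generic thermal layer. The computation at the heart of the removed code was a clamp to the stricter of the user and thermal throttling requests:

#include <stdio.h>

/* The effective T-state the removed acpi_processor_apply_limit()
 * programmed: whichever of the two requests throttles harder. */
static unsigned int effective_tx(unsigned int user_tx, unsigned int thermal_tx)
{
        return user_tx > thermal_tx ? user_tx : thermal_tx;
}

int main(void)
{
        printf("T%u\n", effective_tx(2, 5));   /* thermal request wins */
        return 0;
}
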
index 730863855ed54ebdb49c35cca733bf3a92596c0d..ff3632717c5140bd7c8692e0055b74681ece037c 100644 (file)
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/cpufreq.h>
+#ifdef CONFIG_ACPI_PROCFS
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#endif
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -1214,6 +1216,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
        return result;
 }
 
+#ifdef CONFIG_ACPI_PROCFS
 /* proc interface */
 static int acpi_processor_throttling_seq_show(struct seq_file *seq,
                                              void *offset)
@@ -1322,3 +1325,4 @@ const struct file_operations acpi_processor_throttling_fops = {
        .llseek = seq_lseek,
        .release = single_release,
 };
+#endif
index 4ff76e8174ebf716db53b1d5f2d3bbecd82ad332..e5dbedb16bbfdde815170acbea225b69c65224fd 100644 (file)
 #include <linux/timer.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
-
-#ifdef CONFIG_ACPI_SYSFS_POWER
 #include <linux/power_supply.h>
-#endif
 
 #include "sbshc.h"
 
@@ -85,9 +82,7 @@ static const struct acpi_device_id sbs_device_ids[] = {
 MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
 
 struct acpi_battery {
-#ifdef CONFIG_ACPI_SYSFS_POWER
        struct power_supply bat;
-#endif
        struct acpi_sbs *sbs;
 #ifdef CONFIG_ACPI_PROCFS_POWER
        struct proc_dir_entry *proc_entry;
@@ -120,9 +115,7 @@ struct acpi_battery {
 #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
 
 struct acpi_sbs {
-#ifdef CONFIG_ACPI_SYSFS_POWER
        struct power_supply charger;
-#endif
        struct acpi_device *device;
        struct acpi_smb_hc *hc;
        struct mutex lock;
@@ -166,7 +159,6 @@ static inline int acpi_battery_scale(struct acpi_battery *battery)
            acpi_battery_ipscale(battery);
 }
 
-#ifdef CONFIG_ACPI_SYSFS_POWER
 static int sbs_get_ac_property(struct power_supply *psy,
                               enum power_supply_property psp,
                               union power_supply_propval *val)
@@ -313,7 +305,6 @@ static enum power_supply_property sbs_energy_battery_props[] = {
        POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
-#endif
 
 /* --------------------------------------------------------------------------
                             Smart Battery System Management
@@ -449,7 +440,6 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
        return result;
 }
 
-#ifdef CONFIG_ACPI_SYSFS_POWER
 static ssize_t acpi_battery_alarm_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
@@ -479,7 +469,6 @@ static struct device_attribute alarm_attr = {
        .show = acpi_battery_alarm_show,
        .store = acpi_battery_alarm_store,
 };
-#endif
 
 /* --------------------------------------------------------------------------
                               FS Interface (/proc/acpi)
@@ -798,7 +787,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
                        &acpi_battery_state_fops, &acpi_battery_alarm_fops,
                        battery);
 #endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
        battery->bat.name = battery->name;
        battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
        if (!acpi_battery_mode(battery)) {
@@ -819,7 +807,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
                goto end;
        battery->have_sysfs_alarm = 1;
       end:
-#endif
        printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n",
               ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
               battery->name, battery->present ? "present" : "absent");
@@ -828,17 +815,13 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
 
 static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
 {
-#if defined(CONFIG_ACPI_SYSFS_POWER) || defined(CONFIG_ACPI_PROCFS_POWER)
        struct acpi_battery *battery = &sbs->battery[id];
-#endif
 
-#ifdef CONFIG_ACPI_SYSFS_POWER
        if (battery->bat.dev) {
                if (battery->have_sysfs_alarm)
                        device_remove_file(battery->bat.dev, &alarm_attr);
                power_supply_unregister(&battery->bat);
        }
-#endif
 #ifdef CONFIG_ACPI_PROCFS_POWER
        if (battery->proc_entry)
                acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir);
@@ -859,14 +842,12 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
        if (result)
                goto end;
 #endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
        sbs->charger.name = "sbs-charger";
        sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
        sbs->charger.properties = sbs_ac_props;
        sbs->charger.num_properties = ARRAY_SIZE(sbs_ac_props);
        sbs->charger.get_property = sbs_get_ac_property;
        power_supply_register(&sbs->device->dev, &sbs->charger);
-#endif
        printk(KERN_INFO PREFIX "%s [%s]: AC Adapter [%s] (%s)\n",
               ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
               ACPI_AC_DIR_NAME, sbs->charger_present ? "on-line" : "off-line");
@@ -876,10 +857,8 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
 
 static void acpi_charger_remove(struct acpi_sbs *sbs)
 {
-#ifdef CONFIG_ACPI_SYSFS_POWER
        if (sbs->charger.dev)
                power_supply_unregister(&sbs->charger);
-#endif
 #ifdef CONFIG_ACPI_PROCFS_POWER
        if (sbs->charger_entry)
                acpi_sbs_remove_fs(&sbs->charger_entry, acpi_ac_dir);
@@ -900,9 +879,7 @@ static void acpi_sbs_callback(void *context)
                                              ACPI_SBS_NOTIFY_STATUS,
                                              sbs->charger_present);
 #endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
                kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
-#endif
        }
        if (sbs->manager_present) {
                for (id = 0; id < MAX_SBS_BAT; ++id) {
@@ -919,9 +896,7 @@ static void acpi_sbs_callback(void *context)
                                                      ACPI_SBS_NOTIFY_STATUS,
                                                      bat->present);
 #endif
-#ifdef CONFIG_ACPI_SYSFS_POWER
                        kobject_uevent(&bat->bat.dev->kobj, KOBJ_CHANGE);
-#endif
                }
        }
 }
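
With CONFIG_ACPI_SYSFS_POWER retired, the power_supply members of struct acpi_battery and struct acpi_sbs exist unconditionally, so to_acpi_battery() (kept above, stray trailing semicolon and all) always has a live member to map back from. The container_of() trick it relies on, standalone:

#include <stddef.h>
#include <stdio.h>

/* Recover the enclosing object from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct battery {
        int id;
        int bat;    /* stands in for the embedded struct power_supply */
};

int main(void)
{
        struct battery b = { 42, 0 };
        int *m = &b.bat;

        printf("%d\n", container_of(m, struct battery, bat)->id);
        return 0;
}
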
index b23825ecfa372c192ff92fcf79b36c6534332e9c..2b6c21d86b9885571b41679586636fff3b6c53bb 100644 (file)
@@ -26,6 +26,8 @@ extern struct acpi_device *acpi_root;
 
 #define ACPI_IS_ROOT_DEVICE(device)    (!(device)->parent)
 
+static const char *dummy_hid = "device";
+
 static LIST_HEAD(acpi_device_list);
 static LIST_HEAD(acpi_bus_id_list);
 DEFINE_MUTEX(acpi_device_lock);
@@ -49,6 +51,9 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
        int count;
        struct acpi_hardware_id *id;
 
+       if (list_empty(&acpi_dev->pnp.ids))
+               return 0;
+
        len = snprintf(modalias, size, "acpi:");
        size -= len;
 
@@ -202,13 +207,15 @@ static int acpi_device_setup_files(struct acpi_device *dev)
                        goto end;
        }
 
-       result = device_create_file(&dev->dev, &dev_attr_hid);
-       if (result)
-               goto end;
+       if (!list_empty(&dev->pnp.ids)) {
+               result = device_create_file(&dev->dev, &dev_attr_hid);
+               if (result)
+                       goto end;
 
-       result = device_create_file(&dev->dev, &dev_attr_modalias);
-       if (result)
-               goto end;
+               result = device_create_file(&dev->dev, &dev_attr_modalias);
+               if (result)
+                       goto end;
+       }
 
         /*
          * If device has _EJ0, 'eject' file is created that is used to trigger
@@ -316,6 +323,9 @@ static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
        struct acpi_device *acpi_dev = to_acpi_device(dev);
        int len;
 
+       if (list_empty(&acpi_dev->pnp.ids))
+               return 0;
+
        if (add_uevent_var(env, "MODALIAS="))
                return -ENOMEM;
        len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
@@ -1010,10 +1020,13 @@ static int acpi_dock_match(struct acpi_device *device)
        return acpi_get_handle(device->handle, "_DCK", &tmp);
 }
 
-char *acpi_device_hid(struct acpi_device *device)
+const char *acpi_device_hid(struct acpi_device *device)
 {
        struct acpi_hardware_id *hid;
 
+       if (list_empty(&device->pnp.ids))
+               return dummy_hid;
+
        hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
        return hid->id;
 }
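
Rather than stamping every HID-less object (PCI bridges, slots, and so on) with a stored "device" ID at scan time, the ID list stays empty and acpi_device_hid() hands back the shared dummy_hid constant on demand; the list_empty() checks added above make the sysfs hid/modalias attributes and MODALIAS uevents simply skip such devices. The lookup shape, standalone (PNP0C0A, the standard battery HID, appears only as an example):

#include <stdio.h>

static const char *dummy_hid = "device";

struct dev {
        const char *first_id;   /* NULL models an empty pnp.ids list */
};

static const char *device_hid(const struct dev *d)
{
        return d->first_id ? d->first_id : dummy_hid;
}

int main(void)
{
        struct dev pci_slot = { NULL }, battery = { "PNP0C0A" };

        printf("%s / %s\n", device_hid(&pci_slot), device_hid(&battery));
        return 0;
}
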
@@ -1142,16 +1155,6 @@ static void acpi_device_set_id(struct acpi_device *device)
                acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
                break;
        }
-
-       /*
-        * We build acpi_devices for some objects that don't have _HID or _CID,
-        * e.g., PCI bridges and slots.  Drivers can't bind to these objects,
-        * but we do use them indirectly by traversing the acpi_device tree.
-        * This generic ID isn't useful for driver binding, but it provides
-        * the useful property that "every acpi_device has an ID."
-        */
-       if (list_empty(&device->pnp.ids))
-               acpi_add_id(device, "device");
 }
 
 static int acpi_device_set_context(struct acpi_device *device)
@@ -1431,6 +1434,7 @@ EXPORT_SYMBOL(acpi_bus_add);
 int acpi_bus_start(struct acpi_device *device)
 {
        struct acpi_bus_ops ops;
+       int result;
 
        if (!device)
                return -EINVAL;
@@ -1438,7 +1442,11 @@ int acpi_bus_start(struct acpi_device *device)
        memset(&ops, 0, sizeof(ops));
        ops.acpi_op_start = 1;
 
-       return acpi_bus_scan(device->handle, &ops, NULL);
+       result = acpi_bus_scan(device->handle, &ops, NULL);
+
+       acpi_update_gpes();
+
+       return result;
 }
 EXPORT_SYMBOL(acpi_bus_start);
 
@@ -1552,6 +1560,8 @@ int __init acpi_scan_init(void)
 
        if (result)
                acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
+       else
+               acpi_update_gpes();
 
        return result;
 }
index 4754ff6e70e6daa25b13df6f115745eb7bfb7e55..721d93b3ceee78c3b271f5f84ffe4fc795e435e1 100644 (file)
@@ -25,7 +25,9 @@
 #include "internal.h"
 #include "sleep.h"
 
-u8 sleep_states[ACPI_S_STATE_COUNT];
+static u8 sleep_states[ACPI_S_STATE_COUNT];
+
+static u32 acpi_target_sleep_state = ACPI_STATE_S0;
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
 {
@@ -79,8 +81,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
 }
 
 #ifdef CONFIG_ACPI_SLEEP
-static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-
 /*
  * The ACPI specification wants us to save NVS memory regions during hibernation
  * and to restore them during the subsequent resume.  Windows does that also for
@@ -419,6 +419,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
                },
        },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Sony Vaio VPCEB1Z1E",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
+               },
+       },
        {},
 };
 #endif /* CONFIG_SUSPEND */
@@ -562,7 +570,7 @@ int acpi_suspend(u32 acpi_state)
        return -EINVAL;
 }
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_OPS
 /**
  *     acpi_pm_device_sleep_state - return preferred power state of ACPI device
  *             in the system sleep state given by %acpi_target_sleep_state
@@ -624,7 +632,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
         * can wake the system.  _S0W may be valid, too.
         */
        if (acpi_target_sleep_state == ACPI_STATE_S0 ||
-           (device_may_wakeup(dev) && adev->wakeup.state.enabled &&
+           (device_may_wakeup(dev) &&
             adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
                acpi_status status;
 
@@ -632,7 +640,9 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
                status = acpi_evaluate_integer(handle, acpi_method, NULL,
                                                &d_max);
                if (ACPI_FAILURE(status)) {
-                       d_max = d_min;
+                       if (acpi_target_sleep_state != ACPI_STATE_S0 ||
+                           status != AE_NOT_FOUND)
+                               d_max = d_min;
                } else if (d_max < d_min) {
                        /* Warn the user of the broken DSDT */
                        printk(KERN_WARNING "ACPI: Wrong value from %s\n",
@@ -646,7 +656,9 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
                *d_min_p = d_min;
        return d_max;
 }
+#endif /* CONFIG_PM_OPS */
 
+#ifdef CONFIG_PM_SLEEP
 /**
  *     acpi_pm_device_sleep_wake - enable or disable the system wake-up
  *                                  capability of given device
@@ -677,7 +689,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
 
        return error;
 }
-#endif
+#endif  /* CONFIG_PM_SLEEP */
 
 static void acpi_power_off_prepare(void)
 {
@@ -702,7 +714,7 @@ static void acpi_power_off(void)
  * paths through the BIOS, so disable _GTS and _BFS by default,
  * but do speak up and offer the option to enable them.
  */
-void __init acpi_gts_bfs_check(void)
+static void __init acpi_gts_bfs_check(void)
 {
        acpi_handle dummy;
 
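
Two changes land in sleep.c: acpi_pm_device_sleep_state() moves under CONFIG_PM_OPS, presumably so runtime PM code can use it without full sleep support built in, and its fallback softens; while the system is running (target state S0), a missing _SxD/_SxW object (AE_NOT_FOUND) no longer pins the device to d_min. A standalone restatement of that decision (simplified; status codes and state numbers modeled as plain enums):

#include <stdio.h>

enum { AE_OK, AE_NOT_FOUND, AE_ERROR };
enum { S0 = 0 };

static int pick_d_max(int target_s, int status, int d_min, int d_from_sxd)
{
        int d_max = 3;                       /* D3 unless constrained */

        if (status != AE_OK) {
                /* Only cap when the failure is not "no _S0D at runtime". */
                if (target_s != S0 || status != AE_NOT_FOUND)
                        d_max = d_min;
        } else {
                /* A broken DSDT can report d_max below d_min; clamp it. */
                d_max = d_from_sxd < d_min ? d_min : d_from_sxd;
        }
        return d_max;
}

int main(void)
{
        printf("%d\n", pick_d_max(S0, AE_NOT_FOUND, 0, 0)); /* stays D3 */
        return 0;
}
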
index d8821805c3bc9ab4637732d5d7ee6f105620b83f..74d59c8f4678a385ccd575816c6f7ad1e1727456 100644 (file)
@@ -1,5 +1,4 @@
 
-extern u8 sleep_states[];
 extern int acpi_suspend(u32 state);
 
 extern void acpi_enable_wakeup_devices(u8 sleep_state);
index 2f8f17131d9fb9b19094284fcfa2dd5f0092f7b8..5a27b0a313151714976f88b2299df0937d29c8be 100644 (file)
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
-
 #include <linux/jiffies.h>
 #include <linux/kmod.h>
 #include <linux/reboot.h>
@@ -195,61 +189,6 @@ struct acpi_thermal {
        struct mutex lock;
 };
 
-#ifdef CONFIG_ACPI_PROCFS
-static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
-static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_thermal_write_cooling_mode(struct file *,
-                                              const char __user *, size_t,
-                                              loff_t *);
-static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_thermal_write_polling(struct file *, const char __user *,
-                                         size_t, loff_t *);
-
-static const struct file_operations acpi_thermal_state_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_thermal_state_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_temp_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_thermal_temp_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_trip_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_thermal_trip_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_cooling_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_thermal_cooling_open_fs,
-       .read = seq_read,
-       .write = acpi_thermal_write_cooling_mode,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static const struct file_operations acpi_thermal_polling_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_thermal_polling_open_fs,
-       .read = seq_read,
-       .write = acpi_thermal_write_polling,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-#endif /* CONFIG_ACPI_PROCFS*/
-
 /* --------------------------------------------------------------------------
                              Thermal Zone Management
    -------------------------------------------------------------------------- */
@@ -957,358 +896,6 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
 }
 
 
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc)
-   -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-static struct proc_dir_entry *acpi_thermal_dir;
-
-static int acpi_thermal_state_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_thermal *tz = seq->private;
-
-
-       if (!tz)
-               goto end;
-
-       seq_puts(seq, "state:                   ");
-
-       if (!tz->state.critical && !tz->state.hot && !tz->state.passive
-           && !tz->state.active)
-               seq_puts(seq, "ok\n");
-       else {
-               if (tz->state.critical)
-                       seq_puts(seq, "critical ");
-               if (tz->state.hot)
-                       seq_puts(seq, "hot ");
-               if (tz->state.passive)
-                       seq_puts(seq, "passive ");
-               if (tz->state.active)
-                       seq_printf(seq, "active[%d]", tz->state.active_index);
-               seq_puts(seq, "\n");
-       }
-
-      end:
-       return 0;
-}
-
-static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_thermal_state_seq_show, PDE(inode)->data);
-}
-
-static int acpi_thermal_temp_seq_show(struct seq_file *seq, void *offset)
-{
-       int result = 0;
-       struct acpi_thermal *tz = seq->private;
-
-
-       if (!tz)
-               goto end;
-
-       result = acpi_thermal_get_temperature(tz);
-       if (result)
-               goto end;
-
-       seq_printf(seq, "temperature:             %ld C\n",
-                  KELVIN_TO_CELSIUS(tz->temperature));
-
-      end:
-       return 0;
-}
-
-static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_thermal_temp_seq_show, PDE(inode)->data);
-}
-
-static int acpi_thermal_trip_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_thermal *tz = seq->private;
-       struct acpi_device *device;
-       acpi_status status;
-
-       int i = 0;
-       int j = 0;
-
-
-       if (!tz)
-               goto end;
-
-       if (tz->trips.critical.flags.valid)
-               seq_printf(seq, "critical (S5):           %ld C%s",
-                          KELVIN_TO_CELSIUS(tz->trips.critical.temperature),
-                          nocrt ? " <disabled>\n" : "\n");
-
-       if (tz->trips.hot.flags.valid)
-               seq_printf(seq, "hot (S4):                %ld C%s",
-                          KELVIN_TO_CELSIUS(tz->trips.hot.temperature),
-                          nocrt ? " <disabled>\n" : "\n");
-
-       if (tz->trips.passive.flags.valid) {
-               seq_printf(seq,
-                          "passive:                 %ld C: tc1=%lu tc2=%lu tsp=%lu devices=",
-                          KELVIN_TO_CELSIUS(tz->trips.passive.temperature),
-                          tz->trips.passive.tc1, tz->trips.passive.tc2,
-                          tz->trips.passive.tsp);
-               for (j = 0; j < tz->trips.passive.devices.count; j++) {
-                       status = acpi_bus_get_device(tz->trips.passive.devices.
-                                                    handles[j], &device);
-                       seq_printf(seq, "%4.4s ", status ? "" :
-                                  acpi_device_bid(device));
-               }
-               seq_puts(seq, "\n");
-       } else {
-               seq_printf(seq, "passive (forced):");
-               if (tz->thermal_zone->forced_passive)
-                       seq_printf(seq, "        %i C\n",
-                                  tz->thermal_zone->forced_passive / 1000);
-               else
-                       seq_printf(seq, "<not set>\n");
-       }
-
-       for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
-               if (!(tz->trips.active[i].flags.valid))
-                       break;
-               seq_printf(seq, "active[%d]:               %ld C: devices=",
-                          i,
-                          KELVIN_TO_CELSIUS(tz->trips.active[i].temperature));
-               for (j = 0; j < tz->trips.active[i].devices.count; j++){
-                       status = acpi_bus_get_device(tz->trips.active[i].
-                                                    devices.handles[j],
-                                                    &device);
-                       seq_printf(seq, "%4.4s ", status ? "" :
-                                  acpi_device_bid(device));
-               }
-               seq_puts(seq, "\n");
-       }
-
-      end:
-       return 0;
-}
-
-static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data);
-}
-
-static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_thermal *tz = seq->private;
-
-
-       if (!tz)
-               goto end;
-
-       if (!tz->flags.cooling_mode)
-               seq_puts(seq, "<setting not supported>\n");
-       else
-               seq_puts(seq, "0 - Active; 1 - Passive\n");
-
-      end:
-       return 0;
-}
-
-static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_thermal_cooling_seq_show,
-                          PDE(inode)->data);
-}
-
-static ssize_t
-acpi_thermal_write_cooling_mode(struct file *file,
-                               const char __user * buffer,
-                               size_t count, loff_t * ppos)
-{
-       struct seq_file *m = file->private_data;
-       struct acpi_thermal *tz = m->private;
-       int result = 0;
-       char mode_string[12] = { '\0' };
-
-
-       if (!tz || (count > sizeof(mode_string) - 1))
-               return -EINVAL;
-
-       if (!tz->flags.cooling_mode)
-               return -ENODEV;
-
-       if (copy_from_user(mode_string, buffer, count))
-               return -EFAULT;
-
-       mode_string[count] = '\0';
-
-       result = acpi_thermal_set_cooling_mode(tz,
-                                              simple_strtoul(mode_string, NULL,
-                                                             0));
-       if (result)
-               return result;
-
-       acpi_thermal_check(tz);
-
-       return count;
-}
-
-static int acpi_thermal_polling_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_thermal *tz = seq->private;
-
-
-       if (!tz)
-               goto end;
-
-       if (!tz->thermal_zone->polling_delay) {
-               seq_puts(seq, "<polling disabled>\n");
-               goto end;
-       }
-
-       seq_printf(seq, "polling frequency:       %d seconds\n",
-                  (tz->thermal_zone->polling_delay / 1000));
-
-      end:
-       return 0;
-}
-
-static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_thermal_polling_seq_show,
-                          PDE(inode)->data);
-}
-
-static int acpi_thermal_set_polling(struct acpi_thermal *tz, int seconds)
-{
-       if (!tz)
-               return -EINVAL;
-
-       /* Convert value to deci-seconds */
-       tz->polling_frequency = seconds * 10;
-
-       tz->thermal_zone->polling_delay = seconds * 1000;
-
-       if (tz->tz_enabled)
-               thermal_zone_device_update(tz->thermal_zone);
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                         "Polling frequency set to %lu seconds\n",
-                         tz->polling_frequency/10));
-
-       return 0;
-}
-
-static ssize_t
-acpi_thermal_write_polling(struct file *file,
-                          const char __user * buffer,
-                          size_t count, loff_t * ppos)
-{
-       struct seq_file *m = file->private_data;
-       struct acpi_thermal *tz = m->private;
-       int result = 0;
-       char polling_string[12] = { '\0' };
-       int seconds = 0;
-
-
-       if (!tz || (count > sizeof(polling_string) - 1))
-               return -EINVAL;
-
-       if (copy_from_user(polling_string, buffer, count))
-               return -EFAULT;
-
-       polling_string[count] = '\0';
-
-       seconds = simple_strtoul(polling_string, NULL, 0);
-
-       result = acpi_thermal_set_polling(tz, seconds);
-       if (result)
-               return result;
-
-       acpi_thermal_check(tz);
-
-       return count;
-}
-
-static int acpi_thermal_add_fs(struct acpi_device *device)
-{
-       struct proc_dir_entry *entry = NULL;
-
-
-       if (!acpi_device_dir(device)) {
-               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-                                                    acpi_thermal_dir);
-               if (!acpi_device_dir(device))
-                       return -ENODEV;
-       }
-
-       /* 'state' [R] */
-       entry = proc_create_data(ACPI_THERMAL_FILE_STATE,
-                                S_IRUGO, acpi_device_dir(device),
-                                &acpi_thermal_state_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               return -ENODEV;
-
-       /* 'temperature' [R] */
-       entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE,
-                                S_IRUGO, acpi_device_dir(device),
-                                &acpi_thermal_temp_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               return -ENODEV;
-
-       /* 'trip_points' [R] */
-       entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS,
-                                S_IRUGO,
-                                acpi_device_dir(device),
-                                &acpi_thermal_trip_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               return -ENODEV;
-
-       /* 'cooling_mode' [R/W] */
-       entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE,
-                                S_IFREG | S_IRUGO | S_IWUSR,
-                                acpi_device_dir(device),
-                                &acpi_thermal_cooling_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               return -ENODEV;
-
-       /* 'polling_frequency' [R/W] */
-       entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ,
-                                S_IFREG | S_IRUGO | S_IWUSR,
-                                acpi_device_dir(device),
-                                &acpi_thermal_polling_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               return -ENODEV;
-       return 0;
-}
-
-static int acpi_thermal_remove_fs(struct acpi_device *device)
-{
-
-       if (acpi_device_dir(device)) {
-               remove_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ,
-                                 acpi_device_dir(device));
-               remove_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE,
-                                 acpi_device_dir(device));
-               remove_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS,
-                                 acpi_device_dir(device));
-               remove_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE,
-                                 acpi_device_dir(device));
-               remove_proc_entry(ACPI_THERMAL_FILE_STATE,
-                                 acpi_device_dir(device));
-               remove_proc_entry(acpi_device_bid(device), acpi_thermal_dir);
-               acpi_device_dir(device) = NULL;
-       }
-
-       return 0;
-}
-#else
-static inline int acpi_thermal_add_fs(struct acpi_device *device) { return 0; }
-static inline int acpi_thermal_remove_fs(struct acpi_device *device)
-{
-       return 0;
-}
-#endif /* CONFIG_ACPI_PROCFS */
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
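
The thermal driver loses its entire /proc interface; the same information stays reachable through the generic thermal zone the driver still registers (tz->thermal_zone). For reference, ACPI thermal objects report temperature in tenths of a degree Kelvin, which the removed seq_show handlers converted with KELVIN_TO_CELSIUS(); roughly:

#include <stdio.h>

/* Deci-Kelvin to whole degrees Celsius, rounding omitted for brevity. */
static long deciK_to_celsius(long t)
{
        return (t - 2732) / 10;
}

int main(void)
{
        printf("%ld C\n", deciK_to_celsius(3032));   /* prints 30 C */
        return 0;
}
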
@@ -1428,17 +1015,11 @@ static int acpi_thermal_add(struct acpi_device *device)
        if (result)
                goto free_memory;
 
-       result = acpi_thermal_add_fs(device);
-       if (result)
-               goto unregister_thermal_zone;
-
        printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
               acpi_device_name(device), acpi_device_bid(device),
               KELVIN_TO_CELSIUS(tz->temperature));
        goto end;
 
-unregister_thermal_zone:
-       thermal_zone_device_unregister(tz->thermal_zone);
 free_memory:
        kfree(tz);
 end:
@@ -1454,7 +1035,6 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
 
        tz = acpi_driver_data(device);
 
-       acpi_thermal_remove_fs(device);
        acpi_thermal_unregister_thermal_zone(tz);
        mutex_destroy(&tz->lock);
        kfree(tz);
@@ -1580,19 +1160,9 @@ static int __init acpi_thermal_init(void)
                return -ENODEV;
        }
 
-#ifdef CONFIG_ACPI_PROCFS
-       acpi_thermal_dir = proc_mkdir(ACPI_THERMAL_CLASS, acpi_root_dir);
-       if (!acpi_thermal_dir)
-               return -ENODEV;
-#endif
-
        result = acpi_bus_register_driver(&acpi_thermal_driver);
-       if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS
-               remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
-#endif
+       if (result < 0)
                return -ENODEV;
-       }
 
        return 0;
 }
@@ -1602,10 +1172,6 @@ static void __exit acpi_thermal_exit(void)
 
        acpi_bus_unregister_driver(&acpi_thermal_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
-       remove_proc_entry(ACPI_THERMAL_CLASS, acpi_root_dir);
-#endif
-
        return;
 }
 
index 67dec0c675aae4d59e71c689ccc450ff5896c5c6..5cd0228d2daa0c65d22b8c00c52c50d3124aaa1a 100644 (file)
@@ -30,8 +30,6 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
 #include <linux/input.h>
 #include <linux/backlight.h>
 #include <linux/thermal.h>
@@ -152,9 +150,6 @@ struct acpi_video_bus {
        struct acpi_video_bus_flags flags;
        struct list_head video_device_list;
        struct mutex device_list_lock;  /* protects video_device_list */
-#ifdef CONFIG_ACPI_PROCFS
-       struct proc_dir_entry *dir;
-#endif
        struct input_dev *input;
        char phys[32];  /* for input device */
        struct notifier_block pm_nb;
@@ -210,108 +205,6 @@ struct acpi_video_device {
        struct output_device *output_dev;
 };
 
-#ifdef CONFIG_ACPI_PROCFS
-/* bus */
-static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file);
-static const struct file_operations acpi_video_bus_info_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_video_bus_info_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file);
-static const struct file_operations acpi_video_bus_ROM_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_video_bus_ROM_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int acpi_video_bus_POST_info_open_fs(struct inode *inode,
-                                           struct file *file);
-static const struct file_operations acpi_video_bus_POST_info_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_video_bus_POST_info_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_video_bus_write_POST(struct file *file,
-       const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_bus_POST_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_video_bus_POST_open_fs,
-       .read = seq_read,
-       .write = acpi_video_bus_write_POST,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_video_bus_write_DOS(struct file *file,
-       const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_bus_DOS_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_video_bus_DOS_open_fs,
-       .read = seq_read,
-       .write = acpi_video_bus_write_DOS,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-/* device */
-static int acpi_video_device_info_open_fs(struct inode *inode,
-                                         struct file *file);
-static const struct file_operations acpi_video_device_info_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_video_device_info_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int acpi_video_device_state_open_fs(struct inode *inode,
-                                          struct file *file);
-static ssize_t acpi_video_device_write_state(struct file *file,
-       const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_device_state_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_video_device_state_open_fs,
-       .read = seq_read,
-       .write = acpi_video_device_write_state,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int acpi_video_device_brightness_open_fs(struct inode *inode,
-                                               struct file *file);
-static ssize_t acpi_video_device_write_brightness(struct file *file,
-       const char __user *buffer, size_t count, loff_t *data);
-static const struct file_operations acpi_video_device_brightness_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_video_device_brightness_open_fs,
-       .read = seq_read,
-       .write = acpi_video_device_write_brightness,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int acpi_video_device_EDID_open_fs(struct inode *inode,
-                                         struct file *file);
-static const struct file_operations acpi_video_device_EDID_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_video_device_EDID_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-#endif /* CONFIG_ACPI_PROCFS */
-
 static const char device_decode[][30] = {
        "motherboard VGA device",
        "PCI VGA device",
@@ -1110,646 +1003,6 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
        return status;
 }
 
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc)
-   -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_PROCFS
-
-static struct proc_dir_entry *acpi_video_dir;
-
-/* video devices */
-
-static int acpi_video_device_info_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_video_device *dev = seq->private;
-
-
-       if (!dev)
-               goto end;
-
-       seq_printf(seq, "device_id:    0x%04x\n", (u32) dev->device_id);
-       seq_printf(seq, "type:         ");
-       if (dev->flags.crt)
-               seq_printf(seq, "CRT\n");
-       else if (dev->flags.lcd)
-               seq_printf(seq, "LCD\n");
-       else if (dev->flags.tvout)
-               seq_printf(seq, "TVOUT\n");
-       else if (dev->flags.dvi)
-               seq_printf(seq, "DVI\n");
-       else
-               seq_printf(seq, "UNKNOWN\n");
-
-       seq_printf(seq, "known by bios: %s\n", dev->flags.bios ? "yes" : "no");
-
-      end:
-       return 0;
-}
-
-static int
-acpi_video_device_info_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_video_device_info_seq_show,
-                          PDE(inode)->data);
-}
-
-static int
-acpi_video_device_query(struct acpi_video_device *device,
-                       unsigned long long *state)
-{
-       int status;
-
-       status = acpi_evaluate_integer(device->dev->handle, "_DGS",
-                                      NULL, state);
-
-       return status;
-}
-
-static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset)
-{
-       int status;
-       struct acpi_video_device *dev = seq->private;
-       unsigned long long state;
-
-
-       if (!dev)
-               goto end;
-
-       status = acpi_video_device_get_state(dev, &state);
-       seq_printf(seq, "state:     ");
-       if (ACPI_SUCCESS(status))
-               seq_printf(seq, "0x%02llx\n", state);
-       else
-               seq_printf(seq, "<not supported>\n");
-
-       status = acpi_video_device_query(dev, &state);
-       seq_printf(seq, "query:     ");
-       if (ACPI_SUCCESS(status))
-               seq_printf(seq, "0x%02llx\n", state);
-       else
-               seq_printf(seq, "<not supported>\n");
-
-      end:
-       return 0;
-}
-
-static int
-acpi_video_device_state_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_video_device_state_seq_show,
-                          PDE(inode)->data);
-}
-
-static ssize_t
-acpi_video_device_write_state(struct file *file,
-                             const char __user * buffer,
-                             size_t count, loff_t * data)
-{
-       int status;
-       struct seq_file *m = file->private_data;
-       struct acpi_video_device *dev = m->private;
-       char str[12] = { 0 };
-       u32 state = 0;
-
-
-       if (!dev || count >= sizeof(str))
-               return -EINVAL;
-
-       if (copy_from_user(str, buffer, count))
-               return -EFAULT;
-
-       str[count] = 0;
-       state = simple_strtoul(str, NULL, 0);
-       state &= ((1ul << 31) | (1ul << 30) | (1ul << 0));
-
-       status = acpi_video_device_set_state(dev, state);
-
-       if (status)
-               return -EFAULT;
-
-       return count;
-}
-
-static int
-acpi_video_device_brightness_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_video_device *dev = seq->private;
-       int i;
-
-
-       if (!dev || !dev->brightness) {
-               seq_printf(seq, "<not supported>\n");
-               return 0;
-       }
-
-       seq_printf(seq, "levels: ");
-       for (i = 2; i < dev->brightness->count; i++)
-               seq_printf(seq, " %d", dev->brightness->levels[i]);
-       seq_printf(seq, "\ncurrent: %d\n", dev->brightness->curr);
-
-       return 0;
-}
-
-static int
-acpi_video_device_brightness_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_video_device_brightness_seq_show,
-                          PDE(inode)->data);
-}
-
-static ssize_t
-acpi_video_device_write_brightness(struct file *file,
-                                  const char __user * buffer,
-                                  size_t count, loff_t * data)
-{
-       struct seq_file *m = file->private_data;
-       struct acpi_video_device *dev = m->private;
-       char str[5] = { 0 };
-       unsigned int level = 0;
-       int i;
-
-
-       if (!dev || !dev->brightness || count >= sizeof(str))
-               return -EINVAL;
-
-       if (copy_from_user(str, buffer, count))
-               return -EFAULT;
-
-       str[count] = 0;
-       level = simple_strtoul(str, NULL, 0);
-
-       if (level > 100)
-               return -EFAULT;
-
-       /* validate through the list of available levels */
-       for (i = 2; i < dev->brightness->count; i++)
-               if (level == dev->brightness->levels[i]) {
-                       if (!acpi_video_device_lcd_set_level(dev, level))
-                               return count;
-                       break;
-               }
-
-       return -EINVAL;
-}
-
-static int acpi_video_device_EDID_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_video_device *dev = seq->private;
-       int status;
-       int i;
-       union acpi_object *edid = NULL;
-
-
-       if (!dev)
-               goto out;
-
-       status = acpi_video_device_EDID(dev, &edid, 128);
-       if (ACPI_FAILURE(status)) {
-               status = acpi_video_device_EDID(dev, &edid, 256);
-       }
-
-       if (ACPI_FAILURE(status)) {
-               goto out;
-       }
-
-       if (edid && edid->type == ACPI_TYPE_BUFFER) {
-               for (i = 0; i < edid->buffer.length; i++)
-                       seq_putc(seq, edid->buffer.pointer[i]);
-       }
-
-      out:
-       if (!edid)
-               seq_printf(seq, "<not supported>\n");
-       else
-               kfree(edid);
-
-       return 0;
-}
-
-static int
-acpi_video_device_EDID_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_video_device_EDID_seq_show,
-                          PDE(inode)->data);
-}
-
-static int acpi_video_device_add_fs(struct acpi_device *device)
-{
-       struct proc_dir_entry *entry, *device_dir;
-       struct acpi_video_device *vid_dev;
-
-       vid_dev = acpi_driver_data(device);
-       if (!vid_dev)
-               return -ENODEV;
-
-       device_dir = proc_mkdir(acpi_device_bid(device),
-                               vid_dev->video->dir);
-       if (!device_dir)
-               return -ENOMEM;
-
-       /* 'info' [R] */
-       entry = proc_create_data("info", S_IRUGO, device_dir,
-                       &acpi_video_device_info_fops, acpi_driver_data(device));
-       if (!entry)
-               goto err_remove_dir;
-
-       /* 'state' [R/W] */
-       entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR,
-                                device_dir,
-                                &acpi_video_device_state_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               goto err_remove_info;
-
-       /* 'brightness' [R/W] */
-       entry = proc_create_data("brightness", S_IFREG | S_IRUGO | S_IWUSR,
-                                device_dir,
-                                &acpi_video_device_brightness_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               goto err_remove_state;
-
-       /* 'EDID' [R] */
-       entry = proc_create_data("EDID", S_IRUGO, device_dir,
-                                &acpi_video_device_EDID_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               goto err_remove_brightness;
-
-       acpi_device_dir(device) = device_dir;
-
-       return 0;
-
- err_remove_brightness:
-       remove_proc_entry("brightness", device_dir);
- err_remove_state:
-       remove_proc_entry("state", device_dir);
- err_remove_info:
-       remove_proc_entry("info", device_dir);
- err_remove_dir:
-       remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
-       return -ENOMEM;
-}
-
-static int acpi_video_device_remove_fs(struct acpi_device *device)
-{
-       struct acpi_video_device *vid_dev;
-       struct proc_dir_entry *device_dir;
-
-       vid_dev = acpi_driver_data(device);
-       if (!vid_dev || !vid_dev->video || !vid_dev->video->dir)
-               return -ENODEV;
-
-       device_dir = acpi_device_dir(device);
-       if (device_dir) {
-               remove_proc_entry("info", device_dir);
-               remove_proc_entry("state", device_dir);
-               remove_proc_entry("brightness", device_dir);
-               remove_proc_entry("EDID", device_dir);
-               remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
-               acpi_device_dir(device) = NULL;
-       }
-
-       return 0;
-}
-
-/* video bus */
-static int acpi_video_bus_info_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_video_bus *video = seq->private;
-
-
-       if (!video)
-               goto end;
-
-       seq_printf(seq, "Switching heads:              %s\n",
-                  video->flags.multihead ? "yes" : "no");
-       seq_printf(seq, "Video ROM:                    %s\n",
-                  video->flags.rom ? "yes" : "no");
-       seq_printf(seq, "Device to be POSTed on boot:  %s\n",
-                  video->flags.post ? "yes" : "no");
-
-      end:
-       return 0;
-}
-
-static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_video_bus_info_seq_show,
-                          PDE(inode)->data);
-}
-
-static int acpi_video_bus_ROM_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_video_bus *video = seq->private;
-
-
-       if (!video)
-               goto end;
-
-       printk(KERN_INFO PREFIX "Please implement %s\n", __func__);
-       seq_printf(seq, "<TODO>\n");
-
-      end:
-       return 0;
-}
-
-static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_video_bus_ROM_seq_show, PDE(inode)->data);
-}
-
-static int
-acpi_video_bus_POST_options(struct acpi_video_bus *video,
-                           unsigned long long *options)
-{
-       int status;
-
-       status = acpi_evaluate_integer(video->device->handle, "_VPO",
-                                      NULL, options);
-       *options &= 3;
-
-       return status;
-}
-
-static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_video_bus *video = seq->private;
-       unsigned long long options;
-       int status;
-
-
-       if (!video)
-               goto end;
-
-       status = acpi_video_bus_POST_options(video, &options);
-       if (ACPI_SUCCESS(status)) {
-               if (!(options & 1)) {
-                       printk(KERN_WARNING PREFIX
-                              "The motherboard VGA device is not listed as a possible POST device.\n");
-                       printk(KERN_WARNING PREFIX
-                              "This indicates a BIOS bug. Please contact the manufacturer.\n");
-               }
-               printk(KERN_WARNING "%llx\n", options);
-               seq_printf(seq, "can POST: <integrated video>");
-               if (options & 2)
-                       seq_printf(seq, " <PCI video>");
-               if (options & 4)
-                       seq_printf(seq, " <AGP video>");
-               seq_putc(seq, '\n');
-       } else
-               seq_printf(seq, "<not supported>\n");
-      end:
-       return 0;
-}
-
-static int
-acpi_video_bus_POST_info_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_video_bus_POST_info_seq_show,
-                          PDE(inode)->data);
-}
-
-static int
-acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id)
-{
-       int status;
-
-       status = acpi_evaluate_integer(video->device->handle, "_GPD", NULL, id);
-
-       return status;
-}
-
-static int acpi_video_bus_POST_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_video_bus *video = seq->private;
-       int status;
-       unsigned long long id;
-
-
-       if (!video)
-               goto end;
-
-       status = acpi_video_bus_get_POST(video, &id);
-       if (!ACPI_SUCCESS(status)) {
-               seq_printf(seq, "<not supported>\n");
-               goto end;
-       }
-       seq_printf(seq, "device POSTed is <%s>\n", device_decode[id & 3]);
-
-      end:
-       return 0;
-}
-
-static int acpi_video_bus_DOS_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_video_bus *video = seq->private;
-
-
-       seq_printf(seq, "DOS setting: <%d>\n", video->dos_setting);
-
-       return 0;
-}
-
-static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_video_bus_POST_seq_show,
-                          PDE(inode)->data);
-}
-
-static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_video_bus_DOS_seq_show, PDE(inode)->data);
-}
-
-static int
-acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
-{
-       int status;
-       unsigned long long tmp;
-       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list args = { 1, &arg0 };
-
-
-       arg0.integer.value = option;
-
-       status = acpi_evaluate_integer(video->device->handle, "_SPD",
-                                      &args, &tmp);
-       if (ACPI_SUCCESS(status))
-               status = tmp ? (-EINVAL) : (AE_OK);
-
-       return status;
-}
-
-static ssize_t
-acpi_video_bus_write_POST(struct file *file,
-                         const char __user * buffer,
-                         size_t count, loff_t * data)
-{
-       int status;
-       struct seq_file *m = file->private_data;
-       struct acpi_video_bus *video = m->private;
-       char str[12] = { 0 };
-       unsigned long long opt, options;
-
-
-       if (!video || count >= sizeof(str))
-               return -EINVAL;
-
-       status = acpi_video_bus_POST_options(video, &options);
-       if (!ACPI_SUCCESS(status))
-               return -EINVAL;
-
-       if (copy_from_user(str, buffer, count))
-               return -EFAULT;
-
-       str[count] = 0;
-       opt = simple_strtoul(str, NULL, 0);
-       if (opt > 3)
-               return -EFAULT;
-
-       /* just in case an OEM 'forgot' the motherboard... */
-       options |= 1;
-
-       if (options & (1ul << opt)) {
-               status = acpi_video_bus_set_POST(video, opt);
-               if (!ACPI_SUCCESS(status))
-                       return -EFAULT;
-
-       }
-
-       return count;
-}
-
-static ssize_t
-acpi_video_bus_write_DOS(struct file *file,
-                        const char __user * buffer,
-                        size_t count, loff_t * data)
-{
-       int status;
-       struct seq_file *m = file->private_data;
-       struct acpi_video_bus *video = m->private;
-       char str[12] = { 0 };
-       unsigned long opt;
-
-
-       if (!video || count >= sizeof(str))
-               return -EINVAL;
-
-       if (copy_from_user(str, buffer, count))
-               return -EFAULT;
-
-       str[count] = 0;
-       opt = simple_strtoul(str, NULL, 0);
-       if (opt > 7)
-               return -EFAULT;
-
-       status = acpi_video_bus_DOS(video, opt & 0x3, (opt & 0x4) >> 2);
-
-       if (!ACPI_SUCCESS(status))
-               return -EFAULT;
-
-       return count;
-}
-
-static int acpi_video_bus_add_fs(struct acpi_device *device)
-{
-       struct acpi_video_bus *video = acpi_driver_data(device);
-       struct proc_dir_entry *device_dir;
-       struct proc_dir_entry *entry;
-
-       device_dir = proc_mkdir(acpi_device_bid(device), acpi_video_dir);
-       if (!device_dir)
-               return -ENOMEM;
-
-       /* 'info' [R] */
-       entry = proc_create_data("info", S_IRUGO, device_dir,
-                                &acpi_video_bus_info_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               goto err_remove_dir;
-
-       /* 'ROM' [R] */
-       entry = proc_create_data("ROM", S_IRUGO, device_dir,
-                                &acpi_video_bus_ROM_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               goto err_remove_info;
-
-       /* 'POST_info' [R] */
-       entry = proc_create_data("POST_info", S_IRUGO, device_dir,
-                                &acpi_video_bus_POST_info_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               goto err_remove_rom;
-
-       /* 'POST' [R/W] */
-       entry = proc_create_data("POST", S_IFREG | S_IRUGO | S_IWUSR,
-                                device_dir,
-                                &acpi_video_bus_POST_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               goto err_remove_post_info;
-
-       /* 'DOS' [R/W] */
-       entry = proc_create_data("DOS", S_IFREG | S_IRUGO | S_IWUSR,
-                                device_dir,
-                                &acpi_video_bus_DOS_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               goto err_remove_post;
-
-       video->dir = acpi_device_dir(device) = device_dir;
-       return 0;
-
- err_remove_post:
-       remove_proc_entry("POST", device_dir);
- err_remove_post_info:
-       remove_proc_entry("POST_info", device_dir);
- err_remove_rom:
-       remove_proc_entry("ROM", device_dir);
- err_remove_info:
-       remove_proc_entry("info", device_dir);
- err_remove_dir:
-       remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
-       return -ENOMEM;
-}
-
-static int acpi_video_bus_remove_fs(struct acpi_device *device)
-{
-       struct proc_dir_entry *device_dir = acpi_device_dir(device);
-
-       if (device_dir) {
-               remove_proc_entry("info", device_dir);
-               remove_proc_entry("ROM", device_dir);
-               remove_proc_entry("POST_info", device_dir);
-               remove_proc_entry("POST", device_dir);
-               remove_proc_entry("DOS", device_dir);
-               remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
-               acpi_device_dir(device) = NULL;
-       }
-
-       return 0;
-}
-#else
-static inline int acpi_video_device_add_fs(struct acpi_device *device)
-{
-       return 0;
-}
-static inline int acpi_video_device_remove_fs(struct acpi_device *device)
-{
-       return 0;
-}
-static inline int acpi_video_bus_add_fs(struct acpi_device *device)
-{
-       return 0;
-}
-static inline int acpi_video_bus_remove_fs(struct acpi_device *device)
-{
-       return 0;
-}
-#endif /* CONFIG_ACPI_PROCFS */
-
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -1877,8 +1130,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
                list_add_tail(&data->entry, &video->video_device_list);
                mutex_unlock(&video->device_list_lock);
 
-               acpi_video_device_add_fs(device);
-
                return 0;
        }
 
@@ -2181,8 +1432,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
        if (!device || !device->video)
                return -ENOENT;
 
-       acpi_video_device_remove_fs(device->dev);
-
        status = acpi_remove_notify_handler(device->dev->handle,
                                            ACPI_DEVICE_NOTIFY,
                                            acpi_video_device_notify);
@@ -2466,10 +1715,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
        if (error)
                goto err_free_video;
 
-       error = acpi_video_bus_add_fs(device);
-       if (error)
-               goto err_free_video;
-
        mutex_init(&video->device_list_lock);
        INIT_LIST_HEAD(&video->video_device_list);
 
@@ -2522,7 +1767,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
        acpi_video_bus_stop_devices(video);
        acpi_video_bus_put_devices(video);
        kfree(video->attached_array);
-       acpi_video_bus_remove_fs(device);
  err_free_video:
        kfree(video);
        device->driver_data = NULL;
@@ -2544,7 +1788,6 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
 
        acpi_video_bus_stop_devices(video);
        acpi_video_bus_put_devices(video);
-       acpi_video_bus_remove_fs(device);
 
        input_unregister_device(video->input);
        kfree(video->attached_array);
@@ -2584,17 +1827,9 @@ int acpi_video_register(void)
                return 0;
        }
 
-#ifdef CONFIG_ACPI_PROCFS
-       acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir);
-       if (!acpi_video_dir)
-               return -ENODEV;
-#endif
-
        result = acpi_bus_register_driver(&acpi_video_bus);
-       if (result < 0) {
-               remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+       if (result < 0)
                return -ENODEV;
-       }
 
        /*
         * When the acpi_video_bus is loaded successfully, increase
@@ -2617,10 +1852,6 @@ void acpi_video_unregister(void)
        }
        acpi_bus_unregister_driver(&acpi_video_bus);
 
-#ifdef CONFIG_ACPI_PROCFS
-       remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
-#endif
-
        register_count = 0;
 
        return;
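
Nearly all of the deletion above is the same /proc boilerplate repeated once
per file.  For reference, a minimal sketch of the retired pattern, with
hypothetical names (example_* is illustrative, not from the driver): one
seq_file show routine, wired through single_open() with the proc entry's
private data:

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	struct example_state {			/* hypothetical payload */
		int value;
	};

	static int example_seq_show(struct seq_file *seq, void *offset)
	{
		struct example_state *st = seq->private;

		seq_printf(seq, "value: %d\n", st->value);
		return 0;
	}

	static int example_open_fs(struct inode *inode, struct file *file)
	{
		/* recover the pointer passed to proc_create_data() */
		return single_open(file, example_seq_show, PDE(inode)->data);
	}

	static const struct file_operations example_fops = {
		.owner   = THIS_MODULE,
		.open    = example_open_fs,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

Registration is then a single proc_create_data("example", S_IRUGO, parent,
&example_fops, st) call -- exactly the shape deleted en masse above.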
index ee53558b452fb1076867e595820faf9f07909014..ce012a9c6201ace3fcf143fcf02f424ce0d5c29b 100644 (file)
@@ -160,6 +160,18 @@ static ssize_t node_read_numastat(struct sys_device * dev,
 }
 static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
 
+static ssize_t node_read_vmstat(struct sys_device *dev,
+                               struct sysdev_attribute *attr, char *buf)
+{
+       int nid = dev->id;
+       return sprintf(buf,
+               "nr_written %lu\n"
+               "nr_dirtied %lu\n",
+               node_page_state(nid, NR_WRITTEN),
+               node_page_state(nid, NR_DIRTIED));
+}
+static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+
 static ssize_t node_read_distance(struct sys_device * dev,
                        struct sysdev_attribute *attr, char * buf)
 {
@@ -243,6 +255,7 @@ int register_node(struct node *node, int num, struct node *parent)
                sysdev_create_file(&node->sysdev, &attr_meminfo);
                sysdev_create_file(&node->sysdev, &attr_numastat);
                sysdev_create_file(&node->sysdev, &attr_distance);
+               sysdev_create_file(&node->sysdev, &attr_vmstat);
 
                scan_unevictable_register_node(node);
 
@@ -267,6 +280,7 @@ void unregister_node(struct node *node)
        sysdev_remove_file(&node->sysdev, &attr_meminfo);
        sysdev_remove_file(&node->sysdev, &attr_numastat);
        sysdev_remove_file(&node->sysdev, &attr_distance);
+       sysdev_remove_file(&node->sysdev, &attr_vmstat);
 
        scan_unevictable_unregister_node(node);
        hugetlb_unregister_node(node);          /* no-op, if memoryless node */
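
The new attribute exports the NR_WRITTEN and NR_DIRTIED counters per NUMA
node.  A minimal userspace sketch for reading it, assuming the usual sysdev
path /sys/devices/system/node/node<N>/vmstat:

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/devices/system/node/node0/vmstat", "r");

		if (!f) {
			perror("node0/vmstat");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* "nr_written N" / "nr_dirtied N" */
		fclose(f);
		return 0;
	}
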
index 1dd8676d7f55705a84ad26ff77544b0518c02723..126ca492dd08fe3694f0e78d823bbb8ec6a692b1 100644 (file)
@@ -503,7 +503,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
-               spin_lock(&dev->parent->power.lock);
+               spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
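
rpm_resume() runs with the child's power.lock already held, and this path
briefly takes the parent's lock of the same lock class; a plain spin_lock()
would trigger a false lockdep self-deadlock report.  A minimal sketch of the
annotation with illustrative names (locks assumed initialized with
spin_lock_init()):

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct pm_node {
		spinlock_t lock;
		struct pm_node *parent;
	};

	static void peek_parent_state(struct pm_node *dev)
	{
		spin_lock(&dev->lock);
		if (dev->parent) {
			/* same lock class as dev->lock: tell lockdep
			 * the nesting is intentional */
			spin_lock_nested(&dev->parent->lock,
					 SINGLE_DEPTH_NESTING);
			/* ... inspect parent state, no callbacks ... */
			spin_unlock(&dev->parent->lock);
		}
		spin_unlock(&dev->lock);
	}
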
index 6c48b3545f84583d0e32f1e87308c39a42b2133c..450c958b514fea941e5f11de2b248dec6e7477c0 100644 (file)
@@ -101,8 +101,8 @@ static int transfer_none(struct loop_device *lo, int cmd,
        else
                memcpy(raw_buf, loop_buf, size);
 
-       kunmap_atomic(raw_buf, KM_USER0);
        kunmap_atomic(loop_buf, KM_USER1);
+       kunmap_atomic(raw_buf, KM_USER0);
        cond_resched();
        return 0;
 }
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
        for (i = 0; i < size; i++)
                *out++ = *in++ ^ key[(i & 511) % keysize];
 
-       kunmap_atomic(raw_buf, KM_USER0);
        kunmap_atomic(loop_buf, KM_USER1);
+       kunmap_atomic(raw_buf, KM_USER0);
        cond_resched();
        return 0;
 }
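
Both hunks fix the teardown order: atomic kmap slots behave like a small
per-CPU stack, so mappings should be released in the reverse order they were
taken (KM_USER1 before KM_USER0 here).  A sketch of the rule with this era's
two-argument API:

	#include <linux/highmem.h>
	#include <linux/string.h>

	static void copy_page_atomic(struct page *dst, struct page *src)
	{
		void *d = kmap_atomic(dst, KM_USER0);	/* mapped first */
		void *s = kmap_atomic(src, KM_USER1);	/* mapped second */

		memcpy(d, s, PAGE_SIZE);

		kunmap_atomic(s, KM_USER1);	/* released in LIFO order */
		kunmap_atomic(d, KM_USER0);
	}
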
index 627f542827c7069b4352bb1692d0baae2af63d38..8eb56e273e75719f51d617fe3a2ed5c4b9cc06f2 100644 (file)
@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1)      += hp-agp.o
 obj-$(CONFIG_AGP_PARISC)       += parisc-agp.o
 obj-$(CONFIG_AGP_I460)         += i460-agp.o
 obj-$(CONFIG_AGP_INTEL)                += intel-agp.o
+obj-$(CONFIG_AGP_INTEL)                += intel-gtt.o
 obj-$(CONFIG_AGP_NVIDIA)       += nvidia-agp.o
 obj-$(CONFIG_AGP_SGI_TIOCA)    += sgi-agp.o
 obj-$(CONFIG_AGP_SIS)          += sis-agp.o
index 120490949997b71c86785d3af5156ea63b3c2330..5259065f3c792b16829b00c6200e73938250d74a 100644 (file)
@@ -121,11 +121,6 @@ struct agp_bridge_driver {
        void (*agp_destroy_pages)(struct agp_memory *);
        int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
        void (*chipset_flush)(struct agp_bridge_data *);
-
-       int (*agp_map_page)(struct page *page, dma_addr_t *ret);
-       void (*agp_unmap_page)(struct page *page, dma_addr_t dma);
-       int (*agp_map_memory)(struct agp_memory *mem);
-       void (*agp_unmap_memory)(struct agp_memory *mem);
 };
 
 struct agp_bridge_data {
index b6b1568314c8a5f63a44de50d70ca176b57b8090..b1b4362bc6487c88c8b7fb2e69c828f407727c67 100644 (file)
@@ -309,7 +309,8 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 
        num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
 
-       if (type != 0 || mem->type != 0)
+       if (type != mem->type ||
+           agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
                return -EINVAL;
 
        if ((pg_start + mem->page_count) > num_entries)
@@ -348,7 +349,8 @@ static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
        unsigned long __iomem *cur_gatt;
        unsigned long addr;
 
-       if (type != 0 || mem->type != 0)
+       if (type != mem->type ||
+           agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
                return -EINVAL;
 
        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
index ee4f855611b676b589046fad44f1e934bd615896..f27d0d0816d3ce5371a9948bc9841e74ba0f533b 100644 (file)
@@ -151,17 +151,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
                }
 
                bridge->scratch_page_page = page;
-               if (bridge->driver->agp_map_page) {
-                       if (bridge->driver->agp_map_page(page,
-                                                        &bridge->scratch_page_dma)) {
-                               dev_err(&bridge->dev->dev,
-                                       "unable to dma-map scratch page\n");
-                               rc = -ENOMEM;
-                               goto err_out_nounmap;
-                       }
-               } else {
-                       bridge->scratch_page_dma = page_to_phys(page);
-               }
+               bridge->scratch_page_dma = page_to_phys(page);
 
                bridge->scratch_page = bridge->driver->mask_memory(bridge,
                                                   bridge->scratch_page_dma, 0);
@@ -204,12 +194,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
        return 0;
 
 err_out:
-       if (bridge->driver->needs_scratch_page &&
-           bridge->driver->agp_unmap_page) {
-               bridge->driver->agp_unmap_page(bridge->scratch_page_page,
-                                              bridge->scratch_page_dma);
-       }
-err_out_nounmap:
        if (bridge->driver->needs_scratch_page) {
                void *va = page_address(bridge->scratch_page_page);
 
@@ -240,10 +224,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
            bridge->driver->needs_scratch_page) {
                void *va = page_address(bridge->scratch_page_page);
 
-               if (bridge->driver->agp_unmap_page)
-                       bridge->driver->agp_unmap_page(bridge->scratch_page_page,
-                                                      bridge->scratch_page_dma);
-
                bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
                bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
        }
index 64255cef8a7db93f780c700d712c5fa4b7fe5907..4956f1c8f9d5da6d0902390e3ffb44e35455e208 100644 (file)
@@ -437,11 +437,6 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
                curr->is_flushed = true;
        }
 
-       if (curr->bridge->driver->agp_map_memory) {
-               ret_val = curr->bridge->driver->agp_map_memory(curr);
-               if (ret_val)
-                       return ret_val;
-       }
        ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 
        if (ret_val != 0)
@@ -483,9 +478,6 @@ int agp_unbind_memory(struct agp_memory *curr)
        if (ret_val != 0)
                return ret_val;
 
-       if (curr->bridge->driver->agp_unmap_memory)
-               curr->bridge->driver->agp_unmap_memory(curr);
-
        curr->is_bound = false;
        curr->pg_start = 0;
        spin_lock(&curr->bridge->mapped_lock);
index cd18493c952795317904229197016e40d7c408cd..e72f49d52202da8dcddf2055a366abb31e9029d9 100644 (file)
@@ -12,9 +12,6 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
-#include <linux/intel-gtt.h>
-
-#include "intel-gtt.c"
 
 int intel_agp_enabled;
 EXPORT_SYMBOL(intel_agp_enabled);
@@ -703,179 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static int find_gmch(u16 device)
-{
-       struct pci_dev *gmch_device;
-
-       gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
-       if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
-               gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
-                                            device, gmch_device);
-       }
-
-       if (!gmch_device)
-               return 0;
-
-       intel_private.pcidev = gmch_device;
-       return 1;
-}
-
-/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
- * driver and gmch_driver must be non-null, and find_gmch will determine
- * which one should be used if a gmch_chip_id is present.
- */
+/* Table to describe Intel AGP/PCIE GART drivers.  Integrated graphics
+ * (GMCH) chipsets are now probed by intel_gmch_probe() in intel-gtt.c,
+ * so the gmch_driver/find_gmch machinery is gone.
+ */
-static const struct intel_driver_description {
+static const struct intel_agp_driver_description {
        unsigned int chip_id;
-       unsigned int gmch_chip_id;
        char *name;
        const struct agp_bridge_driver *driver;
-       const struct agp_bridge_driver *gmch_driver;
 } intel_agp_chipsets[] = {
-       { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
-               NULL, &intel_810_driver },
-       { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
-               NULL, &intel_810_driver },
-       { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
-               NULL, &intel_810_driver },
-       { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
-               &intel_815_driver, &intel_810_driver },
-       { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
-               &intel_830mp_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
-               &intel_845_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
-               &intel_845_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
-               &intel_845_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
-               &intel_845_driver, &intel_830_driver },
-       { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
-               NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
-               NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
-               NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
-               NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
-               NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
-               NULL, &intel_915_driver },
-       { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
-               NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
-               NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
-               NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
-               NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
-               NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
-               NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
-       { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
-               NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
-               NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
-               NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
-               NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
-               NULL, &intel_g33_driver },
-       { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
-           "GM45", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
-           "Eaglelake", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
-           "Q45/Q43", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
-           "G45/G43", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
-           "B43", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
-           "B43", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
-           "G41", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
-           "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-           "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-           "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-           "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-           "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-           "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-           "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-           "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-           "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-           "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-           "Sandybridge", NULL, &intel_gen6_driver },
-       { 0, 0, NULL, NULL, NULL }
+       { PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+       { PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+       { PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+       { PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+       { PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+       { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+       { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+       { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+       { PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
+       { PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+       { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+       { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+       { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+       { PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+       { PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+       { PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+       { PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+       { PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+       { PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+       { 0, NULL, NULL }
 };
 
-static int __devinit intel_gmch_probe(struct pci_dev *pdev,
-                                     struct agp_bridge_data *bridge)
-{
-       int i, mask;
-
-       bridge->driver = NULL;
-
-       for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
-               if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
-                       find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
-                       bridge->driver =
-                               intel_agp_chipsets[i].gmch_driver;
-                       break;
-               }
-       }
-
-       if (!bridge->driver)
-               return 0;
-
-       bridge->dev_private_data = &intel_private;
-       bridge->dev = pdev;
-
-       dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
-
-       if (bridge->driver->mask_memory == intel_gen6_mask_memory)
-               mask = 40;
-       else if (bridge->driver->mask_memory == intel_i965_mask_memory)
-               mask = 36;
-       else
-               mask = 32;
-
-       if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
-               dev_err(&intel_private.pcidev->dev,
-                       "set gfx device dma mask %d-bit failed!\n", mask);
-       else
-               pci_set_consistent_dma_mask(intel_private.pcidev,
-                                           DMA_BIT_MASK(mask));
-
-       return 1;
-}
-
 static int __devinit agp_intel_probe(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
 {
@@ -905,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
                }
        }
 
-       if (intel_agp_chipsets[i].name == NULL) {
+       if (!bridge->driver) {
                if (cap_ptr)
                        dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
                                 pdev->vendor, pdev->device);
@@ -913,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
                return -ENODEV;
        }
 
-       if (!bridge->driver) {
-               if (cap_ptr)
-                       dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
-                                intel_agp_chipsets[i].gmch_chip_id);
-               agp_put_bridge(bridge);
-               return -ENODEV;
-       }
-
        bridge->dev = pdev;
        bridge->dev_private_data = NULL;
 
@@ -972,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
 
        agp_remove_bridge(bridge);
 
-       if (intel_private.pcidev)
-               pci_dev_put(intel_private.pcidev);
+       intel_gmch_remove(pdev);
 
        agp_put_bridge(bridge);
 }
@@ -1049,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_G45_HB),
        ID(PCI_DEVICE_ID_INTEL_G41_HB),
        ID(PCI_DEVICE_ID_INTEL_B43_HB),
+       ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
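
With the GMCH side split out, the table keeps only classic AGP bridges and
driver selection reduces to a PCI-id match.  A sketch of the lookup, assuming
the intel_agp_chipsets[] table above (the real loop lives in
agp_intel_probe(), only partially shown here):

	static const struct agp_bridge_driver *
	intel_agp_lookup(struct pci_dev *pdev, const char **name)
	{
		int i;

		for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
			if (pdev->device == intel_agp_chipsets[i].chip_id) {
				*name = intel_agp_chipsets[i].name;
				return intel_agp_chipsets[i].driver;
			}
		}
		return NULL;	/* not ours; intel_gmch_probe() may claim it */
	}
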
index d09b1ab7e8abeac5bbdf0cc8a8dd5f333f9c22a3..90539df02504ca0ee75ebf1923fd814a50a738bc 100644 (file)
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB           0x0108  /* Server */
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG           0x010A
 
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
-               IS_SNB)
-
+int intel_gmch_probe(struct pci_dev *pdev,
+                              struct agp_bridge_data *bridge);
+void intel_gmch_remove(struct pci_dev *pdev);
 #endif
index 75e0a3497888d295d25dd414bda8a2ce69eee724..6b6760ea2435bac1865325aec74d579f9687f87e 100644 (file)
  * /fairy-tale-mode off
  */
 
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <linux/intel-gtt.h>
+#include <drm/intel-gtt.h>
+
 /*
  * If we have Intel graphics, we're not going to have anything other than
  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
  */
 #ifdef CONFIG_DMAR
 #define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
 #endif
 
 /* Max amount of stolen space, anything above will be returned to Linux */
 int intel_max_stolen = 32 * 1024 * 1024;
-EXPORT_SYMBOL(intel_max_stolen);
 
 static const struct aper_size_info_fixed intel_i810_sizes[] =
 {
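
Defining USE_PCI_DMA_API to 0 in the #else branch lets later code test it in
ordinary C conditionals instead of #ifdef blocks; the compiler still discards
the dead branch, but both branches remain visible to type checking.  A sketch
mirroring the shape intel_gtt_setup_scratch_page() uses further down:

	static dma_addr_t scratch_dma_addr(struct page *page)
	{
		if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
			return pci_map_page(intel_private.pcidev, page, 0,
					    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		return page_to_phys(page);
	}
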
@@ -55,32 +68,36 @@ static struct gatt_mask intel_i810_masks[] =
 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
 
-static struct gatt_mask intel_gen6_masks[] =
-{
-       {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
-        .type = INTEL_AGP_UNCACHED_MEMORY },
-       {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
-         .type = INTEL_AGP_CACHED_MEMORY_LLC },
-       {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
-         .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
-       {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
-         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
-       {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
-         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+struct intel_gtt_driver {
+       unsigned int gen : 8;
+       unsigned int is_g33 : 1;
+       unsigned int is_pineview : 1;
+       unsigned int is_ironlake : 1;
+       unsigned int dma_mask_size : 8;
+       /* Chipset specific GTT setup */
+       int (*setup)(void);
+       /* This should undo anything done in ->setup() save the unmapping
+        * of the mmio register file; that's done in the generic code. */
+       void (*cleanup)(void);
+       void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+       /* Flags is a more or less chipset-specific opaque value.
+        * For chipsets that need to support old ums (non-gem) code, this
+        * needs to be identical to the various supported agp memory types! */
+       bool (*check_flags)(unsigned int flags);
+       void (*chipset_flush)(void);
 };
 
 static struct _intel_private {
+       struct intel_gtt base;
+       const struct intel_gtt_driver *driver;
        struct pci_dev *pcidev; /* device one */
+       struct pci_dev *bridge_dev;
        u8 __iomem *registers;
+       phys_addr_t gtt_bus_addr;
+       phys_addr_t gma_bus_addr;
+       phys_addr_t pte_bus_addr;
        u32 __iomem *gtt;               /* I915G */
        int num_dcache_entries;
-       /* gtt_entries is the number of gtt entries that are already mapped
-        * to stolen memory.  Stolen memory is larger than the memory mapped
-        * through gtt_entries, as it includes some reserved space for the BIOS
-        * popup and for the GTT.
-        */
-       int gtt_entries;                        /* i830+ */
-       int gtt_total_size;
        union {
                void __iomem *i9xx_flush_page;
                void *i8xx_flush_page;
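
The new intel_gtt_driver vtable replaces the IS_I915/IS_G33/IS_SNB device-id
macro chains deleted from intel-agp.h above: per-chipset differences become
data (gen, feature bits, dma_mask_size) plus a few function pointers.  A
sketch of the intended dispatch (illustrative; the real callers appear in
later hunks of this file):

	static void gtt_write_one_entry(dma_addr_t addr, unsigned int entry,
					unsigned int flags)
	{
		/* flag validity is chipset policy, not caller policy */
		if (!intel_private.driver->check_flags(flags))
			return;

		intel_private.driver->write_entry(addr, entry, flags);
	}
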
@@ -88,23 +105,14 @@ static struct _intel_private {
        struct page *i8xx_page;
        struct resource ifp_resource;
        int resource_valid;
+       struct page *scratch_page;
+       dma_addr_t scratch_page_dma;
 } intel_private;
 
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
-       *ret = pci_map_page(intel_private.pcidev, page, 0,
-                           PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(intel_private.pcidev, *ret))
-               return -EINVAL;
-       return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
-       pci_unmap_page(intel_private.pcidev, dma,
-                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
+#define INTEL_GTT_GEN  intel_private.driver->gen
+#define IS_G33         intel_private.driver->is_g33
+#define IS_PINEVIEW    intel_private.driver->is_pineview
+#define IS_IRONLAKE    intel_private.driver->is_ironlake
 
 static void intel_agp_free_sglist(struct agp_memory *mem)
 {
@@ -125,6 +133,9 @@ static int intel_agp_map_memory(struct agp_memory *mem)
        struct scatterlist *sg;
        int i;
 
+       if (mem->sg_list)
+               return 0; /* already mapped (e.g. for resume) */
+
        DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
 
        if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
@@ -156,70 +167,17 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
        intel_agp_free_sglist(mem);
 }
 
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-                                       off_t pg_start, int mask_type)
-{
-       struct scatterlist *sg;
-       int i, j;
-
-       j = pg_start;
-
-       WARN_ON(!mem->num_sg);
-
-       if (mem->num_sg == mem->page_count) {
-               for_each_sg(mem->sg_list, sg, mem->page_count, i) {
-                       writel(agp_bridge->driver->mask_memory(agp_bridge,
-                                       sg_dma_address(sg), mask_type),
-                                       intel_private.gtt+j);
-                       j++;
-               }
-       } else {
-               /* sg may merge pages, but we have to separate
-                * per-page addr for GTT */
-               unsigned int len, m;
-
-               for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
-                       len = sg_dma_len(sg) / PAGE_SIZE;
-                       for (m = 0; m < len; m++) {
-                               writel(agp_bridge->driver->mask_memory(agp_bridge,
-                                                                      sg_dma_address(sg) + m * PAGE_SIZE,
-                                                                      mask_type),
-                                      intel_private.gtt+j);
-                               j++;
-                       }
-               }
-       }
-       readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-                                       off_t pg_start, int mask_type)
-{
-       int i, j;
-
-       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-               writel(agp_bridge->driver->mask_memory(agp_bridge,
-                               page_to_phys(mem->pages[i]), mask_type),
-                      intel_private.gtt+j);
-       }
-
-       readl(intel_private.gtt+j-1);
-}
-
-#endif
-
 static int intel_i810_fetch_size(void)
 {
        u32 smram_miscc;
        struct aper_size_info_fixed *values;
 
-       pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+       pci_read_config_dword(intel_private.bridge_dev,
+                             I810_SMRAM_MISCC, &smram_miscc);
        values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
 
        if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-               dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+               dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
                return 0;
        }
        if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
@@ -284,7 +242,7 @@ static void intel_i810_cleanup(void)
        iounmap(intel_private.registers);
 }
 
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
        return;
 }
@@ -319,34 +277,6 @@ static void i8xx_destroy_pages(struct page *page)
        atomic_dec(&agp_bridge->current_memory_agp);
 }
 
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
-                                       int type)
-{
-       if (type < AGP_USER_TYPES)
-               return type;
-       else if (type == AGP_USER_CACHED_MEMORY)
-               return INTEL_AGP_CACHED_MEMORY;
-       else
-               return 0;
-}
-
-static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
-                                       int type)
-{
-       unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
-       unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
-
-       if (type_mask == AGP_USER_UNCACHED_MEMORY)
-               return INTEL_AGP_UNCACHED_MEMORY;
-       else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
-               return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
-                             INTEL_AGP_CACHED_MEMORY_LLC_MLC;
-       else /* set 'normal'/'cached' to LLC by default */
-               return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
-                             INTEL_AGP_CACHED_MEMORY_LLC;
-}
-
-
 static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
                                int type)
 {
@@ -514,8 +444,33 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
        return addr | bridge->driver->masks[type].mask;
 }
 
-static struct aper_size_info_fixed intel_i830_sizes[] =
+static int intel_gtt_setup_scratch_page(void)
 {
+       struct page *page;
+       dma_addr_t dma_addr;
+
+       page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+       if (page == NULL)
+               return -ENOMEM;
+       get_page(page);
+       set_pages_uc(page, 1);
+
+       if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+               dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+                                   PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+                       return -EINVAL;
+
+               intel_private.scratch_page_dma = dma_addr;
+       } else
+               intel_private.scratch_page_dma = page_to_phys(page);
+
+       intel_private.scratch_page = page;
+
+       return 0;
+}
+
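The scratch page set up above backs every GTT entry that is not in active use, so a stray GPU access decodes to a harmless zeroed page rather than whatever memory the old PTE pointed at. A minimal userspace sketch of the same pattern, with toy names throughout and calloc() standing in for alloc_page()/pci_map_page():

    /* Point every unused slot of a toy page table at one zeroed
     * scratch page, mirroring intel_gtt_setup_scratch_page() plus
     * the fill loop in intel_fake_agp_configure(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define TOY_PAGE_SIZE   4096
    #define TOY_GTT_ENTRIES 8

    int main(void)
    {
            uintptr_t gtt[TOY_GTT_ENTRIES] = { 0 };
            unsigned int stolen_entries = 3;  /* slots 0..2 stay reserved */
            void *scratch = calloc(1, TOY_PAGE_SIZE);  /* __GFP_ZERO analogue */
            unsigned int i;

            if (!scratch)
                    return 1;
            for (i = stolen_entries; i < TOY_GTT_ENTRIES; i++)
                    gtt[i] = (uintptr_t)scratch;  /* every hole -> scratch */
            printf("slot %u -> %p\n", TOY_GTT_ENTRIES - 1,
                   (void *)gtt[TOY_GTT_ENTRIES - 1]);
            free(scratch);
            return 0;
    }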
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
        {128, 32768, 5},
        /* The 64M mode still requires a 128k gatt */
        {64, 16384, 5},
@@ -523,102 +478,49 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
        {512, 131072, 7},
 };
 
-static void intel_i830_init_gtt_entries(void)
+static unsigned int intel_gtt_stolen_entries(void)
 {
        u16 gmch_ctrl;
-       int gtt_entries = 0;
        u8 rdct;
        int local = 0;
        static const int ddt[4] = { 0, 16, 32, 64 };
-       int size; /* reserved space (in kb) at the top of stolen memory */
+       unsigned int overhead_entries, stolen_entries;
+       unsigned int stolen_size = 0;
 
-       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+       pci_read_config_word(intel_private.bridge_dev,
+                            I830_GMCH_CTRL, &gmch_ctrl);
 
-       if (IS_I965) {
-               u32 pgetbl_ctl;
-               pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+       if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
+               overhead_entries = 0;
+       else
+               overhead_entries = intel_private.base.gtt_mappable_entries
+                       / 1024;
 
-               /* The 965 has a field telling us the size of the GTT,
-                * which may be larger than what is necessary to map the
-                * aperture.
-                */
-               switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
-               case I965_PGETBL_SIZE_128KB:
-                       size = 128;
-                       break;
-               case I965_PGETBL_SIZE_256KB:
-                       size = 256;
-                       break;
-               case I965_PGETBL_SIZE_512KB:
-                       size = 512;
-                       break;
-               case I965_PGETBL_SIZE_1MB:
-                       size = 1024;
-                       break;
-               case I965_PGETBL_SIZE_2MB:
-                       size = 2048;
-                       break;
-               case I965_PGETBL_SIZE_1_5MB:
-                       size = 1024 + 512;
-                       break;
-               default:
-                       dev_info(&intel_private.pcidev->dev,
-                                "unknown page table size, assuming 512KB\n");
-                       size = 512;
-               }
-               size += 4; /* add in BIOS popup space */
-       } else if (IS_G33 && !IS_PINEVIEW) {
-       /* G33's GTT size defined in gmch_ctrl */
-               switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
-               case G33_PGETBL_SIZE_1M:
-                       size = 1024;
-                       break;
-               case G33_PGETBL_SIZE_2M:
-                       size = 2048;
-                       break;
-               default:
-                       dev_info(&agp_bridge->dev->dev,
-                                "unknown page table size 0x%x, assuming 512KB\n",
-                               (gmch_ctrl & G33_PGETBL_SIZE_MASK));
-                       size = 512;
-               }
-               size += 4;
-       } else if (IS_G4X || IS_PINEVIEW) {
-               /* On 4 series hardware, GTT stolen is separate from graphics
-                * stolen, ignore it in stolen gtt entries counting.  However,
-                * 4KB of the stolen memory doesn't get mapped to the GTT.
-                */
-               size = 4;
-       } else {
-               /* On previous hardware, the GTT size was just what was
-                * required to map the aperture.
-                */
-               size = agp_bridge->driver->fetch_size() + 4;
-       }
+       overhead_entries += 1; /* BIOS popup */
 
-       if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
-           agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+       if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+           intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
                switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
                case I830_GMCH_GMS_STOLEN_512:
-                       gtt_entries = KB(512) - KB(size);
+                       stolen_size = KB(512);
                        break;
                case I830_GMCH_GMS_STOLEN_1024:
-                       gtt_entries = MB(1) - KB(size);
+                       stolen_size = MB(1);
                        break;
                case I830_GMCH_GMS_STOLEN_8192:
-                       gtt_entries = MB(8) - KB(size);
+                       stolen_size = MB(8);
                        break;
                case I830_GMCH_GMS_LOCAL:
                        rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
-                       gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+                       stolen_size = (I830_RDRAM_ND(rdct) + 1) *
                                        MB(ddt[I830_RDRAM_DDT(rdct)]);
                        local = 1;
                        break;
                default:
-                       gtt_entries = 0;
+                       stolen_size = 0;
                        break;
                }
-       } else if (IS_SNB) {
+       } else if (INTEL_GTT_GEN == 6) {
                /*
                 * SandyBridge has new memory control reg at 0x50.w
                 */
@@ -626,149 +528,292 @@ static void intel_i830_init_gtt_entries(void)
                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
                switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
                case SNB_GMCH_GMS_STOLEN_32M:
-                       gtt_entries = MB(32) - KB(size);
+                       stolen_size = MB(32);
                        break;
                case SNB_GMCH_GMS_STOLEN_64M:
-                       gtt_entries = MB(64) - KB(size);
+                       stolen_size = MB(64);
                        break;
                case SNB_GMCH_GMS_STOLEN_96M:
-                       gtt_entries = MB(96) - KB(size);
+                       stolen_size = MB(96);
                        break;
                case SNB_GMCH_GMS_STOLEN_128M:
-                       gtt_entries = MB(128) - KB(size);
+                       stolen_size = MB(128);
                        break;
                case SNB_GMCH_GMS_STOLEN_160M:
-                       gtt_entries = MB(160) - KB(size);
+                       stolen_size = MB(160);
                        break;
                case SNB_GMCH_GMS_STOLEN_192M:
-                       gtt_entries = MB(192) - KB(size);
+                       stolen_size = MB(192);
                        break;
                case SNB_GMCH_GMS_STOLEN_224M:
-                       gtt_entries = MB(224) - KB(size);
+                       stolen_size = MB(224);
                        break;
                case SNB_GMCH_GMS_STOLEN_256M:
-                       gtt_entries = MB(256) - KB(size);
+                       stolen_size = MB(256);
                        break;
                case SNB_GMCH_GMS_STOLEN_288M:
-                       gtt_entries = MB(288) - KB(size);
+                       stolen_size = MB(288);
                        break;
                case SNB_GMCH_GMS_STOLEN_320M:
-                       gtt_entries = MB(320) - KB(size);
+                       stolen_size = MB(320);
                        break;
                case SNB_GMCH_GMS_STOLEN_352M:
-                       gtt_entries = MB(352) - KB(size);
+                       stolen_size = MB(352);
                        break;
                case SNB_GMCH_GMS_STOLEN_384M:
-                       gtt_entries = MB(384) - KB(size);
+                       stolen_size = MB(384);
                        break;
                case SNB_GMCH_GMS_STOLEN_416M:
-                       gtt_entries = MB(416) - KB(size);
+                       stolen_size = MB(416);
                        break;
                case SNB_GMCH_GMS_STOLEN_448M:
-                       gtt_entries = MB(448) - KB(size);
+                       stolen_size = MB(448);
                        break;
                case SNB_GMCH_GMS_STOLEN_480M:
-                       gtt_entries = MB(480) - KB(size);
+                       stolen_size = MB(480);
                        break;
                case SNB_GMCH_GMS_STOLEN_512M:
-                       gtt_entries = MB(512) - KB(size);
+                       stolen_size = MB(512);
                        break;
                }
        } else {
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
-                       gtt_entries = MB(1) - KB(size);
+                       stolen_size = MB(1);
                        break;
                case I855_GMCH_GMS_STOLEN_4M:
-                       gtt_entries = MB(4) - KB(size);
+                       stolen_size = MB(4);
                        break;
                case I855_GMCH_GMS_STOLEN_8M:
-                       gtt_entries = MB(8) - KB(size);
+                       stolen_size = MB(8);
                        break;
                case I855_GMCH_GMS_STOLEN_16M:
-                       gtt_entries = MB(16) - KB(size);
+                       stolen_size = MB(16);
                        break;
                case I855_GMCH_GMS_STOLEN_32M:
-                       gtt_entries = MB(32) - KB(size);
+                       stolen_size = MB(32);
                        break;
                case I915_GMCH_GMS_STOLEN_48M:
-                       /* Check it's really I915G */
-                       if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-                               gtt_entries = MB(48) - KB(size);
-                       else
-                               gtt_entries = 0;
+                       stolen_size = MB(48);
                        break;
                case I915_GMCH_GMS_STOLEN_64M:
-                       /* Check it's really I915G */
-                       if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-                               gtt_entries = MB(64) - KB(size);
-                       else
-                               gtt_entries = 0;
+                       stolen_size = MB(64);
                        break;
                case G33_GMCH_GMS_STOLEN_128M:
-                       if (IS_G33 || IS_I965 || IS_G4X)
-                               gtt_entries = MB(128) - KB(size);
-                       else
-                               gtt_entries = 0;
+                       stolen_size = MB(128);
                        break;
                case G33_GMCH_GMS_STOLEN_256M:
-                       if (IS_G33 || IS_I965 || IS_G4X)
-                               gtt_entries = MB(256) - KB(size);
-                       else
-                               gtt_entries = 0;
+                       stolen_size = MB(256);
                        break;
                case INTEL_GMCH_GMS_STOLEN_96M:
-                       if (IS_I965 || IS_G4X)
-                               gtt_entries = MB(96) - KB(size);
-                       else
-                               gtt_entries = 0;
+                       stolen_size = MB(96);
                        break;
                case INTEL_GMCH_GMS_STOLEN_160M:
-                       if (IS_I965 || IS_G4X)
-                               gtt_entries = MB(160) - KB(size);
-                       else
-                               gtt_entries = 0;
+                       stolen_size = MB(160);
                        break;
                case INTEL_GMCH_GMS_STOLEN_224M:
-                       if (IS_I965 || IS_G4X)
-                               gtt_entries = MB(224) - KB(size);
-                       else
-                               gtt_entries = 0;
+                       stolen_size = MB(224);
                        break;
                case INTEL_GMCH_GMS_STOLEN_352M:
-                       if (IS_I965 || IS_G4X)
-                               gtt_entries = MB(352) - KB(size);
-                       else
-                               gtt_entries = 0;
+                       stolen_size = MB(352);
                        break;
                default:
-                       gtt_entries = 0;
+                       stolen_size = 0;
                        break;
                }
        }
-       if (!local && gtt_entries > intel_max_stolen) {
-               dev_info(&agp_bridge->dev->dev,
+
+       if (!local && stolen_size > intel_max_stolen) {
+               dev_info(&intel_private.bridge_dev->dev,
                         "detected %dK stolen memory, trimming to %dK\n",
-                        gtt_entries / KB(1), intel_max_stolen / KB(1));
-               gtt_entries = intel_max_stolen / KB(4);
-       } else if (gtt_entries > 0) {
-               dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
-                      gtt_entries / KB(1), local ? "local" : "stolen");
-               gtt_entries /= KB(4);
+                        stolen_size / KB(1), intel_max_stolen / KB(1));
+               stolen_size = intel_max_stolen;
+       } else if (stolen_size > 0) {
+               dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
+                      stolen_size / KB(1), local ? "local" : "stolen");
        } else {
-               dev_info(&agp_bridge->dev->dev,
+               dev_info(&intel_private.bridge_dev->dev,
                       "no pre-allocated video memory detected\n");
-               gtt_entries = 0;
+               stolen_size = 0;
+       }
+
+       stolen_entries = stolen_size/KB(4) - overhead_entries;
+
+       return stolen_entries;
+}
+
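Worked numbers for the conversion at the end of intel_gtt_stolen_entries(): stolen_size/KB(4) turns bytes of stolen memory into 4K GTT entries, and on gen2 the overhead is one entry per MB of mappable aperture (the space the GTT itself occupies) plus one page for the BIOS popup. A self-contained check with hypothetical sizes, 128MB aperture and 8MB stolen:

    /* Reproduces the gen2 stolen-entry arithmetic with sample sizes. */
    #include <stdio.h>

    #define KB(x) ((x) * 1024u)
    #define MB(x) (KB(x) * 1024u)

    int main(void)
    {
            unsigned int mappable_entries = MB(128) / KB(4); /* 32768 */
            unsigned int overhead = mappable_entries / 1024  /* GTT itself */
                                  + 1;                       /* BIOS popup */
            unsigned int stolen_entries = MB(8) / KB(4) - overhead;

            printf("overhead=%u stolen_entries=%u\n",
                   overhead, stolen_entries);
            return 0; /* prints overhead=33 stolen_entries=2015 */
    }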
+static unsigned int intel_gtt_total_entries(void)
+{
+       int size;
+
+       if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
+               u32 pgetbl_ctl;
+               pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+               switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+               case I965_PGETBL_SIZE_128KB:
+                       size = KB(128);
+                       break;
+               case I965_PGETBL_SIZE_256KB:
+                       size = KB(256);
+                       break;
+               case I965_PGETBL_SIZE_512KB:
+                       size = KB(512);
+                       break;
+               case I965_PGETBL_SIZE_1MB:
+                       size = KB(1024);
+                       break;
+               case I965_PGETBL_SIZE_2MB:
+                       size = KB(2048);
+                       break;
+               case I965_PGETBL_SIZE_1_5MB:
+                       size = KB(1024 + 512);
+                       break;
+               default:
+                       dev_info(&intel_private.pcidev->dev,
+                                "unknown page table size, assuming 512KB\n");
+                       size = KB(512);
+               }
+
+               return size/4;
+       } else if (INTEL_GTT_GEN == 6) {
+               u16 snb_gmch_ctl;
+
+               pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+               switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+               default:
+               case SNB_GTT_SIZE_0M:
+                       printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+                       size = MB(0);
+                       break;
+               case SNB_GTT_SIZE_1M:
+                       size = MB(1);
+                       break;
+               case SNB_GTT_SIZE_2M:
+                       size = MB(2);
+                       break;
+               }
+               return size/4;
+       } else {
+               /* On previous hardware, the GTT size was just what was
+                * required to map the aperture.
+                */
+               return intel_private.base.gtt_mappable_entries;
+       }
+}
+
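The size/4 conversions above reflect the 4-byte PTE format: a page table of size bytes holds size/4 entries, each mapping one 4K page. A quick sanity check for a hypothetical 512KB table:

    /* 512 KiB of 4-byte PTEs = 131072 entries = 512 MiB mappable. */
    #include <assert.h>

    int main(void)
    {
            unsigned int gtt_bytes = 512 * 1024;
            unsigned int entries = gtt_bytes / 4;  /* one PTE = 4 bytes */
            unsigned long long mapped = (unsigned long long)entries * 4096;

            assert(entries == 131072);
            assert(mapped == 512ULL * 1024 * 1024);
            return 0;
    }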
+static unsigned int intel_gtt_mappable_entries(void)
+{
+       unsigned int aperture_size;
+
+       if (INTEL_GTT_GEN == 2) {
+               u16 gmch_ctrl;
+
+               pci_read_config_word(intel_private.bridge_dev,
+                                    I830_GMCH_CTRL, &gmch_ctrl);
+
+               if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
+                       aperture_size = MB(64);
+               else
+                       aperture_size = MB(128);
+       } else {
+               /* 9xx supports large sizes, just look at the length */
+               aperture_size = pci_resource_len(intel_private.pcidev, 2);
+       }
+
+       return aperture_size >> PAGE_SHIFT;
+}
+
+static void intel_gtt_teardown_scratch_page(void)
+{
+       set_pages_wb(intel_private.scratch_page, 1);
+       pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       put_page(intel_private.scratch_page);
+       __free_page(intel_private.scratch_page);
+}
+
+static void intel_gtt_cleanup(void)
+{
+       intel_private.driver->cleanup();
+
+       iounmap(intel_private.gtt);
+       iounmap(intel_private.registers);
+
+       intel_gtt_teardown_scratch_page();
+}
+
+static int intel_gtt_init(void)
+{
+       u32 gtt_map_size;
+       int ret;
+
+       ret = intel_private.driver->setup();
+       if (ret != 0)
+               return ret;
+
+       intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
+       intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+
+       dev_info(&intel_private.bridge_dev->dev,
+                       "detected gtt size: %dK total, %dK mappable\n",
+                       intel_private.base.gtt_total_entries * 4,
+                       intel_private.base.gtt_mappable_entries * 4);
+
+       gtt_map_size = intel_private.base.gtt_total_entries * 4;
+
+       intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+                                   gtt_map_size);
+       if (!intel_private.gtt) {
+               intel_private.driver->cleanup();
+               iounmap(intel_private.registers);
+               return -ENOMEM;
+       }
+
+       global_cache_flush();   /* FIXME: ? */
+
+       /* we have to call this as early as possible after the MMIO base address is known */
+       intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
+       if (intel_private.base.gtt_stolen_entries == 0) {
+               intel_private.driver->cleanup();
+               iounmap(intel_private.registers);
+               iounmap(intel_private.gtt);
+               return -ENOMEM;
+       }
+
+       ret = intel_gtt_setup_scratch_page();
+       if (ret != 0) {
+               intel_gtt_cleanup();
+               return ret;
+       }
+
+       return 0;
+}
+
+static int intel_fake_agp_fetch_size(void)
+{
+       int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
+       unsigned int aper_size;
+       int i;
+
+       aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
+                   / MB(1);
+
+       for (i = 0; i < num_sizes; i++) {
+               if (aper_size == intel_fake_agp_sizes[i].size) {
+                       agp_bridge->current_size =
+                               (void *) (intel_fake_agp_sizes + i);
+                       return aper_size;
+               }
        }
 
-       intel_private.gtt_entries = gtt_entries;
+       return 0;
 }
 
-static void intel_i830_fini_flush(void)
+static void i830_cleanup(void)
 {
        kunmap(intel_private.i8xx_page);
        intel_private.i8xx_flush_page = NULL;
-       unmap_page_from_agp(intel_private.i8xx_page);
 
        __free_page(intel_private.i8xx_page);
        intel_private.i8xx_page = NULL;
@@ -780,13 +825,13 @@ static void intel_i830_setup_flush(void)
        if (intel_private.i8xx_page)
                return;
 
-       intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+       intel_private.i8xx_page = alloc_page(GFP_KERNEL);
        if (!intel_private.i8xx_page)
                return;
 
        intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
        if (!intel_private.i8xx_flush_page)
-               intel_i830_fini_flush();
+               i830_cleanup();
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -799,7 +844,7 @@ static void intel_i830_setup_flush(void)
  * that buffer out, we just fill 1KB and clflush it out, on the assumption
  * that it'll push whatever was in there out.  It appears to work.
  */
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+static void i830_chipset_flush(void)
 {
        unsigned int *pg = intel_private.i8xx_flush_page;
 
@@ -811,169 +856,184 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
 }
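The comment block above describes the trick i830_chipset_flush() relies on: fill a 1KB buffer and clflush it, so that earlier writes are pushed out of the CPU caches toward the chipset. A userspace sketch of the same fill-then-flush pattern, assuming x86 with SSE2 and using _mm_clflush in place of the kernel's clflush helpers:

    /* Write 1 KB, then flush each 64-byte cache line so the data
     * leaves the CPU caches. x86/SSE2 only; toy buffer. */
    #include <emmintrin.h> /* _mm_clflush */
    #include <string.h>

    static char flush_buf[1024];

    static void toy_chipset_flush(void)
    {
            size_t i;

            memset(flush_buf, 0, sizeof(flush_buf));
            for (i = 0; i < sizeof(flush_buf); i += 64)
                    _mm_clflush(&flush_buf[i]);
    }

    int main(void)
    {
            toy_chipset_flush();
            return 0;
    }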
 
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
+                            unsigned int flags)
 {
-       int page_order;
-       struct aper_size_info_fixed *size;
-       int num_entries;
-       u32 temp;
+       u32 pte_flags = I810_PTE_VALID;
+
+       switch (flags) {
+       case AGP_DCACHE_MEMORY:
+               pte_flags |= I810_PTE_LOCAL;
+               break;
+       case AGP_USER_CACHED_MEMORY:
+               pte_flags |= I830_PTE_SYSTEM_CACHED;
+               break;
+       }
 
-       size = agp_bridge->current_size;
-       page_order = size->page_order;
-       num_entries = size->num_entries;
-       agp_bridge->gatt_table_real = NULL;
+       writel(addr | pte_flags, intel_private.gtt + entry);
+}
 
-       pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
-       temp &= 0xfff80000;
+static void intel_enable_gtt(void)
+{
+       u32 gma_addr;
+       u16 gmch_ctrl;
 
-       intel_private.registers = ioremap(temp, 128 * 4096);
-       if (!intel_private.registers)
-               return -ENOMEM;
+       if (INTEL_GTT_GEN == 2)
+               pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
+                                     &gma_addr);
+       else
+               pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
+                                     &gma_addr);
 
-       temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-       global_cache_flush();   /* FIXME: ?? */
+       intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
-       /* we have to call this as early as possible after the MMIO base address is known */
-       intel_i830_init_gtt_entries();
-       if (intel_private.gtt_entries == 0) {
-               iounmap(intel_private.registers);
+       pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
+       gmch_ctrl |= I830_GMCH_ENABLED;
+       pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+
+       writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
+              intel_private.registers+I810_PGETBL_CTL);
+       readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+}
+
+static int i830_setup(void)
+{
+       u32 reg_addr;
+
+       pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+       reg_addr &= 0xfff80000;
+
+       intel_private.registers = ioremap(reg_addr, KB(64));
+       if (!intel_private.registers)
                return -ENOMEM;
-       }
 
-       agp_bridge->gatt_table = NULL;
+       intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+       intel_private.pte_bus_addr =
+               readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
 
-       agp_bridge->gatt_bus_addr = temp;
+       intel_i830_setup_flush();
 
        return 0;
 }
 
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
 {
+       agp_bridge->gatt_table_real = NULL;
+       agp_bridge->gatt_table = NULL;
+       agp_bridge->gatt_bus_addr = 0;
+
        return 0;
 }
 
-static int intel_i830_fetch_size(void)
+static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
 {
-       u16 gmch_ctrl;
-       struct aper_size_info_fixed *values;
+       return 0;
+}
 
-       values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+static int intel_fake_agp_configure(void)
+{
+       int i;
 
-       if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
-           agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
-               /* 855GM/852GM/865G has 128MB aperture size */
-               agp_bridge->current_size = (void *) values;
-               agp_bridge->aperture_size_idx = 0;
-               return values[0].size;
-       }
+       intel_enable_gtt();
 
-       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+       agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
-       if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
-               agp_bridge->current_size = (void *) values;
-               agp_bridge->aperture_size_idx = 0;
-               return values[0].size;
-       } else {
-               agp_bridge->current_size = (void *) (values + 1);
-               agp_bridge->aperture_size_idx = 1;
-               return values[1].size;
+       for (i = intel_private.base.gtt_stolen_entries;
+                       i < intel_private.base.gtt_total_entries; i++) {
+               intel_private.driver->write_entry(intel_private.scratch_page_dma,
+                                                 i, 0);
        }
+       readl(intel_private.gtt+i-1);   /* PCI Posting. */
+
+       global_cache_flush();
 
        return 0;
 }
 
-static int intel_i830_configure(void)
+static bool i830_check_flags(unsigned int flags)
 {
-       struct aper_size_info_fixed *current_size;
-       u32 temp;
-       u16 gmch_ctrl;
-       int i;
-
-       current_size = A_SIZE_FIX(agp_bridge->current_size);
+       switch (flags) {
+       case 0:
+       case AGP_PHYS_MEMORY:
+       case AGP_USER_CACHED_MEMORY:
+       case AGP_USER_MEMORY:
+               return true;
+       }
 
-       pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
-       agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+       return false;
+}
 
-       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-       gmch_ctrl |= I830_GMCH_ENABLED;
-       pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+                                       unsigned int sg_len,
+                                       unsigned int pg_start,
+                                       unsigned int flags)
+{
+       struct scatterlist *sg;
+       unsigned int len, m;
+       int i, j;
 
-       writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-       readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+       j = pg_start;
 
-       if (agp_bridge->driver->needs_scratch_page) {
-               for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
-                       writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+       /* sg entries may merge pages, but the GTT needs a separate
+        * address for each 4K page */
+       for_each_sg(sg_list, sg, sg_len, i) {
+               len = sg_dma_len(sg) >> PAGE_SHIFT;
+               for (m = 0; m < len; m++) {
+                       dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+                       intel_private.driver->write_entry(addr,
+                                                         j, flags);
+                       j++;
                }
-               readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
        }
-
-       global_cache_flush();
-
-       intel_i830_setup_flush();
-       return 0;
-}
-
-static void intel_i830_cleanup(void)
-{
-       iounmap(intel_private.registers);
+       readl(intel_private.gtt+j-1);
 }
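As the comment in intel_gtt_insert_sg_entries() notes, the DMA layer may coalesce physically contiguous pages into a single scatterlist segment, while the GTT needs one PTE per 4K page; the loop therefore re-expands every segment. The same re-expansion over a toy segment list, with hypothetical addresses and no kernel scatterlist:

    /* Split merged DMA segments back into per-page "PTE" writes. */
    #include <stdio.h>

    #define TOY_PAGE_SHIFT 12
    #define TOY_PAGE_SIZE  (1u << TOY_PAGE_SHIFT)

    struct toy_seg {
            unsigned long long dma_addr; /* sg_dma_address() analogue */
            unsigned int len;            /* sg_dma_len(), page-aligned */
    };

    int main(void)
    {
            struct toy_seg segs[] = {
                    { 0x100000, 3 * TOY_PAGE_SIZE }, /* 3 merged pages */
                    { 0x400000, 1 * TOY_PAGE_SIZE },
            };
            unsigned int nsegs = sizeof(segs) / sizeof(segs[0]);
            unsigned int j = 16; /* pg_start analogue */
            unsigned int i, m;

            for (i = 0; i < nsegs; i++) {
                    unsigned int pages = segs[i].len >> TOY_PAGE_SHIFT;
                    for (m = 0; m < pages; m++, j++)
                            printf("GTT[%u] = 0x%llx\n", j, segs[i].dma_addr
                                   + ((unsigned long long)m << TOY_PAGE_SHIFT));
            }
            return 0; /* fills slots 16..19 */
    }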
 
-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
-                                    int type)
+static int intel_fake_agp_insert_entries(struct agp_memory *mem,
+                                        off_t pg_start, int type)
 {
-       int i, j, num_entries;
-       void *temp;
+       int i, j;
        int ret = -EINVAL;
-       int mask_type;
 
        if (mem->page_count == 0)
                goto out;
 
-       temp = agp_bridge->current_size;
-       num_entries = A_SIZE_FIX(temp)->num_entries;
-
-       if (pg_start < intel_private.gtt_entries) {
+       if (pg_start < intel_private.base.gtt_stolen_entries) {
                dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
-                          "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
-                          pg_start, intel_private.gtt_entries);
+                          "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
+                          pg_start, intel_private.base.gtt_stolen_entries);
 
                dev_info(&intel_private.pcidev->dev,
                         "trying to insert into local/stolen memory\n");
                goto out_err;
        }
 
-       if ((pg_start + mem->page_count) > num_entries)
+       if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
                goto out_err;
 
-       /* The i830 can't check the GTT for entries since its read only,
-        * depend on the caller to make the correct offset decisions.
-        */
-
        if (type != mem->type)
                goto out_err;
 
-       mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-       if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
-           mask_type != INTEL_AGP_CACHED_MEMORY)
+       if (!intel_private.driver->check_flags(type))
                goto out_err;
 
        if (!mem->is_flushed)
                global_cache_flush();
 
-       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-               writel(agp_bridge->driver->mask_memory(agp_bridge,
-                               page_to_phys(mem->pages[i]), mask_type),
-                      intel_private.registers+I810_PTE_BASE+(j*4));
+       if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+               ret = intel_agp_map_memory(mem);
+               if (ret != 0)
+                       return ret;
+
+               intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
+                                           pg_start, type);
+       } else {
+               for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+                       dma_addr_t addr = page_to_phys(mem->pages[i]);
+                       intel_private.driver->write_entry(addr,
+                                                         j, type);
+               }
+               readl(intel_private.gtt+j-1);
        }
-       readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
 
 out:
        ret = 0;
@@ -982,29 +1042,39 @@ out_err:
        return ret;
 }
 
-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
-                                    int type)
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+                                        off_t pg_start, int type)
 {
        int i;
 
        if (mem->page_count == 0)
                return 0;
 
-       if (pg_start < intel_private.gtt_entries) {
+       if (pg_start < intel_private.base.gtt_stolen_entries) {
                dev_info(&intel_private.pcidev->dev,
                         "trying to disable local/stolen memory\n");
                return -EINVAL;
        }
 
+       if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
+               intel_agp_unmap_memory(mem);
+
        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-               writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+               intel_private.driver->write_entry(intel_private.scratch_page_dma,
+                                                 i, 0);
        }
-       readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+       readl(intel_private.gtt+i-1);
 
        return 0;
 }
 
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
+{
+       intel_private.driver->chipset_flush();
+}
+
+static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
+                                                      int type)
 {
        if (type == AGP_PHYS_MEMORY)
                return alloc_agpphysmem_i8xx(pg_count, type);
@@ -1015,9 +1085,9 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
 static int intel_alloc_chipset_flush_resource(void)
 {
        int ret;
-       ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+       ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
                                     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
-                                    pcibios_align_resource, agp_bridge->dev);
+                                    pcibios_align_resource, intel_private.bridge_dev);
 
        return ret;
 }
@@ -1027,11 +1097,11 @@ static void intel_i915_setup_chipset_flush(void)
        int ret;
        u32 temp;
 
-       pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+       pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
        if (!(temp & 0x1)) {
                intel_alloc_chipset_flush_resource();
                intel_private.resource_valid = 1;
-               pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+               pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                temp &= ~1;
 
@@ -1050,17 +1120,17 @@ static void intel_i965_g33_setup_chipset_flush(void)
        u32 temp_hi, temp_lo;
        int ret;
 
-       pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
-       pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+       pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
+       pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
 
        if (!(temp_lo & 0x1)) {
 
                intel_alloc_chipset_flush_resource();
 
                intel_private.resource_valid = 1;
-               pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+               pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
                        upper_32_bits(intel_private.ifp_resource.start));
-               pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+               pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
        } else {
                u64 l64;
 
@@ -1083,7 +1153,7 @@ static void intel_i9xx_setup_flush(void)
        if (intel_private.ifp_resource.start)
                return;
 
-       if (IS_SNB)
+       if (INTEL_GTT_GEN == 6)
                return;
 
        /* setup a resource for this object */
@@ -1091,7 +1161,7 @@ static void intel_i9xx_setup_flush(void)
        intel_private.ifp_resource.flags = IORESOURCE_MEM;
 
        /* Setup chipset flush for 915 */
-       if (IS_I965 || IS_G33 || IS_G4X) {
+       if (IS_G33 || INTEL_GTT_GEN >= 4) {
                intel_i965_g33_setup_chipset_flush();
        } else {
                intel_i915_setup_chipset_flush();
@@ -1104,41 +1174,7 @@ static void intel_i9xx_setup_flush(void)
                        "can't ioremap flush page - no chipset flushing\n");
 }
 
-static int intel_i9xx_configure(void)
-{
-       struct aper_size_info_fixed *current_size;
-       u32 temp;
-       u16 gmch_ctrl;
-       int i;
-
-       current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-       pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
-
-       agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
-       pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-       gmch_ctrl |= I830_GMCH_ENABLED;
-       pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
-       writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-       readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
-       if (agp_bridge->driver->needs_scratch_page) {
-               for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
-                       writel(agp_bridge->scratch_page, intel_private.gtt+i);
-               }
-               readl(intel_private.gtt+i-1);   /* PCI Posting. */
-       }
-
-       global_cache_flush();
-
-       intel_i9xx_setup_flush();
-
-       return 0;
-}
-
-static void intel_i915_cleanup(void)
+static void i9xx_cleanup(void)
 {
        if (intel_private.i9xx_flush_page)
                iounmap(intel_private.i9xx_flush_page);
@@ -1146,320 +1182,93 @@ static void intel_i915_cleanup(void)
                release_resource(&intel_private.ifp_resource);
        intel_private.ifp_resource.start = 0;
        intel_private.resource_valid = 0;
-       iounmap(intel_private.gtt);
-       iounmap(intel_private.registers);
 }
 
-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
+static void i9xx_chipset_flush(void)
 {
        if (intel_private.i9xx_flush_page)
                writel(1, intel_private.i9xx_flush_page);
 }
 
-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
-                                    int type)
+static void i965_write_entry(dma_addr_t addr, unsigned int entry,
+                            unsigned int flags)
 {
-       int num_entries;
-       void *temp;
-       int ret = -EINVAL;
-       int mask_type;
-
-       if (mem->page_count == 0)
-               goto out;
-
-       temp = agp_bridge->current_size;
-       num_entries = A_SIZE_FIX(temp)->num_entries;
-
-       if (pg_start < intel_private.gtt_entries) {
-               dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
-                          "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
-                          pg_start, intel_private.gtt_entries);
-
-               dev_info(&intel_private.pcidev->dev,
-                        "trying to insert into local/stolen memory\n");
-               goto out_err;
-       }
-
-       if ((pg_start + mem->page_count) > num_entries)
-               goto out_err;
-
-       /* The i915 can't check the GTT for entries since it's read only;
-        * depend on the caller to make the correct offset decisions.
-        */
-
-       if (type != mem->type)
-               goto out_err;
-
-       mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-       if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
-           mask_type != INTEL_AGP_CACHED_MEMORY)
-               goto out_err;
-
-       if (!mem->is_flushed)
-               global_cache_flush();
-
-       intel_agp_insert_sg_entries(mem, pg_start, mask_type);
-
- out:
-       ret = 0;
- out_err:
-       mem->is_flushed = true;
-       return ret;
+       /* Shift high bits down */
+       addr |= (addr >> 28) & 0xf0;
+       writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
 }
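i965-class chips accept 36-bit physical addresses while keeping the 32-bit PTE layout: (addr >> 28) & 0xf0 folds address bits 35:32 into PTE bits 7:4 before writel() truncates to 32 bits. A worked example with a hypothetical page address above 4GB:

    /* Encode a 36-bit address the way i965_write_entry() does. */
    #include <assert.h>
    #include <stdint.h>

    #define I810_PTE_VALID 0x00000001u

    static uint32_t i965_encode(uint64_t addr)
    {
            addr |= (addr >> 28) & 0xf0; /* bits 35:32 -> bits 7:4 */
            return (uint32_t)(addr | I810_PTE_VALID); /* writel() truncates */
    }

    int main(void)
    {
            /* sample page-aligned address with bits 35:32 = 0x3 */
            assert(i965_encode(0x320000000ULL) == 0x20000031u);
            return 0;
    }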
 
-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
-                                    int type)
+static bool gen6_check_flags(unsigned int flags)
 {
-       int i;
-
-       if (mem->page_count == 0)
-               return 0;
-
-       if (pg_start < intel_private.gtt_entries) {
-               dev_info(&intel_private.pcidev->dev,
-                        "trying to disable local/stolen memory\n");
-               return -EINVAL;
-       }
-
-       for (i = pg_start; i < (mem->page_count + pg_start); i++)
-               writel(agp_bridge->scratch_page, intel_private.gtt+i);
-
-       readl(intel_private.gtt+i-1);
-
-       return 0;
+       return true;
 }
 
-/* Return the aperture size by just checking the resource length.  The effect
- * described in the spec of the MSAC registers is just changing of the
- * resource size.
- */
-static int intel_i9xx_fetch_size(void)
+static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
+                            unsigned int flags)
 {
-       int num_sizes = ARRAY_SIZE(intel_i830_sizes);
-       int aper_size; /* size in megabytes */
-       int i;
+       unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+       unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+       u32 pte_flags;
 
-       aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
-
-       for (i = 0; i < num_sizes; i++) {
-               if (aper_size == intel_i830_sizes[i].size) {
-                       agp_bridge->current_size = intel_i830_sizes + i;
-                       return aper_size;
-               }
+       if (type_mask == AGP_USER_UNCACHED_MEMORY)
+               pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+       else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+               pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+               if (gfdt)
+                       pte_flags |= GEN6_PTE_GFDT;
+       } else { /* set 'normal'/'cached' to LLC by default */
+               pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+               if (gfdt)
+                       pte_flags |= GEN6_PTE_GFDT;
        }
 
-       return 0;
+       /* gen6 uses PTE bits 11:4 for physical address bits 39:32 */
+       addr |= (addr >> 28) & 0xff0;
+       writel(addr | pte_flags, intel_private.gtt + entry);
 }
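gen6_write_entry() extends the same fold to 40-bit addresses, as its last comment says: (addr >> 28) & 0xff0 places address bits 39:32 into PTE bits 11:4. For example, with a hypothetical address whose bits 39:32 are 0xAB:

    /* The gen6 variant of the high-bit fold, before the PTE flags. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t addr = 0xAB00000000ULL; /* bits 39:32 = 0xAB */

            addr |= (addr >> 28) & 0xff0;    /* -> PTE bits 11:4 */
            assert((uint32_t)addr == 0x00000AB0u);
            return 0;
    }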
 
-static int intel_i915_get_gtt_size(void)
+static void gen6_cleanup(void)
 {
-       int size;
-
-       if (IS_G33) {
-               u16 gmch_ctrl;
-
-               /* G33's GTT size defined in gmch_ctrl */
-               pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-               switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
-               case I830_GMCH_GMS_STOLEN_512:
-                       size = 512;
-                       break;
-               case I830_GMCH_GMS_STOLEN_1024:
-                       size = 1024;
-                       break;
-               case I830_GMCH_GMS_STOLEN_8192:
-                       size = 8*1024;
-                       break;
-               default:
-                       dev_info(&agp_bridge->dev->dev,
-                                "unknown page table size 0x%x, assuming 512KB\n",
-                               (gmch_ctrl & I830_GMCH_GMS_MASK));
-                       size = 512;
-               }
-       } else {
-               /* On previous hardware, the GTT size was just what was
-                * required to map the aperture.
-                */
-               size = agp_bridge->driver->fetch_size();
-       }
-
-       return KB(size);
 }
 
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+static int i9xx_setup(void)
 {
-       int page_order;
-       struct aper_size_info_fixed *size;
-       int num_entries;
-       u32 temp, temp2;
-       int gtt_map_size;
-
-       size = agp_bridge->current_size;
-       page_order = size->page_order;
-       num_entries = size->num_entries;
-       agp_bridge->gatt_table_real = NULL;
-
-       pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-       pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
+       u32 reg_addr;
 
-       gtt_map_size = intel_i915_get_gtt_size();
+       pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
 
-       intel_private.gtt = ioremap(temp2, gtt_map_size);
-       if (!intel_private.gtt)
-               return -ENOMEM;
-
-       intel_private.gtt_total_size = gtt_map_size / 4;
-
-       temp &= 0xfff80000;
-
-       intel_private.registers = ioremap(temp, 128 * 4096);
-       if (!intel_private.registers) {
-               iounmap(intel_private.gtt);
-               return -ENOMEM;
-       }
+       reg_addr &= 0xfff80000;
 
-       temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-       global_cache_flush();   /* FIXME: ? */
-
-       /* we have to call this as early as possible after the MMIO base address is known */
-       intel_i830_init_gtt_entries();
-       if (intel_private.gtt_entries == 0) {
-               iounmap(intel_private.gtt);
-               iounmap(intel_private.registers);
+       intel_private.registers = ioremap(reg_addr, 128 * 4096);
+       if (!intel_private.registers)
                return -ENOMEM;
-       }
-
-       agp_bridge->gatt_table = NULL;
 
-       agp_bridge->gatt_bus_addr = temp;
-
-       return 0;
-}
-
-/*
- * The i965 supports 36-bit physical addresses, but to keep
- * the format of the GTT the same, the bits that don't fit
- * in a 32-bit word are shifted down to bits 4..7.
- *
- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
- * is always zero on 32-bit architectures, so no need to make
- * this conditional.
- */
-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
-                                           dma_addr_t addr, int type)
-{
-       /* Shift high bits down */
-       addr |= (addr >> 28) & 0xf0;
-
-       /* Type checking must be done elsewhere */
-       return addr | bridge->driver->masks[type].mask;
-}
+       if (INTEL_GTT_GEN == 3) {
+               u32 gtt_addr;
 
-static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
-                                           dma_addr_t addr, int type)
-{
-       /* gen6 has bit11-4 for physical addr bit39-32 */
-       addr |= (addr >> 28) & 0xff0;
-
-       /* Type checking must be done elsewhere */
-       return addr | bridge->driver->masks[type].mask;
-}
-
-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
-{
-       u16 snb_gmch_ctl;
-
-       switch (agp_bridge->dev->device) {
-       case PCI_DEVICE_ID_INTEL_GM45_HB:
-       case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
-       case PCI_DEVICE_ID_INTEL_Q45_HB:
-       case PCI_DEVICE_ID_INTEL_G45_HB:
-       case PCI_DEVICE_ID_INTEL_G41_HB:
-       case PCI_DEVICE_ID_INTEL_B43_HB:
-       case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
-       case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
-       case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
-       case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
-               *gtt_offset = *gtt_size = MB(2);
-               break;
-       case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
-       case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
-       case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
-               *gtt_offset = MB(2);
+               pci_read_config_dword(intel_private.pcidev,
+                                     I915_PTEADDR, &gtt_addr);
+               intel_private.gtt_bus_addr = gtt_addr;
+       } else {
+               u32 gtt_offset;
 
-               pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-               switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-               default:
-               case SNB_GTT_SIZE_0M:
-                       printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-                       *gtt_size = MB(0);
+               switch (INTEL_GTT_GEN) {
+               case 5:
+               case 6:
+                       gtt_offset = MB(2);
                        break;
-               case SNB_GTT_SIZE_1M:
-                       *gtt_size = MB(1);
-                       break;
-               case SNB_GTT_SIZE_2M:
-                       *gtt_size = MB(2);
+               case 4:
+               default:
+                       gtt_offset = KB(512);
                        break;
                }
-               break;
-       default:
-               *gtt_offset = *gtt_size = KB(512);
-       }
-}
-
-/* The intel i965 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
-{
-       int page_order;
-       struct aper_size_info_fixed *size;
-       int num_entries;
-       u32 temp;
-       int gtt_offset, gtt_size;
-
-       size = agp_bridge->current_size;
-       page_order = size->page_order;
-       num_entries = size->num_entries;
-       agp_bridge->gatt_table_real = NULL;
-
-       pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-
-       temp &= 0xfff00000;
-
-       intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
-
-       intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
-
-       if (!intel_private.gtt)
-               return -ENOMEM;
-
-       intel_private.gtt_total_size = gtt_size / 4;
-
-       intel_private.registers = ioremap(temp, 128 * 4096);
-       if (!intel_private.registers) {
-               iounmap(intel_private.gtt);
-               return -ENOMEM;
-       }
-
-       temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-       global_cache_flush();   /* FIXME: ? */
-
-       /* we have to call this as early as possible after the MMIO base address is known */
-       intel_i830_init_gtt_entries();
-       if (intel_private.gtt_entries == 0) {
-               iounmap(intel_private.gtt);
-               iounmap(intel_private.registers);
-               return -ENOMEM;
+               intel_private.gtt_bus_addr = reg_addr + gtt_offset;
        }
 
-       agp_bridge->gatt_table = NULL;
+       intel_private.pte_bus_addr =
+               readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
 
-       agp_bridge->gatt_bus_addr = temp;
+       intel_i9xx_setup_flush();
 
        return 0;
 }
@@ -1475,7 +1284,7 @@ static const struct agp_bridge_driver intel_810_driver = {
        .cleanup                = intel_i810_cleanup,
        .mask_memory            = intel_i810_mask_memory,
        .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
+       .agp_enable             = intel_fake_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = agp_generic_create_gatt_table,
        .free_gatt_table        = agp_generic_free_gatt_table,
@@ -1490,161 +1299,282 @@ static const struct agp_bridge_driver intel_810_driver = {
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_830_driver = {
+static const struct agp_bridge_driver intel_fake_agp_driver = {
        .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i830_sizes,
        .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 4,
-       .needs_scratch_page     = true,
-       .configure              = intel_i830_configure,
-       .fetch_size             = intel_i830_fetch_size,
-       .cleanup                = intel_i830_cleanup,
-       .mask_memory            = intel_i810_mask_memory,
-       .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
+       .aperture_sizes         = intel_fake_agp_sizes,
+       .num_aperture_sizes     = ARRAY_SIZE(intel_fake_agp_sizes),
+       .configure              = intel_fake_agp_configure,
+       .fetch_size             = intel_fake_agp_fetch_size,
+       .cleanup                = intel_gtt_cleanup,
+       .agp_enable             = intel_fake_agp_enable,
        .cache_flush            = global_cache_flush,
-       .create_gatt_table      = intel_i830_create_gatt_table,
-       .free_gatt_table        = intel_i830_free_gatt_table,
-       .insert_memory          = intel_i830_insert_entries,
-       .remove_memory          = intel_i830_remove_entries,
-       .alloc_by_type          = intel_i830_alloc_by_type,
+       .create_gatt_table      = intel_fake_agp_create_gatt_table,
+       .free_gatt_table        = intel_fake_agp_free_gatt_table,
+       .insert_memory          = intel_fake_agp_insert_entries,
+       .remove_memory          = intel_fake_agp_remove_entries,
+       .alloc_by_type          = intel_fake_agp_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-       .chipset_flush          = intel_i830_chipset_flush,
+       .chipset_flush          = intel_fake_agp_chipset_flush,
 };
 
-static const struct agp_bridge_driver intel_915_driver = {
-       .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i830_sizes,
-       .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 4,
-       .needs_scratch_page     = true,
-       .configure              = intel_i9xx_configure,
-       .fetch_size             = intel_i9xx_fetch_size,
-       .cleanup                = intel_i915_cleanup,
-       .mask_memory            = intel_i810_mask_memory,
-       .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
-       .cache_flush            = global_cache_flush,
-       .create_gatt_table      = intel_i915_create_gatt_table,
-       .free_gatt_table        = intel_i830_free_gatt_table,
-       .insert_memory          = intel_i915_insert_entries,
-       .remove_memory          = intel_i915_remove_entries,
-       .alloc_by_type          = intel_i830_alloc_by_type,
-       .free_by_type           = intel_i810_free_by_type,
-       .agp_alloc_page         = agp_generic_alloc_page,
-       .agp_alloc_pages        = agp_generic_alloc_pages,
-       .agp_destroy_page       = agp_generic_destroy_page,
-       .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-       .chipset_flush          = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-       .agp_map_page           = intel_agp_map_page,
-       .agp_unmap_page         = intel_agp_unmap_page,
-       .agp_map_memory         = intel_agp_map_memory,
-       .agp_unmap_memory       = intel_agp_unmap_memory,
-#endif
+static const struct intel_gtt_driver i81x_gtt_driver = {
+       .gen = 1,
+       .dma_mask_size = 32,
 };
-
-static const struct agp_bridge_driver intel_i965_driver = {
-       .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i830_sizes,
-       .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 4,
-       .needs_scratch_page     = true,
-       .configure              = intel_i9xx_configure,
-       .fetch_size             = intel_i9xx_fetch_size,
-       .cleanup                = intel_i915_cleanup,
-       .mask_memory            = intel_i965_mask_memory,
-       .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
-       .cache_flush            = global_cache_flush,
-       .create_gatt_table      = intel_i965_create_gatt_table,
-       .free_gatt_table        = intel_i830_free_gatt_table,
-       .insert_memory          = intel_i915_insert_entries,
-       .remove_memory          = intel_i915_remove_entries,
-       .alloc_by_type          = intel_i830_alloc_by_type,
-       .free_by_type           = intel_i810_free_by_type,
-       .agp_alloc_page         = agp_generic_alloc_page,
-       .agp_alloc_pages        = agp_generic_alloc_pages,
-       .agp_destroy_page       = agp_generic_destroy_page,
-       .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-       .chipset_flush          = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-       .agp_map_page           = intel_agp_map_page,
-       .agp_unmap_page         = intel_agp_unmap_page,
-       .agp_map_memory         = intel_agp_map_memory,
-       .agp_unmap_memory       = intel_agp_unmap_memory,
-#endif
+static const struct intel_gtt_driver i8xx_gtt_driver = {
+       .gen = 2,
+       .setup = i830_setup,
+       .cleanup = i830_cleanup,
+       .write_entry = i830_write_entry,
+       .dma_mask_size = 32,
+       .check_flags = i830_check_flags,
+       .chipset_flush = i830_chipset_flush,
 };
-
-static const struct agp_bridge_driver intel_gen6_driver = {
-       .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i830_sizes,
-       .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 4,
-       .needs_scratch_page     = true,
-       .configure              = intel_i9xx_configure,
-       .fetch_size             = intel_i9xx_fetch_size,
-       .cleanup                = intel_i915_cleanup,
-       .mask_memory            = intel_gen6_mask_memory,
-       .masks                  = intel_gen6_masks,
-       .agp_enable             = intel_i810_agp_enable,
-       .cache_flush            = global_cache_flush,
-       .create_gatt_table      = intel_i965_create_gatt_table,
-       .free_gatt_table        = intel_i830_free_gatt_table,
-       .insert_memory          = intel_i915_insert_entries,
-       .remove_memory          = intel_i915_remove_entries,
-       .alloc_by_type          = intel_i830_alloc_by_type,
-       .free_by_type           = intel_i810_free_by_type,
-       .agp_alloc_page         = agp_generic_alloc_page,
-       .agp_alloc_pages        = agp_generic_alloc_pages,
-       .agp_destroy_page       = agp_generic_destroy_page,
-       .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_gen6_type_to_mask_type,
-       .chipset_flush          = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-       .agp_map_page           = intel_agp_map_page,
-       .agp_unmap_page         = intel_agp_unmap_page,
-       .agp_map_memory         = intel_agp_map_memory,
-       .agp_unmap_memory       = intel_agp_unmap_memory,
-#endif
+static const struct intel_gtt_driver i915_gtt_driver = {
+       .gen = 3,
+       .setup = i9xx_setup,
+       .cleanup = i9xx_cleanup,
+       /* i945 is the last GPU to need phys mem (for overlays and cursors). */
+       .write_entry = i830_write_entry,
+       .dma_mask_size = 32,
+       .check_flags = i830_check_flags,
+       .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g33_gtt_driver = {
+       .gen = 3,
+       .is_g33 = 1,
+       .setup = i9xx_setup,
+       .cleanup = i9xx_cleanup,
+       .write_entry = i965_write_entry,
+       .dma_mask_size = 36,
+       .check_flags = i830_check_flags,
+       .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver pineview_gtt_driver = {
+       .gen = 3,
+       .is_pineview = 1, .is_g33 = 1,
+       .setup = i9xx_setup,
+       .cleanup = i9xx_cleanup,
+       .write_entry = i965_write_entry,
+       .dma_mask_size = 36,
+       .check_flags = i830_check_flags,
+       .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver i965_gtt_driver = {
+       .gen = 4,
+       .setup = i9xx_setup,
+       .cleanup = i9xx_cleanup,
+       .write_entry = i965_write_entry,
+       .dma_mask_size = 36,
+       .check_flags = i830_check_flags,
+       .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver g4x_gtt_driver = {
+       .gen = 5,
+       .setup = i9xx_setup,
+       .cleanup = i9xx_cleanup,
+       .write_entry = i965_write_entry,
+       .dma_mask_size = 36,
+       .check_flags = i830_check_flags,
+       .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver ironlake_gtt_driver = {
+       .gen = 5,
+       .is_ironlake = 1,
+       .setup = i9xx_setup,
+       .cleanup = i9xx_cleanup,
+       .write_entry = i965_write_entry,
+       .dma_mask_size = 36,
+       .check_flags = i830_check_flags,
+       .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver sandybridge_gtt_driver = {
+       .gen = 6,
+       .setup = i9xx_setup,
+       .cleanup = gen6_cleanup,
+       .write_entry = gen6_write_entry,
+       .dma_mask_size = 40,
+       .check_flags = gen6_check_flags,
+       .chipset_flush = i9xx_chipset_flush,
 };
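The intel_gtt_driver structs above form a small per-generation ops table: the right one is selected once at probe time and every chipset-specific operation is dispatched through its hooks afterwards. A minimal sketch of such a dispatch, with hook signatures inferred from the initializers above (the helper name and return convention are assumptions):

/* Sketch: write one GTT entry through the generation-specific hooks. */
static int intel_gtt_write_one(dma_addr_t addr, unsigned int entry,
                               unsigned int flags)
{
        if (!intel_private.driver->check_flags(flags))
                return -EINVAL;         /* caching type not supported */
        intel_private.driver->write_entry(addr, entry, flags);
        return 0;
}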
 
-static const struct agp_bridge_driver intel_g33_driver = {
-       .owner                  = THIS_MODULE,
-       .aperture_sizes         = intel_i830_sizes,
-       .size_type              = FIXED_APER_SIZE,
-       .num_aperture_sizes     = 4,
-       .needs_scratch_page     = true,
-       .configure              = intel_i9xx_configure,
-       .fetch_size             = intel_i9xx_fetch_size,
-       .cleanup                = intel_i915_cleanup,
-       .mask_memory            = intel_i965_mask_memory,
-       .masks                  = intel_i810_masks,
-       .agp_enable             = intel_i810_agp_enable,
-       .cache_flush            = global_cache_flush,
-       .create_gatt_table      = intel_i915_create_gatt_table,
-       .free_gatt_table        = intel_i830_free_gatt_table,
-       .insert_memory          = intel_i915_insert_entries,
-       .remove_memory          = intel_i915_remove_entries,
-       .alloc_by_type          = intel_i830_alloc_by_type,
-       .free_by_type           = intel_i810_free_by_type,
-       .agp_alloc_page         = agp_generic_alloc_page,
-       .agp_alloc_pages        = agp_generic_alloc_pages,
-       .agp_destroy_page       = agp_generic_destroy_page,
-       .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
-       .chipset_flush          = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-       .agp_map_page           = intel_agp_map_page,
-       .agp_unmap_page         = intel_agp_unmap_page,
-       .agp_map_memory         = intel_agp_map_memory,
-       .agp_unmap_memory       = intel_agp_unmap_memory,
-#endif
+/* Table describing the Intel GMCH and AGP/PCIE GART drivers.  At least one
+ * of gmch_driver and gtt_driver must be non-NULL; find_gmch() checks whether
+ * a device with the given gmch_chip_id is actually present before the
+ * drivers are used.
+ */
+static const struct intel_gtt_driver_description {
+       unsigned int gmch_chip_id;
+       char *name;
+       const struct agp_bridge_driver *gmch_driver;
+       const struct intel_gtt_driver *gtt_driver;
+} intel_gtt_chipsets[] = {
+       { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
+               &i81x_gtt_driver},
+       { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
+               &i81x_gtt_driver},
+       { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
+               &i81x_gtt_driver},
+       { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
+               &i81x_gtt_driver},
+       { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
+               &intel_fake_agp_driver, &i8xx_gtt_driver},
+       { PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
+               &intel_fake_agp_driver, &i8xx_gtt_driver},
+       { PCI_DEVICE_ID_INTEL_82854_IG, "854",
+               &intel_fake_agp_driver, &i8xx_gtt_driver},
+       { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
+               &intel_fake_agp_driver, &i8xx_gtt_driver},
+       { PCI_DEVICE_ID_INTEL_82865_IG, "865",
+               &intel_fake_agp_driver, &i8xx_gtt_driver},
+       { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
+               &intel_fake_agp_driver, &i915_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
+               &intel_fake_agp_driver, &i915_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
+               &intel_fake_agp_driver, &i915_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
+               &intel_fake_agp_driver, &i915_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
+               &intel_fake_agp_driver, &i915_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
+               &intel_fake_agp_driver, &i915_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
+               &intel_fake_agp_driver, &i965_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
+               &intel_fake_agp_driver, &i965_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
+               &intel_fake_agp_driver, &i965_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
+               &intel_fake_agp_driver, &i965_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
+               &intel_fake_agp_driver, &i965_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
+               &intel_fake_agp_driver, &i965_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
+               &intel_fake_agp_driver, &g33_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
+               &intel_fake_agp_driver, &g33_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
+               &intel_fake_agp_driver, &g33_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
+               &intel_fake_agp_driver, &pineview_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
+               &intel_fake_agp_driver, &pineview_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
+               &intel_fake_agp_driver, &g4x_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
+               &intel_fake_agp_driver, &g4x_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
+               &intel_fake_agp_driver, &g4x_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
+               &intel_fake_agp_driver, &g4x_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
+               &intel_fake_agp_driver, &g4x_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
+               &intel_fake_agp_driver, &g4x_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
+               &intel_fake_agp_driver, &g4x_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+           "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+           "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
+           "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+           "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+           "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+           "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+           "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+           "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
+           "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+       { 0, NULL, NULL }
 };
+
+static int find_gmch(u16 device)
+{
+       struct pci_dev *gmch_device;
+
+       gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+       if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
+               gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                            device, gmch_device);
+       }
+
+       if (!gmch_device)
+               return 0;
+
+       intel_private.pcidev = gmch_device;
+       return 1;
+}
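find_gmch() leans on the iterator semantics of pci_get_device(): passing the previously returned device as the third argument releases its reference and resumes the search after it, which is exactly what the retry above does when function 0 didn't match. A sketch of the underlying pattern:

/* Sketch: walk all Intel devices with a given ID; pci_get_device()
 * drops the reference on 'dev' and takes one on the next match. */
struct pci_dev *dev = NULL;

while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
        if (PCI_FUNC(dev->devfn) == 0)
                break;          /* found function 0; keep this reference */
}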
+
+int intel_gmch_probe(struct pci_dev *pdev,
+                    struct agp_bridge_data *bridge)
+{
+       int i, mask;
+       bridge->driver = NULL;
+
+       for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
+               if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+                       bridge->driver =
+                               intel_gtt_chipsets[i].gmch_driver;
+                       intel_private.driver =
+                               intel_gtt_chipsets[i].gtt_driver;
+                       break;
+               }
+       }
+
+       if (!bridge->driver)
+               return 0;
+
+       bridge->dev_private_data = &intel_private;
+       bridge->dev = pdev;
+
+       intel_private.bridge_dev = pci_dev_get(pdev);
+
+       dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+
+       mask = intel_private.driver->dma_mask_size;
+       if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+               dev_err(&intel_private.pcidev->dev,
+                       "set gfx device dma mask %d-bit failed!\n", mask);
+       else
+               pci_set_consistent_dma_mask(intel_private.pcidev,
+                                           DMA_BIT_MASK(mask));
+
+       if (bridge->driver == &intel_810_driver)
+               return 1;
+
+       if (intel_gtt_init() != 0)
+               return 0;
+
+       return 1;
+}
+EXPORT_SYMBOL(intel_gmch_probe);
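A caller would pair the exported probe with a bridge allocated by the generic AGP backend; a rough sketch of such a PCI probe path (error handling trimmed, registration omitted; as above, intel_gmch_probe() returns 1 on success and 0 on failure):

static int __devinit example_agp_probe(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
{
        struct agp_bridge_data *bridge;

        bridge = agp_alloc_bridge();
        if (!bridge)
                return -ENOMEM;

        if (!intel_gmch_probe(pdev, bridge)) {
                agp_put_bridge(bridge);
                return -ENODEV;
        }
        /* ... register the bridge, etc. */
        return 0;
}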
+
+struct intel_gtt *intel_gtt_get(void)
+{
+       return &intel_private.base;
+}
+EXPORT_SYMBOL(intel_gtt_get);
+
+void intel_gmch_remove(struct pci_dev *pdev)
+{
+       if (intel_private.pcidev)
+               pci_dev_put(intel_private.pcidev);
+       if (intel_private.bridge_dev)
+               pci_dev_put(intel_private.bridge_dev);
+}
+EXPORT_SYMBOL(intel_gmch_remove);
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_LICENSE("GPL and additional rights");
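For reference, the mask passed to pci_set_dma_mask() above comes from DMA_BIT_MASK(), which turns dma_mask_size into an n-bit all-ones value:

/* From <linux/dma-mapping.h>: */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* e.g. the Sandybridge entries' 40-bit mask:
 *   DMA_BIT_MASK(40) == 0x000000ffffffffffULL */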
index a4eee324eb1ece730780079d0d968b316330a1e6..55b8667f739f64a82731ab926738601825cbb00c 100644 (file)
 #include <linux/bitops.h>
 #include <linux/compat.h>
 #include <linux/clocksource.h>
+#include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <asm/current.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
 
@@ -81,13 +81,13 @@ static cycle_t read_hpet(struct clocksource *cs)
 }
 
 static struct clocksource clocksource_hpet = {
-        .name           = "hpet",
-        .rating         = 250,
-        .read           = read_hpet,
-        .mask           = CLOCKSOURCE_MASK(64),
-       .mult           = 0, /* to be calculated */
-        .shift          = 10,
-        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+       .name           = "hpet",
+       .rating         = 250,
+       .read           = read_hpet,
+       .mask           = CLOCKSOURCE_MASK(64),
+       .mult           = 0,            /* to be calculated */
+       .shift          = 10,
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 static struct clocksource *hpet_clocksource;
 #endif
@@ -465,6 +465,21 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
        if (irq) {
                unsigned long irq_flags;
 
+               if (devp->hd_flags & HPET_SHARED_IRQ) {
+                       /*
+                        * To prevent the interrupt handler from seeing an
+                        * unwanted interrupt status bit, program the timer
+                        * so that it will not fire in the near future ...
+                        */
+                       writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
+                              &timer->hpet_config);
+                       write_counter(read_counter(&hpet->hpet_mc),
+                                     &timer->hpet_compare);
+                       /* ... and clear any left-over status. */
+                       isr = 1 << (devp - devp->hd_hpets->hp_dev);
+                       writel(isr, &hpet->hpet_isr);
+               }
+
                sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
                irq_flags = devp->hd_flags & HPET_SHARED_IRQ
                                                ? IRQF_SHARED : IRQF_DISABLED;
@@ -581,11 +596,10 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
                break;
        case HPET_INFO:
                {
+                       memset(info, 0, sizeof(*info));
                        if (devp->hd_ireqfreq)
                                info->hi_ireqfreq =
                                        hpet_time_div(hpetp, devp->hd_ireqfreq);
-                       else
-                               info->hi_ireqfreq = 0;
                        info->hi_flags =
                            readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
                        info->hi_hpet = hpetp->hp_which;
@@ -811,7 +825,7 @@ int hpet_alloc(struct hpet_data *hdp)
        struct hpets *hpetp;
        size_t siz;
        struct hpet __iomem *hpet;
-       static struct hpets *last = NULL;
+       static struct hpets *last;
        unsigned long period;
        unsigned long long temp;
        u32 remainder;
@@ -1000,6 +1014,8 @@ static int hpet_acpi_add(struct acpi_device *device)
                return -ENODEV;
 
        if (!data.hd_address || !data.hd_nirqs) {
+               if (data.hd_address)
+                       iounmap(data.hd_address);
                printk("%s: no address or irqs in _CRS\n", __func__);
                return -ENODEV;
        }
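The shared-IRQ fix above derives the timer's status bit from pointer arithmetic: devp - devp->hd_hpets->hp_dev is the timer's index in the per-chip array, and HPET status bits are cleared by writing 1. Spelled out as a sketch:

/* Sketch: clear this timer's level-triggered interrupt status. */
int timer_nr = devp - devp->hd_hpets->hp_dev;   /* index in hp_dev[] */
u32 isr_bit = 1U << timer_nr;

writel(isr_bit, &hpet->hpet_isr);               /* write-1-to-clear */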
index c4efb55cbc03d380fded78b6e5bc61e63406cbcb..7a84a0595477512581b4247f8a0165d775d34aba 100644 (file)
@@ -61,7 +61,8 @@ console_initcall(hvc_tile_console_init);
 
 static int __init hvc_tile_init(void)
 {
-       hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
-       return 0;
+       struct hvc_struct *s;
+       s = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
+       return IS_ERR(s) ? PTR_ERR(s) : 0;
 }
 device_initcall(hvc_tile_init);
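The fix above works because hvc_alloc() reports failure through an error pointer rather than NULL, hence the IS_ERR()/PTR_ERR() pair. The general idiom (callee hypothetical):

void *p = some_call();          /* hypothetical callee returning ERR_PTR() */

if (IS_ERR(p))
        return PTR_ERR(p);      /* recover the negative errno */
/* otherwise p is an ordinary valid pointer */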
index 60446f82a3fc2653c905adb3a6f944c1686f0863..6b8e6d18a8e6e7c8c44b2644ed7c72beb953f1f5 100644 (file)
@@ -74,7 +74,8 @@ static int __write_console(const char *data, int len)
        wmb();                  /* write ring before updating pointer */
        intf->out_prod = prod;
 
-       notify_daemon();
+       if (sent)
+               notify_daemon();
        return sent;
 }
 
index e537610d2f09475bf321b8652815fb9eaa46c762..b293d57d30a79ee11d6177fb9296a2250c64f1a3 100644 (file)
@@ -1665,6 +1665,17 @@ static int check_hotmod_int_op(const char *curr, const char *option,
        return 0;
 }
 
+static struct smi_info *smi_info_alloc(void)
+{
+       struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+       if (info) {
+               spin_lock_init(&info->si_lock);
+               spin_lock_init(&info->msg_lock);
+       }
+       return info;
+}
+
 static int hotmod_handler(const char *val, struct kernel_param *kp)
 {
        char *str = kstrdup(val, GFP_KERNEL);
@@ -1779,7 +1790,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
                }
 
                if (op == HM_ADD) {
-                       info = kzalloc(sizeof(*info), GFP_KERNEL);
+                       info = smi_info_alloc();
                        if (!info) {
                                rv = -ENOMEM;
                                goto out;
@@ -1844,7 +1855,7 @@ static __devinit void hardcode_find_bmc(void)
                if (!ports[i] && !addrs[i])
                        continue;
 
-               info = kzalloc(sizeof(*info), GFP_KERNEL);
+               info = smi_info_alloc();
                if (!info)
                        return;
 
@@ -2027,7 +2038,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
                return -ENODEV;
        }
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = smi_info_alloc();
        if (!info) {
                printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
                return -ENOMEM;
@@ -2137,7 +2148,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
        if (!acpi_dev)
                return -ENODEV;
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = smi_info_alloc();
        if (!info)
                return -ENOMEM;
 
@@ -2318,7 +2329,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 {
        struct smi_info *info;
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = smi_info_alloc();
        if (!info) {
                printk(KERN_ERR PFX "Could not allocate SI data\n");
                return;
@@ -2425,7 +2436,7 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
        int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
        struct smi_info *info;
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = smi_info_alloc();
        if (!info)
                return -ENOMEM;
 
@@ -2566,7 +2577,7 @@ static int __devinit ipmi_of_probe(struct platform_device *dev,
                return -EINVAL;
        }
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = smi_info_alloc();
 
        if (!info) {
                dev_err(&dev->dev,
@@ -3013,7 +3024,7 @@ static __devinit void default_find_bmc(void)
                if (check_legacy_ioport(ipmi_defaults[i].port))
                        continue;
 #endif
-               info = kzalloc(sizeof(*info), GFP_KERNEL);
+               info = smi_info_alloc();
                if (!info)
                        return;
 
@@ -3138,9 +3149,6 @@ static int try_smi_init(struct smi_info *new_smi)
                goto out_err;
        }
 
-       spin_lock_init(&(new_smi->si_lock));
-       spin_lock_init(&(new_smi->msg_lock));
-
        /* Do low-level detection first. */
        if (new_smi->handlers->detect(new_smi->si_sm)) {
                if (new_smi->addr_source)
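Centralizing the spin_lock_init() calls in smi_info_alloc() means every discovery path (hotmod, hardcoded, SPMI, PNP, DMI, PCI, OF, defaults) gets initialized locks the moment the structure exists, instead of relying on try_smi_init() running first. The pattern in general form (names illustrative):

/* Sketch: allocate-and-initialize helper so no caller can ever touch
 * an uninitialized lock. */
static struct foo *foo_alloc(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                spin_lock_init(&f->lock);
        return f;
}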
index e985b1c2730e800db599ad445371a8b100eee506..1256454b2d4364f7fa6ad95b661d30ad62b673b4 100644 (file)
@@ -876,6 +876,10 @@ static int memory_open(struct inode *inode, struct file *filp)
        if (dev->dev_info)
                filp->f_mapping->backing_dev_info = dev->dev_info;
 
+       /* Is this /dev/mem or /dev/kmem? */
+       if (dev->dev_info == &directly_mappable_cdev_bdi)
+               filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+
        if (dev->fops->open)
                return dev->fops->open(inode, filp);
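FMODE_UNSIGNED_OFFSET tells the VFS to treat the file offset as unsigned, so lseek() on /dev/mem can reach physical addresses beyond the positive loff_t range. The seek-time check looks roughly like this (sketch modeled on fs/read_write.c of this era):

static int unsigned_offsets(struct file *file)
{
        return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

/* ... later, when validating a new file position: */
if (offset < 0 && !unsigned_offsets(file))
        return -EINVAL;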
 
index 0eac3da566ba23ff8aaf3c33256b5c3ca6280281..a84250a5dd5111f08f79c58c9ff34875b744ff65 100644 (file)
@@ -1467,7 +1467,7 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
                return -EINVAL;
 
        while (size) {
-               copy = min(drest, min(size, dst->length));
+               copy = min3(drest, size, dst->length);
 
                size -= copy;
                drest -= copy;
@@ -1729,7 +1729,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
                return -EINVAL;
 
        while (size) {
-               copy = min(srest, min(dst->length, size));
+               copy = min3(srest, dst->length, size);
 
                daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
                memcpy(daddr + dst->offset + offset, saddr, copy);
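min3() is equivalent to the nested form it replaces while still type-checking each argument, so both hunks above are behavior-preserving readability changes:

/* The replacement: */
copy = min3(drest, size, dst->length);
/* computes the same value as the old nested form: */
copy = min(drest, min(size, dst->length));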
index a2b12aa1f2b93920b55b255efbf60ad037a4c0d0..501866662e055131bc3dad3f4a5fb0b104913060 100644 (file)
@@ -345,7 +345,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
 
        do {
                level = __ffs(pending);
-               handle_nested_irq(level + chip->irq_base);
+               generic_handle_irq(level + chip->irq_base);
 
                pending &= ~(1 << level);
        } while (pending);
@@ -360,7 +360,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
        struct pca953x_platform_data *pdata = client->dev.platform_data;
        int ret;
 
-       if (pdata->irq_base && (id->driver_data & PCA953X_INT)) {
+       if (pdata->irq_base != -1
+                       && (id->driver_data & PCA953X_INT)) {
                int lvl;
 
                ret = pca953x_read_reg(chip, PCA953X_INPUT,
@@ -383,7 +384,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
                        set_irq_chip_data(irq, chip);
                        set_irq_chip_and_handler(irq, &pca953x_irq_chip,
                                                 handle_edge_irq);
-                       set_irq_nested_thread(irq, 1);
 #ifdef CONFIG_ARM
                        set_irq_flags(irq, IRQF_VALID);
 #else
@@ -394,6 +394,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
                ret = request_threaded_irq(client->irq,
                                           NULL,
                                           pca953x_irq_handler,
+                                          IRQF_TRIGGER_RISING |
                                           IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                           dev_name(&client->dev), chip);
                if (ret) {
@@ -408,13 +409,13 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
        return 0;
 
 out_failed:
-       chip->irq_base = 0;
+       chip->irq_base = -1;
        return ret;
 }
 
 static void pca953x_irq_teardown(struct pca953x_chip *chip)
 {
-       if (chip->irq_base)
+       if (chip->irq_base != -1)
                free_irq(chip->client->irq, chip);
 }
 #else /* CONFIG_GPIO_PCA953X_IRQ */
@@ -424,7 +425,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
        struct i2c_client *client = chip->client;
        struct pca953x_platform_data *pdata = client->dev.platform_data;
 
-       if (pdata->irq_base && (id->driver_data & PCA953X_INT))
+       if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT))
                dev_warn(&client->dev, "interrupt support not compiled in\n");
 
        return 0;
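Treating -1 rather than 0 as "no interrupts" frees up 0 as a legitimate irq_base. Board code would now opt in or out explicitly, along these lines (field values illustrative):

/* Sketch: platform data for a PCA953x expander. */
static struct pca953x_platform_data example_pca953x_pdata = {
        .gpio_base = 16,
        .irq_base  = -1,        /* -1 disables interrupts; >= 0 enables */
};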
index 30879df3daead6c3e208cfed0e8e7d191d31eaa9..cc9277885dd071b2da83edeae9250b6ea5985020 100644 (file)
@@ -1 +1 @@
-obj-y                  += drm/ vga/
+obj-y                  += drm/ vga/ stub/
index f3a23a329f4e29e7817755bf7f9f367cbdd74d0a..997c43d0490947cd927bb9e1be7a1182776a3d65 100644 (file)
@@ -5,7 +5,7 @@
 ccflags-y := -Iinclude/drm
 
 drm-y       := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
-               drm_context.o drm_dma.o drm_drawable.o \
+               drm_context.o drm_dma.o \
                drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
                drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
index ba38e0147220e7b199b73b9d0d13804ff80254f3..252fdb98b73a62b77fd812bb69823a38037010a7 100644 (file)
@@ -193,7 +193,7 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device is present and has been acquired, allocates the
- * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
  */
 int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 {
@@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 
        pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
        type = (u32) request->type;
-       if (!(memory = drm_alloc_agp(dev, pages, type))) {
+       if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
                kfree(entry);
                return -ENOMEM;
        }
@@ -423,38 +423,6 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
        return head;
 }
 
-/** Calls agp_allocate_memory() */
-DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
-                                    size_t pages, u32 type)
-{
-       return agp_allocate_memory(bridge, pages, type);
-}
-
-/** Calls agp_free_memory() */
-int drm_agp_free_memory(DRM_AGP_MEM * handle)
-{
-       if (!handle)
-               return 0;
-       agp_free_memory(handle);
-       return 1;
-}
-
-/** Calls agp_bind_memory() */
-int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
-{
-       if (!handle)
-               return -EINVAL;
-       return agp_bind_memory(handle, start);
-}
-
-/** Calls agp_unbind_memory() */
-int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
-{
-       if (!handle)
-               return -EINVAL;
-       return agp_unbind_memory(handle);
-}
-
 /**
  * Binds a collection of pages into AGP memory at the given offset, returning
  * the AGP memory structure containing them.
@@ -474,7 +442,7 @@ drm_agp_bind_pages(struct drm_device *dev,
 
        DRM_DEBUG("\n");
 
-       mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+       mem = agp_allocate_memory(dev->agp->bridge, num_pages,
                                      type);
        if (mem == NULL) {
                DRM_ERROR("Failed to allocate memory for %ld pages\n",
@@ -487,7 +455,7 @@ drm_agp_bind_pages(struct drm_device *dev,
        mem->page_count = num_pages;
 
        mem->is_flushed = true;
-       ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+       ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
        if (ret != 0) {
                DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
                agp_free_memory(mem);
index 2607753a320bfb84b7bf8caafafe7856f4cfc0b8..6d440fb894cf2acba53c863d20d10b16ccfa14b5 100644 (file)
@@ -333,14 +333,6 @@ int drm_addctx(struct drm_device *dev, void *data,
                return -ENOMEM;
        }
 
-       if (ctx->handle != DRM_KERNEL_CONTEXT) {
-               if (dev->driver->context_ctor)
-                       if (!dev->driver->context_ctor(dev, ctx->handle)) {
-                               DRM_DEBUG("Running out of ctxs or memory.\n");
-                               return -ENOMEM;
-                       }
-       }
-
        ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
        if (!ctx_entry) {
                DRM_DEBUG("out of memory\n");
index 37e0b4fa482a810afc9eded6fda136a90bcc5cc0..6985cb1da72cbc232919ab2bb93a52f26304a345 100644 (file)
@@ -1854,7 +1854,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
        }
 
        if (fb->funcs->dirty) {
-               ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+               ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+                                      clips, num_clips);
        } else {
                ret = -ENOSYS;
                goto out_err2;
index 677b275fa721affe2fec2508e780c1b1970f4f73..9d8c892d07c93b1d4e855adc839e28bea5959aab 100644 (file)
@@ -48,7 +48,6 @@ static struct drm_info_list drm_debugfs_list[] = {
        {"queues", drm_queues_info, 0},
        {"bufs", drm_bufs_info, 0},
        {"gem_names", drm_gem_name_info, DRIVER_GEM},
-       {"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
        {"vma", drm_vma_info, 0},
 #endif
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
deleted file mode 100644 (file)
index c53c976..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- * \author Michel Dänzer <michel@tungstengraphics.com>
- */
-
-/*
- * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Allocate drawable ID and memory to store information about it.
- */
-int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       unsigned long irqflags;
-       struct drm_draw *draw = data;
-       int new_id = 0;
-       int ret;
-
-again:
-       if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
-               DRM_ERROR("Out of memory expanding drawable idr\n");
-               return -ENOMEM;
-       }
-
-       spin_lock_irqsave(&dev->drw_lock, irqflags);
-       ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
-       if (ret == -EAGAIN) {
-               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-               goto again;
-       }
-
-       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
-       draw->handle = new_id;
-
-       DRM_DEBUG("%d\n", draw->handle);
-
-       return 0;
-}
-
-/**
- * Free drawable ID and memory to store information about it.
- */
-int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_draw *draw = data;
-       unsigned long irqflags;
-       struct drm_drawable_info *info;
-
-       spin_lock_irqsave(&dev->drw_lock, irqflags);
-
-       info = drm_get_drawable_info(dev, draw->handle);
-       if (info == NULL) {
-               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-               return -EINVAL;
-       }
-       kfree(info->rects);
-       kfree(info);
-
-       idr_remove(&dev->drw_idr, draw->handle);
-
-       spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-       DRM_DEBUG("%d\n", draw->handle);
-       return 0;
-}
-
-int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_update_draw *update = data;
-       unsigned long irqflags;
-       struct drm_clip_rect *rects;
-       struct drm_drawable_info *info;
-       int err;
-
-       info = idr_find(&dev->drw_idr, update->handle);
-       if (!info) {
-               info = kzalloc(sizeof(*info), GFP_KERNEL);
-               if (!info)
-                       return -ENOMEM;
-               if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
-                       DRM_ERROR("No such drawable %d\n", update->handle);
-                       kfree(info);
-                       return -EINVAL;
-               }
-       }
-
-       switch (update->type) {
-       case DRM_DRAWABLE_CLIPRECTS:
-               if (update->num == 0)
-                       rects = NULL;
-               else if (update->num != info->num_rects) {
-                       rects = kmalloc(update->num *
-                                       sizeof(struct drm_clip_rect),
-                                       GFP_KERNEL);
-               } else
-                       rects = info->rects;
-
-               if (update->num && !rects) {
-                       DRM_ERROR("Failed to allocate cliprect memory\n");
-                       err = -ENOMEM;
-                       goto error;
-               }
-
-               if (update->num && DRM_COPY_FROM_USER(rects,
-                                                    (struct drm_clip_rect __user *)
-                                                    (unsigned long)update->data,
-                                                    update->num *
-                                                    sizeof(*rects))) {
-                       DRM_ERROR("Failed to copy cliprects from userspace\n");
-                       err = -EFAULT;
-                       goto error;
-               }
-
-               spin_lock_irqsave(&dev->drw_lock, irqflags);
-
-               if (rects != info->rects) {
-                       kfree(info->rects);
-               }
-
-               info->rects = rects;
-               info->num_rects = update->num;
-
-               spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
-               DRM_DEBUG("Updated %d cliprects for drawable %d\n",
-                         info->num_rects, update->handle);
-               break;
-       default:
-               DRM_ERROR("Invalid update type %d\n", update->type);
-               return -EINVAL;
-       }
-
-       return 0;
-
-error:
-       if (rects != info->rects)
-               kfree(rects);
-
-       return err;
-}
-
-/**
- * Caller must hold the drawable spinlock!
- */
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
-{
-       return idr_find(&dev->drw_idr, id);
-}
-EXPORT_SYMBOL(drm_get_drawable_info);
-
-static int drm_drawable_free(int idr, void *p, void *data)
-{
-       struct drm_drawable_info *info = p;
-
-       if (info) {
-               kfree(info->rects);
-               kfree(info);
-       }
-
-       return 0;
-}
-
-void drm_drawable_free_all(struct drm_device *dev)
-{
-       idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
-       idr_remove_all(&dev->drw_idr);
-}
index ff6690f4fc87a2593c4e350bad8aaf030daee164..271835a71570aaf073c807bc3002f5cb3a0cdaaa 100644 (file)
@@ -91,8 +91,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
 
-       DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
        DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
@@ -127,7 +127,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 
        DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
 
-       DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
@@ -180,10 +180,6 @@ int drm_lastclose(struct drm_device * dev)
 
        mutex_lock(&dev->struct_mutex);
 
-       /* Free drawable information memory */
-       drm_drawable_free_all(dev);
-       del_timer(&dev->timer);
-
        /* Clear AGP information */
        if (drm_core_has_AGP(dev) && dev->agp &&
                        !drm_core_check_feature(dev, DRIVER_MODESET)) {
index 96e96310822513bfd9c984054a913eac7b5acc50..c1a26217a5305ee65a07b85fa77e4c15db77436b 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_edid_modes.h"
@@ -1268,34 +1267,51 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 }
 
 #define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK    0x01
 #define VENDOR_BLOCK    0x03
+#define EDID_BASIC_AUDIO       (1 << 6)
+
 /**
- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
- * @edid: monitor EDID information
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
+ * Search the EDID for a CEA extension block.
  */
-bool drm_detect_hdmi_monitor(struct edid *edid)
+static u8 *drm_find_cea_extension(struct edid *edid)
 {
-       char *edid_ext = NULL;
-       int i, hdmi_id;
-       int start_offset, end_offset;
-       bool is_hdmi = false;
+       u8 *edid_ext = NULL;
+       int i;
 
        /* No EDID or EDID extensions */
        if (edid == NULL || edid->extensions == 0)
-               goto end;
+               return NULL;
 
        /* Find CEA extension */
        for (i = 0; i < edid->extensions; i++) {
-               edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
-               /* This block is CEA extension */
-               if (edid_ext[0] == 0x02)
+               edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+               if (edid_ext[0] == CEA_EXT)
                        break;
        }
 
        if (i == edid->extensions)
+               return NULL;
+
+       return edid_ext;
+}
+
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+       u8 *edid_ext;
+       int i, hdmi_id;
+       int start_offset, end_offset;
+       bool is_hdmi = false;
+
+       edid_ext = drm_find_cea_extension(edid);
+       if (!edid_ext)
                goto end;
 
        /* Data block offset in CEA extension block */
@@ -1325,6 +1341,53 @@ end:
 }
 EXPORT_SYMBOL(drm_detect_hdmi_monitor);
 
+/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ *
+ * The monitor should have a CEA extension block.
+ * If the monitor has 'basic audio' but no CEA audio blocks, it supports
+ * 'basic audio' only.  If there is any audio extension block with a
+ * supported audio format, assume at least 'basic audio' support, even
+ * if 'basic audio' is not defined in the EDID.
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+       u8 *edid_ext;
+       int i, j;
+       bool has_audio = false;
+       int start_offset, end_offset;
+
+       edid_ext = drm_find_cea_extension(edid);
+       if (!edid_ext)
+               goto end;
+
+       has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+       if (has_audio) {
+               DRM_DEBUG_KMS("Monitor has basic audio support\n");
+               goto end;
+       }
+
+       /* Data block offset in CEA extension block */
+       start_offset = 4;
+       end_offset = edid_ext[2];
+
+       for (i = start_offset; i < end_offset;
+                       i += ((edid_ext[i] & 0x1f) + 1)) {
+               if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+                       has_audio = true;
+                       for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+                               DRM_DEBUG_KMS("CEA audio format %d\n",
+                                             (edid_ext[i + j] >> 3) & 0xf);
+                       goto end;
+               }
+       }
+end:
+       return has_audio;
+}
+EXPORT_SYMBOL(drm_detect_monitor_audio);
+
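Both CEA parsers above walk data blocks the same way: the top three bits of a block's first byte hold its tag and the low five bits its payload length, which is why the loops advance by (edid_ext[i] & 0x1f) + 1. In isolation:

u8 hdr = edid_ext[i];
u8 tag = hdr >> 5;      /* e.g. AUDIO_BLOCK (0x01) or VENDOR_BLOCK (0x03) */
u8 len = hdr & 0x1f;    /* payload bytes that follow the header byte */

i += len + 1;           /* step to the next data block */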
 /**
  * drm_add_edid_modes - add modes from EDID data, if available
  * @connector: connector we're probing
index 6a5e403f9aa160b54caad1bca22ba93dd27415e6..d2849e4ea4d00a45b6f9dfdfe3da1f0da7824533 100644 (file)
@@ -242,6 +242,30 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
        return 0;
 }
 
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+       uint16_t *r_base, *g_base, *b_base;
+       int i;
+
+       r_base = crtc->gamma_store;
+       g_base = r_base + crtc->gamma_size;
+       b_base = g_base + crtc->gamma_size;
+
+       for (i = 0; i < crtc->gamma_size; i++)
+               helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+       uint16_t *r_base, *g_base, *b_base;
+
+       r_base = crtc->gamma_store;
+       g_base = r_base + crtc->gamma_size;
+       b_base = g_base + crtc->gamma_size;
+
+       crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+
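Both helpers rely on crtc->gamma_store holding three consecutive channel arrays of gamma_size entries each, which is what the pointer arithmetic encodes:

/* Layout implied by the code above:
 *   gamma_store[0 .. size-1]         red
 *   gamma_store[size .. 2*size-1]    green
 *   gamma_store[2*size .. 3*size-1]  blue
 */
uint16_t *r = crtc->gamma_store;
uint16_t *g = r + crtc->gamma_size;
uint16_t *b = g + crtc->gamma_size;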
 int drm_fb_helper_debug_enter(struct fb_info *info)
 {
        struct drm_fb_helper *helper = info->par;
@@ -260,11 +284,12 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
                                continue;
 
                        funcs = mode_set->crtc->helper_private;
+                       drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
                        funcs->mode_set_base_atomic(mode_set->crtc,
                                                    mode_set->fb,
                                                    mode_set->x,
-                                                   mode_set->y);
-
+                                                   mode_set->y,
+                                                   ENTER_ATOMIC_MODE_SET);
                }
        }
 
@@ -308,8 +333,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
                        continue;
                }
 
+               drm_fb_helper_restore_lut_atomic(mode_set->crtc);
                funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
-                                           crtc->y);
+                                           crtc->y, LEAVE_ATOMIC_MODE_SET);
        }
 
        return 0;
index 5663d2719063de9231ca6cc153b63b30422e17aa..ea1c4b019ebf96290c0768202d7277cda47ba0f9 100644 (file)
@@ -92,12 +92,6 @@ drm_gem_init(struct drm_device *dev)
 
        spin_lock_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);
-       atomic_set(&dev->object_count, 0);
-       atomic_set(&dev->object_memory, 0);
-       atomic_set(&dev->pin_count, 0);
-       atomic_set(&dev->pin_memory, 0);
-       atomic_set(&dev->gtt_count, 0);
-       atomic_set(&dev->gtt_memory, 0);
 
        mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
        if (!mm) {
@@ -151,9 +145,6 @@ int drm_gem_object_init(struct drm_device *dev,
        atomic_set(&obj->handle_count, 0);
        obj->size = size;
 
-       atomic_inc(&dev->object_count);
-       atomic_add(obj->size, &dev->object_memory);
-
        return 0;
 }
 EXPORT_SYMBOL(drm_gem_object_init);
@@ -180,8 +171,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
        return obj;
 fput:
        /* Object_init mangles the global counters - readjust them. */
-       atomic_dec(&dev->object_count);
-       atomic_sub(obj->size, &dev->object_memory);
        fput(obj->filp);
 free:
        kfree(obj);
@@ -436,10 +425,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
        fput(obj->filp);
-       atomic_dec(&dev->object_count);
-       atomic_sub(obj->size, &dev->object_memory);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
 
index 974e970ce3f81ce014170b90ad1b8adc8a1dd5a9..3cdbaf379bb51324110d5d0ace0c9dc2dd24059e 100644 (file)
@@ -270,20 +270,6 @@ int drm_gem_name_info(struct seq_file *m, void *data)
        return 0;
 }
 
-int drm_gem_object_info(struct seq_file *m, void* data)
-{
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-
-       seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
-       seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
-       seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
-       seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
-       seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
-       seq_printf(m, "%d gtt total\n", dev->gtt_total);
-       return 0;
-}
-
 #if DRM_DEBUG_CODE
 
 int drm_vma_info(struct seq_file *m, void *data)
index 9bf93bc9a32c27798791684ab73512a046e92a9c..632ae243ede0b9f1df4b42f2e96b30479be4e05a 100644 (file)
@@ -37,6 +37,8 @@
 
 static int drm_notifier(void *priv);
 
+static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+
 /**
  * Lock ioctl.
  *
@@ -124,9 +126,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
                block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
        }
 
-       if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
-               dev->driver->dma_ready(dev);
-
        if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
        {
                if (dev->driver->dma_quiescent(dev)) {
@@ -136,12 +135,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
                }
        }
 
-       if (dev->driver->kernel_context_switch &&
-           dev->last_context != lock->context) {
-               dev->driver->kernel_context_switch(dev, dev->last_context,
-                                                  lock->context);
-       }
-
        return 0;
 }
 
@@ -169,15 +162,8 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 
        atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
 
-       /* kernel_context_switch isn't used by any of the x86 drm
-        * modules but is required by the Sparc driver.
-        */
-       if (dev->driver->kernel_context_switch_unlock)
-               dev->driver->kernel_context_switch_unlock(dev);
-       else {
-               if (drm_lock_free(&master->lock, lock->context)) {
-                       /* FIXME: Should really bail out here. */
-               }
+       if (drm_lock_free(&master->lock, lock->context)) {
+               /* FIXME: Should really bail out here. */
        }
 
        unblock_all_signals();
@@ -193,6 +179,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
  *
  * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
  */
+static
 int drm_lock_take(struct drm_lock_data *lock_data,
                  unsigned int context)
 {
@@ -229,7 +216,6 @@ int drm_lock_take(struct drm_lock_data *lock_data,
        }
        return 0;
 }
-EXPORT_SYMBOL(drm_lock_take);
 
 /**
  * This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -297,7 +283,6 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
        wake_up_interruptible(&lock_data->lock_queue);
        return 0;
 }
-EXPORT_SYMBOL(drm_lock_free);
 
 /**
  * If we get here, it means that the process has called DRM_IOCTL_LOCK
@@ -360,7 +345,6 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
        }
        spin_unlock_bh(&lock_data->spinlock);
 }
-EXPORT_SYMBOL(drm_idlelock_take);
 
 void drm_idlelock_release(struct drm_lock_data *lock_data)
 {
@@ -380,8 +364,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
        }
        spin_unlock_bh(&lock_data->spinlock);
 }
-EXPORT_SYMBOL(drm_idlelock_release);
-
 
 int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 {
@@ -390,5 +372,3 @@ int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
                _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
                master->lock.file_priv == file_priv);
 }
-
-EXPORT_SYMBOL(drm_i_have_hw_lock);
index 7732268eced2564d37f229eefdd7c38ead97dcfd..c9b805000a11e827c1677c0d81ccb33c5be1442c 100644 (file)
@@ -99,29 +99,23 @@ static void *agp_remap(unsigned long offset, unsigned long size,
        return addr;
 }
 
-/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
-{
-       return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
-}
-
 /** Wrapper around agp_free_memory() */
-int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
-       return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+       agp_free_memory(handle);
 }
 EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
 {
-       return drm_agp_bind_memory(handle, start);
+       return agp_bind_memory(handle, start);
 }
 
 /** Wrapper around agp_unbind_memory() */
 int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
-       return drm_agp_unbind_memory(handle);
+       return agp_unbind_memory(handle);
 }
 EXPORT_SYMBOL(drm_unbind_agp);
 
index a9ba6b69ad3526dacce63039457f0c48f961e3db..9e5b07efebb72172ef97894ec2fa42a7548bd896 100644 (file)
@@ -55,7 +55,6 @@ static struct drm_info_list drm_proc_list[] = {
        {"queues", drm_queues_info, 0},
        {"bufs", drm_bufs_info, 0},
        {"gem_names", drm_gem_name_info, DRIVER_GEM},
-       {"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
        {"vma", drm_vma_info, 0},
 #endif
@@ -151,7 +150,6 @@ fail:
 int drm_proc_init(struct drm_minor *minor, int minor_id,
                  struct proc_dir_entry *root)
 {
-       struct drm_device *dev = minor->dev;
        char name[64];
        int ret;
 
@@ -172,14 +170,6 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
                return ret;
        }
 
-       if (dev->driver->proc_init) {
-               ret = dev->driver->proc_init(minor);
-               if (ret) {
-                       DRM_ERROR("DRM: Driver failed to initialize "
-                                 "/proc/dri.\n");
-                       return ret;
-               }
-       }
        return 0;
 }
 
@@ -216,15 +206,11 @@ int drm_proc_remove_files(struct drm_info_list *files, int count,
  */
 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
 {
-       struct drm_device *dev = minor->dev;
        char name[64];
 
        if (!root || !minor->proc_root)
                return 0;
 
-       if (dev->driver->proc_cleanup)
-               dev->driver->proc_cleanup(minor);
-
        drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
 
        sprintf(name, "%d", minor->index);
index 9034c4c6100dd2352c78b8b6d207001b0ce53bb5..d15e09b0ae0bf8c322712d5e3101f7dc8d53eb6a 100644 (file)
@@ -184,8 +184,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
        drm_sg_cleanup(entry);
        return -ENOMEM;
 }
-EXPORT_SYMBOL(drm_sg_alloc);
-
 
 int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
index d1ad57450df1543af9cc502a281b23661075b331..cdc89ee042cc22b83ebe610ddd67d01cf86ea048 100644 (file)
@@ -240,14 +240,10 @@ int drm_fill_in_dev(struct drm_device *dev,
        INIT_LIST_HEAD(&dev->vblank_event_list);
 
        spin_lock_init(&dev->count_lock);
-       spin_lock_init(&dev->drw_lock);
        spin_lock_init(&dev->event_lock);
-       init_timer(&dev->timer);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->ctxlist_mutex);
 
-       idr_init(&dev->drw_idr);
-
        if (drm_ht_create(&dev->map_hash, 12)) {
                return -ENOMEM;
        }
index 5df450683aab8649511aaa96aaa759452b022fc0..2c3fcbdfd8ff64f8c5a53ff35884f59ce51bd621 100644 (file)
@@ -523,14 +523,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
        return 0;
 }
 
-resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
-{
-       return map->offset;
-}
-
-EXPORT_SYMBOL(drm_core_get_map_ofs);
-
-resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
+static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
 {
 #ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
@@ -539,8 +532,6 @@ resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
 #endif
 }
 
-EXPORT_SYMBOL(drm_core_get_reg_ofs);
-
 /**
  * mmap DMA memory.
  *
@@ -627,7 +618,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 #endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
-               offset = dev->driver->get_reg_ofs(dev);
+               offset = drm_core_get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
 #if !defined(__arm__)
index fe69914ce507b3df89ed8193fb32ca58c4a73c0c..88bcd331e7c53f5f6a68d8925f1a0947132ed1c8 100644 (file)
@@ -52,8 +52,6 @@ static struct drm_driver driver = {
        .device_is_agp = i810_driver_device_is_agp,
        .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
        .dma_quiescent = i810_driver_dma_quiescent,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = i810_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
index 5b6298b24e249960bcf6053cb4d29c4573a285d3..f655ab7977da2091a4b6975e6499324267d2da17 100644 (file)
@@ -57,8 +57,6 @@ static struct drm_driver driver = {
        .device_is_agp = i830_driver_device_is_agp,
        .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
        .dma_quiescent = i830_driver_dma_quiescent,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
 #if USE_IRQS
        .irq_preinstall = i830_driver_irq_preinstall,
        .irq_postinstall = i830_driver_irq_postinstall,
index 5c8e53458edbfee1c37e54b7b982d49e747d563e..fdc833d5cc7b473eb2f69cb2e856aaf8811215ce 100644 (file)
@@ -26,15 +26,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
          intel_dvo.o \
          intel_ringbuffer.o \
          intel_overlay.o \
+         intel_opregion.o \
          dvo_ch7xxx.o \
          dvo_ch7017.o \
          dvo_ivch.o \
          dvo_tfp410.o \
          dvo_sil164.o
 
-i915-$(CONFIG_ACPI)    += i915_opregion.o
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 
+i915-$(CONFIG_ACPI)    += intel_acpi.o
+
 obj-$(CONFIG_DRM_I915)  += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
index 14d59804acd77b00637ee68c0662a1183cdbca87..af70337567ce35a0167ffb193d8b1ffae769fedd 100644 (file)
@@ -165,67 +165,44 @@ struct ch7017_priv {
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
 static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
 
-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
 {
-       struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-       u8 out_buf[2];
-       u8 in_buf[2];
-
        struct i2c_msg msgs[] = {
                {
                        .addr = dvo->slave_addr,
                        .flags = 0,
                        .len = 1,
-                       .buf = out_buf,
+                       .buf = &addr,
                },
                {
                        .addr = dvo->slave_addr,
                        .flags = I2C_M_RD,
                        .len = 1,
-                       .buf = in_buf,
+                       .buf = val,
                }
        };
-
-       out_buf[0] = addr;
-       out_buf[1] = 0;
-
-       if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
-               *val= in_buf[0];
-               return true;
-       };
-
-       return false;
+       return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
 }
 
-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
 {
-       struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-       uint8_t out_buf[2];
+       uint8_t buf[2] = { addr, val };
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
                .flags = 0,
                .len = 2,
-               .buf = out_buf,
+               .buf = buf,
        };
-
-       out_buf[0] = addr;
-       out_buf[1] = val;
-
-       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
-               return true;
-
-       return false;
+       return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
 }
 
 /** Probes for a CH7017 on the given bus and slave address. */
 static bool ch7017_init(struct intel_dvo_device *dvo,
                        struct i2c_adapter *adapter)
 {
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
        struct ch7017_priv *priv;
-       uint8_t val;
+       const char *str;
+       u8 val;
 
        priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
        if (priv == NULL)
@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
        if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
                goto fail;
 
-       if (val != CH7017_DEVICE_ID_VALUE &&
-           val != CH7018_DEVICE_ID_VALUE &&
-           val != CH7019_DEVICE_ID_VALUE) {
+       switch (val) {
+       case CH7017_DEVICE_ID_VALUE:
+               str = "ch7017";
+               break;
+       case CH7018_DEVICE_ID_VALUE:
+               str = "ch7018";
+               break;
+       case CH7019_DEVICE_ID_VALUE:
+               str = "ch7019";
+               break;
+       default:
                DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
-                               "Slave %d.\n",
-                         val, i2cbus->adapter.name,dvo->slave_addr);
+                             "slave %d.\n",
+                             val, adapter->name, dvo->slave_addr);
                goto fail;
        }
 
+       DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+                     str, adapter->name, dvo->slave_addr);
        return true;
+
 fail:
        kfree(priv);
        return false;
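
Both helpers are now thin wrappers around i2c_transfer() on dvo->i2c_bus: a register read is a two-message transaction (write the register address, then read the value back) and a write is a single two-byte message. The read idiom in isolation (a self-contained sketch; the helper name is illustrative):

	#include <linux/i2c.h>

	static bool example_reg_read(struct i2c_adapter *adapter, u16 slave,
				     u8 addr, u8 *val)
	{
		struct i2c_msg msgs[] = {
			{
				.addr  = slave,
				.flags = 0,		/* write the register address */
				.len   = 1,
				.buf   = &addr,
			},
			{
				.addr  = slave,
				.flags = I2C_M_RD,	/* then read the value back */
				.len   = 1,
				.buf   = val,
			},
		};

		/* i2c_transfer() returns the number of messages completed. */
		return i2c_transfer(adapter, msgs, 2) == 2;
	}
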
@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
        }
 
        /* XXX: Should actually wait for update power status somehow */
-       udelay(20000);
+       msleep(20);
 }
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo)
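
Swapping udelay(20000) for msleep(20) turns a 20 ms busy-wait into a sleep, the preferred form for long delays in process context. The usual guideline, as a sketch (thresholds approximate, not part of this patch):

	#include <linux/delay.h>

	static void example_delay(unsigned int delay_us)
	{
		if (delay_us < 10)
			udelay(delay_us);		/* too short to sleep */
		else if (delay_us < 20000)
			usleep_range(delay_us, delay_us * 2);
		else
			msleep(delay_us / 1000);	/* long delays: always sleep */
	}
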
index 6f1944b244416c03475e8883a94a4c358fe8d881..7eaa94e4ff061b5450c9d769f70f02fb6459db2a 100644 (file)
@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
        struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
        u8 out_buf[2];
        u8 in_buf[2];
 
@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
        out_buf[0] = addr;
        out_buf[1] = 0;
 
-       if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+       if (i2c_transfer(adapter, msgs, 2) == 2) {
                *ch = in_buf[0];
                return true;
        };
 
        if (!ch7xxx->quiet) {
                DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-                         addr, i2cbus->adapter.name, dvo->slave_addr);
+                         addr, adapter->name, dvo->slave_addr);
        }
        return false;
 }
@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
        struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
        uint8_t out_buf[2];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
        out_buf[0] = addr;
        out_buf[1] = ch;
 
-       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+       if (i2c_transfer(adapter, &msg, 1) == 1)
                return true;
 
        if (!ch7xxx->quiet) {
                DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-                         addr, i2cbus->adapter.name, dvo->slave_addr);
+                         addr, adapter->name, dvo->slave_addr);
        }
 
        return false;
index a2ec3f4872023fbe3510ce0bc54c673c5e4ec56b..a12ed9414cc7358e462dfa78766fe6f16948eacd 100644 (file)
@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 {
        struct ivch_priv *priv = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
        u8 out_buf[1];
        u8 in_buf[2];
 
@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 
        out_buf[0] = addr;
 
-       if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+       if (i2c_transfer(adapter, msgs, 3) == 3) {
                *data = (in_buf[1] << 8) | in_buf[0];
                return true;
        };
@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
        if (!priv->quiet) {
                DRM_DEBUG_KMS("Unable to read register 0x%02x from "
                                "%s:%02x.\n",
-                         addr, i2cbus->adapter.name, dvo->slave_addr);
+                         addr, adapter->name, dvo->slave_addr);
        }
        return false;
 }
@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 {
        struct ivch_priv *priv = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
        u8 out_buf[3];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
        out_buf[1] = data & 0xff;
        out_buf[2] = data >> 8;
 
-       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+       if (i2c_transfer(adapter, &msg, 1) == 1)
                return true;
 
        if (!priv->quiet) {
                DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-                         addr, i2cbus->adapter.name, dvo->slave_addr);
+                         addr, adapter->name, dvo->slave_addr);
        }
 
        return false;
index 9b8e6765cf260fa8f91f2fa1a9f4432fce2b0287..e4b4091df942faf1196e4c32c057dff77e4a467e 100644 (file)
@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
        struct sil164_priv *sil = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
        u8 out_buf[2];
        u8 in_buf[2];
 
@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
        out_buf[0] = addr;
        out_buf[1] = 0;
 
-       if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+       if (i2c_transfer(adapter, msgs, 2) == 2) {
                *ch = in_buf[0];
                return true;
        };
 
        if (!sil->quiet) {
                DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-                         addr, i2cbus->adapter.name, dvo->slave_addr);
+                         addr, adapter->name, dvo->slave_addr);
        }
        return false;
 }
@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
        struct sil164_priv *sil= dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
        uint8_t out_buf[2];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
        out_buf[0] = addr;
        out_buf[1] = ch;
 
-       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+       if (i2c_transfer(adapter, &msg, 1) == 1)
                return true;
 
        if (!sil->quiet) {
                DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-                         addr, i2cbus->adapter.name, dvo->slave_addr);
+                         addr, adapter->name, dvo->slave_addr);
        }
 
        return false;
index 56f66426207f0a1a293dcb073360985d328fd85e..8ab2855bb544ae88851e4d90ce107382165e5442 100644 (file)
@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
        struct tfp410_priv *tfp = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
        u8 out_buf[2];
        u8 in_buf[2];
 
@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
        out_buf[0] = addr;
        out_buf[1] = 0;
 
-       if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+       if (i2c_transfer(adapter, msgs, 2) == 2) {
                *ch = in_buf[0];
                return true;
        };
 
        if (!tfp->quiet) {
                DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-                         addr, i2cbus->adapter.name, dvo->slave_addr);
+                         addr, adapter->name, dvo->slave_addr);
        }
        return false;
 }
@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
        struct tfp410_priv *tfp = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
        uint8_t out_buf[2];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
        out_buf[0] = addr;
        out_buf[1] = ch;
 
-       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+       if (i2c_transfer(adapter, &msg, 1) == 1)
                return true;
 
        if (!tfp->quiet) {
                DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-                         addr, i2cbus->adapter.name, dvo->slave_addr);
+                         addr, adapter->name, dvo->slave_addr);
        }
 
        return false;
index 048149748fdc9457f48ee5884f74d5cb849e43c0..1f4f3ceb63c715cdeb46a121df5dbebdd5e4fef1 100644 (file)
 
 #if defined(CONFIG_DEBUG_FS)
 
-#define ACTIVE_LIST    1
-#define FLUSHING_LIST  2
-#define INACTIVE_LIST  3
+enum {
+       ACTIVE_LIST,
+       FLUSHING_LIST,
+       INACTIVE_LIST,
+       PINNED_LIST,
+       DEFERRED_FREE_LIST,
+};
+
+static const char *yesno(int v)
+{
+       return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       const struct intel_device_info *info = INTEL_INFO(dev);
+
+       seq_printf(m, "gen: %d\n", info->gen);
+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+       B(is_mobile);
+       B(is_i85x);
+       B(is_i915g);
+       B(is_i945gm);
+       B(is_g33);
+       B(need_gfx_hws);
+       B(is_g4x);
+       B(is_pineview);
+       B(is_broadwater);
+       B(is_crestline);
+       B(has_fbc);
+       B(has_rc6);
+       B(has_pipe_cxsr);
+       B(has_hotplug);
+       B(cursor_needs_physical);
+       B(has_overlay);
+       B(overlay_needs_physical);
+       B(supports_tv);
+       B(has_bsd_ring);
+       B(has_blt_ring);
+#undef B
+
+       return 0;
+}
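
The B() macro relies on the preprocessor's stringizing operator: #x turns the field name into its printed label, so each capability line stays in lockstep with the struct member it reports. The same idiom in a standalone userspace sketch:

	#include <stdio.h>

	struct caps { int has_fbc, has_bsd_ring; };

	/* #x stringizes the argument: B(c, has_fbc) prints "has_fbc: ..." */
	#define B(c, x) printf(#x ": %s\n", (c).x ? "yes" : "no")

	int main(void)
	{
		struct caps c = { .has_fbc = 1, .has_bsd_ring = 0 };

		B(c, has_fbc);
		B(c, has_bsd_ring);
		return 0;
	}
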
 
 static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
 {
@@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
     }
 }
 
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+       seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+                  &obj->base,
+                  get_pin_flag(obj),
+                  get_tiling_flag(obj),
+                  obj->base.size,
+                  obj->base.read_domains,
+                  obj->base.write_domain,
+                  obj->last_rendering_seqno,
+                  obj->dirty ? " dirty" : "",
+                  obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+       if (obj->base.name)
+               seq_printf(m, " (name: %d)", obj->base.name);
+       if (obj->fence_reg != I915_FENCE_REG_NONE)
+               seq_printf(m, " (fence: %d)", obj->fence_reg);
+       if (obj->gtt_space != NULL)
+               seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+       if (obj->ring != NULL)
+               seq_printf(m, " (%s)", obj->ring->name);
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
-       spinlock_t *lock = NULL;
+       size_t total_obj_size, total_gtt_size;
+       int count, ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
-               lock = &dev_priv->mm.active_list_lock;
-               head = &dev_priv->render_ring.active_list;
+               head = &dev_priv->mm.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
+       case PINNED_LIST:
+               seq_printf(m, "Pinned:\n");
+               head = &dev_priv->mm.pinned_list;
+               break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
+       case DEFERRED_FREE_LIST:
+               seq_printf(m, "Deferred free:\n");
+               head = &dev_priv->mm.deferred_free_list;
+               break;
        default:
-               DRM_INFO("Ooops, unexpected list\n");
-               return 0;
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
        }
 
-       if (lock)
-               spin_lock(lock);
-       list_for_each_entry(obj_priv, head, list)
-       {
-               seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
-                          &obj_priv->base,
-                          get_pin_flag(obj_priv),
-                          obj_priv->base.size,
-                          obj_priv->base.read_domains,
-                          obj_priv->base.write_domain,
-                          obj_priv->last_rendering_seqno,
-                          obj_priv->dirty ? " dirty" : "",
-                          obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
-               if (obj_priv->base.name)
-                       seq_printf(m, " (name: %d)", obj_priv->base.name);
-               if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-                       seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
-               if (obj_priv->gtt_space != NULL)
-                       seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-
+       total_obj_size = total_gtt_size = count = 0;
+       list_for_each_entry(obj_priv, head, mm_list) {
+               seq_printf(m, "   ");
+               describe_obj(m, obj_priv);
                seq_printf(m, "\n");
+               total_obj_size += obj_priv->base.size;
+               total_gtt_size += obj_priv->gtt_space->size;
+               count++;
        }
+       mutex_unlock(&dev->struct_mutex);
 
-       if (lock)
-           spin_unlock(lock);
+       seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+                  count, total_obj_size, total_gtt_size);
        return 0;
 }
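
Each debugfs reader now follows the same shape: take struct_mutex interruptibly up front, and release it on every exit path, including the new -EINVAL case. The pattern in isolation (sketch):

	#include <linux/mutex.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *m, struct mutex *lock)
	{
		int ret;

		ret = mutex_lock_interruptible(lock);
		if (ret)
			return ret;	/* -EINTR: a signal arrived while waiting */

		seq_printf(m, "state read under the lock\n");

		mutex_unlock(lock);	/* unlock on every path, success or error */
		return 0;
	}
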
 
+static int i915_gem_object_info(struct seq_file *m, void* data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
+       seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
+       seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
+       seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+       seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
+       seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
+       seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        seq_printf(m, "Request:\n");
        list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
@@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
                           gem_request->seqno,
                           (int) (jiffies - gem_request->emitted_jiffies));
        }
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence: %d\n",
-                          i915_get_gem_seqno(dev,  &dev_priv->render_ring));
+                          dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence: hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:  %d\n",
                        dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
@@ -247,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                   atomic_read(&dev_priv->irq_received));
        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence:    %d\n",
-                          i915_get_gem_seqno(dev,  &dev_priv->render_ring));
+                          dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence:    hws uninitialized\n");
        }
@@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:        %d\n",
                   dev_priv->mm.irq_gem_seqno);
+       mutex_unlock(&dev->struct_mutex);
+
        return 0;
 }
 
@@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int i;
+       int i, ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
                        seq_printf(m, "\n");
                }
        }
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
@@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
        return 0;
 }
 
-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static void i915_dump_object(struct seq_file *m,
+                            struct io_mapping *mapping,
+                            struct drm_i915_gem_object *obj_priv)
 {
-       int page, i;
-       uint32_t *mem;
+       int page, page_count, i;
 
+       page_count = obj_priv->base.size / PAGE_SIZE;
        for (page = 0; page < page_count; page++) {
-               mem = kmap_atomic(pages[page], KM_USER0);
+               u32 *mem = io_mapping_map_wc(mapping,
+                                            obj_priv->gtt_offset + page * PAGE_SIZE);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
-               kunmap_atomic(mem, KM_USER0);
+               io_mapping_unmap(mem);
        }
 }
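
The dump now reads the object through the GTT via a temporary write-combining mapping instead of kmapping its shmem pages, so the pages no longer need to be pinned first. The map/unmap pairing, one page at a time (sketch; names illustrative):

	#include <linux/io-mapping.h>

	static void example_dump(struct seq_file *m, struct io_mapping *mapping,
				 unsigned long gtt_offset, size_t size)
	{
		unsigned long page;
		int i;

		for (page = 0; page < size; page += PAGE_SIZE) {
			/* Temporary WC mapping; unmap before mapping the next page. */
			u32 *mem = io_mapping_map_wc(mapping, gtt_offset + page);

			for (i = 0; i < PAGE_SIZE; i += 4)
				seq_printf(m, "%08lx :  %08x\n",
					   page + i, mem[i / 4]);
			io_mapping_unmap(mem);
		}
	}
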
 
@@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
        struct drm_i915_gem_object *obj_priv;
        int ret;
 
-       spin_lock(&dev_priv->mm.active_list_lock);
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
-       list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-                       list) {
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                obj = &obj_priv->base;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-                   ret = i915_gem_object_get_pages(obj, 0);
-                   if (ret) {
-                           DRM_ERROR("Failed to get pages: %d\n", ret);
-                           spin_unlock(&dev_priv->mm.active_list_lock);
-                           return ret;
-                   }
-
-                   seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
-                   i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-
-                   i915_gem_object_put_pages(obj);
+                   seq_printf(m, "--- gtt_offset = 0x%08x\n",
+                              obj_priv->gtt_offset);
+                   i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
                }
        }
 
-       spin_unlock(&dev_priv->mm.active_list_lock);
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
@@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u8 *virt;
-       uint32_t *ptr, off;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        if (!dev_priv->render_ring.gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
-               return 0;
-       }
-
-       virt = dev_priv->render_ring.virtual_start;
+       } else {
+               u8 *virt = dev_priv->render_ring.virtual_start;
+               uint32_t off;
 
-       for (off = 0; off < dev_priv->render_ring.size; off += 4) {
-               ptr = (uint32_t *)(virt + off);
-               seq_printf(m, "%08x :  %08x\n", off, *ptr);
+               for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+                       uint32_t *ptr = (uint32_t *)(virt + off);
+                       seq_printf(m, "%08x :  %08x\n", off, *ptr);
+               }
        }
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
@@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
        seq_printf(m, "RingHead :  %08x\n", head);
        seq_printf(m, "RingTail :  %08x\n", tail);
        seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
-       seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+       seq_printf(m, "Acthd :     %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
 
        return 0;
 }
@@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
                seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
        }
@@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
+               case FBC_NO_OUTPUT:
+                       seq_printf(m, "no outputs");
+                       break;
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
@@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;
 
-       if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+       if (IS_GEN5(dev))
+               sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+       else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
 
-       seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
-                  "disabled");
+       seq_printf(m, "self-refresh: %s\n",
+                  sr_enabled ? "enabled" : "disabled");
 
        return 0;
 }
@@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
+       mutex_unlock(&dev->struct_mutex);
 
        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
@@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
        return 0;
 }
 
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       if (opregion->header)
+               seq_write(m, opregion->header, OPREGION_SIZE);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_fbdev *ifbdev;
+       struct intel_framebuffer *fb;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+       if (ret)
+               return ret;
+
+       ifbdev = dev_priv->fbdev;
+       fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+       seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+                  fb->base.width,
+                  fb->base.height,
+                  fb->base.depth,
+                  fb->base.bits_per_pixel);
+       describe_obj(m, to_intel_bo(fb->obj));
+       seq_printf(m, "\n");
+
+       list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+               if (&fb->base == ifbdev->helper.fb)
+                       continue;
+
+               seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+                          fb->base.width,
+                          fb->base.height,
+                          fb->base.depth,
+                          fb->base.bits_per_pixel);
+               describe_obj(m, to_intel_bo(fb->obj));
+               seq_printf(m, "\n");
+       }
+
+       mutex_unlock(&dev->mode_config.mutex);
+
+       return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
                 struct file *filp)
@@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp,
                       "wedged :  %d\n",
                       atomic_read(&dev_priv->mm.wedged));
 
+       if (len > sizeof (buf))
+               len = sizeof (buf);
+
        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
 }
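
The added clamp matters because snprintf() returns the length the output would have had, not the number of bytes actually stored; passing an unclamped value to simple_read_from_buffer() could walk past the stack buffer after truncation. The guarded idiom (sketch):

	static ssize_t example_read(struct file *filp, char __user *ubuf,
				    size_t max, loff_t *ppos, int wedged)
	{
		char buf[80];
		int len;

		len = snprintf(buf, sizeof(buf), "wedged :  %d\n", wedged);
		if (len > sizeof(buf))
			len = sizeof(buf);	/* snprintf reports the would-be length */

		return simple_read_from_buffer(ubuf, max, ppos, buf, len);
	}
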
 
@@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp,
 
        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
-               DRM_WAKEUP(&dev_priv->irq_queue);
+               wake_up_all(&dev_priv->irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }
 
@@ -824,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
+       {"i915_capabilities", i915_capabilities, 0, 0},
+       {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+       {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
+       {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -846,6 +1042,8 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
+       {"i915_opregion", i915_opregion, 0},
+       {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
index 2dd2c93ebfa35dace7916b38c6df18c835161978..7a26f4dd21ae0a055036f78ce588ea4def08d73a 100644 (file)
@@ -40,8 +40,7 @@
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
-
-extern int intel_max_stolen; /* from AGP driver */
+#include <acpi/video.h>
 
 /**
  * Sets up the hardware status page for devices that need a physical address
@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
 
        memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
 
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
                                             0xf0;
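
This file systematically trades model-specific macros such as IS_I965G() for numeric generation tests, so one comparison covers every newer part instead of a growing macro list. The idiom (sketch):

	/* Sketch: INTEL_INFO(dev)->gen encodes the hardware generation, so
	 * "965 class and everything after it" collapses to a single test. */
	static bool example_is_gen4_plus(struct drm_device *dev)
	{
		return INTEL_INFO(dev)->gen >= 4;
	}
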
 
@@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 
        mutex_lock(&dev->struct_mutex);
        intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-       if (HAS_BSD(dev))
-               intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+       intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+       intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
        mutex_unlock(&dev->struct_mutex);
 
        /* Clear the HWS virtual address at teardown */
@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                                ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
-               ring->setup_status_page(dev, ring);
+               intel_ring_setup_status_page(dev, ring);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 
@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
                return -EINVAL;
        }
 
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                BEGIN_LP_RING(4);
                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 
                if (!IS_I830(dev) && !IS_845G(dev)) {
                        BEGIN_LP_RING(2);
-                       if (IS_I965G(dev)) {
+                       if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
@@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
        }
 
 
-       if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+       if (IS_G4X(dev) || IS_GEN5(dev)) {
                BEGIN_LP_RING(2);
                OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
                OUT_RING(MI_NOOP);
@@ -765,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_BSD:
                value = HAS_BSD(dev);
                break;
+       case I915_PARAM_HAS_BLT:
+               value = HAS_BLT(dev);
+               break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@ -888,12 +890,12 @@ static int
 intel_alloc_mchbar_resource(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+       int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;
 
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -920,7 +922,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
                return ret;
        }
 
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));
 
@@ -934,7 +936,7 @@ static void
 intel_setup_mchbar(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;
 
@@ -971,7 +973,7 @@ static void
 intel_teardown_mchbar(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
 
        if (dev_priv->mchbar_need_disable) {
@@ -990,174 +992,6 @@ intel_teardown_mchbar(struct drm_device *dev)
                release_resource(&dev_priv->mch_res);
 }
 
-/**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
- * @aperture_size: returns AGP aperture configured size
- * @preallocated_size: returns size of BIOS preallocated AGP space
- *
- * Since Intel integrated graphics are UMA, the BIOS has to set aside
- * some RAM for the framebuffer at early boot.  This code figures out
- * how much was set aside so we can use it for our own purposes.
- */
-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
-                         uint32_t *preallocated_size,
-                         uint32_t *start)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u16 tmp = 0;
-       unsigned long overhead;
-       unsigned long stolen;
-
-       /* Get the fb aperture size and "stolen" memory amount. */
-       pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
-       *aperture_size = 1024 * 1024;
-       *preallocated_size = 1024 * 1024;
-
-       switch (dev->pdev->device) {
-       case PCI_DEVICE_ID_INTEL_82830_CGC:
-       case PCI_DEVICE_ID_INTEL_82845G_IG:
-       case PCI_DEVICE_ID_INTEL_82855GM_IG:
-       case PCI_DEVICE_ID_INTEL_82865_IG:
-               if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
-                       *aperture_size *= 64;
-               else
-                       *aperture_size *= 128;
-               break;
-       default:
-               /* 9xx supports large sizes, just look at the length */
-               *aperture_size = pci_resource_len(dev->pdev, 2);
-               break;
-       }
-
-       /*
-        * Some of the preallocated space is taken by the GTT
-        * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
-        */
-       if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
-               overhead = 4096;
-       else
-               overhead = (*aperture_size / 1024) + 4096;
-
-       if (IS_GEN6(dev)) {
-               /* SNB has memory control reg at 0x50.w */
-               pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
-
-               switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
-               case INTEL_855_GMCH_GMS_DISABLED:
-                       DRM_ERROR("video memory is disabled\n");
-                       return -1;
-               case SNB_GMCH_GMS_STOLEN_32M:
-                       stolen = 32 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_64M:
-                       stolen = 64 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_96M:
-                       stolen = 96 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_128M:
-                       stolen = 128 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_160M:
-                       stolen = 160 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_192M:
-                       stolen = 192 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_224M:
-                       stolen = 224 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_256M:
-                       stolen = 256 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_288M:
-                       stolen = 288 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_320M:
-                       stolen = 320 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_352M:
-                       stolen = 352 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_384M:
-                       stolen = 384 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_416M:
-                       stolen = 416 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_448M:
-                       stolen = 448 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_480M:
-                       stolen = 480 * 1024 * 1024;
-                       break;
-               case SNB_GMCH_GMS_STOLEN_512M:
-                       stolen = 512 * 1024 * 1024;
-                       break;
-               default:
-                       DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-                                 tmp & SNB_GMCH_GMS_STOLEN_MASK);
-                       return -1;
-               }
-       } else {
-               switch (tmp & INTEL_GMCH_GMS_MASK) {
-               case INTEL_855_GMCH_GMS_DISABLED:
-                       DRM_ERROR("video memory is disabled\n");
-                       return -1;
-               case INTEL_855_GMCH_GMS_STOLEN_1M:
-                       stolen = 1 * 1024 * 1024;
-                       break;
-               case INTEL_855_GMCH_GMS_STOLEN_4M:
-                       stolen = 4 * 1024 * 1024;
-                       break;
-               case INTEL_855_GMCH_GMS_STOLEN_8M:
-                       stolen = 8 * 1024 * 1024;
-                       break;
-               case INTEL_855_GMCH_GMS_STOLEN_16M:
-                       stolen = 16 * 1024 * 1024;
-                       break;
-               case INTEL_855_GMCH_GMS_STOLEN_32M:
-                       stolen = 32 * 1024 * 1024;
-                       break;
-               case INTEL_915G_GMCH_GMS_STOLEN_48M:
-                       stolen = 48 * 1024 * 1024;
-                       break;
-               case INTEL_915G_GMCH_GMS_STOLEN_64M:
-                       stolen = 64 * 1024 * 1024;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_128M:
-                       stolen = 128 * 1024 * 1024;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_256M:
-                       stolen = 256 * 1024 * 1024;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_96M:
-                       stolen = 96 * 1024 * 1024;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_160M:
-                       stolen = 160 * 1024 * 1024;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_224M:
-                       stolen = 224 * 1024 * 1024;
-                       break;
-               case INTEL_GMCH_GMS_STOLEN_352M:
-                       stolen = 352 * 1024 * 1024;
-                       break;
-               default:
-                       DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-                                 tmp & INTEL_GMCH_GMS_MASK);
-                       return -1;
-               }
-       }
-
-       *preallocated_size = stolen - overhead;
-       *start = overhead;
-
-       return 0;
-}
-
 #define PTE_ADDRESS_MASK               0xfffff000
 #define PTE_ADDRESS_MASK_HIGH          0x000000f0 /* i915+ */
 #define PTE_MAPPING_TYPE_UNCACHED      (0 << 1)
@@ -1181,11 +1015,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 {
        unsigned long *gtt;
        unsigned long entry, phys;
-       int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+       int gtt_bar = IS_GEN2(dev) ? 1 : 0;
        int gtt_offset, gtt_size;
 
-       if (IS_I965G(dev)) {
-               if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
+               if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
                        gtt_offset = 2*1024*1024;
                        gtt_size = 2*1024*1024;
                } else {
@@ -1210,10 +1044,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
        DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
 
        /* Mask out these reserved bits on this hardware. */
-       if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
-           IS_I945G(dev) || IS_I945GM(dev)) {
+       if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
                entry &= ~PTE_ADDRESS_MASK_HIGH;
-       }
 
        /* If it's not a mapping type we know, then bail. */
        if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
@@ -1252,7 +1084,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
        unsigned long ll_base = 0;
 
        /* Leave 1M for line length buffer & misc. */
-       compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+       compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
        if (!compressed_fb) {
                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                i915_warn_stolen(dev);
@@ -1273,7 +1105,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
        }
 
        if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
-               compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+               compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
                                                    4096, 0);
                if (!compressed_llb) {
                        i915_warn_stolen(dev);
@@ -1343,10 +1175,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume(dev);
-               drm_kms_helper_poll_enable(dev);
        } else {
                printk(KERN_ERR "i915: switched off\n");
-               drm_kms_helper_poll_disable(dev);
                i915_suspend(dev, pmm);
        }
 }
@@ -1363,23 +1193,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
 }
 
 static int i915_load_modeset_init(struct drm_device *dev,
-                                 unsigned long prealloc_start,
                                  unsigned long prealloc_size,
                                  unsigned long agp_size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int fb_bar = IS_I9XX(dev) ? 2 : 0;
        int ret = 0;
 
-       dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
-               0xff000000;
-
-       /* Basic memrange allocator for stolen space (aka vram) */
-       drm_mm_init(&dev_priv->vram, 0, prealloc_size);
-       DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
-
-       /* We're off and running w/KMS */
-       dev_priv->mm.suspended = 0;
+       /* Basic memrange allocator for stolen space (aka mm.vram) */
+       drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
 
        /* Let GEM Manage from end of prealloc space to end of aperture.
         *
@@ -1414,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
         */
        dev_priv->allow_batchbuffer = 1;
 
-       ret = intel_init_bios(dev);
+       ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");
 
@@ -1423,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
        if (ret)
                goto cleanup_ringbuffer;
 
+       intel_register_dsm_handler();
+
        ret = vga_switcheroo_register_client(dev->pdev,
                                             i915_switcheroo_set_state,
                                             i915_switcheroo_can_switch);
@@ -1443,17 +1266,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = 1;
 
-       /*
-        * Initialize the hardware status page IRQ location.
-        */
-
-       I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
-
        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_irq;
 
        drm_kms_helper_poll_init(dev);
+
+       /* We're off and running w/KMS */
+       dev_priv->mm.suspended = 0;
+
        return 0;
 
 cleanup_irq:
@@ -1907,7 +1728,7 @@ static struct drm_i915_private *i915_mch_dev;
  *   - dev_priv->fmax
  *   - dev_priv->gpu_busy
  */
-DEFINE_SPINLOCK(mchdev_lock);
+static DEFINE_SPINLOCK(mchdev_lock);
 
 /**
  * i915_read_mch_val - return value for IPS use
@@ -2062,7 +1883,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        struct drm_i915_private *dev_priv;
        resource_size_t base, size;
        int ret = 0, mmio_bar;
-       uint32_t agp_size, prealloc_size, prealloc_start;
+       uint32_t agp_size, prealloc_size;
        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
@@ -2079,7 +1900,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        dev_priv->info = (struct intel_device_info *) flags;
 
        /* Add register map (needed for suspend/resume) */
-       mmio_bar = IS_I9XX(dev) ? 0 : 1;
+       mmio_bar = IS_GEN2(dev) ? 1 : 0;
        base = pci_resource_start(dev->pdev, mmio_bar);
        size = pci_resource_len(dev->pdev, mmio_bar);
 
@@ -2121,17 +1942,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                         "performance may suffer.\n");
        }
 
-       ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
-       if (ret)
+       dev_priv->mm.gtt = intel_gtt_get();
+       if (!dev_priv->mm.gtt) {
+               DRM_ERROR("Failed to initialize GTT\n");
+               ret = -ENODEV;
                goto out_iomapfree;
-
-       if (prealloc_size > intel_max_stolen) {
-               DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
-                        prealloc_size >> 20, intel_max_stolen >> 20);
-               prealloc_size = intel_max_stolen;
        }
 
-       dev_priv->wq = create_singlethread_workqueue("i915");
+       prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
+       agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+       /* The i915 workqueue is primarily used for batched retirement of
+        * requests (and thus managing bo) once the task has been completed
+        * by the GPU. i915_gem_retire_requests() is called directly when we
+        * need high-priority retirement, such as waiting for an explicit
+        * bo.
+        *
+        * It is also used for periodic low-priority events, such as
+        * idle-timers and hangcheck.
+        *
+        * All tasks on the workqueue are expected to acquire the dev mutex
+        * so there is no point in running more than one instance of the
+        * workqueue at any time: max_active = 1 and NON_REENTRANT.
+        */
+       dev_priv->wq = alloc_workqueue("i915",
+                                      WQ_UNBOUND | WQ_NON_REENTRANT,
+                                      1);
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
                ret = -ENOMEM;
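
The comment above records the design choice: every work item takes the dev mutex, so a single unbound, non-reentrant worker suffices, and create_singlethread_workqueue() gives way to an explicit alloc_workqueue(). Creation and teardown in isolation (sketch):

	static struct workqueue_struct *example_make_wq(void)
	{
		/* max_active = 1: one item at a time; WQ_NON_REENTRANT: an
		 * item never runs concurrently with itself on another CPU. */
		return alloc_workqueue("example",
				       WQ_UNBOUND | WQ_NON_REENTRANT, 1);
	}

	/* ... on unload: flush or cancel pending work, then destroy_workqueue(). */
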
@@ -2159,13 +1995,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-       if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+       if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        }
 
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
+       intel_setup_gmbus(dev);
+       intel_opregion_setup(dev);
+
+       /* Make sure the bios did its job and set up vital registers */
+       intel_setup_bios(dev);
 
        i915_gem_load(dev);
 
@@ -2178,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        if (IS_PINEVIEW(dev))
                i915_pineview_get_mem_freq(dev);
-       else if (IS_IRONLAKE(dev))
+       else if (IS_GEN5(dev))
                i915_ironlake_get_mem_freq(dev);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -2212,8 +2053,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        intel_detect_pch(dev);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = i915_load_modeset_init(dev, prealloc_start,
-                                            prealloc_size, agp_size);
+               ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
                        goto out_workqueue_free;
@@ -2221,7 +2061,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        }
 
        /* Must be done after probing outputs */
-       intel_opregion_init(dev, 0);
+       intel_opregion_init(dev);
+       acpi_video_register();
 
        setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
                    (unsigned long) dev);
@@ -2231,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        dev_priv->mchdev_lock = &mchdev_lock;
        spin_unlock(&mchdev_lock);
 
-       /* XXX Prevent module unload due to memory corruption bugs. */
-       __module_get(THIS_MODULE);
-
        return 0;
 
 out_workqueue_free:
@@ -2252,15 +2090,20 @@ free_priv:
 int i915_driver_unload(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-
-       i915_destroy_error_state(dev);
+       int ret;
 
        spin_lock(&mchdev_lock);
        i915_mch_dev = NULL;
        spin_unlock(&mchdev_lock);
 
-       destroy_workqueue(dev_priv->wq);
-       del_timer_sync(&dev_priv->hangcheck_timer);
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gpu_idle(dev);
+       if (ret)
+               DRM_ERROR("failed to idle hardware: %d\n", ret);
+       mutex_unlock(&dev->struct_mutex);
+
+       /* Cancel the retire work handler, which should be idle now. */
+       cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
        io_mapping_free(dev_priv->mm.gtt_mapping);
        if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -2269,7 +2112,10 @@ int i915_driver_unload(struct drm_device *dev)
                dev_priv->mm.gtt_mtrr = -1;
        }
 
+       acpi_video_unregister();
+
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               intel_fbdev_fini(dev);
                intel_modeset_cleanup(dev);
 
                /*
@@ -2281,20 +2127,25 @@ int i915_driver_unload(struct drm_device *dev)
                        dev_priv->child_dev = NULL;
                        dev_priv->child_dev_num = 0;
                }
-               drm_irq_uninstall(dev);
+
                vga_switcheroo_unregister_client(dev->pdev);
                vga_client_register(dev->pdev, NULL, NULL, NULL);
        }
 
+       /* Free error state after interrupts are fully disabled. */
+       del_timer_sync(&dev_priv->hangcheck_timer);
+       cancel_work_sync(&dev_priv->error_work);
+       i915_destroy_error_state(dev);
+
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);
 
-       if (dev_priv->regs != NULL)
-               iounmap(dev_priv->regs);
-
-       intel_opregion_free(dev, 0);
+       intel_opregion_fini(dev);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               /* Flush any outstanding unpin_work. */
+               flush_workqueue(dev_priv->wq);
+
                i915_gem_free_all_phys_object(dev);
 
                mutex_lock(&dev->struct_mutex);
@@ -2302,34 +2153,41 @@ int i915_driver_unload(struct drm_device *dev)
                mutex_unlock(&dev->struct_mutex);
                if (I915_HAS_FBC(dev) && i915_powersave)
                        i915_cleanup_compression(dev);
-               drm_mm_takedown(&dev_priv->vram);
-               i915_gem_lastclose(dev);
+               drm_mm_takedown(&dev_priv->mm.vram);
 
                intel_cleanup_overlay(dev);
+
+               if (!I915_NEED_GFX_HWS(dev))
+                       i915_free_hws(dev);
        }
 
+       if (dev_priv->regs != NULL)
+               iounmap(dev_priv->regs);
+
+       intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
 
+       destroy_workqueue(dev_priv->wq);
+
        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev->dev_private);
 
        return 0;
 }
 
-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *i915_file_priv;
+       struct drm_i915_file_private *file_priv;
 
        DRM_DEBUG_DRIVER("\n");
-       i915_file_priv = (struct drm_i915_file_private *)
-           kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
-
-       if (!i915_file_priv)
+       file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
                return -ENOMEM;
 
-       file_priv->driver_priv = i915_file_priv;
+       file->driver_priv = file_priv;
 
-       INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+       spin_lock_init(&file_priv->mm.lock);
+       INIT_LIST_HEAD(&file_priv->mm.request_list);
 
        return 0;
 }
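
The per-file spinlock initialised above guards file_priv->mm.request_list, so a client's request list can be walked without taking struct_mutex. A sketch of the intended usage, with an illustrative loop body:

static void example_walk_requests(struct drm_i915_file_private *file_priv)
{
	struct drm_i915_gem_request *request;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		/* e.g. inspect or retire this client's requests */
	}
	spin_unlock(&file_priv->mm.lock);
}
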
@@ -2372,11 +2230,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
                i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
 
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
 
-       kfree(i915_file_priv);
+       kfree(file_priv);
 }
 
 struct drm_ioctl_desc i915_ioctls[] = {
index 895ab896e336c660c279622a6b79bfe138a24a5d..3467dd420760fa51084a2ba00d097b90d6c214a2 100644 (file)
@@ -32,6 +32,7 @@
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "intel_drv.h"
 
 #include <linux/console.h>
 #include "drm_crtc_helper.h"
@@ -61,86 +62,110 @@ extern int intel_agp_enabled;
        .driver_data = (unsigned long) info }
 
 static const struct intel_device_info intel_i830_info = {
-       .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_845g_info = {
-       .gen = 2, .is_i8xx = 1,
+       .gen = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-       .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+       .gen = 2, .is_i85x = 1, .is_mobile = 1,
        .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-       .gen = 2, .is_i8xx = 1,
+       .gen = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i915g_info = {
-       .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-       .gen = 3, .is_i9xx = 1,  .is_mobile = 1,
+       .gen = 3, .is_mobile = 1,
        .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-       .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-       .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+       .gen = 3, .is_i945gm = 1, .is_mobile = 1,
        .has_hotplug = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
 };
 
 static const struct intel_device_info intel_i965g_info = {
-       .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+       .gen = 4, .is_broadwater = 1,
        .has_hotplug = 1,
+       .has_overlay = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-       .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+       .gen = 4, .is_crestline = 1,
        .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+       .supports_tv = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-       .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+       .gen = 3, .is_g33 = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
 };
 
 static const struct intel_device_info intel_g45_info = {
-       .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-       .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+       .gen = 4, .is_g4x = 1,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .supports_tv = 1,
+       .has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-       .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+       .gen = 5,
        .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-       .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+       .gen = 5, .is_mobile = 1,
        .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
-       .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+       .gen = 6,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
+       .has_blt_ring = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-       .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+       .gen = 6, .is_mobile = 1,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_bsd_ring = 1,
+       .has_blt_ring = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
@@ -237,7 +262,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 
        i915_save_state(dev);
 
-       intel_opregion_free(dev, 1);
+       intel_opregion_fini(dev);
 
        /* Modeset on resume, not lid events */
        dev_priv->modeset_on_lid = 0;
@@ -258,6 +283,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
        if (state.event == PM_EVENT_PRETHAW)
                return 0;
 
+       drm_kms_helper_poll_disable(dev);
+
        error = i915_drm_freeze(dev);
        if (error)
                return error;
@@ -277,8 +304,7 @@ static int i915_drm_thaw(struct drm_device *dev)
        int error = 0;
 
        i915_restore_state(dev);
-
-       intel_opregion_init(dev, 1);
+       intel_opregion_setup(dev);
 
        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -294,6 +320,8 @@ static int i915_drm_thaw(struct drm_device *dev)
                drm_helper_resume_force_mode(dev);
        }
 
+       intel_opregion_init(dev);
+
        dev_priv->modeset_on_lid = 0;
 
        return error;
@@ -301,12 +329,79 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 int i915_resume(struct drm_device *dev)
 {
+       int ret;
+
        if (pci_enable_device(dev->pdev))
                return -EIO;
 
        pci_set_master(dev->pdev);
 
-       return i915_drm_thaw(dev);
+       ret = i915_drm_thaw(dev);
+       if (ret)
+               return ret;
+
+       drm_kms_helper_poll_enable(dev);
+       return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_I85X(dev))
+               return -ENODEV;
+
+       I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+       POSTING_READ(D_STATE);
+
+       if (IS_I830(dev) || IS_845G(dev)) {
+               I915_WRITE(DEBUG_RESET_I830,
+                          DEBUG_RESET_DISPLAY |
+                          DEBUG_RESET_RENDER |
+                          DEBUG_RESET_FULL);
+               POSTING_READ(DEBUG_RESET_I830);
+               msleep(1);
+
+               I915_WRITE(DEBUG_RESET_I830, 0);
+               POSTING_READ(DEBUG_RESET_I830);
+       }
+
+       msleep(1);
+
+       I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+       POSTING_READ(D_STATE);
+
+       return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+       u8 gdrst;
+       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+       return gdrst & 0x1;
+}
+
+static int i965_do_reset(struct drm_device *dev, u8 flags)
+{
+       u8 gdrst;
+
+       /*
+        * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+        * well as the reset bit (GR/bit 0).  Setting the GR bit
+        * triggers the reset; when done, the hardware will clear it.
+        */
+       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+       pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+
+       return wait_for(i965_reset_complete(dev), 500);
+}
+
+static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+       I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+       return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
 }
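
i965_do_reset() above leans on the driver's wait_for() macro (from intel_drv.h) to poll i965_reset_complete() with a 500ms budget. A rough open-coded equivalent of that poll, offered as a sketch only:

static int example_poll_reset(struct drm_device *dev, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (!i965_reset_complete(dev)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(1);
	}
	return 0;
}
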
 
 /**
@@ -325,54 +420,39 @@ int i915_resume(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init display
  */
-int i965_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev, u8 flags)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       unsigned long timeout;
-       u8 gdrst;
        /*
         * We really should only reset the display subsystem if we actually
         * need to
         */
        bool need_display = true;
+       int ret;
 
        mutex_lock(&dev->struct_mutex);
 
-       /*
-        * Clear request list
-        */
-       i915_gem_retire_requests(dev);
-
-       if (need_display)
-               i915_save_display(dev);
-
-       if (IS_I965G(dev) || IS_G4X(dev)) {
-               /*
-                * Set the domains we want to reset, then the reset bit (bit 0).
-                * Clear the reset bit after a while and wait for hardware status
-                * bit (bit 1) to be set
-                */
-               pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-               pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
-               udelay(50);
-               pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
-
-               /* ...we don't want to loop forever though, 500ms should be plenty */
-              timeout = jiffies + msecs_to_jiffies(500);
-               do {
-                       udelay(100);
-                       pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-               } while ((gdrst & 0x1) && time_after(timeout, jiffies));
-
-               if (gdrst & 0x1) {
-                       WARN(true, "i915: Failed to reset chip\n");
-                       mutex_unlock(&dev->struct_mutex);
-                       return -EIO;
-               }
-       } else {
-               DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+       i915_gem_reset(dev);
+
+       ret = -ENODEV;
+       if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+               DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+       } else switch (INTEL_INFO(dev)->gen) {
+       case 5:
+               ret = ironlake_do_reset(dev, flags);
+               break;
+       case 4:
+               ret = i965_do_reset(dev, flags);
+               break;
+       case 2:
+               ret = i8xx_do_reset(dev, flags);
+               break;
+       }
+       dev_priv->last_gpu_reset = get_seconds();
+       if (ret) {
+               DRM_ERROR("Failed to reset chip.\n");
                mutex_unlock(&dev->struct_mutex);
-               return -ENODEV;
+               return ret;
        }
 
        /* Ok, now get things going again... */
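
The rework above replaces the single i965 reset path with a per-generation dispatch and adds a crude rate limit on last_gpu_reset. The guard condition, pulled out as an illustrative helper (not driver code):

static bool example_reset_too_soon(struct drm_i915_private *dev_priv)
{
	/* Refuse a second reset within 5 seconds of the last one;
	 * a GPU that hangs again that quickly is declared wedged. */
	return get_seconds() - dev_priv->last_gpu_reset < 5;
}
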
@@ -400,13 +480,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
                mutex_lock(&dev->struct_mutex);
        }
 
+       mutex_unlock(&dev->struct_mutex);
+
        /*
-        * Display needs restore too...
+        * Perform a full modeset; on later generations such as Ironlake we
+        * may need to retrain the display link and cannot simply restore
+        * the register values.
         */
-       if (need_display)
-               i915_restore_display(dev);
+       if (need_display) {
+               mutex_lock(&dev->mode_config.mutex);
+               drm_helper_resume_force_mode(dev);
+               mutex_unlock(&dev->mode_config.mutex);
+       }
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -524,8 +610,6 @@ static struct drm_driver driver = {
        .irq_uninstall = i915_driver_irq_uninstall,
        .irq_handler = i915_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .master_create = i915_master_create,
        .master_destroy = i915_master_destroy,
 #if defined(CONFIG_DEBUG_FS)
index af4a263cf25782a6906f80a1d0783841000ac9b1..2c2c19b6285ecf331edbac9c53e0bdd44b2093ad 100644 (file)
@@ -34,6 +34,8 @@
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
+#include <linux/i2c.h>
+#include <drm/intel-gtt.h>
 
 /* General customization:
  */
@@ -73,11 +75,9 @@ enum plane {
 #define DRIVER_PATCHLEVEL      0
 
 #define WATCH_COHERENCY        0
-#define WATCH_BUF      0
 #define WATCH_EXEC     0
-#define WATCH_LRU      0
 #define WATCH_RELOC    0
-#define WATCH_INACTIVE 0
+#define WATCH_LISTS    0
 #define WATCH_PWRITE   0
 
 #define I915_GEM_PHYS_CURSOR_0 1
@@ -110,8 +110,9 @@ struct intel_opregion {
        struct opregion_acpi *acpi;
        struct opregion_swsci *swsci;
        struct opregion_asle *asle;
-       int enabled;
+       void *vbt;
 };
+#define OPREGION_SIZE            (8*1024)
 
 struct intel_overlay;
 struct intel_overlay_error_state;
@@ -125,13 +126,16 @@ struct drm_i915_master_private {
 struct drm_i915_fence_reg {
        struct drm_gem_object *obj;
        struct list_head lru_list;
+       bool gpu;
 };
 
 struct sdvo_device_mapping {
+       u8 initialized;
        u8 dvo_port;
        u8 slave_addr;
        u8 dvo_wiring;
-       u8 initialized;
+       u8 i2c_pin;
+       u8 i2c_speed;
        u8 ddc_pin;
 };
 
@@ -193,28 +197,29 @@ struct drm_i915_display_funcs {
 struct intel_device_info {
        u8 gen;
        u8 is_mobile : 1;
-       u8 is_i8xx : 1;
        u8 is_i85x : 1;
        u8 is_i915g : 1;
-       u8 is_i9xx : 1;
        u8 is_i945gm : 1;
-       u8 is_i965g : 1;
-       u8 is_i965gm : 1;
        u8 is_g33 : 1;
        u8 need_gfx_hws : 1;
        u8 is_g4x : 1;
        u8 is_pineview : 1;
        u8 is_broadwater : 1;
        u8 is_crestline : 1;
-       u8 is_ironlake : 1;
        u8 has_fbc : 1;
        u8 has_rc6 : 1;
        u8 has_pipe_cxsr : 1;
        u8 has_hotplug : 1;
        u8 cursor_needs_physical : 1;
+       u8 has_overlay : 1;
+       u8 overlay_needs_physical : 1;
+       u8 supports_tv : 1;
+       u8 has_bsd_ring : 1;
+       u8 has_blt_ring : 1;
 };
 
 enum no_fbc_reason {
+       FBC_NO_OUTPUT, /* no outputs enabled to compress */
        FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
        FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
        FBC_MODE_TOO_LARGE, /* mode too large for compression */
@@ -241,9 +246,16 @@ typedef struct drm_i915_private {
 
        void __iomem *regs;
 
+       struct intel_gmbus {
+               struct i2c_adapter adapter;
+               struct i2c_adapter *force_bit;
+               u32 reg0;
+       } *gmbus;
+
        struct pci_dev *bridge_dev;
        struct intel_ring_buffer render_ring;
        struct intel_ring_buffer bsd_ring;
+       struct intel_ring_buffer blt_ring;
        uint32_t next_seqno;
 
        drm_dma_handle_t *status_page_dmah;
@@ -263,6 +275,9 @@ typedef struct drm_i915_private {
        int front_offset;
        int current_page;
        int page_flipping;
+#define I915_DEBUG_READ (1<<0)
+#define I915_DEBUG_WRITE (1<<1)
+       unsigned long debug_flags;
 
        wait_queue_head_t irq_queue;
        atomic_t irq_received;
@@ -289,24 +304,21 @@ typedef struct drm_i915_private {
        unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
        int vblank_pipe;
        int num_pipe;
-       u32 flush_rings;
-#define FLUSH_RENDER_RING      0x1
-#define FLUSH_BSD_RING         0x2
 
        /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
        struct timer_list hangcheck_timer;
        int hangcheck_count;
        uint32_t last_acthd;
        uint32_t last_instdone;
        uint32_t last_instdone1;
 
-       struct drm_mm vram;
-
        unsigned long cfb_size;
        unsigned long cfb_pitch;
+       unsigned long cfb_offset;
        int cfb_fence;
        int cfb_plane;
+       int cfb_y;
 
        int irq_enabled;
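
DRM_I915_HANGCHECK_PERIOD above now counts milliseconds rather than jiffies, so re-arming converts at the point of use. A sketch, with the helper name being illustrative:

static void example_hangcheck_rearm(drm_i915_private_t *dev_priv)
{
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
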
 
@@ -316,8 +328,7 @@ typedef struct drm_i915_private {
        struct intel_overlay *overlay;
 
        /* LVDS info */
-       int backlight_duty_cycle;  /* restore backlight to this value */
-       bool panel_wants_dither;
+       int backlight_level;  /* restore backlight to this value */
        struct drm_display_mode *panel_fixed_mode;
        struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
        struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -328,13 +339,23 @@ typedef struct drm_i915_private {
        unsigned int lvds_vbt:1;
        unsigned int int_crt_support:1;
        unsigned int lvds_use_ssc:1;
-       unsigned int edp_support:1;
        int lvds_ssc_freq;
-       int edp_bpp;
+       struct {
+               int rate;
+               int lanes;
+               int preemphasis;
+               int vswing;
+
+               bool initialized;
+               bool support;
+               int bpp;
+               struct edp_power_seq pps;
+       } edp;
+       bool no_aux_handshake;
 
        struct notifier_block lid_notifier;
 
-       int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
+       int crt_ddc_pin;
        struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
        int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -344,6 +365,7 @@ typedef struct drm_i915_private {
        spinlock_t error_lock;
        struct drm_i915_error_state *first_error;
        struct work_struct error_work;
+       struct completion error_completion;
        struct workqueue_struct *wq;
 
        /* Display functions */
@@ -507,6 +529,11 @@ typedef struct drm_i915_private {
        u32 saveMCHBAR_RENDER_STANDBY;
 
        struct {
+               /** Bridge to intel-gtt-ko */
+               struct intel_gtt *gtt;
+               /** Memory allocator for GTT stolen memory */
+               struct drm_mm vram;
+               /** Memory allocator for GTT */
                struct drm_mm gtt_space;
 
                struct io_mapping *gtt_mapping;
@@ -521,7 +548,16 @@ typedef struct drm_i915_private {
                 */
                struct list_head shrink_list;
 
-               spinlock_t active_list_lock;
+               /**
+                * List of objects currently involved in rendering.
+                *
+                * Includes buffers having the contents of their GPU caches
+                * flushed, not necessarily primitives.  last_rendering_seqno
+                * represents when the rendering involved will be completed.
+                *
+                * A reference is held on the buffer while on this list.
+                */
+               struct list_head active_list;
 
                /**
                 * List of objects which are not in the ringbuffer but which
@@ -534,15 +570,6 @@ typedef struct drm_i915_private {
                 */
                struct list_head flushing_list;
 
-               /**
-                * List of objects currently pending a GPU write flush.
-                *
-                * All elements on this list will belong to either the
-                * active_list or flushing_list, last_rendering_seqno can
-                * be used to differentiate between the two elements.
-                */
-               struct list_head gpu_write_list;
-
                /**
                 * LRU list of objects which are not in the ringbuffer and
                 * are ready to unbind, but are still in the GTT.
@@ -555,6 +582,12 @@ typedef struct drm_i915_private {
                 */
                struct list_head inactive_list;
 
+               /**
+                * LRU list of objects which are not in the ringbuffer but
+                * are still pinned in the GTT.
+                */
+               struct list_head pinned_list;
+
                /** LRU list of objects with fence regs on them. */
                struct list_head fence_list;
 
@@ -611,6 +644,17 @@ typedef struct drm_i915_private {
 
                /* storage for physical objects */
                struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+               uint32_t flush_rings;
+
+               /* accounting, useful for userland debugging */
+               size_t object_memory;
+               size_t pin_memory;
+               size_t gtt_memory;
+               size_t gtt_total;
+               u32 object_count;
+               u32 pin_count;
+               u32 gtt_count;
        } mm;
        struct sdvo_device_mapping sdvo_mappings[2];
        /* indicate whether the LVDS_BORDER should be enabled or not */
@@ -626,8 +670,6 @@ typedef struct drm_i915_private {
        /* Reclocking support */
        bool render_reclock_avail;
        bool lvds_downclock_avail;
-       /* indicate whether the LVDS EDID is OK */
-       bool lvds_edid_good;
        /* indicates the reduced downclock for LVDS*/
        int lvds_downclock;
        struct work_struct idle_work;
@@ -661,6 +703,8 @@ typedef struct drm_i915_private {
        struct drm_mm_node *compressed_fb;
        struct drm_mm_node *compressed_llb;
 
+       unsigned long last_gpu_reset;
+
        /* list of fbdev register on this device */
        struct intel_fbdev *fbdev;
 } drm_i915_private_t;
@@ -673,7 +717,8 @@ struct drm_i915_gem_object {
        struct drm_mm_node *gtt_space;
 
        /** This object's place on the active/flushing/inactive lists */
-       struct list_head list;
+       struct list_head ring_list;
+       struct list_head mm_list;
        /** This object's place on GPU write list */
        struct list_head gpu_write_list;
        /** This object's place on eviction list */
@@ -816,12 +861,14 @@ struct drm_i915_gem_request {
        /** global list entry for this request */
        struct list_head list;
 
+       struct drm_i915_file_private *file_priv;
        /** file_priv list entry for this request */
        struct list_head client_list;
 };
 
 struct drm_i915_file_private {
        struct {
+               struct spinlock lock;
                struct list_head request_list;
        } mm;
 };
@@ -862,7 +909,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 extern int i915_emit_box(struct drm_device *dev,
                         struct drm_clip_rect *boxes,
                         int i, int DR1, int DR4);
-extern int i965_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev, u8 flags);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -871,7 +918,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
-void i915_destroy_error_state(struct drm_device *dev);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -908,6 +954,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void intel_enable_asle (struct drm_device *dev);
 
+#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+#else
+#define i915_destroy_error_state(x)
+#endif
+
 
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -922,6 +974,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(struct drm_device * dev,
                             struct drm_file *file_priv, struct mem_block *heap);
 /* i915_gem.c */
+int i915_gem_check_is_wedged(struct drm_device *dev);
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -972,13 +1025,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev,
-               struct intel_ring_buffer *ring);
-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
+
+/**
+ * Returns true if seq1 is later than, or the same as, seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+       return (int32_t)(seq1 - seq2) >= 0;
+}
+
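
The signed subtraction in i915_seqno_passed() keeps ordering correct across 32-bit wraparound, provided the two sequence numbers are less than 2^31 apart. A couple of illustrative cases (not driver code):

static void example_seqno_cases(void)
{
	WARN_ON(!i915_seqno_passed(2, 1));	/* ordinary ordering */
	/* across the wrap: (int32_t)(1 - 0xffffffff) == 2 >= 0 */
	WARN_ON(!i915_seqno_passed(1, 0xffffffff));
	WARN_ON(i915_seqno_passed(1, 2));	/* not yet passed */
}
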
+int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+                                 bool interruptible);
+int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+                                 bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
                               uint32_t read_domains,
@@ -990,16 +1052,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 int i915_gpu_idle(struct drm_device *dev);
 int i915_gem_idle(struct drm_device *dev);
 uint32_t i915_add_request(struct drm_device *dev,
-               struct drm_file *file_priv,
-               uint32_t flush_domains,
-               struct intel_ring_buffer *ring);
+                         struct drm_file *file_priv,
+                         struct drm_i915_gem_request *request,
+                         struct intel_ring_buffer *ring);
 int i915_do_wait_request(struct drm_device *dev,
-               uint32_t seqno, int interruptible,
-               struct intel_ring_buffer *ring);
+                        uint32_t seqno,
+                        bool interruptible,
+                        struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
                                      int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+                                        bool pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
                                struct drm_gem_object *obj,
                                int id,
@@ -1007,10 +1071,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -1032,15 +1093,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
                          const char *where, uint32_t mark);
-#if WATCH_INACTIVE
-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
 #else
-#define i915_verify_inactive(dev, file, line)
+#define i915_verify_lists(dev) 0
 #endif
 void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
                          const char *where, uint32_t mark);
-void i915_dump_lru(struct drm_device *dev, const char *where);
 
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
@@ -1054,21 +1114,42 @@ extern int i915_restore_state(struct drm_device *dev);
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+       return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+
+/* intel_opregion.c */
+extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
-/* i915_opregion.c */
-extern int intel_opregion_init(struct drm_device *dev, int resume);
-extern void intel_opregion_free(struct drm_device *dev, int suspend);
-extern void opregion_asle_intr(struct drm_device *dev);
-extern void ironlake_opregion_gse_intr(struct drm_device *dev);
-extern void opregion_enable_asle(struct drm_device *dev);
+extern void intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_gse_intr(struct drm_device *dev);
+extern void intel_opregion_enable_asle(struct drm_device *dev);
 #else
-static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
-static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
-static inline void opregion_asle_intr(struct drm_device *dev) { return; }
-static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
-static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
 /* modesetting */
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1084,8 +1165,10 @@ extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 /* overlay */
+#ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+#endif
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -1099,8 +1182,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
                LOCK_TEST_WITH_RETURN(dev, file_priv);                  \
 } while (0)
 
-#define I915_READ(reg)          readl(dev_priv->regs + (reg))
-#define I915_WRITE(reg, val)     writel(val, dev_priv->regs + (reg))
+static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+       u32 val;
+
+       val = readl(dev_priv->regs + reg);
+       if (dev_priv->debug_flags & I915_DEBUG_READ)
+               printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+       return val;
+}
+
+static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
+                             u32 val)
+{
+       writel(val, dev_priv->regs + reg);
+       if (dev_priv->debug_flags & I915_DEBUG_WRITE)
+               printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+}
+
+#define I915_READ(reg)          i915_read(dev_priv, (reg))
+#define I915_WRITE(reg, val)    i915_write(dev_priv, (reg), (val))
 #define I915_READ16(reg)       readw(dev_priv->regs + (reg))
 #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
 #define I915_READ8(reg)                readb(dev_priv->regs + (reg))
@@ -1110,6 +1211,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define POSTING_READ(reg)      (void)I915_READ(reg)
 #define POSTING_READ16(reg)    (void)I915_READ16(reg)
 
+#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
+                               I915_DEBUG_WRITE)
+#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
+                                                           I915_DEBUG_WRITE))
+
 #define I915_VERBOSE 0
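
The I915_DEBUG_ENABLE_IO()/I915_DEBUG_DISABLE_IO() toggles above bracket a suspect register sequence so that every access is logged by i915_read()/i915_write(). A sketch, assuming dev_priv is in scope and EXAMPLE_REG is a stand-in register offset:

	I915_DEBUG_ENABLE_IO();
	I915_WRITE(EXAMPLE_REG, 0x1);	/* logged: "wrote 0x00000001 ..." */
	(void)I915_READ(EXAMPLE_REG);	/* logged: "read 0x... from ..." */
	I915_DEBUG_DISABLE_IO();
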
 
 #define BEGIN_LP_RING(n)  do { \
@@ -1166,8 +1272,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_I915GM(dev)         ((dev)->pci_device == 0x2592)
 #define IS_I945G(dev)          ((dev)->pci_device == 0x2772)
 #define IS_I945GM(dev)         (INTEL_INFO(dev)->is_i945gm)
-#define IS_I965G(dev)          (INTEL_INFO(dev)->is_i965g)
-#define IS_I965GM(dev)         (INTEL_INFO(dev)->is_i965gm)
 #define IS_BROADWATER(dev)     (INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)      (INTEL_INFO(dev)->is_crestline)
 #define IS_GM45(dev)           ((dev)->pci_device == 0x2A42)
@@ -1178,8 +1282,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_G33(dev)            (INTEL_INFO(dev)->is_g33)
 #define IS_IRONLAKE_D(dev)     ((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)     ((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev)       (INTEL_INFO(dev)->is_ironlake)
-#define IS_I9XX(dev)           (INTEL_INFO(dev)->is_i9xx)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
 
 #define IS_GEN2(dev)   (INTEL_INFO(dev)->gen == 2)
@@ -1188,33 +1290,34 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_GEN5(dev)   (INTEL_INFO(dev)->gen == 5)
 #define IS_GEN6(dev)   (INTEL_INFO(dev)->gen == 6)
 
-#define HAS_BSD(dev)            (IS_IRONLAKE(dev) || IS_G4X(dev))
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
+#define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
                                                      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)  (IS_I9XX(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)  (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)    (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)  (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)  (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)    (IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_EDP(dev)              (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev)               (IS_I9XX(dev) && IS_MOBILE(dev) && \
-                                       !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
-                                       !IS_GEN6(dev))
+#define SUPPORTS_TV(dev)               (INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)           (INTEL_INFO(dev)->has_hotplug)
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
 
-#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||        \
-                           IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
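
With the is_i9xx/is_i965g style flags gone, capability checks go through the per-device intel_device_info table via INTEL_INFO(). An illustrative probe-time use of the macros above (the pr_info lines are made up):

static void example_report_features(struct drm_device *dev)
{
	if (HAS_BSD(dev))
		pr_info("BSD ring present (gen %d)\n", INTEL_INFO(dev)->gen);
	if (SUPPORTS_TV(dev))
		pr_info("integrated TV encoder supported\n");
	if (HAS_PCH_SPLIT(dev))
		pr_info("PCH-split display engine\n");
}
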
index 90b1d6753b9d493d3ed8d2c45153bf2047b54d8f..8eb8453208b5ce491e77c1c06c77c1c0a2da41bc 100644 (file)
@@ -37,7 +37,9 @@
 #include <linux/intel-gtt.h>
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+                                                 bool pipelined);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                     uint64_t offset,
                                                     uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+                                         bool interruptible);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                           unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
                                struct drm_file *file_priv);
 static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
+static int
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+                         gfp_t gfpmask);
+
+static void
+i915_gem_object_put_pages(struct drm_gem_object *obj);
+
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+                                 size_t size)
+{
+       dev_priv->mm.object_count++;
+       dev_priv->mm.object_memory += size;
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+                                    size_t size)
+{
+       dev_priv->mm.object_count--;
+       dev_priv->mm.object_memory -= size;
+}
+
+static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
+                                 size_t size)
+{
+       dev_priv->mm.gtt_count++;
+       dev_priv->mm.gtt_memory += size;
+}
+
+static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
+                                    size_t size)
+{
+       dev_priv->mm.gtt_count--;
+       dev_priv->mm.gtt_memory -= size;
+}
+
+static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
+                                 size_t size)
+{
+       dev_priv->mm.pin_count++;
+       dev_priv->mm.pin_memory += size;
+}
+
+static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
+                                    size_t size)
+{
+       dev_priv->mm.pin_count--;
+       dev_priv->mm.pin_memory -= size;
+}
+
+int
+i915_gem_check_is_wedged(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct completion *x = &dev_priv->error_completion;
+       unsigned long flags;
+       int ret;
+
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+
+       ret = wait_for_completion_interruptible(x);
+       if (ret)
+               return ret;
+
+       /* Success, we reset the GPU! */
+       if (!atomic_read(&dev_priv->mm.wedged))
+               return 0;
+
+       /* GPU is hung, bump the completion count to account for
+        * the token we just consumed so that we never hit zero and
+        * end up waiting upon a subsequent completion event that
+        * will never happen.
+        */
+       spin_lock_irqsave(&x->wait.lock, flags);
+       x->done++;
+       spin_unlock_irqrestore(&x->wait.lock, flags);
+       return -EIO;
+}
+
+static int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       if (atomic_read(&dev_priv->mm.wedged)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EAGAIN;
+       }
+
+       WARN_ON(i915_verify_lists(dev));
+       return 0;
+}
+
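
i915_gem_check_is_wedged() and i915_mutex_lock_interruptible() above give the ioctl paths a single entry point that fails fast once the GPU is wedged. A sketch of the calling convention (the ioctl body is illustrative):

static int example_gem_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;	/* -EIO/-EAGAIN when wedged, or a signal */

	/* ... manipulate GEM state under dev->struct_mutex ... */

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
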
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
 {
@@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
                obj_priv->pin_count == 0;
 }
 
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+int i915_gem_do_init(struct drm_device *dev,
+                    unsigned long start,
                     unsigned long end)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
        drm_mm_init(&dev_priv->mm.gtt_space, start,
                    end - start);
 
-       dev->gtt_total = (uint32_t) (end - start);
+       dev_priv->mm.gtt_total = end - start;
 
        return 0;
 }
@@ -103,14 +209,16 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
 
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       args->aper_size = dev->gtt_total;
-       args->aper_available_size = (args->aper_size -
-                                    atomic_read(&dev->pin_memory));
+       mutex_lock(&dev->struct_mutex);
+       args->aper_size = dev_priv->mm.gtt_total;
+       args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
@@ -136,12 +244,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file_priv, obj, &handle);
-       /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(obj);
        if (ret) {
+               drm_gem_object_release(obj);
+               i915_gem_info_remove_obj(dev->dev_private, obj->size);
+               kfree(obj);
                return ret;
        }
 
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference(obj);
+       trace_i915_gem_object_create(obj);
+
        args->handle = handle;
        return 0;
 }
@@ -152,19 +265,14 @@ fast_shmem_read(struct page **pages,
                char __user *data,
                int length)
 {
-       char __iomem *vaddr;
-       int unwritten;
-
-       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
-       if (vaddr == NULL)
-               return -ENOMEM;
-       unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-       kunmap_atomic(vaddr, KM_USER0);
+       char *vaddr;
+       int ret;
 
-       if (unwritten)
-               return -EFAULT;
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
+       ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+       kunmap_atomic(vaddr);
 
-       return 0;
+       return ret;
 }
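
fast_shmem_read() above follows the kmap_atomic()/kunmap_atomic() API change that dropped the explicit KM_USERx slot, and now returns the uncopied byte count from __copy_to_user_inatomic() rather than collapsing it to -EFAULT. The same shape as a standalone sketch (names illustrative):

static int example_copy_from_page(struct page *page, int offset,
				  char __user *data, int length)
{
	char *vaddr = kmap_atomic(page);
	int ret = __copy_to_user_inatomic(data, vaddr + offset, length);
	kunmap_atomic(vaddr);

	return ret;	/* nonzero: bytes left uncopied, caller falls back */
}
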
 
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
@@ -258,22 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
-       int ret;
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       mutex_lock(&dev->struct_mutex);
-
-       ret = i915_gem_object_get_pages(obj, 0);
-       if (ret != 0)
-               goto fail_unlock;
-
-       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-                                                       args->size);
-       if (ret != 0)
-               goto fail_put_pages;
-
        obj_priv = to_intel_bo(obj);
        offset = args->offset;
 
@@ -290,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               ret = fast_shmem_read(obj_priv->pages,
-                                     page_base, page_offset,
-                                     user_data, page_length);
-               if (ret)
-                       goto fail_put_pages;
+               if (fast_shmem_read(obj_priv->pages,
+                                   page_base, page_offset,
+                                   user_data, page_length))
+                       return -EFAULT;
 
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
 
-fail_put_pages:
-       i915_gem_object_put_pages(obj);
-fail_unlock:
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
+       return 0;
 }
 
 static int
@@ -367,31 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;
 
-       user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+       user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;
 
+       mutex_unlock(&dev->struct_mutex);
        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 1, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
+       mutex_lock(&dev->struct_mutex);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
-               goto fail_put_user_pages;
+               goto out;
        }
 
-       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-       mutex_lock(&dev->struct_mutex);
-
-       ret = i915_gem_object_get_pages_or_evict(obj);
+       ret = i915_gem_object_set_cpu_read_domain_range(obj,
+                                                       args->offset,
+                                                       args->size);
        if (ret)
-               goto fail_unlock;
+               goto out;
 
-       ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
-                                                       args->size);
-       if (ret != 0)
-               goto fail_put_pages;
+       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
        obj_priv = to_intel_bo(obj);
        offset = args->offset;
@@ -436,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                offset += page_length;
        }
 
-fail_put_pages:
-       i915_gem_object_put_pages(obj);
-fail_unlock:
-       mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
+out:
        for (i = 0; i < pinned_pages; i++) {
                SetPageDirty(user_pages[i]);
                page_cache_release(user_pages[i]);
@@ -462,37 +545,64 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-       int ret;
+       int ret = 0;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL)
-               return -ENOENT;
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
        obj_priv = to_intel_bo(obj);
 
        /* Bounds check source.  */
        if (args->offset > obj->size || args->size > obj->size - args->offset) {
                ret = -EINVAL;
-               goto err;
+               goto out;
        }
 
+       if (args->size == 0)
+               goto out;
+
        if (!access_ok(VERIFY_WRITE,
                       (char __user *)(uintptr_t)args->data_ptr,
                       args->size)) {
                ret = -EFAULT;
-               goto err;
+               goto out;
        }
 
-       if (i915_gem_object_needs_bit17_swizzle(obj)) {
-               ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
-       } else {
-               ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
-               if (ret != 0)
-                       ret = i915_gem_shmem_pread_slow(dev, obj, args,
-                                                       file_priv);
+       ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+                                      args->size);
+       if (ret) {
+               ret = -EFAULT;
+               goto out;
        }
 
-err:
-       drm_gem_object_unreference_unlocked(obj);
+       ret = i915_gem_object_get_pages_or_evict(obj);
+       if (ret)
+               goto out;
+
+       ret = i915_gem_object_set_cpu_read_domain_range(obj,
+                                                       args->offset,
+                                                       args->size);
+       if (ret)
+               goto out_put;
+
+       ret = -EFAULT;
+       if (!i915_gem_object_needs_bit17_swizzle(obj))
+               ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+       if (ret == -EFAULT)
+               ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+
+out_put:
+       i915_gem_object_put_pages(obj);
+out:
+       drm_gem_object_unreference(obj);
+unlock:
+       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
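
The rewritten i915_gem_pread_ioctl() above seeds ret with -EFAULT, runs the fast path only when no bit-17 swizzling is needed, and lets any -EFAULT (from the skipped or faulting fast path) fall through to the sleeping slow path. A minimal userspace sketch of that seed-and-fallback idiom; fast_copy() and slow_copy() are hypothetical stand-ins, not driver code:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: the fast path may fail with -EFAULT when the
 * user buffer is not resident; the slow path is allowed to sleep. */
static int fast_copy(bool resident) { return resident ? 0 : -EFAULT; }
static int slow_copy(void) { return 0; }

static int do_pread(bool needs_swizzle, bool resident)
{
        int ret = -EFAULT;              /* seed: fast path not taken */

        if (!needs_swizzle)
                ret = fast_copy(resident);
        if (ret == -EFAULT)             /* skipped or faulted: go slow */
                ret = slow_copy();
        return ret;
}

int main(void)
{
        printf("%d %d\n", do_pread(true, false), do_pread(false, false));
        return 0;
}
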
 
@@ -509,13 +619,11 @@ fast_user_write(struct io_mapping *mapping,
        char *vaddr_atomic;
        unsigned long unwritten;
 
-       vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
+       vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
-       io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       io_mapping_unmap_atomic(vaddr_atomic);
+       return unwritten;
 }
 
 /* Here's the write path which can sleep for
@@ -548,18 +656,14 @@ fast_shmem_write(struct page **pages,
                 char __user *data,
                 int length)
 {
-       char __iomem *vaddr;
-       unsigned long unwritten;
+       char *vaddr;
+       int ret;
 
-       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
-       if (vaddr == NULL)
-               return -ENOMEM;
-       unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-       kunmap_atomic(vaddr, KM_USER0);
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
+       ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+       kunmap_atomic(vaddr);
 
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       return ret;
 }
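
fast_shmem_write() now forwards the copy routine's result unchanged: __copy_from_user_inatomic() reports the number of bytes it could not copy, so zero means success and any nonzero count is treated as a fault by the caller. A toy illustration of that return convention; copy_in() is a made-up stand-in with the fault injected via a parameter:

#include <stdio.h>
#include <string.h>

/* Userspace stand-in for __copy_from_user_inatomic(): returns the number
 * of bytes NOT copied, so 0 means success. 'faulted' injects a failure. */
static size_t copy_in(void *dst, const void *src, size_t len, size_t faulted)
{
        size_t done = len > faulted ? len - faulted : 0;

        memcpy(dst, src, done);
        return len - done;
}

int main(void)
{
        char dst[8], src[8] = "abcdefg";

        printf("ok=%zu fault=%zu\n",
               copy_in(dst, src, sizeof(src), 0),
               copy_in(dst, src, sizeof(src), 3));
        return 0;
}
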
 
 /**
@@ -577,22 +681,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
-       int ret;
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_object_pin(obj, 0);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
-       ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-       if (ret)
-               goto fail;
-
        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;
 
@@ -609,26 +701,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
-                                      page_offset, user_data, page_length);
-
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
-               if (ret)
-                       goto fail;
+               if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+                                   page_offset, user_data, page_length))
+                       return -EFAULT;
 
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
 
-fail:
-       i915_gem_object_unpin(obj);
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
+       return 0;
 }
 
 /**
@@ -665,27 +752,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;
 
-       user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+       user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;
 
+       mutex_unlock(&dev->struct_mutex);
        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
+       mutex_lock(&dev->struct_mutex);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto out_unpin_pages;
        }
 
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_object_pin(obj, 0);
-       if (ret)
-               goto out_unlock;
-
        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
-               goto out_unpin_object;
+               goto out_unpin_pages;
 
        obj_priv = to_intel_bo(obj);
        offset = obj_priv->gtt_offset + args->offset;
@@ -721,10 +805,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                data_ptr += page_length;
        }
 
-out_unpin_object:
-       i915_gem_object_unpin(obj);
-out_unlock:
-       mutex_unlock(&dev->struct_mutex);
 out_unpin_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
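
Both slow paths above now drop struct_mutex before calling get_user_pages(), which takes mmap_sem, and retake it afterwards, so the two locks are never held together in this order (i915_gem_fault acquires struct_mutex while mmap_sem is already held). A userspace analogue of that discipline using plain mutexes; a sketch, not driver code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mmap_sem     = PTHREAD_MUTEX_INITIALIZER;

/* Called with struct_mutex held: give it up before touching mmap_sem,
 * retake it once the user pages are pinned. */
static void pin_user_pages(void)
{
        pthread_mutex_unlock(&struct_mutex);
        pthread_mutex_lock(&mmap_sem);
        printf("get_user_pages()\n");
        pthread_mutex_unlock(&mmap_sem);
        pthread_mutex_lock(&struct_mutex);
}

int main(void)
{
        pthread_mutex_lock(&struct_mutex);
        pin_user_pages();
        pthread_mutex_unlock(&struct_mutex);
        return 0;
}
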
@@ -747,21 +827,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
-       int ret;
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       mutex_lock(&dev->struct_mutex);
-
-       ret = i915_gem_object_get_pages(obj, 0);
-       if (ret != 0)
-               goto fail_unlock;
-
-       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret != 0)
-               goto fail_put_pages;
-
        obj_priv = to_intel_bo(obj);
        offset = args->offset;
        obj_priv->dirty = 1;
@@ -779,23 +848,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
-               ret = fast_shmem_write(obj_priv->pages,
-                                      page_base, page_offset,
-                                      user_data, page_length);
-               if (ret)
-                       goto fail_put_pages;
+               if (fast_shmem_write(obj_priv->pages,
+                                    page_base, page_offset,
+                                    user_data, page_length))
+                       return -EFAULT;
 
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
 
-fail_put_pages:
-       i915_gem_object_put_pages(obj);
-fail_unlock:
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
+       return 0;
 }
 
 /**
@@ -833,30 +896,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;
 
-       user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+       user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (user_pages == NULL)
                return -ENOMEM;
 
+       mutex_unlock(&dev->struct_mutex);
        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
+       mutex_lock(&dev->struct_mutex);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
-               goto fail_put_user_pages;
+               goto out;
        }
 
-       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-       mutex_lock(&dev->struct_mutex);
-
-       ret = i915_gem_object_get_pages_or_evict(obj);
+       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret)
-               goto fail_unlock;
+               goto out;
 
-       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret != 0)
-               goto fail_put_pages;
+       do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
        obj_priv = to_intel_bo(obj);
        offset = args->offset;
@@ -902,11 +961,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                offset += page_length;
        }
 
-fail_put_pages:
-       i915_gem_object_put_pages(obj);
-fail_unlock:
-       mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
+out:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        drm_free_large(user_pages);
@@ -921,29 +976,46 @@ fail_put_user_pages:
  */
 int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
+                     struct drm_file *file)
 {
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL)
-               return -ENOENT;
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
+
+       obj = drm_gem_object_lookup(dev, file, args->handle);
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
        obj_priv = to_intel_bo(obj);
 
        /* Bounds check destination. */
        if (args->offset > obj->size || args->size > obj->size - args->offset) {
                ret = -EINVAL;
-               goto err;
+               goto out;
        }
 
+       if (args->size == 0)
+               goto out;
+
        if (!access_ok(VERIFY_READ,
                       (char __user *)(uintptr_t)args->data_ptr,
                       args->size)) {
                ret = -EFAULT;
-               goto err;
+               goto out;
+       }
+
+       ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+                                     args->size);
+       if (ret) {
+               ret = -EFAULT;
+               goto out;
        }
 
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -953,32 +1025,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->phys_obj)
-               ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+               ret = i915_gem_phys_pwrite(dev, obj, args, file);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-                dev->gtt_total != 0 &&
+                obj_priv->gtt_space &&
                 obj->write_domain != I915_GEM_DOMAIN_CPU) {
-               ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
-               if (ret == -EFAULT) {
-                       ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
-                                                      file_priv);
-               }
-       } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
-               ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
+               ret = i915_gem_object_pin(obj, 0);
+               if (ret)
+                       goto out;
+
+               ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+               if (ret)
+                       goto out_unpin;
+
+               ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+               if (ret == -EFAULT)
+                       ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
+
+out_unpin:
+               i915_gem_object_unpin(obj);
        } else {
-               ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
-               if (ret == -EFAULT) {
-                       ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
-                                                        file_priv);
-               }
-       }
+               ret = i915_gem_object_get_pages_or_evict(obj);
+               if (ret)
+                       goto out;
 
-#if WATCH_PWRITE
-       if (ret)
-               DRM_INFO("pwrite failed %d\n", ret);
-#endif
+               ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+               if (ret)
+                       goto out_put;
 
-err:
-       drm_gem_object_unreference_unlocked(obj);
+               ret = -EFAULT;
+               if (!i915_gem_object_needs_bit17_swizzle(obj))
+                       ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+               if (ret == -EFAULT)
+                       ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+
+out_put:
+               i915_gem_object_put_pages(obj);
+       }
+
+out:
+       drm_gem_object_unreference(obj);
+unlock:
+       mutex_unlock(&dev->struct_mutex);
        return ret;
 }
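
The ioctl above unwinds with a ladder of labels (out_unpin, out_put, out, unlock): each failure jumps to the label that releases exactly what has been acquired so far, so every path drops the reference and the lock. A self-contained sketch of the idiom, with the acquisitions faked by malloc and comments:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int ioctl_body(int fail_at)
{
        void *obj;
        int ret = 0;

        /* take the lock here */
        obj = malloc(16);               /* lookup: takes a reference */
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
        if (fail_at == 1) {             /* e.g. a failed bounds check */
                ret = -EINVAL;
                goto out;
        }
        /* ... fast/slow write paths ... */
out:
        free(obj);                      /* drop the reference */
unlock:
        /* release the lock here */
        return ret;
}

int main(void)
{
        printf("%d %d\n", ioctl_body(1), ioctl_body(2));
        return 0;
}
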
 
@@ -1014,19 +1101,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
+
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL)
-               return -ENOENT;
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
        obj_priv = to_intel_bo(obj);
 
-       mutex_lock(&dev->struct_mutex);
-
        intel_mark_busy(dev, obj);
 
-#if WATCH_BUF
-       DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
-                obj, obj->size, read_domains, write_domain);
-#endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
@@ -1050,12 +1137,12 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }
 
-       
        /* Maintain LRU order of "inactive" objects */
        if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
-               list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
 
        drm_gem_object_unreference(obj);
+unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
@@ -1069,30 +1156,27 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
        int ret = 0;
 
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
+
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               mutex_unlock(&dev->struct_mutex);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
 
-#if WATCH_BUF
-       DRM_INFO("%s: sw_finish %d (%p %zd)\n",
-                __func__, args->handle, obj, obj->size);
-#endif
-       obj_priv = to_intel_bo(obj);
-
        /* Pinned buffers may be scanout, so flush the cache */
-       if (obj_priv->pin_count)
+       if (to_intel_bo(obj)->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);
 
        drm_gem_object_unreference(obj);
+unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
@@ -1181,13 +1265,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* Need a new fence register? */
        if (obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj);
+               ret = i915_gem_object_get_fence_reg(obj, true);
                if (ret)
                        goto unlock;
        }
 
        if (i915_gem_object_is_inactive(obj_priv))
-               list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
 
        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                page_offset;
@@ -1246,7 +1330,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
                                                    obj->size / PAGE_SIZE, 0, 0);
        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-               ret = -ENOMEM;
+               ret = -ENOSPC;
                goto out_free_list;
        }
 
@@ -1258,9 +1342,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
        }
 
        list->hash.key = list->file_offset_node->start;
-       if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+       if (ret) {
                DRM_ERROR("failed to add to map hash\n");
-               ret = -ENOMEM;
                goto out_free_mm;
        }
 
@@ -1345,14 +1429,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
-       if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+       if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
                return 4096;
 
        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
-       if (IS_I9XX(dev))
+       if (INTEL_INFO(dev)->gen == 3)
                start = 1024*1024;
        else
                start = 512*1024;
@@ -1390,29 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (obj == NULL)
-               return -ENOENT;
-
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
        obj_priv = to_intel_bo(obj);
 
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to mmap a purgeable buffer\n");
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
-
        if (!obj_priv->mmap_offset) {
                ret = i915_gem_create_mmap_offset(obj);
-               if (ret) {
-                       drm_gem_object_unreference(obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+               if (ret)
+                       goto out;
        }
 
        args->offset = obj_priv->mmap_offset;
@@ -1423,20 +1505,18 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
         */
        if (!obj_priv->agp_mem) {
                ret = i915_gem_object_bind_to_gtt(obj, 0);
-               if (ret) {
-                       drm_gem_object_unreference(obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+               if (ret)
+                       goto out;
        }
 
+out:
        drm_gem_object_unreference(obj);
+unlock:
        mutex_unlock(&dev->struct_mutex);
-
-       return 0;
+       return ret;
 }
 
-void
+static void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1470,13 +1550,25 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
        obj_priv->pages = NULL;
 }
 
+static uint32_t
+i915_gem_next_request_seqno(struct drm_device *dev,
+                           struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       ring->outstanding_lazy_request = true;
+       return dev_priv->next_seqno;
+}
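
i915_gem_next_request_seqno() hands out the seqno of a request that has not been emitted yet and records the debt in outstanding_lazy_request; i915_add_request() clears the flag when a real request is written, and the i915_do_wait_request() hunk further down emits the owed request before waiting. A toy model of that bookkeeping, not the driver's real structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
        uint32_t next_seqno;
        bool outstanding_lazy_request;
};

static uint32_t next_request_seqno(struct ring *ring)
{
        ring->outstanding_lazy_request = true;  /* a request is now owed */
        return ring->next_seqno;
}

static uint32_t add_request(struct ring *ring)
{
        ring->outstanding_lazy_request = false; /* debt paid */
        return ring->next_seqno++;
}

int main(void)
{
        struct ring ring = { .next_seqno = 1 };
        uint32_t wanted = next_request_seqno(&ring);

        if (ring.outstanding_lazy_request)      /* emit before waiting */
                add_request(&ring);
        printf("waited on %u, last emitted %u\n", wanted, ring.next_seqno - 1);
        return 0;
}
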
+
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+i915_gem_object_move_to_active(struct drm_gem_object *obj,
                               struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = obj->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
+
        BUG_ON(ring == NULL);
        obj_priv->ring = ring;
 
@@ -1485,10 +1577,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
                drm_gem_object_reference(obj);
                obj_priv->active = 1;
        }
+
        /* Move from whatever list we were on to the tail of execution. */
-       spin_lock(&dev_priv->mm.active_list_lock);
-       list_move_tail(&obj_priv->list, &ring->active_list);
-       spin_unlock(&dev_priv->mm.active_list_lock);
+       list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
+       list_move_tail(&obj_priv->ring_list, &ring->active_list);
        obj_priv->last_rendering_seqno = seqno;
 }
 
@@ -1500,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        BUG_ON(!obj_priv->active);
-       list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+       list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
+       list_del_init(&obj_priv->ring_list);
        obj_priv->last_rendering_seqno = 0;
 }
 
@@ -1538,11 +1631,11 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-       i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
-               list_del_init(&obj_priv->list);
+               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
        else
-               list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+       list_del_init(&obj_priv->ring_list);
 
        BUG_ON(!list_empty(&obj_priv->gpu_write_list));
 
@@ -1552,30 +1645,28 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
        }
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       WARN_ON(i915_verify_lists(dev));
 }
 
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-                              uint32_t flush_domains, uint32_t seqno,
+                              uint32_t flush_domains,
                               struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv, *next;
 
        list_for_each_entry_safe(obj_priv, next,
-                                &dev_priv->mm.gpu_write_list,
+                                &ring->gpu_write_list,
                                 gpu_write_list) {
                struct drm_gem_object *obj = &obj_priv->base;
 
-               if ((obj->write_domain & flush_domains) ==
-                   obj->write_domain &&
-                   obj_priv->ring->ring_flag == ring->ring_flag) {
+               if (obj->write_domain & flush_domains) {
                        uint32_t old_write_domain = obj->write_domain;
 
                        obj->write_domain = 0;
                        list_del_init(&obj_priv->gpu_write_list);
-                       i915_gem_object_move_to_active(obj, seqno, ring);
+                       i915_gem_object_move_to_active(obj, ring);
 
                        /* update the fence lru list */
                        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,23 +1684,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 }
 
 uint32_t
-i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-                uint32_t flush_domains, struct intel_ring_buffer *ring)
+i915_add_request(struct drm_device *dev,
+                struct drm_file *file,
+                struct drm_i915_gem_request *request,
+                struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_file_private *i915_file_priv = NULL;
-       struct drm_i915_gem_request *request;
+       struct drm_i915_file_private *file_priv = NULL;
        uint32_t seqno;
        int was_empty;
 
-       if (file_priv != NULL)
-               i915_file_priv = file_priv->driver_priv;
+       if (file != NULL)
+               file_priv = file->driver_priv;
 
-       request = kzalloc(sizeof(*request), GFP_KERNEL);
-       if (request == NULL)
-               return 0;
+       if (request == NULL) {
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return 0;
+       }
 
-       seqno = ring->add_request(dev, ring, file_priv, flush_domains);
+       seqno = ring->add_request(dev, ring, 0);
+       ring->outstanding_lazy_request = false;
 
        request->seqno = seqno;
        request->ring = ring;
@@ -1617,23 +1712,20 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
 
-       if (i915_file_priv) {
+       if (file_priv) {
+               spin_lock(&file_priv->mm.lock);
+               request->file_priv = file_priv;
                list_add_tail(&request->client_list,
-                             &i915_file_priv->mm.request_list);
-       } else {
-               INIT_LIST_HEAD(&request->client_list);
+                             &file_priv->mm.request_list);
+               spin_unlock(&file_priv->mm.lock);
        }
 
-       /* Associate any objects on the flushing list matching the write
-        * domain we're flushing with our flush.
-        */
-       if (flush_domains != 0) 
-               i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
-
        if (!dev_priv->mm.suspended) {
-               mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+               mod_timer(&dev_priv->hangcheck_timer,
+                         jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
                if (was_empty)
-                       queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+                       queue_delayed_work(dev_priv->wq,
+                                          &dev_priv->mm.retire_work, HZ);
        }
        return seqno;
 }
@@ -1644,91 +1736,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * Ensures that all commands in the ring are finished
  * before signalling the CPU
  */
-static uint32_t
+static void
 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
        uint32_t flush_domains = 0;
 
        /* The sampler always gets flushed on i965 (sigh) */
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
 
        ring->flush(dev, ring,
                        I915_GEM_DOMAIN_COMMAND, flush_domains);
-       return flush_domains;
 }
 
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
-                       struct drm_i915_gem_request *request)
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *file_priv = request->file_priv;
 
-       trace_i915_gem_request_retire(dev, request->seqno);
+       if (!file_priv)
+               return;
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate.
-        */
-       spin_lock(&dev_priv->mm.active_list_lock);
-       while (!list_empty(&request->ring->active_list)) {
-               struct drm_gem_object *obj;
-               struct drm_i915_gem_object *obj_priv;
+       spin_lock(&file_priv->mm.lock);
+       list_del(&request->client_list);
+       request->file_priv = NULL;
+       spin_unlock(&file_priv->mm.lock);
+}
 
-               obj_priv = list_first_entry(&request->ring->active_list,
-                                           struct drm_i915_gem_object,
-                                           list);
-               obj = &obj_priv->base;
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+                                     struct intel_ring_buffer *ring)
+{
+       while (!list_empty(&ring->request_list)) {
+               struct drm_i915_gem_request *request;
 
-               /* If the seqno being retired doesn't match the oldest in the
-                * list, then the oldest in the list must still be newer than
-                * this seqno.
-                */
-               if (obj_priv->last_rendering_seqno != request->seqno)
-                       goto out;
+               request = list_first_entry(&ring->request_list,
+                                          struct drm_i915_gem_request,
+                                          list);
 
-#if WATCH_LRU
-               DRM_INFO("%s: retire %d moves to inactive list %p\n",
-                        __func__, request->seqno, obj);
-#endif
+               list_del(&request->list);
+               i915_gem_request_remove_from_client(request);
+               kfree(request);
+       }
 
-               if (obj->write_domain != 0)
-                       i915_gem_object_move_to_flushing(obj);
-               else {
-                       /* Take a reference on the object so it won't be
-                        * freed while the spinlock is held.  The list
-                        * protection for this spinlock is safe when breaking
-                        * the lock like this since the next thing we do
-                        * is just get the head of the list again.
-                        */
-                       drm_gem_object_reference(obj);
-                       i915_gem_object_move_to_inactive(obj);
-                       spin_unlock(&dev_priv->mm.active_list_lock);
-                       drm_gem_object_unreference(obj);
-                       spin_lock(&dev_priv->mm.active_list_lock);
-               }
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj_priv;
+
+               obj_priv = list_first_entry(&ring->active_list,
+                                           struct drm_i915_gem_object,
+                                           ring_list);
+
+               obj_priv->base.write_domain = 0;
+               list_del_init(&obj_priv->gpu_write_list);
+               i915_gem_object_move_to_inactive(&obj_priv->base);
        }
-out:
-       spin_unlock(&dev_priv->mm.active_list_lock);
 }
 
-/**
- * Returns true if seq1 is later than seq2.
- */
-bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+void i915_gem_reset(struct drm_device *dev)
 {
-       return (int32_t)(seq1 - seq2) >= 0;
-}
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       int i;
 
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev,
-                  struct intel_ring_buffer *ring)
-{
-       return ring->get_gem_seqno(dev, ring);
+       i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
+       i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+       i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+
+       /* Remove anything from the flushing lists. The GPU cache is likely
+        * to be lost on reset along with the data, so simply move the
+        * lost bo to the inactive list.
+        */
+       while (!list_empty(&dev_priv->mm.flushing_list)) {
+               obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+                                           struct drm_i915_gem_object,
+                                           mm_list);
+
+               obj_priv->base.write_domain = 0;
+               list_del_init(&obj_priv->gpu_write_list);
+               i915_gem_object_move_to_inactive(&obj_priv->base);
+       }
+
+       /* Move everything out of the GPU domains to ensure we do any
+        * necessary invalidation upon reuse.
+        */
+       list_for_each_entry(obj_priv,
+                           &dev_priv->mm.inactive_list,
+                           mm_list)
+               obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
+       /* The fence registers are invalidated so clear them out */
+       for (i = 0; i < 16; i++) {
+               struct drm_i915_fence_reg *reg;
+
+               reg = &dev_priv->fence_regs[i];
+               if (!reg->obj)
+                       continue;
+
+               i915_gem_clear_fence_reg(reg->obj);
+       }
 }
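
The retirement logic above leans on i915_seqno_passed(), whose body (removed from this spot by the patch) is (int32_t)(seq1 - seq2) >= 0: unsigned subtraction followed by a signed compare stays correct across 32-bit wraparound as long as the two seqnos are within 2^31 of each other. A standalone check of that property:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same test as i915_seqno_passed(). */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(5, 3));
        assert(!seqno_passed(3, 5));
        assert(seqno_passed(2, 0xfffffffe)); /* 2 is "after" wraparound */
        printf("wraparound compare ok\n");
        return 0;
}
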
 
 /**
@@ -1741,38 +1847,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
 
-       if (!ring->status_page.page_addr
-                       || list_empty(&ring->request_list))
+       if (!ring->status_page.page_addr ||
+           list_empty(&ring->request_list))
                return;
 
-       seqno = i915_get_gem_seqno(dev, ring);
+       WARN_ON(i915_verify_lists(dev));
 
+       seqno = ring->get_seqno(dev, ring);
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
-               uint32_t retiring_seqno;
 
                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);
-               retiring_seqno = request->seqno;
 
-               if (i915_seqno_passed(seqno, retiring_seqno) ||
-                   atomic_read(&dev_priv->mm.wedged)) {
-                       i915_gem_retire_request(dev, request);
+               if (!i915_seqno_passed(seqno, request->seqno))
+                       break;
+
+               trace_i915_gem_request_retire(dev, request->seqno);
+
+               list_del(&request->list);
+               i915_gem_request_remove_from_client(request);
+               kfree(request);
+       }
 
-                       list_del(&request->list);
-                       list_del(&request->client_list);
-                       kfree(request);
-               } else
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_gem_object *obj;
+               struct drm_i915_gem_object *obj_priv;
+
+               obj_priv = list_first_entry(&ring->active_list,
+                                           struct drm_i915_gem_object,
+                                           ring_list);
+
+               if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
                        break;
+
+               obj = &obj_priv->base;
+               if (obj->write_domain != 0)
+                       i915_gem_object_move_to_flushing(obj);
+               else
+                       i915_gem_object_move_to_inactive(obj);
        }
 
        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-
                ring->user_irq_put(dev, ring);
                dev_priv->trace_irq_seqno = 0;
        }
+
+       WARN_ON(i915_verify_lists(dev));
 }
 
 void
@@ -1790,16 +1916,16 @@ i915_gem_retire_requests(struct drm_device *dev)
             */
            list_for_each_entry_safe(obj_priv, tmp,
                                     &dev_priv->mm.deferred_free_list,
-                                    list)
+                                    mm_list)
                    i915_gem_free_object_tail(&obj_priv->base);
        }
 
        i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
-       if (HAS_BSD(dev))
-               i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+       i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+       i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
 }
 
-void
+static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv;
@@ -1809,20 +1935,25 @@ i915_gem_retire_work_handler(struct work_struct *work)
                                mm.retire_work.work);
        dev = dev_priv->dev;
 
-       mutex_lock(&dev->struct_mutex);
+       /* Come back later if the device is busy... */
+       if (!mutex_trylock(&dev->struct_mutex)) {
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+               return;
+       }
+
        i915_gem_retire_requests(dev);
 
        if (!dev_priv->mm.suspended &&
                (!list_empty(&dev_priv->render_ring.request_list) ||
-                       (HAS_BSD(dev) &&
-                        !list_empty(&dev_priv->bsd_ring.request_list))))
+                !list_empty(&dev_priv->bsd_ring.request_list) ||
+                !list_empty(&dev_priv->blt_ring.request_list)))
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
 }
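
The retire work handler above switches from mutex_lock() to mutex_trylock(): a busy device no longer blocks the shared workqueue, the handler simply requeues itself and returns. A userspace analogue with pthread_mutex_trylock(), which fails with EBUSY on an already-held default mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void retire_work(void)
{
        if (pthread_mutex_trylock(&lock) != 0) {
                printf("busy, requeue\n");      /* queue_delayed_work(...) */
                return;
        }
        printf("retire requests\n");
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        retire_work();                  /* lock free: does the work */
        pthread_mutex_lock(&lock);
        retire_work();                  /* lock held: backs off */
        pthread_mutex_unlock(&lock);
        return 0;
}
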
 
 int
 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
-               int interruptible, struct intel_ring_buffer *ring)
+                    bool interruptible, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 ier;
@@ -1831,9 +1962,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
        BUG_ON(seqno == 0);
 
        if (atomic_read(&dev_priv->mm.wedged))
-               return -EIO;
+               return -EAGAIN;
+
+       if (ring->outstanding_lazy_request) {
+               seqno = i915_add_request(dev, NULL, NULL, ring);
+               if (seqno == 0)
+                       return -ENOMEM;
+       }
+       BUG_ON(seqno == dev_priv->next_seqno);
 
-       if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
@@ -1852,12 +1990,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                if (interruptible)
                        ret = wait_event_interruptible(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
                else
                        wait_event(ring->irq_queue,
                                i915_seqno_passed(
-                                       ring->get_gem_seqno(dev, ring), seqno)
+                                       ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
 
                ring->user_irq_put(dev, ring);
@@ -1866,11 +2004,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
                trace_i915_gem_request_wait_end(dev, seqno);
        }
        if (atomic_read(&dev_priv->mm.wedged))
-               ret = -EIO;
+               ret = -EAGAIN;
 
        if (ret && ret != -ERESTARTSYS)
-               DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-                         __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
+               DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+                         __func__, ret, seqno, ring->get_seqno(dev, ring),
+                         dev_priv->next_seqno);
 
        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
@@ -1889,27 +2028,48 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
  */
 static int
 i915_wait_request(struct drm_device *dev, uint32_t seqno,
-               struct intel_ring_buffer *ring)
+                 struct intel_ring_buffer *ring)
 {
        return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+                   struct drm_file *file_priv,
+                   struct intel_ring_buffer *ring,
+                   uint32_t invalidate_domains,
+                   uint32_t flush_domains)
+{
+       ring->flush(dev, ring, invalidate_domains, flush_domains);
+       i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
 static void
 i915_gem_flush(struct drm_device *dev,
+              struct drm_file *file_priv,
               uint32_t invalidate_domains,
-              uint32_t flush_domains)
+              uint32_t flush_domains,
+              uint32_t flush_rings)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);
-       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
-                       invalidate_domains,
-                       flush_domains);
-
-       if (HAS_BSD(dev))
-               dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
-                               invalidate_domains,
-                               flush_domains);
+
+       if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+               if (flush_rings & RING_RENDER)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           &dev_priv->render_ring,
+                                           invalidate_domains, flush_domains);
+               if (flush_rings & RING_BSD)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           &dev_priv->bsd_ring,
+                                           invalidate_domains, flush_domains);
+               if (flush_rings & RING_BLT)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           &dev_priv->blt_ring,
+                                           invalidate_domains, flush_domains);
+       }
 }
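
i915_gem_flush() above fans the flush out to each ring named in the flush_rings bitmask. A compact sketch of that dispatch; the bit values chosen here are placeholders, not the driver's RING_* definitions:

#include <stdint.h>
#include <stdio.h>

#define RING_RENDER     (1 << 0)        /* placeholder values */
#define RING_BSD        (1 << 1)
#define RING_BLT        (1 << 2)

static void flush_ring(const char *name) { printf("flush %s\n", name); }

static void gem_flush(uint32_t flush_rings)
{
        if (flush_rings & RING_RENDER)
                flush_ring("render");
        if (flush_rings & RING_BSD)
                flush_ring("bsd");
        if (flush_rings & RING_BLT)
                flush_ring("blt");
}

int main(void)
{
        gem_flush(RING_RENDER | RING_BLT);
        return 0;
}
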
 
 /**
@@ -1917,7 +2077,8 @@ i915_gem_flush(struct drm_device *dev,
  * safe to unbind from the GTT or access from the CPU.
  */
 static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+                              bool interruptible)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1932,13 +2093,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
         * it.
         */
        if (obj_priv->active) {
-#if WATCH_BUF
-               DRM_INFO("%s: object %p wait for seqno %08x\n",
-                         __func__, obj, obj_priv->last_rendering_seqno);
-#endif
-               ret = i915_wait_request(dev,
-                               obj_priv->last_rendering_seqno, obj_priv->ring);
-               if (ret != 0)
+               ret = i915_do_wait_request(dev,
+                                          obj_priv->last_rendering_seqno,
+                                          interruptible,
+                                          obj_priv->ring);
+               if (ret)
                        return ret;
        }
 
@@ -1952,14 +2111,10 @@ int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret = 0;
 
-#if WATCH_BUF
-       DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
-       DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-#endif
        if (obj_priv->gtt_space == NULL)
                return 0;
 
@@ -1984,33 +2139,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
         * should be safe and we need to cleanup or else we might
         * cause memory corruption through use-after-free.
         */
+       if (ret) {
+               i915_gem_clflush_object(obj);
+               obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+       }
 
        /* release the fence reg _after_ flushing */
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                i915_gem_clear_fence_reg(obj);
 
-       if (obj_priv->agp_mem != NULL) {
-               drm_unbind_agp(obj_priv->agp_mem);
-               drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
-               obj_priv->agp_mem = NULL;
-       }
+       drm_unbind_agp(obj_priv->agp_mem);
+       drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
 
        i915_gem_object_put_pages(obj);
        BUG_ON(obj_priv->pages_refcount);
 
-       if (obj_priv->gtt_space) {
-               atomic_dec(&dev->gtt_count);
-               atomic_sub(obj->size, &dev->gtt_memory);
-
-               drm_mm_put_block(obj_priv->gtt_space);
-               obj_priv->gtt_space = NULL;
-       }
+       i915_gem_info_remove_gtt(dev_priv, obj->size);
+       list_del_init(&obj_priv->mm_list);
 
-       /* Remove ourselves from the LRU list if present. */
-       spin_lock(&dev_priv->mm.active_list_lock);
-       if (!list_empty(&obj_priv->list))
-               list_del_init(&obj_priv->list);
-       spin_unlock(&dev_priv->mm.active_list_lock);
+       drm_mm_put_block(obj_priv->gtt_space);
+       obj_priv->gtt_space = NULL;
+       obj_priv->gtt_offset = 0;
 
        if (i915_gem_object_is_purgeable(obj_priv))
                i915_gem_object_truncate(obj);
@@ -2020,48 +2169,50 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
        return ret;
 }
 
+static int i915_ring_idle(struct drm_device *dev,
+                         struct intel_ring_buffer *ring)
+{
+       if (list_empty(&ring->gpu_write_list))
+               return 0;
+
+       i915_gem_flush_ring(dev, NULL, ring,
+                           I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       return i915_wait_request(dev,
+                                i915_gem_next_request_seqno(dev, ring),
+                                ring);
+}
+
 int
 i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
-       uint32_t seqno1, seqno2;
        int ret;
 
-       spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
-                      (!HAS_BSD(dev) ||
-                       list_empty(&dev_priv->bsd_ring.active_list)));
-       spin_unlock(&dev_priv->mm.active_list_lock);
-
+                      list_empty(&dev_priv->bsd_ring.active_list) &&
+                      list_empty(&dev_priv->blt_ring.active_list));
        if (lists_empty)
                return 0;
 
        /* Flush everything onto the inactive list. */
-       i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-       seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-                       &dev_priv->render_ring);
-       if (seqno1 == 0)
-               return -ENOMEM;
-       ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
-
-       if (HAS_BSD(dev)) {
-               seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-                               &dev_priv->bsd_ring);
-               if (seqno2 == 0)
-                       return -ENOMEM;
+       ret = i915_ring_idle(dev, &dev_priv->render_ring);
+       if (ret)
+               return ret;
 
-               ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
-               if (ret)
-                       return ret;
-       }
+       ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
+       if (ret)
+               return ret;
 
+       ret = i915_ring_idle(dev, &dev_priv->blt_ring);
+       if (ret)
+               return ret;
 
-       return ret;
+       return 0;
 }
 
-int
+static int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
                          gfp_t gfpmask)
 {
@@ -2241,7 +2392,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
        I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
 }
 
-static int i915_find_fence_reg(struct drm_device *dev)
+static int i915_find_fence_reg(struct drm_device *dev,
+                              bool interruptible)
 {
        struct drm_i915_fence_reg *reg = NULL;
        struct drm_i915_gem_object *obj_priv = NULL;
@@ -2286,7 +2438,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
         * private reference to obj like the other callers of put_fence_reg
         * (set_tiling ioctl) do. */
        drm_gem_object_reference(obj);
-       ret = i915_gem_object_put_fence_reg(obj);
+       ret = i915_gem_object_put_fence_reg(obj, interruptible);
        drm_gem_object_unreference(obj);
        if (ret != 0)
                return ret;
@@ -2308,7 +2460,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
  * and tiling format.
  */
 int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+                             bool interruptible)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2343,7 +2496,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
                break;
        }
 
-       ret = i915_find_fence_reg(dev);
+       ret = i915_find_fence_reg(dev, interruptible);
        if (ret < 0)
                return ret;
 
@@ -2421,15 +2574,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
  * to the buffer to finish, and then resets the fence register.
  * @obj: tiled object holding a fence register.
+ * @interruptible: whether the wait upon the fence is interruptible
  *
  * Zeroes out the fence register itself and clears out the associated
  * data structures in dev_priv and obj_priv.
  */
 int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+                             bool interruptible)
 {
        struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct drm_i915_fence_reg *reg;
 
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
                return 0;
@@ -2444,20 +2601,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
         * therefore we must wait for any outstanding access to complete
         * before clearing the fence.
         */
-       if (!IS_I965G(dev)) {
+       reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+       if (reg->gpu) {
                int ret;
 
-               ret = i915_gem_object_flush_gpu_write_domain(obj);
-               if (ret != 0)
+               ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+               if (ret)
                        return ret;
 
-               ret = i915_gem_object_wait_rendering(obj);
-               if (ret != 0)
+               ret = i915_gem_object_wait_rendering(obj, interruptible);
+               if (ret)
                        return ret;
+
+               reg->gpu = false;
        }
 
        i915_gem_object_flush_gtt_write_domain(obj);
-       i915_gem_clear_fence_reg (obj);
+       i915_gem_clear_fence_reg(obj);
 
        return 0;
 }
@@ -2490,7 +2650,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
-       if (obj->size > dev->gtt_total) {
+       if (obj->size > dev_priv->mm.gtt_total) {
                DRM_ERROR("Attempting to bind an object larger than the aperture\n");
                return -E2BIG;
        }
@@ -2498,19 +2658,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
  search_free:
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
-       if (free_space != NULL) {
+       if (free_space != NULL)
                obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
                                                       alignment);
-               if (obj_priv->gtt_space != NULL)
-                       obj_priv->gtt_offset = obj_priv->gtt_space->start;
-       }
        if (obj_priv->gtt_space == NULL) {
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
-#if WATCH_LRU
-               DRM_INFO("%s: GTT full, evicting something\n", __func__);
-#endif
                ret = i915_gem_evict_something(dev, obj->size, alignment);
                if (ret)
                        return ret;
@@ -2518,10 +2672,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                goto search_free;
        }
 
-#if WATCH_BUF
-       DRM_INFO("Binding object of size %zd at 0x%08x\n",
-                obj->size, obj_priv->gtt_offset);
-#endif
        ret = i915_gem_object_get_pages(obj, gfpmask);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
@@ -2553,7 +2703,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
                                               obj_priv->pages,
                                               obj->size >> PAGE_SHIFT,
-                                              obj_priv->gtt_offset,
+                                              obj_priv->gtt_space->start,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
                i915_gem_object_put_pages(obj);
@@ -2566,11 +2716,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
                goto search_free;
        }
-       atomic_inc(&dev->gtt_count);
-       atomic_add(obj->size, &dev->gtt_memory);
 
        /* keep track of bounds object by adding it to the inactive list */
-       list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+       list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+       i915_gem_info_add_gtt(dev_priv, obj->size);
 
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2579,6 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+       obj_priv->gtt_offset = obj_priv->gtt_space->start;
        trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
 
        return 0;
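
i915_gem_object_bind_to_gtt() retries via goto search_free: when no GTT block fits, it evicts something and tries again. A terminating sketch of that allocate-or-evict loop with hypothetical helpers:

#include <errno.h>
#include <stdio.h>

/* Hypothetical helpers: gtt_alloc() fails with -ENOSPC until an
 * eviction has freed a block; 'evictable' caps the retries so the
 * sketch terminates. */
static int gtt_alloc(int free_space) { return free_space ? 0 : -ENOSPC; }

static int bind_to_gtt(int free_space, int evictable)
{
        int ret;

search_free:
        ret = gtt_alloc(free_space);
        if (ret == -ENOSPC) {
                if (evictable-- == 0)
                        return -ENOSPC; /* nothing left to evict */
                free_space = 1;         /* eviction freed a block */
                goto search_free;
        }
        return ret;
}

int main(void)
{
        printf("%d %d\n", bind_to_gtt(0, 1), bind_to_gtt(0, 0));
        return 0;
}
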
@@ -2603,25 +2753,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+                                      bool pipelined)
 {
        struct drm_device *dev = obj->dev;
        uint32_t old_write_domain;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
 
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
-       i915_gem_flush(dev, 0, obj->write_domain);
-       if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
-               return -ENOMEM;
+       i915_gem_flush_ring(dev, NULL,
+                           to_intel_bo(obj)->ring,
+                           0, obj->write_domain);
+       BUG_ON(obj->write_domain);
 
        trace_i915_gem_object_change_domain(obj,
                                            obj->read_domains,
                                            old_write_domain);
-       return 0;
+
+       if (pipelined)
+               return 0;
+
+       return i915_gem_object_wait_rendering(obj, true);
 }
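
The new `pipelined` parameter changes the contract of i915_gem_object_flush_gpu_write_domain(): the flush is always queued on the ring, but the caller only blocks on rendering when the result is not being consumed by the GPU itself. A minimal userspace model of that contract follows; every name in it is an illustrative stand-in, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's state, not the real API. */
struct mock_obj {
        unsigned write_domain;  /* non-zero: GPU caches hold dirty data */
        bool     busy;          /* rendering still outstanding */
};

static void mock_flush_ring(struct mock_obj *obj)
{
        printf("queue flush of domains %#x\n", obj->write_domain);
        obj->write_domain = 0;  /* the flush empties the GPU write domain */
}

static int mock_wait_rendering(struct mock_obj *obj)
{
        printf("blocking until rendering completes\n");
        obj->busy = false;
        return 0;
}

/* The flush is always queued; the wait is skipped for pipelined users. */
static int mock_flush_gpu_write_domain(struct mock_obj *obj, bool pipelined)
{
        if (obj->write_domain == 0)
                return 0;

        mock_flush_ring(obj);

        if (pipelined)
                return 0;       /* next ring command is ordered after the flush */

        return mock_wait_rendering(obj);
}

int main(void)
{
        struct mock_obj a = { .write_domain = 0x2, .busy = true };

        mock_flush_gpu_write_domain(&a, true);  /* queue only */
        a.write_domain = 0x2;
        mock_flush_gpu_write_domain(&a, false); /* queue + wait */
        return 0;
}
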
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2665,26 +2820,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
                                            old_write_domain);
 }
 
-int
-i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
-{
-       int ret = 0;
-
-       switch (obj->write_domain) {
-       case I915_GEM_DOMAIN_GTT:
-               i915_gem_object_flush_gtt_write_domain(obj);
-               break;
-       case I915_GEM_DOMAIN_CPU:
-               i915_gem_object_flush_cpu_write_domain(obj);
-               break;
-       default:
-               ret = i915_gem_object_flush_gpu_write_domain(obj);
-               break;
-       }
-
-       return ret;
-}
-
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  *
@@ -2702,32 +2837,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
 
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj);
-       if (ret != 0)
-               return ret;
+       i915_gem_object_flush_cpu_write_domain(obj);
+
+       if (write) {
+               ret = i915_gem_object_wait_rendering(obj, true);
+               if (ret)
+                       return ret;
+       }
 
        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
 
-       /* If we're writing through the GTT domain, then CPU and GPU caches
-        * will need to be invalidated at next use.
-        */
-       if (write)
-               obj->read_domains &= I915_GEM_DOMAIN_GTT;
-
-       i915_gem_object_flush_cpu_write_domain(obj);
-
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
        obj->read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
+               obj->read_domains = I915_GEM_DOMAIN_GTT;
                obj->write_domain = I915_GEM_DOMAIN_GTT;
                obj_priv->dirty = 1;
        }
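
The domain bookkeeping above is pure bit manipulation: a read adds GTT to the set of read domains, while a write collapses the read set to GTT alone and marks the object dirty. A self-contained sketch of those transitions, with domain values invented for the example rather than taken from the driver:

#include <stdio.h>

#define DOMAIN_CPU 0x1  /* illustrative values, not the driver's */
#define DOMAIN_GTT 0x40

struct domains {
        unsigned read;  /* caches that may hold a copy of the object */
        unsigned write; /* at most one writing domain at a time */
};

static void set_to_gtt(struct domains *d, int write)
{
        d->read |= DOMAIN_GTT;  /* reading through the GTT is now allowed */
        if (write) {
                /* A writer invalidates every other cached copy. */
                d->read = DOMAIN_GTT;
                d->write = DOMAIN_GTT;
        }
}

int main(void)
{
        struct domains d = { .read = DOMAIN_CPU, .write = 0 };

        set_to_gtt(&d, 0);
        printf("read-only: read=%#x write=%#x\n", d.read, d.write);

        set_to_gtt(&d, 1);
        printf("writable:  read=%#x write=%#x\n", d.read, d.write);
        return 0;
}
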
@@ -2744,51 +2875,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  * wait, as during modesetting we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+                                    bool pipelined)
 {
-       struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       uint32_t old_write_domain, old_read_domains;
+       uint32_t old_read_domains;
        int ret;
 
        /* Not valid to be called on unbound objects. */
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, true);
        if (ret)
                return ret;
 
-       /* Wait on any GPU rendering and flushing to occur. */
-       if (obj_priv->active) {
-#if WATCH_BUF
-               DRM_INFO("%s: object %p wait for seqno %08x\n",
-                         __func__, obj, obj_priv->last_rendering_seqno);
-#endif
-               ret = i915_do_wait_request(dev,
-                               obj_priv->last_rendering_seqno,
-                               0,
-                               obj_priv->ring);
-               if (ret != 0)
+       /* Currently, we are always called from a non-interruptible context. */
+       if (!pipelined) {
+               ret = i915_gem_object_wait_rendering(obj, false);
+               if (ret)
                        return ret;
        }
 
        i915_gem_object_flush_cpu_write_domain(obj);
 
-       old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
-
-       /* It should now be out of any other write domains, and we can update
-        * the domain values for our changes.
-        */
-       BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-       obj->read_domains = I915_GEM_DOMAIN_GTT;
-       obj->write_domain = I915_GEM_DOMAIN_GTT;
-       obj_priv->dirty = 1;
+       obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
-                                           old_write_domain);
+                                           obj->write_domain);
 
        return 0;
 }
@@ -2805,12 +2921,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
-       if (ret)
-               return ret;
-
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
 
@@ -2821,6 +2932,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         */
        i915_gem_object_set_to_full_cpu_read_domain(obj);
 
+       if (write) {
+               ret = i915_gem_object_wait_rendering(obj, true);
+               if (ret)
+                       return ret;
+       }
+
        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
 
@@ -2840,7 +2957,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         * need to be invalidated at next use.
         */
        if (write) {
-               obj->read_domains &= I915_GEM_DOMAIN_CPU;
+               obj->read_domains = I915_GEM_DOMAIN_CPU;
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }
 
@@ -2963,26 +3080,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  *             drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+                                 struct intel_ring_buffer *ring)
 {
        struct drm_device               *dev = obj->dev;
-       drm_i915_private_t              *dev_priv = dev->dev_private;
+       struct drm_i915_private         *dev_priv = dev->dev_private;
        struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
        uint32_t                        old_read_domains;
 
-       BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
-       BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
-
        intel_mark_busy(dev, obj);
 
-#if WATCH_BUF
-       DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
-                __func__, obj,
-                obj->read_domains, obj->pending_read_domains,
-                obj->write_domain, obj->pending_write_domain);
-#endif
        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
@@ -3009,13 +3118,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
-       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
-#if WATCH_BUF
-               DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
-                        __func__, flush_domains, invalidate_domains);
-#endif
+       if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);
-       }
 
        old_read_domains = obj->read_domains;
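
Computing the global invalidate/flush sets is bit arithmetic over the per-object domain masks: any brand-new read domain may hold stale data and must be invalidated (the `pending_read_domains & ~read_domains` line above), and a write domain being left behind must be flushed. A hedged, userspace-only sketch with invented domain values:

#include <stdio.h>

#define GPU_DOMAINS 0x30        /* illustrative mask of GPU domains */

struct obj_state {
        unsigned read, write;                   /* current domains */
        unsigned pending_read, pending_write;   /* wanted by this execbuf */
};

/* Accumulate what must be flushed and what must be invalidated. */
static void accumulate(const struct obj_state *o,
                       unsigned *invalidate, unsigned *flush)
{
        /* Moving to a new write domain: flush the old one. */
        if (o->write && o->pending_write != o->write)
                *flush |= o->write;

        /* Any brand-new read domain may hold stale data. */
        *invalidate |= o->pending_read & ~o->read;
}

int main(void)
{
        struct obj_state o = {
                .read = 0x10, .write = 0x10,
                .pending_read = 0x20, .pending_write = 0x20,
        };
        unsigned invalidate = 0, flush = 0;

        accumulate(&o, &invalidate, &flush);
        printf("invalidate=%#x flush=%#x gpu-flush=%s\n",
               invalidate, flush,
               (flush & GPU_DOMAINS) ? "yes" : "no");
        return 0;
}
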
 
@@ -3029,21 +3133,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
                obj->pending_write_domain = obj->write_domain;
        obj->read_domains = obj->pending_read_domains;
 
-       if (flush_domains & I915_GEM_GPU_DOMAINS) {
-               if (obj_priv->ring == &dev_priv->render_ring)
-                       dev_priv->flush_rings |= FLUSH_RENDER_RING;
-               else if (obj_priv->ring == &dev_priv->bsd_ring)
-                       dev_priv->flush_rings |= FLUSH_BSD_RING;
-       }
-
        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
-#if WATCH_BUF
-       DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
-                __func__,
-                obj->read_domains, obj->write_domain,
-                dev->invalidate_domains, dev->flush_domains);
-#endif
+       if (flush_domains & I915_GEM_GPU_DOMAINS)
+               dev_priv->mm.flush_rings |= obj_priv->ring->id;
+       if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+               dev_priv->mm.flush_rings |= ring->id;
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
@@ -3106,12 +3201,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
        if (offset == 0 && size == obj->size)
                return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
-       if (ret)
-               return ret;
-
-       /* Wait on any GPU rendering and flushing to occur. */
-       ret = i915_gem_object_wait_rendering(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
        i915_gem_object_flush_gtt_write_domain(obj);
@@ -3164,66 +3254,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
 static int
-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
-                                struct drm_file *file_priv,
-                                struct drm_i915_gem_exec_object2 *entry,
-                                struct drm_i915_gem_relocation_entry *relocs)
+i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
+                            struct drm_file *file_priv,
+                            struct drm_i915_gem_exec_object2 *entry)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       int i, ret;
-       void __iomem *reloc_page;
-       bool need_fence;
-
-       need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-                    obj_priv->tiling_mode != I915_TILING_NONE;
-
-       /* Check fence reg constraints and rebind if necessary */
-       if (need_fence &&
-           !i915_gem_object_fence_offset_ok(obj,
-                                            obj_priv->tiling_mode)) {
-               ret = i915_gem_object_unbind(obj);
-               if (ret)
-                       return ret;
-       }
+       struct drm_i915_gem_relocation_entry __user *user_relocs;
+       struct drm_gem_object *target_obj = NULL;
+       uint32_t target_handle = 0;
+       int i, ret = 0;
 
-       /* Choose the GTT offset for our buffer and put it there. */
-       ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
-       if (ret)
-               return ret;
+       user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+       for (i = 0; i < entry->relocation_count; i++) {
+               struct drm_i915_gem_relocation_entry reloc;
+               uint32_t target_offset;
 
-       /*
-        * Pre-965 chips need a fence register set up in order to
-        * properly handle blits to/from tiled surfaces.
-        */
-       if (need_fence) {
-               ret = i915_gem_object_get_fence_reg(obj);
-               if (ret != 0) {
-                       i915_gem_object_unpin(obj);
-                       return ret;
+               if (__copy_from_user_inatomic(&reloc,
+                                             user_relocs + i,
+                                             sizeof(reloc))) {
+                       ret = -EFAULT;
+                       break;
                }
-       }
 
-       entry->offset = obj_priv->gtt_offset;
+               if (reloc.target_handle != target_handle) {
+                       drm_gem_object_unreference(target_obj);
 
-       /* Apply the relocations, using the GTT aperture to avoid cache
-        * flushing requirements.
-        */
-       for (i = 0; i < entry->relocation_count; i++) {
-               struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
-               struct drm_gem_object *target_obj;
-               struct drm_i915_gem_object *target_obj_priv;
-               uint32_t reloc_val, reloc_offset;
-               uint32_t __iomem *reloc_entry;
-
-               target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-                                                  reloc->target_handle);
-               if (target_obj == NULL) {
-                       i915_gem_object_unpin(obj);
-                       return -ENOENT;
+                       target_obj = drm_gem_object_lookup(dev, file_priv,
+                                                          reloc.target_handle);
+                       if (target_obj == NULL) {
+                               ret = -ENOENT;
+                               break;
+                       }
+
+                       target_handle = reloc.target_handle;
                }
-               target_obj_priv = to_intel_bo(target_obj);
+               target_offset = to_intel_bo(target_obj)->gtt_offset;
 
 #if WATCH_RELOC
                DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3231,268 +3297,266 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                         "presumed %08x delta %08x\n",
                         __func__,
                         obj,
-                        (int) reloc->offset,
-                        (int) reloc->target_handle,
-                        (int) reloc->read_domains,
-                        (int) reloc->write_domain,
-                        (int) target_obj_priv->gtt_offset,
-                        (int) reloc->presumed_offset,
-                        reloc->delta);
+                        (int) reloc.offset,
+                        (int) reloc.target_handle,
+                        (int) reloc.read_domains,
+                        (int) reloc.write_domain,
+                        (int) target_offset,
+                        (int) reloc.presumed_offset,
+                        reloc.delta);
 #endif
 
                /* The target buffer should have appeared before us in the
                 * exec_object list, so it should have a GTT space bound by now.
                 */
-               if (target_obj_priv->gtt_space == NULL) {
+               if (target_offset == 0) {
                        DRM_ERROR("No GTT space found for object %d\n",
-                                 reloc->target_handle);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 reloc.target_handle);
+                       ret = -EINVAL;
+                       break;
                }
 
                /* Validate that the target is in a valid r/w GPU domain */
-               if (reloc->write_domain & (reloc->write_domain - 1)) {
+               if (reloc.write_domain & (reloc.write_domain - 1)) {
                        DRM_ERROR("reloc with multiple write domains: "
                                  "obj %p target %d offset %d "
                                  "read %08x write %08x",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset,
-                                 reloc->read_domains,
-                                 reloc->write_domain);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset,
+                                 reloc.read_domains,
+                                 reloc.write_domain);
+                       ret = -EINVAL;
+                       break;
                }
-               if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
-                   reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+               if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+                   reloc.read_domains & I915_GEM_DOMAIN_CPU) {
                        DRM_ERROR("reloc with read/write CPU domains: "
                                  "obj %p target %d offset %d "
                                  "read %08x write %08x",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset,
-                                 reloc->read_domains,
-                                 reloc->write_domain);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset,
+                                 reloc.read_domains,
+                                 reloc.write_domain);
+                       ret = -EINVAL;
+                       break;
                }
-               if (reloc->write_domain && target_obj->pending_write_domain &&
-                   reloc->write_domain != target_obj->pending_write_domain) {
+               if (reloc.write_domain && target_obj->pending_write_domain &&
+                   reloc.write_domain != target_obj->pending_write_domain) {
                        DRM_ERROR("Write domain conflict: "
                                  "obj %p target %d offset %d "
                                  "new %08x old %08x\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset,
-                                 reloc->write_domain,
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset,
+                                 reloc.write_domain,
                                  target_obj->pending_write_domain);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       break;
                }
 
-               target_obj->pending_read_domains |= reloc->read_domains;
-               target_obj->pending_write_domain |= reloc->write_domain;
+               target_obj->pending_read_domains |= reloc.read_domains;
+               target_obj->pending_write_domain |= reloc.write_domain;
 
                /* If the relocation already has the right value in it, no
                 * more work needs to be done.
                 */
-               if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
-                       drm_gem_object_unreference(target_obj);
+               if (target_offset == reloc.presumed_offset)
                        continue;
-               }
 
                /* Check that the relocation address is valid... */
-               if (reloc->offset > obj->size - 4) {
+               if (reloc.offset > obj->base.size - 4) {
                        DRM_ERROR("Relocation beyond object bounds: "
                                  "obj %p target %d offset %d size %d.\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset, (int) obj->size);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset, (int) obj->base.size);
+                       ret = -EINVAL;
+                       break;
                }
-               if (reloc->offset & 3) {
+               if (reloc.offset & 3) {
                        DRM_ERROR("Relocation not 4-byte aligned: "
                                  "obj %p target %d offset %d.\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->offset);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.offset);
+                       ret = -EINVAL;
+                       break;
                }
 
                /* and points to somewhere within the target object. */
-               if (reloc->delta >= target_obj->size) {
+               if (reloc.delta >= target_obj->size) {
                        DRM_ERROR("Relocation beyond target object bounds: "
                                  "obj %p target %d delta %d size %d.\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->delta, (int) target_obj->size);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
-               }
-
-               ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-               if (ret != 0) {
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.delta, (int) target_obj->size);
+                       ret = -EINVAL;
+                       break;
                }
 
-               /* Map the page containing the relocation we're going to
-                * perform.
-                */
-               reloc_offset = obj_priv->gtt_offset + reloc->offset;
-               reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                                     (reloc_offset &
-                                                      ~(PAGE_SIZE - 1)),
-                                                     KM_USER0);
-               reloc_entry = (uint32_t __iomem *)(reloc_page +
-                                                  (reloc_offset & (PAGE_SIZE - 1)));
-               reloc_val = target_obj_priv->gtt_offset + reloc->delta;
-
-#if WATCH_BUF
-               DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
-                         obj, (unsigned int) reloc->offset,
-                         readl(reloc_entry), reloc_val);
-#endif
-               writel(reloc_val, reloc_entry);
-               io_mapping_unmap_atomic(reloc_page, KM_USER0);
-
-               /* The updated presumed offset for this entry will be
-                * copied back out to the user.
-                */
-               reloc->presumed_offset = target_obj_priv->gtt_offset;
-
-               drm_gem_object_unreference(target_obj);
-       }
-
-#if WATCH_BUF
-       if (0)
-               i915_gem_dump_object(obj, 128, __func__, ~0);
-#endif
-       return 0;
-}
-
-/* Throttle our rendering by waiting until the ring has completed our requests
- * emitted over 20 msec ago.
- *
- * Note that if we were to use the current jiffies each time around the loop,
- * we wouldn't escape the function with any frames outstanding if the time to
- * render a frame was over 20ms.
- *
- * This should get us reasonable parallelism between CPU and GPU but also
- * relatively low latency when blocking on a particular request to finish.
- */
-static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
-{
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-       int ret = 0;
-       unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+               reloc.delta += target_offset;
+               if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+                       uint32_t page_offset = reloc.offset & ~PAGE_MASK;
+                       char *vaddr;
 
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list)) {
-               struct drm_i915_gem_request *request;
+                       vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
+                       *(uint32_t *)(vaddr + page_offset) = reloc.delta;
+                       kunmap_atomic(vaddr);
+               } else {
+                       uint32_t __iomem *reloc_entry;
+                       void __iomem *reloc_page;
 
-               request = list_first_entry(&i915_file_priv->mm.request_list,
-                                          struct drm_i915_gem_request,
-                                          client_list);
+                       ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+                       if (ret)
+                               break;
 
-               if (time_after_eq(request->emitted_jiffies, recent_enough))
-                       break;
+                       /* Map the page containing the relocation we're going to perform.  */
+                       reloc.offset += obj->gtt_offset;
+                       reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+                                                             reloc.offset & PAGE_MASK);
+                       reloc_entry = (uint32_t __iomem *)
+                               (reloc_page + (reloc.offset & ~PAGE_MASK));
+                       iowrite32(reloc.delta, reloc_entry);
+                       io_mapping_unmap_atomic(reloc_page);
+               }
 
-               ret = i915_wait_request(dev, request->seqno, request->ring);
-               if (ret != 0)
-                       break;
+               /* and update the user's relocation entry */
+               reloc.presumed_offset = target_offset;
+               if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+                                           &reloc.presumed_offset,
+                                           sizeof(reloc.presumed_offset))) {
+                       ret = -EFAULT;
+                       break;
+               }
        }
-       mutex_unlock(&dev->struct_mutex);
 
+       drm_gem_object_unreference(target_obj);
        return ret;
 }
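
Each relocation reduces to: take the target's final GTT offset, add the relocation delta, and poke the 32-bit result into the batch at reloc.offset, recording the presumed offset so an already-correct entry can be skipped next time. A standalone model of just that arithmetic, with fake buffers and none of the kmap/GTT mapping above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical miniature of a relocation entry. */
struct reloc {
        uint32_t offset;                /* where in the batch to write */
        uint32_t delta;                 /* offset inside the target object */
        uint32_t presumed_offset;       /* target address userspace assumed */
};

static void apply_reloc(uint8_t *batch, size_t batch_size,
                        struct reloc *r, uint32_t target_gtt_offset)
{
        uint32_t value = target_gtt_offset + r->delta;

        /* Skip the write if userspace guessed right (the common case). */
        if (r->presumed_offset == target_gtt_offset)
                return;

        if (r->offset > batch_size - 4 || (r->offset & 3))
                return; /* out of bounds or unaligned: the driver errors out */

        memcpy(batch + r->offset, &value, sizeof(value));
        r->presumed_offset = target_gtt_offset; /* copied back to userspace */
}

int main(void)
{
        uint8_t batch[64] = { 0 };
        struct reloc r = { .offset = 8, .delta = 0x100, .presumed_offset = 0 };
        uint32_t patched;

        apply_reloc(batch, sizeof(batch), &r, 0x10000);
        memcpy(&patched, batch + 8, sizeof(patched));
        printf("batch[8..11] patched to %#x\n", (unsigned)patched);
        return 0;
}
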
 
 static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
-                             uint32_t buffer_count,
-                             struct drm_i915_gem_relocation_entry **relocs)
+i915_gem_execbuffer_pin(struct drm_device *dev,
+                       struct drm_file *file,
+                       struct drm_gem_object **object_list,
+                       struct drm_i915_gem_exec_object2 *exec_list,
+                       int count)
 {
-       uint32_t reloc_count = 0, reloc_index = 0, i;
-       int ret;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret, i, retry;
 
-       *relocs = NULL;
-       for (i = 0; i < buffer_count; i++) {
-               if (reloc_count + exec_list[i].relocation_count < reloc_count)
-                       return -EINVAL;
-               reloc_count += exec_list[i].relocation_count;
-       }
+       /* attempt to pin all of the buffers into the GTT */
+       for (retry = 0; retry < 2; retry++) {
+               ret = 0;
+               for (i = 0; i < count; i++) {
+                       struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
+                       struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+                       bool need_fence =
+                               entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+                               obj->tiling_mode != I915_TILING_NONE;
+
+                       /* Check fence reg constraints and rebind if necessary */
+                       if (need_fence &&
+                           !i915_gem_object_fence_offset_ok(&obj->base,
+                                                            obj->tiling_mode)) {
+                               ret = i915_gem_object_unbind(&obj->base);
+                               if (ret)
+                                       break;
+                       }
 
-       *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
-       if (*relocs == NULL) {
-               DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
-               return -ENOMEM;
-       }
+                       ret = i915_gem_object_pin(&obj->base, entry->alignment);
+                       if (ret)
+                               break;
 
-       for (i = 0; i < buffer_count; i++) {
-               struct drm_i915_gem_relocation_entry __user *user_relocs;
+                       /*
+                        * Pre-965 chips need a fence register set up in order
+                        * to properly handle blits to/from tiled surfaces.
+                        */
+                       if (need_fence) {
+                               ret = i915_gem_object_get_fence_reg(&obj->base, true);
+                               if (ret) {
+                                       i915_gem_object_unpin(&obj->base);
+                                       break;
+                               }
 
-               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+                               dev_priv->fence_regs[obj->fence_reg].gpu = true;
+                       }
 
-               ret = copy_from_user(&(*relocs)[reloc_index],
-                                    user_relocs,
-                                    exec_list[i].relocation_count *
-                                    sizeof(**relocs));
-               if (ret != 0) {
-                       drm_free_large(*relocs);
-                       *relocs = NULL;
-                       return -EFAULT;
+                       entry->offset = obj->gtt_offset;
                }
 
-               reloc_index += exec_list[i].relocation_count;
+               while (i--)
+                       i915_gem_object_unpin(object_list[i]);
+
+               if (ret == 0)
+                       break;
+
+               if (ret != -ENOSPC || retry)
+                       return ret;
+
+               ret = i915_gem_evict_everything(dev);
+               if (ret)
+                       return ret;
        }
 
        return 0;
 }
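
i915_gem_execbuffer_pin() is a two-attempt reservation pass: pin everything optimistically, drop the temporary pins once every object has a known offset, and on -ENOSPC evict the whole aperture once and retry; any other error, or a second -ENOSPC, is fatal. The shape of that loop with a toy allocator (all names hypothetical):

#include <errno.h>
#include <stdio.h>

static int space = 2;   /* toy aperture with room for two objects */

static int toy_pin(int obj)
{
        if (space == 0)
                return -ENOSPC;
        space--;
        printf("pinned object %d\n", obj);
        return 0;
}

static void toy_unpin(int obj)
{
        (void)obj;
        space++;
}

static int toy_evict_everything(void)
{
        printf("evicting everything\n");
        space = 3;      /* pretend eviction freed enough room */
        return 0;
}

static int pin_all(int count)
{
        int ret = 0, i, retry;

        for (retry = 0; retry < 2; retry++) {
                ret = 0;
                for (i = 0; i < count; i++) {
                        ret = toy_pin(i);
                        if (ret)
                                break;
                }

                /* Offsets are now known; the temporary pins are dropped. */
                while (i--)
                        toy_unpin(i);

                if (ret == 0)
                        break;
                if (ret != -ENOSPC || retry)
                        return ret;     /* hard failure, or already retried */

                ret = toy_evict_everything();
                if (ret)
                        return ret;
        }

        return ret;
}

int main(void)
{
        return pin_all(3) ? 1 : 0;
}
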
 
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
 static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
-                           uint32_t buffer_count,
-                           struct drm_i915_gem_relocation_entry *relocs)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-       uint32_t reloc_count = 0, i;
-       int ret = 0;
-
-       if (relocs == NULL)
-           return 0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
+       unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+       struct drm_i915_gem_request *request;
+       struct intel_ring_buffer *ring = NULL;
+       u32 seqno = 0;
+       int ret;
 
-       for (i = 0; i < buffer_count; i++) {
-               struct drm_i915_gem_relocation_entry __user *user_relocs;
-               int unwritten;
+       spin_lock(&file_priv->mm.lock);
+       list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+               if (time_after_eq(request->emitted_jiffies, recent_enough))
+                       break;
 
-               user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+               ring = request->ring;
+               seqno = request->seqno;
+       }
+       spin_unlock(&file_priv->mm.lock);
 
-               unwritten = copy_to_user(user_relocs,
-                                        &relocs[reloc_count],
-                                        exec_list[i].relocation_count *
-                                        sizeof(*relocs));
+       if (seqno == 0)
+               return 0;
 
-               if (unwritten) {
-                       ret = -EFAULT;
-                       goto err;
-               }
+       ret = 0;
+       if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+               /* And wait for the seqno to pass without holding any locks,
+                * avoiding extra latency for others. This is safe as the irq
+                * generation is designed to be run atomically and so is
+                * lockless.
+                */
+               ring->user_irq_get(dev, ring);
+               ret = wait_event_interruptible(ring->irq_queue,
+                                              i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+                                              || atomic_read(&dev_priv->mm.wedged));
+               ring->user_irq_put(dev, ring);
 
-               reloc_count += exec_list[i].relocation_count;
+               if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+                       ret = -EIO;
        }
 
-err:
-       drm_free_large(relocs);
+       if (ret == 0)
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
        return ret;
 }
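
The throttle scan needs only a wraparound-safe time comparison: walk the per-file request list until the first request younger than 20 ms and remember the newest one older than that. The comparison trick, which is what time_after_eq() does under the hood, can be shown in plain C:

#include <stdio.h>

/* Wraparound-safe 'a >= b' for free-running tick counters,
 * the same signed-subtraction trick as the kernel's time_after_eq(). */
static int ticks_after_eq(unsigned long a, unsigned long b)
{
        return (long)(a - b) >= 0;
}

int main(void)
{
        unsigned long now = 1000;               /* pretend jiffies */
        unsigned long recent_enough = now - 20; /* 20 ticks ~ 20 ms at HZ=1000 */
        unsigned long emitted[] = { 970, 978, 985, 995 };
        unsigned long wait_for = 0;
        int found = 0;
        unsigned i;

        for (i = 0; i < sizeof(emitted) / sizeof(emitted[0]); i++) {
                if (ticks_after_eq(emitted[i], recent_enough))
                        break;          /* younger than 20 ms: stop here */
                wait_for = emitted[i];  /* newest request old enough to wait on */
                found = 1;
        }

        if (found)
                printf("throttle: wait for request emitted at %lu\n", wait_for);
        else
                printf("throttle: nothing old enough, no wait\n");
        return 0;
}
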
 
 static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
-                          uint64_t exec_offset)
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
+                         uint64_t exec_offset)
 {
        uint32_t exec_start, exec_len;
 
@@ -3509,44 +3573,32 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
 }
 
 static int
-i915_gem_wait_for_pending_flip(struct drm_device *dev,
-                              struct drm_gem_object **object_list,
-                              int count)
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+                  int count)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
-       DEFINE_WAIT(wait);
-       int i, ret = 0;
+       int i;
 
-       for (;;) {
-               prepare_to_wait(&dev_priv->pending_flip_queue,
-                               &wait, TASK_INTERRUPTIBLE);
-               for (i = 0; i < count; i++) {
-                       obj_priv = to_intel_bo(object_list[i]);
-                       if (atomic_read(&obj_priv->pending_flip) > 0)
-                               break;
-               }
-               if (i == count)
-                       break;
+       for (i = 0; i < count; i++) {
+               char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+               size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
 
-               if (!signal_pending(current)) {
-                       mutex_unlock(&dev->struct_mutex);
-                       schedule();
-                       mutex_lock(&dev->struct_mutex);
-                       continue;
-               }
-               ret = -ERESTARTSYS;
-               break;
+               if (!access_ok(VERIFY_READ, ptr, length))
+                       return -EFAULT;
+
+               /* we may also need to update the presumed offsets */
+               if (!access_ok(VERIFY_WRITE, ptr, length))
+                       return -EFAULT;
+
+               if (fault_in_pages_readable(ptr, length))
+                       return -EFAULT;
        }
-       finish_wait(&dev_priv->pending_flip_queue, &wait);
 
-       return ret;
+       return 0;
 }
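
validate_exec_list() front-loads the user-pointer checks so the later __copy_*_inatomic() calls can run on the fast path. A sketch also makes visible that the byte count is a product of two userspace-controlled values; the overflow guard below is this sketch's own addition, not part of the patch, and range_ok() is a made-up stand-in for access_ok():

#include <stdint.h>
#include <stddef.h>
#include <errno.h>
#include <stdio.h>

struct exec_entry {
        uint64_t relocs_ptr;
        uint32_t relocation_count;
};

#define RELOC_SIZE 32u  /* stand-in for sizeof(relocation entry) */

/* Hypothetical stand-in for access_ok(): here, just a NULL/limit check. */
static int range_ok(uint64_t ptr, size_t len)
{
        return ptr != 0 && len <= (1u << 20);
}

static int validate(const struct exec_entry *exec, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                size_t length;

                /* Guard the product before trusting it as a byte count. */
                if (exec[i].relocation_count > SIZE_MAX / RELOC_SIZE)
                        return -EINVAL;
                length = (size_t)exec[i].relocation_count * RELOC_SIZE;

                /* Relocations are read, and presumed offsets written back. */
                if (!range_ok(exec[i].relocs_ptr, length))
                        return -EFAULT;
        }
        return 0;
}

int main(void)
{
        struct exec_entry good = { .relocs_ptr = 0x1000, .relocation_count = 4 };

        printf("validate: %d\n", validate(&good, 1));
        return 0;
}
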
 
-
-int
+static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv,
+                      struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec_list)
 {
@@ -3555,26 +3607,47 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct drm_gem_object *batch_obj;
        struct drm_i915_gem_object *obj_priv;
        struct drm_clip_rect *cliprects = NULL;
-       struct drm_i915_gem_relocation_entry *relocs = NULL;
-       int ret = 0, ret2, i, pinned = 0;
+       struct drm_i915_gem_request *request = NULL;
+       int ret, i, flips;
        uint64_t exec_offset;
-       uint32_t seqno, flush_domains, reloc_index;
-       int pin_tries, flips;
 
        struct intel_ring_buffer *ring = NULL;
 
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
+
+       ret = validate_exec_list(exec_list, args->buffer_count);
+       if (ret)
+               return ret;
+
 #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
 #endif
-       if (args->flags & I915_EXEC_BSD) {
+       switch (args->flags & I915_EXEC_RING_MASK) {
+       case I915_EXEC_DEFAULT:
+       case I915_EXEC_RENDER:
+               ring = &dev_priv->render_ring;
+               break;
+       case I915_EXEC_BSD:
                if (!HAS_BSD(dev)) {
-                       DRM_ERROR("execbuf with wrong flag\n");
+                       DRM_ERROR("execbuf with invalid ring (BSD)\n");
                        return -EINVAL;
                }
                ring = &dev_priv->bsd_ring;
-       } else {
-               ring = &dev_priv->render_ring;
+               break;
+       case I915_EXEC_BLT:
+               if (!HAS_BLT(dev)) {
+                       DRM_ERROR("execbuf with invalid ring (BLT)\n");
+                       return -EINVAL;
+               }
+               ring = &dev_priv->blt_ring;
+               break;
+       default:
+               DRM_ERROR("execbuf with unknown ring: %d\n",
+                         (int)(args->flags & I915_EXEC_RING_MASK));
+               return -EINVAL;
        }
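
Ring selection is a straight decode of the low flag bits, rejecting rings the hardware lacks. In miniature, with invented enum values and HAS_* predicates:

#include <errno.h>
#include <stdio.h>

#define EXEC_RING_MASK 0x7      /* illustrative, mirrors I915_EXEC_RING_MASK */
enum { RING_DEFAULT, RING_RENDER, RING_BSD, RING_BLT };

static int select_ring(unsigned flags, int has_bsd, int has_blt, int *ring)
{
        switch (flags & EXEC_RING_MASK) {
        case RING_DEFAULT:
        case RING_RENDER:
                *ring = RING_RENDER;
                return 0;
        case RING_BSD:
                if (!has_bsd)
                        return -EINVAL; /* flag set, hardware absent */
                *ring = RING_BSD;
                return 0;
        case RING_BLT:
                if (!has_blt)
                        return -EINVAL;
                *ring = RING_BLT;
                return 0;
        default:
                return -EINVAL;         /* unknown ring request */
        }
}

int main(void)
{
        int ring;

        printf("blt on gen without BLT: %d\n",
               select_ring(RING_BLT, 1, 0, &ring));
        printf("default: %d -> ring %d\n",
               select_ring(0, 1, 1, &ring), ring);
        return 0;
}
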
 
        if (args->buffer_count < 1) {
@@ -3609,20 +3682,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                }
        }
 
-       ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
-                                           &relocs);
-       if (ret != 0)
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL) {
+               ret = -ENOMEM;
                goto pre_mutex_err;
+       }
 
-       mutex_lock(&dev->struct_mutex);
-
-       i915_verify_inactive(dev, __FILE__, __LINE__);
-
-       if (atomic_read(&dev_priv->mm.wedged)) {
-               mutex_unlock(&dev->struct_mutex);
-               ret = -EIO;
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
                goto pre_mutex_err;
-       }
 
        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
@@ -3631,9 +3699,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
 
        /* Look up object handles */
-       flips = 0;
        for (i = 0; i < args->buffer_count; i++) {
-               object_list[i] = drm_gem_object_lookup(dev, file_priv,
+               object_list[i] = drm_gem_object_lookup(dev, file,
                                                       exec_list[i].handle);
                if (object_list[i] == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
@@ -3654,75 +3721,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        goto err;
                }
                obj_priv->in_execbuffer = true;
-               flips += atomic_read(&obj_priv->pending_flip);
-       }
-
-       if (flips > 0) {
-               ret = i915_gem_wait_for_pending_flip(dev, object_list,
-                                                    args->buffer_count);
-               if (ret)
-                       goto err;
        }
 
-       /* Pin and relocate */
-       for (pin_tries = 0; ; pin_tries++) {
-               ret = 0;
-               reloc_index = 0;
-
-               for (i = 0; i < args->buffer_count; i++) {
-                       object_list[i]->pending_read_domains = 0;
-                       object_list[i]->pending_write_domain = 0;
-                       ret = i915_gem_object_pin_and_relocate(object_list[i],
-                                                              file_priv,
-                                                              &exec_list[i],
-                                                              &relocs[reloc_index]);
-                       if (ret)
-                               break;
-                       pinned = i + 1;
-                       reloc_index += exec_list[i].relocation_count;
-               }
-               /* success */
-               if (ret == 0)
-                       break;
-
-               /* error other than GTT full, or we've already tried again */
-               if (ret != -ENOSPC || pin_tries >= 1) {
-                       if (ret != -ERESTARTSYS) {
-                               unsigned long long total_size = 0;
-                               int num_fences = 0;
-                               for (i = 0; i < args->buffer_count; i++) {
-                                       obj_priv = to_intel_bo(object_list[i]);
-
-                                       total_size += object_list[i]->size;
-                                       num_fences +=
-                                               exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
-                                               obj_priv->tiling_mode != I915_TILING_NONE;
-                               }
-                               DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
-                                         pinned+1, args->buffer_count,
-                                         total_size, num_fences,
-                                         ret);
-                               DRM_ERROR("%d objects [%d pinned], "
-                                         "%d object bytes [%d pinned], "
-                                         "%d/%d gtt bytes\n",
-                                         atomic_read(&dev->object_count),
-                                         atomic_read(&dev->pin_count),
-                                         atomic_read(&dev->object_memory),
-                                         atomic_read(&dev->pin_memory),
-                                         atomic_read(&dev->gtt_memory),
-                                         dev->gtt_total);
-                       }
-                       goto err;
-               }
-
-               /* unpin all of our buffers */
-               for (i = 0; i < pinned; i++)
-                       i915_gem_object_unpin(object_list[i]);
-               pinned = 0;
+       /* Move the objects en masse into the GTT, evicting if necessary. */
+       ret = i915_gem_execbuffer_pin(dev, file,
+                                     object_list, exec_list,
+                                     args->buffer_count);
+       if (ret)
+               goto err;
 
-               /* evict everyone we can from the aperture */
-               ret = i915_gem_evict_everything(dev);
-               if (ret && ret != -ENOSPC)
+       /* The objects are in their final locations, apply the relocations. */
+       for (i = 0; i < args->buffer_count; i++) {
+               struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+               obj->base.pending_read_domains = 0;
+               obj->base.pending_write_domain = 0;
+               ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+               if (ret)
                        goto err;
        }
 
@@ -3735,33 +3749,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
        batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
-       /* Sanity check the batch buffer, prior to moving objects */
-       exec_offset = exec_list[args->buffer_count - 1].offset;
-       ret = i915_gem_check_execbuffer (args, exec_offset);
+       /* Sanity check the batch buffer */
+       exec_offset = to_intel_bo(batch_obj)->gtt_offset;
+       ret = i915_gem_check_execbuffer(args, exec_offset);
        if (ret != 0) {
                DRM_ERROR("execbuf with invalid offset/length\n");
                goto err;
        }
 
-       i915_verify_inactive(dev, __FILE__, __LINE__);
-
        /* Zero the global flush/invalidate flags. These
         * will be modified as new domains are computed
         * for each object
         */
        dev->invalidate_domains = 0;
        dev->flush_domains = 0;
-       dev_priv->flush_rings = 0;
+       dev_priv->mm.flush_rings = 0;
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
 
                /* Compute new gpu domains and update invalidate/flush */
-               i915_gem_object_set_to_gpu_domain(obj);
+               i915_gem_object_set_to_gpu_domain(obj, ring);
        }
 
-       i915_verify_inactive(dev, __FILE__, __LINE__);
-
        if (dev->invalidate_domains | dev->flush_domains) {
 #if WATCH_EXEC
                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
@@ -3769,38 +3779,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         dev->invalidate_domains,
                         dev->flush_domains);
 #endif
-               i915_gem_flush(dev,
+               i915_gem_flush(dev, file,
                               dev->invalidate_domains,
-                              dev->flush_domains);
-               if (dev_priv->flush_rings & FLUSH_RENDER_RING)
-                       (void)i915_add_request(dev, file_priv,
-                                              dev->flush_domains,
-                                              &dev_priv->render_ring);
-               if (dev_priv->flush_rings & FLUSH_BSD_RING)
-                       (void)i915_add_request(dev, file_priv,
-                                              dev->flush_domains,
-                                              &dev_priv->bsd_ring);
+                              dev->flush_domains,
+                              dev_priv->mm.flush_rings);
        }
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
-               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
                uint32_t old_write_domain = obj->write_domain;
-
                obj->write_domain = obj->pending_write_domain;
-               if (obj->write_domain)
-                       list_move_tail(&obj_priv->gpu_write_list,
-                                      &dev_priv->mm.gpu_write_list);
-               else
-                       list_del_init(&obj_priv->gpu_write_list);
-
                trace_i915_gem_object_change_domain(obj,
                                                    obj->read_domains,
                                                    old_write_domain);
        }
 
-       i915_verify_inactive(dev, __FILE__, __LINE__);
-
 #if WATCH_COHERENCY
        for (i = 0; i < args->buffer_count; i++) {
                i915_gem_object_check_coherency(object_list[i],
@@ -3815,9 +3808,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                              ~0);
 #endif
 
+       /* Check for any pending flips. As we only maintain a flip queue depth
+        * of 1, we can simply insert a WAIT for the next display flip prior
+        * to executing the batch and avoid stalling the CPU.
+        */
+       flips = 0;
+       for (i = 0; i < args->buffer_count; i++) {
+               if (object_list[i]->write_domain)
+                       flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
+       }
+       if (flips) {
+               int plane, flip_mask;
+
+               for (plane = 0; flips >> plane; plane++) {
+                       if (((flips >> plane) & 1) == 0)
+                               continue;
+
+                       if (plane)
+                               flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+                       else
+                               flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+                       intel_ring_begin(dev, ring, 2);
+                       intel_ring_emit(dev, ring,
+                                       MI_WAIT_FOR_EVENT | flip_mask);
+                       intel_ring_emit(dev, ring, MI_NOOP);
+                       intel_ring_advance(dev, ring);
+               }
+       }
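
The pending-flip pass treats `flips` as a bitmask of planes, and `for (plane = 0; flips >> plane; plane++)` visits exactly the set bits up to the highest one, emitting one WAIT per pending plane. A tiny demonstration of the idiom (the wait masks are placeholders):

#include <stdio.h>

#define WAIT_PLANE_A 0x1        /* placeholder flip-wait masks */
#define WAIT_PLANE_B 0x2

int main(void)
{
        unsigned flips = 0x3;   /* planes 0 and 1 have flips pending */
        int plane;

        for (plane = 0; flips >> plane; plane++) {
                int flip_mask;

                if (((flips >> plane) & 1) == 0)
                        continue;       /* this plane has nothing pending */

                flip_mask = plane ? WAIT_PLANE_B : WAIT_PLANE_A;
                printf("emit WAIT_FOR_EVENT | %#x for plane %d\n",
                       flip_mask, plane);
        }
        return 0;
}
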
+
        /* Exec the batchbuffer */
        ret = ring->dispatch_gem_execbuffer(dev, ring, args,
-                       cliprects, exec_offset);
+                                           cliprects, exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
@@ -3827,38 +3849,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires
         */
-       flush_domains = i915_retire_commands(dev, ring);
-
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       i915_retire_commands(dev, ring);
 
-       /*
-        * Get a seqno representing the execution of the current buffer,
-        * which we can wait on.  We would like to mitigate these interrupts,
-        * likely by only creating seqnos occasionally (so that we have
-        * *some* interrupts representing completion of buffers that we can
-        * wait on when trying to clear up gtt space).
-        */
-       seqno = i915_add_request(dev, file_priv, flush_domains, ring);
-       BUG_ON(seqno == 0);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
-               obj_priv = to_intel_bo(obj);
 
-               i915_gem_object_move_to_active(obj, seqno, ring);
-#if WATCH_LRU
-               DRM_INFO("%s: move to exec list %p\n", __func__, obj);
-#endif
+               i915_gem_object_move_to_active(obj, ring);
+               if (obj->write_domain)
+                       list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+                                      &ring->gpu_write_list);
        }
-#if WATCH_LRU
-       i915_dump_lru(dev, __func__);
-#endif
 
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       i915_add_request(dev, file, request, ring);
+       request = NULL;
 
 err:
-       for (i = 0; i < pinned; i++)
-               i915_gem_object_unpin(object_list[i]);
-
        for (i = 0; i < args->buffer_count; i++) {
                if (object_list[i]) {
                        obj_priv = to_intel_bo(object_list[i]);
@@ -3870,22 +3875,9 @@ err:
        mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
-       /* Copy the updated relocations out regardless of current error
-        * state.  Failure to update the relocs would mean that the next
-        * time userland calls execbuf, it would do so with presumed offset
-        * state that didn't match the actual object state.
-        */
-       ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
-                                          relocs);
-       if (ret2 != 0) {
-               DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
-
-               if (ret == 0)
-                       ret = ret2;
-       }
-
        drm_free_large(object_list);
        kfree(cliprects);
+       kfree(request);
 
        return ret;
 }
@@ -3942,7 +3934,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
-               if (!IS_I965G(dev))
+               if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
@@ -4039,12 +4031,12 @@ int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
        struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
        BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       WARN_ON(i915_verify_lists(dev));
 
        if (obj_priv->gtt_space != NULL) {
                if (alignment == 0)
@@ -4072,14 +4064,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
         * remove it from the inactive list
         */
        if (obj_priv->pin_count == 1) {
-               atomic_inc(&dev->pin_count);
-               atomic_add(obj->size, &dev->pin_memory);
-               if (!obj_priv->active &&
-                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-                       list_del_init(&obj_priv->list);
+               i915_gem_info_add_pin(dev_priv, obj->size);
+               if (!obj_priv->active)
+                       list_move_tail(&obj_priv->mm_list,
+                                      &dev_priv->mm.pinned_list);
        }
-       i915_verify_inactive(dev, __FILE__, __LINE__);
 
+       WARN_ON(i915_verify_lists(dev));
        return 0;
 }
 
@@ -4090,7 +4081,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       WARN_ON(i915_verify_lists(dev));
        obj_priv->pin_count--;
        BUG_ON(obj_priv->pin_count < 0);
        BUG_ON(obj_priv->gtt_space == NULL);
@@ -4100,14 +4091,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
         * the inactive list
         */
        if (obj_priv->pin_count == 0) {
-               if (!obj_priv->active &&
-                   (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-                       list_move_tail(&obj_priv->list,
+               if (!obj_priv->active)
+                       list_move_tail(&obj_priv->mm_list,
                                       &dev_priv->mm.inactive_list);
-               atomic_dec(&dev->pin_count);
-               atomic_sub(obj->size, &dev->pin_memory);
+               i915_gem_info_remove_pin(dev_priv, obj->size);
        }
-       i915_verify_inactive(dev, __FILE__, __LINE__);
+       WARN_ON(i915_verify_lists(dev));
 }
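
Pin and unpin are reference counting with a side effect on the edge transitions: the first pin moves an inactive object onto the pinned list, the last unpin returns it to the inactive (evictable) list. A compact model of those transitions, with the list reduced to a string:

#include <assert.h>
#include <stdio.h>

struct toy_obj {
        int pin_count;
        int active;             /* still owned by the GPU */
        const char *list;       /* "inactive" or "pinned" */
};

static void toy_pin(struct toy_obj *o)
{
        if (++o->pin_count == 1 && !o->active)
                o->list = "pinned";     /* first pin: leave the eviction pool */
}

static void toy_unpin(struct toy_obj *o)
{
        assert(o->pin_count > 0);
        if (--o->pin_count == 0 && !o->active)
                o->list = "inactive";   /* last unpin: evictable again */
}

int main(void)
{
        struct toy_obj o = { .pin_count = 0, .active = 0, .list = "inactive" };

        toy_pin(&o);
        toy_pin(&o);    /* nested pin: no list movement */
        printf("pinned twice -> %s\n", o.list);

        toy_unpin(&o);
        toy_unpin(&o);
        printf("fully unpinned -> %s\n", o.list);
        return 0;
}
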
 
 int
@@ -4119,41 +4108,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj_priv;
        int ret;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
-                         args->handle);
-               mutex_unlock(&dev->struct_mutex);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
        obj_priv = to_intel_bo(obj);
 
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to pin a purgeable buffer\n");
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        obj_priv->user_pin_count++;
        obj_priv->pin_filp = file_priv;
        if (obj_priv->user_pin_count == 1) {
                ret = i915_gem_object_pin(obj, args->alignment);
-               if (ret != 0) {
-                       drm_gem_object_unreference(obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+               if (ret)
+                       goto out;
        }
 
        /* XXX - flush the CPU caches for pinned objects
@@ -4161,10 +4145,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
         */
        i915_gem_object_flush_cpu_write_domain(obj);
        args->offset = obj_priv->gtt_offset;
+out:
        drm_gem_object_unreference(obj);
+unlock:
        mutex_unlock(&dev->struct_mutex);
-
-       return 0;
+       return ret;
 }
 
 int
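The pin ioctl above (and the unpin, busy and madvise ioctls that follow) all move to the same single-exit shape: failures set ret and jump to out: to drop the object reference, or to unlock: when no reference was taken, so struct_mutex is released on exactly one path. A minimal sketch of that shape, kernel context assumed, with lookup_object() and do_work() as hypothetical stand-ins for the real driver calls:

    /* Sketch only: lookup_object()/do_work() are hypothetical stand-ins. */
    static struct drm_gem_object *lookup_object(struct drm_device *dev, u32 handle);
    static int do_work(struct drm_gem_object *obj);

    static int example_ioctl(struct drm_device *dev, u32 handle)
    {
            struct drm_gem_object *obj;
            int ret;

            ret = i915_mutex_lock_interruptible(dev); /* fails on signal or wedged GPU */
            if (ret)
                    return ret;

            obj = lookup_object(dev, handle);
            if (obj == NULL) {
                    ret = -ENOENT;
                    goto unlock;            /* no reference taken yet */
            }

            if (obj->size == 0) {           /* placeholder validation */
                    ret = -EINVAL;
                    goto out;
            }

            ret = do_work(obj);
    out:
            drm_gem_object_unreference(obj);
    unlock:
            mutex_unlock(&dev->struct_mutex);
            return ret;
    }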
@@ -4174,24 +4159,24 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
 
-       mutex_lock(&dev->struct_mutex);
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
-                         args->handle);
-               mutex_unlock(&dev->struct_mutex);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
-
        obj_priv = to_intel_bo(obj);
+
        if (obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
        obj_priv->user_pin_count--;
        if (obj_priv->user_pin_count == 0) {
@@ -4199,9 +4184,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                i915_gem_object_unpin(obj);
        }
 
+out:
        drm_gem_object_unreference(obj);
+unlock:
        mutex_unlock(&dev->struct_mutex);
-       return 0;
+       return ret;
 }
 
 int
@@ -4211,22 +4198,24 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
-                         args->handle);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
-
-       mutex_lock(&dev->struct_mutex);
+       obj_priv = to_intel_bo(obj);
 
        /* Count all active objects as busy, even if they are currently not used
         * by the gpu. Users of this interface expect objects to eventually
         * become non-busy without any further actions, therefore emit any
         * necessary flushes here.
         */
-       obj_priv = to_intel_bo(obj);
        args->busy = obj_priv->active;
        if (args->busy) {
                /* Unconditionally flush objects, even when the gpu still uses this
@@ -4234,10 +4223,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * use this buffer rather sooner than later, so issuing the required
                 * flush earlier is beneficial.
                 */
-               if (obj->write_domain) {
-                       i915_gem_flush(dev, 0, obj->write_domain);
-                       (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
-               }
+               if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+                       i915_gem_flush_ring(dev, file_priv,
+                                           obj_priv->ring,
+                                           0, obj->write_domain);
 
                /* Update the active list for the hardware's current position.
                 * Otherwise this only updates on a delayed timer or when irqs
@@ -4250,8 +4239,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        }
 
        drm_gem_object_unreference(obj);
+unlock:
        mutex_unlock(&dev->struct_mutex);
-       return 0;
+       return ret;
 }
 
 int
@@ -4268,6 +4258,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_madvise *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       int ret;
 
        switch (args->madv) {
        case I915_MADV_DONTNEED:
@@ -4277,22 +4268,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
            return -EINVAL;
        }
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
+
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
-               DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
-                         args->handle);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto unlock;
        }
-
-       mutex_lock(&dev->struct_mutex);
        obj_priv = to_intel_bo(obj);
 
        if (obj_priv->pin_count) {
-               drm_gem_object_unreference(obj);
-               mutex_unlock(&dev->struct_mutex);
-
-               DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        if (obj_priv->madv != __I915_MADV_PURGED)
@@ -4305,15 +4294,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 
        args->retained = obj_priv->madv != __I915_MADV_PURGED;
 
+out:
        drm_gem_object_unreference(obj);
+unlock:
        mutex_unlock(&dev->struct_mutex);
-
-       return 0;
+       return ret;
 }
 
 struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
                                              size_t size)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -4325,18 +4316,19 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
                return NULL;
        }
 
+       i915_gem_info_add_obj(dev_priv, size);
+
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
        obj->agp_type = AGP_USER_MEMORY;
        obj->base.driver_private = NULL;
        obj->fence_reg = I915_FENCE_REG_NONE;
-       INIT_LIST_HEAD(&obj->list);
+       INIT_LIST_HEAD(&obj->mm_list);
+       INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->gpu_write_list);
        obj->madv = I915_MADV_WILLNEED;
 
-       trace_i915_gem_object_create(&obj->base);
-
        return &obj->base;
 }
 
@@ -4356,7 +4348,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 
        ret = i915_gem_object_unbind(obj);
        if (ret == -ERESTARTSYS) {
-               list_move(&obj_priv->list,
+               list_move(&obj_priv->mm_list,
                          &dev_priv->mm.deferred_free_list);
                return;
        }
@@ -4365,6 +4357,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
                i915_gem_free_mmap_offset(obj);
 
        drm_gem_object_release(obj);
+       i915_gem_info_remove_obj(dev_priv, obj->size);
 
        kfree(obj_priv->page_cpu_valid);
        kfree(obj_priv->bit_17);
@@ -4395,10 +4388,7 @@ i915_gem_idle(struct drm_device *dev)
 
        mutex_lock(&dev->struct_mutex);
 
-       if (dev_priv->mm.suspended ||
-                       (dev_priv->render_ring.gem_object == NULL) ||
-                       (HAS_BSD(dev) &&
-                        dev_priv->bsd_ring.gem_object == NULL)) {
+       if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -4423,7 +4413,7 @@ i915_gem_idle(struct drm_device *dev)
         * And not confound mm.suspended!
         */
        dev_priv->mm.suspended = 1;
-       del_timer(&dev_priv->hangcheck_timer);
+       del_timer_sync(&dev_priv->hangcheck_timer);
 
        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
@@ -4503,36 +4493,34 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
-       dev_priv->render_ring = render_ring;
-
-       if (!I915_NEED_GFX_HWS(dev)) {
-               dev_priv->render_ring.status_page.page_addr
-                       = dev_priv->status_page_dmah->vaddr;
-               memset(dev_priv->render_ring.status_page.page_addr,
-                               0, PAGE_SIZE);
-       }
-
        if (HAS_PIPE_CONTROL(dev)) {
                ret = i915_gem_init_pipe_control(dev);
                if (ret)
                        return ret;
        }
 
-       ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+       ret = intel_init_render_ring_buffer(dev);
        if (ret)
                goto cleanup_pipe_control;
 
        if (HAS_BSD(dev)) {
-               dev_priv->bsd_ring = bsd_ring;
-               ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+               ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render_ring;
        }
 
+       if (HAS_BLT(dev)) {
+               ret = intel_init_blt_ring_buffer(dev);
+               if (ret)
+                       goto cleanup_bsd_ring;
+       }
+
        dev_priv->next_seqno = 1;
 
        return 0;
 
+cleanup_bsd_ring:
+       intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 cleanup_render_ring:
        intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
 cleanup_pipe_control:
@@ -4547,8 +4535,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-       if (HAS_BSD(dev))
-               intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+       intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+       intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
        if (HAS_PIPE_CONTROL(dev))
                i915_gem_cleanup_pipe_control(dev);
 }
@@ -4577,15 +4565,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                return ret;
        }
 
-       spin_lock(&dev_priv->mm.active_list_lock);
+       BUG_ON(!list_empty(&dev_priv->mm.active_list));
        BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
-       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
-       spin_unlock(&dev_priv->mm.active_list_lock);
-
+       BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
+       BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
-       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
+       BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
+       BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
        mutex_unlock(&dev->struct_mutex);
 
        ret = drm_irq_install(dev);
@@ -4627,28 +4615,34 @@ i915_gem_lastclose(struct drm_device *dev)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
 }
 
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
 void
 i915_gem_load(struct drm_device *dev)
 {
        int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       spin_lock_init(&dev_priv->mm.active_list_lock);
+       INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-       INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-       INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
-       INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
-       if (HAS_BSD(dev)) {
-               INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
-               INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
-       }
+       init_ring_lists(&dev_priv->render_ring);
+       init_ring_lists(&dev_priv->bsd_ring);
+       init_ring_lists(&dev_priv->blt_ring);
        for (i = 0; i < 16; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
+       init_completion(&dev_priv->error_completion);
        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);
@@ -4667,21 +4661,30 @@ i915_gem_load(struct drm_device *dev)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->fence_reg_start = 3;
 
-       if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+       if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;
 
        /* Initialize fence registers to zero */
-       if (IS_I965G(dev)) {
+       switch (INTEL_INFO(dev)->gen) {
+       case 6:
+               for (i = 0; i < 16; i++)
+                       I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
+               break;
+       case 5:
+       case 4:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
-       } else {
-               for (i = 0; i < 8; i++)
-                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+               break;
+       case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+       case 2:
+               for (i = 0; i < 8; i++)
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+               break;
        }
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4691,8 +4694,8 @@ i915_gem_load(struct drm_device *dev)
  * Create a physically contiguous memory object for this object
  * e.g. for cursor + overlay regs
  */
-int i915_gem_init_phys_object(struct drm_device *dev,
-                             int id, int size, int align)
+static int i915_gem_init_phys_object(struct drm_device *dev,
+                                    int id, int size, int align)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
@@ -4724,7 +4727,7 @@ kfree_obj:
        return ret;
 }
 
-void i915_gem_free_phys_object(struct drm_device *dev, int id)
+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
@@ -4772,11 +4775,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+               char *dst = kmap_atomic(obj_priv->pages[i]);
                char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
                memcpy(dst, src, PAGE_SIZE);
-               kunmap_atomic(dst, KM_USER0);
+               kunmap_atomic(dst);
        }
        drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);
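Both copy loops in this function (and its attach counterpart below) move to the stacked kmap_atomic() API: the KM_USER0 slot argument is gone, and kunmap_atomic() now takes the mapped address rather than a slot. A minimal sketch of the new calling convention, kernel context assumed:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy a page's contents out through a short-lived atomic mapping. */
    static void copy_from_page(struct page *page, void *dst)
    {
            void *src = kmap_atomic(page);  /* no KM_* slot argument anymore */
            memcpy(dst, src, PAGE_SIZE);
            kunmap_atomic(src);             /* unmap by address, not by slot */
    }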
@@ -4833,11 +4836,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+               char *src = kmap_atomic(obj_priv->pages[i]);
                char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
                memcpy(dst, src, PAGE_SIZE);
-               kunmap_atomic(src, KM_USER0);
+               kunmap_atomic(src);
        }
 
        i915_gem_object_put_pages(obj);
@@ -4869,18 +4872,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
        return 0;
 }
 
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
 
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list))
-               list_del_init(i915_file_priv->mm.request_list.next);
-       mutex_unlock(&dev->struct_mutex);
+       spin_lock(&file_priv->mm.lock);
+       while (!list_empty(&file_priv->mm.request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&file_priv->mm.request_list,
+                                          struct drm_i915_gem_request,
+                                          client_list);
+               list_del(&request->client_list);
+               request->file_priv = NULL;
+       }
+       spin_unlock(&file_priv->mm.lock);
 }
 
 static int
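i915_gem_release() above now drains the per-file request list under a spinlock: each iteration detaches the first entry and clears its file back-pointer, so a concurrent retirement can never follow a dangling file_priv. The drain idiom in isolation (struct req is a hypothetical stand-in):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct req {                            /* hypothetical request type */
            struct list_head link;
            void *owner;
    };

    static void drain(spinlock_t *lock, struct list_head *queue)
    {
            spin_lock(lock);
            while (!list_empty(queue)) {
                    struct req *r = list_first_entry(queue, struct req, link);
                    list_del(&r->link);     /* unlink; the request itself lives on */
                    r->owner = NULL;        /* later retirement sees no stale owner */
            }
            spin_unlock(lock);
    }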
@@ -4889,12 +4899,10 @@ i915_gpu_is_active(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int lists_empty;
 
-       spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->render_ring.active_list);
-       if (HAS_BSD(dev))
-               lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
-       spin_unlock(&dev_priv->mm.active_list_lock);
+                     list_empty(&dev_priv->render_ring.active_list) &&
+                     list_empty(&dev_priv->bsd_ring.active_list) &&
+                     list_empty(&dev_priv->blt_ring.active_list);
 
        return !lists_empty;
 }
@@ -4916,7 +4924,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
                        if (mutex_trylock(&dev->struct_mutex)) {
                                list_for_each_entry(obj_priv,
                                                    &dev_priv->mm.inactive_list,
-                                                   list)
+                                                   mm_list)
                                        cnt++;
                                mutex_unlock(&dev->struct_mutex);
                        }
@@ -4942,7 +4950,7 @@ rescan:
 
                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,
-                                        list) {
+                                        mm_list) {
                        if (i915_gem_object_is_purgeable(obj_priv)) {
                                i915_gem_object_unbind(&obj_priv->base);
                                if (--nr_to_scan <= 0)
@@ -4971,7 +4979,7 @@ rescan:
 
                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,
-                                        list) {
+                                        mm_list) {
                        if (nr_to_scan > 0) {
                                i915_gem_object_unbind(&obj_priv->base);
                                nr_to_scan--;
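Both shrinker passes above iterate with list_for_each_entry_safe() because i915_gem_object_unbind() moves the object off mm.inactive_list mid-walk; the _safe variant caches the next node before the body runs, so unlinking the current one cannot derail the traversal. The idiom in isolation (struct node and should_drop() are hypothetical):

    #include <linux/list.h>

    struct node {
            struct list_head link;
    };

    static int should_drop(struct node *n)
    {
            return 1;                       /* placeholder predicate */
    }

    static void prune(struct list_head *head)
    {
            struct node *n, *next;

            /* 'next' is loaded before the body executes, so list_del()
             * on n does not break the walk. */
            list_for_each_entry_safe(n, next, head, link)
                    if (should_drop(n))
                            list_del(&n->link);
    }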
index 80f380b1d951fe8875260b90849cbf78ced35d5a..48644b840a8dc08dfac36c6d5570265427324f6a 100644 (file)
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
 {
+       static int warned;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-
-       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-               obj = &obj_priv->base;
-               if (obj_priv->pin_count || obj_priv->active ||
-                   (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-                                          I915_GEM_DOMAIN_GTT)))
-                       DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
+       struct drm_i915_gem_object *obj;
+       int err = 0;
+
+       if (warned)
+               return 0;
+
+       list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+               if (obj->base.dev != dev ||
+                   !atomic_read(&obj->base.refcount.refcount)) {
+                       DRM_ERROR("freed render active %p\n", obj);
+                       err++;
+                       break;
+               } else if (!obj->active ||
+                          (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+                       DRM_ERROR("invalid render active %p (a %d r %x)\n",
+                                 obj,
+                                 obj->active,
+                                 obj->base.read_domains);
+                       err++;
+               } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+                       DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+                                 obj,
+                                 obj->base.write_domain,
+                                 !list_empty(&obj->gpu_write_list));
+                       err++;
+               }
+       }
+
+       list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+               if (obj->base.dev != dev ||
+                   !atomic_read(&obj->base.refcount.refcount)) {
+                       DRM_ERROR("freed flushing %p\n", obj);
+                       err++;
+                       break;
+               } else if (!obj->active ||
+                          (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+                          list_empty(&obj->gpu_write_list)) {
+                       DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
                                  obj,
-                                 obj_priv->pin_count, obj_priv->active,
-                                 obj->write_domain, file, line);
+                                 obj->active,
+                                 obj->base.write_domain,
+                                 !list_empty(&obj->gpu_write_list));
+                       err++;
+               }
+       }
+
+       list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+               if (obj->base.dev != dev ||
+                   !atomic_read(&obj->base.refcount.refcount)) {
+                       DRM_ERROR("freed gpu write %p\n", obj);
+                       err++;
+                       break;
+               } else if (!obj->active ||
+                          (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+                       DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+                                 obj,
+                                 obj->active,
+                                 obj->base.write_domain);
+                       err++;
+               }
+       }
+
+       list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+               if (obj->base.dev != dev ||
+                   !atomic_read(&obj->base.refcount.refcount)) {
+                       DRM_ERROR("freed inactive %p\n", obj);
+                       err++;
+                       break;
+               } else if (obj->pin_count || obj->active ||
+                          (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+                       DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+                                 obj,
+                                 obj->pin_count, obj->active,
+                                 obj->base.write_domain);
+                       err++;
+               }
        }
+
+       list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
+               if (obj->base.dev != dev ||
+                   !atomic_read(&obj->base.refcount.refcount)) {
+                       DRM_ERROR("freed pinned %p\n", obj);
+                       err++;
+                       break;
+               } else if (!obj->pin_count || obj->active ||
+                          (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+                       DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
+                                 obj,
+                                 obj->pin_count, obj->active,
+                                 obj->base.write_domain);
+                       err++;
+               }
+       }
+
+       return warned = err;
 }
 #endif /* WATCH_LISTS */
 
 
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+#if WATCH_EXEC | WATCH_PWRITE
 static void
 i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
                   uint32_t bias, uint32_t mark)
@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
 }
 #endif
 
-#if WATCH_LRU
-void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
-       drm_i915_private_t              *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object      *obj_priv;
-
-       DRM_INFO("active list %s {\n", where);
-       spin_lock(&dev_priv->mm.active_list_lock);
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-                           list)
-       {
-               DRM_INFO("    %p: %08x\n", obj_priv,
-                        obj_priv->last_rendering_seqno);
-       }
-       spin_unlock(&dev_priv->mm.active_list_lock);
-       DRM_INFO("}\n");
-       DRM_INFO("flushing list %s {\n", where);
-       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
-                           list)
-       {
-               DRM_INFO("    %p: %08x\n", obj_priv,
-                        obj_priv->last_rendering_seqno);
-       }
-       DRM_INFO("}\n");
-       DRM_INFO("inactive %s {\n", where);
-       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-               DRM_INFO("    %p: %08x\n", obj_priv,
-                        obj_priv->last_rendering_seqno);
-       }
-       DRM_INFO("}\n");
-}
-#endif
-
-
 #if WATCH_COHERENCY
 void
 i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
index 5c428fa3e0b34049e94786184b646a98ee87c06d..43a4013f53fa24e212849af68940384b777d30a1 100644 (file)
 #include "i915_drv.h"
 #include "i915_drm.h"
 
-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
-                           struct list_head **render_iter,
-                           struct list_head **bsd_iter)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
-       if (*render_iter != &dev_priv->render_ring.active_list)
-               render_obj = list_entry(*render_iter,
-                                       struct drm_i915_gem_object,
-                                       list);
-
-       if (HAS_BSD(dev)) {
-               if (*bsd_iter != &dev_priv->bsd_ring.active_list)
-                       bsd_obj = list_entry(*bsd_iter,
-                                            struct drm_i915_gem_object,
-                                            list);
-
-               if (render_obj == NULL) {
-                       *bsd_iter = (*bsd_iter)->next;
-                       return bsd_obj;
-               }
-
-               if (bsd_obj == NULL) {
-                       *render_iter = (*render_iter)->next;
-                       return render_obj;
-               }
-
-               /* XXX can we handle seqno wrapping? */
-               if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
-                       *render_iter = (*render_iter)->next;
-                       return render_obj;
-               } else {
-                       *bsd_iter = (*bsd_iter)->next;
-                       return bsd_obj;
-               }
-       } else {
-               *render_iter = (*render_iter)->next;
-               return render_obj;
-       }
-}
-
 static bool
 mark_free(struct drm_i915_gem_object *obj_priv,
           struct list_head *unwind)
@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
        return drm_mm_scan_add_block(obj_priv->gtt_space);
 }
 
-#define i915_for_each_active_object(OBJ, R, B) \
-       *(R) = dev_priv->render_ring.active_list.next; \
-       *(B) = dev_priv->bsd_ring.active_list.next; \
-       while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
-
 int
 i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct drm_i915_gem_object *obj_priv;
-       struct list_head *render_iter, *bsd_iter;
        int ret = 0;
 
        i915_gem_retire_requests(dev);
@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
        drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
 
        /* First see if there is a large enough contiguous idle region... */
-       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }
 
        /* Now merge in the soon-to-be-expired objects... */
-       i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                /* Does the object require an outstanding flush? */
                if (obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;
@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
        }
 
        /* Finally add anything with a pending flush (in order of retirement) */
-       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
                if (obj_priv->pin_count)
                        continue;
 
                if (mark_free(obj_priv, &unwind_list))
                        goto found;
        }
-       i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                if (! obj_priv->base.write_domain || obj_priv->pin_count)
                        continue;
 
@@ -212,14 +163,11 @@ i915_gem_evict_everything(struct drm_device *dev)
        int ret;
        bool lists_empty;
 
-       spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
-                      (!HAS_BSD(dev)
-                       || list_empty(&dev_priv->bsd_ring.active_list)));
-       spin_unlock(&dev_priv->mm.active_list_lock);
-
+                      list_empty(&dev_priv->bsd_ring.active_list) &&
+                      list_empty(&dev_priv->blt_ring.active_list));
        if (lists_empty)
                return -ENOSPC;
 
@@ -234,13 +182,11 @@ i915_gem_evict_everything(struct drm_device *dev)
        if (ret)
                return ret;
 
-       spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
-                      (!HAS_BSD(dev)
-                       || list_empty(&dev_priv->bsd_ring.active_list)));
-       spin_unlock(&dev_priv->mm.active_list_lock);
+                      list_empty(&dev_priv->bsd_ring.active_list) &&
+                      list_empty(&dev_priv->blt_ring.active_list));
        BUG_ON(!lists_empty);
 
        return 0;
@@ -258,7 +204,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
 
                obj = &list_first_entry(&dev_priv->mm.inactive_list,
                                        struct drm_i915_gem_object,
-                                       list)->base;
+                                       mm_list)->base;
 
                ret = i915_gem_object_unbind(obj);
                if (ret != 0) {
index 710eca70b323953e376d3c214a33e78996bed1d2..af352de70be199d8eb8a3be49a138d279f80f9e3 100644 (file)
@@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
        uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
        uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
-       if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+       if (IS_GEN5(dev) || IS_GEN6(dev)) {
                /* On Ironlake, whatever the DRAM config, the GPU always
                 * does the same swizzling setup.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                swizzle_y = I915_BIT_6_SWIZZLE_9;
-       } else if (!IS_I9XX(dev)) {
+       } else if (IS_GEN2(dev)) {
                /* As far as we know, the 865 doesn't have these bit 6
                 * swizzling issues.
                 */
@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
        if (tiling_mode == I915_TILING_NONE)
                return true;
 
-       if (!IS_I9XX(dev) ||
+       if (IS_GEN2(dev) ||
            (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
                tile_width = 128;
        else
                tile_width = 512;
 
        /* check maximum stride & object size */
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                /* i965 stores the end address of the gtt mapping in the fence
                 * reg, so don't bother to check the size */
                if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
                        return false;
-       } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+       } else {
                if (stride > 8192)
                        return false;
 
@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
        }
 
        /* 965+ just needs multiples of tile width */
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                if (stride & (tile_width - 1))
                        return false;
                return true;
@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
        if (tiling_mode == I915_TILING_NONE)
                return true;
 
-       if (!IS_I965G(dev)) {
-               if (obj_priv->gtt_offset & (obj->size - 1))
+       if (INTEL_INFO(dev)->gen >= 4)
+               return true;
+
+       if (obj_priv->gtt_offset & (obj->size - 1))
+               return false;
+
+       if (IS_GEN3(dev)) {
+               if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+                       return false;
+       } else {
+               if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
                        return false;
-               if (IS_I9XX(dev)) {
-                       if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
-                               return false;
-               } else {
-                       if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
-                               return false;
-               }
        }
 
        return true;
@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-       int ret = 0;
+       int ret;
+
+       ret = i915_gem_check_is_wedged(dev);
+       if (ret)
+               return ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
                        ret = i915_gem_object_unbind(obj);
                else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-                       ret = i915_gem_object_put_fence_reg(obj);
+                       ret = i915_gem_object_put_fence_reg(obj, true);
                else
                        i915_gem_release_mmap(obj);
 
@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
  * bit 17 of its physical address and therefore being interpreted differently
  * by the GPU.
  */
-static int
+static void
 i915_gem_swizzle_page(struct page *page)
 {
+       char temp[64];
        char *vaddr;
        int i;
-       char temp[64];
 
        vaddr = kmap(page);
-       if (vaddr == NULL)
-               return -ENOMEM;
 
        for (i = 0; i < PAGE_SIZE; i += 128) {
                memcpy(temp, &vaddr[i], 64);
@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
        }
 
        kunmap(page);
-
-       return 0;
 }
 
 void
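i915_gem_swizzle_page() above works through the page in 128-byte chunks; the part of the loop elided between the hunks swaps each chunk's two 64-byte halves via the temp buffer, undoing the bit-17 swizzle. A self-contained userspace sketch of that transform (assuming the swap, since the middle of the loop is not shown here):

    #include <stddef.h>
    #include <string.h>

    #define CHUNK 128
    #define HALF   64

    /* Swap the two 64-byte halves of every 128-byte chunk in a buffer. */
    static void swizzle_buffer(unsigned char *buf, size_t len)
    {
            unsigned char temp[HALF];
            size_t i;

            for (i = 0; i + CHUNK <= len; i += CHUNK) {
                    memcpy(temp, buf + i, HALF);
                    memcpy(buf + i, buf + i + HALF, HALF);
                    memcpy(buf + i + HALF, temp, HALF);
            }
    }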
@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
                char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
                if ((new_bit_17 & 0x1) !=
                    (test_bit(i, obj_priv->bit_17) != 0)) {
-                       int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
-                       if (ret != 0) {
-                               DRM_ERROR("Failed to swizzle page\n");
-                               return;
-                       }
+                       i915_gem_swizzle_page(obj_priv->pages[i]);
                        set_page_dirty(obj_priv->pages[i]);
                }
        }
index 744225ebb4b25d5988fab454441de95d7db94115..729fd0c91d7b2c1f30de2afacae4227238ccdbd5 100644 (file)
@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 /* For display hotplug interrupt */
-void
+static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
        else {
                i915_enable_pipestat(dev_priv, 1,
                                     PIPE_LEGACY_BLC_EVENT_ENABLE);
-               if (IS_I965G(dev))
+               if (INTEL_INFO(dev)->gen >= 4)
                        i915_enable_pipestat(dev_priv, 0,
                                             PIPE_LEGACY_BLC_EVENT_ENABLE);
        }
@@ -191,12 +191,7 @@ static int
 i915_pipe_enabled(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-
-       if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
-               return 1;
-
-       return 0;
+       return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
 }
 
 /* Called from drm generic code, passed a 'crtc', which
@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
-       u32 high1, high2, low, count;
-
-       high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
-       low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+       u32 high1, high2, low;
 
        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
                return 0;
        }
 
+       high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+       low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
-               high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-                        PIPE_FRAME_HIGH_SHIFT);
-               low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
-                       PIPE_FRAME_LOW_SHIFT);
-               high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-                        PIPE_FRAME_HIGH_SHIFT);
+               high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+               low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+               high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);
 
-       count = (high1 << 8) | low;
-
-       return count;
+       high1 >>= PIPE_FRAME_HIGH_SHIFT;
+       low >>= PIPE_FRAME_LOW_SHIFT;
+       return (high1 << 8) | low;
 }
 
 u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
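The do/while above is the stock idiom for a counter whose high and low halves live in separate, unlatched registers: read high, read low, re-read high, and retry until both high reads agree, which proves the low half did not roll over in between. The pattern in isolation (read_high()/read_low() are hypothetical register accessors):

    /* Hypothetical MMIO accessors for the two halves of the counter. */
    extern unsigned int read_high(void);
    extern unsigned int read_low(void);

    static unsigned int read_split_counter(void)
    {
            unsigned int high1, high2, low;

            do {
                    high1 = read_high();
                    low   = read_low();
                    high2 = read_high();
            } while (high1 != high2);       /* low wrapped mid-read: retry */

            return (high1 << 8) | low;      /* 8-bit low field, as above */
    }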
@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_encoder *encoder;
-
-       if (mode_config->num_encoder) {
-               list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-                       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-       
-                       if (intel_encoder->hot_plug)
-                               (*intel_encoder->hot_plug) (intel_encoder);
-               }
-       }
+       struct intel_encoder *encoder;
+
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+               if (encoder->hot_plug)
+                       encoder->hot_plug(encoder);
+
        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
 }
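The hotplug rework above iterates mode_config->encoder_list with the member spelled base.head, so list_for_each_entry() resolves container_of() against intel_encoder and hands back the derived type directly, making the old enc_to_intel_encoder() upcast unnecessary. The trick in isolation (both struct types are hypothetical stand-ins):

    #include <linux/list.h>

    struct base_obj {                       /* plays the role of drm_encoder */
            struct list_head head;
    };

    struct derived_obj {                    /* plays the role of intel_encoder */
            struct base_obj base;
            void (*hook)(struct derived_obj *);
    };

    static void run_hooks(struct list_head *list)
    {
            struct derived_obj *d;

            /* Naming the member 'base.head' makes container_of() yield
             * derived_obj, so no per-element cast is needed. */
            list_for_each_entry(d, list, base.head)
                    if (d->hook)
                            d->hook(d);
    }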
@@ -305,13 +293,30 @@ static void i915_handle_rps_change(struct drm_device *dev)
        return;
 }
 
-irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+static void notify_ring(struct drm_device *dev,
+                       struct intel_ring_buffer *ring)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 seqno = ring->get_seqno(dev, ring);
+       ring->irq_gem_seqno = seqno;
+       trace_i915_gem_request_complete(dev, seqno);
+       wake_up_all(&ring->irq_queue);
+       dev_priv->hangcheck_count = 0;
+       mod_timer(&dev_priv->hangcheck_timer,
+                 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+}
+
+static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pch_iir;
+       u32 hotplug_mask;
        struct drm_i915_master_private *master_priv;
-       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+       u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+       if (IS_GEN6(dev))
+               bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
 
        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
@@ -325,6 +330,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
        if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
                goto done;
 
+       if (HAS_PCH_CPT(dev))
+               hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+       else
+               hotplug_mask = SDE_HOTPLUG_MASK;
+
        ret = IRQ_HANDLED;
 
        if (dev->primary->master) {
@@ -334,29 +344,24 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
                                READ_BREADCRUMB(dev_priv);
        }
 
-       if (gt_iir & GT_PIPE_NOTIFY) {
-               u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
-               render_ring->irq_gem_seqno = seqno;
-               trace_i915_gem_request_complete(dev, seqno);
-               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-               dev_priv->hangcheck_count = 0;
-               mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-       }
-       if (gt_iir & GT_BSD_USER_INTERRUPT)
-               DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
-
+       if (gt_iir & GT_PIPE_NOTIFY)
+               notify_ring(dev, &dev_priv->render_ring);
+       if (gt_iir & bsd_usr_interrupt)
+               notify_ring(dev, &dev_priv->bsd_ring);
+       if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
+               notify_ring(dev, &dev_priv->blt_ring);
 
        if (de_iir & DE_GSE)
-               ironlake_opregion_gse_intr(dev);
+               intel_opregion_gse_intr(dev);
 
        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
-               intel_finish_page_flip(dev, 0);
+               intel_finish_page_flip_plane(dev, 0);
        }
 
        if (de_iir & DE_PLANEB_FLIP_DONE) {
                intel_prepare_page_flip(dev, 1);
-               intel_finish_page_flip(dev, 1);
+               intel_finish_page_flip_plane(dev, 1);
        }
 
        if (de_iir & DE_PIPEA_VBLANK)
@@ -366,10 +371,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
                drm_handle_vblank(dev, 1);
 
        /* check event from PCH */
-       if ((de_iir & DE_PCH_EVENT) &&
-           (pch_iir & SDE_HOTPLUG_MASK)) {
+       if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-       }
 
        if (de_iir & DE_PCU_EVENT) {
                I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -404,23 +407,20 @@ static void i915_error_work_func(struct work_struct *work)
        char *reset_event[] = { "RESET=1", NULL };
        char *reset_done_event[] = { "ERROR=0", NULL };
 
-       DRM_DEBUG_DRIVER("generating error event\n");
        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
        if (atomic_read(&dev_priv->mm.wedged)) {
-               if (IS_I965G(dev)) {
-                       DRM_DEBUG_DRIVER("resetting chip\n");
-                       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
-                       if (!i965_reset(dev, GDRST_RENDER)) {
-                               atomic_set(&dev_priv->mm.wedged, 0);
-                               kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
-                       }
-               } else {
-                       DRM_DEBUG_DRIVER("reboot required\n");
+               DRM_DEBUG_DRIVER("resetting chip\n");
+               kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+               if (!i915_reset(dev, GRDOM_RENDER)) {
+                       atomic_set(&dev_priv->mm.wedged, 0);
+                       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
                }
+               complete_all(&dev_priv->error_completion);
        }
 }
 
+#ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_device *dev,
                         struct drm_gem_object *src)
@@ -456,10 +456,9 @@ i915_error_object_create(struct drm_device *dev,
 
                local_irq_save(flags);
                s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                            reloc_offset,
-                                            KM_IRQ0);
+                                            reloc_offset);
                memcpy_fromio(d, s, PAGE_SIZE);
-               io_mapping_unmap_atomic(s, KM_IRQ0);
+               io_mapping_unmap_atomic(s);
                local_irq_restore(flags);
 
                dst->pages[page] = d;
@@ -511,7 +510,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
 
        if (IS_I830(dev) || IS_845G(dev))
                cmd = MI_BATCH_BUFFER;
-       else if (IS_I965G(dev))
+       else if (INTEL_INFO(dev)->gen >= 4)
                cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
                       MI_BATCH_NON_SECURE_I965);
        else
@@ -584,13 +583,16 @@ static void i915_capture_error_state(struct drm_device *dev)
                return;
        }
 
-       error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
+       DRM_DEBUG_DRIVER("generating error event\n");
+
+       error->seqno =
+               dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        error->pipeastat = I915_READ(PIPEASTAT);
        error->pipebstat = I915_READ(PIPEBSTAT);
        error->instpm = I915_READ(INSTPM);
-       if (!IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen < 4) {
                error->ipeir = I915_READ(IPEIR);
                error->ipehr = I915_READ(IPEHR);
                error->instdone = I915_READ(INSTDONE);
@@ -612,9 +614,7 @@ static void i915_capture_error_state(struct drm_device *dev)
        batchbuffer[0] = NULL;
        batchbuffer[1] = NULL;
        count = 0;
-       list_for_each_entry(obj_priv,
-                       &dev_priv->render_ring.active_list, list) {
-
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                struct drm_gem_object *obj = &obj_priv->base;
 
                if (batchbuffer[0] == NULL &&
@@ -631,7 +631,7 @@ static void i915_capture_error_state(struct drm_device *dev)
        }
        /* Scan the other lists for completeness for those bizarre errors. */
        if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-               list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+               list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
                        struct drm_gem_object *obj = &obj_priv->base;
 
                        if (batchbuffer[0] == NULL &&
@@ -649,7 +649,7 @@ static void i915_capture_error_state(struct drm_device *dev)
                }
        }
        if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-               list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+               list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
                        struct drm_gem_object *obj = &obj_priv->base;
 
                        if (batchbuffer[0] == NULL &&
@@ -668,7 +668,7 @@ static void i915_capture_error_state(struct drm_device *dev)
        }
 
        /* We need to copy these to an anonymous buffer as the simplest
-        * method to avoid being overwritten by userpace.
+        * method to avoid being overwritten by userspace.
         */
        error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
        if (batchbuffer[1] != batchbuffer[0])
@@ -690,8 +690,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 
        if (error->active_bo) {
                int i = 0;
-               list_for_each_entry(obj_priv,
-                               &dev_priv->render_ring.active_list, list) {
+               list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                        struct drm_gem_object *obj = &obj_priv->base;
 
                        error->active_bo[i].size = obj->size;
@@ -744,6 +743,9 @@ void i915_destroy_error_state(struct drm_device *dev)
        if (error)
                i915_error_state_free(dev, error);
 }
+#else
+#define i915_capture_error_state(x)
+#endif
 
 static void i915_report_and_clear_eir(struct drm_device *dev)
 {
@@ -785,7 +787,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
                }
        }
 
-       if (IS_I9XX(dev)) {
+       if (!IS_GEN2(dev)) {
                if (eir & I915_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        printk(KERN_ERR "page table error\n");
@@ -811,7 +813,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
                printk(KERN_ERR "instruction error\n");
                printk(KERN_ERR "  INSTPM: 0x%08x\n",
                       I915_READ(INSTPM));
-               if (!IS_I965G(dev)) {
+               if (INTEL_INFO(dev)->gen < 4) {
                        u32 ipeir = I915_READ(IPEIR);
 
                        printk(KERN_ERR "  IPEIR: 0x%08x\n",
@@ -876,12 +878,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
        i915_report_and_clear_eir(dev);
 
        if (wedged) {
+               INIT_COMPLETION(dev_priv->error_completion);
                atomic_set(&dev_priv->mm.wedged, 1);
 
                /*
                 * Wakeup waiting processes so they don't hang
                 */
-               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+               wake_up_all(&dev_priv->render_ring.irq_queue);
+               if (HAS_BSD(dev))
+                       wake_up_all(&dev_priv->bsd_ring.irq_queue);
+               if (HAS_BLT(dev))
+                       wake_up_all(&dev_priv->blt_ring.irq_queue);
        }
 
        queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -912,7 +919,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 
        /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
        obj_priv = to_intel_bo(work->pending_flip_obj);
-       if(IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
                stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
        } else {
@@ -942,7 +949,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE;
-       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
        atomic_inc(&dev_priv->irq_received);
 
@@ -951,7 +957,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
        iir = I915_READ(IIR);
 
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
        else
                vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
@@ -1019,18 +1025,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                                        READ_BREADCRUMB(dev_priv);
                }
 
-               if (iir & I915_USER_INTERRUPT) {
-                       u32 seqno =
-                               render_ring->get_gem_seqno(dev, render_ring);
-                       render_ring->irq_gem_seqno = seqno;
-                       trace_i915_gem_request_complete(dev, seqno);
-                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-                       dev_priv->hangcheck_count = 0;
-                       mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-               }
-
+               if (iir & I915_USER_INTERRUPT)
+                       notify_ring(dev, &dev_priv->render_ring);
                if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
-                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+                       notify_ring(dev, &dev_priv->bsd_ring);
 
                if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
                        intel_prepare_page_flip(dev, 0);
@@ -1065,7 +1063,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
                    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
                    (iir & I915_ASLE_INTERRUPT))
-                       opregion_asle_intr(dev);
+                       intel_opregion_asle_intr(dev);
 
                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
@@ -1207,18 +1205,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
-       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-       u32 pipeconf;
 
-       pipeconf = I915_READ(pipeconf_reg);
-       if (!(pipeconf & PIPEACONF_ENABLE))
+       if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;
 
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
                                            DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
-       else if (IS_I965G(dev))
+       else if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        else
@@ -1252,7 +1247,7 @@ void i915_enable_interrupt (struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (!HAS_PCH_SPLIT(dev))
-               opregion_enable_asle(dev);
+               intel_opregion_enable_asle(dev);
        dev_priv->irq_enabled = 1;
 }
 
@@ -1311,7 +1306,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
        return -EINVAL;
 }
 
-struct drm_i915_gem_request *
+static struct drm_i915_gem_request *
 i915_get_tail_request(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1331,11 +1326,7 @@ void i915_hangcheck_elapsed(unsigned long data)
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t acthd, instdone, instdone1;
 
-       /* No reset support on this chip yet. */
-       if (IS_GEN6(dev))
-               return;
-
-       if (!IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen < 4) {
                acthd = I915_READ(ACTHD);
                instdone = I915_READ(INSTDONE);
                instdone1 = 0;
@@ -1347,9 +1338,8 @@ void i915_hangcheck_elapsed(unsigned long data)
 
        /* If all work is done then ACTHD clearly hasn't advanced. */
        if (list_empty(&dev_priv->render_ring.request_list) ||
-               i915_seqno_passed(i915_get_gem_seqno(dev,
-                               &dev_priv->render_ring),
-                       i915_get_tail_request(dev)->seqno)) {
+               i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
+                                 i915_get_tail_request(dev)->seqno)) {
                bool missed_wakeup = false;
 
                dev_priv->hangcheck_count = 0;
@@ -1357,13 +1347,19 @@ void i915_hangcheck_elapsed(unsigned long data)
                /* Issue a wake-up to catch stuck h/w. */
                if (dev_priv->render_ring.waiting_gem_seqno &&
                    waitqueue_active(&dev_priv->render_ring.irq_queue)) {
-                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+                       wake_up_all(&dev_priv->render_ring.irq_queue);
                        missed_wakeup = true;
                }
 
                if (dev_priv->bsd_ring.waiting_gem_seqno &&
                    waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
-                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+                       wake_up_all(&dev_priv->bsd_ring.irq_queue);
+                       missed_wakeup = true;
+               }
+
+               if (dev_priv->blt_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
+                       wake_up_all(&dev_priv->blt_ring.irq_queue);
                        missed_wakeup = true;
                }
 
@@ -1377,6 +1373,21 @@ void i915_hangcheck_elapsed(unsigned long data)
            dev_priv->last_instdone1 == instdone1) {
                if (dev_priv->hangcheck_count++ > 1) {
                        DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+
+                       if (!IS_GEN2(dev)) {
+                               /* Is the chip hanging on a WAIT_FOR_EVENT?
+                                * If so we can simply poke the RB_WAIT bit
+                                * and break the hang. This should work on
+                                * all but the second generation chipsets.
+                                */
+                               u32 tmp = I915_READ(PRB0_CTL);
+                               if (tmp & RING_WAIT) {
+                                       I915_WRITE(PRB0_CTL, tmp);
+                                       POSTING_READ(PRB0_CTL);
+                                       goto out;
+                               }
+                       }
+
                        i915_handle_error(dev, true);
                        return;
                }
@@ -1388,8 +1399,10 @@ void i915_hangcheck_elapsed(unsigned long data)
                dev_priv->last_instdone1 = instdone1;
        }
 
+out:
        /* Reset the timer in case the chip hangs without another request being added */
-       mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+       mod_timer(&dev_priv->hangcheck_timer,
+                 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 }
 
 /* drm_dma.h hooks
@@ -1424,8 +1437,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
        u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
-       u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
-                          SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+       u32 hotplug_mask;
 
        dev_priv->irq_mask_reg = ~display_mask;
        dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
@@ -1436,20 +1448,35 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
        (void) I915_READ(DEIER);
 
-       /* Gen6 only needs render pipe_control now */
-       if (IS_GEN6(dev))
-               render_mask = GT_PIPE_NOTIFY;
+       if (IS_GEN6(dev)) {
+               render_mask =
+                       GT_PIPE_NOTIFY |
+                       GT_GEN6_BSD_USER_INTERRUPT |
+                       GT_BLT_USER_INTERRUPT;
+       }
 
        dev_priv->gt_irq_mask_reg = ~render_mask;
        dev_priv->gt_irq_enable_reg = render_mask;
 
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-       if (IS_GEN6(dev))
+       if (IS_GEN6(dev)) {
                I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
+               I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+               I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
+       }
+
        I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
        (void) I915_READ(GTIER);
 
+       if (HAS_PCH_CPT(dev)) {
+               hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
+                              SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
+       } else {
+               hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+                              SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+       }
+
        dev_priv->pch_irq_mask_reg = ~hotplug_mask;
        dev_priv->pch_irq_enable_reg = hotplug_mask;
 
@@ -1506,9 +1533,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
        u32 error_mask;
 
        DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
-
        if (HAS_BSD(dev))
                DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+       if (HAS_BLT(dev))
+               DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
 
        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
@@ -1578,7 +1606,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
        }
 
-       opregion_enable_asle(dev);
+       intel_opregion_enable_asle(dev);
 
        return 0;
 }
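The IRQ handler hunks above replace the open-coded per-ring wake-ups with a
common notify_ring() helper. A sketch of its likely shape, reconstructed from
the bookkeeping visible in these hunks (the authoritative body lives elsewhere
in i915_irq.c and may differ in detail):

    static void notify_ring(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
    {
            drm_i915_private_t *dev_priv = dev->dev_private;
            u32 seqno = ring->get_seqno(dev, ring); /* latest completed seqno */

            trace_i915_gem_request_complete(dev, seqno);
            wake_up_all(&ring->irq_queue);          /* unblock waiters on this ring */

            /* Completed work counts as progress, so rearm the hangcheck timer. */
            dev_priv->hangcheck_count = 0;
            mod_timer(&dev_priv->hangcheck_timer,
                      jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
    }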
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
deleted file mode 100644 (file)
index ea5d3fe..0000000
+++ /dev/null
@@ -1,562 +0,0 @@
-/*
- * Copyright 2008 Intel Corporation <hong.liu@intel.com>
- * Copyright 2008 Red Hat <mjg@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/acpi.h>
-#include <acpi/video.h>
-
-#include "drmP.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#define PCI_ASLE 0xe4
-#define PCI_LBPC 0xf4
-#define PCI_ASLS 0xfc
-
-#define OPREGION_SZ            (8*1024)
-#define OPREGION_HEADER_OFFSET 0
-#define OPREGION_ACPI_OFFSET   0x100
-#define OPREGION_SWSCI_OFFSET  0x200
-#define OPREGION_ASLE_OFFSET   0x300
-#define OPREGION_VBT_OFFSET    0x1000
-
-#define OPREGION_SIGNATURE "IntelGraphicsMem"
-#define MBOX_ACPI      (1<<0)
-#define MBOX_SWSCI     (1<<1)
-#define MBOX_ASLE      (1<<2)
-
-struct opregion_header {
-       u8 signature[16];
-       u32 size;
-       u32 opregion_ver;
-       u8 bios_ver[32];
-       u8 vbios_ver[16];
-       u8 driver_ver[16];
-       u32 mboxes;
-       u8 reserved[164];
-} __attribute__((packed));
-
-/* OpRegion mailbox #1: public ACPI methods */
-struct opregion_acpi {
-       u32 drdy;       /* driver readiness */
-       u32 csts;       /* notification status */
-       u32 cevt;       /* current event */
-       u8 rsvd1[20];
-       u32 didl[8];    /* supported display devices ID list */
-       u32 cpdl[8];    /* currently presented display list */
-       u32 cadl[8];    /* currently active display list */
-       u32 nadl[8];    /* next active devices list */
-       u32 aslp;       /* ASL sleep time-out */
-       u32 tidx;       /* toggle table index */
-       u32 chpd;       /* current hotplug enable indicator */
-       u32 clid;       /* current lid state*/
-       u32 cdck;       /* current docking state */
-       u32 sxsw;       /* Sx state resume */
-       u32 evts;       /* ASL supported events */
-       u32 cnot;       /* current OS notification */
-       u32 nrdy;       /* driver status */
-       u8 rsvd2[60];
-} __attribute__((packed));
-
-/* OpRegion mailbox #2: SWSCI */
-struct opregion_swsci {
-       u32 scic;       /* SWSCI command|status|data */
-       u32 parm;       /* command parameters */
-       u32 dslp;       /* driver sleep time-out */
-       u8 rsvd[244];
-} __attribute__((packed));
-
-/* OpRegion mailbox #3: ASLE */
-struct opregion_asle {
-       u32 ardy;       /* driver readiness */
-       u32 aslc;       /* ASLE interrupt command */
-       u32 tche;       /* technology enabled indicator */
-       u32 alsi;       /* current ALS illuminance reading */
-       u32 bclp;       /* backlight brightness to set */
-       u32 pfit;       /* panel fitting state */
-       u32 cblv;       /* current brightness level */
-       u16 bclm[20];   /* backlight level duty cycle mapping table */
-       u32 cpfm;       /* current panel fitting mode */
-       u32 epfm;       /* enabled panel fitting modes */
-       u8 plut[74];    /* panel LUT and identifier */
-       u32 pfmb;       /* PWM freq and min brightness */
-       u8 rsvd[102];
-} __attribute__((packed));
-
-/* ASLE irq request bits */
-#define ASLE_SET_ALS_ILLUM     (1 << 0)
-#define ASLE_SET_BACKLIGHT     (1 << 1)
-#define ASLE_SET_PFIT          (1 << 2)
-#define ASLE_SET_PWM_FREQ      (1 << 3)
-#define ASLE_REQ_MSK           0xf
-
-/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAILED  (1<<10)
-#define ASLE_BACKLIGHT_FAILED  (1<<12)
-#define ASLE_PFIT_FAILED       (1<<14)
-#define ASLE_PWM_FREQ_FAILED   (1<<16)
-
-/* ASLE backlight brightness to set */
-#define ASLE_BCLP_VALID                (1<<31)
-#define ASLE_BCLP_MSK          (~(1<<31))
-
-/* ASLE panel fitting request */
-#define ASLE_PFIT_VALID         (1<<31)
-#define ASLE_PFIT_CENTER (1<<0)
-#define ASLE_PFIT_STRETCH_TEXT (1<<1)
-#define ASLE_PFIT_STRETCH_GFX (1<<2)
-
-/* PWM frequency and minimum brightness */
-#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
-#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
-#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
-#define ASLE_PFMB_PWM_VALID (1<<31)
-
-#define ASLE_CBLV_VALID         (1<<31)
-
-#define ACPI_OTHER_OUTPUT (0<<8)
-#define ACPI_VGA_OUTPUT (1<<8)
-#define ACPI_TV_OUTPUT (2<<8)
-#define ACPI_DIGITAL_OUTPUT (3<<8)
-#define ACPI_LVDS_OUTPUT (4<<8)
-
-static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct opregion_asle *asle = dev_priv->opregion.asle;
-       u32 blc_pwm_ctl, blc_pwm_ctl2;
-       u32 max_backlight, level, shift;
-
-       if (!(bclp & ASLE_BCLP_VALID))
-               return ASLE_BACKLIGHT_FAILED;
-
-       bclp &= ASLE_BCLP_MSK;
-       if (bclp < 0 || bclp > 255)
-               return ASLE_BACKLIGHT_FAILED;
-
-       blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
-       blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
-
-       if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
-               pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
-       else {
-               if (IS_PINEVIEW(dev)) {
-                       blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
-                       max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 
-                                       BACKLIGHT_MODULATION_FREQ_SHIFT;
-                       shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
-               } else {
-                       blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
-                       max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 
-                                       BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
-                       shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
-               }
-               level = (bclp * max_backlight) / 255;
-               I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
-       }
-       asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-
-       return 0;
-}
-
-static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
-{
-       /* alsi is the current ALS reading in lux. 0 indicates below sensor
-          range, 0xffff indicates above sensor range. 1-0xfffe are valid */
-       return 0;
-}
-
-static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       if (pfmb & ASLE_PFMB_PWM_VALID) {
-               u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
-               u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
-               blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
-               pwm = pwm >> 9;
-               /* FIXME - what do we do with the PWM? */
-       }
-       return 0;
-}
-
-static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
-{
-       /* Panel fitting is currently controlled by the X code, so this is a
-          noop until modesetting support works fully */
-       if (!(pfit & ASLE_PFIT_VALID))
-               return ASLE_PFIT_FAILED;
-       return 0;
-}
-
-void opregion_asle_intr(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct opregion_asle *asle = dev_priv->opregion.asle;
-       u32 asle_stat = 0;
-       u32 asle_req;
-
-       if (!asle)
-               return;
-
-       asle_req = asle->aslc & ASLE_REQ_MSK;
-
-       if (!asle_req) {
-               DRM_DEBUG_DRIVER("non asle set request??\n");
-               return;
-       }
-
-       if (asle_req & ASLE_SET_ALS_ILLUM)
-               asle_stat |= asle_set_als_illum(dev, asle->alsi);
-
-       if (asle_req & ASLE_SET_BACKLIGHT)
-               asle_stat |= asle_set_backlight(dev, asle->bclp);
-
-       if (asle_req & ASLE_SET_PFIT)
-               asle_stat |= asle_set_pfit(dev, asle->pfit);
-
-       if (asle_req & ASLE_SET_PWM_FREQ)
-               asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
-
-       asle->aslc = asle_stat;
-}
-
-static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct opregion_asle *asle = dev_priv->opregion.asle;
-       u32 cpu_pwm_ctl, pch_pwm_ctl2;
-       u32 max_backlight, level;
-
-       if (!(bclp & ASLE_BCLP_VALID))
-               return ASLE_BACKLIGHT_FAILED;
-
-       bclp &= ASLE_BCLP_MSK;
-       if (bclp < 0 || bclp > 255)
-               return ASLE_BACKLIGHT_FAILED;
-
-       cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
-       pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
-       /* get the max PWM frequency */
-       max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
-       /* calculate the expected PWM frequency */
-       level = (bclp * max_backlight) / 255;
-       /* reserve the high 16 bits */
-       cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
-       /* write the updated PWM frequency */
-       I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
-
-       asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-
-       return 0;
-}
-
-void ironlake_opregion_gse_intr(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct opregion_asle *asle = dev_priv->opregion.asle;
-       u32 asle_stat = 0;
-       u32 asle_req;
-
-       if (!asle)
-               return;
-
-       asle_req = asle->aslc & ASLE_REQ_MSK;
-
-       if (!asle_req) {
-               DRM_DEBUG_DRIVER("non asle set request??\n");
-               return;
-       }
-
-       if (asle_req & ASLE_SET_ALS_ILLUM) {
-               DRM_DEBUG_DRIVER("Illum is not supported\n");
-               asle_stat |= ASLE_ALS_ILLUM_FAILED;
-       }
-
-       if (asle_req & ASLE_SET_BACKLIGHT)
-               asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
-
-       if (asle_req & ASLE_SET_PFIT) {
-               DRM_DEBUG_DRIVER("Pfit is not supported\n");
-               asle_stat |= ASLE_PFIT_FAILED;
-       }
-
-       if (asle_req & ASLE_SET_PWM_FREQ) {
-               DRM_DEBUG_DRIVER("PWM freq is not supported\n");
-               asle_stat |= ASLE_PWM_FREQ_FAILED;
-       }
-
-       asle->aslc = asle_stat;
-}
-#define ASLE_ALS_EN    (1<<0)
-#define ASLE_BLC_EN    (1<<1)
-#define ASLE_PFIT_EN   (1<<2)
-#define ASLE_PFMB_EN   (1<<3)
-
-void opregion_enable_asle(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct opregion_asle *asle = dev_priv->opregion.asle;
-
-       if (asle) {
-               if (IS_MOBILE(dev)) {
-                       unsigned long irqflags;
-
-                       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-                       intel_enable_asle(dev);
-                       spin_unlock_irqrestore(&dev_priv->user_irq_lock,
-                                              irqflags);
-               }
-
-               asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
-                       ASLE_PFMB_EN;
-               asle->ardy = 1;
-       }
-}
-
-#define ACPI_EV_DISPLAY_SWITCH (1<<0)
-#define ACPI_EV_LID            (1<<1)
-#define ACPI_EV_DOCK           (1<<2)
-
-static struct intel_opregion *system_opregion;
-
-static int intel_opregion_video_event(struct notifier_block *nb,
-                                     unsigned long val, void *data)
-{
-       /* The only video events relevant to opregion are 0x80. These indicate
-          either a docking event, lid switch or display switch request. In
-          Linux, these are handled by the dock, button and video drivers.
-          We might want to fix the video driver to be opregion-aware in
-          future, but right now we just indicate to the firmware that the
-          request has been handled */
-
-       struct opregion_acpi *acpi;
-
-       if (!system_opregion)
-               return NOTIFY_DONE;
-
-       acpi = system_opregion->acpi;
-       acpi->csts = 0;
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block intel_opregion_notifier = {
-       .notifier_call = intel_opregion_video_event,
-};
-
-/*
- * Initialise the DIDL field in opregion. This passes a list of devices to
- * the firmware. Values are defined by section B.4.2 of the ACPI specification
- * (version 3)
- */
-
-static void intel_didl_outputs(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_opregion *opregion = &dev_priv->opregion;
-       struct drm_connector *connector;
-       acpi_handle handle;
-       struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
-       unsigned long long device_id;
-       acpi_status status;
-       int i = 0;
-
-       handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
-       if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
-               return;
-
-       if (acpi_is_video_device(acpi_dev))
-               acpi_video_bus = acpi_dev;
-       else {
-               list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
-                       if (acpi_is_video_device(acpi_cdev)) {
-                               acpi_video_bus = acpi_cdev;
-                               break;
-                       }
-               }
-       }
-
-       if (!acpi_video_bus) {
-               printk(KERN_WARNING "No ACPI video bus found\n");
-               return;
-       }
-
-       list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
-               if (i >= 8) {
-                       dev_printk (KERN_ERR, &dev->pdev->dev,
-                                   "More than 8 outputs detected\n");
-                       return;
-               }
-               status =
-                       acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
-                                               NULL, &device_id);
-               if (ACPI_SUCCESS(status)) {
-                       if (!device_id)
-                               goto blind_set;
-                       opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
-                       i++;
-               }
-       }
-
-end:
-       /* If fewer than 8 outputs, the list must be null terminated */
-       if (i < 8)
-               opregion->acpi->didl[i] = 0;
-       return;
-
-blind_set:
-       i = 0;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               int output_type = ACPI_OTHER_OUTPUT;
-               if (i >= 8) {
-                       dev_printk (KERN_ERR, &dev->pdev->dev,
-                                   "More than 8 outputs detected\n");
-                       return;
-               }
-               switch (connector->connector_type) {
-               case DRM_MODE_CONNECTOR_VGA:
-               case DRM_MODE_CONNECTOR_DVIA:
-                       output_type = ACPI_VGA_OUTPUT;
-                       break;
-               case DRM_MODE_CONNECTOR_Composite:
-               case DRM_MODE_CONNECTOR_SVIDEO:
-               case DRM_MODE_CONNECTOR_Component:
-               case DRM_MODE_CONNECTOR_9PinDIN:
-                       output_type = ACPI_TV_OUTPUT;
-                       break;
-               case DRM_MODE_CONNECTOR_DVII:
-               case DRM_MODE_CONNECTOR_DVID:
-               case DRM_MODE_CONNECTOR_DisplayPort:
-               case DRM_MODE_CONNECTOR_HDMIA:
-               case DRM_MODE_CONNECTOR_HDMIB:
-                       output_type = ACPI_DIGITAL_OUTPUT;
-                       break;
-               case DRM_MODE_CONNECTOR_LVDS:
-                       output_type = ACPI_LVDS_OUTPUT;
-                       break;
-               }
-               opregion->acpi->didl[i] |= (1<<31) | output_type | i;
-               i++;
-       }
-       goto end;
-}
-
-int intel_opregion_init(struct drm_device *dev, int resume)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_opregion *opregion = &dev_priv->opregion;
-       void *base;
-       u32 asls, mboxes;
-       int err = 0;
-
-       pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
-       DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
-       if (asls == 0) {
-               DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
-               return -ENOTSUPP;
-       }
-
-       base = ioremap(asls, OPREGION_SZ);
-       if (!base)
-               return -ENOMEM;
-
-       opregion->header = base;
-       if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
-               DRM_DEBUG_DRIVER("opregion signature mismatch\n");
-               err = -EINVAL;
-               goto err_out;
-       }
-
-       mboxes = opregion->header->mboxes;
-       if (mboxes & MBOX_ACPI) {
-               DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
-               opregion->acpi = base + OPREGION_ACPI_OFFSET;
-               if (drm_core_check_feature(dev, DRIVER_MODESET))
-                       intel_didl_outputs(dev);
-       } else {
-               DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
-               err = -ENOTSUPP;
-               goto err_out;
-       }
-       opregion->enabled = 1;
-
-       if (mboxes & MBOX_SWSCI) {
-               DRM_DEBUG_DRIVER("SWSCI supported\n");
-               opregion->swsci = base + OPREGION_SWSCI_OFFSET;
-       }
-       if (mboxes & MBOX_ASLE) {
-               DRM_DEBUG_DRIVER("ASLE supported\n");
-               opregion->asle = base + OPREGION_ASLE_OFFSET;
-               opregion_enable_asle(dev);
-       }
-
-       if (!resume)
-               acpi_video_register();
-
-
-       /* Notify BIOS we are ready to handle ACPI video ext notifs.
-        * Right now, all the events are handled by the ACPI video module.
-        * We don't actually need to do anything with them. */
-       opregion->acpi->csts = 0;
-       opregion->acpi->drdy = 1;
-
-       system_opregion = opregion;
-       register_acpi_notifier(&intel_opregion_notifier);
-
-       return 0;
-
-err_out:
-       iounmap(opregion->header);
-       opregion->header = NULL;
-       acpi_video_register();
-       return err;
-}
-
-void intel_opregion_free(struct drm_device *dev, int suspend)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_opregion *opregion = &dev_priv->opregion;
-
-       if (!opregion->enabled)
-               return;
-
-       if (!suspend)
-               acpi_video_unregister();
-
-       opregion->acpi->drdy = 0;
-
-       system_opregion = NULL;
-       unregister_acpi_notifier(&intel_opregion_notifier);
-
-       /* just clear all opregion memory pointers now */
-       iounmap(opregion->header);
-       opregion->header = NULL;
-       opregion->acpi = NULL;
-       opregion->swsci = NULL;
-       opregion->asle = NULL;
-
-       opregion->enabled = 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f5e15577e89e3e6f7005cd92f9f87ab0636b4eb..25ed911a31127256b753970d2b2fca22d67f755d 100644 (file)
 #ifndef _I915_REG_H_
 #define _I915_REG_H_
 
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
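The new _PIPE() macro linearly interpolates between the pipe A and pipe B
register addresses, so a single definition serves both pipes. For example,
with the DPLL addresses defined later in this file (DPLL_A at 0x06014,
DPLL_B at 0x06018):

    _PIPE(0, DPLL_A, DPLL_B) == DPLL_A                        /* 0x06014 */
    _PIPE(1, DPLL_A, DPLL_B) == DPLL_A + (DPLL_B - DPLL_A)    /* 0x06018 == DPLL_B */

so DPLL(pipe), PIPECONF(pipe) and the other per-pipe lookups added below
resolve to the correct register for either pipe.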
 /*
  * The Bridge device's PCI config space has information about the
  * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
  */
 #define INTEL_GMCH_CTRL                0x52
 #define INTEL_GMCH_VGA_DISABLE  (1 << 1)
-#define INTEL_GMCH_ENABLED     0x4
-#define INTEL_GMCH_MEM_MASK    0x1
-#define INTEL_GMCH_MEM_64M     0x1
-#define INTEL_GMCH_MEM_128M    0
-
-#define INTEL_GMCH_GMS_MASK            (0xf << 4)
-#define INTEL_855_GMCH_GMS_DISABLED    (0x0 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_1M   (0x1 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_4M   (0x2 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_8M   (0x3 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_16M  (0x4 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_32M  (0x5 << 4)
-
-#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define INTEL_GMCH_GMS_STOLEN_128M     (0x8 << 4)
-#define INTEL_GMCH_GMS_STOLEN_256M     (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M      (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M     (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M     (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M     (0xd << 4)
-
-#define SNB_GMCH_CTRL  0x50
-#define SNB_GMCH_GMS_STOLEN_MASK       0xF8
-#define SNB_GMCH_GMS_STOLEN_32M                (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M                (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M                (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M       (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M       (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M       (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M       (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M       (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M       (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M       (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M       (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M       (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M       (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M       (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M       (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M       (0x10 << 3)
 
 /* PCI config space */
 
 #define   I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
 #define   I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
 #define LBB    0xf4
-#define GDRST 0xc0
-#define  GDRST_FULL    (0<<2)
-#define  GDRST_RENDER  (1<<2)
-#define  GDRST_MEDIA   (3<<2)
+
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define  GRDOM_FULL    (0<<2)
+#define  GRDOM_RENDER  (1<<2)
+#define  GRDOM_MEDIA   (3<<2)
 
 /* VGA stuff */
 
 #define MI_STORE_DWORD_INDEX   MI_INSTR(0x21, 1)
 #define   MI_STORE_DWORD_INDEX_SHIFT 2
 #define MI_LOAD_REGISTER_IMM   MI_INSTR(0x22, 1)
+#define MI_FLUSH_DW            MI_INSTR(0x26, 2) /* for GEN6 */
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE  (1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
 #define MI_BATCH_BUFFER_START  MI_INSTR(0x31, 0)
-
 /*
  * 3D instructions used by the kernel
  */
 #define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
 #define   PIPE_CONTROL_STALL_EN        (1<<1) /* in addr word, Ironlake+ only */
 
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830               0x6070
+#define  DEBUG_RESET_FULL              (1<<7)
+#define  DEBUG_RESET_RENDER            (1<<8)
+#define  DEBUG_RESET_DISPLAY           (1<<9)
+
+
 /*
  * Fence registers
  */
 #define PRB0_HEAD      0x02034
 #define PRB0_START     0x02038
 #define PRB0_CTL       0x0203c
+#define RENDER_RING_BASE       0x02000
+#define BSD_RING_BASE          0x04000
+#define GEN6_BSD_RING_BASE     0x12000
+#define BLT_RING_BASE          0x22000
+#define RING_TAIL(base)                ((base)+0x30)
+#define RING_HEAD(base)                ((base)+0x34)
+#define RING_START(base)       ((base)+0x38)
+#define RING_CTL(base)         ((base)+0x3c)
+#define RING_HWS_PGA(base)     ((base)+0x80)
+#define RING_HWS_PGA_GEN6(base)        ((base)+0x2080)
+#define RING_ACTHD(base)       ((base)+0x74)
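These RING_*(base) macros parametrize the classic ring-buffer registers by
ring base, so one set of definitions covers the render, BSD, and blitter
rings. Illustrative expansions using the bases defined above:

    RING_CTL(RENDER_RING_BASE)    /* 0x02000 + 0x3c = 0x0203c, i.e. PRB0_CTL */
    RING_TAIL(GEN6_BSD_RING_BASE) /* 0x12000 + 0x30 = 0x12030 */
    RING_HEAD(BLT_RING_BASE)      /* 0x22000 + 0x34 = 0x22034 */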
 #define   TAIL_ADDR            0x001FFFF8
 #define   HEAD_WRAP_COUNT      0xFFE00000
 #define   HEAD_WRAP_ONE                0x00200000
 #define   RING_VALID_MASK      0x00000001
 #define   RING_VALID           0x00000001
 #define   RING_INVALID         0x00000000
+#define   RING_WAIT_I8XX       (1<<0) /* gen2, PRBx_HEAD */
+#define   RING_WAIT            (1<<11) /* gen3+, PRBx_CTL */
 #define PRB1_TAIL      0x02040 /* 915+ only */
 #define PRB1_HEAD      0x02044 /* 915+ only */
 #define PRB1_START     0x02048 /* 915+ only */
 #define INSTDONE1      0x0207c /* 965+ only */
 #define ACTHD_I965     0x02074
 #define HWS_PGA                0x02080
-#define HWS_PGA_GEN6   0x04080
 #define HWS_ADDRESS_MASK       0xfffff000
 #define HWS_START_ADDRESS_SHIFT        4
 #define PWRCTXA                0x2088 /* 965GM+ only */
 #define   GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR     (1 << 25)
 #define   GEN6_BLITTER_SYNC_STATUS                     (1 << 24)
 #define   GEN6_BLITTER_USER_INTERRUPT                  (1 << 22)
-/*
- * BSD (bit stream decoder instruction and interrupt control register defines
- * (G4X and Ironlake only)
- */
 
-#define BSD_RING_TAIL          0x04030
-#define BSD_RING_HEAD          0x04034
-#define BSD_RING_START         0x04038
-#define BSD_RING_CTL           0x0403c
-#define BSD_RING_ACTHD         0x04074
-#define BSD_HWS_PGA            0x04080
+#define GEN6_BSD_SLEEP_PSMI_CONTROL    0x12050
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK      (1 << 16)
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE          (1 << 0)
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE           0
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR                   (1 << 3)
+
+#define GEN6_BSD_IMR                   0x120a8
+#define   GEN6_BSD_IMR_USER_INTERRUPT  (1 << 12)
+
+#define GEN6_BSD_RNCID                 0x12198
 
 /*
  * Framebuffer compression (915+ only)
 # define GPIO_DATA_VAL_IN              (1 << 12)
 # define GPIO_DATA_PULLUP_DISABLE      (1 << 13)
 
-#define GMBUS0                 0x5100
-#define GMBUS1                 0x5104
-#define GMBUS2                 0x5108
-#define GMBUS3                 0x510c
-#define GMBUS4                 0x5110
-#define GMBUS5                 0x5120
+#define GMBUS0                 0x5100 /* clock/port select */
+#define   GMBUS_RATE_100KHZ    (0<<8)
+#define   GMBUS_RATE_50KHZ     (1<<8)
+#define   GMBUS_RATE_400KHZ    (2<<8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ      (3<<8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT       (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_PORT_DISABLED  0
+#define   GMBUS_PORT_SSC       1
+#define   GMBUS_PORT_VGADDC    2
+#define   GMBUS_PORT_PANEL     3
+#define   GMBUS_PORT_DPC       4 /* HDMIC */
+#define   GMBUS_PORT_DPB       5 /* SDVO, HDMIB */
+                                 /* 6 reserved */
+#define   GMBUS_PORT_DPD       7 /* HDMID */
+#define   GMBUS_NUM_PORTS       8
+#define GMBUS1                 0x5104 /* command/status */
+#define   GMBUS_SW_CLR_INT     (1<<31)
+#define   GMBUS_SW_RDY         (1<<30)
+#define   GMBUS_ENT            (1<<29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE     (0<<25)
+#define   GMBUS_CYCLE_WAIT     (1<<25)
+#define   GMBUS_CYCLE_INDEX    (2<<25)
+#define   GMBUS_CYCLE_STOP     (4<<25)
+#define   GMBUS_BYTE_COUNT_SHIFT 16
+#define   GMBUS_SLAVE_INDEX_SHIFT 8
+#define   GMBUS_SLAVE_ADDR_SHIFT 1
+#define   GMBUS_SLAVE_READ     (1<<0)
+#define   GMBUS_SLAVE_WRITE    (0<<0)
+#define GMBUS2                 0x5108 /* status */
+#define   GMBUS_INUSE          (1<<15)
+#define   GMBUS_HW_WAIT_PHASE  (1<<14)
+#define   GMBUS_STALL_TIMEOUT  (1<<13)
+#define   GMBUS_INT            (1<<12)
+#define   GMBUS_HW_RDY         (1<<11)
+#define   GMBUS_SATOER         (1<<10)
+#define   GMBUS_ACTIVE         (1<<9)
+#define GMBUS3                 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4                 0x5110 /* interrupt mask (Pineview+) */
+#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define   GMBUS_NAK_EN         (1<<3)
+#define   GMBUS_IDLE_EN                (1<<2)
+#define   GMBUS_HW_WAIT_EN     (1<<1)
+#define   GMBUS_HW_RDY_EN      (1<<0)
+#define GMBUS5                 0x5120 /* byte index */
+#define   GMBUS_2BYTE_INDEX_EN (1<<31)
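The annotations added to the GMBUS registers above document the fields of the
hardware i2c (GMBUS) engine. As an illustration of how they compose, a
hypothetical command sequence for a 2-byte read from a DDC slave at address
0x50 on the VGA DDC port (not taken from this patch):

    u32 gmbus0 = GMBUS_RATE_100KHZ | GMBUS_PORT_VGADDC;    /* clock/port select */
    u32 gmbus1 = GMBUS_SW_RDY | GMBUS_CYCLE_WAIT | GMBUS_CYCLE_STOP |
                 (2 << GMBUS_BYTE_COUNT_SHIFT) |
                 (0x50 << GMBUS_SLAVE_ADDR_SHIFT) |
                 GMBUS_SLAVE_READ;                         /* command/status */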
 
 /*
  * Clock control & power management
 #define   VGA1_PD_P1_MASK      (0x1f << 8)
 #define DPLL_A 0x06014
 #define DPLL_B 0x06018
+#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
 #define   DPLL_VCO_ENABLE              (1 << 31)
 #define   DPLL_DVO_HIGH_SPEED          (1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE         (1 << 29)
 #define LVDS                   0x61180
 #define LVDS_ON                        (1<<31)
 
-#define ADPA                   0x61100
-#define ADPA_DPMS_MASK         (~(3<<10))
-#define ADPA_DPMS_ON           (0<<10)
-#define ADPA_DPMS_SUSPEND      (1<<10)
-#define ADPA_DPMS_STANDBY      (2<<10)
-#define ADPA_DPMS_OFF          (3<<10)
-
-#define RING_TAIL              0x00
-#define TAIL_ADDR              0x001FFFF8
-#define RING_HEAD              0x04
-#define HEAD_WRAP_COUNT                0xFFE00000
-#define HEAD_WRAP_ONE          0x00200000
-#define HEAD_ADDR              0x001FFFFC
-#define RING_START             0x08
-#define START_ADDR             0xFFFFF000
-#define RING_LEN               0x0C
-#define RING_NR_PAGES          0x001FF000
-#define RING_REPORT_MASK       0x00000006
-#define RING_REPORT_64K                0x00000002
-#define RING_REPORT_128K       0x00000004
-#define RING_NO_REPORT         0x00000000
-#define RING_VALID_MASK                0x00000001
-#define RING_VALID             0x00000001
-#define RING_INVALID           0x00000000
-
 /* Scratch pad debug 0 reg:
  */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_I830     0x001f0000
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK      0x0000003f
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT     0
 #define DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
 #define FPA0   0x06040
 #define FPA1   0x06044
 #define FPB0   0x06048
 #define FPB1   0x0604c
+#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
+#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
 #define   FP_N_DIV_MASK                0x003f0000
 #define   FP_N_PINEVIEW_DIV_MASK       0x00ff0000
 #define   FP_N_DIV_SHIFT               16
 #define   DPLLA_TEST_M_BYPASS          (1 << 2)
 #define   DPLLA_INPUT_BUFFER_ENABLE    (1 << 0)
 #define D_STATE                0x6104
+#define  DSTATE_GFX_RESET_I830                 (1<<6)
 #define  DSTATE_PLL_D3_OFF                     (1<<3)
 #define  DSTATE_GFX_CLOCK_GATING               (1<<1)
 #define  DSTATE_DOT_CLOCK_GATING               (1<<0)
 #define CLKCFG_MEM_800                                 (3 << 4)
 #define CLKCFG_MEM_MASK                                        (7 << 4)
 
+#define TSC1                   0x11001
+#define   TSE                  (1<<0)
 #define TR1                    0x11006
 #define TSFS                   0x11020
 #define   TSFS_SLOPE_MASK      0x0000ff00
 #define   MEMSTAT_SRC_CTL_STDBY 3
 #define RCPREVBSYTUPAVG                0x113b8
 #define RCPREVBSYTDNAVG                0x113bc
+#define PMMISC                 0x11214
+#define   MCPPCE_EN            (1<<0) /* enable PM_MSG from PCH->MPC */
 #define SDEW                   0x1124c
 #define CSIEW0                 0x11250
 #define CSIEW1                 0x11254
 #define PIPEBSRC       0x6101c
 #define BCLRPAT_B      0x61020
 
+#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
+#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
+
 /* VGA port control */
 #define ADPA                   0x61100
 #define   ADPA_DAC_ENABLE      (1<<31)
 #define   ADPA_DPMS_STANDBY    (2<<10)
 #define   ADPA_DPMS_OFF                (3<<10)
 
+
 /* Hotplug control (945+ only) */
 #define PORT_HOTPLUG_EN                0x61110
 #define   HDMIB_HOTPLUG_INT_EN                 (1 << 29)
 #define   LVDS_B0B3_POWER_DOWN         (0 << 2)
 #define   LVDS_B0B3_POWER_UP           (3 << 2)
 
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA         0x61178
+#define VIDEO_DIP_CTL          0x61170
+#define   VIDEO_DIP_ENABLE             (1 << 31)
+#define   VIDEO_DIP_PORT_B             (1 << 29)
+#define   VIDEO_DIP_PORT_C             (2 << 29)
+#define   VIDEO_DIP_ENABLE_AVI         (1 << 21)
+#define   VIDEO_DIP_ENABLE_VENDOR      (2 << 21)
+#define   VIDEO_DIP_ENABLE_SPD         (8 << 21)
+#define   VIDEO_DIP_SELECT_AVI         (0 << 19)
+#define   VIDEO_DIP_SELECT_VENDOR      (1 << 19)
+#define   VIDEO_DIP_SELECT_SPD         (3 << 19)
+#define   VIDEO_DIP_FREQ_ONCE          (0 << 16)
+#define   VIDEO_DIP_FREQ_VSYNC         (1 << 16)
+#define   VIDEO_DIP_FREQ_2VSYNC                (2 << 16)
+
 /* Panel power sequencing */
 #define PP_STATUS      0x61200
 #define   PP_ON                (1 << 31)
 #define   PP_SEQUENCE_ON       (1 << 28)
 #define   PP_SEQUENCE_OFF      (2 << 28)
 #define   PP_SEQUENCE_MASK     0x30000000
+#define   PP_CYCLE_DELAY_ACTIVE        (1 << 27)
+#define   PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
+#define   PP_SEQUENCE_STATE_MASK 0x0000000f
 #define PP_CONTROL     0x61204
 #define   POWER_TARGET_ON      (1 << 0)
 #define PP_ON_DELAYS   0x61208
 # define TV_TEST_MODE_MASK             (7 << 0)
 
 #define TV_DAC                 0x68004
+# define TV_DAC_SAVE           0x00ffff00
 /**
  * Reports that DAC state change logic has reported change (RO).
  *
 
 /* Display & cursor control */
 
-/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER             (1 << 4)
-#define PIPE_DITHER_TYPE_MASK          (3 << 2)
-#define PIPE_DITHER_TYPE_SPATIAL       (0 << 2)
-#define PIPE_DITHER_TYPE_ST01          (1 << 2)
 /* Pipe A */
 #define PIPEADSL               0x70000
-#define   DSL_LINEMASK         0x00000fff
+#define   DSL_LINEMASK         0x00000fff
 #define PIPEACONF              0x70008
-#define   PIPEACONF_ENABLE     (1<<31)
-#define   PIPEACONF_DISABLE    0
-#define   PIPEACONF_DOUBLE_WIDE        (1<<30)
+#define   PIPECONF_ENABLE      (1<<31)
+#define   PIPECONF_DISABLE     0
+#define   PIPECONF_DOUBLE_WIDE (1<<30)
 #define   I965_PIPECONF_ACTIVE (1<<30)
-#define   PIPEACONF_SINGLE_WIDE        0
-#define   PIPEACONF_PIPE_UNLOCKED 0
-#define   PIPEACONF_PIPE_LOCKED        (1<<25)
-#define   PIPEACONF_PALETTE    0
-#define   PIPEACONF_GAMMA              (1<<24)
+#define   PIPECONF_SINGLE_WIDE 0
+#define   PIPECONF_PIPE_UNLOCKED 0
+#define   PIPECONF_PIPE_LOCKED (1<<25)
+#define   PIPECONF_PALETTE     0
+#define   PIPECONF_GAMMA               (1<<24)
 #define   PIPECONF_FORCE_BORDER        (1<<25)
 #define   PIPECONF_PROGRESSIVE (0 << 21)
 #define   PIPECONF_INTERLACE_W_FIELD_INDICATION        (6 << 21)
 #define   PIPECONF_INTERLACE_FIELD_0_ONLY              (7 << 21)
 #define   PIPECONF_CXSR_DOWNCLOCK      (1<<16)
+#define   PIPECONF_BPP_MASK    (0x000000e0)
+#define   PIPECONF_BPP_8       (0<<5)
+#define   PIPECONF_BPP_10      (1<<5)
+#define   PIPECONF_BPP_6       (2<<5)
+#define   PIPECONF_BPP_12      (3<<5)
+#define   PIPECONF_DITHER_EN   (1<<4)
+#define   PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define   PIPECONF_DITHER_TYPE_SP (0<<2)
+#define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
 #define PIPEASTAT              0x70024
 #define   PIPE_FIFO_UNDERRUN_STATUS            (1UL<<31)
 #define   PIPE_CRC_ERROR_ENABLE                        (1UL<<29)
 #define   PIPE_START_VBLANK_INTERRUPT_STATUS   (1UL<<2) /* 965 or later */
 #define   PIPE_VBLANK_INTERRUPT_STATUS         (1UL<<1)
 #define   PIPE_OVERLAY_UPDATED_STATUS          (1UL<<0)
-#define   PIPE_BPC_MASK                        (7 << 5) /* Ironlake */
+#define   PIPE_BPC_MASK                                (7 << 5) /* Ironlake */
 #define   PIPE_8BPC                            (0 << 5)
 #define   PIPE_10BPC                           (1 << 5)
 #define   PIPE_6BPC                            (2 << 5)
 #define   PIPE_12BPC                           (3 << 5)
 
+#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
+#define PIPEDSL(pipe)  _PIPE(pipe, PIPEADSL, PIPEBDSL)
+
 #define DSPARB                 0x70030
 #define   DSPARB_CSTART_MASK   (0x7f << 7)
 #define   DSPARB_CSTART_SHIFT  7
 #define  WM1_LP_SR_EN          (1<<31)
 #define  WM1_LP_LATENCY_SHIFT  24
 #define  WM1_LP_LATENCY_MASK   (0x7f<<24)
-#define  WM1_LP_FBC_LP1_MASK   (0xf<<20)
-#define  WM1_LP_FBC_LP1_SHIFT  20
+#define  WM1_LP_FBC_MASK       (0xf<<20)
+#define  WM1_LP_FBC_SHIFT      20
 #define  WM1_LP_SR_MASK                (0x1ff<<8)
 #define  WM1_LP_SR_SHIFT       8
 #define  WM1_LP_CURSOR_MASK    (0x3f)
 #define DSPASURF               0x7019C /* 965+ only */
 #define DSPATILEOFF            0x701A4 /* 965+ only */
 
+#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
+
 /* VBIOS flags */
 #define SWF00                  0x71410
 #define SWF01                  0x71414
 #define  RR_HW_HIGH_POWER_FRAMES_MASK   0xff00
 
 #define FDI_PLL_BIOS_0  0x46000
+#define  FDI_PLL_FB_CLOCK_MASK  0xff
 #define FDI_PLL_BIOS_1  0x46004
 #define FDI_PLL_BIOS_2  0x46008
 #define DISPLAY_PORT_PLL_BIOS_0         0x4600c
 #define PIPEA_DATA_M1           0x60030
 #define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
 #define  TU_SIZE_MASK           0x7e000000
-#define  PIPEA_DATA_M1_OFFSET   0
+#define  PIPE_DATA_M1_OFFSET    0
 #define PIPEA_DATA_N1           0x60034
-#define  PIPEA_DATA_N1_OFFSET   0
+#define  PIPE_DATA_N1_OFFSET    0
 
 #define PIPEA_DATA_M2           0x60038
-#define  PIPEA_DATA_M2_OFFSET   0
+#define  PIPE_DATA_M2_OFFSET    0
 #define PIPEA_DATA_N2           0x6003c
-#define  PIPEA_DATA_N2_OFFSET   0
+#define  PIPE_DATA_N2_OFFSET    0
 
 #define PIPEA_LINK_M1           0x60040
-#define  PIPEA_LINK_M1_OFFSET   0
+#define  PIPE_LINK_M1_OFFSET    0
 #define PIPEA_LINK_N1           0x60044
-#define  PIPEA_LINK_N1_OFFSET   0
+#define  PIPE_LINK_N1_OFFSET    0
 
 #define PIPEA_LINK_M2           0x60048
-#define  PIPEA_LINK_M2_OFFSET   0
+#define  PIPE_LINK_M2_OFFSET    0
 #define PIPEA_LINK_N2           0x6004c
-#define  PIPEA_LINK_N2_OFFSET   0
+#define  PIPE_LINK_N2_OFFSET    0
 
 /* PIPEB timing regs are same start from 0x61000 */
 
 #define PIPEB_DATA_M1           0x61030
-#define  PIPEB_DATA_M1_OFFSET   0
 #define PIPEB_DATA_N1           0x61034
-#define  PIPEB_DATA_N1_OFFSET   0
 
 #define PIPEB_DATA_M2           0x61038
-#define  PIPEB_DATA_M2_OFFSET   0
 #define PIPEB_DATA_N2           0x6103c
-#define  PIPEB_DATA_N2_OFFSET   0
 
 #define PIPEB_LINK_M1           0x61040
-#define  PIPEB_LINK_M1_OFFSET   0
 #define PIPEB_LINK_N1           0x61044
-#define  PIPEB_LINK_N1_OFFSET   0
 
 #define PIPEB_LINK_M2           0x61048
-#define  PIPEB_LINK_M2_OFFSET   0
 #define PIPEB_LINK_N2           0x6104c
-#define  PIPEB_LINK_N2_OFFSET   0
+
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
 
 /* CPU panel fitter */
 #define PFA_CTL_1               0x68080
 #define GT_SYNC_STATUS          (1 << 2)
 #define GT_USER_INTERRUPT       (1 << 0)
 #define GT_BSD_USER_INTERRUPT   (1 << 5)
-
+#define GT_GEN6_BSD_USER_INTERRUPT     (1 << 12)
+#define GT_BLT_USER_INTERRUPT  (1 << 22)
 
 #define GTISR   0x44010
 #define GTIMR   0x44014
 #define SDE_PORTD_HOTPLUG_CPT  (1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT  (1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT  (1 << 21)
+#define SDE_HOTPLUG_MASK_CPT   (SDE_CRT_HOTPLUG_CPT |          \
+                                SDE_PORTD_HOTPLUG_CPT |        \
+                                SDE_PORTC_HOTPLUG_CPT |        \
+                                SDE_PORTB_HOTPLUG_CPT)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
 
 #define PCH_DPLL_A              0xc6014
 #define PCH_DPLL_B              0xc6018
+#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
 
 #define PCH_FPA0                0xc6040
 #define PCH_FPA1                0xc6044
 #define PCH_FPB0                0xc6048
 #define PCH_FPB1                0xc604c
+#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
 
 #define PCH_DPLL_TEST           0xc606c
 
 #define TRANS_VBLANK_B          0xe1010
 #define TRANS_VSYNC_B           0xe1014
 
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
+
 #define TRANSB_DATA_M1          0xe1030
 #define TRANSB_DATA_N1          0xe1034
 #define TRANSB_DATA_M2          0xe1038
 
 #define TRANSACONF              0xf0008
 #define TRANSBCONF              0xf1008
+#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
 #define  TRANS_DISABLE          (0<<31)
 #define  TRANS_ENABLE           (1<<31)
 #define  TRANS_STATE_MASK       (1<<30)
 #define FDI_RXA_CHICKEN         0xc200c
 #define FDI_RXB_CHICKEN         0xc2010
 #define  FDI_RX_PHASE_SYNC_POINTER_ENABLE       (1)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
+
+#define SOUTH_DSPCLK_GATE_D    0xc2020
+#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
 
 /* CPU: FDI_TX */
 #define FDI_TXA_CTL             0x60100
 #define FDI_TXB_CTL             0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
 #define  FDI_TX_DISABLE         (0<<31)
 #define  FDI_TX_ENABLE          (1<<31)
 #define  FDI_LINK_TRAIN_PATTERN_1       (0<<28)
 /* FDI_RX, FDI_X is hard-wired to Transcoder_X */
 #define FDI_RXA_CTL             0xf000c
 #define FDI_RXB_CTL             0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
 #define  FDI_RX_ENABLE          (1<<31)
-#define  FDI_RX_DISABLE         (0<<31)
 /* train, dp width same as FDI_TX */
 #define  FDI_DP_PORT_WIDTH_X8           (7<<19)
 #define  FDI_8BPC                       (0<<16)
 #define  FDI_FS_ERR_REPORT_ENABLE       (1<<9)
 #define  FDI_FE_ERR_REPORT_ENABLE       (1<<8)
 #define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
-#define  FDI_SEL_RAWCLK                 (0<<4)
-#define  FDI_SEL_PCDCLK                 (1<<4)
+#define  FDI_PCDCLK                    (1<<4)
 /* CPT */
 #define  FDI_AUTO_TRAINING                     (1<<10)
 #define  FDI_LINK_TRAIN_PATTERN_1_CPT          (0<<8)
 #define FDI_RXA_TUSIZE2         0xf0038
 #define FDI_RXB_TUSIZE1         0xf1030
 #define FDI_RXB_TUSIZE2         0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
 
 /* FDI_RX interrupt register format */
 #define FDI_RX_INTER_LANE_ALIGN         (1<<10)
 #define FDI_RXA_IMR             0xf0018
 #define FDI_RXB_IIR             0xf1014
 #define FDI_RXB_IMR             0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
 
 #define FDI_PLL_CTL_1           0xfe000
 #define FDI_PLL_CTL_2           0xfe004
 #define TRANS_DP_CTL_A         0xe0300
 #define TRANS_DP_CTL_B         0xe1300
 #define TRANS_DP_CTL_C         0xe2300
+#define TRANS_DP_CTL(pipe)     (TRANS_DP_CTL_A + (pipe) * 0x01000)
 #define  TRANS_DP_OUTPUT_ENABLE        (1<<31)
 #define  TRANS_DP_PORT_SEL_B   (0<<29)
 #define  TRANS_DP_PORT_SEL_C   (1<<29)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 31f08581e93a46dbdc2ca1cfa563faec31e9ffc5..989c19d2d959b6bc6c54a0e4bf31d9aee093efe6 100644 (file)
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
                dev_priv->saveFPA1 = I915_READ(FPA1);
                dev_priv->saveDPLL_A = I915_READ(DPLL_A);
        }
-       if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+       if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
                dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
        dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
        dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
        dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
        dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
        dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                dev_priv->saveDSPASURF = I915_READ(DSPASURF);
                dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
        }
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
                dev_priv->saveFPB1 = I915_READ(FPB1);
                dev_priv->saveDPLL_B = I915_READ(DPLL_B);
        }
-       if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+       if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
                dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
        dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
        dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
        dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
        dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
        dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
-       if (IS_I965GM(dev) || IS_GM45(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
                dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
        }
@@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
        POSTING_READ(dpll_a_reg);
        udelay(150);
-       if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
                I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
                POSTING_READ(DPLL_A_MD);
        }
@@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
        I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
        I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
                I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
        }
@@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
        POSTING_READ(dpll_b_reg);
        udelay(150);
-       if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
                I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
                POSTING_READ(DPLL_B_MD);
        }
@@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
        I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
        I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
                I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
        }
@@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev)
        dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
        dev_priv->saveCURBPOS = I915_READ(CURBPOS);
        dev_priv->saveCURBBASE = I915_READ(CURBBASE);
-       if (!IS_I9XX(dev))
+       if (IS_GEN2(dev))
                dev_priv->saveCURSIZE = I915_READ(CURSIZE);
 
        /* CRT state */
@@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev)
                dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
                dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
                dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
-               if (IS_I965G(dev))
+               if (INTEL_INFO(dev)->gen >= 4)
                        dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
                if (IS_MOBILE(dev) && !IS_I830(dev))
                        dev_priv->saveLVDS = I915_READ(LVDS);
@@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev)
        I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
        I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
        I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
-       if (!IS_I9XX(dev))
+       if (IS_GEN2(dev))
                I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
 
        /* CRT state */
@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
                I915_WRITE(ADPA, dev_priv->saveADPA);
 
        /* LVDS state */
-       if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+       if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
                I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
 
        if (HAS_PCH_SPLIT(dev)) {
@@ -878,9 +878,7 @@ int i915_restore_state(struct drm_device *dev)
        for (i = 0; i < 3; i++)
                I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
 
-       /* I2C state */
-       intel_i2c_reset_gmbus(dev);
+       intel_i2c_reset(dev);
 
        return 0;
 }
-
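
Note on the i915_suspend.c hunks above: they all apply one conversion. The chipset-specific IS_I965G()/IS_I965GM()/IS_GM45() macros give way to a comparison on the generation number kept in the static chip info, and !IS_I9XX() becomes IS_GEN2() (i9xx means gen 3 and later, so its negation is exactly gen 2). A minimal sketch of the idiom, assuming the usual i915 gen numbering (2 = i8xx, 3 = i915/i945, 4 = i965/G4x, 5 = Ironlake, 6 = Sandy Bridge):

    /* Sketch: capability checks keyed off the device generation rather
     * than per-chipset macros; INTEL_INFO() is the accessor the hunks use. */
    if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
            dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); /* 965+ register */

    if (IS_GEN2(dev))           /* formerly !IS_I9XX(dev) */
            dev_priv->saveCURSIZE = I915_READ(CURSIZE);
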
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
new file mode 100644 (file)
index 0000000..65c88f9
--- /dev/null
@@ -0,0 +1,286 @@
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/vga_switcheroo.h>
+#include <acpi/acpi_drivers.h>
+
+#include "drmP.h"
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+
+#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static struct intel_dsm_priv {
+       acpi_handle dhandle;
+} intel_dsm_priv;
+
+static const u8 intel_dsm_guid[] = {
+       0xd3, 0x73, 0xd8, 0x7e,
+       0xd0, 0xc2,
+       0x4f, 0x4e,
+       0xa8, 0x54,
+       0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
+};
+
+static int intel_dsm(acpi_handle handle, int func, int arg)
+{
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_object_list input;
+       union acpi_object params[4];
+       union acpi_object *obj;
+       u32 result;
+       int ret = 0;
+
+       input.count = 4;
+       input.pointer = params;
+       params[0].type = ACPI_TYPE_BUFFER;
+       params[0].buffer.length = sizeof(intel_dsm_guid);
+       params[0].buffer.pointer = (char *)intel_dsm_guid;
+       params[1].type = ACPI_TYPE_INTEGER;
+       params[1].integer.value = INTEL_DSM_REVISION_ID;
+       params[2].type = ACPI_TYPE_INTEGER;
+       params[2].integer.value = func;
+       params[3].type = ACPI_TYPE_INTEGER;
+       params[3].integer.value = arg;
+
+       ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
+       if (ret) {
+               DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+               return ret;
+       }
+
+       obj = (union acpi_object *)output.pointer;
+
+       result = 0;
+       switch (obj->type) {
+       case ACPI_TYPE_INTEGER:
+               result = obj->integer.value;
+               break;
+
+       case ACPI_TYPE_BUFFER:
+               if (obj->buffer.length == 4) {
+                       result = (obj->buffer.pointer[0] |
+                               (obj->buffer.pointer[1] <<  8) |
+                               (obj->buffer.pointer[2] << 16) |
+                               (obj->buffer.pointer[3] << 24));
+                       break;
+               }
+       default:
+               ret = -EINVAL;
+               break;
+       }
+       if (result == 0x80000002)
+               ret = -ENODEV;
+
+       kfree(output.pointer);
+       return ret;
+}
+
+static char *intel_dsm_port_name(u8 id)
+{
+       switch (id) {
+       case 0:
+               return "Reserved";
+       case 1:
+               return "Analog VGA";
+       case 2:
+               return "LVDS";
+       case 3:
+               return "Reserved";
+       case 4:
+               return "HDMI/DVI_B";
+       case 5:
+               return "HDMI/DVI_C";
+       case 6:
+               return "HDMI/DVI_D";
+       case 7:
+               return "DisplayPort_A";
+       case 8:
+               return "DisplayPort_B";
+       case 9:
+               return "DisplayPort_C";
+       case 0xa:
+               return "DisplayPort_D";
+       case 0xb:
+       case 0xc:
+       case 0xd:
+               return "Reserved";
+       case 0xe:
+               return "WiDi";
+       default:
+               return "bad type";
+       }
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+       switch (type) {
+       case 0:
+               return "unknown";
+       case 1:
+               return "No MUX, iGPU only";
+       case 2:
+               return "No MUX, dGPU only";
+       case 3:
+               return "MUXed between iGPU and dGPU";
+       default:
+               return "bad type";
+       }
+}
+
+static void intel_dsm_platform_mux_info(void)
+{
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_object_list input;
+       union acpi_object params[4];
+       union acpi_object *pkg;
+       int i, ret;
+
+       input.count = 4;
+       input.pointer = params;
+       params[0].type = ACPI_TYPE_BUFFER;
+       params[0].buffer.length = sizeof(intel_dsm_guid);
+       params[0].buffer.pointer = (char *)intel_dsm_guid;
+       params[1].type = ACPI_TYPE_INTEGER;
+       params[1].integer.value = INTEL_DSM_REVISION_ID;
+       params[2].type = ACPI_TYPE_INTEGER;
+       params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
+       params[3].type = ACPI_TYPE_INTEGER;
+       params[3].integer.value = 0;
+
+       ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
+                                  &output);
+       if (ret) {
+               DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+               goto out;
+       }
+
+       pkg = (union acpi_object *)output.pointer;
+
+       if (pkg->type == ACPI_TYPE_PACKAGE) {
+               union acpi_object *connector_count = &pkg->package.elements[0];
+               DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+                         (unsigned long long)connector_count->integer.value);
+               for (i = 1; i < pkg->package.count; i++) {
+                       union acpi_object *obj = &pkg->package.elements[i];
+                       union acpi_object *connector_id =
+                               &obj->package.elements[0];
+                       union acpi_object *info = &obj->package.elements[1];
+                       DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+                                 (unsigned long long)connector_id->integer.value);
+                       DRM_DEBUG_DRIVER("  port id: %s\n",
+                              intel_dsm_port_name(info->buffer.pointer[0]));
+                       DRM_DEBUG_DRIVER("  display mux info: %s\n",
+                              intel_dsm_mux_type(info->buffer.pointer[1]));
+                       DRM_DEBUG_DRIVER("  aux/dc mux info: %s\n",
+                              intel_dsm_mux_type(info->buffer.pointer[2]));
+                       DRM_DEBUG_DRIVER("  hpd mux info: %s\n",
+                              intel_dsm_mux_type(info->buffer.pointer[3]));
+               }
+       } else {
+               DRM_ERROR("MUX INFO call failed\n");
+       }
+
+out:
+       kfree(output.pointer);
+}
+
+static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
+{
+       return 0;
+}
+
+static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
+                                enum vga_switcheroo_state state)
+{
+       return 0;
+}
+
+static int intel_dsm_init(void)
+{
+       return 0;
+}
+
+static int intel_dsm_get_client_id(struct pci_dev *pdev)
+{
+       if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+               return VGA_SWITCHEROO_IGD;
+       else
+               return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler intel_dsm_handler = {
+       .switchto = intel_dsm_switchto,
+       .power_state = intel_dsm_power_state,
+       .init = intel_dsm_init,
+       .get_client_id = intel_dsm_get_client_id,
+};
+
+static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+{
+       acpi_handle dhandle, intel_handle;
+       acpi_status status;
+       int ret;
+
+       dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+       if (!dhandle)
+               return false;
+
+       status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
+       if (ACPI_FAILURE(status)) {
+               DRM_DEBUG_KMS("no _DSM method for intel device\n");
+               return false;
+       }
+
+       ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+       if (ret < 0) {
+               DRM_ERROR("failed to get supported _DSM functions\n");
+               return false;
+       }
+
+       intel_dsm_priv.dhandle = dhandle;
+
+       intel_dsm_platform_mux_info();
+       return true;
+}
+
+static bool intel_dsm_detect(void)
+{
+       char acpi_method_name[255] = { 0 };
+       struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+       struct pci_dev *pdev = NULL;
+       bool has_dsm = false;
+       int vga_count = 0;
+
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+               vga_count++;
+               has_dsm |= intel_dsm_pci_probe(pdev);
+       }
+
+       if (vga_count == 2 && has_dsm) {
+               acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+               DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
+                                acpi_method_name);
+               return true;
+       }
+
+       return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+       if (!intel_dsm_detect())
+               return;
+
+       vga_switcheroo_register_handler(&intel_dsm_handler);
+}
+
+void intel_unregister_dsm_handler(void)
+{
+       vga_switcheroo_unregister_handler();
+}
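
Taken together, the new intel_acpi.c ties the driver into vga_switcheroo: intel_dsm_pci_probe() checks each VGA-class device for a _DSM method answering the Intel GUID, intel_dsm_detect() requires two VGA devices with at least one positive probe, and only then is the handler registered. A hedged sketch of how the two exported entry points pair up in a driver's load/unload path (the call sites below are illustrative, not part of this patch):

    /* Illustrative pairing of the entry points added above. */
    static int example_load(struct drm_device *dev)
    {
            /* Registers the vga_switcheroo handler only when a dual-GPU
             * _DSM is detected; otherwise it silently does nothing. */
            intel_register_dsm_handler();
            return 0;
    }

    static void example_unload(struct drm_device *dev)
    {
            intel_unregister_dsm_handler();
    }
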
index 96f75d7f663319c77ed5f77d279c8cc4794591d2..b0b1200ed6500055b05bc1e51a932df26b6600ef 100644 (file)
@@ -24,6 +24,7 @@
  *    Eric Anholt <eric@anholt.net>
  *
  */
+#include <drm/drm_dp_helper.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
        int i, temp_downclock;
        struct drm_display_mode *temp_mode;
 
-       /* Defaults if we can't find VBT info */
-       dev_priv->lvds_dither = 0;
-       dev_priv->lvds_vbt = 0;
-
        lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
        if (!lvds_options)
                return;
@@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
        dev_priv->lvds_dither = lvds_options->pixel_dither;
        if (lvds_options->panel_type == 0xff)
                return;
+
        panel_type = lvds_options->panel_type;
 
        lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
@@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
                        ((unsigned char *)entry + dvo_timing_offset);
 
        panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+       if (!panel_fixed_mode)
+               return;
 
        fill_detail_timing_data(panel_fixed_mode, dvo_timing);
 
@@ -230,8 +230,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
        struct lvds_dvo_timing *dvo_timing;
        struct drm_display_mode *panel_fixed_mode;
 
-       dev_priv->sdvo_lvds_vbt_mode = NULL;
-
        sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
        if (!sdvo_lvds_options)
                return;
@@ -260,10 +258,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
        struct drm_device *dev = dev_priv->dev;
        struct bdb_general_features *general;
 
-       /* Set sensible defaults in case we can't find the general block */
-       dev_priv->int_tv_support = 1;
-       dev_priv->int_crt_support = 1;
-
        general = find_section(bdb, BDB_GENERAL_FEATURES);
        if (general) {
                dev_priv->int_tv_support = general->int_tv_support;
@@ -271,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
                dev_priv->lvds_use_ssc = general->enable_ssc;
 
                if (dev_priv->lvds_use_ssc) {
-                       if (IS_I85X(dev_priv->dev))
+                       if (IS_I85X(dev))
                                dev_priv->lvds_ssc_freq =
                                        general->ssc_freq ? 66 : 48;
-                       else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
+                       else if (IS_GEN5(dev) || IS_GEN6(dev))
                                dev_priv->lvds_ssc_freq =
                                        general->ssc_freq ? 100 : 120;
                        else
@@ -289,14 +283,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
                          struct bdb_header *bdb)
 {
        struct bdb_general_definitions *general;
-       const int crt_bus_map_table[] = {
-               GPIOB,
-               GPIOA,
-               GPIOC,
-               GPIOD,
-               GPIOE,
-               GPIOF,
-       };
 
        general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
        if (general) {
@@ -304,10 +290,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
                if (block_size >= sizeof(*general)) {
                        int bus_pin = general->crt_ddc_gmbus_pin;
                        DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
-                       if ((bus_pin >= 1) && (bus_pin <= 6)) {
-                               dev_priv->crt_ddc_bus =
-                                       crt_bus_map_table[bus_pin-1];
-                       }
+                       if (bus_pin >= 1 && bus_pin <= 6)
+                               dev_priv->crt_ddc_pin = bus_pin;
                } else {
                        DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
                                  block_size);
@@ -317,7 +301,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 
 static void
 parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
-                      struct bdb_header *bdb)
+                         struct bdb_header *bdb)
 {
        struct sdvo_device_mapping *p_mapping;
        struct bdb_general_definitions *p_defs;
@@ -327,7 +311,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 
        p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
        if (!p_defs) {
-               DRM_DEBUG_KMS("No general definition block is found\n");
+               DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
                return;
        }
        /* judge whether the size of child device meets the requirements.
@@ -377,7 +361,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                        p_mapping->slave_addr = p_child->slave_addr;
                        p_mapping->dvo_wiring = p_child->dvo_wiring;
                        p_mapping->ddc_pin = p_child->ddc_pin;
+                       p_mapping->i2c_pin = p_child->i2c_pin;
+                       p_mapping->i2c_speed = p_child->i2c_speed;
                        p_mapping->initialized = 1;
+                       DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
+                                     p_mapping->dvo_port,
+                                     p_mapping->slave_addr,
+                                     p_mapping->dvo_wiring,
+                                     p_mapping->ddc_pin,
+                                     p_mapping->i2c_pin,
+                                     p_mapping->i2c_speed);
                } else {
                        DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
                                         "two SDVO device.\n");
@@ -409,14 +402,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
        if (!driver)
                return;
 
-       if (driver && SUPPORTS_EDP(dev) &&
-           driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
-               dev_priv->edp_support = 1;
-       } else {
-               dev_priv->edp_support = 0;
-       }
+       if (SUPPORTS_EDP(dev) &&
+           driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+               dev_priv->edp.support = 1;
 
-       if (driver && driver->dual_frequency)
+       if (driver->dual_frequency)
                dev_priv->render_reclock_avail = true;
 }
 
@@ -424,27 +414,78 @@ static void
 parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 {
        struct bdb_edp *edp;
+       struct edp_power_seq *edp_pps;
+       struct edp_link_params *edp_link_params;
 
        edp = find_section(bdb, BDB_EDP);
        if (!edp) {
-               if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+               if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
                        DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
-                                     "supported, assume 18bpp panel color "
-                                     "depth.\n");
-                       dev_priv->edp_bpp = 18;
+                                     "supported, assume %dbpp panel color "
+                                     "depth.\n",
+                                     dev_priv->edp.bpp);
                }
                return;
        }
 
        switch ((edp->color_depth >> (panel_type * 2)) & 3) {
        case EDP_18BPP:
-               dev_priv->edp_bpp = 18;
+               dev_priv->edp.bpp = 18;
                break;
        case EDP_24BPP:
-               dev_priv->edp_bpp = 24;
+               dev_priv->edp.bpp = 24;
                break;
        case EDP_30BPP:
-               dev_priv->edp_bpp = 30;
+               dev_priv->edp.bpp = 30;
+               break;
+       }
+
+       /* Get the eDP sequencing and link info */
+       edp_pps = &edp->power_seqs[panel_type];
+       edp_link_params = &edp->link_params[panel_type];
+
+       dev_priv->edp.pps = *edp_pps;
+
+       dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+               DP_LINK_BW_1_62;
+       switch (edp_link_params->lanes) {
+       case 0:
+               dev_priv->edp.lanes = 1;
+               break;
+       case 1:
+               dev_priv->edp.lanes = 2;
+               break;
+       case 3:
+       default:
+               dev_priv->edp.lanes = 4;
+               break;
+       }
+       switch (edp_link_params->preemphasis) {
+       case 0:
+               dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+               break;
+       case 1:
+               dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+               break;
+       case 2:
+               dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+               break;
+       case 3:
+               dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+               break;
+       }
+       switch (edp_link_params->vswing) {
+       case 0:
+               dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+               break;
+       case 1:
+               dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+               break;
+       case 2:
+               dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+               break;
+       case 3:
+               dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
                break;
        }
 }
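
parse_edp() now pulls per-panel power sequencing and link settings out of two arrays indexed by the panel_type found earlier, and the colour depth is packed two bits per panel into a single word. A worked example of that decoding, runnable stand-alone (the EDP_*BPP values of 0/1/2 and the sample inputs are assumptions chosen to match the switch above):

    #include <stdio.h>

    enum { EDP_18BPP = 0, EDP_24BPP = 1, EDP_30BPP = 2 }; /* assumed values */

    int main(void)
    {
            unsigned color_depth = 0x0024; /* hypothetical VBT word */
            int panel_type = 2;            /* hypothetical panel index */

            /* Two bits per panel: panel 2 sits at bits 5:4, (0x24 >> 4) & 3 = 2 */
            switch ((color_depth >> (panel_type * 2)) & 3) {
            case EDP_18BPP: printf("18 bpp\n"); break;
            case EDP_24BPP: printf("24 bpp\n"); break;
            case EDP_30BPP: printf("30 bpp\n"); break; /* selected here */
            }
            return 0;
    }
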
@@ -460,7 +501,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 
        p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
        if (!p_defs) {
-               DRM_DEBUG_KMS("No general definition block is found\n");
+               DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
                return;
        }
        /* judge whether the size of child device meets the requirements.
@@ -513,50 +554,83 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
        }
        return;
 }
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+       dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+       /* LFP panel data */
+       dev_priv->lvds_dither = 1;
+       dev_priv->lvds_vbt = 0;
+
+       /* SDVO panel data */
+       dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+       /* general features */
+       dev_priv->int_tv_support = 1;
+       dev_priv->int_crt_support = 1;
+       dev_priv->lvds_use_ssc = 0;
+
+       /* eDP data */
+       dev_priv->edp.bpp = 18;
+}
+
 /**
- * intel_init_bios - initialize VBIOS settings & find VBT
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
  * @dev: DRM device
  *
  * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
  * to appropriate values.
  *
- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
- * feed an updated VBT back through that, compared to what we'll fetch using
- * this method of groping around in the BIOS data.
- *
  * Returns 0 on success, nonzero on failure.
  */
 bool
-intel_init_bios(struct drm_device *dev)
+intel_parse_bios(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pdev = dev->pdev;
-       struct vbt_header *vbt = NULL;
-       struct bdb_header *bdb;
-       u8 __iomem *bios;
-       size_t size;
-       int i;
-
-       bios = pci_map_rom(pdev, &size);
-       if (!bios)
-               return -1;
-
-       /* Scour memory looking for the VBT signature */
-       for (i = 0; i + 4 < size; i++) {
-               if (!memcmp(bios + i, "$VBT", 4)) {
-                       vbt = (struct vbt_header *)(bios + i);
-                       break;
-               }
+       struct bdb_header *bdb = NULL;
+       u8 __iomem *bios = NULL;
+
+       init_vbt_defaults(dev_priv);
+
+       /* XXX Should this validation be moved to intel_opregion.c? */
+       if (dev_priv->opregion.vbt) {
+               struct vbt_header *vbt = dev_priv->opregion.vbt;
+               if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+                       DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
+                                        vbt->signature);
+                       bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+               } else
+                       dev_priv->opregion.vbt = NULL;
        }
 
-       if (!vbt) {
-               DRM_ERROR("VBT signature missing\n");
-               pci_unmap_rom(pdev, bios);
-               return -1;
-       }
+       if (bdb == NULL) {
+               struct vbt_header *vbt = NULL;
+               size_t size;
+               int i;
 
-       bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+               bios = pci_map_rom(pdev, &size);
+               if (!bios)
+                       return -1;
+
+               /* Scour memory looking for the VBT signature */
+               for (i = 0; i + 4 < size; i++) {
+                       if (!memcmp(bios + i, "$VBT", 4)) {
+                               vbt = (struct vbt_header *)(bios + i);
+                               break;
+                       }
+               }
+
+               if (!vbt) {
+                       DRM_ERROR("VBT signature missing\n");
+                       pci_unmap_rom(pdev, bios);
+                       return -1;
+               }
+
+               bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+       }
 
        /* Grab useful general definitions */
        parse_general_features(dev_priv, bdb);
@@ -568,7 +642,25 @@ intel_init_bios(struct drm_device *dev)
        parse_driver_features(dev_priv, bdb);
        parse_edp(dev_priv, bdb);
 
-       pci_unmap_rom(pdev, bios);
+       if (bios)
+               pci_unmap_rom(pdev, bios);
 
        return 0;
 }
+
+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void intel_setup_bios(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+        /* Set the Panel Power On/Off timings if uninitialized. */
+       if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+               /* Set T2 to 40ms and T5 to 200ms */
+               I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+               /* Set T3 to 35ms and Tx to 200ms */
+               I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+       }
+}
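
The net effect of the intel_bios.c rework: every default now lives in init_vbt_defaults() and is installed before any parsing, so each parse_* helper only overrides what its BDB section actually provides, and the renamed intel_parse_bios() consults the ACPI OpRegion's VBT before falling back to scanning the PCI ROM for the "$VBT" signature. A condensed sketch of the resulting flow (vbt_from_opregion() and vbt_from_rom() are hypothetical names standing in for the two inline branches above):

    init_vbt_defaults(dev_priv);            /* defaults first, always */

    bdb = vbt_from_opregion(dev_priv);      /* preferred: BIOS-provided VBT */
    if (!bdb)
            bdb = vbt_from_rom(pdev, &bios);  /* fallback: scan mapped ROM */
    if (!bdb)
            return -1;                      /* as in the hunk: no VBT found */

    parse_general_features(dev_priv, bdb);  /* each parser overrides the   */
    parse_edp(dev_priv, bdb);               /* defaults only on success    */
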
index 4c18514f6f80f469d15a039ef47d37bd0e249ce4..5f8e4edcbbb901937a20a522f60057ea762a0a9d 100644 (file)
@@ -197,7 +197,8 @@ struct bdb_general_features {
 struct child_device_config {
        u16 handle;
        u16 device_type;
-       u8  device_id[10]; /* See DEVICE_TYPE_* above */
+       u8  i2c_speed;
+       u8  rsvd[9];
        u16 addin_offset;
        u8  dvo_port; /* See Device_PORT_* above */
        u8  i2c_pin;
@@ -466,7 +467,8 @@ struct bdb_edp {
        struct edp_link_params link_params[16];
 } __attribute__ ((packed));
 
-bool intel_init_bios(struct drm_device *dev);
+void intel_setup_bios(struct drm_device *dev);
+bool intel_parse_bios(struct drm_device *dev);
 
 /*
  * Driver<->VBIOS interaction occurs through scratch bits in
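
The child_device_config change above is layout-preserving: the old ten-byte device_id[] is split into a one-byte i2c_speed plus nine reserved bytes, so every later member keeps its offset and existing VBTs parse unchanged. A stand-alone check of that claim (struct members trimmed to the fields shown in the hunk):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct old_cfg {
            uint16_t handle, device_type;
            uint8_t  device_id[10];
            uint16_t addin_offset;
    } __attribute__((packed));

    struct new_cfg {
            uint16_t handle, device_type;
            uint8_t  i2c_speed, rsvd[9];
            uint16_t addin_offset;
    } __attribute__((packed));

    int main(void)
    {
            /* Both print 14: the split leaves later offsets untouched. */
            printf("%zu %zu\n", offsetof(struct old_cfg, addin_offset),
                                offsetof(struct new_cfg, addin_offset));
            return 0;
    }
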
index 197d4f32585a59b5b336328b470fa038bc922781..c55c77043357cb0243a4b368a1e6d63b7a4af398 100644 (file)
@@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
        if (mode->clock < 25000)
                return MODE_CLOCK_LOW;
 
-       if (!IS_I9XX(dev))
+       if (IS_GEN2(dev))
                max_clock = 350000;
        else
                max_clock = 400000;
@@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
         * Disable separate mode multiplier used when cloning SDVO to CRT
         * XXX this needs to be adjusted when we really are cloning
         */
-       if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
                dpll_md = I915_READ(dpll_md_reg);
                I915_WRITE(dpll_md_reg,
                           dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -187,11 +187,12 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
        I915_WRITE(PCH_ADPA, adpa);
 
        if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-                    1000, 1))
+                    1000))
                DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
 
        if (turn_off_dac) {
-               I915_WRITE(PCH_ADPA, temp);
+               /* Make sure hotplug is enabled */
+               I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
                (void)I915_READ(PCH_ADPA);
        }
 
@@ -244,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
                /* wait for FORCE_DETECT to go off */
                if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
                              CRT_HOTPLUG_FORCE_DETECT) == 0,
-                            1000, 1))
+                            1000))
                        DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
        }
 
@@ -261,21 +262,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
        return ret;
 }
 
+static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
+{
+       u8 buf;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = 0xA0,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = &buf,
+               },
+       };
+       /* DDC monitor detect: Does it ACK a write to 0xA0? */
+       return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
+}
+
 static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 {
-       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
 
        /* CRT should always be at 0, but check anyway */
        if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
                return false;
 
-       return intel_ddc_probe(intel_encoder);
+       if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
+               DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
+               return true;
+       }
+
+       if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+               DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+               return true;
+       }
+
+       return false;
 }
 
 static enum drm_connector_status
 intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
 {
-       struct drm_encoder *encoder = &intel_encoder->enc;
+       struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -295,6 +322,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
        uint8_t st00;
        enum drm_connector_status status;
 
+       DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
        if (pipe == 0) {
                bclrpat_reg = BCLRPAT_A;
                vtotal_reg = VTOTAL_A;
@@ -324,9 +353,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
        /* Set the border color to purple. */
        I915_WRITE(bclrpat_reg, 0x500050);
 
-       if (IS_I9XX(dev)) {
+       if (!IS_GEN2(dev)) {
                uint32_t pipeconf = I915_READ(pipeconf_reg);
                I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+               POSTING_READ(pipeconf_reg);
                /* Wait for next Vblank to substitue
                 * border color for Color info */
                intel_wait_for_vblank(dev, pipe);
@@ -404,34 +434,37 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct intel_encoder *encoder = intel_attached_encoder(connector);
        struct drm_crtc *crtc;
        int dpms_mode;
        enum drm_connector_status status;
 
-       if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
-               if (intel_crt_detect_hotplug(connector))
+       if (I915_HAS_HOTPLUG(dev)) {
+               if (intel_crt_detect_hotplug(connector)) {
+                       DRM_DEBUG_KMS("CRT detected via hotplug\n");
                        return connector_status_connected;
-               else
+               } else
                        return connector_status_disconnected;
        }
 
-       if (intel_crt_detect_ddc(encoder))
+       if (intel_crt_detect_ddc(&encoder->base))
                return connector_status_connected;
 
        if (!force)
                return connector->status;
 
        /* for pre-945g platforms use load detect */
-       if (encoder->crtc && encoder->crtc->enabled) {
-               status = intel_crt_load_detect(encoder->crtc, intel_encoder);
+       if (encoder->base.crtc && encoder->base.crtc->enabled) {
+               status = intel_crt_load_detect(encoder->base.crtc, encoder);
        } else {
-               crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+               crtc = intel_get_load_detect_pipe(encoder, connector,
                                                  NULL, &dpms_mode);
                if (crtc) {
-                       status = intel_crt_load_detect(crtc, intel_encoder);
-                       intel_release_load_detect_pipe(intel_encoder,
+                       if (intel_crt_detect_ddc(&encoder->base))
+                               status = connector_status_connected;
+                       else
+                               status = intel_crt_load_detect(crtc, encoder);
+                       intel_release_load_detect_pipe(encoder,
                                                       connector, dpms_mode);
                } else
                        status = connector_status_unknown;
@@ -449,32 +482,18 @@ static void intel_crt_destroy(struct drm_connector *connector)
 
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
-       int ret;
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-       struct i2c_adapter *ddc_bus;
        struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
 
-
-       ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+       ret = intel_ddc_get_modes(connector,
+                                &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
        if (ret || !IS_G4X(dev))
-               goto end;
+               return ret;
 
        /* Try to probe digital port for output in DVI-I -> VGA mode. */
-       ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
-
-       if (!ddc_bus) {
-               dev_printk(KERN_ERR, &connector->dev->pdev->dev,
-                          "DDC bus registration failed for CRTDDC_D.\n");
-               goto end;
-       }
-       /* Try to get modes by GPIOD port */
-       ret = intel_ddc_get_modes(connector, ddc_bus);
-       intel_i2c_destroy(ddc_bus);
-
-end:
-       return ret;
-
+       return intel_ddc_get_modes(connector,
+                                  &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
 }
 
 static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,7 +526,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
        .mode_valid = intel_crt_mode_valid,
        .get_modes = intel_crt_get_modes,
-       .best_encoder = intel_attached_encoder,
+       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -520,7 +539,6 @@ void intel_crt_init(struct drm_device *dev)
        struct intel_encoder *intel_encoder;
        struct intel_connector *intel_connector;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 i2c_reg;
 
        intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
        if (!intel_encoder)
@@ -536,27 +554,10 @@ void intel_crt_init(struct drm_device *dev)
        drm_connector_init(dev, &intel_connector->base,
                           &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
-       drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
+       drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
                         DRM_MODE_ENCODER_DAC);
 
-       drm_mode_connector_attach_encoder(&intel_connector->base,
-                                         &intel_encoder->enc);
-
-       /* Set up the DDC bus. */
-       if (HAS_PCH_SPLIT(dev))
-               i2c_reg = PCH_GPIOA;
-       else {
-               i2c_reg = GPIOA;
-               /* Use VBT information for CRT DDC if available */
-               if (dev_priv->crt_ddc_bus != 0)
-                       i2c_reg = dev_priv->crt_ddc_bus;
-       }
-       intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
-       if (!intel_encoder->ddc_bus) {
-               dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-                          "failed.\n");
-               return;
-       }
+       intel_connector_attach_encoder(intel_connector, intel_encoder);
 
        intel_encoder->type = INTEL_OUTPUT_ANALOG;
        intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -566,7 +567,7 @@ void intel_crt_init(struct drm_device *dev)
        connector->interlace_allowed = 1;
        connector->doublescan_allowed = 0;
 
-       drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
+       drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
        drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
        drm_sysfs_connector_add(connector);
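
The CRT rework above drops the per-encoder i2c bus in favour of the shared dev_priv->gmbus[] adapters, and detection now short-circuits on a raw DDC ACK before trying an EDID fetch or load-detect. A hedged sketch of the generic probe idiom (i2c_transfer() returns the number of messages completed, so a return of 1 means the slave ACKed; the address mirrors the hunk above):

    #include <linux/i2c.h>

    /* Sketch: does any device ACK a one-byte write on this adapter?
     * Mirrors intel_crt_ddc_probe() above; works for any i2c_adapter. */
    static bool ddc_ack_probe(struct i2c_adapter *adapter)
    {
            u8 buf = 0;
            struct i2c_msg msg = {
                    .addr  = 0xA0,  /* address as used in the hunk above */
                    .flags = 0,     /* plain write */
                    .len   = 1,
                    .buf   = &buf,
            };

            return i2c_transfer(adapter, &msg, 1) == 1;
    }
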
index 979228594599a28ac7737762679f1c97fd5981bf..990f065374b22eacfc2fcddd68c873a5081ee773 100644 (file)
@@ -43,8 +43,8 @@
 
 bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
 static void intel_update_watermarks(struct drm_device *dev);
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
-static void intel_crtc_update_cursor(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 typedef struct {
     /* given values */
@@ -342,6 +342,16 @@ static bool
 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
                           int target, int refclk, intel_clock_t *best_clock);
 
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+       if (IS_GEN5(dev)) {
+               struct drm_i915_private *dev_priv = dev->dev_private;
+               return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+       } else
+               return 27;
+}
+
 static const intel_limit_t intel_limits_i8xx_dvo = {
         .dot = { .min = I8XX_DOT_MIN,          .max = I8XX_DOT_MAX },
         .vco = { .min = I8XX_VCO_MIN,          .max = I8XX_VCO_MAX },
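
The new intel_fdi_link_freq() helper folds the Ironlake-only register read into one place: on gen 5 the FDI PLL feedback field plus two gives the link frequency in units of 100 MHz, while everything else is hard-wired to 27 (2.7 GHz). A worked example of the arithmetic, runnable stand-alone (the mask value and register contents are assumptions for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define FDI_PLL_FB_CLOCK_MASK 0xff /* assumed: low byte holds the field */

    int main(void)
    {
            uint32_t fdi_pll_bios_0 = 0x19;  /* hypothetical register value */
            unsigned freq = (fdi_pll_bios_0 & FDI_PLL_FB_CLOCK_MASK) + 2;

            /* 0x19 = 25, so (25 + 2) * 100 MHz = 2.7 GHz -- the same value
             * the helper returns as the constant 27 on non-Ironlake parts. */
            printf("FDI link freq: %u00 MHz\n", freq);
            return 0;
    }
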
@@ -701,16 +711,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
                limit = intel_ironlake_limit(crtc);
        else if (IS_G4X(dev)) {
                limit = intel_g4x_limit(crtc);
-       } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-                       limit = &intel_limits_i9xx_lvds;
-               else
-                       limit = &intel_limits_i9xx_sdvo;
        } else if (IS_PINEVIEW(dev)) {
                if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits_pineview_lvds;
                else
                        limit = &intel_limits_pineview_sdvo;
+       } else if (!IS_GEN2(dev)) {
+               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+                       limit = &intel_limits_i9xx_lvds;
+               else
+                       limit = &intel_limits_i9xx_sdvo;
        } else {
                if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits_i8xx_lvds;
@@ -744,20 +754,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
 /**
  * Returns whether any output on the specified pipe is of the specified type
  */
-bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
 {
-    struct drm_device *dev = crtc->dev;
-    struct drm_mode_config *mode_config = &dev->mode_config;
-    struct drm_encoder *l_entry;
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
 
-    list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
-           if (l_entry && l_entry->crtc == crtc) {
-                   struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
-                   if (intel_encoder->type == type)
-                           return true;
-           }
-    }
-    return false;
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+               if (encoder->base.crtc == crtc && encoder->type == type)
+                       return true;
+
+       return false;
 }
 
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -928,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        intel_clock_t clock;
 
-       /* return directly when it is eDP */
-       if (HAS_eDP)
-               return true;
-
        if (target < 200000) {
                clock.n = 1;
                clock.p1 = 2;
@@ -955,26 +958,26 @@ static bool
 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
                      int target, int refclk, intel_clock_t *best_clock)
 {
-    intel_clock_t clock;
-    if (target < 200000) {
-       clock.p1 = 2;
-       clock.p2 = 10;
-       clock.n = 2;
-       clock.m1 = 23;
-       clock.m2 = 8;
-    } else {
-       clock.p1 = 1;
-       clock.p2 = 10;
-       clock.n = 1;
-       clock.m1 = 14;
-       clock.m2 = 2;
-    }
-    clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
-    clock.p = (clock.p1 * clock.p2);
-    clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
-    clock.vco = 0;
-    memcpy(best_clock, &clock, sizeof(intel_clock_t));
-    return true;
+       intel_clock_t clock;
+       if (target < 200000) {
+               clock.p1 = 2;
+               clock.p2 = 10;
+               clock.n = 2;
+               clock.m1 = 23;
+               clock.m2 = 8;
+       } else {
+               clock.p1 = 1;
+               clock.p2 = 10;
+               clock.n = 1;
+               clock.m1 = 14;
+               clock.m2 = 2;
+       }
+       clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+       clock.p = (clock.p1 * clock.p2);
+       clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+       clock.vco = 0;
+       memcpy(best_clock, &clock, sizeof(intel_clock_t));
+       return true;
 }
 
 /**
@@ -1007,9 +1010,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
                   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
 
        /* Wait for vblank interrupt bit to set */
-       if (wait_for((I915_READ(pipestat_reg) &
-                     PIPE_VBLANK_INTERRUPT_STATUS),
-                    50, 0))
+       if (wait_for(I915_READ(pipestat_reg) &
+                    PIPE_VBLANK_INTERRUPT_STATUS,
+                    50))
                DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
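
Every wait_for() caller in this series loses its third argument: the poll-interval flag moved into the macro, leaving wait_for(condition, timeout_ms), which yields 0 on success and nonzero once the deadline passes (hence the `if (wait_for(...)) DRM_DEBUG_KMS("... timed out")` pattern in the hunks). A sketch of that contract as a plain function, under the assumption that the driver's macro returns -ETIMEDOUT on expiry:

    #include <errno.h>
    #include <stdbool.h>
    #include <time.h>

    static long now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* Poll a condition until it holds or timeout_ms elapses; 0 on success,
     * -ETIMEDOUT otherwise (the same shape as the two-argument wait_for()). */
    static int wait_for_cond(bool (*cond)(void *), void *arg, unsigned timeout_ms)
    {
            long deadline = now_ms() + timeout_ms;

            while (!cond(arg)) {
                    if (now_ms() > deadline)
                            return -ETIMEDOUT;
                    /* ~1 ms poll interval, standing in for the macro's sleep */
                    nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
            }
            return 0;
    }
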
@@ -1028,36 +1031,35 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
  * Otherwise:
  *   wait for the display line value to settle (it usually
  *   ends up stopping at the start of the next frame).
- *  
+ *
  */
-static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (INTEL_INFO(dev)->gen >= 4) {
-               int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
+               int reg = PIPECONF(pipe);
 
                /* Wait for the Pipe State to go off */
-               if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
-                            100, 0))
+               if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+                            100))
                        DRM_DEBUG_KMS("pipe_off wait timed out\n");
        } else {
                u32 last_line;
-               int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+               int reg = PIPEDSL(pipe);
                unsigned long timeout = jiffies + msecs_to_jiffies(100);
 
                /* Wait for the display line to settle */
                do {
-                       last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+                       last_line = I915_READ(reg) & DSL_LINEMASK;
                        mdelay(5);
-               } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+               } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
                         time_after(timeout, jiffies));
                if (time_after(jiffies, timeout))
                        DRM_DEBUG_KMS("pipe_off wait timed out\n");
        }
 }
 
-/* Parameters have changed, update FBC info */
 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
        struct drm_device *dev = crtc->dev;
@@ -1069,6 +1071,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;
 
+       if (fb->pitch == dev_priv->cfb_pitch &&
+           obj_priv->fence_reg == dev_priv->cfb_fence &&
+           intel_crtc->plane == dev_priv->cfb_plane &&
+           I915_READ(FBC_CONTROL) & FBC_CTL_EN)
+               return;
+
+       i8xx_disable_fbc(dev);
+
        dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
 
        if (fb->pitch < dev_priv->cfb_pitch)
@@ -1102,7 +1112,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 
        DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
-                 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+                     dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
 }
 
 void i8xx_disable_fbc(struct drm_device *dev)
@@ -1110,19 +1120,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;
 
-       if (!I915_HAS_FBC(dev))
-               return;
-
-       if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
-               return; /* Already off, just return */
-
        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
+       if ((fbc_ctl & FBC_CTL_EN) == 0)
+               return;
+
        fbc_ctl &= ~FBC_CTL_EN;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 
        /* Wait for compressing bit to clear */
-       if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
+       if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }
@@ -1145,14 +1152,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
-                    DPFC_CTL_PLANEB);
+       int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;
 
+       dpfc_ctl = I915_READ(DPFC_CONTROL);
+       if (dpfc_ctl & DPFC_CTL_EN) {
+               if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+                   dev_priv->cfb_fence == obj_priv->fence_reg &&
+                   dev_priv->cfb_plane == intel_crtc->plane &&
+                   dev_priv->cfb_y == crtc->y)
+                       return;
+
+               I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+               POSTING_READ(DPFC_CONTROL);
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
+       }
+
        dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
        dev_priv->cfb_fence = obj_priv->fence_reg;
        dev_priv->cfb_plane = intel_crtc->plane;
+       dev_priv->cfb_y = crtc->y;
 
        dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
        if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1162,7 +1182,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
                I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
        }
 
-       I915_WRITE(DPFC_CONTROL, dpfc_ctl);
        I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
@@ -1181,10 +1200,12 @@ void g4x_disable_fbc(struct drm_device *dev)
 
        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
-       dpfc_ctl &= ~DPFC_CTL_EN;
-       I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+       if (dpfc_ctl & DPFC_CTL_EN) {
+               dpfc_ctl &= ~DPFC_CTL_EN;
+               I915_WRITE(DPFC_CONTROL, dpfc_ctl);
 
-       DRM_DEBUG_KMS("disabled FBC\n");
+               DRM_DEBUG_KMS("disabled FBC\n");
+       }
 }
 
 static bool g4x_fbc_enabled(struct drm_device *dev)
@@ -1202,16 +1223,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
-                                              DPFC_CTL_PLANEB;
+       int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;
 
+       dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+       if (dpfc_ctl & DPFC_CTL_EN) {
+               if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
+                   dev_priv->cfb_fence == obj_priv->fence_reg &&
+                   dev_priv->cfb_plane == intel_crtc->plane &&
+                   dev_priv->cfb_offset == obj_priv->gtt_offset &&
+                   dev_priv->cfb_y == crtc->y)
+                       return;
+
+               I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
+               POSTING_READ(ILK_DPFC_CONTROL);
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
+       }
+
        dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
        dev_priv->cfb_fence = obj_priv->fence_reg;
        dev_priv->cfb_plane = intel_crtc->plane;
+       dev_priv->cfb_offset = obj_priv->gtt_offset;
+       dev_priv->cfb_y = crtc->y;
 
-       dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        dpfc_ctl &= DPFC_RESERVED;
        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
        if (obj_priv->tiling_mode != I915_TILING_NONE) {
@@ -1221,15 +1256,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
                I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
        }
 
-       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
        I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
        I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
-       I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
-                  DPFC_CTL_EN);
+       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
 }
@@ -1241,10 +1274,12 @@ void ironlake_disable_fbc(struct drm_device *dev)
 
        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
-       dpfc_ctl &= ~DPFC_CTL_EN;
-       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+       if (dpfc_ctl & DPFC_CTL_EN) {
+               dpfc_ctl &= ~DPFC_CTL_EN;
+               I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
-       DRM_DEBUG_KMS("disabled FBC\n");
+               DRM_DEBUG_KMS("disabled FBC\n");
+       }
 }
 
 static bool ironlake_fbc_enabled(struct drm_device *dev)
@@ -1286,8 +1321,7 @@ void intel_disable_fbc(struct drm_device *dev)
 
 /**
  * intel_update_fbc - enable/disable FBC as needed
- * @crtc: CRTC to point the compressor at
- * @mode: mode in use
+ * @dev: the drm_device
  *
  * Set up the framebuffer compression hardware at mode set time.  We
  * enable it if possible:
@@ -1304,18 +1338,14 @@ void intel_disable_fbc(struct drm_device *dev)
  *
  * We need to enable/disable FBC on a global basis.
  */
-static void intel_update_fbc(struct drm_crtc *crtc,
-                            struct drm_display_mode *mode)
+static void intel_update_fbc(struct drm_device *dev)
 {
-       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_framebuffer *fb = crtc->fb;
+       struct drm_crtc *crtc = NULL, *tmp_crtc;
+       struct intel_crtc *intel_crtc;
+       struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj_priv;
-       struct drm_crtc *tmp_crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int plane = intel_crtc->plane;
-       int crtcs_enabled = 0;
 
        DRM_DEBUG_KMS("\n");
 
@@ -1325,12 +1355,6 @@ static void intel_update_fbc(struct drm_crtc *crtc,
        if (!I915_HAS_FBC(dev))
                return;
 
-       if (!crtc->fb)
-               return;
-
-       intel_fb = to_intel_framebuffer(fb);
-       obj_priv = to_intel_bo(intel_fb->obj);
-
        /*
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
@@ -1341,35 +1365,47 @@ static void intel_update_fbc(struct drm_crtc *crtc,
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-               if (tmp_crtc->enabled)
-                       crtcs_enabled++;
+               if (tmp_crtc->enabled) {
+                       if (crtc) {
+                               DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+                               dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+                               goto out_disable;
+                       }
+                       crtc = tmp_crtc;
+               }
        }
-       DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
-       if (crtcs_enabled > 1) {
-               DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+
+       if (!crtc || crtc->fb == NULL) {
+               DRM_DEBUG_KMS("no output, disabling\n");
+               dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
                goto out_disable;
        }
+
+       intel_crtc = to_intel_crtc(crtc);
+       fb = crtc->fb;
+       intel_fb = to_intel_framebuffer(fb);
+       obj_priv = to_intel_bo(intel_fb->obj);
+
        if (intel_fb->obj->size > dev_priv->cfb_size) {
                DRM_DEBUG_KMS("framebuffer too large, disabling "
-                               "compression\n");
+                             "compression\n");
                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                goto out_disable;
        }
-       if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
-           (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+       if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+           (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                DRM_DEBUG_KMS("mode incompatible with compression, "
-                               "disabling\n");
+                             "disabling\n");
                dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
                goto out_disable;
        }
-       if ((mode->hdisplay > 2048) ||
-           (mode->vdisplay > 1536)) {
+       if ((crtc->mode.hdisplay > 2048) ||
+           (crtc->mode.vdisplay > 1536)) {
                DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
                goto out_disable;
        }
-       if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
+       if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
                DRM_DEBUG_KMS("plane not 0, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_BAD_PLANE;
                goto out_disable;
@@ -1384,18 +1420,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
        if (in_dbg_master())
                goto out_disable;
 
-       if (intel_fbc_enabled(dev)) {
-               /* We can re-enable it in this case, but need to update pitch */
-               if ((fb->pitch > dev_priv->cfb_pitch) ||
-                   (obj_priv->fence_reg != dev_priv->cfb_fence) ||
-                   (plane != dev_priv->cfb_plane))
-                       intel_disable_fbc(dev);
-       }
-
-       /* Now try to turn it back on if possible */
-       if (!intel_fbc_enabled(dev))
-               intel_enable_fbc(crtc, 500);
-
+       intel_enable_fbc(crtc, 500);
        return;
 
 out_disable:
@@ -1407,7 +1432,9 @@ out_disable:
 }
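
The reworked intel_update_fbc() above no longer counts enabled pipes; it keeps a pointer to the one enabled CRTC and bails as soon as a second shows up. A minimal user-space sketch of that pick-unique-or-bail pattern, with invented types (struct crtc, pick_fbc_crtc) rather than the driver's real ones:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct crtc { bool enabled; const char *name; };

/* Return the unique enabled crtc, or NULL if zero or more than one
 * are enabled -- mirroring the "more than one pipe active" bail-out. */
static struct crtc *pick_fbc_crtc(struct crtc *list, size_t n)
{
        struct crtc *found = NULL;

        for (size_t i = 0; i < n; i++) {
                if (!list[i].enabled)
                        continue;
                if (found)
                        return NULL;    /* second active pipe: disable FBC */
                found = &list[i];
        }
        return found;
}

int main(void)
{
        struct crtc crtcs[] = { { true, "A" }, { false, "B" } };
        struct crtc *c = pick_fbc_crtc(crtcs, 2);

        printf("%s\n", c ? c->name : "no unique pipe");
        return 0;
}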
 
 int
-intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+                          struct drm_gem_object *obj,
+                          bool pipelined)
 {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        u32 alignment;
@@ -1417,7 +1444,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
        case I915_TILING_NONE:
                if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                        alignment = 128 * 1024;
-               else if (IS_I965G(dev))
+               else if (INTEL_INFO(dev)->gen >= 4)
                        alignment = 4 * 1024;
                else
                        alignment = 64 * 1024;
@@ -1435,9 +1462,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
        }
 
        ret = i915_gem_object_pin(obj, alignment);
-       if (ret != 0)
+       if (ret)
                return ret;
 
+       ret = i915_gem_object_set_to_display_plane(obj, pipelined);
+       if (ret)
+               goto err_unpin;
+
        /* Install a fence for tiled scan-out. Pre-i965 always needs a
         * fence, whereas 965+ only requires a fence if using
         * framebuffer compression.  For simplicity, we always install
@@ -1445,20 +1476,22 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
         */
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
            obj_priv->tiling_mode != I915_TILING_NONE) {
-               ret = i915_gem_object_get_fence_reg(obj);
-               if (ret != 0) {
-                       i915_gem_object_unpin(obj);
-                       return ret;
-               }
+               ret = i915_gem_object_get_fence_reg(obj, false);
+               if (ret)
+                       goto err_unpin;
        }
 
        return 0;
+
+err_unpin:
+       i915_gem_object_unpin(obj);
+       return ret;
 }
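
intel_pin_and_fence_fb_obj() now unwinds through a single err_unpin label instead of duplicating the unpin-and-return sequence at every failure site. A self-contained sketch of that goto-based cleanup idiom; the pin/set_domain/get_fence stubs are hypothetical stand-ins for the GEM calls:

#include <stdio.h>

/* Hypothetical stand-ins for the pin / flush / fence steps. */
static int pin(void)        { return 0; }
static int set_domain(void) { return 0; }
static int get_fence(void)  { return -1; }  /* force the failure path */
static void unpin(void)     { puts("unpinned"); }

/* Later steps that fail jump to one cleanup label, so the object is
 * never left pinned on an error return. */
static int pin_and_fence(void)
{
        int ret = pin();
        if (ret)
                return ret;

        ret = set_domain();
        if (ret)
                goto err_unpin;

        ret = get_fence();
        if (ret)
                goto err_unpin;

        return 0;

err_unpin:
        unpin();
        return ret;
}

int main(void)
{
        printf("ret = %d\n", pin_and_fence());
        return 0;
}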
 
 /* Assume fb object is pinned & idle & fenced and just update base pointers */
 static int
 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-                          int x, int y)
+                          int x, int y, enum mode_set_atomic state)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1468,12 +1501,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        struct drm_gem_object *obj;
        int plane = intel_crtc->plane;
        unsigned long Start, Offset;
-       int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
-       int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
-       int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
-       int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
-       int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
        u32 dspcntr;
+       u32 reg;
 
        switch (plane) {
        case 0:
@@ -1488,7 +1517,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        obj = intel_fb->obj;
        obj_priv = to_intel_bo(obj);
 
-       dspcntr = I915_READ(dspcntr_reg);
+       reg = DSPCNTR(plane);
+       dspcntr = I915_READ(reg);
        /* Mask out pixel format bits in case we change it */
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
        switch (fb->bits_per_pixel) {
@@ -1509,7 +1539,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                DRM_ERROR("Unknown color depth\n");
                return -EINVAL;
        }
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                if (obj_priv->tiling_mode != I915_TILING_NONE)
                        dspcntr |= DISPPLANE_TILED;
                else
@@ -1520,28 +1550,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                /* must disable */
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-       I915_WRITE(dspcntr_reg, dspcntr);
+       I915_WRITE(reg, dspcntr);
 
        Start = obj_priv->gtt_offset;
        Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
 
        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
                      Start, Offset, x, y, fb->pitch);
-       I915_WRITE(dspstride, fb->pitch);
-       if (IS_I965G(dev)) {
-               I915_WRITE(dspsurf, Start);
-               I915_WRITE(dsptileoff, (y << 16) | x);
-               I915_WRITE(dspbase, Offset);
-       } else {
-               I915_WRITE(dspbase, Start + Offset);
-       }
-       POSTING_READ(dspbase);
+       I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+       if (INTEL_INFO(dev)->gen >= 4) {
+               I915_WRITE(DSPSURF(plane), Start);
+               I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+               I915_WRITE(DSPADDR(plane), Offset);
+       } else
+               I915_WRITE(DSPADDR(plane), Start + Offset);
+       POSTING_READ(reg);
 
-       if (IS_I965G(dev) || plane == 0)
-               intel_update_fbc(crtc, &crtc->mode);
-
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
-       intel_increase_pllclock(crtc, true);
+       intel_update_fbc(dev);
+       intel_increase_pllclock(crtc);
 
        return 0;
 }
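
The ternary chains (plane == 0 ? DSPAADDR : DSPBADDR) give way to index-parameterized macros such as DSPCNTR(plane) and DSPADDR(plane). A sketch of the underlying idea, assuming a base-plus-stride register layout; the offsets below are invented for illustration, not the real i915 map:

#include <stdio.h>

/* Illustrative layout: instance N lives at BASE + N * STRIDE.
 * These numbers are made up for the sketch. */
#define DSP_BASE   0x70184u
#define DSP_STRIDE 0x1000u
#define DSPADDR(plane) (DSP_BASE + (plane) * DSP_STRIDE)

int main(void)
{
        for (int plane = 0; plane < 2; plane++)
                printf("plane %d -> reg 0x%05x\n", plane, DSPADDR(plane));
        return 0;
}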
@@ -1553,11 +1579,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_master_private *master_priv;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_framebuffer *intel_fb;
-       struct drm_i915_gem_object *obj_priv;
-       struct drm_gem_object *obj;
-       int pipe = intel_crtc->pipe;
-       int plane = intel_crtc->plane;
        int ret;
 
        /* no fb bound */
@@ -1566,45 +1587,42 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return 0;
        }
 
-       switch (plane) {
+       switch (intel_crtc->plane) {
        case 0:
        case 1:
                break;
        default:
-               DRM_ERROR("Can't update plane %d in SAREA\n", plane);
                return -EINVAL;
        }
 
-       intel_fb = to_intel_framebuffer(crtc->fb);
-       obj = intel_fb->obj;
-       obj_priv = to_intel_bo(obj);
-
        mutex_lock(&dev->struct_mutex);
-       ret = intel_pin_and_fence_fb_obj(dev, obj);
+       ret = intel_pin_and_fence_fb_obj(dev,
+                                        to_intel_framebuffer(crtc->fb)->obj,
+                                        false);
        if (ret != 0) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
 
-       ret = i915_gem_object_set_to_display_plane(obj);
-       if (ret != 0) {
-               i915_gem_object_unpin(obj);
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
+       if (old_fb) {
+               struct drm_i915_private *dev_priv = dev->dev_private;
+               struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+               wait_event(dev_priv->pending_flip_queue,
+                          atomic_read(&obj_priv->pending_flip) == 0);
        }
 
-       ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+       ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
+                                        LEAVE_ATOMIC_MODE_SET);
        if (ret) {
-               i915_gem_object_unpin(obj);
+               i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
 
-       if (old_fb) {
-               intel_fb = to_intel_framebuffer(old_fb);
-               obj_priv = to_intel_bo(intel_fb->obj);
-               i915_gem_object_unpin(intel_fb->obj);
-       }
+       if (old_fb)
+               i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -1615,7 +1633,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        if (!master_priv->sarea_priv)
                return 0;
 
-       if (pipe) {
+       if (intel_crtc->pipe) {
                master_priv->sarea_priv->pipeB_x = x;
                master_priv->sarea_priv->pipeB_y = y;
        } else {
@@ -1626,7 +1644,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        return 0;
 }
 
-static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1659,6 +1677,7 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
        }
        I915_WRITE(DP_A, dpa_ctl);
 
+       POSTING_READ(DP_A);
        udelay(500);
 }
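
The added POSTING_READ(DP_A) forces the preceding MMIO write out of any write-posting buffer before the udelay() starts, so the settle time is measured from when the hardware actually saw the value. A hedged user-space sketch with an in-memory array standing in for the device BAR:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t mmio[16];      /* stand-in for a device BAR */

#define WRITE_REG(r, v)  (mmio[r] = (v))
#define READ_REG(r)      (mmio[r])
/* Read back the register we just wrote so the write is flushed out of
 * any posting buffer before we start counting the settle delay. */
#define POSTING_READ(r)  ((void)READ_REG(r))

static void udelay(unsigned us) { (void)us; /* busy-wait elided */ }

int main(void)
{
        WRITE_REG(3, 0x1u);
        POSTING_READ(3);
        udelay(500);
        printf("reg3 = 0x%x\n", (unsigned)READ_REG(3));
        return 0;
}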
 
@@ -1669,84 +1688,109 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
-       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
-       int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-       int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
-       int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
-       u32 temp, tries = 0;
+       u32 reg, temp, tries;
 
        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
-       temp = I915_READ(fdi_rx_imr_reg);
+       reg = FDI_RX_IMR(pipe);
+       temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
-       I915_WRITE(fdi_rx_imr_reg, temp);
-       I915_READ(fdi_rx_imr_reg);
+       I915_WRITE(reg, temp);
+       I915_READ(reg);
        udelay(150);
 
        /* enable CPU FDI TX and PCH FDI RX */
-       temp = I915_READ(fdi_tx_reg);
-       temp |= FDI_TX_ENABLE;
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
        temp &= ~(7 << 19);
        temp |= (intel_crtc->fdi_lanes - 1) << 19;
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
-       I915_WRITE(fdi_tx_reg, temp);
-       I915_READ(fdi_tx_reg);
+       I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
-       temp = I915_READ(fdi_rx_reg);
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
-       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
-       I915_READ(fdi_rx_reg);
+       I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+       POSTING_READ(reg);
        udelay(150);
 
+       /* Ironlake workaround, enable clock pointer after FDI enable */
+       I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+       reg = FDI_RX_IIR(pipe);
        for (tries = 0; tries < 5; tries++) {
-               temp = I915_READ(fdi_rx_iir_reg);
+               temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
                if ((temp & FDI_RX_BIT_LOCK)) {
                        DRM_DEBUG_KMS("FDI train 1 done.\n");
-                       I915_WRITE(fdi_rx_iir_reg,
-                                  temp | FDI_RX_BIT_LOCK);
+                       I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                        break;
                }
        }
        if (tries == 5)
-               DRM_DEBUG_KMS("FDI train 1 fail!\n");
+               DRM_ERROR("FDI train 1 fail!\n");
 
        /* Train 2 */
-       temp = I915_READ(fdi_tx_reg);
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
-       I915_WRITE(fdi_tx_reg, temp);
+       I915_WRITE(reg, temp);
 
-       temp = I915_READ(fdi_rx_reg);
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
-       I915_WRITE(fdi_rx_reg, temp);
-       udelay(150);
+       I915_WRITE(reg, temp);
 
-       tries = 0;
+       POSTING_READ(reg);
+       udelay(150);
 
+       reg = FDI_RX_IIR(pipe);
        for (tries = 0; tries < 5; tries++) {
-               temp = I915_READ(fdi_rx_iir_reg);
+               temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
                if (temp & FDI_RX_SYMBOL_LOCK) {
-                       I915_WRITE(fdi_rx_iir_reg,
-                                  temp | FDI_RX_SYMBOL_LOCK);
+                       I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                        DRM_DEBUG_KMS("FDI train 2 done.\n");
                        break;
                }
        }
        if (tries == 5)
-               DRM_DEBUG_KMS("FDI train 2 fail!\n");
+               DRM_ERROR("FDI train 2 fail!\n");
 
        DRM_DEBUG_KMS("FDI train done\n");
+
+       /* enable normal train */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+       I915_WRITE(reg, temp);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       if (HAS_PCH_CPT(dev)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_NONE;
+       }
+       I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+       /* wait one idle pattern time */
+       POSTING_READ(reg);
+       udelay(1000);
 }
 
-static int snb_b_fdi_train_param [] = {
+static const int snb_b_fdi_train_param[] = {
        FDI_LINK_TRAIN_400MV_0DB_SNB_B,
        FDI_LINK_TRAIN_400MV_6DB_SNB_B,
        FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -1760,24 +1804,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
-       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
-       int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-       int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
-       int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
-       u32 temp, i;
+       u32 reg, temp, i;
 
        /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
-       temp = I915_READ(fdi_rx_imr_reg);
+       reg = FDI_RX_IMR(pipe);
+       temp = I915_READ(reg);
        temp &= ~FDI_RX_SYMBOL_LOCK;
        temp &= ~FDI_RX_BIT_LOCK;
-       I915_WRITE(fdi_rx_imr_reg, temp);
-       I915_READ(fdi_rx_imr_reg);
+       I915_WRITE(reg, temp);
+
+       POSTING_READ(reg);
        udelay(150);
 
        /* enable CPU FDI TX and PCH FDI RX */
-       temp = I915_READ(fdi_tx_reg);
-       temp |= FDI_TX_ENABLE;
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
        temp &= ~(7 << 19);
        temp |= (intel_crtc->fdi_lanes - 1) << 19;
        temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1785,10 +1827,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-       I915_WRITE(fdi_tx_reg, temp);
-       I915_READ(fdi_tx_reg);
+       I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
-       temp = I915_READ(fdi_rx_reg);
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -1796,32 +1838,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
        }
-       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
-       I915_READ(fdi_rx_reg);
+       I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+       POSTING_READ(reg);
        udelay(150);
 
        for (i = 0; i < 4; i++) {
-               temp = I915_READ(fdi_tx_reg);
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
-               I915_WRITE(fdi_tx_reg, temp);
+               I915_WRITE(reg, temp);
+
+               POSTING_READ(reg);
                udelay(500);
 
-               temp = I915_READ(fdi_rx_iir_reg);
+               reg = FDI_RX_IIR(pipe);
+               temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
                if (temp & FDI_RX_BIT_LOCK) {
-                       I915_WRITE(fdi_rx_iir_reg,
-                                  temp | FDI_RX_BIT_LOCK);
+                       I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                        DRM_DEBUG_KMS("FDI train 1 done.\n");
                        break;
                }
        }
        if (i == 4)
-               DRM_DEBUG_KMS("FDI train 1 fail!\n");
+               DRM_ERROR("FDI train 1 fail!\n");
 
        /* Train 2 */
-       temp = I915_READ(fdi_tx_reg);
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
        if (IS_GEN6(dev)) {
@@ -1829,9 +1876,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
                /* SNB-B */
                temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        }
-       I915_WRITE(fdi_tx_reg, temp);
+       I915_WRITE(reg, temp);
 
-       temp = I915_READ(fdi_rx_reg);
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
                temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -1839,535 +1887,593 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_2;
        }
-       I915_WRITE(fdi_rx_reg, temp);
+       I915_WRITE(reg, temp);
+
+       POSTING_READ(reg);
        udelay(150);
 
        for (i = 0; i < 4; i++) {
-               temp = I915_READ(fdi_tx_reg);
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
                temp |= snb_b_fdi_train_param[i];
-               I915_WRITE(fdi_tx_reg, temp);
+               I915_WRITE(reg, temp);
+
+               POSTING_READ(reg);
                udelay(500);
 
-               temp = I915_READ(fdi_rx_iir_reg);
+               reg = FDI_RX_IIR(pipe);
+               temp = I915_READ(reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
                if (temp & FDI_RX_SYMBOL_LOCK) {
-                       I915_WRITE(fdi_rx_iir_reg,
-                                  temp | FDI_RX_SYMBOL_LOCK);
+                       I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                        DRM_DEBUG_KMS("FDI train 2 done.\n");
                        break;
                }
        }
        if (i == 4)
-               DRM_DEBUG_KMS("FDI train 2 fail!\n");
+               DRM_ERROR("FDI train 2 fail!\n");
 
        DRM_DEBUG_KMS("FDI train done.\n");
 }
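
gen6_fdi_link_train() sweeps the snb_b_fdi_train_param[] table, masking out the old voltage/emphasis field and OR-ing in each candidate until the link locks. A sketch of that table-driven retry, with made-up register values:

#include <stdbool.h>
#include <stdio.h>

#define VOL_EMP_MASK 0xFu

/* Invented candidate settings standing in for the SNB-B table. */
static const unsigned train_param[] = { 0x1u, 0x3u, 0x5u, 0x7u };

/* Pretend only the third setting achieves bit lock. */
static bool link_locked(unsigned ctl) { return (ctl & VOL_EMP_MASK) == 0x5u; }

int main(void)
{
        unsigned ctl = 0x80u;   /* unrelated bits stay untouched */
        unsigned i;

        for (i = 0; i < 4; i++) {
                ctl &= ~VOL_EMP_MASK;
                ctl |= train_param[i];
                if (link_locked(ctl)) {
                        printf("locked with param %u\n", i);
                        break;
                }
        }
        if (i == 4)
                fprintf(stderr, "train fail!\n");
        return 0;
}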
 
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void ironlake_fdi_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
-       int plane = intel_crtc->plane;
-       int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
-       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-       int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-       int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
-       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
-       int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-       int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
-       int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-       int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-       int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-       int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-       int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-       int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-       int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
-       int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
-       int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
-       int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
-       int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
-       int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
-       int trans_dpll_sel = (pipe == 0) ? 0 : 1;
-       u32 temp;
-       u32 pipe_bpc;
-
-       temp = I915_READ(pipeconf_reg);
-       pipe_bpc = temp & PIPE_BPC_MASK;
+       u32 reg, temp;
 
-       /* XXX: When our outputs are all unaware of DPMS modes other than off
-        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-        */
-       switch (mode) {
-       case DRM_MODE_DPMS_ON:
-       case DRM_MODE_DPMS_STANDBY:
-       case DRM_MODE_DPMS_SUSPEND:
-               DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+       /* Write the TU size bits so error detection works */
+       I915_WRITE(FDI_RX_TUSIZE1(pipe),
+                  I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-                       temp = I915_READ(PCH_LVDS);
-                       if ((temp & LVDS_PORT_EN) == 0) {
-                               I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
-                               POSTING_READ(PCH_LVDS);
-                       }
-               }
+       /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~((0x7 << 19) | (0x7 << 16));
+       temp |= (intel_crtc->fdi_lanes - 1) << 19;
+       temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+       I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
 
-               if (!HAS_eDP) {
+       POSTING_READ(reg);
+       udelay(200);
 
-                       /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
-                       temp = I915_READ(fdi_rx_reg);
-                       /*
-                        * make the BPC in FDI Rx be consistent with that in
-                        * pipeconf reg.
-                        */
-                       temp &= ~(0x7 << 16);
-                       temp |= (pipe_bpc << 11);
-                       temp &= ~(7 << 19);
-                       temp |= (intel_crtc->fdi_lanes - 1) << 19;
-                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
-                       I915_READ(fdi_rx_reg);
-                       udelay(200);
+       /* Switch from Rawclk to PCDclk */
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp | FDI_PCDCLK);
 
-                       /* Switch from Rawclk to PCDclk */
-                       temp = I915_READ(fdi_rx_reg);
-                       I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
-                       I915_READ(fdi_rx_reg);
-                       udelay(200);
+       POSTING_READ(reg);
+       udelay(200);
 
-                       /* Enable CPU FDI TX PLL, always on for Ironlake */
-                       temp = I915_READ(fdi_tx_reg);
-                       if ((temp & FDI_TX_PLL_ENABLE) == 0) {
-                               I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
-                               I915_READ(fdi_tx_reg);
-                               udelay(100);
-                       }
-               }
+       /* Enable CPU FDI TX PLL, always on for Ironlake */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+               I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
 
-               /* Enable panel fitting for LVDS */
-               if (dev_priv->pch_pf_size &&
-                   (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
-                   || HAS_eDP || intel_pch_has_edp(crtc))) {
-                       /* Force use of hard-coded filter coefficients
-                        * as some pre-programmed values are broken,
-                        * e.g. x201.
-                        */
-                       I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
-                                  PF_ENABLE | PF_FILTER_MED_3x3);
-                       I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
-                                  dev_priv->pch_pf_pos);
-                       I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
-                                  dev_priv->pch_pf_size);
-               }
+               POSTING_READ(reg);
+               udelay(100);
+       }
+}
 
-               /* Enable CPU pipe */
-               temp = I915_READ(pipeconf_reg);
-               if ((temp & PIPEACONF_ENABLE) == 0) {
-                       I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
-                       I915_READ(pipeconf_reg);
-                       udelay(100);
-               }
+static void intel_flush_display_plane(struct drm_device *dev,
+                                     int plane)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 reg = DSPADDR(plane);
+       I915_WRITE(reg, I915_READ(reg));
+}
 
-               /* configure and enable CPU plane */
-               temp = I915_READ(dspcntr_reg);
-               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-                       I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
-                       /* Flush the plane changes */
-                       I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-               }
+/*
+ * When we disable a pipe, we need to clear any pending scanline wait events
+ * to avoid hanging the ring, which we assume we are waiting on.
+ */
+static void intel_clear_scanline_wait(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 tmp;
 
-               if (!HAS_eDP) {
-                       /* For PCH output, training FDI link */
-                       if (IS_GEN6(dev))
-                               gen6_fdi_link_train(crtc);
-                       else
-                               ironlake_fdi_link_train(crtc);
+       if (IS_GEN2(dev))
+               /* Can't break the hang on i8xx */
+               return;
 
-                       /* enable PCH DPLL */
-                       temp = I915_READ(pch_dpll_reg);
-                       if ((temp & DPLL_VCO_ENABLE) == 0) {
-                               I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
-                               I915_READ(pch_dpll_reg);
-                       }
-                       udelay(200);
+       tmp = I915_READ(PRB0_CTL);
+       if (tmp & RING_WAIT) {
+               I915_WRITE(PRB0_CTL, tmp);
+               POSTING_READ(PRB0_CTL);
+       }
+}
 
-                       if (HAS_PCH_CPT(dev)) {
-                               /* Be sure PCH DPLL SEL is set */
-                               temp = I915_READ(PCH_DPLL_SEL);
-                               if (trans_dpll_sel == 0 &&
-                                               (temp & TRANSA_DPLL_ENABLE) == 0)
-                                       temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
-                               else if (trans_dpll_sel == 1 &&
-                                               (temp & TRANSB_DPLL_ENABLE) == 0)
-                                       temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
-                               I915_WRITE(PCH_DPLL_SEL, temp);
-                               I915_READ(PCH_DPLL_SEL);
-                       }
+static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_private *dev_priv;
 
-                       /* set transcoder timing */
-                       I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
-                       I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
-                       I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
-
-                       I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
-                       I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
-                       I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
-
-                       /* enable normal train */
-                       temp = I915_READ(fdi_tx_reg);
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
-                                       FDI_TX_ENHANCE_FRAME_ENABLE);
-                       I915_READ(fdi_tx_reg);
-
-                       temp = I915_READ(fdi_rx_reg);
-                       if (HAS_PCH_CPT(dev)) {
-                               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-                               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-                       } else {
-                               temp &= ~FDI_LINK_TRAIN_NONE;
-                               temp |= FDI_LINK_TRAIN_NONE;
-                       }
-                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-                       I915_READ(fdi_rx_reg);
+       if (crtc->fb == NULL)
+               return;
 
-                       /* wait one idle pattern time */
-                       udelay(100);
+       obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+       dev_priv = crtc->dev->dev_private;
+       wait_event(dev_priv->pending_flip_queue,
+                  atomic_read(&obj_priv->pending_flip) == 0);
+}
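
intel_crtc_wait_for_pending_flips() sleeps on pending_flip_queue until the object's pending_flip count reaches zero. A user-space analogue of that wait_event() shape using a pthread condition variable; all names here are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pending_flips = 1;

/* Stand-in for the "flip completed" interrupt path. */
static void *flip_done(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        pending_flips--;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, flip_done, NULL);

        /* wait_event(queue, atomic_read(&count) == 0) analogue. */
        pthread_mutex_lock(&lock);
        while (pending_flips != 0)
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("no pending flips; safe to disable the pipe");
        return 0;
}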
 
-                       /* For PCH DP, enable TRANS_DP_CTL */
-                       if (HAS_PCH_CPT(dev) &&
-                           intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-                               int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
-                               int reg;
-
-                               reg = I915_READ(trans_dp_ctl);
-                               reg &= ~(TRANS_DP_PORT_SEL_MASK |
-                                        TRANS_DP_SYNC_MASK);
-                               reg |= (TRANS_DP_OUTPUT_ENABLE |
-                                       TRANS_DP_ENH_FRAMING);
-
-                               if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
-                                     reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
-                               if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
-                                     reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
-
-                               switch (intel_trans_dp_port_sel(crtc)) {
-                               case PCH_DP_B:
-                                       reg |= TRANS_DP_PORT_SEL_B;
-                                       break;
-                               case PCH_DP_C:
-                                       reg |= TRANS_DP_PORT_SEL_C;
-                                       break;
-                               case PCH_DP_D:
-                                       reg |= TRANS_DP_PORT_SEL_D;
-                                       break;
-                               default:
-                                       DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
-                                       reg |= TRANS_DP_PORT_SEL_B;
-                                       break;
-                               }
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
+       u32 reg, temp;
 
-                               I915_WRITE(trans_dp_ctl, reg);
-                               POSTING_READ(trans_dp_ctl);
-                       }
+       if (intel_crtc->active)
+               return;
 
-                       /* enable PCH transcoder */
-                       temp = I915_READ(transconf_reg);
-                       /*
-                        * make the BPC in transcoder be consistent with
-                        * that in pipeconf reg.
-                        */
-                       temp &= ~PIPE_BPC_MASK;
-                       temp |= pipe_bpc;
-                       I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
-                       I915_READ(transconf_reg);
+       intel_crtc->active = true;
+       intel_update_watermarks(dev);
 
-                       if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
-                               DRM_ERROR("failed to enable transcoder\n");
-               }
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+               temp = I915_READ(PCH_LVDS);
+               if ((temp & LVDS_PORT_EN) == 0)
+                       I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+       }
 
-               intel_crtc_load_lut(crtc);
+       ironlake_fdi_enable(crtc);
 
-               intel_update_fbc(crtc, &crtc->mode);
-               break;
+       /* Enable panel fitting for LVDS */
+       if (dev_priv->pch_pf_size &&
+           (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+               /* Force use of hard-coded filter coefficients
+                * as some pre-programmed values are broken,
+                * e.g. x201.
+                */
+               I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+                          PF_ENABLE | PF_FILTER_MED_3x3);
+               I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+                          dev_priv->pch_pf_pos);
+               I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+                          dev_priv->pch_pf_size);
+       }
+
+       /* Enable CPU pipe */
+       reg = PIPECONF(pipe);
+       temp = I915_READ(reg);
+       if ((temp & PIPECONF_ENABLE) == 0) {
+               I915_WRITE(reg, temp | PIPECONF_ENABLE);
+               POSTING_READ(reg);
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
+       }
+
+       /* configure and enable CPU plane */
+       reg = DSPCNTR(plane);
+       temp = I915_READ(reg);
+       if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+               I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
+               intel_flush_display_plane(dev, plane);
+       }
+
+       /* For PCH output, training FDI link */
+       if (IS_GEN6(dev))
+               gen6_fdi_link_train(crtc);
+       else
+               ironlake_fdi_link_train(crtc);
+
+       /* enable PCH DPLL */
+       reg = PCH_DPLL(pipe);
+       temp = I915_READ(reg);
+       if ((temp & DPLL_VCO_ENABLE) == 0) {
+               I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+               POSTING_READ(reg);
+               udelay(200);
+       }
 
-       case DRM_MODE_DPMS_OFF:
-               DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+       if (HAS_PCH_CPT(dev)) {
+               /* Be sure PCH DPLL SEL is set */
+               temp = I915_READ(PCH_DPLL_SEL);
+               if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
+                       temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+               else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
+                       temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+               I915_WRITE(PCH_DPLL_SEL, temp);
+       }
 
-               drm_vblank_off(dev, pipe);
-               /* Disable display plane */
-               temp = I915_READ(dspcntr_reg);
-               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-                       I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
-                       /* Flush the plane changes */
-                       I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-                       I915_READ(dspbase_reg);
+       /* set transcoder timing */
+       I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
+       I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
+       I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
+
+       I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
+       I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
+       I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
+
+       /* For PCH DP, enable TRANS_DP_CTL */
+       if (HAS_PCH_CPT(dev) &&
+           intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+               reg = TRANS_DP_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~(TRANS_DP_PORT_SEL_MASK |
+                         TRANS_DP_SYNC_MASK);
+               temp |= (TRANS_DP_OUTPUT_ENABLE |
+                        TRANS_DP_ENH_FRAMING);
+
+               if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+                       temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+               if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+                       temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+               switch (intel_trans_dp_port_sel(crtc)) {
+               case PCH_DP_B:
+                       temp |= TRANS_DP_PORT_SEL_B;
+                       break;
+               case PCH_DP_C:
+                       temp |= TRANS_DP_PORT_SEL_C;
+                       break;
+               case PCH_DP_D:
+                       temp |= TRANS_DP_PORT_SEL_D;
+                       break;
+               default:
+                       DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+                       temp |= TRANS_DP_PORT_SEL_B;
+                       break;
                }
 
-               if (dev_priv->cfb_plane == plane &&
-                   dev_priv->display.disable_fbc)
-                       dev_priv->display.disable_fbc(dev);
+               I915_WRITE(reg, temp);
+       }
+
+       /* enable PCH transcoder */
+       reg = TRANSCONF(pipe);
+       temp = I915_READ(reg);
+       /*
+        * make the BPC in transcoder be consistent with
+        * that in pipeconf reg.
+        */
+       temp &= ~PIPE_BPC_MASK;
+       temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+       I915_WRITE(reg, temp | TRANS_ENABLE);
+       if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+               DRM_ERROR("failed to enable transcoder %d\n", pipe);
 
-               /* disable cpu pipe, disable after all planes disabled */
-               temp = I915_READ(pipeconf_reg);
-               if ((temp & PIPEACONF_ENABLE) != 0) {
-                       I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+       intel_crtc_load_lut(crtc);
+       intel_update_fbc(dev);
+       intel_crtc_update_cursor(crtc, true);
+}
 
-                       /* wait for cpu pipe off, pipe state */
-                       if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
-                               DRM_ERROR("failed to turn off cpu pipe\n");
-               } else
-                       DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
+       u32 reg, temp;
 
-               udelay(100);
+       if (!intel_crtc->active)
+               return;
 
-               /* Disable PF */
-               I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
-               I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
+       intel_crtc_wait_for_pending_flips(crtc);
+       drm_vblank_off(dev, pipe);
+       intel_crtc_update_cursor(crtc, false);
 
-               /* disable CPU FDI tx and PCH FDI rx */
-               temp = I915_READ(fdi_tx_reg);
-               I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
-               I915_READ(fdi_tx_reg);
+       /* Disable display plane */
+       reg = DSPCNTR(plane);
+       temp = I915_READ(reg);
+       if (temp & DISPLAY_PLANE_ENABLE) {
+               I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
+               intel_flush_display_plane(dev, plane);
+       }
 
-               temp = I915_READ(fdi_rx_reg);
-               /* BPC in FDI rx is consistent with that in pipeconf */
-               temp &= ~(0x07 << 16);
-               temp |= (pipe_bpc << 11);
-               I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
-               I915_READ(fdi_rx_reg);
+       if (dev_priv->cfb_plane == plane &&
+           dev_priv->display.disable_fbc)
+               dev_priv->display.disable_fbc(dev);
 
-               udelay(100);
+       /* disable cpu pipe, disable after all planes disabled */
+       reg = PIPECONF(pipe);
+       temp = I915_READ(reg);
+       if (temp & PIPECONF_ENABLE) {
+               I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
+               POSTING_READ(reg);
+               /* wait for cpu pipe off, pipe state */
+               intel_wait_for_pipe_off(dev, intel_crtc->pipe);
+       }
+
+       /* Disable PF */
+       I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+       I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
+
+       /* disable CPU FDI tx and PCH FDI rx */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+       POSTING_READ(reg);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~(0x7 << 16);
+       temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+       I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+       POSTING_READ(reg);
+       udelay(100);
+
+       /* Ironlake workaround, disable clock pointer after downing FDI */
+       I915_WRITE(FDI_RX_CHICKEN(pipe),
+                  I915_READ(FDI_RX_CHICKEN(pipe)) &
+                  ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+       /* still set train pattern 1 */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_PATTERN_1;
+       I915_WRITE(reg, temp);
 
-               /* still set train pattern 1 */
-               temp = I915_READ(fdi_tx_reg);
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       if (HAS_PCH_CPT(dev)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+       } else {
                temp &= ~FDI_LINK_TRAIN_NONE;
                temp |= FDI_LINK_TRAIN_PATTERN_1;
-               I915_WRITE(fdi_tx_reg, temp);
-               POSTING_READ(fdi_tx_reg);
-
-               temp = I915_READ(fdi_rx_reg);
-               if (HAS_PCH_CPT(dev)) {
-                       temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-                       temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-               } else {
-                       temp &= ~FDI_LINK_TRAIN_NONE;
-                       temp |= FDI_LINK_TRAIN_PATTERN_1;
-               }
-               I915_WRITE(fdi_rx_reg, temp);
-               POSTING_READ(fdi_rx_reg);
+       }
+       /* BPC in FDI rx is consistent with that in PIPECONF */
+       temp &= ~(0x07 << 16);
+       temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+       I915_WRITE(reg, temp);
 
-               udelay(100);
+       POSTING_READ(reg);
+       udelay(100);
 
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-                       temp = I915_READ(PCH_LVDS);
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+               temp = I915_READ(PCH_LVDS);
+               if (temp & LVDS_PORT_EN) {
                        I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
-                       I915_READ(PCH_LVDS);
+                       POSTING_READ(PCH_LVDS);
                        udelay(100);
                }
+       }
 
-               /* disable PCH transcoder */
-               temp = I915_READ(transconf_reg);
-               if ((temp & TRANS_ENABLE) != 0) {
-                       I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
+       /* disable PCH transcoder */
+       reg = TRANSCONF(pipe);
+       temp = I915_READ(reg);
+       if (temp & TRANS_ENABLE) {
+               I915_WRITE(reg, temp & ~TRANS_ENABLE);
+               /* wait for PCH transcoder off, transcoder state */
+               if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+                       DRM_ERROR("failed to disable transcoder\n");
+       }
 
-                       /* wait for PCH transcoder off, transcoder state */
-                       if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
-                               DRM_ERROR("failed to disable transcoder\n");
-               }
+       if (HAS_PCH_CPT(dev)) {
+               /* disable TRANS_DP_CTL */
+               reg = TRANS_DP_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+               I915_WRITE(reg, temp);
 
-               temp = I915_READ(transconf_reg);
-               /* BPC in transcoder is consistent with that in pipeconf */
-               temp &= ~PIPE_BPC_MASK;
-               temp |= pipe_bpc;
-               I915_WRITE(transconf_reg, temp);
-               I915_READ(transconf_reg);
-               udelay(100);
+               /* disable DPLL_SEL */
+               temp = I915_READ(PCH_DPLL_SEL);
+               if (pipe == 0)
+                       temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+               else
+                       temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+               I915_WRITE(PCH_DPLL_SEL, temp);
+       }
 
-               if (HAS_PCH_CPT(dev)) {
-                       /* disable TRANS_DP_CTL */
-                       int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
-                       int reg;
+       /* disable PCH DPLL */
+       reg = PCH_DPLL(pipe);
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
 
-                       reg = I915_READ(trans_dp_ctl);
-                       reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
-                       I915_WRITE(trans_dp_ctl, reg);
-                       POSTING_READ(trans_dp_ctl);
+       /* Switch from PCDclk to Rawclk */
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp & ~FDI_PCDCLK);
 
-                       /* disable DPLL_SEL */
-                       temp = I915_READ(PCH_DPLL_SEL);
-                       if (trans_dpll_sel == 0)
-                               temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
-                       else
-                               temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
-                       I915_WRITE(PCH_DPLL_SEL, temp);
-                       I915_READ(PCH_DPLL_SEL);
+       /* Disable CPU FDI TX PLL */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
 
-               }
+       POSTING_READ(reg);
+       udelay(100);
 
-               /* disable PCH DPLL */
-               temp = I915_READ(pch_dpll_reg);
-               I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
-               I915_READ(pch_dpll_reg);
-
-               /* Switch from PCDclk to Rawclk */
-               temp = I915_READ(fdi_rx_reg);
-               temp &= ~FDI_SEL_PCDCLK;
-               I915_WRITE(fdi_rx_reg, temp);
-               I915_READ(fdi_rx_reg);
-
-               /* Disable CPU FDI TX PLL */
-               temp = I915_READ(fdi_tx_reg);
-               I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
-               I915_READ(fdi_tx_reg);
-               udelay(100);
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
 
-               temp = I915_READ(fdi_rx_reg);
-               temp &= ~FDI_RX_PLL_ENABLE;
-               I915_WRITE(fdi_rx_reg, temp);
-               I915_READ(fdi_rx_reg);
+       /* Wait for the clocks to turn off. */
+       POSTING_READ(reg);
+       udelay(100);
 
-               /* Wait for the clocks to turn off. */
-               udelay(100);
+       intel_crtc->active = false;
+       intel_update_watermarks(dev);
+       intel_update_fbc(dev);
+       intel_clear_scanline_wait(dev);
+}
+
+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
+
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+        */
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+               ironlake_crtc_enable(crtc);
+               break;
+
+       case DRM_MODE_DPMS_OFF:
+               DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+               ironlake_crtc_disable(crtc);
                break;
        }
 }
 
 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
-       struct intel_overlay *overlay;
-       int ret;
-
        if (!enable && intel_crtc->overlay) {
-               overlay = intel_crtc->overlay;
-               mutex_lock(&overlay->dev->struct_mutex);
-               for (;;) {
-                       ret = intel_overlay_switch_off(overlay);
-                       if (ret == 0)
-                               break;
+               struct drm_device *dev = intel_crtc->base.dev;
 
-                       ret = intel_overlay_recover_from_interrupt(overlay, 0);
-                       if (ret != 0) {
-                               /* overlay doesn't react anymore. Usually
-                                * results in a black screen and an unkillable
-                                * X server. */
-                               BUG();
-                               overlay->hw_wedged = HW_WEDGED;
-                               break;
-                       }
-               }
-               mutex_unlock(&overlay->dev->struct_mutex);
+               mutex_lock(&dev->struct_mutex);
+               (void) intel_overlay_switch_off(intel_crtc->overlay, false);
+               mutex_unlock(&dev->struct_mutex);
        }
-       /* Let userspace switch the overlay on again. In most cases userspace
-        * has to recompute where to put it anyway. */
 
-       return;
+       /* Let userspace switch the overlay on again. In most cases userspace
+        * has to recompute where to put it anyway.
+        */
 }
 
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
-       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-       int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-       int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
-       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-       u32 temp;
+       u32 reg, temp;
 
-       /* XXX: When our outputs are all unaware of DPMS modes other than off
-        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-        */
-       switch (mode) {
-       case DRM_MODE_DPMS_ON:
-       case DRM_MODE_DPMS_STANDBY:
-       case DRM_MODE_DPMS_SUSPEND:
-               /* Enable the DPLL */
-               temp = I915_READ(dpll_reg);
-               if ((temp & DPLL_VCO_ENABLE) == 0) {
-                       I915_WRITE(dpll_reg, temp);
-                       I915_READ(dpll_reg);
-                       /* Wait for the clocks to stabilize. */
-                       udelay(150);
-                       I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-                       I915_READ(dpll_reg);
-                       /* Wait for the clocks to stabilize. */
-                       udelay(150);
-                       I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-                       I915_READ(dpll_reg);
-                       /* Wait for the clocks to stabilize. */
-                       udelay(150);
-               }
+       if (intel_crtc->active)
+               return;
 
-               /* Enable the pipe */
-               temp = I915_READ(pipeconf_reg);
-               if ((temp & PIPEACONF_ENABLE) == 0)
-                       I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
-
-               /* Enable the plane */
-               temp = I915_READ(dspcntr_reg);
-               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-                       I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
-                       /* Flush the plane changes */
-                       I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-               }
+       intel_crtc->active = true;
+       intel_update_watermarks(dev);
 
-               intel_crtc_load_lut(crtc);
+       /* Enable the DPLL */
+       reg = DPLL(pipe);
+       temp = I915_READ(reg);
+       if ((temp & DPLL_VCO_ENABLE) == 0) {
+               I915_WRITE(reg, temp);
 
-               if ((IS_I965G(dev) || plane == 0))
-                       intel_update_fbc(crtc, &crtc->mode);
+               /* Wait for the clocks to stabilize. */
+               POSTING_READ(reg);
+               udelay(150);
 
-               /* Give the overlay scaler a chance to enable if it's on this pipe */
-               intel_crtc_dpms_overlay(intel_crtc, true);
-       break;
-       case DRM_MODE_DPMS_OFF:
-               /* Give the overlay scaler a chance to disable if it's on this pipe */
-               intel_crtc_dpms_overlay(intel_crtc, false);
-               drm_vblank_off(dev, pipe);
-
-               if (dev_priv->cfb_plane == plane &&
-                   dev_priv->display.disable_fbc)
-                       dev_priv->display.disable_fbc(dev);
-
-               /* Disable display plane */
-               temp = I915_READ(dspcntr_reg);
-               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-                       I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
-                       /* Flush the plane changes */
-                       I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-                       I915_READ(dspbase_reg);
-               }
+               I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+
+               /* Wait for the clocks to stabilize. */
+               POSTING_READ(reg);
+               udelay(150);
+
+               I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
+
+               /* Wait for the clocks to stabilize. */
+               POSTING_READ(reg);
+               udelay(150);
+       }
+
+       /* Enable the pipe */
+       reg = PIPECONF(pipe);
+       temp = I915_READ(reg);
+       if ((temp & PIPECONF_ENABLE) == 0)
+               I915_WRITE(reg, temp | PIPECONF_ENABLE);
+
+       /* Enable the plane */
+       reg = DSPCNTR(plane);
+       temp = I915_READ(reg);
+       if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+               I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
+               intel_flush_display_plane(dev, plane);
+       }
+
+       intel_crtc_load_lut(crtc);
+       intel_update_fbc(dev);
+
+       /* Give the overlay scaler a chance to enable if it's on this pipe */
+       intel_crtc_dpms_overlay(intel_crtc, true);
+       intel_crtc_update_cursor(crtc, true);
+}
 
-               /* Don't disable pipe A or pipe A PLLs if needed */
-               if (pipeconf_reg == PIPEACONF &&
-                   (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
-                       /* Wait for vblank for the disable to take effect */
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
+       u32 reg, temp;
+
+       if (!intel_crtc->active)
+               return;
+
+       /* Give the overlay scaler a chance to disable if it's on this pipe */
+       intel_crtc_wait_for_pending_flips(crtc);
+       drm_vblank_off(dev, pipe);
+       intel_crtc_dpms_overlay(intel_crtc, false);
+       intel_crtc_update_cursor(crtc, false);
+
+       if (dev_priv->cfb_plane == plane &&
+           dev_priv->display.disable_fbc)
+               dev_priv->display.disable_fbc(dev);
+
+       /* Disable display plane */
+       reg = DSPCNTR(plane);
+       temp = I915_READ(reg);
+       if (temp & DISPLAY_PLANE_ENABLE) {
+               I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
+               /* Flush the plane changes */
+               intel_flush_display_plane(dev, plane);
+
+               /* Wait for vblank for the disable to take effect */
+               if (IS_GEN2(dev))
                        intel_wait_for_vblank(dev, pipe);
-                       goto skip_pipe_off;
-               }
+       }
 
-               /* Next, disable display pipes */
-               temp = I915_READ(pipeconf_reg);
-               if ((temp & PIPEACONF_ENABLE) != 0) {
-                       I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-                       I915_READ(pipeconf_reg);
-               }
+       /* Don't disable pipe A or pipe A PLLs if needed */
+       if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+               goto done;
+
+       /* Next, disable display pipes */
+       reg = PIPECONF(pipe);
+       temp = I915_READ(reg);
+       if (temp & PIPECONF_ENABLE) {
+               I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
 
                /* Wait for the pipe to turn off */
+               POSTING_READ(reg);
                intel_wait_for_pipe_off(dev, pipe);
+       }
+
+       reg = DPLL(pipe);
+       temp = I915_READ(reg);
+       if (temp & DPLL_VCO_ENABLE) {
+               I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
 
-               temp = I915_READ(dpll_reg);
-               if ((temp & DPLL_VCO_ENABLE) != 0) {
-                       I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
-                       I915_READ(dpll_reg);
-               }
-       skip_pipe_off:
                /* Wait for the clocks to turn off. */
+               POSTING_READ(reg);
                udelay(150);
+       }
+
+done:
+       intel_crtc->active = false;
+       intel_update_fbc(dev);
+       intel_update_watermarks(dev);
+       intel_clear_scanline_wait(dev);
+}
+
+static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+        */
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               i9xx_crtc_enable(crtc);
+               break;
+       case DRM_MODE_DPMS_OFF:
+               i9xx_crtc_disable(crtc);
                break;
        }
 }
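
Splitting the old DPMS switch into dedicated enable/disable helpers also
makes both directions idempotent: the intel_crtc->active flag short-circuits
redundant transitions, so repeated DPMS_ON (or DPMS_OFF) calls are cheap
no-ops. The shape of the pattern, reduced to its essentials (sketch only):

	static void example_crtc_enable(struct intel_crtc *intel_crtc)
	{
		if (intel_crtc->active)		/* already on: nothing to do */
			return;
		intel_crtc->active = true;
		/* ... power up DPLL, pipe, plane ... */
	}
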
@@ -2388,26 +2494,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
                return;
 
        intel_crtc->dpms_mode = mode;
-       intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
-
-       /* When switching on the display, ensure that SR is disabled
-        * with multiple pipes prior to enabling to new pipe.
-        *
-        * When switching off the display, make sure the cursor is
-        * properly hidden prior to disabling the pipe.
-        */
-       if (mode == DRM_MODE_DPMS_ON)
-               intel_update_watermarks(dev);
-       else
-               intel_crtc_update_cursor(crtc);
 
        dev_priv->display.dpms(crtc, mode);
 
-       if (mode == DRM_MODE_DPMS_ON)
-               intel_crtc_update_cursor(crtc);
-       else
-               intel_update_watermarks(dev);
-
        if (!dev->primary->master)
                return;
 
@@ -2432,16 +2521,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
        }
 }
 
-static void intel_crtc_prepare (struct drm_crtc *crtc)
+static void intel_crtc_disable(struct drm_crtc *crtc)
 {
        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       struct drm_device *dev = crtc->dev;
+
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+       if (crtc->fb) {
+               mutex_lock(&dev->struct_mutex);
+               i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+               mutex_unlock(&dev->struct_mutex);
+       }
+}
+
+/* Prepare for a mode set.
+ *
+ * Note we could be a lot smarter here.  We need to figure out which outputs
+ * will be enabled, which disabled (in short, how the config will change)
+ * and perform the minimum necessary steps to accomplish that, e.g. updating
+ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
+ * panel fitting is in the proper state, etc.
+ */
+static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+{
+       i9xx_crtc_disable(crtc);
 }
 
-static void intel_crtc_commit (struct drm_crtc *crtc)
+static void i9xx_crtc_commit(struct drm_crtc *crtc)
 {
-       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+       i9xx_crtc_enable(crtc);
+}
+
+static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+{
+       ironlake_crtc_disable(crtc);
+}
+
+static void ironlake_crtc_commit(struct drm_crtc *crtc)
+{
+       ironlake_crtc_enable(crtc);
 }
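
These prepare/commit hooks are invoked by the DRM CRTC helper during a full
mode set, which is what makes "prepare = disable everything" and "commit =
enable everything" a safe pairing. In outline (a sketch of the helper flow
of this era, not code from this patch):

	/* drm_crtc_helper_set_mode(), roughly: */
	crtc_funcs->prepare(crtc);	/* i9xx/ironlake_crtc_disable */
	/* ... encoder prepare and mode_set calls ... */
	crtc_funcs->commit(crtc);	/* i9xx/ironlake_crtc_enable */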
 
 void intel_encoder_prepare (struct drm_encoder *encoder)
@@ -2460,13 +2579,7 @@ void intel_encoder_commit (struct drm_encoder *encoder)
 
 void intel_encoder_destroy(struct drm_encoder *encoder)
 {
-       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-       if (intel_encoder->ddc_bus)
-               intel_i2c_destroy(intel_encoder->ddc_bus);
-
-       if (intel_encoder->i2c_bus)
-               intel_i2c_destroy(intel_encoder->i2c_bus);
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 
        drm_encoder_cleanup(encoder);
        kfree(intel_encoder);
@@ -2557,33 +2670,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
        return 133000;
 }
 
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-int intel_panel_fitter_pipe (struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32  pfit_control;
-
-       /* i830 doesn't have a panel fitter */
-       if (IS_I830(dev))
-               return -1;
-
-       pfit_control = I915_READ(PFIT_CONTROL);
-
-       /* See if the panel fitter is in use */
-       if ((pfit_control & PFIT_ENABLE) == 0)
-               return -1;
-
-       /* 965 can place panel fitter on either pipe */
-       if (IS_I965G(dev))
-               return (pfit_control >> 29) & 0x3;
-
-       /* older chips can only use pipe 1 */
-       return 1;
-}
-
 struct fdi_m_n {
        u32        tu;
        u32        gmch_m;
@@ -2902,7 +2988,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
 
        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-                       plane ? "B" : "A", size);
+                     plane ? "B" : "A", size);
 
        return size;
 }
@@ -2919,7 +3005,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
        size >>= 1; /* Convert to cachelines */
 
        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-                       plane ? "B" : "A", size);
+                     plane ? "B" : "A", size);
 
        return size;
 }
@@ -2934,8 +3020,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
        size >>= 2; /* Convert to cachelines */
 
        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-                       plane ? "B" : "A",
-                 size);
+                     plane ? "B" : "A",
+                     size);
 
        return size;
 }
@@ -2950,14 +3036,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
        size >>= 1; /* Convert to cachelines */
 
        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
-                       plane ? "B" : "A", size);
+                     plane ? "B" : "A", size);
 
        return size;
 }
 
 static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
-                         int planeb_clock, int sr_hdisplay, int unused,
-                         int pixel_size)
+                              int planeb_clock, int sr_hdisplay, int unused,
+                              int pixel_size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct cxsr_latency *latency;
@@ -3069,13 +3155,13 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
 
                /* Use ns/us then divide to preserve precision */
                sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-                             pixel_size * sr_hdisplay;
+                       pixel_size * sr_hdisplay;
                sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
 
                entries_required = (((sr_latency_ns / line_time_us) +
                                     1000) / 1000) * pixel_size * 64;
                entries_required = DIV_ROUND_UP(entries_required,
-                                          g4x_cursor_wm_info.cacheline_size);
+                                               g4x_cursor_wm_info.cacheline_size);
                cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
 
                if (cursor_sr > g4x_cursor_wm_info.max_wm)
@@ -3087,7 +3173,7 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
        } else {
                /* Turn off self refresh if both pipes are enabled */
                I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-                                       & ~FW_BLC_SELF_EN);
+                          & ~FW_BLC_SELF_EN);
        }
 
        DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -3125,7 +3211,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 
                /* Use ns/us then divide to preserve precision */
                sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-                             pixel_size * sr_hdisplay;
+                       pixel_size * sr_hdisplay;
                sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
                DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
                srwm = I965_FIFO_SIZE - sr_entries;
@@ -3134,11 +3220,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
                srwm &= 0x1ff;
 
                sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-                            pixel_size * 64;
+                       pixel_size * 64;
                sr_entries = DIV_ROUND_UP(sr_entries,
                                          i965_cursor_wm_info.cacheline_size);
                cursor_sr = i965_cursor_wm_info.fifo_size -
-                           (sr_entries + i965_cursor_wm_info.guard_size);
+                       (sr_entries + i965_cursor_wm_info.guard_size);
 
                if (cursor_sr > i965_cursor_wm_info.max_wm)
                        cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3146,11 +3232,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
                DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
                              "cursor %d\n", srwm, cursor_sr);
 
-               if (IS_I965GM(dev))
+               if (IS_CRESTLINE(dev))
                        I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
        } else {
                /* Turn off self refresh if both pipes are enabled */
-               if (IS_I965GM(dev))
+               if (IS_CRESTLINE(dev))
                        I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
                                   & ~FW_BLC_SELF_EN);
        }
@@ -3180,9 +3266,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
        int sr_clock, sr_entries = 0;
 
        /* Create copies of the base settings for each pipe */
-       if (IS_I965GM(dev) || IS_I945GM(dev))
+       if (IS_CRESTLINE(dev) || IS_I945GM(dev))
                planea_params = planeb_params = i945_wm_info;
-       else if (IS_I9XX(dev))
+       else if (!IS_GEN2(dev))
                planea_params = planeb_params = i915_wm_info;
        else
                planea_params = planeb_params = i855_wm_info;
@@ -3217,7 +3303,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 
                /* Use ns/us then divide to preserve precision */
                sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-                             pixel_size * sr_hdisplay;
+                       pixel_size * sr_hdisplay;
                sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
                DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
                srwm = total_size - sr_entries;
@@ -3242,7 +3328,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
        }
 
        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
-                 planea_wm, planeb_wm, cwm, srwm);
+                     planea_wm, planeb_wm, cwm, srwm);
 
        fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
        fwater_hi = (cwm & 0x1f);
@@ -3276,146 +3362,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
 #define ILK_LP0_PLANE_LATENCY          700
 #define ILK_LP0_CURSOR_LATENCY         1300
 
-static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
-                      int planeb_clock, int sr_hdisplay, int sr_htotal,
-                      int pixel_size)
+static bool ironlake_compute_wm0(struct drm_device *dev,
+                                int pipe,
+                                int *plane_wm,
+                                int *cursor_wm)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
-       int sr_wm, cursor_wm;
-       unsigned long line_time_us;
-       int sr_clock, entries_required;
-       u32 reg_value;
-       int line_count;
-       int planea_htotal = 0, planeb_htotal = 0;
        struct drm_crtc *crtc;
+       int htotal, hdisplay, clock, pixel_size = 0;
+       int line_time_us, line_count, entries;
 
-       /* Need htotal for all active display plane */
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
-                       if (intel_crtc->plane == 0)
-                               planea_htotal = crtc->mode.htotal;
-                       else
-                               planeb_htotal = crtc->mode.htotal;
-               }
-       }
-
-       /* Calculate and update the watermark for plane A */
-       if (planea_clock) {
-               entries_required = ((planea_clock / 1000) * pixel_size *
-                                    ILK_LP0_PLANE_LATENCY) / 1000;
-               entries_required = DIV_ROUND_UP(entries_required,
-                                               ironlake_display_wm_info.cacheline_size);
-               planea_wm = entries_required +
-                           ironlake_display_wm_info.guard_size;
-
-               if (planea_wm > (int)ironlake_display_wm_info.max_wm)
-                       planea_wm = ironlake_display_wm_info.max_wm;
-
-               /* Use the large buffer method to calculate cursor watermark */
-               line_time_us = (planea_htotal * 1000) / planea_clock;
-
-               /* Use ns/us then divide to preserve precision */
-               line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
-
-               /* calculate the cursor watermark for cursor A */
-               entries_required = line_count * 64 * pixel_size;
-               entries_required = DIV_ROUND_UP(entries_required,
-                                               ironlake_cursor_wm_info.cacheline_size);
-               cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
-               if (cursora_wm > ironlake_cursor_wm_info.max_wm)
-                       cursora_wm = ironlake_cursor_wm_info.max_wm;
-
-               reg_value = I915_READ(WM0_PIPEA_ILK);
-               reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-               reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
-                            (cursora_wm & WM0_PIPE_CURSOR_MASK);
-               I915_WRITE(WM0_PIPEA_ILK, reg_value);
-               DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
-                               "cursor: %d\n", planea_wm, cursora_wm);
-       }
-       /* Calculate and update the watermark for plane B */
-       if (planeb_clock) {
-               entries_required = ((planeb_clock / 1000) * pixel_size *
-                                    ILK_LP0_PLANE_LATENCY) / 1000;
-               entries_required = DIV_ROUND_UP(entries_required,
-                                               ironlake_display_wm_info.cacheline_size);
-               planeb_wm = entries_required +
-                           ironlake_display_wm_info.guard_size;
-
-               if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
-                       planeb_wm = ironlake_display_wm_info.max_wm;
+       crtc = intel_get_crtc_for_pipe(dev, pipe);
+       if (crtc->fb == NULL || !crtc->enabled)
+               return false;
 
-               /* Use the large buffer method to calculate cursor watermark */
-               line_time_us = (planeb_htotal * 1000) / planeb_clock;
+       htotal = crtc->mode.htotal;
+       hdisplay = crtc->mode.hdisplay;
+       clock = crtc->mode.clock;
+       pixel_size = crtc->fb->bits_per_pixel / 8;
+
+       /* Use the small buffer method to calculate plane watermark */
+       entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
+       entries = DIV_ROUND_UP(entries,
+                              ironlake_display_wm_info.cacheline_size);
+       *plane_wm = entries + ironlake_display_wm_info.guard_size;
+       if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
+               *plane_wm = ironlake_display_wm_info.max_wm;
+
+       /* Use the large buffer method to calculate cursor watermark */
+       line_time_us = ((htotal * 1000) / clock);
+       line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+       entries = line_count * 64 * pixel_size;
+       entries = DIV_ROUND_UP(entries,
+                              ironlake_cursor_wm_info.cacheline_size);
+       *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
+       if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
+               *cursor_wm = ironlake_cursor_wm_info.max_wm;
 
-               /* Use ns/us then divide to preserve precision */
-               line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+       return true;
+}
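
To make the two buffer methods concrete, here is a worked pass through
ironlake_compute_wm0() under assumed numbers (guard_size comes from the
wm_info tables and is left symbolic):

	/* Assume clock = 100000 kHz, pixel_size = 4 (32bpp),
	 * htotal = 2000, cacheline_size = 64.
	 *
	 * Small-buffer plane watermark:
	 *   bytes per us            = 100000 * 4 / 1000     = 400
	 *   bytes in 700 ns latency = 400 * 700 / 1000      = 280
	 *   entries                 = DIV_ROUND_UP(280, 64) = 5
	 *   plane_wm                = 5 + guard_size
	 *
	 * Large-buffer cursor watermark:
	 *   line_time_us = 2000 * 1000 / 100000             = 20
	 *   line_count   = (1300 / 20 + 1000) / 1000        = 1
	 *   entries      = DIV_ROUND_UP(1 * 64 * 4, 64)     = 4
	 *   cursor_wm    = 4 + guard_size
	 */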
 
-               /* calculate the cursor watermark for cursor B */
-               entries_required = line_count * 64 * pixel_size;
-               entries_required = DIV_ROUND_UP(entries_required,
-                                               ironlake_cursor_wm_info.cacheline_size);
-               cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
-               if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
-                       cursorb_wm = ironlake_cursor_wm_info.max_wm;
+static void ironlake_update_wm(struct drm_device *dev,
+                              int planea_clock, int planeb_clock,
+                              int sr_hdisplay, int sr_htotal,
+                              int pixel_size)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int plane_wm, cursor_wm, enabled;
+       int tmp;
+
+       enabled = 0;
+       if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+               I915_WRITE(WM0_PIPEA_ILK,
+                          (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+               DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+                             " plane %d, cursor: %d\n",
+                             plane_wm, cursor_wm);
+               enabled++;
+       }
 
-               reg_value = I915_READ(WM0_PIPEB_ILK);
-               reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
-               reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
-                            (cursorb_wm & WM0_PIPE_CURSOR_MASK);
-               I915_WRITE(WM0_PIPEB_ILK, reg_value);
-               DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
-                               "cursor: %d\n", planeb_wm, cursorb_wm);
+       if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+               I915_WRITE(WM0_PIPEB_ILK,
+                          (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+               DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+                             " plane %d, cursor: %d\n",
+                             plane_wm, cursor_wm);
+               enabled++;
        }
 
        /*
         * Calculate and update the self-refresh watermark only when one
         * display plane is used.
         */
-       if (!planea_clock || !planeb_clock) {
-
+       tmp = 0;
+       if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) {
+               unsigned long line_time_us;
+               int small, large, plane_fbc;
+               int sr_clock, entries;
+               int line_count, line_size;
                /* Read the self-refresh latency. The unit is 0.5us */
                int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
 
                sr_clock = planea_clock ? planea_clock : planeb_clock;
-               line_time_us = ((sr_htotal * 1000) / sr_clock);
+               line_time_us = (sr_htotal * 1000) / sr_clock;
 
                /* Use ns/us then divide to preserve precision */
                line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
-                              / 1000;
+                       / 1000;
+               line_size = sr_hdisplay * pixel_size;
 
-               /* calculate the self-refresh watermark for display plane */
-               entries_required = line_count * sr_hdisplay * pixel_size;
-               entries_required = DIV_ROUND_UP(entries_required,
-                                               ironlake_display_srwm_info.cacheline_size);
-               sr_wm = entries_required +
-                       ironlake_display_srwm_info.guard_size;
+               /* Use the minimum of the small and large buffer method for primary */
+               small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
+               large = line_count * line_size;
 
-               /* calculate the self-refresh watermark for display cursor */
-               entries_required = line_count * pixel_size * 64;
-               entries_required = DIV_ROUND_UP(entries_required,
-                                               ironlake_cursor_srwm_info.cacheline_size);
-               cursor_wm = entries_required +
-                           ironlake_cursor_srwm_info.guard_size;
+               entries = DIV_ROUND_UP(min(small, large),
+                                      ironlake_display_srwm_info.cacheline_size);
 
-               /* configure watermark and enable self-refresh */
-               reg_value = I915_READ(WM1_LP_ILK);
-               reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
-                              WM1_LP_CURSOR_MASK);
-               reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
-                            (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+               plane_fbc = entries * 64;
+               plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
 
-               I915_WRITE(WM1_LP_ILK, reg_value);
-               DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
-                               "cursor %d\n", sr_wm, cursor_wm);
+               plane_wm = entries + ironlake_display_srwm_info.guard_size;
+               if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
+                       plane_wm = ironlake_display_srwm_info.max_wm;
 
-       } else {
-               /* Turn off self refresh if both pipes are enabled */
-               I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
-       }
+               /* calculate the self-refresh watermark for display cursor */
+               entries = line_count * pixel_size * 64;
+               entries = DIV_ROUND_UP(entries,
+                                      ironlake_cursor_srwm_info.cacheline_size);
+
+               cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
+               if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
+                       cursor_wm = ironlake_cursor_srwm_info.max_wm;
+
+               /* configure watermark and enable self-refresh */
+               tmp = (WM1_LP_SR_EN |
+                      (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+                      (plane_fbc << WM1_LP_FBC_SHIFT) |
+                      (plane_wm << WM1_LP_SR_SHIFT) |
+                      cursor_wm);
+               DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
+                             " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
+       }
+       I915_WRITE(WM1_LP_ILK, tmp);
+       /* XXX setup WM2 and WM3 */
 }
+
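
One more unit wrinkle in the (currently disabled) self-refresh path above:
MLTR_ILK reports latency in 0.5 us steps, so the code multiplies by 500 to
get nanoseconds before applying the same rounding divide. With assumed
values:

	/* ilk_sr_latency = 8 (i.e. 4 us), line_time_us = 16:
	 *   (8 * 500 / 16 + 1000) / 1000 = (250 + 1000) / 1000 = 1 line
	 */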
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -3447,7 +3517,7 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
  *
  * We don't use the sprite, so we can ignore that.  And on Crestline we have
  * to set the non-SR watermarks to 8.
-*/
+ */
 static void intel_update_watermarks(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3463,15 +3533,15 @@ static void intel_update_watermarks(struct drm_device *dev)
        /* Get the clock config from both planes */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+               if (intel_crtc->active) {
                        enabled++;
                        if (intel_crtc->plane == 0) {
                                DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
-                                         intel_crtc->pipe, crtc->mode.clock);
+                                             intel_crtc->pipe, crtc->mode.clock);
                                planea_clock = crtc->mode.clock;
                        } else {
                                DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
-                                         intel_crtc->pipe, crtc->mode.clock);
+                                             intel_crtc->pipe, crtc->mode.clock);
                                planeb_clock = crtc->mode.clock;
                        }
                        sr_hdisplay = crtc->mode.hdisplay;
@@ -3502,62 +3572,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
-       int fp_reg = (pipe == 0) ? FPA0 : FPB0;
-       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-       int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
-       int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-       int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
-       int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
-       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       u32 fp_reg, dpll_reg;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
-       u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
+       u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
        bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
        struct intel_encoder *has_edp_encoder = NULL;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_encoder *encoder;
+       struct intel_encoder *encoder;
        const intel_limit_t *limit;
        int ret;
        struct fdi_m_n m_n = {0};
-       int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
-       int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
-       int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
-       int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
-       int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
-       int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
-       int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
-       int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
-       int trans_dpll_sel = (pipe == 0) ? 0 : 1;
-       int lvds_reg = LVDS;
-       u32 temp;
-       int sdvo_pixel_multiply;
+       u32 reg, temp;
        int target_clock;
 
        drm_vblank_pre_modeset(dev, pipe);
 
-       list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-               struct intel_encoder *intel_encoder;
-
-               if (encoder->crtc != crtc)
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+               if (encoder->base.crtc != crtc)
                        continue;
 
-               intel_encoder = enc_to_intel_encoder(encoder);
-               switch (intel_encoder->type) {
+               switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
                        break;
                case INTEL_OUTPUT_SDVO:
                case INTEL_OUTPUT_HDMI:
                        is_sdvo = true;
-                       if (intel_encoder->needs_tv_clock)
+                       if (encoder->needs_tv_clock)
                                is_tv = true;
                        break;
                case INTEL_OUTPUT_DVO:
@@ -3573,7 +3616,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        is_dp = true;
                        break;
                case INTEL_OUTPUT_EDP:
-                       has_edp_encoder = intel_encoder;
+                       has_edp_encoder = encoder;
                        break;
                }
 
@@ -3583,15 +3626,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
                refclk = dev_priv->lvds_ssc_freq * 1000;
                DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
-                                       refclk / 1000);
-       } else if (IS_I9XX(dev)) {
+                             refclk / 1000);
+       } else if (!IS_GEN2(dev)) {
                refclk = 96000;
-               if (HAS_PCH_SPLIT(dev))
+               if (HAS_PCH_SPLIT(dev) &&
+                   (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
                        refclk = 120000; /* 120Mhz refclk */
        } else {
                refclk = 48000;
        }
-       
 
        /*
         * Returns a set of divisors for the desired target clock with the given
@@ -3607,13 +3650,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        }
 
        /* Ensure that the cursor is valid for the new mode before changing... */
-       intel_crtc_update_cursor(crtc);
+       intel_crtc_update_cursor(crtc, true);
 
        if (is_lvds && dev_priv->lvds_downclock_avail) {
                has_reduced_clock = limit->find_pll(limit, crtc,
-                                                           dev_priv->lvds_downclock,
-                                                           refclk,
-                                                           &reduced_clock);
+                                                   dev_priv->lvds_downclock,
+                                                   refclk,
+                                                   &reduced_clock);
                if (has_reduced_clock && (clock.p != reduced_clock.p)) {
                        /*
                         * If the different P is found, it means that we can't
@@ -3622,7 +3665,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                         * feature.
                         */
                        DRM_DEBUG_KMS("Different P is found for "
-                                               "LVDS clock/downclock\n");
+                                     "LVDS clock/downclock\n");
                        has_reduced_clock = 0;
                }
        }
@@ -3630,14 +3673,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
           this mirrors vbios setting. */
        if (is_sdvo && is_tv) {
                if (adjusted_mode->clock >= 100000
-                               && adjusted_mode->clock < 140500) {
+                   && adjusted_mode->clock < 140500) {
                        clock.p1 = 2;
                        clock.p2 = 10;
                        clock.n = 3;
                        clock.m1 = 16;
                        clock.m2 = 8;
                } else if (adjusted_mode->clock >= 140500
-                               && adjusted_mode->clock <= 200000) {
+                          && adjusted_mode->clock <= 200000) {
                        clock.p1 = 1;
                        clock.p2 = 10;
                        clock.n = 6;
@@ -3649,34 +3692,41 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        /* FDI link */
        if (HAS_PCH_SPLIT(dev)) {
                int lane = 0, link_bw, bpp;
-               /* eDP doesn't require FDI link, so just set DP M/N
+               /* CPU eDP doesn't require FDI link, so just set DP M/N
                   according to current link config */
-               if (has_edp_encoder) {
+               if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
                        target_clock = mode->clock;
                        intel_edp_link_config(has_edp_encoder,
                                              &lane, &link_bw);
                } else {
-                       /* DP over FDI requires target mode clock
+                       /* [e]DP over FDI requires target mode clock
                           instead of link clock */
-                       if (is_dp)
+                       if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
                                target_clock = mode->clock;
                        else
                                target_clock = adjusted_mode->clock;
-                       link_bw = 270000;
+
+                       /* FDI is a binary signal running at ~2.7GHz, encoding
+                        * each output octet as 10 bits. The actual frequency
+                        * is stored as a divider into a 100MHz clock, and the
+                        * mode pixel clock is stored in units of 1KHz.
+                        * Hence the bw of each lane in terms of the mode signal
+                        * is:
+                        */
+                       link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
                }
 
                /* determine panel color depth */
-               temp = I915_READ(pipeconf_reg);
+               temp = I915_READ(PIPECONF(pipe));
                temp &= ~PIPE_BPC_MASK;
                if (is_lvds) {
-                       int lvds_reg = I915_READ(PCH_LVDS);
                        /* the BPC will be 6 if it is 18-bit LVDS panel */
-                       if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+                       if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
                                temp |= PIPE_8BPC;
                        else
                                temp |= PIPE_6BPC;
-               } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
-                       switch (dev_priv->edp_bpp/3) {
+               } else if (has_edp_encoder) {
+                       switch (dev_priv->edp.bpp/3) {
                        case 8:
                                temp |= PIPE_8BPC;
                                break;
@@ -3692,8 +3742,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        }
                } else
                        temp |= PIPE_8BPC;
-               I915_WRITE(pipeconf_reg, temp);
-               I915_READ(pipeconf_reg);
+               I915_WRITE(PIPECONF(pipe), temp);
 
                switch (temp & PIPE_BPC_MASK) {
                case PIPE_8BPC:
@@ -3738,33 +3787,39 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                /* Always enable nonspread source */
                temp &= ~DREF_NONSPREAD_SOURCE_MASK;
                temp |= DREF_NONSPREAD_SOURCE_ENABLE;
-               I915_WRITE(PCH_DREF_CONTROL, temp);
-               POSTING_READ(PCH_DREF_CONTROL);
-
                temp &= ~DREF_SSC_SOURCE_MASK;
                temp |= DREF_SSC_SOURCE_ENABLE;
                I915_WRITE(PCH_DREF_CONTROL, temp);
-               POSTING_READ(PCH_DREF_CONTROL);
 
+               POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
 
                if (has_edp_encoder) {
                        if (dev_priv->lvds_use_ssc) {
                                temp |= DREF_SSC1_ENABLE;
                                I915_WRITE(PCH_DREF_CONTROL, temp);
-                               POSTING_READ(PCH_DREF_CONTROL);
-
-                               udelay(200);
 
-                               temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-                               temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
-                               I915_WRITE(PCH_DREF_CONTROL, temp);
                                POSTING_READ(PCH_DREF_CONTROL);
+                               udelay(200);
+                       }
+                       temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+                       /* Enable CPU source on CPU attached eDP */
+                       if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+                               if (dev_priv->lvds_use_ssc)
+                                       temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+                               else
+                                       temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                        } else {
-                               temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
-                               I915_WRITE(PCH_DREF_CONTROL, temp);
-                               POSTING_READ(PCH_DREF_CONTROL);
+                               /* Enable SSC on PCH eDP if needed */
+                               if (dev_priv->lvds_use_ssc) {
+                                       DRM_ERROR("enabling SSC on PCH\n");
+                                       temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
+                               }
                        }
+                       I915_WRITE(PCH_DREF_CONTROL, temp);
+                       POSTING_READ(PCH_DREF_CONTROL);
+                       udelay(200);
                }
        }
 
@@ -3780,23 +3835,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                                reduced_clock.m2;
        }
 
+       dpll = 0;
        if (!HAS_PCH_SPLIT(dev))
                dpll = DPLL_VGA_MODE_DIS;
 
-       if (IS_I9XX(dev)) {
+       if (!IS_GEN2(dev)) {
                if (is_lvds)
                        dpll |= DPLLB_MODE_LVDS;
                else
                        dpll |= DPLLB_MODE_DAC_SERIAL;
                if (is_sdvo) {
+                       int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+                       if (pixel_multiplier > 1) {
+                               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+                                       dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+                               else if (HAS_PCH_SPLIT(dev))
+                                       dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+                       }
                        dpll |= DPLL_DVO_HIGH_SPEED;
-                       sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
-                       if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-                               dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
-                       else if (HAS_PCH_SPLIT(dev))
-                               dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
                }
-               if (is_dp)
+               if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
                        dpll |= DPLL_DVO_HIGH_SPEED;
 
                /* compute bitmask from p1 value */
@@ -3824,7 +3882,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
                        break;
                }
-               if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+               if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
                        dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
        } else {
                if (is_lvds) {
@@ -3851,7 +3909,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                dpll |= PLL_REF_INPUT_DREFCLK;
 
        /* setup pipeconf */
-       pipeconf = I915_READ(pipeconf_reg);
+       pipeconf = I915_READ(PIPECONF(pipe));
 
        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -3865,7 +3923,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        dspcntr |= DISPPLANE_SEL_PIPE_B;
        }
 
-       if (pipe == 0 && !IS_I965G(dev)) {
+       if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
                /* Enable pixel doubling when the dot clock is > 90% of the (display)
                 * core speed.
                 *
@@ -3874,51 +3932,47 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                 */
                if (mode->clock >
                    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
-                       pipeconf |= PIPEACONF_DOUBLE_WIDE;
+                       pipeconf |= PIPECONF_DOUBLE_WIDE;
                else
-                       pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+                       pipeconf &= ~PIPECONF_DOUBLE_WIDE;
        }
 
        dspcntr |= DISPLAY_PLANE_ENABLE;
-       pipeconf |= PIPEACONF_ENABLE;
+       pipeconf |= PIPECONF_ENABLE;
        dpll |= DPLL_VCO_ENABLE;
 
-
-       /* Disable the panel fitter if it was on our pipe */
-       if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
-               I915_WRITE(PFIT_CONTROL, 0);
-
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
        drm_mode_debug_printmodeline(mode);
 
        /* assign to Ironlake registers */
        if (HAS_PCH_SPLIT(dev)) {
-               fp_reg = pch_fp_reg;
-               dpll_reg = pch_dpll_reg;
+               fp_reg = PCH_FP0(pipe);
+               dpll_reg = PCH_DPLL(pipe);
+       } else {
+               fp_reg = FP0(pipe);
+               dpll_reg = DPLL(pipe);
        }
 
-       if (!has_edp_encoder) {
+       /* PCH eDP needs FDI, but CPU eDP does not */
+       if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
                I915_WRITE(fp_reg, fp);
                I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
-               I915_READ(dpll_reg);
+
+               POSTING_READ(dpll_reg);
                udelay(150);
        }
 
        /* enable transcoder DPLL */
        if (HAS_PCH_CPT(dev)) {
                temp = I915_READ(PCH_DPLL_SEL);
-               if (trans_dpll_sel == 0)
-                       temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+               if (pipe == 0)
+                       temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
                else
-                       temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+                       temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
                I915_WRITE(PCH_DPLL_SEL, temp);
-               I915_READ(PCH_DPLL_SEL);
-               udelay(150);
-       }
 
-       if (HAS_PCH_SPLIT(dev)) {
-               pipeconf &= ~PIPE_ENABLE_DITHER;
-               pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+               POSTING_READ(PCH_DPLL_SEL);
+               udelay(150);
        }
 
        /* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -3926,58 +3980,60 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
         * things on.
         */
        if (is_lvds) {
-               u32 lvds;
-
+               reg = LVDS;
                if (HAS_PCH_SPLIT(dev))
-                       lvds_reg = PCH_LVDS;
+                       reg = PCH_LVDS;
 
-               lvds = I915_READ(lvds_reg);
-               lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+               temp = I915_READ(reg);
+               temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
                if (pipe == 1) {
                        if (HAS_PCH_CPT(dev))
-                               lvds |= PORT_TRANS_B_SEL_CPT;
+                               temp |= PORT_TRANS_B_SEL_CPT;
                        else
-                               lvds |= LVDS_PIPEB_SELECT;
+                               temp |= LVDS_PIPEB_SELECT;
                } else {
                        if (HAS_PCH_CPT(dev))
-                               lvds &= ~PORT_TRANS_SEL_MASK;
+                               temp &= ~PORT_TRANS_SEL_MASK;
                        else
-                               lvds &= ~LVDS_PIPEB_SELECT;
+                               temp &= ~LVDS_PIPEB_SELECT;
                }
                /* set the corresponding LVDS_BORDER bit */
-               lvds |= dev_priv->lvds_border_bits;
+               temp |= dev_priv->lvds_border_bits;
                /* Set the B0-B3 data pairs corresponding to whether we're going to
                 * set the DPLLs for dual-channel mode or not.
                 */
                if (clock.p2 == 7)
-                       lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+                       temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
                else
-                       lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+                       temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
 
                /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
                 * appropriately here, but we need to look more thoroughly into how
                 * panels behave in the two modes.
                 */
-               /* set the dithering flag */
-               if (IS_I965G(dev)) {
-                       if (dev_priv->lvds_dither) {
-                               if (HAS_PCH_SPLIT(dev)) {
-                                       pipeconf |= PIPE_ENABLE_DITHER;
-                                       pipeconf |= PIPE_DITHER_TYPE_ST01;
-                               } else
-                                       lvds |= LVDS_ENABLE_DITHER;
-                       } else {
-                               if (!HAS_PCH_SPLIT(dev)) {
-                                       lvds &= ~LVDS_ENABLE_DITHER;
-                               }
-                       }
+               /* set the dithering flag on non-PCH LVDS as needed */
+               if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+                       if (dev_priv->lvds_dither)
+                               temp |= LVDS_ENABLE_DITHER;
+                       else
+                               temp &= ~LVDS_ENABLE_DITHER;
+               }
+               I915_WRITE(reg, temp);
+       }
+
+       /* set the dithering flag and clear for anything other than a panel. */
+       if (HAS_PCH_SPLIT(dev)) {
+               pipeconf &= ~PIPECONF_DITHER_EN;
+               pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
+               if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
+                       pipeconf |= PIPECONF_DITHER_EN;
+                       pipeconf |= PIPECONF_DITHER_TYPE_ST1;
                }
-               I915_WRITE(lvds_reg, lvds);
-               I915_READ(lvds_reg);
        }
-       if (is_dp)
+
+       if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
-       else if (HAS_PCH_SPLIT(dev)) {
+       } else if (HAS_PCH_SPLIT(dev)) {
                /* For non-DP output, clear any trans DP clock recovery setting.*/
                if (pipe == 0) {
                        I915_WRITE(TRANSA_DATA_M1, 0);
@@ -3992,29 +4048,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                }
        }
 
-       if (!has_edp_encoder) {
+       if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
                I915_WRITE(fp_reg, fp);
                I915_WRITE(dpll_reg, dpll);
-               I915_READ(dpll_reg);
+
                /* Wait for the clocks to stabilize. */
+               POSTING_READ(dpll_reg);
                udelay(150);
 
-               if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+               if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+                       temp = 0;
                        if (is_sdvo) {
-                               sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
-                               I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
-                                       ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
-                       } else
-                               I915_WRITE(dpll_md_reg, 0);
+                               temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+                               if (temp > 1)
+                                       temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+                               else
+                                       temp = 0;
+                       }
+                       I915_WRITE(DPLL_MD(pipe), temp);
                } else {
                        /* write it again -- the BIOS does, after all */
                        I915_WRITE(dpll_reg, dpll);
                }
-               I915_READ(dpll_reg);
+
                /* Wait for the clocks to stabilize. */
+               POSTING_READ(dpll_reg);
                udelay(150);
        }
 
+       intel_crtc->lowfreq_avail = false;
        if (is_lvds && has_reduced_clock && i915_powersave) {
                I915_WRITE(fp_reg + 4, fp2);
                intel_crtc->lowfreq_avail = true;
@@ -4024,7 +4086,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                }
        } else {
                I915_WRITE(fp_reg + 4, fp);
-               intel_crtc->lowfreq_avail = false;
                if (HAS_PIPE_CXSR(dev)) {
                        DRM_DEBUG_KMS("disabling CxSR downclocking\n");
                        pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -4043,70 +4104,62 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        } else
                pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
 
-       I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+       I915_WRITE(HTOTAL(pipe),
+                  (adjusted_mode->crtc_hdisplay - 1) |
                   ((adjusted_mode->crtc_htotal - 1) << 16));
-       I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+       I915_WRITE(HBLANK(pipe),
+                  (adjusted_mode->crtc_hblank_start - 1) |
                   ((adjusted_mode->crtc_hblank_end - 1) << 16));
-       I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+       I915_WRITE(HSYNC(pipe),
+                  (adjusted_mode->crtc_hsync_start - 1) |
                   ((adjusted_mode->crtc_hsync_end - 1) << 16));
-       I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+
+       I915_WRITE(VTOTAL(pipe),
+                  (adjusted_mode->crtc_vdisplay - 1) |
                   ((adjusted_mode->crtc_vtotal - 1) << 16));
-       I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+       I915_WRITE(VBLANK(pipe),
+                  (adjusted_mode->crtc_vblank_start - 1) |
                   ((adjusted_mode->crtc_vblank_end - 1) << 16));
-       I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+       I915_WRITE(VSYNC(pipe),
+                  (adjusted_mode->crtc_vsync_start - 1) |
                   ((adjusted_mode->crtc_vsync_end - 1) << 16));
-       /* pipesrc and dspsize control the size that is scaled from, which should
-        * always be the user's requested size.
+
+       /* pipesrc and dspsize control the size that is scaled from,
+        * which should always be the user's requested size.
         */
        if (!HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
-                               (mode->hdisplay - 1));
-               I915_WRITE(dsppos_reg, 0);
+               I915_WRITE(DSPSIZE(plane),
+                          ((mode->vdisplay - 1) << 16) |
+                          (mode->hdisplay - 1));
+               I915_WRITE(DSPPOS(plane), 0);
        }
-       I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+       I915_WRITE(PIPESRC(pipe),
+                  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
-               I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
-               I915_WRITE(link_m1_reg, m_n.link_m);
-               I915_WRITE(link_n1_reg, m_n.link_n);
+               I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+               I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+               I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+               I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 
-               if (has_edp_encoder) {
+               if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
                        ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-               } else {
-                       /* enable FDI RX PLL too */
-                       temp = I915_READ(fdi_rx_reg);
-                       I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
-                       I915_READ(fdi_rx_reg);
-                       udelay(200);
-
-                       /* enable FDI TX PLL too */
-                       temp = I915_READ(fdi_tx_reg);
-                       I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
-                       I915_READ(fdi_tx_reg);
-
-                       /* enable FDI RX PCDCLK */
-                       temp = I915_READ(fdi_rx_reg);
-                       I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
-                       I915_READ(fdi_rx_reg);
-                       udelay(200);
                }
        }
 
-       I915_WRITE(pipeconf_reg, pipeconf);
-       I915_READ(pipeconf_reg);
+       I915_WRITE(PIPECONF(pipe), pipeconf);
+       POSTING_READ(PIPECONF(pipe));
 
        intel_wait_for_vblank(dev, pipe);
 
-       if (IS_IRONLAKE(dev)) {
+       if (IS_GEN5(dev)) {
                /* enable address swizzle for tiling buffer */
                temp = I915_READ(DISP_ARB_CTL);
                I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
        }
 
-       I915_WRITE(dspcntr_reg, dspcntr);
+       I915_WRITE(DSPCNTR(plane), dspcntr);
 
-       /* Flush the plane changes */
        ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
        intel_update_watermarks(dev);
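
The hunk above also swaps the plain register read-back after the PIPECONF write for POSTING_READ(), making the flush explicit before the vblank wait. A minimal sketch of the idiom using this file's own MMIO macros (the helper name is hypothetical):

/* Hypothetical helper illustrating the write-then-flush idiom: the
 * posting read forces the preceding MMIO write out to the device, so
 * the subsequent vblank wait times against the new configuration. */
static void write_pipeconf_flushed(struct drm_i915_private *dev_priv,
                                   int pipe, u32 val)
{
        I915_WRITE(PIPECONF(pipe), val);
        POSTING_READ(PIPECONF(pipe));   /* flush before waiting for vblank */
}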
@@ -4199,7 +4252,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 }
 
 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
-static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+                                    bool on)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4212,7 +4266,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
 
        pos = 0;
 
-       if (intel_crtc->cursor_on && crtc->fb) {
+       if (on && crtc->enabled && crtc->fb) {
                base = intel_crtc->cursor_addr;
                if (x > (int) crtc->fb->width)
                        base = 0;
@@ -4324,7 +4378,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                addr = obj_priv->phys_obj->handle->busaddr;
        }
 
-       if (!IS_I9XX(dev))
+       if (IS_GEN2(dev))
                I915_WRITE(CURSIZE, (height << 12) | width);
 
  finish:
@@ -4344,7 +4398,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        intel_crtc->cursor_width = width;
        intel_crtc->cursor_height = height;
 
-       intel_crtc_update_cursor(crtc);
+       intel_crtc_update_cursor(crtc, true);
 
        return 0;
 fail_unpin:
@@ -4363,7 +4417,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
        intel_crtc->cursor_x = x;
        intel_crtc->cursor_y = y;
 
-       intel_crtc_update_cursor(crtc);
+       intel_crtc_update_cursor(crtc, true);
 
        return 0;
 }
@@ -4432,7 +4486,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
        struct intel_crtc *intel_crtc;
        struct drm_crtc *possible_crtc;
       struct drm_crtc *supported_crtc = NULL;
-       struct drm_encoder *encoder = &intel_encoder->enc;
+       struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = NULL;
        struct drm_device *dev = encoder->dev;
        struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4513,7 +4567,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
                                    struct drm_connector *connector, int dpms_mode)
 {
-       struct drm_encoder *encoder = &intel_encoder->enc;
+       struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
        struct drm_crtc *crtc = encoder->crtc;
        struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -4559,7 +4613,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }
 
-       if (IS_I9XX(dev)) {
+       if (!IS_GEN2(dev)) {
                if (IS_PINEVIEW(dev))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -4663,8 +4717,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
        struct drm_device *dev = (struct drm_device *)arg;
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
        dev_priv->busy = false;
 
        queue_work(dev_priv->wq, &dev_priv->idle_work);
@@ -4678,14 +4730,12 @@ static void intel_crtc_idle_timer(unsigned long arg)
        struct drm_crtc *crtc = &intel_crtc->base;
        drm_i915_private_t *dev_priv = crtc->dev->dev_private;
 
-       DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
-
        intel_crtc->busy = false;
 
        queue_work(dev_priv->wq, &dev_priv->idle_work);
 }
 
-static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+static void intel_increase_pllclock(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4720,9 +4770,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
        }
 
        /* Schedule downclock */
-       if (schedule)
-               mod_timer(&intel_crtc->idle_timer, jiffies +
-                         msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+       mod_timer(&intel_crtc->idle_timer, jiffies +
+                 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 }
 
 static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -4858,7 +4907,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
                                        I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
                                }
                                /* Non-busy -> busy, upclock */
-                               intel_increase_pllclock(crtc, true);
+                               intel_increase_pllclock(crtc);
                                intel_crtc->busy = true;
                        } else {
                                /* Busy -> busy, put off timer */
@@ -4872,8 +4921,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct intel_unpin_work *work;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       work = intel_crtc->unpin_work;
+       intel_crtc->unpin_work = NULL;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       if (work) {
+               cancel_work_sync(&work->work);
+               kfree(work);
+       }
 
        drm_crtc_cleanup(crtc);
+
        kfree(intel_crtc);
 }
 
@@ -4928,12 +4991,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       obj_priv = to_intel_bo(work->pending_flip_obj);
-
-       /* Initial scanout buffer will have a 0 pending flip count */
-       if ((atomic_read(&obj_priv->pending_flip) == 0) ||
-           atomic_dec_and_test(&obj_priv->pending_flip))
-               DRM_WAKEUP(&dev_priv->pending_flip_queue);
+       obj_priv = to_intel_bo(work->old_fb_obj);
+       atomic_clear_mask(1 << intel_crtc->plane,
+                         &obj_priv->pending_flip.counter);
+       if (atomic_read(&obj_priv->pending_flip) == 0)
+               wake_up(&dev_priv->pending_flip_queue);
        schedule_work(&work->work);
 
        trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
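
pending_flip is now used as a per-plane bitmask rather than a plain counter: the flip-queue path sets bit (1 << plane) on the old framebuffer object, and the completion path above clears that bit and wakes waiters once no plane still references the object. The accounting, side by side:

/* queue side (intel_crtc_page_flip): mark this plane as still holding
 * the old fb until the flip lands. */
atomic_add(1 << intel_crtc->plane,
           &to_intel_bo(work->old_fb_obj)->pending_flip);

/* completion side (above): drop this plane's bit; last one out wakes
 * anyone waiting for the object to leave the scanout. */
atomic_clear_mask(1 << intel_crtc->plane, &obj_priv->pending_flip.counter);
if (atomic_read(&obj_priv->pending_flip) == 0)
        wake_up(&dev_priv->pending_flip_queue);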
@@ -5014,7 +5076,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        obj = intel_fb->obj;
 
        mutex_lock(&dev->struct_mutex);
-       ret = intel_pin_and_fence_fb_obj(dev, obj);
+       ret = intel_pin_and_fence_fb_obj(dev, obj, true);
        if (ret)
                goto cleanup_work;
 
@@ -5023,29 +5085,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        drm_gem_object_reference(obj);
 
        crtc->fb = fb;
-       ret = i915_gem_object_flush_write_domain(obj);
-       if (ret)
-               goto cleanup_objs;
 
        ret = drm_vblank_get(dev, intel_crtc->pipe);
        if (ret)
                goto cleanup_objs;
 
-       obj_priv = to_intel_bo(obj);
-       atomic_inc(&obj_priv->pending_flip);
+       /* Block clients from rendering to the new back buffer until
+        * the flip occurs and the object is no longer visible.
+        */
+       atomic_add(1 << intel_crtc->plane,
+                  &to_intel_bo(work->old_fb_obj)->pending_flip);
+
        work->pending_flip_obj = obj;
+       obj_priv = to_intel_bo(obj);
 
        if (IS_GEN3(dev) || IS_GEN2(dev)) {
                u32 flip_mask;
 
+               /* Can't queue multiple flips, so wait for the previous
+                * one to finish before executing the next.
+                */
+               BEGIN_LP_RING(2);
                if (intel_crtc->plane)
                        flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
                else
                        flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-               BEGIN_LP_RING(2);
                OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
-               OUT_RING(0);
+               OUT_RING(MI_NOOP);
                ADVANCE_LP_RING();
        }
 
@@ -5126,15 +5192,14 @@ cleanup_work:
        return ret;
 }
 
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
        .dpms = intel_crtc_dpms,
        .mode_fixup = intel_crtc_mode_fixup,
        .mode_set = intel_crtc_mode_set,
        .mode_set_base = intel_pipe_set_base,
        .mode_set_base_atomic = intel_pipe_set_base_atomic,
-       .prepare = intel_crtc_prepare,
-       .commit = intel_crtc_commit,
        .load_lut = intel_crtc_load_lut,
+       .disable = intel_crtc_disable,
 };
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
@@ -5160,8 +5225,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
 
        drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
-       intel_crtc->pipe = pipe;
-       intel_crtc->plane = pipe;
        for (i = 0; i < 256; i++) {
                intel_crtc->lut_r[i] = i;
                intel_crtc->lut_g[i] = i;
@@ -5171,9 +5234,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        /* Swap pipes & planes for FBC on pre-965 */
        intel_crtc->pipe = pipe;
        intel_crtc->plane = pipe;
-       if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
+       if (IS_MOBILE(dev) && IS_GEN3(dev)) {
                DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
-               intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+               intel_crtc->plane = !pipe;
        }
 
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
@@ -5183,6 +5246,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 
        intel_crtc->cursor_addr = 0;
        intel_crtc->dpms_mode = -1;
+       intel_crtc->active = true; /* force the pipe off on setup_init_config */
+
+       if (HAS_PCH_SPLIT(dev)) {
+               intel_helper_funcs.prepare = ironlake_crtc_prepare;
+               intel_helper_funcs.commit = ironlake_crtc_commit;
+       } else {
+               intel_helper_funcs.prepare = i9xx_crtc_prepare;
+               intel_helper_funcs.commit = i9xx_crtc_commit;
+       }
+
        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
        intel_crtc->busy = false;
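
This is also why intel_helper_funcs lost its const qualifier in the earlier hunk: the prepare/commit hooks are now chosen per platform at CRTC init, splitting the PCH (Ironlake) and pre-PCH (i9xx) power-sequencing paths. Mutating the shared static vtable is safe here because every CRTC on a given device resolves HAS_PCH_SPLIT() the same way:

/* Per-platform vtable fixup done at CRTC init (from the hunk above). */
if (HAS_PCH_SPLIT(dev)) {
        intel_helper_funcs.prepare = ironlake_crtc_prepare;
        intel_helper_funcs.commit  = ironlake_crtc_commit;
} else {
        intel_helper_funcs.prepare = i9xx_crtc_prepare;
        intel_helper_funcs.commit  = i9xx_crtc_commit;
}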
@@ -5218,38 +5291,25 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
        return 0;
 }
 
-struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
-{
-       struct drm_crtc *crtc = NULL;
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               if (intel_crtc->pipe == pipe)
-                       break;
-       }
-       return crtc;
-}
-
 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
 {
+       struct intel_encoder *encoder;
        int index_mask = 0;
-       struct drm_encoder *encoder;
        int entry = 0;
 
-        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-               if (type_mask & intel_encoder->clone_mask)
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+               if (type_mask & encoder->clone_mask)
                        index_mask |= (1 << entry);
                entry++;
        }
+
        return index_mask;
 }
 
-
 static void intel_setup_outputs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_encoder *encoder;
+       struct intel_encoder *encoder;
        bool dpd_is_edp = false;
 
        if (IS_MOBILE(dev) && !IS_I830(dev))
@@ -5338,12 +5398,10 @@ static void intel_setup_outputs(struct drm_device *dev)
        if (SUPPORTS_TV(dev))
                intel_tv_init(dev);
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-               encoder->possible_crtcs = intel_encoder->crtc_mask;
-               encoder->possible_clones = intel_encoder_clones(dev,
-                                               intel_encoder->clone_mask);
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+               encoder->base.possible_crtcs = encoder->crtc_mask;
+               encoder->base.possible_clones =
+                       intel_encoder_clones(dev, encoder->clone_mask);
        }
 }
 
@@ -5377,8 +5435,25 @@ int intel_framebuffer_init(struct drm_device *dev,
                           struct drm_mode_fb_cmd *mode_cmd,
                           struct drm_gem_object *obj)
 {
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
+       if (obj_priv->tiling_mode == I915_TILING_Y)
+               return -EINVAL;
+
+       if (mode_cmd->pitch & 63)
+               return -EINVAL;
+
+       switch (mode_cmd->bpp) {
+       case 8:
+       case 16:
+       case 24:
+       case 32:
+               break;
+       default:
+               return -EINVAL;
+       }
+
        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
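
intel_framebuffer_init now rejects configurations the display engine cannot scan out before touching DRM state: Y-tiled objects, pitches that are not 64-byte multiples, and depths other than 8/16/24/32 bpp. A worked example of the pitch rule (values hypothetical):

/* A 1366-wide, 32 bpp buffer has pitch 1366 * 4 = 5464 bytes, and
 * 5464 & 63 == 24, so creation fails with -EINVAL until userspace
 * pads the pitch to 5504, the next 64-byte multiple. */
if (mode_cmd->pitch & 63)
        return -EINVAL;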
@@ -5487,6 +5562,10 @@ void ironlake_enable_drps(struct drm_device *dev)
        u32 rgvmodectl = I915_READ(MEMMODECTL);
        u8 fmax, fmin, fstart, vstart;
 
+       /* Enable temp reporting */
+       I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+       I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
        /* 100ms RC evaluation intervals */
        I915_WRITE(RCUPEI, 100000);
        I915_WRITE(RCDNEI, 100000);
@@ -5529,7 +5608,7 @@ void ironlake_enable_drps(struct drm_device *dev)
        rgvmodectl |= MEMMODE_SWMODE_EN;
        I915_WRITE(MEMMODECTL, rgvmodectl);
 
-       if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+       if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
                DRM_ERROR("stuck trying to change perf mode\n");
        msleep(1);
 
@@ -5660,7 +5739,7 @@ void intel_init_clock_gating(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
-               if (IS_IRONLAKE(dev)) {
+               if (IS_GEN5(dev)) {
                        /* Required for FBC */
                        dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
                        /* Required for CxSR */
@@ -5673,6 +5752,13 @@ void intel_init_clock_gating(struct drm_device *dev)
 
                I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
+               /*
+                * On Ibex Peak and Cougar Point, we need to disable clock
+                * gating for the panel power sequencer or it will fail to
+                * start up when no ports are active.
+                */
+               I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+
                /*
                 * According to the spec the following bits should be set in
                 * order to enable memory self-refresh
@@ -5680,7 +5766,7 @@ void intel_init_clock_gating(struct drm_device *dev)
                 * The bit 5 of 0x42020
                 * The bit 15 of 0x45000
                 */
-               if (IS_IRONLAKE(dev)) {
+               if (IS_GEN5(dev)) {
                        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                                        (I915_READ(ILK_DISPLAY_CHICKEN2) |
                                        ILK_DPARB_GATE | ILK_VSDPFD_FULL));
@@ -5728,20 +5814,20 @@ void intel_init_clock_gating(struct drm_device *dev)
                if (IS_GM45(dev))
                        dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
                I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-       } else if (IS_I965GM(dev)) {
+       } else if (IS_CRESTLINE(dev)) {
                I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
                I915_WRITE(RENCLK_GATE_D2, 0);
                I915_WRITE(DSPCLK_GATE_D, 0);
                I915_WRITE(RAMCLK_GATE_D, 0);
                I915_WRITE16(DEUC, 0);
-       } else if (IS_I965G(dev)) {
+       } else if (IS_BROADWATER(dev)) {
                I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
                       I965_RCC_CLOCK_GATE_DISABLE |
                       I965_RCPB_CLOCK_GATE_DISABLE |
                       I965_ISC_CLOCK_GATE_DISABLE |
                       I965_FBC_CLOCK_GATE_DISABLE);
                I915_WRITE(RENCLK_GATE_D2, 0);
-       } else if (IS_I9XX(dev)) {
+       } else if (IS_GEN3(dev)) {
                u32 dstate = I915_READ(D_STATE);
 
                dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -5823,7 +5909,7 @@ static void intel_init_display(struct drm_device *dev)
                        dev_priv->display.fbc_enabled = g4x_fbc_enabled;
                        dev_priv->display.enable_fbc = g4x_enable_fbc;
                        dev_priv->display.disable_fbc = g4x_disable_fbc;
-               } else if (IS_I965GM(dev)) {
+               } else if (IS_CRESTLINE(dev)) {
                        dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
                        dev_priv->display.enable_fbc = i8xx_enable_fbc;
                        dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -5856,7 +5942,7 @@ static void intel_init_display(struct drm_device *dev)
 
        /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
-               if (IS_IRONLAKE(dev)) {
+               if (IS_GEN5(dev)) {
                        if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
                                dev_priv->display.update_wm = ironlake_update_wm;
                        else {
@@ -5883,9 +5969,9 @@ static void intel_init_display(struct drm_device *dev)
                        dev_priv->display.update_wm = pineview_update_wm;
        } else if (IS_G4X(dev))
                dev_priv->display.update_wm = g4x_update_wm;
-       else if (IS_I965G(dev))
+       else if (IS_GEN4(dev))
                dev_priv->display.update_wm = i965_update_wm;
-       else if (IS_I9XX(dev)) {
+       else if (IS_GEN3(dev)) {
                dev_priv->display.update_wm = i9xx_update_wm;
                dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
        } else if (IS_I85X(dev)) {
@@ -5999,24 +6085,24 @@ void intel_modeset_init(struct drm_device *dev)
 
        intel_init_display(dev);
 
-       if (IS_I965G(dev)) {
-               dev->mode_config.max_width = 8192;
-               dev->mode_config.max_height = 8192;
-       } else if (IS_I9XX(dev)) {
+       if (IS_GEN2(dev)) {
+               dev->mode_config.max_width = 2048;
+               dev->mode_config.max_height = 2048;
+       } else if (IS_GEN3(dev)) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
        } else {
-               dev->mode_config.max_width = 2048;
-               dev->mode_config.max_height = 2048;
+               dev->mode_config.max_width = 8192;
+               dev->mode_config.max_height = 8192;
        }
 
        /* set memory base */
-       if (IS_I9XX(dev))
-               dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
-       else
+       if (IS_GEN2(dev))
                dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+       else
+               dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
 
-       if (IS_MOBILE(dev) || IS_I9XX(dev))
+       if (IS_MOBILE(dev) || !IS_GEN2(dev))
                dev_priv->num_pipe = 2;
        else
                dev_priv->num_pipe = 1;
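
The mode-size limits are now keyed directly off generation: GEN2 tops out at 2048x2048, GEN3 at 4096x4096, and everything newer at 8192x8192; likewise the framebuffer aperture is BAR 0 on GEN2 and BAR 2 afterwards. An illustrative consequence (hypothetical helper, not driver API):

/* e.g. a 5120x1600 mode would be rejected on GEN3 (5120 > 4096) but
 * accepted on GEN4+; the DRM core checks against these limits. */
static bool mode_within_limits(struct drm_device *dev,
                               const struct drm_display_mode *mode)
{
        return mode->hdisplay <= dev->mode_config.max_width &&
               mode->vdisplay <= dev->mode_config.max_height;
}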
@@ -6052,10 +6138,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
 
+       drm_kms_helper_poll_fini(dev);
        mutex_lock(&dev->struct_mutex);
 
-       drm_kms_helper_poll_fini(dev);
-       intel_fbdev_fini(dev);
+       intel_unregister_dsm_handler();
+
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                /* Skip inactive CRTCs */
@@ -6063,12 +6150,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
                        continue;
 
                intel_crtc = to_intel_crtc(crtc);
-               intel_increase_pllclock(crtc, false);
-               del_timer_sync(&intel_crtc->idle_timer);
+               intel_increase_pllclock(crtc);
        }
 
-       del_timer_sync(&dev_priv->idle_timer);
-
        if (dev_priv->display.disable_fbc)
                dev_priv->display.disable_fbc(dev);
 
@@ -6097,33 +6181,36 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        mutex_unlock(&dev->struct_mutex);
 
+       /* Disable the irq before mode object teardown, as the irq might
+        * enqueue unpin/hotplug work. */
+       drm_irq_uninstall(dev);
+       cancel_work_sync(&dev_priv->hotplug_work);
+
+       /* Shut off idle work before the crtcs get freed. */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               intel_crtc = to_intel_crtc(crtc);
+               del_timer_sync(&intel_crtc->idle_timer);
+       }
+       del_timer_sync(&dev_priv->idle_timer);
+       cancel_work_sync(&dev_priv->idle_work);
+
        drm_mode_config_cleanup(dev);
 }
 
-
 /*
  * Return which encoder is currently attached to the connector.
  */
-struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
 {
-       struct drm_mode_object *obj;
-       struct drm_encoder *encoder;
-       int i;
-
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               obj = drm_mode_object_find(connector->dev,
-                                           connector->encoder_ids[i],
-                                           DRM_MODE_OBJECT_ENCODER);
-               if (!obj)
-                       continue;
+       return &intel_attached_encoder(connector)->base;
+}
 
-               encoder = obj_to_encoder(obj);
-               return encoder;
-       }
-       return NULL;
+void intel_connector_attach_encoder(struct intel_connector *connector,
+                                   struct intel_encoder *encoder)
+{
+       connector->encoder = encoder;
+       drm_mode_connector_attach_encoder(&connector->base,
+                                         &encoder->base);
 }
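
intel_best_encoder() collapses the old O(n) scan over connector->encoder_ids into a constant-time lookup, because each connector now caches its single intel_encoder at attach time via intel_connector_attach_encoder() above. The accessor it builds on is assumed to be the trivial one-liner in intel_drv.h, sketched here from how this file uses it:

/* Assumed shape of intel_attached_encoder(): */
static inline struct intel_encoder *
intel_attached_encoder(struct drm_connector *connector)
{
        return to_intel_connector(connector)->encoder;
}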
 
 /*
index 9ab8708ac6ba1370cea75680d6a660daa5f9b147..891f4f1d63b11570b7ede1c4e5e49814f2dcb5d0 100644 (file)
 
 #define DP_LINK_CONFIGURATION_SIZE     9
 
-#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
-#define IS_PCH_eDP(i) ((i)->is_pch_edp)
-
 struct intel_dp {
        struct intel_encoder base;
        uint32_t output_reg;
        uint32_t DP;
        uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
        bool has_audio;
+       int force_audio;
        int dpms_mode;
        uint8_t link_bw;
        uint8_t lane_count;
@@ -58,14 +56,69 @@ struct intel_dp {
        struct i2c_adapter adapter;
        struct i2c_algo_dp_aux_data algo;
        bool is_pch_edp;
+       uint8_t train_set[4];
+       uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+       struct drm_property *force_audio_property;
 };
 
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * Returns true if a CPU or PCH DP output is attached to an eDP panel,
+ * false otherwise.
+ */
+static bool is_edp(struct intel_dp *intel_dp)
+{
+       return intel_dp->base.type == INTEL_OUTPUT_EDP;
+}
+
+/**
+ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a PCH DP port attached
+ * to an eDP panel, false otherwise.  Helpful for determining whether we
+ * may need FDI resources for a given DP output or not.
+ */
+static bool is_pch_edp(struct intel_dp *intel_dp)
+{
+       return intel_dp->is_pch_edp;
+}
+
 static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
 {
-       return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
+       return container_of(encoder, struct intel_dp, base.base);
+}
+
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+       return container_of(intel_attached_encoder(connector),
+                           struct intel_dp, base);
+}
+
+/**
+ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
+ * @encoder: DRM encoder
+ *
+ * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
+ * by intel_display.c.
+ */
+bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
+{
+       struct intel_dp *intel_dp;
+
+       if (!encoder)
+               return false;
+
+       intel_dp = enc_to_intel_dp(encoder);
+
+       return is_pch_edp(intel_dp);
 }
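
These accessors rely on the struct embedding used throughout the driver: struct intel_dp embeds struct intel_encoder as 'base', which in turn embeds struct drm_encoder as 'base', so container_of() walks back out from the innermost DRM object in one step:

/* Layout assumed by enc_to_intel_dp() above:
 *
 *   struct intel_dp      { struct intel_encoder base; ... };
 *   struct intel_encoder { struct drm_encoder   base; ... };
 */
struct intel_dp *dp = container_of(encoder, struct intel_dp, base.base);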
 
-static void intel_dp_link_train(struct intel_dp *intel_dp);
+static void intel_dp_start_link_train(struct intel_dp *intel_dp);
+static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 static void intel_dp_link_down(struct intel_dp *intel_dp);
 
 void
@@ -129,8 +182,8 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
-               return (pixel_clock * dev_priv->edp_bpp) / 8;
+       if (is_edp(intel_dp))
+               return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
        else
                return pixel_clock * 3;
 }
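
The "+ 7" makes the bits-to-bytes conversion round up instead of truncating, so marginal eDP configurations are not under-budgeted against the link. A worked example (hypothetical numbers):

/* ceil-divide: (n + d - 1) / d with d == 8.
 * e.g. 65350 kHz pixel clock * 18 bpp = 1176300 kbit/s
 *      1176300 / 8       = 147037  (truncates, under-budgets the link)
 *      (1176300 + 7) / 8 = 147038  (correct ceiling)
 */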
@@ -145,15 +198,13 @@ static int
 intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
        int max_lanes = intel_dp_max_lane_count(intel_dp);
 
-       if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
-           dev_priv->panel_fixed_mode) {
+       if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
                if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
                        return MODE_PANEL;
 
@@ -163,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
 
        /* only refuse the mode on non-eDP since we have seen some weird eDP panels
           which are outside spec tolerances but somehow work by magic */
-       if (!IS_eDP(intel_dp) &&
+       if (!is_edp(intel_dp) &&
            (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
             > intel_dp_max_data_rate(max_link_clock, max_lanes)))
                return MODE_CLOCK_HIGH;
@@ -233,7 +284,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *recv, int recv_size)
 {
        uint32_t output_reg = intel_dp->output_reg;
-       struct drm_device *dev = intel_dp->base.enc.dev;
+       struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = output_reg + 0x10;
        uint32_t ch_data = ch_ctl + 4;
@@ -246,8 +297,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        /* The clock divider is based off the hrawclk,
         * and would like to run at 2MHz. So, take the
         * hrawclk value and divide by 2 and use that
+        *
+        * Note that PCH attached eDP panels should use a 125MHz input
+        * clock divider.
         */
-       if (IS_eDP(intel_dp)) {
+       if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
                if (IS_GEN6(dev))
                        aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */
                else
@@ -519,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
        int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
-       if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
-           dev_priv->panel_fixed_mode) {
+       if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
                intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
                intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
                                        mode, adjusted_mode);
@@ -531,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                mode->clock = dev_priv->panel_fixed_mode->clock;
        }
 
+       /* Just use VBT values for eDP */
+       if (is_edp(intel_dp)) {
+               intel_dp->lane_count = dev_priv->edp.lanes;
+               intel_dp->link_bw = dev_priv->edp.rate;
+               adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+               DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
+                             intel_dp->link_bw, intel_dp->lane_count,
+                             adjusted_mode->clock);
+               return true;
+       }
+
        for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
                for (clock = 0; clock <= max_clock; clock++) {
                        int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -549,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                }
        }
 
-       if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
-               /* okay we failed just pick the highest */
-               intel_dp->lane_count = max_lane_count;
-               intel_dp->link_bw = bws[max_clock];
-               adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
-               DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
-                             "count %d clock %d\n",
-                             intel_dp->link_bw, intel_dp->lane_count,
-                             adjusted_mode->clock);
-
-               return true;
-       }
-
        return false;
 }
 
@@ -598,25 +649,6 @@ intel_dp_compute_m_n(int bpp,
        intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
 }
 
-bool intel_pch_has_edp(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_encoder *encoder;
-
-       list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-               struct intel_dp *intel_dp;
-
-               if (encoder->crtc != crtc)
-                       continue;
-
-               intel_dp = enc_to_intel_dp(encoder);
-               if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
-                       return intel_dp->is_pch_edp;
-       }
-       return false;
-}
-
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode)
@@ -641,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                intel_dp = enc_to_intel_dp(encoder);
                if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
                        lane_count = intel_dp->lane_count;
-                       if (IS_PCH_eDP(intel_dp))
-                               bpp = dev_priv->edp_bpp;
+                       break;
+               } else if (is_edp(intel_dp)) {
+                       lane_count = dev_priv->edp.lanes;
+                       bpp = dev_priv->edp.bpp;
                        break;
                }
        }
@@ -698,7 +732,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 {
        struct drm_device *dev = encoder->dev;
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct drm_crtc *crtc = intel_dp->base.enc.crtc;
+       struct drm_crtc *crtc = intel_dp->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
        intel_dp->DP = (DP_VOLTAGE_0_4 |
@@ -709,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                intel_dp->DP |= DP_SYNC_VS_HIGH;
 
-       if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+       if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
        else
                intel_dp->DP |= DP_LINK_TRAIN_OFF;
@@ -744,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
                intel_dp->DP |= DP_PIPEB_SELECT;
 
-       if (IS_eDP(intel_dp)) {
+       if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
                /* don't miss out required setting for eDP */
                intel_dp->DP |= DP_PLL_ENABLE;
                if (adjusted_mode->clock < 200000)
@@ -754,13 +788,15 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        }
 }
 
-static void ironlake_edp_panel_on (struct drm_device *dev)
+/* Returns true if the panel was already on when called */
+static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
 {
+       struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp;
+       u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
 
        if (I915_READ(PCH_PP_STATUS) & PP_ON)
-               return;
+               return true;
 
        pp = I915_READ(PCH_PP_CONTROL);
 
@@ -771,21 +807,30 @@ static void ironlake_edp_panel_on (struct drm_device *dev)
 
        pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
        I915_WRITE(PCH_PP_CONTROL, pp);
+       POSTING_READ(PCH_PP_CONTROL);
+
+       /* Ouch. We need to wait here for some panels, like Dell e6510
+        * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
+        */
+       msleep(300);
 
-       if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
+       if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
+                    5000))
                DRM_ERROR("panel on wait timed out: 0x%08x\n",
                          I915_READ(PCH_PP_STATUS));
 
-       pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
        pp |= PANEL_POWER_RESET; /* restore panel reset bit */
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
+
+       return false;
 }
 
 static void ironlake_edp_panel_off (struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp;
+       u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
+               PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
 
        pp = I915_READ(PCH_PP_CONTROL);
 
@@ -796,15 +841,20 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
 
        pp &= ~POWER_TARGET_ON;
        I915_WRITE(PCH_PP_CONTROL, pp);
+       POSTING_READ(PCH_PP_CONTROL);
 
-       if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
+       if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
                DRM_ERROR("panel off wait timed out: 0x%08x\n",
                          I915_READ(PCH_PP_STATUS));
 
-       /* Make sure VDD is enabled so DP AUX will work */
-       pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
+       pp |= PANEL_POWER_RESET; /* restore panel reset bit */
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
+
+       /* Ouch. We need to wait here for some panels, like Dell e6510
+        * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
+        */
+       msleep(300);
 }
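
Panel power-up now waits on the full sequencer state, not just PP_ON: idle_on_mask requires both the ON bit and the sequencer-idle state before proceeding, and the extra 300 ms sleep covers panels (like the Dell e6510 referenced above) whose status registers go ready before the panel actually does. The poll is the file's usual wait_for() idiom:

/* Wait until every bit of the composite mask is set, up to 5 s. */
u32 idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;

if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
             5000))
        DRM_ERROR("panel on wait timed out: 0x%08x\n",
                  I915_READ(PCH_PP_STATUS));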
 
 static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -813,6 +863,13 @@ static void ironlake_edp_backlight_on (struct drm_device *dev)
        u32 pp;
 
        DRM_DEBUG_KMS("\n");
+       /*
+        * If we enable the backlight right away following a panel power
+        * on, we may see slight flicker as the panel syncs with the eDP
+        * link.  So delay a bit to make sure the image is solid before
+        * allowing it to appear.
+        */
+       msleep(300);
        pp = I915_READ(PCH_PP_CONTROL);
        pp |= EDP_BLC_ENABLE;
        I915_WRITE(PCH_PP_CONTROL, pp);
@@ -837,8 +894,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder)
 
        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
-       dpa_ctl &= ~DP_PLL_ENABLE;
+       dpa_ctl |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
+       POSTING_READ(DP_A);
+       udelay(200);
 }
 
 static void ironlake_edp_pll_off(struct drm_encoder *encoder)
@@ -848,8 +907,9 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
        u32 dpa_ctl;
 
        dpa_ctl = I915_READ(DP_A);
-       dpa_ctl |= DP_PLL_ENABLE;
+       dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
+       POSTING_READ(DP_A);
        udelay(200);
 }
 
@@ -857,29 +917,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
-       if (IS_eDP(intel_dp)) {
+       if (is_edp(intel_dp)) {
                ironlake_edp_backlight_off(dev);
-               ironlake_edp_panel_on(dev);
-               ironlake_edp_pll_on(encoder);
+               ironlake_edp_panel_on(intel_dp);
+               if (!is_pch_edp(intel_dp))
+                       ironlake_edp_pll_on(encoder);
+               else
+                       ironlake_edp_pll_off(encoder);
        }
-       if (dp_reg & DP_PORT_EN)
-               intel_dp_link_down(intel_dp);
+       intel_dp_link_down(intel_dp);
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
-       if (!(dp_reg & DP_PORT_EN)) {
-               intel_dp_link_train(intel_dp);
-       }
-       if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+       intel_dp_start_link_train(intel_dp);
+
+       if (is_edp(intel_dp))
+               ironlake_edp_panel_on(intel_dp);
+
+       intel_dp_complete_link_train(intel_dp);
+
+       if (is_edp(intel_dp))
                ironlake_edp_backlight_on(dev);
 }
 
@@ -892,22 +954,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
        if (mode != DRM_MODE_DPMS_ON) {
-               if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+               if (is_edp(intel_dp))
                        ironlake_edp_backlight_off(dev);
+               intel_dp_link_down(intel_dp);
+               if (is_edp(intel_dp))
                        ironlake_edp_panel_off(dev);
-               }
-               if (dp_reg & DP_PORT_EN)
-                       intel_dp_link_down(intel_dp);
-               if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+               if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
                        ironlake_edp_pll_off(encoder);
        } else {
+               if (is_edp(intel_dp))
+                       ironlake_edp_panel_on(intel_dp);
                if (!(dp_reg & DP_PORT_EN)) {
-                       if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
-                               ironlake_edp_panel_on(dev);
-                       intel_dp_link_train(intel_dp);
-                       if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
-                               ironlake_edp_backlight_on(dev);
+                       intel_dp_start_link_train(intel_dp);
+                       intel_dp_complete_link_train(intel_dp);
                }
+               if (is_edp(intel_dp))
+                       ironlake_edp_backlight_on(dev);
        }
        intel_dp->dpms_mode = mode;
 }
@@ -917,14 +979,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
  * link status information
  */
 static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp,
-                        uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp)
 {
        int ret;
 
        ret = intel_dp_aux_native_read(intel_dp,
                                       DP_LANE0_1_STATUS,
-                                      link_status, DP_LINK_STATUS_SIZE);
+                                      intel_dp->link_status, DP_LINK_STATUS_SIZE);
        if (ret != DP_LINK_STATUS_SIZE)
                return false;
        return true;
@@ -999,18 +1060,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
 }
 
 static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
-                      uint8_t link_status[DP_LINK_STATUS_SIZE],
-                      int lane_count,
-                      uint8_t train_set[4])
+intel_get_adjust_train(struct intel_dp *intel_dp)
 {
        uint8_t v = 0;
        uint8_t p = 0;
        int lane;
 
-       for (lane = 0; lane < lane_count; lane++) {
-               uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
-               uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
+       for (lane = 0; lane < intel_dp->lane_count; lane++) {
+               uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+               uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
 
                if (this_v > v)
                        v = this_v;
@@ -1025,15 +1083,25 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
                p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
        for (lane = 0; lane < 4; lane++)
-               train_set[lane] = v | p;
+               intel_dp->train_set[lane] = v | p;
 }
 
 static uint32_t
-intel_dp_signal_levels(uint8_t train_set, int lane_count)
+intel_dp_signal_levels(struct intel_dp *intel_dp)
 {
-       uint32_t        signal_levels = 0;
+       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t signal_levels = 0;
+       u8 train_set = intel_dp->train_set[0];
+       u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
+       u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
+
+       if (is_edp(intel_dp)) {
+               vswing = dev_priv->edp.vswing;
+               preemphasis = dev_priv->edp.preemphasis;
+       }
 
-       switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+       switch (vswing) {
        case DP_TRAIN_VOLTAGE_SWING_400:
        default:
                signal_levels |= DP_VOLTAGE_0_4;
@@ -1048,7 +1116,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
                signal_levels |= DP_VOLTAGE_1_2;
                break;
        }
-       switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+       switch (preemphasis) {
        case DP_TRAIN_PRE_EMPHASIS_0:
        default:
                signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1116,178 +1184,213 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
                         DP_LANE_CHANNEL_EQ_DONE|\
                         DP_LANE_SYMBOL_LOCKED)
 static bool
-intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+intel_channel_eq_ok(struct intel_dp *intel_dp)
 {
        uint8_t lane_align;
        uint8_t lane_status;
        int lane;
 
-       lane_align = intel_dp_link_status(link_status,
+       lane_align = intel_dp_link_status(intel_dp->link_status,
                                          DP_LANE_ALIGN_STATUS_UPDATED);
        if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
                return false;
-       for (lane = 0; lane < lane_count; lane++) {
-               lane_status = intel_get_lane_status(link_status, lane);
+       for (lane = 0; lane < intel_dp->lane_count; lane++) {
+               lane_status = intel_get_lane_status(intel_dp->link_status, lane);
                if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
                        return false;
        }
        return true;
 }
 
+static bool
+intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
+               return false;
+
+       return true;
+}
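
Some eDP panels advertise via the VBT (dev_priv->no_aux_handshake) that they train without any DPCD handshake, so this helper gates every AUX read/write in the training paths; such panels only ever see the register-level pattern changes. Callers use it as a simple early-out:

/* Skip the DPCD writes entirely for handshake-less eDP panels. */
if (!intel_dp_aux_handshake_required(intel_dp))
        return true;    /* pattern already set via the port register */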
+
 static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t dp_reg_value,
-                       uint8_t dp_train_pat,
-                       uint8_t train_set[4])
+                       uint8_t dp_train_pat)
 {
-       struct drm_device *dev = intel_dp->base.enc.dev;
+       struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        I915_WRITE(intel_dp->output_reg, dp_reg_value);
        POSTING_READ(intel_dp->output_reg);
 
+       if (!intel_dp_aux_handshake_required(intel_dp))
+               return true;
+
        intel_dp_aux_native_write_1(intel_dp,
                                    DP_TRAINING_PATTERN_SET,
                                    dp_train_pat);
 
        ret = intel_dp_aux_native_write(intel_dp,
-                                       DP_TRAINING_LANE0_SET, train_set, 4);
+                                       DP_TRAINING_LANE0_SET,
+                                       intel_dp->train_set, 4);
        if (ret != 4)
                return false;
 
        return true;
 }
 
+/* Enable corresponding port and start training pattern 1 */
 static void
-intel_dp_link_train(struct intel_dp *intel_dp)
+intel_dp_start_link_train(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.enc.dev;
+       struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint8_t train_set[4];
-       uint8_t link_status[DP_LINK_STATUS_SIZE];
+       struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
        int i;
        uint8_t voltage;
        bool clock_recovery = false;
-       bool channel_eq = false;
        int tries;
        u32 reg;
        uint32_t DP = intel_dp->DP;
-       struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
 
        /* Enable output, wait for it to become active */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
        intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-       /* Write the link configuration data */
-       intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
-                                 intel_dp->link_configuration,
-                                 DP_LINK_CONFIGURATION_SIZE);
+       if (intel_dp_aux_handshake_required(intel_dp))
+               /* Write the link configuration data */
+               intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+                                         intel_dp->link_configuration,
+                                         DP_LINK_CONFIGURATION_SIZE);
 
        DP |= DP_PORT_EN;
-       if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+       if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
        else
                DP &= ~DP_LINK_TRAIN_MASK;
-       memset(train_set, 0, 4);
+       memset(intel_dp->train_set, 0, 4);
        voltage = 0xff;
        tries = 0;
        clock_recovery = false;
        for (;;) {
-               /* Use train_set[0] to set the voltage and pre emphasis values */
+               /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
                uint32_t    signal_levels;
-               if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
-                       signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+               if (IS_GEN6(dev) && is_edp(intel_dp)) {
+                       signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
-                       signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+                       signal_levels = intel_dp_signal_levels(intel_dp);
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+               if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
                        reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_1;
 
                if (!intel_dp_set_link_train(intel_dp, reg,
-                                            DP_TRAINING_PATTERN_1, train_set))
+                                            DP_TRAINING_PATTERN_1))
                        break;
                /* Set training pattern 1 */
 
-               udelay(100);
-               if (!intel_dp_get_link_status(intel_dp, link_status))
+               udelay(500);
+               if (intel_dp_aux_handshake_required(intel_dp)) {
                        break;
+               } else {
+                       if (!intel_dp_get_link_status(intel_dp))
+                               break;
 
-               if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
-                       clock_recovery = true;
-                       break;
-               }
-
-               /* Check to see if we've tried the max voltage */
-               for (i = 0; i < intel_dp->lane_count; i++)
-                       if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+                       if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+                               clock_recovery = true;
                                break;
-               if (i == intel_dp->lane_count)
-                       break;
+                       }
 
-               /* Check to see if we've tried the same voltage 5 times */
-               if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-                       ++tries;
-                       if (tries == 5)
+                       /* Check to see if we've tried the max voltage */
+                       for (i = 0; i < intel_dp->lane_count; i++)
+                               if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+                                       break;
+                       if (i == intel_dp->lane_count)
                                break;
-               } else
-                       tries = 0;
-               voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
-               /* Compute new train_set as requested by target */
-               intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+                       /* Check to see if we've tried the same voltage 5 times */
+                       if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+                               ++tries;
+                               if (tries == 5)
+                                       break;
+                       } else
+                               tries = 0;
+                       voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+                       /* Compute new intel_dp->train_set as requested by target */
+                       intel_get_adjust_train(intel_dp);
+               }
        }
 
+       intel_dp->DP = DP;
+}
+
+static void
+intel_dp_complete_link_train(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       bool channel_eq = false;
+       int tries;
+       u32 reg;
+       uint32_t DP = intel_dp->DP;
+
        /* channel equalization */
        tries = 0;
        channel_eq = false;
        for (;;) {
-               /* Use train_set[0] to set the voltage and pre emphasis values */
+               /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
                uint32_t    signal_levels;
 
-               if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
-                       signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+               if (IS_GEN6(dev) && is_edp(intel_dp)) {
+                       signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
-                       signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+                       signal_levels = intel_dp_signal_levels(intel_dp);
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+               if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
                        reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_2;
 
                /* channel eq pattern */
                if (!intel_dp_set_link_train(intel_dp, reg,
-                                            DP_TRAINING_PATTERN_2, train_set))
+                                            DP_TRAINING_PATTERN_2))
                        break;
 
-               udelay(400);
-               if (!intel_dp_get_link_status(intel_dp, link_status))
-                       break;
+               udelay(500);
 
-               if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
-                       channel_eq = true;
+               if (!intel_dp_aux_handshake_required(intel_dp)) {
                        break;
-               }
+               } else {
+                       if (!intel_dp_get_link_status(intel_dp))
+                               break;
 
-               /* Try 5 times */
-               if (tries > 5)
-                       break;
+                       if (intel_channel_eq_ok(intel_dp)) {
+                               channel_eq = true;
+                               break;
+                       }
 
-               /* Compute new train_set as requested by target */
-               intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
-               ++tries;
-       }
+                       /* Try 5 times */
+                       if (tries > 5)
+                               break;
 
-       if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+                       /* Compute new intel_dp->train_set as requested by target */
+                       intel_get_adjust_train(intel_dp);
+                       ++tries;
+               }
+       }
+       if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
                reg = DP | DP_LINK_TRAIN_OFF_CPT;
        else
                reg = DP | DP_LINK_TRAIN_OFF;
@@ -1301,32 +1404,31 @@ intel_dp_link_train(struct intel_dp *intel_dp)
 static void
 intel_dp_link_down(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.enc.dev;
+       struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t DP = intel_dp->DP;
 
        DRM_DEBUG_KMS("\n");
 
-       if (IS_eDP(intel_dp)) {
+       if (is_edp(intel_dp)) {
                DP &= ~DP_PLL_ENABLE;
                I915_WRITE(intel_dp->output_reg, DP);
                POSTING_READ(intel_dp->output_reg);
                udelay(100);
        }
 
-       if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
+       if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
-               POSTING_READ(intel_dp->output_reg);
        } else {
                DP &= ~DP_LINK_TRAIN_MASK;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
-               POSTING_READ(intel_dp->output_reg);
        }
+       POSTING_READ(intel_dp->output_reg);
 
-       udelay(17000);
+       msleep(17);
 
-       if (IS_eDP(intel_dp))
+       if (is_edp(intel_dp))
                DP |= DP_LINK_TRAIN_OFF;
        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);
@@ -1344,32 +1446,34 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 static void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
-       uint8_t link_status[DP_LINK_STATUS_SIZE];
-
-       if (!intel_dp->base.enc.crtc)
+       if (!intel_dp->base.base.crtc)
                return;
 
-       if (!intel_dp_get_link_status(intel_dp, link_status)) {
+       if (!intel_dp_get_link_status(intel_dp)) {
                intel_dp_link_down(intel_dp);
                return;
        }
 
-       if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
-               intel_dp_link_train(intel_dp);
+       if (!intel_channel_eq_ok(intel_dp)) {
+               intel_dp_start_link_train(intel_dp);
+               intel_dp_complete_link_train(intel_dp);
+       }
 }
 
 static enum drm_connector_status
-ironlake_dp_detect(struct drm_connector *connector)
+ironlake_dp_detect(struct intel_dp *intel_dp)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        enum drm_connector_status status;
 
+       /* Can't disconnect eDP */
+       if (is_edp(intel_dp))
+               return connector_status_connected;
+
        status = connector_status_disconnected;
        if (intel_dp_aux_native_read(intel_dp,
                                     0x000, intel_dp->dpcd,
-                                    sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
-       {
+                                    sizeof (intel_dp->dpcd))
+           == sizeof(intel_dp->dpcd)) {
                if (intel_dp->dpcd[0] != 0)
                        status = connector_status_connected;
        }
@@ -1378,26 +1482,13 @@ ironlake_dp_detect(struct drm_connector *connector)
        return status;
 }
 
-/**
- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
- *
- * \return true if DP port is connected.
- * \return false if DP port is disconnected.
- */
 static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector, bool force)
+g4x_dp_detect(struct intel_dp *intel_dp)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct drm_device *dev = intel_dp->base.enc.dev;
+       struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t temp, bit;
        enum drm_connector_status status;
-
-       intel_dp->has_audio = false;
-
-       if (HAS_PCH_SPLIT(dev))
-               return ironlake_dp_detect(connector);
+       uint32_t temp, bit;
 
        switch (intel_dp->output_reg) {
        case DP_B:
@@ -1419,31 +1510,66 @@ intel_dp_detect(struct drm_connector *connector, bool force)
                return connector_status_disconnected;
 
        status = connector_status_disconnected;
-       if (intel_dp_aux_native_read(intel_dp,
-                                    0x000, intel_dp->dpcd,
+       if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
                                     sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
        {
                if (intel_dp->dpcd[0] != 0)
                        status = connector_status_connected;
        }
-       return status;
+
+       return status;
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
+ *
+ * \return true if DP port is connected.
+ * \return false if DP port is disconnected.
+ */
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct drm_device *dev = intel_dp->base.base.dev;
+       enum drm_connector_status status;
+       struct edid *edid = NULL;
+
+       intel_dp->has_audio = false;
+
+       if (HAS_PCH_SPLIT(dev))
+               status = ironlake_dp_detect(intel_dp);
+       else
+               status = g4x_dp_detect(intel_dp);
+       if (status != connector_status_connected)
+               return status;
+
+       if (intel_dp->force_audio) {
+               intel_dp->has_audio = intel_dp->force_audio > 0;
+       } else {
+               edid = drm_get_edid(connector, &intel_dp->adapter);
+               if (edid) {
+                       intel_dp->has_audio = drm_detect_monitor_audio(edid);
+                       connector->display_info.raw_edid = NULL;
+                       kfree(edid);
+               }
+       }
+
+       return connector_status_connected;
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct drm_device *dev = intel_dp->base.enc.dev;
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct drm_device *dev = intel_dp->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        /* We should parse the EDID data and find out if it has an audio sink
         */
 
-       ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
+       ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
        if (ret) {
-               if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
-                   !dev_priv->panel_fixed_mode) {
+               if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
                        struct drm_display_mode *newmode;
                        list_for_each_entry(newmode, &connector->probed_modes,
                                            head) {
@@ -1459,7 +1585,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
        }
 
        /* if eDP has no EDID, try to use fixed panel mode from VBT */
-       if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+       if (is_edp(intel_dp)) {
                if (dev_priv->panel_fixed_mode != NULL) {
                        struct drm_display_mode *mode;
                        mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1470,6 +1596,46 @@ static int intel_dp_get_modes(struct drm_connector *connector)
        return 0;
 }
 
+static int
+intel_dp_set_property(struct drm_connector *connector,
+                     struct drm_property *property,
+                     uint64_t val)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       int ret;
+
+       ret = drm_connector_property_set_value(connector, property, val);
+       if (ret)
+               return ret;
+
+       if (property == intel_dp->force_audio_property) {
+               if (val == intel_dp->force_audio)
+                       return 0;
+
+               intel_dp->force_audio = val;
+
+               if (val > 0 && intel_dp->has_audio)
+                       return 0;
+               if (val < 0 && !intel_dp->has_audio)
+                       return 0;
+
+               intel_dp->has_audio = val > 0;
+               goto done;
+       }
+
+       return -EINVAL;
+
+done:
+       if (intel_dp->base.base.crtc) {
+               struct drm_crtc *crtc = intel_dp->base.base.crtc;
+               drm_crtc_helper_set_mode(crtc, &crtc->mode,
+                                        crtc->x, crtc->y,
+                                        crtc->fb);
+       }
+
+       return 0;
+}
+
 static void
 intel_dp_destroy (struct drm_connector *connector)
 {
@@ -1478,6 +1644,15 @@ intel_dp_destroy (struct drm_connector *connector)
        kfree(connector);
 }
 
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+       i2c_del_adapter(&intel_dp->adapter);
+       drm_encoder_cleanup(encoder);
+       kfree(intel_dp);
+}
+
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
        .dpms = intel_dp_dpms,
        .mode_fixup = intel_dp_mode_fixup,
@@ -1490,20 +1665,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .detect = intel_dp_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = intel_dp_set_property,
        .destroy = intel_dp_destroy,
 };
 
 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
-       .best_encoder = intel_attached_encoder,
+       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
-       .destroy = intel_encoder_destroy,
+       .destroy = intel_dp_encoder_destroy,
 };
 
-void
+static void
 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
        struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
@@ -1554,6 +1730,20 @@ bool intel_dpd_is_edp(struct drm_device *dev)
        return false;
 }
 
+static void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+
+       intel_dp->force_audio_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+       if (intel_dp->force_audio_property) {
+               intel_dp->force_audio_property->values[0] = -1;
+               intel_dp->force_audio_property->values[1] = 1;
+               drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
+       }
+}
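
force_audio is a range property spanning -1..1 used as a tri-state: -1 forces
audio off, 0 defers to EDID detection, 1 forces audio on. The detect and
set_property paths above both reduce to the rule below (illustrative helper,
not part of the commit):

        /* Sketch of the force_audio tri-state used by intel_dp_detect(). */
        static bool effective_has_audio(int force_audio, bool edid_has_audio)
        {
                if (force_audio)                /* -1 or 1: user override */
                        return force_audio > 0;
                return edid_has_audio;          /* 0: trust the monitor */
        }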
+
 void
 intel_dp_init(struct drm_device *dev, int output_reg)
 {
@@ -1580,7 +1770,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                if (intel_dpd_is_edp(dev))
                        intel_dp->is_pch_edp = true;
 
-       if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+       if (output_reg == DP_A || is_pch_edp(intel_dp)) {
                type = DRM_MODE_CONNECTOR_eDP;
                intel_encoder->type = INTEL_OUTPUT_EDP;
        } else {
@@ -1601,7 +1791,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
        else if (output_reg == DP_D || output_reg == PCH_DP_D)
                intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
 
-       if (IS_eDP(intel_dp))
+       if (is_edp(intel_dp))
                intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
 
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -1612,12 +1802,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
        intel_dp->has_audio = false;
        intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
 
-       drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
+       drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
                         DRM_MODE_ENCODER_TMDS);
-       drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
+       drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
 
-       drm_mode_connector_attach_encoder(&intel_connector->base,
-                                         &intel_encoder->enc);
+       intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_sysfs_connector_add(connector);
 
        /* Set up the DDC bus. */
@@ -1647,10 +1836,29 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 
        intel_dp_i2c_init(intel_dp, intel_connector, name);
 
-       intel_encoder->ddc_bus = &intel_dp->adapter;
+       /* Cache some DPCD data in the eDP case */
+       if (is_edp(intel_dp)) {
+               int ret;
+               bool was_on;
+
+               was_on = ironlake_edp_panel_on(intel_dp);
+               ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
+                                              intel_dp->dpcd,
+                                              sizeof(intel_dp->dpcd));
+               if (ret == sizeof(intel_dp->dpcd)) {
+                       if (intel_dp->dpcd[0] >= 0x11)
+                               dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+                                       DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+               } else {
+                       DRM_ERROR("failed to retrieve link info\n");
+               }
+               if (!was_on)
+                       ironlake_edp_panel_off(dev);
+       }
+
        intel_encoder->hot_plug = intel_dp_hot_plug;
 
-       if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+       if (is_edp(intel_dp)) {
                /* initialize panel mode from VBT if available for eDP */
                if (dev_priv->lfp_lvds_vbt_mode) {
                        dev_priv->panel_fixed_mode =
@@ -1662,6 +1870,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                }
        }
 
+       intel_dp_add_properties(intel_dp, connector);
+
        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
index 8828b3ac6414eabff93134e34a41ae5c38d1cd34..9af9f86a8765c82833706aa1320ca9baed3fa416 100644 (file)
 #define __INTEL_DRV_H__
 
 #include <linux/i2c.h>
-#include <linux/i2c-id.h>
-#include <linux/i2c-algo-bit.h>
 #include "i915_drv.h"
 #include "drm_crtc.h"
-
 #include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
 
-#define wait_for(COND, MS, W) ({ \
+#define _wait_for(COND, MS, W) ({ \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);       \
        int ret__ = 0;                                                  \
        while (! (COND)) {                                              \
                        ret__ = -ETIMEDOUT;                             \
                        break;                                          \
                }                                                       \
-               if (W) msleep(W);                                       \
+               if (W && !in_dbg_master()) msleep(W);                   \
        }                                                               \
        ret__;                                                          \
 })
 
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+
+#define MSLEEP(x) do { \
+       if (in_dbg_master()) \
+               mdelay(x); \
+       else \
+               msleep(x); \
+} while(0)
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
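
The _wait_for() macro above is the driver's poll-with-timeout idiom: spin on
COND, give up with -ETIMEDOUT once MS milliseconds have elapsed, and sleep W
milliseconds between polls unless the kernel debugger is active (msleep()
under kgdb would hang the machine). wait_for() is the sleeping flavour with a
1 ms interval; wait_for_atomic() busy-waits for atomic context. As a rough
sketch, the same pattern unrolled into a plain function (the name is
illustrative and this is not part of the commit; it assumes
<linux/jiffies.h>, <linux/delay.h> and <linux/kgdb.h>):

        /* Illustrative unrolling of _wait_for(COND, MS, W). */
        static int poll_until(bool (*cond)(void *data), void *data,
                              unsigned int timeout_ms, unsigned int wait_ms)
        {
                unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

                while (!cond(data)) {
                        if (time_after(jiffies, timeout))
                                return -ETIMEDOUT;
                        if (wait_ms && !in_dbg_master())
                                msleep(wait_ms);
                }

                return 0;
        }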
+
 /*
  * Display related stuff
  */
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TVOUT 4
 
-struct intel_i2c_chan {
-       struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
-       u32 reg; /* GPIO reg */
-       struct i2c_adapter adapter;
-       struct i2c_algo_bit_data algo;
-};
+/* drm_display_mode->private_flags */
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+static inline void
+intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+                               int multiplier)
+{
+       mode->clock *= multiplier;
+       mode->private_flags |= multiplier << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
+
+static inline int
+intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+       return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
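
With INTEL_MODE_PIXEL_MULTIPLIER_SHIFT at zero the setter's shift is a no-op
today, but the pair still forms a stash/recover contract: the clock is
pre-multiplied and the factor is parked in private_flags for mode_set to pick
up later. A hedged roundtrip sketch (illustrative helper, not part of the
commit; it assumes the multiplier fits in the 4-bit mask):

        /* Sketch: stash and recover a pixel multiplier, 1 <= m <= 15. */
        static void pixel_multiplier_roundtrip(struct drm_display_mode *mode,
                                               int m)
        {
                intel_mode_set_pixel_multiplier(mode, m); /* clock *= m */
                WARN_ON(intel_mode_get_pixel_multiplier(mode) != m);
        }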
 
 struct intel_framebuffer {
        struct drm_framebuffer base;
        struct drm_gem_object *obj;
 };
 
+struct intel_fbdev {
+       struct drm_fb_helper helper;
+       struct intel_framebuffer ifb;
+       struct list_head fbdev_list;
+       struct drm_display_mode *our_mode;
+};
 
 struct intel_encoder {
-       struct drm_encoder enc;
+       struct drm_encoder base;
        int type;
-       struct i2c_adapter *i2c_bus;
-       struct i2c_adapter *ddc_bus;
        bool load_detect_temp;
        bool needs_tv_clock;
        void (*hot_plug)(struct intel_encoder *);
@@ -123,32 +149,7 @@ struct intel_encoder {
 
 struct intel_connector {
        struct drm_connector base;
-};
-
-struct intel_crtc;
-struct intel_overlay {
-       struct drm_device *dev;
-       struct intel_crtc *crtc;
-       struct drm_i915_gem_object *vid_bo;
-       struct drm_i915_gem_object *old_vid_bo;
-       int active;
-       int pfit_active;
-       u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
-       u32 color_key;
-       u32 brightness, contrast, saturation;
-       u32 old_xscale, old_yscale;
-       /* register access */
-       u32 flip_addr;
-       struct drm_i915_gem_object *reg_bo;
-       void *virt_addr;
-       /* flip handling */
-       uint32_t last_flip_req;
-       int hw_wedged;
-#define HW_WEDGED              1
-#define NEEDS_WAIT_FOR_FLIP    2
-#define RELEASE_OLD_VID                3
-#define SWITCH_OFF_STAGE_1     4
-#define SWITCH_OFF_STAGE_2     5
+       struct intel_encoder *encoder;
 };
 
 struct intel_crtc {
@@ -157,6 +158,7 @@ struct intel_crtc {
        enum plane plane;
        u8 lut_r[256], lut_g[256], lut_b[256];
        int dpms_mode;
+       bool active; /* is the crtc on? independent of the dpms mode */
        bool busy; /* is scanout buffer being updated frequently? */
        struct timer_list idle_timer;
        bool lowfreq_avail;
@@ -168,14 +170,53 @@ struct intel_crtc {
        uint32_t cursor_addr;
        int16_t cursor_x, cursor_y;
        int16_t cursor_width, cursor_height;
-       bool cursor_visible, cursor_on;
+       bool cursor_visible;
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
-#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 
+#define DIP_TYPE_AVI    0x82
+#define DIP_VERSION_AVI 0x2
+#define DIP_LEN_AVI     13
+
+struct dip_infoframe {
+       uint8_t type;           /* HB0 */
+       uint8_t ver;            /* HB1 */
+       uint8_t len;            /* HB2 - body len, not including checksum */
+       uint8_t ecc;            /* Header ECC */
+       uint8_t checksum;       /* PB0 */
+       union {
+               struct {
+                       /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
+                       uint8_t Y_A_B_S;
+                       /* PB2 - C 7:6, M 5:4, R 3:0 */
+                       uint8_t C_M_R;
+                       /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
+                       uint8_t ITC_EC_Q_SC;
+                       /* PB4 - VIC 6:0 */
+                       uint8_t VIC;
+                       /* PB5 - PR 3:0 */
+                       uint8_t PR;
+                       /* PB6 to PB13 */
+                       uint16_t top_bar_end;
+                       uint16_t bottom_bar_start;
+                       uint16_t left_bar_end;
+                       uint16_t right_bar_start;
+               } avi;
+               uint8_t payload[27];
+       } __attribute__ ((packed)) body;
+} __attribute__((packed));
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
 struct intel_unpin_work {
        struct work_struct work;
        struct drm_device *dev;
@@ -186,16 +227,12 @@ struct intel_unpin_work {
        bool enable_stall_check;
 };
 
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
-                                    const char *name);
-void intel_i2c_destroy(struct i2c_adapter *adapter);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
-void intel_i2c_reset_gmbus(struct drm_device *dev);
+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
 
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
@@ -205,32 +242,41 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode);
-extern bool intel_pch_has_edp(struct drm_crtc *crtc);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
 extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
+extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
 
-
+/* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
                                   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct drm_device *dev,
                                    int fitting_mode,
                                    struct drm_display_mode *mode,
                                    struct drm_display_mode *adjusted_mode);
+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
+extern u32 intel_panel_get_backlight(struct drm_device *dev);
+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
 
-extern int intel_panel_fitter_pipe (struct drm_device *dev);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
 extern void intel_encoder_commit (struct drm_encoder *encoder);
 extern void intel_encoder_destroy(struct drm_encoder *encoder);
 
-extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+{
+       return to_intel_connector(connector)->encoder;
+}
+
+extern void intel_connector_attach_encoder(struct intel_connector *connector,
+                                          struct intel_encoder *encoder);
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
 
 extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                                    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
                                                   struct drm_connector *connector,
                                                   struct drm_display_mode *mode,
@@ -252,7 +298,8 @@ extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-                                     struct drm_gem_object *obj);
+                                     struct drm_gem_object *obj,
+                                     bool pipelined);
 
 extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
@@ -267,9 +314,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 
 extern void intel_setup_overlay(struct drm_device *dev);
 extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
-                                               int interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay,
+                                   bool interruptible);
 extern int intel_overlay_put_image(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv);
 extern int intel_overlay_attrs(struct drm_device *dev, void *data,
index 7c9ec1472d46ab3cbb08f6bffc8257af952a64b0..ea373283c93be6e16edabd4714ae0eb1c89b0da9 100644 (file)
@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
                .name = "ch7017",
                .dvo_reg = DVOC,
                .slave_addr = 0x75,
-               .gpio = GPIOE,
+               .gpio = GMBUS_PORT_DPB,
                .dev_ops = &ch7017_ops,
        }
 };
@@ -88,7 +88,13 @@ struct intel_dvo {
 
 static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
 {
-       return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
+       return container_of(encoder, struct intel_dvo, base.base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+       return container_of(intel_attached_encoder(connector),
+                           struct intel_dvo, base);
 }
 
 static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
 static int intel_dvo_mode_valid(struct drm_connector *connector,
                                struct drm_display_mode *mode)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+       struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
 static enum drm_connector_status
 intel_dvo_detect(struct drm_connector *connector, bool force)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-
+       struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
        return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
 }
 
 static int intel_dvo_get_modes(struct drm_connector *connector)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+       struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
 
        /* We should probably have an i2c driver get_modes function for those
         * devices which will have a fixed set of modes determined by the chip
         * (TV-out, for example), but for now with just TMDS and LVDS,
         * that's not the case.
         */
-       intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
+       intel_ddc_get_modes(connector,
+                           &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
        if (!list_empty(&connector->probed_modes))
                return 1;
 
@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
        .mode_valid = intel_dvo_mode_valid,
        .get_modes = intel_dvo_get_modes,
-       .best_encoder = intel_attached_encoder,
+       .best_encoder = intel_best_encoder,
 };
 
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+       struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
        uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
        struct drm_display_mode *mode = NULL;
 
@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
                struct drm_crtc *crtc;
                int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
 
-               crtc = intel_get_crtc_from_pipe(dev, pipe);
+               crtc = intel_get_crtc_for_pipe(dev, pipe);
                if (crtc) {
                        mode = intel_crtc_mode_get(dev, crtc);
                        if (mode) {
@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
 
 void intel_dvo_init(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
        struct intel_dvo *intel_dvo;
        struct intel_connector *intel_connector;
-       struct i2c_adapter *i2cbus = NULL;
-       int ret = 0;
        int i;
        int encoder_type = DRM_MODE_ENCODER_NONE;
 
@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
        }
 
        intel_encoder = &intel_dvo->base;
-
-       /* Set up the DDC bus */
-       intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
-       if (!intel_encoder->ddc_bus)
-               goto free_intel;
+       drm_encoder_init(dev, &intel_encoder->base,
+                        &intel_dvo_enc_funcs, encoder_type);
 
        /* Now, try to find a controller */
        for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
                struct drm_connector *connector = &intel_connector->base;
                const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+               struct i2c_adapter *i2c;
                int gpio;
 
                /* Allow the I2C driver info to specify the GPIO to be used in
@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
                if (dvo->gpio != 0)
                        gpio = dvo->gpio;
                else if (dvo->type == INTEL_DVO_CHIP_LVDS)
-                       gpio = GPIOB;
+                       gpio = GMBUS_PORT_SSC;
                else
-                       gpio = GPIOE;
+                       gpio = GMBUS_PORT_DPB;
 
                /* Set up the I2C bus necessary for the chip we're probing.
                 * It appears that everything is on GPIOE except for panels
                 * on i830 laptops, which are on GPIOB (DVOA).
                 */
-               if (i2cbus != NULL)
-                       intel_i2c_destroy(i2cbus);
-               if (!(i2cbus = intel_i2c_create(dev, gpio,
-                       gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
-                       continue;
-               }
+               i2c = &dev_priv->gmbus[gpio].adapter;
 
                intel_dvo->dev = *dvo;
-               ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
-               if (!ret)
+               if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
                        continue;
 
                intel_encoder->type = INTEL_OUTPUT_DVO;
@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
                connector->interlace_allowed = false;
                connector->doublescan_allowed = false;
 
-               drm_encoder_init(dev, &intel_encoder->enc,
-                                &intel_dvo_enc_funcs, encoder_type);
-               drm_encoder_helper_add(&intel_encoder->enc,
+               drm_encoder_helper_add(&intel_encoder->base,
                                       &intel_dvo_helper_funcs);
 
-               drm_mode_connector_attach_encoder(&intel_connector->base,
-                                                 &intel_encoder->enc);
+               intel_connector_attach_encoder(intel_connector, intel_encoder);
                if (dvo->type == INTEL_DVO_CHIP_LVDS) {
                        /* For our LVDS chipsets, we should hopefully be able
                         * to dig the fixed panel mode out of the BIOS data.
@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
                return;
        }
 
-       intel_i2c_destroy(intel_encoder->ddc_bus);
-       /* Didn't find a chip, so tear down. */
-       if (i2cbus != NULL)
-               intel_i2c_destroy(i2cbus);
-free_intel:
+       drm_encoder_cleanup(&intel_encoder->base);
        kfree(intel_dvo);
        kfree(intel_connector);
 }
index b61966c126d3e3839d33be6c8df2c0170c5d1376..af2a1dddc28e2e908529db44a24cea44ff10d387 100644 (file)
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-struct intel_fbdev {
-       struct drm_fb_helper helper;
-       struct intel_framebuffer ifb;
-       struct list_head fbdev_list;
-       struct drm_display_mode *our_mode;
-};
-
 static struct fb_ops intelfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
        struct drm_gem_object *fbo = NULL;
        struct drm_i915_gem_object *obj_priv;
        struct device *device = &dev->pdev->dev;
-       int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+       int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
 
        /* we don't do packed 24bpp */
        if (sizes->surface_bpp == 24)
@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = intel_pin_and_fence_fb_obj(dev, fbo);
+       /* Flush everything out, we'll be doing GTT only from now on */
+       ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
        if (ret) {
                DRM_ERROR("failed to pin fb: %d\n", ret);
                goto out_unref;
        }
 
-       /* Flush everything out, we'll be doing GTT only from now on */
-       ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
-       if (ret) {
-               DRM_ERROR("failed to bind fb: %d.\n", ret);
-               goto out_unpin;
-       }
-
        info = framebuffer_alloc(0, device);
        if (!info) {
                ret = -ENOMEM;
@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
                goto out_unpin;
        }
        info->apertures->ranges[0].base = dev->mode_config.fb_base;
-       if (IS_I9XX(dev))
+       if (!IS_GEN2(dev))
                info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
        else
                info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
        .fb_probe = intel_fb_find_or_create_single,
 };
 
-int intel_fbdev_destroy(struct drm_device *dev,
-                       struct intel_fbdev *ifbdev)
+static void intel_fbdev_destroy(struct drm_device *dev,
+                               struct intel_fbdev *ifbdev)
 {
        struct fb_info *info;
        struct intel_framebuffer *ifb = &ifbdev->ifb;
@@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev,
 
        drm_framebuffer_cleanup(&ifb->base);
        if (ifb->obj) {
-               drm_gem_object_unreference(ifb->obj);
+               drm_gem_object_unreference_unlocked(ifb->obj);
                ifb->obj = NULL;
        }
-
-       return 0;
 }
 
 int intel_fbdev_init(struct drm_device *dev)
index 926934a482ec085c63256567e27f0309b51b24cf..0d0273e7b029296725032098d26eeb08fb8b5bc6 100644 (file)
 struct intel_hdmi {
        struct intel_encoder base;
        u32 sdvox_reg;
+       int ddc_bus;
        bool has_hdmi_sink;
+       bool has_audio;
+       int force_audio;
+       struct drm_property *force_audio_property;
 };
 
 static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
-       return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
+       return container_of(encoder, struct intel_hdmi, base.base);
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+       return container_of(intel_attached_encoder(connector),
+                           struct intel_hdmi, base);
+}
+
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
+{
+       uint8_t *data = (uint8_t *)avi_if;
+       uint8_t sum = 0;
+       unsigned i;
+
+       avi_if->checksum = 0;
+       avi_if->ecc = 0;
+
+       for (i = 0; i < sizeof(*avi_if); i++)
+               sum += data[i];
+
+       avi_if->checksum = 0x100 - sum;
+}
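
The InfoFrame checksum is chosen so that every byte of the packet (header,
checksum and payload together) sums to zero modulo 256; zeroing checksum and
ecc before summing makes 0x100 - sum the right value to store. A small
verification sketch (illustrative, not part of the commit):

        /* Sketch: a frame produced by intel_dip_infoframe_csum() sums to 0 mod 256. */
        static bool dip_infoframe_csum_ok(const struct dip_infoframe *frame)
        {
                const uint8_t *data = (const uint8_t *)frame;
                uint8_t sum = 0;
                unsigned i;

                for (i = 0; i < sizeof(*frame); i++)
                        sum += data[i];

                return sum == 0;
        }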
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+       struct dip_infoframe avi_if = {
+               .type = DIP_TYPE_AVI,
+               .ver = DIP_VERSION_AVI,
+               .len = DIP_LEN_AVI,
+       };
+       uint32_t *data = (uint32_t *)&avi_if;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       u32 port;
+       unsigned i;
+
+       if (!intel_hdmi->has_hdmi_sink)
+               return;
+
+       /* XXX first guess at handling video port, is this correct? */
+       if (intel_hdmi->sdvox_reg == SDVOB)
+               port = VIDEO_DIP_PORT_B;
+       else if (intel_hdmi->sdvox_reg == SDVOC)
+               port = VIDEO_DIP_PORT_C;
+       else
+               return;
+
+       I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+                  VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
+
+       intel_dip_infoframe_csum(&avi_if);
+       for (i = 0; i < sizeof(avi_if); i += 4) {
+               I915_WRITE(VIDEO_DIP_DATA, *data);
+               data++;
+       }
+
+       I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+                  VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
+                  VIDEO_DIP_ENABLE_AVI);
 }
 
 static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -65,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
 
-       if (intel_hdmi->has_hdmi_sink) {
+       /* Required on CPT */
+       if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+               sdvox |= HDMI_MODE_SELECT;
+
+       if (intel_hdmi->has_audio) {
                sdvox |= SDVO_AUDIO_ENABLE;
-               if (HAS_PCH_CPT(dev))
-                       sdvox |= HDMI_MODE_SELECT;
+               sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
        }
 
        if (intel_crtc->pipe == 1) {
@@ -80,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 
        I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
        POSTING_READ(intel_hdmi->sdvox_reg);
+
+       intel_hdmi_set_avi_infoframe(encoder);
 }
 
 static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -141,36 +210,85 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-       struct edid *edid = NULL;
+       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct edid *edid;
        enum drm_connector_status status = connector_status_disconnected;
 
        intel_hdmi->has_hdmi_sink = false;
-       edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
+       intel_hdmi->has_audio = false;
+       edid = drm_get_edid(connector,
+                           &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 
        if (edid) {
                if (edid->input & DRM_EDID_INPUT_DIGITAL) {
                        status = connector_status_connected;
                        intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+                       intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
                }
                connector->display_info.raw_edid = NULL;
                kfree(edid);
        }
 
+       if (status == connector_status_connected) {
+               if (intel_hdmi->force_audio)
+                       intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
+       }
+
        return status;
 }
 
 static int intel_hdmi_get_modes(struct drm_connector *connector)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
 
        /* We should parse the EDID data and find out if it's an HDMI sink so
         * we can send audio to it.
         */
 
-       return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
+       return intel_ddc_get_modes(connector,
+                                  &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+}
+
+static int
+intel_hdmi_set_property(struct drm_connector *connector,
+                     struct drm_property *property,
+                     uint64_t val)
+{
+       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       int ret;
+
+       ret = drm_connector_property_set_value(connector, property, val);
+       if (ret)
+               return ret;
+
+       if (property == intel_hdmi->force_audio_property) {
+               if (val == intel_hdmi->force_audio)
+                       return 0;
+
+               intel_hdmi->force_audio = val;
+
+               if (val > 0 && intel_hdmi->has_audio)
+                       return 0;
+               if (val < 0 && !intel_hdmi->has_audio)
+                       return 0;
+
+               intel_hdmi->has_audio = val > 0;
+               goto done;
+       }
+
+       return -EINVAL;
+
+done:
+       if (intel_hdmi->base.base.crtc) {
+               struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+               drm_crtc_helper_set_mode(crtc, &crtc->mode,
+                                        crtc->x, crtc->y,
+                                        crtc->fb);
+       }
+
+       return 0;
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -192,19 +310,34 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .detect = intel_hdmi_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .set_property = intel_hdmi_set_property,
        .destroy = intel_hdmi_destroy,
 };
 
 static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
        .get_modes = intel_hdmi_get_modes,
        .mode_valid = intel_hdmi_mode_valid,
-       .best_encoder = intel_attached_encoder,
+       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
        .destroy = intel_encoder_destroy,
 };
 
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+
+       intel_hdmi->force_audio_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+       if (intel_hdmi->force_audio_property) {
+               intel_hdmi->force_audio_property->values[0] = -1;
+               intel_hdmi->force_audio_property->values[1] = 1;
+               drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
+       }
+}
+
 void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -224,6 +357,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
        }
 
        intel_encoder = &intel_hdmi->base;
+       drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+
        connector = &intel_connector->base;
        drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_HDMIA);
@@ -239,39 +375,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
        /* Set up the DDC bus. */
        if (sdvox_reg == SDVOB) {
                intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
-               intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+               intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
                dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
        } else if (sdvox_reg == SDVOC) {
                intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
-               intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+               intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
                dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
        } else if (sdvox_reg == HDMIB) {
                intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-               intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
-                                                               "HDMIB");
+               intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
                dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
        } else if (sdvox_reg == HDMIC) {
                intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
-               intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
-                                                               "HDMIC");
+               intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
                dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
        } else if (sdvox_reg == HDMID) {
                intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
-               intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
-                                                               "HDMID");
+               intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
                dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
        }
-       if (!intel_encoder->ddc_bus)
-               goto err_connector;
 
        intel_hdmi->sdvox_reg = sdvox_reg;
 
-       drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS);
-       drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
+       drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+       intel_hdmi_add_properties(intel_hdmi, connector);
 
-       drm_mode_connector_attach_encoder(&intel_connector->base,
-                                         &intel_encoder->enc);
+       intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_sysfs_connector_add(connector);
 
        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -282,13 +412,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
-
-       return;
-
-err_connector:
-       drm_connector_cleanup(connector);
-       kfree(intel_hdmi);
-       kfree(intel_connector);
-
-       return;
 }
index c2649c7df14c7e654aba8eb495c3874c698707b5..2be4f728ed0c7b250550613cc93259462db9735a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
- * Copyright © 2006-2008 Intel Corporation
+ * Copyright © 2006-2008,2010 Intel Corporation
  *   Jesse Barnes <jesse.barnes@intel.com>
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  *
  * Authors:
  *     Eric Anholt <eric@anholt.net>
+ *     Chris Wilson <chris@chris-wilson.co.uk>
  */
 #include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/i2c-id.h>
 #include <linux/i2c-algo-bit.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+       return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data algo;
+       struct drm_i915_private *dev_priv;
+       u32 reg;
+};
+
+void
+intel_i2c_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       if (HAS_PCH_SPLIT(dev))
+               I915_WRITE(PCH_GMBUS0, 0);
+       else
+               I915_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+       u32 val;
 
        /* When using bit bashing for I2C, this bit needs to be set to 1 */
-       if (!IS_PINEVIEW(dev))
+       if (!IS_PINEVIEW(dev_priv->dev))
                return;
+
+       val = I915_READ(DSPCLK_GATE_D);
        if (enable)
-               I915_WRITE(DSPCLK_GATE_D,
-                       I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
+               val |= DPCUNIT_CLOCK_GATE_DISABLE;
        else
-               I915_WRITE(DSPCLK_GATE_D,
-                       I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
+               val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+       I915_WRITE(DSPCLK_GATE_D, val);
 }
 
-/*
- * Intel GPIO access functions
- */
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+       struct drm_i915_private *dev_priv = gpio->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       u32 reserved = 0;
 
-#define I2C_RISEFALL_TIME 20
+       /* On most chips, these bits must be preserved in software. */
+       if (!IS_I830(dev) && !IS_845G(dev))
+               reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                                  GPIO_CLOCK_PULLUP_DISABLE);
+
+       return reserved;
+}
 
 static int get_clock(void *data)
 {
-       struct intel_i2c_chan *chan = data;
-       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-       u32 val;
-
-       val = I915_READ(chan->reg);
-       return ((val & GPIO_CLOCK_VAL_IN) != 0);
+       struct intel_gpio *gpio = data;
+       struct drm_i915_private *dev_priv = gpio->dev_priv;
+       u32 reserved = get_reserved(gpio);
+       I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+       I915_WRITE(gpio->reg, reserved);
+       return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
 }
 
 static int get_data(void *data)
 {
-       struct intel_i2c_chan *chan = data;
-       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-       u32 val;
-
-       val = I915_READ(chan->reg);
-       return ((val & GPIO_DATA_VAL_IN) != 0);
+       struct intel_gpio *gpio = data;
+       struct drm_i915_private *dev_priv = gpio->dev_priv;
+       u32 reserved = get_reserved(gpio);
+       I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+       I915_WRITE(gpio->reg, reserved);
+       return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
 }
 
 static void set_clock(void *data, int state_high)
 {
-       struct intel_i2c_chan *chan = data;
-       struct drm_device *dev = chan->drm_dev;
-       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-       u32 reserved = 0, clock_bits;
-
-       /* On most chips, these bits must be preserved in software. */
-       if (!IS_I830(dev) && !IS_845G(dev))
-               reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
-                                                  GPIO_CLOCK_PULLUP_DISABLE);
+       struct intel_gpio *gpio = data;
+       struct drm_i915_private *dev_priv = gpio->dev_priv;
+       u32 reserved = get_reserved(gpio);
+       u32 clock_bits;
 
        if (state_high)
                clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
        else
                clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
                        GPIO_CLOCK_VAL_MASK;
-       I915_WRITE(chan->reg, reserved | clock_bits);
-       udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+
+       I915_WRITE(gpio->reg, reserved | clock_bits);
+       POSTING_READ(gpio->reg);
 }
 
 static void set_data(void *data, int state_high)
 {
-       struct intel_i2c_chan *chan = data;
-       struct drm_device *dev = chan->drm_dev;
-       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-       u32 reserved = 0, data_bits;
-
-       /* On most chips, these bits must be preserved in software. */
-       if (!IS_I830(dev) && !IS_845G(dev))
-               reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
-                                                  GPIO_CLOCK_PULLUP_DISABLE);
+       struct intel_gpio *gpio = data;
+       struct drm_i915_private *dev_priv = gpio->dev_priv;
+       u32 reserved = get_reserved(gpio);
+       u32 data_bits;
 
        if (state_high)
                data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
@@ -115,109 +141,313 @@ static void set_data(void *data, int state_high)
                data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
                        GPIO_DATA_VAL_MASK;
 
-       I915_WRITE(chan->reg, reserved | data_bits);
-       udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+       I915_WRITE(gpio->reg, reserved | data_bits);
+       POSTING_READ(gpio->reg);
 }
 
-/* Clears the GMBUS setup.  Our driver doesn't make use of the GMBUS I2C
- * engine, but if the BIOS leaves it enabled, then that can break our use
- * of the bit-banging I2C interfaces.  This is notably the case with the
- * Mac Mini in EFI mode.
- */
-void
-intel_i2c_reset_gmbus(struct drm_device *dev)
+static struct i2c_adapter *
+intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       static const int map_pin_to_reg[] = {
+               0,
+               GPIOB,
+               GPIOA,
+               GPIOC,
+               GPIOD,
+               GPIOE,
+               0,
+               GPIOF,
+       };
+       struct intel_gpio *gpio;
 
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_GMBUS0, 0);
-       } else {
-               I915_WRITE(GMBUS0, 0);
+       if (pin < 1 || pin > 7)
+               return NULL;
+
+       gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+       if (gpio == NULL)
+               return NULL;
+
+       gpio->reg = map_pin_to_reg[pin];
+       if (HAS_PCH_SPLIT(dev_priv->dev))
+               gpio->reg += PCH_GPIOA - GPIOA;
+       gpio->dev_priv = dev_priv;
+
+       snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
+       gpio->adapter.owner = THIS_MODULE;
+       gpio->adapter.algo_data = &gpio->algo;
+       gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+       gpio->algo.setsda = set_data;
+       gpio->algo.setscl = set_clock;
+       gpio->algo.getsda = get_data;
+       gpio->algo.getscl = get_clock;
+       gpio->algo.udelay = I2C_RISEFALL_TIME;
+       gpio->algo.timeout = usecs_to_jiffies(2200);
+       gpio->algo.data = gpio;
+
+       if (i2c_bit_add_bus(&gpio->adapter))
+               goto out_free;
+
+       return &gpio->adapter;
+
+out_free:
+       kfree(gpio);
+       return NULL;
+}
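
intel_gpio_create() maps a GMBUS pin number onto the matching bit-banging
GPIO register (relocated wholesale by PCH_GPIOA - GPIOA on PCH platforms) and
registers an i2c-algo-bit adapter over it. A hedged usage sketch, not part of
the commit; the helper name is made up and 0x50 is simply the customary
DDC/EDID slave address:

        /* Sketch: one-byte probe read over the bit-banged bus on pin 5 (GPIOE). */
        static int probe_ddc_via_gpio(struct drm_i915_private *dev_priv)
        {
                u8 out;
                struct i2c_msg msg = {
                        .addr = 0x50,           /* customary DDC/EDID address */
                        .flags = I2C_M_RD,
                        .len = 1,
                        .buf = &out,
                };
                struct i2c_adapter *adapter = intel_gpio_create(dev_priv, 5);
                int ret;

                if (adapter == NULL)
                        return -ENOMEM;

                ret = i2c_transfer(adapter, &msg, 1);
                i2c_del_adapter(adapter);
                kfree(container_of(adapter, struct intel_gpio, adapter));
                return ret == 1 ? 0 : -ENODEV;
        }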
+
+static int
+intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
+                    struct i2c_adapter *adapter,
+                    struct i2c_msg *msgs,
+                    int num)
+{
+       struct intel_gpio *gpio = container_of(adapter,
+                                              struct intel_gpio,
+                                              adapter);
+       int ret;
+
+       intel_i2c_reset(dev_priv->dev);
+
+       intel_i2c_quirk_set(dev_priv, true);
+       set_data(gpio, 1);
+       set_clock(gpio, 1);
+       udelay(I2C_RISEFALL_TIME);
+
+       ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+       set_data(gpio, 1);
+       set_clock(gpio, 1);
+       intel_i2c_quirk_set(dev_priv, false);
+
+       return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+          struct i2c_msg *msgs,
+          int num)
+{
+       struct intel_gmbus *bus = container_of(adapter,
+                                              struct intel_gmbus,
+                                              adapter);
+       struct drm_i915_private *dev_priv = adapter->algo_data;
+       int i, reg_offset;
+
+       if (bus->force_bit)
+               return intel_i2c_quirk_xfer(dev_priv,
+                                           bus->force_bit, msgs, num);
+
+       reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+
+       I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+       for (i = 0; i < num; i++) {
+               u16 len = msgs[i].len;
+               u8 *buf = msgs[i].buf;
+
+               if (msgs[i].flags & I2C_M_RD) {
+                       I915_WRITE(GMBUS1 + reg_offset,
+                                  GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+                                  (len << GMBUS_BYTE_COUNT_SHIFT) |
+                                  (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                                  GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+                       POSTING_READ(GMBUS2+reg_offset);
+                       do {
+                               u32 val, loop = 0;
+
+                               if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+                                       goto timeout;
+                               if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+                                       return 0;
+
+                               val = I915_READ(GMBUS3 + reg_offset);
+                               do {
+                                       *buf++ = val & 0xff;
+                                       val >>= 8;
+                               } while (--len && ++loop < 4);
+                       } while (len);
+               } else {
+                       u32 val, loop;
+
+                       val = loop = 0;
+                       do {
+                               val |= *buf++ << (8 * loop);
+                       } while (--len && ++loop < 4);
+
+                       I915_WRITE(GMBUS3 + reg_offset, val);
+                       I915_WRITE(GMBUS1 + reg_offset,
+                                  (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+                                  (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+                                  (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                                  GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+                       POSTING_READ(GMBUS2+reg_offset);
+
+                       while (len) {
+                               if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+                                       goto timeout;
+                               if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+                                       return 0;
+
+                               val = loop = 0;
+                               do {
+                                       val |= *buf++ << (8 * loop);
+                               } while (--len && ++loop < 4);
+
+                               I915_WRITE(GMBUS3 + reg_offset, val);
+                               POSTING_READ(GMBUS2+reg_offset);
+                       }
+               }
+
+               if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+                       goto timeout;
+               if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+                       return 0;
        }
+
+       return num;
+
+timeout:
+       DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+                bus->reg0 & 0xff, bus->adapter.name);
+       /* The hardware may not support GMBUS over these pins,
+        * so fall back to GPIO bit banging instead.
+        */
+       bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+       if (!bus->force_bit)
+               return -ENOMEM;
+
+       return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
 }
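Both transfer directions above funnel message bytes through the single 32-bit GMBUS3 data register, up to four at a time, least significant byte first. Here is the packing loop in isolation as a runnable sketch; it is plain C with nothing i915-specific, and like the driver it assumes each message carries at least one byte:

    #include <stdio.h>
    #include <stdint.h>

    /* Pack up to 4 bytes LSB-first, as the write path in gmbus_xfer() does. */
    static uint32_t pack4(const uint8_t **buf, uint16_t *len)
    {
            uint32_t val = 0;
            int loop = 0;

            do {
                    val |= (uint32_t)*(*buf)++ << (8 * loop);
            } while (--*len && ++loop < 4);
            return val;
    }

    int main(void)
    {
            const uint8_t msg[] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
            const uint8_t *p = msg;
            uint16_t len = sizeof(msg);

            while (len)
                    printf("GMBUS3 <- 0x%08x\n", pack4(&p, &len));
            /* prints 0x44332211, then 0x00000055 */
            return 0;
    }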
 
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+       struct intel_gmbus *bus = container_of(adapter,
+                                              struct intel_gmbus,
+                                              adapter);
+
+       if (bus->force_bit)
+               bus->force_bit->algo->functionality(bus->force_bit);
+
+       return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+               /* I2C_FUNC_10BIT_ADDR | */
+               I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+               I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+       .master_xfer    = gmbus_xfer,
+       .functionality  = gmbus_func
+};
+
 /**
- * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
  * @dev: DRM device
- * @output: driver specific output device
- * @reg: GPIO reg to use
- * @name: name for this bus
- * @slave_addr: slave address (if fixed)
- *
- * Creates and registers a new i2c bus with the Linux i2c layer, for use
- * in output probing and control (e.g. DDC or SDVO control functions).
- *
- * Possible values for @reg include:
- *   %GPIOA
- *   %GPIOB
- *   %GPIOC
- *   %GPIOD
- *   %GPIOE
- *   %GPIOF
- *   %GPIOG
- *   %GPIOH
- * see PRM for details on how these different busses are used.
  */
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
-                                    const char *name)
+int intel_setup_gmbus(struct drm_device *dev)
 {
-       struct intel_i2c_chan *chan;
+       static const char *names[GMBUS_NUM_PORTS] = {
+               "disabled",
+               "ssc",
+               "vga",
+               "panel",
+               "dpc",
+               "dpb",
+               "reserved"
+               "dpd",
+       };
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret, i;
 
-       chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
-       if (!chan)
-               goto out_free;
+       dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+                                 GFP_KERNEL);
+       if (dev_priv->gmbus == NULL)
+               return -ENOMEM;
 
-       chan->drm_dev = dev;
-       chan->reg = reg;
-       snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
-       chan->adapter.owner = THIS_MODULE;
-       chan->adapter.algo_data = &chan->algo;
-       chan->adapter.dev.parent = &dev->pdev->dev;
-       chan->algo.setsda = set_data;
-       chan->algo.setscl = set_clock;
-       chan->algo.getsda = get_data;
-       chan->algo.getscl = get_clock;
-       chan->algo.udelay = 20;
-       chan->algo.timeout = usecs_to_jiffies(2200);
-       chan->algo.data = chan;
-
-       i2c_set_adapdata(&chan->adapter, chan);
-
-       if(i2c_bit_add_bus(&chan->adapter))
-               goto out_free;
+       for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+               struct intel_gmbus *bus = &dev_priv->gmbus[i];
 
-       intel_i2c_reset_gmbus(dev);
+               bus->adapter.owner = THIS_MODULE;
+               bus->adapter.class = I2C_CLASS_DDC;
+               snprintf(bus->adapter.name,
+                        I2C_NAME_SIZE,
+                        "gmbus %s",
+                        names[i]);
 
-       /* JJJ:  raise SCL and SDA? */
-       intel_i2c_quirk_set(dev, true);
-       set_data(chan, 1);
-       set_clock(chan, 1);
-       intel_i2c_quirk_set(dev, false);
-       udelay(20);
+               bus->adapter.dev.parent = &dev->pdev->dev;
+               bus->adapter.algo_data  = dev_priv;
 
-       return &chan->adapter;
+               bus->adapter.algo = &gmbus_algorithm;
+               ret = i2c_add_adapter(&bus->adapter);
+               if (ret)
+                       goto err;
 
-out_free:
-       kfree(chan);
-       return NULL;
+               /* By default use a conservative clock rate */
+               bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+               /* XXX force bit banging until GMBUS is fully debugged */
+               bus->force_bit = intel_gpio_create(dev_priv, i);
+       }
+
+       intel_i2c_reset(dev_priv->dev);
+
+       return 0;
+
+err:
+       while (i--) {
+               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+               i2c_del_adapter(&bus->adapter);
+       }
+       kfree(dev_priv->gmbus);
+       dev_priv->gmbus = NULL;
+       return ret;
 }
 
-/**
- * intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
- *
- * Unregister the adapter from the i2c layer, then free the structure.
- */
-void intel_i2c_destroy(struct i2c_adapter *adapter)
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+       struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+       /* speed:
+        * 0x0 = 100 kHz
+        * 0x1 = 50 kHz
+        * 0x2 = 400 kHz
+        * 0x3 = 1000 kHz
+        */
+       bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
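The helper is a read-modify-write of the two-bit rate field in bits 9:8 of reg0; the port select in the low byte is preserved. A toy version with made-up macro names (the driver itself passes the GMBUS_RATE_* defines as the speed argument):

    #include <stdio.h>
    #include <stdint.h>

    #define RATE_SHIFT 8
    #define RATE_MASK  (0x3u << RATE_SHIFT)

    static uint32_t set_rate(uint32_t reg0, uint32_t speed)
    {
            /* clear only bits 9:8, then or in the new rate code */
            return (reg0 & ~RATE_MASK) | (speed << RATE_SHIFT);
    }

    int main(void)
    {
            uint32_t reg0 = 0x5;             /* port 5, rate code 0 (100 kHz) */

            reg0 = set_rate(reg0, 0x2);      /* switch to 400 kHz */
            printf("reg0 = 0x%03x\n", reg0); /* 0x205: rate changed, port kept */
            return 0;
    }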
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+       struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+       if (force_bit) {
+               if (bus->force_bit == NULL) {
+                       struct drm_i915_private *dev_priv = adapter->algo_data;
+                       bus->force_bit = intel_gpio_create(dev_priv,
+                                                          bus->reg0 & 0xff);
+               }
+       } else {
+               if (bus->force_bit) {
+                       i2c_del_adapter(bus->force_bit);
+                       kfree(bus->force_bit);
+                       bus->force_bit = NULL;
+               }
+       }
+}
+
+void intel_teardown_gmbus(struct drm_device *dev)
 {
-       struct intel_i2c_chan *chan;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
 
-       if (!adapter)
+       if (dev_priv->gmbus == NULL)
                return;
 
-       chan = container_of(adapter,
-                           struct intel_i2c_chan,
-                           adapter);
-       i2c_del_adapter(&chan->adapter);
-       kfree(chan);
+       for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+               if (bus->force_bit) {
+                       i2c_del_adapter(bus->force_bit);
+                       kfree(bus->force_bit);
+               }
+               i2c_del_adapter(&bus->adapter);
+       }
+
+       kfree(dev_priv->gmbus);
+       dev_priv->gmbus = NULL;
 }
index 6ec39a86ed06d2bd6e716611f3ab4d384d950636..f1a649990ea9e61d5f7ff6f8b15e127d6fcc2f17 100644
 /* Private structure for the integrated LVDS support */
 struct intel_lvds {
        struct intel_encoder base;
+
+       struct edid *edid;
+
        int fitting_mode;
        u32 pfit_control;
        u32 pfit_pgm_ratios;
+       bool pfit_dirty;
+
+       struct drm_display_mode *fixed_mode;
 };
 
-static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
 {
-       return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
-}
-
-/**
- * Sets the backlight level.
- *
- * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
- */
-static void intel_lvds_set_backlight(struct drm_device *dev, int level)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 blc_pwm_ctl, reg;
-
-       if (HAS_PCH_SPLIT(dev))
-               reg = BLC_PWM_CPU_CTL;
-       else
-               reg = BLC_PWM_CTL;
-
-       blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-       I915_WRITE(reg, (blc_pwm_ctl |
-                                (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+       return container_of(encoder, struct intel_lvds, base.base);
 }
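to_intel_lvds() steps from the embedded drm_encoder back to the wrapping structure with container_of(), which is pure pointer arithmetic on the member offset. A self-contained illustration with toy structs; the simplified container_of below mirrors the kernel macro without its type checking:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base { int id; };
    struct encoder { struct base base; };           /* like intel_encoder */
    struct lvds { struct encoder base; int mode; }; /* like intel_lvds */

    int main(void)
    {
            struct lvds panel = { .mode = 42 };
            struct base *enc = &panel.base.base; /* what the core hands back */

            /* recover the wrapper, as to_intel_lvds() does via base.base */
            struct lvds *lv = container_of(enc, struct lvds, base.base);
            printf("mode = %d\n", lv->mode); /* 42 */
            return 0;
    }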
 
-/**
- * Returns the maximum level of the backlight duty cycle field.
- */
-static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 reg;
-
-       if (HAS_PCH_SPLIT(dev))
-               reg = BLC_PWM_PCH_CTL2;
-       else
-               reg = BLC_PWM_CTL;
-
-       return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
-               BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+       return container_of(intel_attached_encoder(connector),
+                           struct intel_lvds, base);
 }
 
 /**
  * Sets the power state for the panel.
  */
-static void intel_lvds_set_power(struct drm_device *dev, bool on)
+static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
 {
+       struct drm_device *dev = intel_lvds->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 ctl_reg, status_reg, lvds_reg;
+       u32 ctl_reg, lvds_reg;
 
        if (HAS_PCH_SPLIT(dev)) {
                ctl_reg = PCH_PP_CONTROL;
-               status_reg = PCH_PP_STATUS;
                lvds_reg = PCH_LVDS;
        } else {
                ctl_reg = PP_CONTROL;
-               status_reg = PP_STATUS;
                lvds_reg = LVDS;
        }
 
        if (on) {
                I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
-               POSTING_READ(lvds_reg);
-
-               I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
-                          POWER_TARGET_ON);
-               if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
-                       DRM_ERROR("timed out waiting to enable LVDS pipe");
-
-               intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
+               I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+               intel_panel_set_backlight(dev, dev_priv->backlight_level);
        } else {
-               intel_lvds_set_backlight(dev, 0);
+               dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+               intel_panel_set_backlight(dev, 0);
+               I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
 
-               I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
-                          ~POWER_TARGET_ON);
-               if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
-                       DRM_ERROR("timed out waiting for LVDS pipe to turn off");
+               if (intel_lvds->pfit_control) {
+                       if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+                               DRM_ERROR("timed out waiting for panel to power off\n");
+                       I915_WRITE(PFIT_CONTROL, 0);
+                       intel_lvds->pfit_control = 0;
+                       intel_lvds->pfit_dirty = false;
+               }
 
                I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
-               POSTING_READ(lvds_reg);
        }
+       POSTING_READ(lvds_reg);
 }
 
 static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
 {
-       struct drm_device *dev = encoder->dev;
+       struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
        if (mode == DRM_MODE_DPMS_ON)
-               intel_lvds_set_power(dev, true);
+               intel_lvds_set_power(intel_lvds, true);
        else
-               intel_lvds_set_power(dev, false);
+               intel_lvds_set_power(intel_lvds, false);
 
        /* XXX: We never power down the LVDS pairs. */
 }
@@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
 static int intel_lvds_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+       struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+       struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
 
-       if (fixed_mode) {
-               if (mode->hdisplay > fixed_mode->hdisplay)
-                       return MODE_PANEL;
-               if (mode->vdisplay > fixed_mode->vdisplay)
-                       return MODE_PANEL;
-       }
+       if (mode->hdisplay > fixed_mode->hdisplay)
+               return MODE_PANEL;
+       if (mode->vdisplay > fixed_mode->vdisplay)
+               return MODE_PANEL;
 
        return MODE_OK;
 }
@@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-       struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+       struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
        struct drm_encoder *tmp_encoder;
        u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
 
        /* Should never happen!! */
-       if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
+       if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
                DRM_ERROR("Can't support LVDS on pipe A\n");
                return false;
        }
@@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
                        return false;
                }
        }
-       /* If we don't have a panel mode, there is nothing we can do */
-       if (dev_priv->panel_fixed_mode == NULL)
-               return true;
 
        /*
         * We have timings from the BIOS for the panel, put them in
@@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
         * with the panel scaling set up to source from the H/VDisplay
         * of the original mode.
         */
-       intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+       intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
 
        if (HAS_PCH_SPLIT(dev)) {
                intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
@@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
        }
 
        /* Make sure pre-965s set dither correctly */
-       if (!IS_I965G(dev)) {
-               if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+       if (INTEL_INFO(dev)->gen < 4) {
+               if (dev_priv->lvds_dither)
                        pfit_control |= PANEL_8TO6_DITHER_ENABLE;
        }
 
@@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
                goto out;
 
        /* 965+ wants fuzzy fitting */
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
                                 PFIT_FILTER_FUZZY);
 
@@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 
        case DRM_MODE_SCALE_ASPECT:
                /* Scale but preserve the aspect ratio */
-               if (IS_I965G(dev)) {
+               if (INTEL_INFO(dev)->gen >= 4) {
                        u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
                        u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
 
@@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
                 * Fortunately this is all done for us in hw.
                 */
                pfit_control |= PFIT_ENABLE;
-               if (IS_I965G(dev))
+               if (INTEL_INFO(dev)->gen >= 4)
                        pfit_control |= PFIT_SCALING_AUTO;
                else
                        pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
@@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
        }
 
 out:
-       intel_lvds->pfit_control = pfit_control;
-       intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+       if (pfit_control != intel_lvds->pfit_control ||
+           pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
+               intel_lvds->pfit_control = pfit_control;
+               intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+               intel_lvds->pfit_dirty = true;
+       }
        dev_priv->lvds_border_bits = border;
 
        /*
@@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 reg;
-
-       if (HAS_PCH_SPLIT(dev))
-               reg = BLC_PWM_CPU_CTL;
-       else
-               reg = BLC_PWM_CTL;
-
-       dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
-       dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
-                                      BACKLIGHT_DUTY_CYCLE_MASK);
+       struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+       dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+       /* We try to do the minimum that is necessary in order to unlock
+        * the registers for mode setting.
+        *
+        * On Ironlake, this is quite simple as we just set the unlock key
+        * and ignore all subtleties. (This may cause some issues...)
+        *
+        * Prior to Ironlake, we must disable the pipe if we want to adjust
+        * the panel fitter. However at all other times we can just reset
+        * the registers regardless.
+        */
 
-       intel_lvds_set_power(dev, false);
+       if (HAS_PCH_SPLIT(dev)) {
+               I915_WRITE(PCH_PP_CONTROL,
+                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+       } else if (intel_lvds->pfit_dirty) {
+               I915_WRITE(PP_CONTROL,
+                          (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
+                          & ~POWER_TARGET_ON);
+       } else {
+               I915_WRITE(PP_CONTROL,
+                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+       }
 }
 
-static void intel_lvds_commit( struct drm_encoder *encoder)
+static void intel_lvds_commit(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
-       if (dev_priv->backlight_duty_cycle == 0)
-               dev_priv->backlight_duty_cycle =
-                       intel_lvds_get_max_backlight(dev);
+       if (dev_priv->backlight_level == 0)
+               dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+       /* Undo any unlocking done in prepare to prevent accidental
+        * adjustment of the registers.
+        */
+       if (HAS_PCH_SPLIT(dev)) {
+               u32 val = I915_READ(PCH_PP_CONTROL);
+               if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+                       I915_WRITE(PCH_PP_CONTROL, val & 0x3);
+       } else {
+               u32 val = I915_READ(PP_CONTROL);
+               if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+                       I915_WRITE(PP_CONTROL, val & 0x3);
+       }
 
-       intel_lvds_set_power(dev, true);
+       /* Always do a full power on as we do not know what state
+        * we were left in.
+        */
+       intel_lvds_set_power(intel_lvds, true);
 }
 
 static void intel_lvds_mode_set(struct drm_encoder *encoder,
@@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
 {
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+       struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
        /*
         * The LVDS pin pair will already have been turned on in the
@@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
        if (HAS_PCH_SPLIT(dev))
                return;
 
+       if (!intel_lvds->pfit_dirty)
+               return;
+
        /*
         * Enable automatic panel scaling so that non-native modes fill the
         * screen.  Should be enabled before the pipe is enabled, according to
         * register description and PRM.
         */
+       DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+                     intel_lvds->pfit_control,
+                     intel_lvds->pfit_pgm_ratios);
+       if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+               DRM_ERROR("timed out waiting for panel to power off\n");
+
        I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
        I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+       intel_lvds->pfit_dirty = false;
 }
 
 /**
@@ -465,38 +477,22 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
  */
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
+       struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
        struct drm_device *dev = connector->dev;
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
-
-       if (dev_priv->lvds_edid_good) {
-               ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+       struct drm_display_mode *mode;
 
-               if (ret)
-                       return ret;
+       if (intel_lvds->edid) {
+               drm_mode_connector_update_edid_property(connector,
+                                                       intel_lvds->edid);
+               return drm_add_edid_modes(connector, intel_lvds->edid);
        }
 
-       /* Didn't get an EDID, so
-        * Set wide sync ranges so we get all modes
-        * handed to valid_mode for checking
-        */
-       connector->display_info.min_vfreq = 0;
-       connector->display_info.max_vfreq = 200;
-       connector->display_info.min_hfreq = 0;
-       connector->display_info.max_hfreq = 200;
-
-       if (dev_priv->panel_fixed_mode != NULL) {
-               struct drm_display_mode *mode;
-
-               mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
-               drm_mode_probed_add(connector, mode);
-
-               return 1;
-       }
+       mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+       if (mode == NULL)
+               return 0;
 
-       return 0;
+       drm_mode_probed_add(connector, mode);
+       return 1;
 }
 
 static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
@@ -587,18 +583,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
                                   struct drm_property *property,
                                   uint64_t value)
 {
+       struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
        struct drm_device *dev = connector->dev;
 
-       if (property == dev->mode_config.scaling_mode_property &&
-                               connector->encoder) {
-               struct drm_crtc *crtc = connector->encoder->crtc;
-               struct drm_encoder *encoder = connector->encoder;
-               struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+       if (property == dev->mode_config.scaling_mode_property) {
+               struct drm_crtc *crtc = intel_lvds->base.base.crtc;
 
                if (value == DRM_MODE_SCALE_NONE) {
                        DRM_DEBUG_KMS("no scaling not supported\n");
-                       return 0;
+                       return -EINVAL;
                }
+
                if (intel_lvds->fitting_mode == value) {
                        /* the LVDS scaling property is not changed */
                        return 0;
@@ -628,7 +623,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
        .get_modes = intel_lvds_get_modes,
        .mode_valid = intel_lvds_mode_valid,
-       .best_encoder = intel_attached_encoder,
+       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -726,16 +721,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
  * Find the reduced downclock for LVDS in EDID.
  */
 static void intel_find_lvds_downclock(struct drm_device *dev,
-                               struct drm_connector *connector)
+                                     struct drm_display_mode *fixed_mode,
+                                     struct drm_connector *connector)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_display_mode *scan, *panel_fixed_mode;
+       struct drm_display_mode *scan;
        int temp_downclock;
 
-       panel_fixed_mode = dev_priv->panel_fixed_mode;
-       temp_downclock = panel_fixed_mode->clock;
-
-       mutex_lock(&dev->mode_config.mutex);
+       temp_downclock = fixed_mode->clock;
        list_for_each_entry(scan, &connector->probed_modes, head) {
                /*
                 * If one mode has the same resolution with the fixed_panel
@@ -744,14 +737,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
                 * case we can set the different FPx0/1 to dynamically select
                 * between low and high frequency.
                 */
-               if (scan->hdisplay == panel_fixed_mode->hdisplay &&
-                       scan->hsync_start == panel_fixed_mode->hsync_start &&
-                       scan->hsync_end == panel_fixed_mode->hsync_end &&
-                       scan->htotal == panel_fixed_mode->htotal &&
-                       scan->vdisplay == panel_fixed_mode->vdisplay &&
-                       scan->vsync_start == panel_fixed_mode->vsync_start &&
-                       scan->vsync_end == panel_fixed_mode->vsync_end &&
-                       scan->vtotal == panel_fixed_mode->vtotal) {
+               if (scan->hdisplay == fixed_mode->hdisplay &&
+                   scan->hsync_start == fixed_mode->hsync_start &&
+                   scan->hsync_end == fixed_mode->hsync_end &&
+                   scan->htotal == fixed_mode->htotal &&
+                   scan->vdisplay == fixed_mode->vdisplay &&
+                   scan->vsync_start == fixed_mode->vsync_start &&
+                   scan->vsync_end == fixed_mode->vsync_end &&
+                   scan->vtotal == fixed_mode->vtotal) {
                        if (scan->clock < temp_downclock) {
                                /*
                                 * The downclock is already found. But we
@@ -761,17 +754,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
                        }
                }
        }
-       mutex_unlock(&dev->mode_config.mutex);
-       if (temp_downclock < panel_fixed_mode->clock &&
-           i915_lvds_downclock) {
+       if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
                /* We found the downclock for LVDS. */
                dev_priv->lvds_downclock_avail = 1;
                dev_priv->lvds_downclock = temp_downclock;
                DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
-                               "Normal clock %dKhz, downclock %dKhz\n",
-                               panel_fixed_mode->clock, temp_downclock);
+                             "Normal clock %dKhz, downclock %dKhz\n",
+                             fixed_mode->clock, temp_downclock);
        }
-       return;
 }
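A probed mode only qualifies as a downclock candidate when all eight timing fields match the fixed panel mode and its pixel clock is lower; the lowest such clock wins. The comparison condensed into a standalone sketch (the struct and sample timings are illustrative, not taken from a real panel):

    #include <stdbool.h>
    #include <stdio.h>

    struct mode {   /* only the fields intel_find_lvds_downclock() compares */
            int clock;
            int hdisplay, hsync_start, hsync_end, htotal;
            int vdisplay, vsync_start, vsync_end, vtotal;
    };

    static bool same_timings(const struct mode *a, const struct mode *b)
    {
            return a->hdisplay == b->hdisplay &&
                   a->hsync_start == b->hsync_start &&
                   a->hsync_end == b->hsync_end &&
                   a->htotal == b->htotal &&
                   a->vdisplay == b->vdisplay &&
                   a->vsync_start == b->vsync_start &&
                   a->vsync_end == b->vsync_end &&
                   a->vtotal == b->vtotal;
    }

    int main(void)
    {
            struct mode fixed  = { 68940, 1280, 1296, 1344, 1407,
                                   800, 801, 804, 816 };
            struct mode scan[] = {
                    { 68940, 1280, 1296, 1344, 1407, 800, 801, 804, 816 },
                    { 56420, 1280, 1296, 1344, 1407, 800, 801, 804, 816 },
            };
            int downclock = fixed.clock;

            for (unsigned i = 0; i < 2; i++)
                    if (same_timings(&scan[i], &fixed) &&
                        scan[i].clock < downclock)
                            downclock = scan[i].clock; /* keep lowest match */

            printf("downclock %d kHz (normal %d kHz)\n",
                   downclock, fixed.clock);
            return 0;
    }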
 
 /*
@@ -780,38 +770,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
- * If it is present, return 1.
+ * If it is present, return true.
  * If it is not present, return false.
  * If no child dev is parsed from VBT, it assumes that the LVDS is present.
- * Note: The addin_offset should also be checked for LVDS panel.
- * Only when it is non-zero, it is assumed that it is present.
  */
-static int lvds_is_present_in_vbt(struct drm_device *dev)
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+                                  u8 *i2c_pin)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct child_device_config *p_child;
-       int i, ret;
+       int i;
 
        if (!dev_priv->child_dev_num)
-               return 1;
+               return true;
 
-       ret = 0;
        for (i = 0; i < dev_priv->child_dev_num; i++) {
-               p_child = dev_priv->child_dev + i;
-               /*
-                * If the device type is not LFP, continue.
-                * If the device type is 0x22, it is also regarded as LFP.
+               struct child_device_config *child = dev_priv->child_dev + i;
+
+               /* If the device type is not LFP, continue.
+                * We have to check both the new identifiers as well as the
+                * old for compatibility with some BIOSes.
                 */
-               if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
-                       p_child->device_type != DEVICE_TYPE_LFP)
+               if (child->device_type != DEVICE_TYPE_INT_LFP &&
+                   child->device_type != DEVICE_TYPE_LFP)
                        continue;
 
-               /* The addin_offset should be checked. Only when it is
-                * non-zero, it is regarded as present.
+               if (child->i2c_pin)
+                   *i2c_pin = child->i2c_pin;
+
+               /* However, we cannot trust the BIOS writers to populate
+                * the VBT correctly.  Since LVDS requires additional
+                * information from AIM blocks, a non-zero addin offset is
+                * a good indicator that the LVDS is actually present.
                 */
-               if (p_child->addin_offset) {
-                       ret = 1;
-                       break;
-               }
+               if (child->addin_offset)
+                       return true;
+
+               /* But even then some BIOS writers perform some black magic
+                * and instantiate the device without reference to any
+                * additional data.  Trust that if the VBT was written into
+                * the OpRegion then they have validated the LVDS's existence.
+                */
+               if (dev_priv->opregion.vbt)
+                       return true;
        }
-       return ret;
+
+       return false;
+}
+
+static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u8 buf = 0;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = 0x50, /* EDID; i2c_msg takes a 7-bit address */
+                       .flags = 0,
+                       .len = 1,
+                       .buf = &buf,
+               },
+       };
+       struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
+       /* XXX this only appears to work when using GMBUS */
+       if (intel_gmbus_is_forced_bit(i2c))
+               return true;
+       return i2c_transfer(i2c, msgs, 1) == 1;
 }
 
 /**
@@ -832,13 +851,15 @@ void intel_lvds_init(struct drm_device *dev)
        struct drm_display_mode *scan; /* *modes, *bios_mode; */
        struct drm_crtc *crtc;
        u32 lvds;
-       int pipe, gpio = GPIOC;
+       int pipe;
+       u8 pin;
 
        /* Skip init on machines we know falsely report LVDS */
        if (dmi_check_system(intel_no_lvds))
                return;
 
-       if (!lvds_is_present_in_vbt(dev)) {
+       pin = GMBUS_PORT_PANEL;
+       if (!lvds_is_present_in_vbt(dev, &pin)) {
                DRM_DEBUG_KMS("LVDS is not present in VBT\n");
                return;
        }
@@ -846,11 +867,15 @@ void intel_lvds_init(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
                        return;
-               if (dev_priv->edp_support) {
+               if (dev_priv->edp.support) {
                        DRM_DEBUG_KMS("disable LVDS for eDP support\n");
                        return;
                }
-               gpio = PCH_GPIOC;
+       }
+
+       if (!intel_lvds_ddc_probe(dev, pin)) {
+               DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
+               return;
        }
 
        intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
@@ -864,16 +889,20 @@ void intel_lvds_init(struct drm_device *dev)
                return;
        }
 
+       if (!HAS_PCH_SPLIT(dev)) {
+               intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+       }
+
        intel_encoder = &intel_lvds->base;
-       encoder = &intel_encoder->enc;
+       encoder = &intel_encoder->base;
        connector = &intel_connector->base;
        drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
 
-       drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
+       drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
                         DRM_MODE_ENCODER_LVDS);
 
-       drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+       intel_connector_attach_encoder(intel_connector, intel_encoder);
        intel_encoder->type = INTEL_OUTPUT_LVDS;
 
        intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
@@ -904,43 +933,41 @@ void intel_lvds_init(struct drm_device *dev)
         *    if closed, act like it's not there for now
         */
 
-       /* Set up the DDC bus. */
-       intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
-       if (!intel_encoder->ddc_bus) {
-               dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-                          "failed.\n");
-               goto failed;
-       }
-
        /*
         * Attempt to get the fixed panel mode from DDC.  Assume that the
         * preferred mode is the right one.
         */
-       dev_priv->lvds_edid_good = true;
+       intel_lvds->edid = drm_get_edid(connector,
+                                       &dev_priv->gmbus[pin].adapter);
 
-       if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
-               dev_priv->lvds_edid_good = false;
+       if (!intel_lvds->edid) {
+               /* Didn't get an EDID, so set wide sync ranges to ensure
+                * all probed modes are handed to mode_valid for checking.
+                */
+               connector->display_info.min_vfreq = 0;
+               connector->display_info.max_vfreq = 200;
+               connector->display_info.min_hfreq = 0;
+               connector->display_info.max_hfreq = 200;
+       }
 
        list_for_each_entry(scan, &connector->probed_modes, head) {
-               mutex_lock(&dev->mode_config.mutex);
                if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-                       dev_priv->panel_fixed_mode =
+                       intel_lvds->fixed_mode =
                                drm_mode_duplicate(dev, scan);
-                       mutex_unlock(&dev->mode_config.mutex);
-                       intel_find_lvds_downclock(dev, connector);
+                       intel_find_lvds_downclock(dev,
+                                                 intel_lvds->fixed_mode,
+                                                 connector);
                        goto out;
                }
-               mutex_unlock(&dev->mode_config.mutex);
        }
 
        /* Failed to get EDID, what about VBT? */
        if (dev_priv->lfp_lvds_vbt_mode) {
-               mutex_lock(&dev->mode_config.mutex);
-               dev_priv->panel_fixed_mode =
+               intel_lvds->fixed_mode =
                        drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-               mutex_unlock(&dev->mode_config.mutex);
-               if (dev_priv->panel_fixed_mode) {
-                       dev_priv->panel_fixed_mode->type |=
+               if (intel_lvds->fixed_mode) {
+                       intel_lvds->fixed_mode->type |=
                                DRM_MODE_TYPE_PREFERRED;
                        goto out;
                }
@@ -958,19 +985,19 @@ void intel_lvds_init(struct drm_device *dev)
 
        lvds = I915_READ(LVDS);
        pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
-       crtc = intel_get_crtc_from_pipe(dev, pipe);
+       crtc = intel_get_crtc_for_pipe(dev, pipe);
 
        if (crtc && (lvds & LVDS_PORT_EN)) {
-               dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
-               if (dev_priv->panel_fixed_mode) {
-                       dev_priv->panel_fixed_mode->type |=
+               intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
+               if (intel_lvds->fixed_mode) {
+                       intel_lvds->fixed_mode->type |=
                                DRM_MODE_TYPE_PREFERRED;
                        goto out;
                }
        }
 
        /* If we still don't have a mode after all that, give up. */
-       if (!dev_priv->panel_fixed_mode)
+       if (!intel_lvds->fixed_mode)
                goto failed;
 
 out:
@@ -997,8 +1024,6 @@ out:
 
 failed:
        DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
-       if (intel_encoder->ddc_bus)
-               intel_i2c_destroy(intel_encoder->ddc_bus);
        drm_connector_cleanup(connector);
        drm_encoder_cleanup(encoder);
        kfree(intel_lvds);
index 4b1fd3d9c73cb7b5b6d70c2576f4262e8b01fc02..f70b7cf32bffc43a4325233e0ba11ab5726ce64f 100644
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2007 Intel Corporation
+ * Copyright (c) 2007, 2010 Intel Corporation
  *   Jesse Barnes <jesse.barnes@intel.com>
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * intel_ddc_probe
  *
  */
-bool intel_ddc_probe(struct intel_encoder *intel_encoder)
+bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
 {
+       struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
        u8 out_buf[] = { 0x0, 0x0};
        u8 buf[2];
-       int ret;
        struct i2c_msg msgs[] = {
                {
                        .addr = 0x50,
@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
                }
        };
 
-       intel_i2c_quirk_set(intel_encoder->enc.dev, true);
-       ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
-       intel_i2c_quirk_set(intel_encoder->enc.dev, false);
-       if (ret == 2)
-               return true;
-
-       return false;
+       return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
 }
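The probe is a combined transaction: a one-byte write of offset zero to the EDID address 0x50, then a read back, counted as success only if both messages complete. Roughly the same exchange can be attempted from userspace through i2c-dev when the DDC bus is exposed (sketch only; /dev/i2c-1 is an assumed node and the probe needs access rights):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/i2c.h>
    #include <linux/i2c-dev.h>

    int main(void)
    {
            int fd = open("/dev/i2c-1", O_RDWR);    /* assumed DDC bus */
            if (fd < 0) { perror("open"); return 1; }

            unsigned char out[1] = { 0x00 }, in[2];
            struct i2c_msg msgs[2] = {
                    { .addr = 0x50, .flags = 0,        .len = 1, .buf = out },
                    { .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = in  },
            };
            struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

            /* like intel_ddc_probe(): success == all messages transferred */
            if (ioctl(fd, I2C_RDWR, &xfer) == 2)
                    printf("EDID responds: %02x %02x\n", in[0], in[1]);
            else
                    perror("no DDC response");
            close(fd);
            return 0;
    }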
 
 /**
@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
        struct edid *edid;
        int ret = 0;
 
-       intel_i2c_quirk_set(connector->dev, true);
        edid = drm_get_edid(connector, adapter);
-       intel_i2c_quirk_set(connector->dev, false);
        if (edid) {
                drm_mode_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
new file mode 100644
index 0000000..917c7dc
--- /dev/null
@@ -0,0 +1,517 @@
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <acpi/video.h>
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET   0x100
+#define OPREGION_SWSCI_OFFSET  0x200
+#define OPREGION_ASLE_OFFSET   0x300
+#define OPREGION_VBT_OFFSET    0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI      (1<<0)
+#define MBOX_SWSCI     (1<<1)
+#define MBOX_ASLE      (1<<2)
+
+struct opregion_header {
+       u8 signature[16];
+       u32 size;
+       u32 opregion_ver;
+       u8 bios_ver[32];
+       u8 vbios_ver[16];
+       u8 driver_ver[16];
+       u32 mboxes;
+       u8 reserved[164];
+} __attribute__((packed));
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+       u32 drdy;       /* driver readiness */
+       u32 csts;       /* notification status */
+       u32 cevt;       /* current event */
+       u8 rsvd1[20];
+       u32 didl[8];    /* supported display devices ID list */
+       u32 cpdl[8];    /* currently presented display list */
+       u32 cadl[8];    /* currently active display list */
+       u32 nadl[8];    /* next active devices list */
+       u32 aslp;       /* ASL sleep time-out */
+       u32 tidx;       /* toggle table index */
+       u32 chpd;       /* current hotplug enable indicator */
+       u32 clid;       /* current lid state*/
+       u32 cdck;       /* current docking state */
+       u32 sxsw;       /* Sx state resume */
+       u32 evts;       /* ASL supported events */
+       u32 cnot;       /* current OS notification */
+       u32 nrdy;       /* driver status */
+       u8 rsvd2[60];
+} __attribute__((packed));
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+       u32 scic;       /* SWSCI command|status|data */
+       u32 parm;       /* command parameters */
+       u32 dslp;       /* driver sleep time-out */
+       u8 rsvd[244];
+} __attribute__((packed));
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+       u32 ardy;       /* driver readiness */
+       u32 aslc;       /* ASLE interrupt command */
+       u32 tche;       /* technology enabled indicator */
+       u32 alsi;       /* current ALS illuminance reading */
+       u32 bclp;       /* backlight brightness to set */
+       u32 pfit;       /* panel fitting state */
+       u32 cblv;       /* current brightness level */
+       u16 bclm[20];   /* backlight level duty cycle mapping table */
+       u32 cpfm;       /* current panel fitting mode */
+       u32 epfm;       /* enabled panel fitting modes */
+       u8 plut[74];    /* panel LUT and identifier */
+       u32 pfmb;       /* PWM freq and min brightness */
+       u8 rsvd[102];
+} __attribute__((packed));
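The mailboxes sit back to back at 0x100-byte offsets behind a 0x100-byte header, and the reserved arrays pad each struct to exactly that slot size. That invariant is easy to verify at compile time; a sketch with two of the structs condensed from above (C11 static_assert):

    #include <assert.h>
    #include <stdint.h>

    struct opregion_header {
            uint8_t  signature[16];
            uint32_t size, opregion_ver;
            uint8_t  bios_ver[32], vbios_ver[16], driver_ver[16];
            uint32_t mboxes;
            uint8_t  reserved[164];
    } __attribute__((packed));

    struct opregion_asle {
            uint32_t ardy, aslc, tche, alsi, bclp, pfit, cblv;
            uint16_t bclm[20];
            uint32_t cpfm, epfm;
            uint8_t  plut[74];
            uint32_t pfmb;
            uint8_t  rsvd[102];
    } __attribute__((packed));

    /* Each mailbox must fill its 0x100-byte slot exactly. */
    static_assert(sizeof(struct opregion_header) == 0x100, "header slot");
    static_assert(sizeof(struct opregion_asle)   == 0x100, "ASLE slot");

    int main(void) { return 0; }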
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM     (1 << 0)
+#define ASLE_SET_BACKLIGHT     (1 << 1)
+#define ASLE_SET_PFIT          (1 << 2)
+#define ASLE_SET_PWM_FREQ      (1 << 3)
+#define ASLE_REQ_MSK           0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAILED  (1<<10)
+#define ASLE_BACKLIGHT_FAILED  (1<<12)
+#define ASLE_PFIT_FAILED       (1<<14)
+#define ASLE_PWM_FREQ_FAILED   (1<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID                (1<<31)
+#define ASLE_BCLP_MSK          (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID         (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID         (1<<31)
+
+#define ACPI_OTHER_OUTPUT (0<<8)
+#define ACPI_VGA_OUTPUT (1<<8)
+#define ACPI_TV_OUTPUT (2<<8)
+#define ACPI_DIGITAL_OUTPUT (3<<8)
+#define ACPI_LVDS_OUTPUT (4<<8)
+
+#ifdef CONFIG_ACPI
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct opregion_asle *asle = dev_priv->opregion.asle;
+       u32 max;
+
+       if (!(bclp & ASLE_BCLP_VALID))
+               return ASLE_BACKLIGHT_FAILED;
+
+       bclp &= ASLE_BCLP_MSK;
+       if (bclp > 255)
+               return ASLE_BACKLIGHT_FAILED;
+
+       max = intel_panel_get_max_backlight(dev);
+       intel_panel_set_backlight(dev, bclp * max / 255);
+       asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+       return 0;
+}
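The rescaling is linear in both directions: the 0..255 bclp request is mapped onto the hardware's maximum duty cycle, and the value written back to cblv reports the same request as a 0..100 percentage. A quick arithmetic check (the hardware maximum here is a made-up figure):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t max = 4882;    /* made-up hardware max duty cycle */

            for (uint32_t bclp = 0; bclp <= 255; bclp += 85) {
                    uint32_t hw  = bclp * max / 255;   /* PWM level to set */
                    uint32_t pct = bclp * 0x64 / 0xff; /* 0..100 for cblv */
                    printf("bclp %3u -> hw %4u (%3u%%)\n", bclp, hw, pct);
            }
            return 0;
    }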
+
+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+{
+       /* alsi is the current ALS reading in lux. 0 indicates below sensor
+          range, 0xffff indicates above sensor range. 1-0xfffe are valid */
+       return 0;
+}
+
+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (pfmb & ASLE_PFMB_PWM_VALID) {
+               u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+               u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
+               blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
+               pwm = pwm >> 9;
+               /* FIXME - what do we do with the PWM? */
+       }
+       return 0;
+}
+
+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+{
+       /* Panel fitting is currently controlled by the X code, so this is a
+          noop until modesetting support works fully */
+       if (!(pfit & ASLE_PFIT_VALID))
+               return ASLE_PFIT_FAILED;
+       return 0;
+}
+
+void intel_opregion_asle_intr(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct opregion_asle *asle = dev_priv->opregion.asle;
+       u32 asle_stat = 0;
+       u32 asle_req;
+
+       if (!asle)
+               return;
+
+       asle_req = asle->aslc & ASLE_REQ_MSK;
+
+       if (!asle_req) {
+               DRM_DEBUG_DRIVER("non asle set request??\n");
+               return;
+       }
+
+       if (asle_req & ASLE_SET_ALS_ILLUM)
+               asle_stat |= asle_set_als_illum(dev, asle->alsi);
+
+       if (asle_req & ASLE_SET_BACKLIGHT)
+               asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+       if (asle_req & ASLE_SET_PFIT)
+               asle_stat |= asle_set_pfit(dev, asle->pfit);
+
+       if (asle_req & ASLE_SET_PWM_FREQ)
+               asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+
+       asle->aslc = asle_stat;
+}
+
+/* Only present on Ironlake+ */
+void intel_opregion_gse_intr(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct opregion_asle *asle = dev_priv->opregion.asle;
+       u32 asle_stat = 0;
+       u32 asle_req;
+
+       if (!asle)
+               return;
+
+       asle_req = asle->aslc & ASLE_REQ_MSK;
+
+       if (!asle_req) {
+               DRM_DEBUG_DRIVER("non asle set request??\n");
+               return;
+       }
+
+       if (asle_req & ASLE_SET_ALS_ILLUM) {
+               DRM_DEBUG_DRIVER("Illum is not supported\n");
+               asle_stat |= ASLE_ALS_ILLUM_FAILED;
+       }
+
+       if (asle_req & ASLE_SET_BACKLIGHT)
+               asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+       if (asle_req & ASLE_SET_PFIT) {
+               DRM_DEBUG_DRIVER("Pfit is not supported\n");
+               asle_stat |= ASLE_PFIT_FAILED;
+       }
+
+       if (asle_req & ASLE_SET_PWM_FREQ) {
+               DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+               asle_stat |= ASLE_PWM_FREQ_FAILED;
+       }
+
+       asle->aslc = asle_stat;
+}
+
+#define ASLE_ALS_EN    (1<<0)
+#define ASLE_BLC_EN    (1<<1)
+#define ASLE_PFIT_EN   (1<<2)
+#define ASLE_PFMB_EN   (1<<3)
+
+void intel_opregion_enable_asle(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct opregion_asle *asle = dev_priv->opregion.asle;
+
+       if (asle) {
+               if (IS_MOBILE(dev)) {
+                       unsigned long irqflags;
+
+                       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+                       intel_enable_asle(dev);
+                       spin_unlock_irqrestore(&dev_priv->user_irq_lock,
+                                              irqflags);
+               }
+
+               asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+                       ASLE_PFMB_EN;
+               asle->ardy = 1;
+       }
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID            (1<<1)
+#define ACPI_EV_DOCK           (1<<2)
+
+static struct intel_opregion *system_opregion;
+
+static int intel_opregion_video_event(struct notifier_block *nb,
+                                     unsigned long val, void *data)
+{
+       /* The only video events relevant to opregion are 0x80. These indicate
+          either a docking event, lid switch or display switch request. In
+          Linux, these are handled by the dock, button and video drivers.
+          We might want to fix the video driver to be opregion-aware in
+          future, but right now we just indicate to the firmware that the
+          request has been handled */
+
+       struct opregion_acpi *acpi;
+
+       if (!system_opregion)
+               return NOTIFY_DONE;
+
+       acpi = system_opregion->acpi;
+       acpi->csts = 0;
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block intel_opregion_notifier = {
+       .notifier_call = intel_opregion_video_event,
+};
+
+/*
+ * Initialise the DIDL field in opregion. This passes a list of devices to
+ * the firmware. Values are defined by section B.4.2 of the ACPI specification
+ * (version 3)
+ */
+
+static void intel_didl_outputs(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       struct drm_connector *connector;
+       acpi_handle handle;
+       struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+       unsigned long long device_id;
+       acpi_status status;
+       int i = 0;
+
+       handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+       if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+               return;
+
+       if (acpi_is_video_device(acpi_dev))
+               acpi_video_bus = acpi_dev;
+       else {
+               list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+                       if (acpi_is_video_device(acpi_cdev)) {
+                               acpi_video_bus = acpi_cdev;
+                               break;
+                       }
+               }
+       }
+
+       if (!acpi_video_bus) {
+               printk(KERN_WARNING "No ACPI video bus found\n");
+               return;
+       }
+
+       list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+               if (i >= 8) {
+                       dev_printk(KERN_ERR, &dev->pdev->dev,
+                                  "More than 8 outputs detected\n");
+                       return;
+               }
+               status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+                                              NULL, &device_id);
+               if (ACPI_SUCCESS(status)) {
+                       if (!device_id)
+                               goto blind_set;
+                       opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+                       i++;
+               }
+       }
+
+end:
+       /* If fewer than 8 outputs, the list must be null terminated */
+       if (i < 8)
+               opregion->acpi->didl[i] = 0;
+       return;
+
+blind_set:
+       i = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               int output_type = ACPI_OTHER_OUTPUT;
+               if (i >= 8) {
+                       dev_printk(KERN_ERR, &dev->pdev->dev,
+                                  "More than 8 outputs detected\n");
+                       return;
+               }
+               switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_VGA:
+               case DRM_MODE_CONNECTOR_DVIA:
+                       output_type = ACPI_VGA_OUTPUT;
+                       break;
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+               case DRM_MODE_CONNECTOR_9PinDIN:
+                       output_type = ACPI_TV_OUTPUT;
+                       break;
+               case DRM_MODE_CONNECTOR_DVII:
+               case DRM_MODE_CONNECTOR_DVID:
+               case DRM_MODE_CONNECTOR_DisplayPort:
+               case DRM_MODE_CONNECTOR_HDMIA:
+               case DRM_MODE_CONNECTOR_HDMIB:
+                       output_type = ACPI_DIGITAL_OUTPUT;
+                       break;
+               case DRM_MODE_CONNECTOR_LVDS:
+                       output_type = ACPI_LVDS_OUTPUT;
+                       break;
+               }
+               opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+               i++;
+       }
+       goto end;
+}
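
For illustration: the blind_set path above packs one 32-bit DIDL entry per
connector, with bit 31 marking the slot as populated, the ACPI display-type
class in the middle bits, and the output index in the low bits. A minimal
standalone sketch of that encoding; the ACPI_*_OUTPUT values are assumptions
standing in for the driver's own definitions:

#include <stdio.h>
#include <stdint.h>

#define ACPI_OTHER_OUTPUT   (0u << 8)   /* assumed type encodings */
#define ACPI_VGA_OUTPUT     (1u << 8)
#define ACPI_TV_OUTPUT      (2u << 8)
#define ACPI_DIGITAL_OUTPUT (3u << 8)
#define ACPI_LVDS_OUTPUT    (4u << 8)

static uint32_t didl_entry(uint32_t output_type, unsigned int i)
{
        /* mirrors: opregion->acpi->didl[i] |= (1<<31) | output_type | i */
        return (1u << 31) | output_type | i;
}

int main(void)
{
        printf("didl[0] (VGA)  = 0x%08x\n", didl_entry(ACPI_VGA_OUTPUT, 0));
        printf("didl[1] (LVDS) = 0x%08x\n", didl_entry(ACPI_LVDS_OUTPUT, 1));
        return 0;
}
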
+
+void intel_opregion_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_opregion *opregion = &dev_priv->opregion;
+
+       if (!opregion->header)
+               return;
+
+       if (opregion->acpi) {
+               if (drm_core_check_feature(dev, DRIVER_MODESET))
+                       intel_didl_outputs(dev);
+
+               /* Notify BIOS we are ready to handle ACPI video ext notifs.
+                * Right now, all the events are handled by the ACPI video module.
+                * We don't actually need to do anything with them. */
+               opregion->acpi->csts = 0;
+               opregion->acpi->drdy = 1;
+
+               system_opregion = opregion;
+               register_acpi_notifier(&intel_opregion_notifier);
+       }
+
+       if (opregion->asle)
+               intel_opregion_enable_asle(dev);
+}
+
+void intel_opregion_fini(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_opregion *opregion = &dev_priv->opregion;
+
+       if (!opregion->header)
+               return;
+
+       if (opregion->acpi) {
+               opregion->acpi->drdy = 0;
+
+               system_opregion = NULL;
+               unregister_acpi_notifier(&intel_opregion_notifier);
+       }
+
+       /* just clear all opregion memory pointers now */
+       iounmap(opregion->header);
+       opregion->header = NULL;
+       opregion->acpi = NULL;
+       opregion->swsci = NULL;
+       opregion->asle = NULL;
+       opregion->vbt = NULL;
+}
+#endif
+
+int intel_opregion_setup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       void *base;
+       u32 asls, mboxes;
+       int err = 0;
+
+       pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+       DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+       if (asls == 0) {
+               DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+               return -ENOTSUPP;
+       }
+
+       base = ioremap(asls, OPREGION_SIZE);
+       if (!base)
+               return -ENOMEM;
+
+       if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+               DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+               err = -EINVAL;
+               goto err_out;
+       }
+       opregion->header = base;
+       opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+       mboxes = opregion->header->mboxes;
+       if (mboxes & MBOX_ACPI) {
+               DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+               opregion->acpi = base + OPREGION_ACPI_OFFSET;
+       }
+
+       if (mboxes & MBOX_SWSCI) {
+               DRM_DEBUG_DRIVER("SWSCI supported\n");
+               opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+       }
+       if (mboxes & MBOX_ASLE) {
+               DRM_DEBUG_DRIVER("ASLE supported\n");
+               opregion->asle = base + OPREGION_ASLE_OFFSET;
+       }
+
+       return 0;
+
+err_out:
+       iounmap(opregion->header);
+       return err;
+}
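
intel_opregion_setup() above probes which mailboxes the firmware advertises
and resolves each present one to a fixed offset from the mapped base. A small
userspace sketch of the same bitmask-to-offset pattern; the MBOX_* bits and
OPREGION_*_OFFSET values are assumed here for illustration:

#include <stdio.h>

#define MBOX_ACPI  (1 << 0)            /* assumed bit assignments */
#define MBOX_SWSCI (1 << 1)
#define MBOX_ASLE  (1 << 2)

#define OPREGION_ACPI_OFFSET  0x100    /* assumed mailbox offsets */
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET  0x300

int main(void)
{
        unsigned int mboxes = MBOX_ACPI | MBOX_ASLE;

        if (mboxes & MBOX_ACPI)
                printf("ACPI mailbox at base + 0x%x\n", OPREGION_ACPI_OFFSET);
        if (mboxes & MBOX_SWSCI)
                printf("SWSCI mailbox at base + 0x%x\n", OPREGION_SWSCI_OFFSET);
        if (mboxes & MBOX_ASLE)
                printf("ASLE mailbox at base + 0x%x\n", OPREGION_ASLE_OFFSET);
        return 0;
}
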
index 1d306a458be6463c8e1f6636d69ebf515d845889..afb96d25219afe473b116802f30616c0edb1cf86 100644 (file)
@@ -170,57 +170,143 @@ struct overlay_registers {
     u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
 };
 
-/* overlay flip addr flag */
-#define OFC_UPDATE             0x1
-
-#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
-
+struct intel_overlay {
+       struct drm_device *dev;
+       struct intel_crtc *crtc;
+       struct drm_i915_gem_object *vid_bo;
+       struct drm_i915_gem_object *old_vid_bo;
+       int active;
+       int pfit_active;
+       u32 pfit_vscale_ratio; /* fixed-point number, (1<<12) == 1.0 */
+       u32 color_key;
+       u32 brightness, contrast, saturation;
+       u32 old_xscale, old_yscale;
+       /* register access */
+       u32 flip_addr;
+       struct drm_i915_gem_object *reg_bo;
+       /* flip handling */
+       uint32_t last_flip_req;
+       void (*flip_tail)(struct intel_overlay *);
+};
 
-static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+static struct overlay_registers *
+intel_overlay_map_regs(struct intel_overlay *overlay)
 {
         drm_i915_private_t *dev_priv = overlay->dev->dev_private;
        struct overlay_registers *regs;
 
-       /* no recursive mappings */
-       BUG_ON(overlay->virt_addr);
+       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+               regs = overlay->reg_bo->phys_obj->handle->vaddr;
+       else
+               regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
+                                        overlay->reg_bo->gtt_offset);
 
-       if (OVERLAY_NONPHYSICAL(overlay->dev)) {
-               regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                               overlay->reg_bo->gtt_offset,
-                                               KM_USER0);
+       return regs;
+}
 
-               if (!regs) {
-                       DRM_ERROR("failed to map overlay regs in GTT\n");
-                       return NULL;
-               }
-       } else
-               regs = overlay->reg_bo->phys_obj->handle->vaddr;
+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+                                    struct overlay_registers *regs)
+{
+       if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+               io_mapping_unmap(regs);
+}
+
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+                                        struct drm_i915_gem_request *request,
+                                        bool interruptible,
+                                        void (*tail)(struct intel_overlay *))
+{
+       struct drm_device *dev = overlay->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
 
-       return overlay->virt_addr = regs;
+       BUG_ON(overlay->last_flip_req);
+       overlay->last_flip_req =
+               i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+       if (overlay->last_flip_req == 0)
+               return -ENOMEM;
+
+       overlay->flip_tail = tail;
+       ret = i915_do_wait_request(dev,
+                                  overlay->last_flip_req, interruptible,
+                                  &dev_priv->render_ring);
+       if (ret)
+               return ret;
+
+       overlay->last_flip_req = 0;
+       return 0;
 }
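
The helper above replaces the old open-coded request/wait sequences with one
pattern: emit a request, record an optional 'tail' callback, and wait. If the
wait is interrupted, last_flip_req stays set and a later call to
intel_overlay_recover_from_interrupt() finishes the wait and runs the
postponed tail. A compilable userspace sketch of that control flow, with all
kernel calls replaced by stand-ins:

#include <stdio.h>

#define ERESTARTSYS 512

struct overlay_sim {
        unsigned int last_flip_req;
        void (*flip_tail)(struct overlay_sim *);
};

/* i915_do_wait_request() stand-in: pretend the first wait is interrupted */
static int wait_request(unsigned int seqno)
{
        static int interrupted_once;

        (void)seqno;
        if (!interrupted_once) {
                interrupted_once = 1;
                return -ERESTARTSYS;
        }
        return 0;
}

static void off_tail(struct overlay_sim *o)
{
        (void)o;
        printf("tail: tearing down overlay state\n");
}

static int do_wait_request(struct overlay_sim *o,
                           void (*tail)(struct overlay_sim *))
{
        static unsigned int next_seqno = 1;

        o->last_flip_req = next_seqno++;   /* i915_add_request() stand-in */
        o->flip_tail = tail;               /* cleanup deferred if interrupted */
        if (wait_request(o->last_flip_req))
                return -ERESTARTSYS;       /* leave last_flip_req set */
        o->last_flip_req = 0;
        return 0;
}

static int recover_from_interrupt(struct overlay_sim *o)
{
        if (o->last_flip_req == 0)
                return 0;
        if (wait_request(o->last_flip_req))
                return -ERESTARTSYS;
        if (o->flip_tail)
                o->flip_tail(o);           /* run the postponed cleanup */
        o->last_flip_req = 0;
        return 0;
}

int main(void)
{
        struct overlay_sim o = { 0, 0 };

        if (do_wait_request(&o, off_tail))
                printf("interrupted; retrying via recovery path\n");
        return recover_from_interrupt(&o);
}
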
 
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+/* Workaround for i830 bug where pipe A must be enabled to change control regs */
+static int
+i830_activate_pipe_a(struct drm_device *dev)
 {
-       if (OVERLAY_NONPHYSICAL(overlay->dev))
-               io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc;
+       struct drm_crtc_helper_funcs *crtc_funcs;
+       struct drm_display_mode vesa_640x480 = {
+               DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+                        752, 800, 0, 480, 489, 492, 525, 0,
+                        DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+       }, *mode;
+
+       crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
+       if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
+               return 0;
 
-       overlay->virt_addr = NULL;
+       /* most i8xx have pipe a forced on, so don't trust dpms mode */
+       if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
+               return 0;
 
-       return;
+       crtc_funcs = crtc->base.helper_private;
+       if (crtc_funcs->dpms == NULL)
+               return 0;
+
+       DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
+
+       mode = drm_mode_duplicate(dev, &vesa_640x480);
+       drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+       if (!drm_crtc_helper_set_mode(&crtc->base, mode,
+                                     crtc->base.x, crtc->base.y,
+                                     crtc->base.fb))
+               return 0;
+
+       crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
+       return 1;
+}
+
+static void
+i830_deactivate_pipe_a(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 }
 
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
        struct drm_device *dev = overlay->dev;
+       struct drm_i915_gem_request *request;
+       int pipe_a_quirk = 0;
        int ret;
-       drm_i915_private_t *dev_priv = dev->dev_private;
 
        BUG_ON(overlay->active);
-
        overlay->active = 1;
-       overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+
+       if (IS_I830(dev)) {
+               pipe_a_quirk = i830_activate_pipe_a(dev);
+               if (pipe_a_quirk < 0)
+                       return pipe_a_quirk;
+       }
+
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        BEGIN_LP_RING(4);
        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -229,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
        OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();
 
-       overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-       if (overlay->last_flip_req == 0)
-               return -ENOMEM;
-
-       ret = i915_do_wait_request(dev,
-                       overlay->last_flip_req, 1, &dev_priv->render_ring);
-       if (ret != 0)
-               return ret;
+       ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+out:
+       if (pipe_a_quirk)
+               i830_deactivate_pipe_a(dev);
 
-       overlay->hw_wedged = 0;
-       overlay->last_flip_req = 0;
-       return 0;
+       return ret;
 }
 
 /* overlay needs to be enabled in OCMD reg */
-static void intel_overlay_continue(struct intel_overlay *overlay,
-                           bool load_polyphase_filter)
+static int intel_overlay_continue(struct intel_overlay *overlay,
+                                 bool load_polyphase_filter)
 {
        struct drm_device *dev = overlay->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *request;
        u32 flip_addr = overlay->flip_addr;
        u32 tmp;
 
        BUG_ON(!overlay->active);
 
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL)
+               return -ENOMEM;
+
        if (load_polyphase_filter)
                flip_addr |= OFC_UPDATE;
 
@@ -269,220 +353,132 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
         ADVANCE_LP_RING();
 
        overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
+               i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+       return 0;
 }
 
-static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
-       struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
-       u32 tmp;
-
-       if (overlay->last_flip_req != 0) {
-               ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                               1, &dev_priv->render_ring);
-               if (ret == 0) {
-                       overlay->last_flip_req = 0;
-
-                       tmp = I915_READ(ISR);
+       struct drm_gem_object *obj = &overlay->old_vid_bo->base;
 
-                       if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
-                               return 0;
-               }
-       }
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
 
-       /* synchronous slowpath */
-       overlay->hw_wedged = RELEASE_OLD_VID;
+       overlay->old_vid_bo = NULL;
+}
 
-       BEGIN_LP_RING(2);
-        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-        OUT_RING(MI_NOOP);
-        ADVANCE_LP_RING();
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+       struct drm_gem_object *obj;
 
-       overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-       if (overlay->last_flip_req == 0)
-               return -ENOMEM;
+       /* never have the overlay hw on without showing a frame */
+       BUG_ON(!overlay->vid_bo);
+       obj = &overlay->vid_bo->base;
 
-       ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                       1, &dev_priv->render_ring);
-       if (ret != 0)
-               return ret;
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       overlay->vid_bo = NULL;
 
-       overlay->hw_wedged = 0;
-       overlay->last_flip_req = 0;
-       return 0;
+       overlay->crtc->overlay = NULL;
+       overlay->crtc = NULL;
+       overlay->active = 0;
 }
 
 /* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay)
+static int intel_overlay_off(struct intel_overlay *overlay,
+                            bool interruptible)
 {
-       u32 flip_addr = overlay->flip_addr;
        struct drm_device *dev = overlay->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
+       u32 flip_addr = overlay->flip_addr;
+       struct drm_i915_gem_request *request;
 
        BUG_ON(!overlay->active);
 
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (request == NULL)
+               return -ENOMEM;
+
        /* According to intel docs the overlay hw may hang (when switching
         * off) without loading the filter coeffs. It is however unclear whether
         * this applies to the disabling of the overlay or to the switching off
         * of the hw. Do it in both cases */
        flip_addr |= OFC_UPDATE;
 
+       BEGIN_LP_RING(6);
        /* wait for overlay to go idle */
-       overlay->hw_wedged = SWITCH_OFF_STAGE_1;
-
-       BEGIN_LP_RING(4);
        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
        OUT_RING(flip_addr);
-        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-        OUT_RING(MI_NOOP);
-        ADVANCE_LP_RING();
-
-       overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-       if (overlay->last_flip_req == 0)
-               return -ENOMEM;
-
-       ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                       1, &dev_priv->render_ring);
-       if (ret != 0)
-               return ret;
-
+       OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        /* turn overlay off */
-       overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
-       BEGIN_LP_RING(4);
-        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+       OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
        OUT_RING(flip_addr);
-        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-        OUT_RING(MI_NOOP);
+       OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        ADVANCE_LP_RING();
 
-       overlay->last_flip_req =
-               i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-       if (overlay->last_flip_req == 0)
-               return -ENOMEM;
-
-       ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                       1, &dev_priv->render_ring);
-       if (ret != 0)
-               return ret;
-
-       overlay->hw_wedged = 0;
-       overlay->last_flip_req = 0;
-       return ret;
-}
-
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
-{
-       struct drm_gem_object *obj;
-
-       /* never have the overlay hw on without showing a frame */
-       BUG_ON(!overlay->vid_bo);
-       obj = &overlay->vid_bo->base;
-
-       i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
-       overlay->vid_bo = NULL;
-
-       overlay->crtc->overlay = NULL;
-       overlay->crtc = NULL;
-       overlay->active = 0;
+       return intel_overlay_do_wait_request(overlay, request, interruptible,
+                                            intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever and to make forward progress. */
-int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
-                                        int interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+                                               bool interruptible)
 {
        struct drm_device *dev = overlay->dev;
-       struct drm_gem_object *obj;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 flip_addr;
        int ret;
 
-       if (overlay->hw_wedged == HW_WEDGED)
-               return -EIO;
-
-       if (overlay->last_flip_req == 0) {
-               overlay->last_flip_req =
-                       i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
-               if (overlay->last_flip_req == 0)
-                       return -ENOMEM;
-       }
+       if (overlay->last_flip_req == 0)
+               return 0;
 
        ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                       interruptible, &dev_priv->render_ring);
-       if (ret != 0)
+                                  interruptible, &dev_priv->render_ring);
+       if (ret)
                return ret;
 
-       switch (overlay->hw_wedged) {
-               case RELEASE_OLD_VID:
-                       obj = &overlay->old_vid_bo->base;
-                       i915_gem_object_unpin(obj);
-                       drm_gem_object_unreference(obj);
-                       overlay->old_vid_bo = NULL;
-                       break;
-               case SWITCH_OFF_STAGE_1:
-                       flip_addr = overlay->flip_addr;
-                       flip_addr |= OFC_UPDATE;
-
-                       overlay->hw_wedged = SWITCH_OFF_STAGE_2;
-
-                       BEGIN_LP_RING(4);
-                       OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-                       OUT_RING(flip_addr);
-                       OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-                       OUT_RING(MI_NOOP);
-                       ADVANCE_LP_RING();
-
-                       overlay->last_flip_req = i915_add_request(dev, NULL,
-                                       0, &dev_priv->render_ring);
-                       if (overlay->last_flip_req == 0)
-                               return -ENOMEM;
-
-                       ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                                       interruptible, &dev_priv->render_ring);
-                       if (ret != 0)
-                               return ret;
-
-               case SWITCH_OFF_STAGE_2:
-                       intel_overlay_off_tail(overlay);
-                       break;
-               default:
-                       BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
-       }
+       if (overlay->flip_tail)
+               overlay->flip_tail(overlay);
 
-       overlay->hw_wedged = 0;
        overlay->last_flip_req = 0;
        return 0;
 }
 
 /* Wait for pending overlay flip and release old frame.
  * Needs to be called before the overlay registers are changed
- * via intel_overlay_(un)map_regs_atomic */
+ * via intel_overlay_(un)map_regs
+ */
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
+       struct drm_device *dev = overlay->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
-       struct drm_gem_object *obj;
 
-       /* only wait if there is actually an old frame to release to
-        * guarantee forward progress */
+       /* Only wait if there is actually an old frame to release to
+        * guarantee forward progress.
+        */
        if (!overlay->old_vid_bo)
                return 0;
 
-       ret = intel_overlay_wait_flip(overlay);
-       if (ret != 0)
-               return ret;
+       if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+               struct drm_i915_gem_request *request;
 
-       obj = &overlay->old_vid_bo->base;
-       i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
-       overlay->old_vid_bo = NULL;
+               /* synchronous slowpath */
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return -ENOMEM;
 
+               BEGIN_LP_RING(2);
+               OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+               OUT_RING(MI_NOOP);
+               ADVANCE_LP_RING();
+
+               ret = intel_overlay_do_wait_request(overlay, request, true,
+                                                   intel_overlay_release_old_vid_tail);
+               if (ret)
+                       return ret;
+       }
+
+       intel_overlay_release_old_vid_tail(overlay);
        return 0;
 }
 
@@ -506,65 +502,65 @@ struct put_image_params {
 static int packed_depth_bytes(u32 format)
 {
        switch (format & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV422:
-                       return 4;
-               case I915_OVERLAY_YUV411:
-                       /* return 6; not implemented */
-               default:
-                       return -EINVAL;
+       case I915_OVERLAY_YUV422:
+               return 4;
+       case I915_OVERLAY_YUV411:
+               /* return 6; not implemented */
+       default:
+               return -EINVAL;
        }
 }
 
 static int packed_width_bytes(u32 format, short width)
 {
        switch (format & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV422:
-                       return width << 1;
-               default:
-                       return -EINVAL;
+       case I915_OVERLAY_YUV422:
+               return width << 1;
+       default:
+               return -EINVAL;
        }
 }
 
 static int uv_hsubsampling(u32 format)
 {
        switch (format & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV422:
-               case I915_OVERLAY_YUV420:
-                       return 2;
-               case I915_OVERLAY_YUV411:
-               case I915_OVERLAY_YUV410:
-                       return 4;
-               default:
-                       return -EINVAL;
+       case I915_OVERLAY_YUV422:
+       case I915_OVERLAY_YUV420:
+               return 2;
+       case I915_OVERLAY_YUV411:
+       case I915_OVERLAY_YUV410:
+               return 4;
+       default:
+               return -EINVAL;
        }
 }
 
 static int uv_vsubsampling(u32 format)
 {
        switch (format & I915_OVERLAY_DEPTH_MASK) {
-               case I915_OVERLAY_YUV420:
-               case I915_OVERLAY_YUV410:
-                       return 2;
-               case I915_OVERLAY_YUV422:
-               case I915_OVERLAY_YUV411:
-                       return 1;
-               default:
-                       return -EINVAL;
+       case I915_OVERLAY_YUV420:
+       case I915_OVERLAY_YUV410:
+               return 2;
+       case I915_OVERLAY_YUV422:
+       case I915_OVERLAY_YUV411:
+               return 1;
+       default:
+               return -EINVAL;
        }
 }
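
The four helpers above translate an overlay pixel format into packed depth
and chroma subsampling factors. A standalone sketch showing how the
subsampling pair sizes the UV planes, mirroring the switch tables above
(the format names are reduced to a local enum):

#include <stdio.h>

enum { YUV410, YUV411, YUV420, YUV422 };

static int uv_hsub(int fmt)
{
        switch (fmt) {
        case YUV422:
        case YUV420:
                return 2;
        case YUV411:
        case YUV410:
                return 4;
        default:
                return -1;
        }
}

static int uv_vsub(int fmt)
{
        switch (fmt) {
        case YUV420:
        case YUV410:
                return 2;
        case YUV422:
        case YUV411:
                return 1;
        default:
                return -1;
        }
}

int main(void)
{
        int w = 720, h = 480, fmt = YUV420;

        printf("Y plane %dx%d, UV planes %dx%d\n",
               w, h, w / uv_hsub(fmt), h / uv_vsub(fmt));
        return 0;
}
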
 
 static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
 {
        u32 mask, shift, ret;
-       if (IS_I9XX(dev)) {
-               mask = 0x3f;
-               shift = 6;
-       } else {
+       if (IS_GEN2(dev)) {
                mask = 0x1f;
                shift = 5;
+       } else {
+               mask = 0x3f;
+               shift = 6;
        }
        ret = ((offset + width + mask) >> shift) - (offset >> shift);
-       if (IS_I9XX(dev))
+       if (!IS_GEN2(dev))
                ret <<= 1;
       ret -= 1;
        return ret << 2;
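
calc_swidthsw() derives the SWIDTHSW register value from the buffer offset
and row width, with a mask/shift pair that differs between gen2 and later
parts. A direct, runnable transcription for a couple of sample inputs:

#include <stdio.h>
#include <stdint.h>

static uint32_t calc_swidthsw(int is_gen2, uint32_t offset, uint32_t width)
{
        uint32_t mask  = is_gen2 ? 0x1f : 0x3f;
        uint32_t shift = is_gen2 ? 5 : 6;
        uint32_t ret   = ((offset + width + mask) >> shift) - (offset >> shift);

        if (!is_gen2)
                ret <<= 1;
        ret -= 1;
        return ret << 2;
}

int main(void)
{
        /* 720 pixels of packed YUV422 (2 bytes/pixel), offset 0 */
        printf("gen2:  0x%x\n", calc_swidthsw(1, 0, 720 * 2));
        printf("gen4+: 0x%x\n", calc_swidthsw(0, 0, 720 * 2));
        return 0;
}
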
@@ -587,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
        0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
        0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
        0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
-       0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+       0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
 static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
        0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
        0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
@@ -597,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
        0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
        0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
        0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
-       0x3000, 0x0800, 0x3000};
+       0x3000, 0x0800, 0x3000
+};
 
 static void update_polyphase_filter(struct overlay_registers *regs)
 {
@@ -630,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
                yscale = 1 << FP_SHIFT;
 
        /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
-               xscale_UV = xscale/uv_hscale;
-               yscale_UV = yscale/uv_vscale;
-               /* make the Y scale to UV scale ratio an exact multiply */
-               xscale = xscale_UV * uv_hscale;
-               yscale = yscale_UV * uv_vscale;
+       xscale_UV = xscale/uv_hscale;
+       yscale_UV = yscale/uv_vscale;
+       /* make the Y scale to UV scale ratio an exact multiply */
+       xscale = xscale_UV * uv_hscale;
+       yscale = yscale_UV * uv_vscale;
        /*} else {
-               xscale_UV = 0;
-               yscale_UV = 0;
-       }*/
+         xscale_UV = 0;
+         yscale_UV = 0;
+         }*/
 
        if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
                scale_changed = true;
        overlay->old_xscale = xscale;
        overlay->old_yscale = yscale;
 
-       regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
-               | ((xscale >> FP_SHIFT) << 16)
-               | ((xscale & FRACT_MASK) << 3);
-       regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
-               | ((xscale_UV >> FP_SHIFT) << 16)
-               | ((xscale_UV & FRACT_MASK) << 3);
-       regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
-               | ((yscale_UV >> FP_SHIFT) << 0);
+       regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
+                          ((xscale >> FP_SHIFT)  << 16) |
+                          ((xscale & FRACT_MASK) << 3));
+
+       regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
+                        ((xscale_UV >> FP_SHIFT)  << 16) |
+                        ((xscale_UV & FRACT_MASK) << 3));
+
+       regs->UVSCALEV = ((((yscale    >> FP_SHIFT) << 16) |
+                          ((yscale_UV >> FP_SHIFT) << 0)));
 
        if (scale_changed)
                update_polyphase_filter(regs);
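
The scale registers packed above carry fixed-point ratios split into integer
and fractional fields. A standalone sketch of the YRGBSCALE packing, assuming
FP_SHIFT is 12 and FRACT_MASK is 0xfff, consistent with the "(1<<12) == 1.0"
comment on pfit_vscale_ratio:

#include <stdio.h>
#include <stdint.h>

#define FP_SHIFT   12      /* assumed: 1.0 == (1 << 12) */
#define FRACT_MASK 0xfff   /* assumed: low 12 bits are the fraction */

static uint32_t pack_yrgbscale(uint32_t xscale, uint32_t yscale)
{
        /* vertical fraction | horizontal integer | horizontal fraction */
        return ((yscale & FRACT_MASK) << 20) |
               ((xscale >> FP_SHIFT)  << 16) |
               ((xscale & FRACT_MASK) << 3);
}

int main(void)
{
        uint32_t one  = 1 << FP_SHIFT;
        uint32_t half = one / 2;

        printf("x=1.0, y=1.0 -> 0x%08x\n", pack_yrgbscale(one, one));
        printf("x=0.5, y=1.0 -> 0x%08x\n", pack_yrgbscale(half, one));
        return 0;
}
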
@@ -664,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay,
                            struct overlay_registers *regs)
 {
        u32 key = overlay->color_key;
+
        switch (overlay->crtc->base.fb->bits_per_pixel) {
-               case 8:
-                       regs->DCLRKV = 0;
-                       regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
-               case 16:
-                       if (overlay->crtc->base.fb->depth == 15) {
-                               regs->DCLRKV = RGB15_TO_COLORKEY(key);
-                               regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
-                       } else {
-                               regs->DCLRKV = RGB16_TO_COLORKEY(key);
-                               regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
-                       }
-               case 24:
-               case 32:
-                       regs->DCLRKV = key;
-                       regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+       case 8:
+               regs->DCLRKV = 0;
+               regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+               break;
+
+       case 16:
+               if (overlay->crtc->base.fb->depth == 15) {
+                       regs->DCLRKV = RGB15_TO_COLORKEY(key);
+                       regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+               } else {
+                       regs->DCLRKV = RGB16_TO_COLORKEY(key);
+                       regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+               }
+               break;
+
+       case 24:
+       case 32:
+               regs->DCLRKV = key;
+               regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+               break;
        }
 }
 
@@ -689,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
 
        if (params->format & I915_OVERLAY_YUV_PLANAR) {
                switch (params->format & I915_OVERLAY_DEPTH_MASK) {
-                       case I915_OVERLAY_YUV422:
-                               cmd |= OCMD_YUV_422_PLANAR;
-                               break;
-                       case I915_OVERLAY_YUV420:
-                               cmd |= OCMD_YUV_420_PLANAR;
-                               break;
-                       case I915_OVERLAY_YUV411:
-                       case I915_OVERLAY_YUV410:
-                               cmd |= OCMD_YUV_410_PLANAR;
-                               break;
+               case I915_OVERLAY_YUV422:
+                       cmd |= OCMD_YUV_422_PLANAR;
+                       break;
+               case I915_OVERLAY_YUV420:
+                       cmd |= OCMD_YUV_420_PLANAR;
+                       break;
+               case I915_OVERLAY_YUV411:
+               case I915_OVERLAY_YUV410:
+                       cmd |= OCMD_YUV_410_PLANAR;
+                       break;
                }
        } else { /* YUV packed */
                switch (params->format & I915_OVERLAY_DEPTH_MASK) {
-                       case I915_OVERLAY_YUV422:
-                               cmd |= OCMD_YUV_422_PACKED;
-                               break;
-                       case I915_OVERLAY_YUV411:
-                               cmd |= OCMD_YUV_411_PACKED;
-                               break;
+               case I915_OVERLAY_YUV422:
+                       cmd |= OCMD_YUV_422_PACKED;
+                       break;
+               case I915_OVERLAY_YUV411:
+                       cmd |= OCMD_YUV_411_PACKED;
+                       break;
                }
 
                switch (params->format & I915_OVERLAY_SWAP_MASK) {
-                       case I915_OVERLAY_NO_SWAP:
-                               break;
-                       case I915_OVERLAY_UV_SWAP:
-                               cmd |= OCMD_UV_SWAP;
-                               break;
-                       case I915_OVERLAY_Y_SWAP:
-                               cmd |= OCMD_Y_SWAP;
-                               break;
-                       case I915_OVERLAY_Y_AND_UV_SWAP:
-                               cmd |= OCMD_Y_AND_UV_SWAP;
-                               break;
+               case I915_OVERLAY_NO_SWAP:
+                       break;
+               case I915_OVERLAY_UV_SWAP:
+                       cmd |= OCMD_UV_SWAP;
+                       break;
+               case I915_OVERLAY_Y_SWAP:
+                       cmd |= OCMD_Y_SWAP;
+                       break;
+               case I915_OVERLAY_Y_AND_UV_SWAP:
+                       cmd |= OCMD_Y_AND_UV_SWAP;
+                       break;
                }
        }
 
        return cmd;
 }
 
-int intel_overlay_do_put_image(struct intel_overlay *overlay,
-                              struct drm_gem_object *new_bo,
-                              struct put_image_params *params)
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+                                     struct drm_gem_object *new_bo,
+                                     struct put_image_params *params)
 {
        int ret, tmp_width;
        struct overlay_registers *regs;
@@ -755,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
                goto out_unpin;
 
        if (!overlay->active) {
-               regs = intel_overlay_map_regs_atomic(overlay);
+               regs = intel_overlay_map_regs(overlay);
                if (!regs) {
                        ret = -ENOMEM;
                        goto out_unpin;
                }
                regs->OCONFIG = OCONF_CC_OUT_8BIT;
-               if (IS_I965GM(overlay->dev))
+               if (IS_GEN4(overlay->dev))
                        regs->OCONFIG |= OCONF_CSC_MODE_BT709;
                regs->OCONFIG |= overlay->crtc->pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
-               intel_overlay_unmap_regs_atomic(overlay);
+               intel_overlay_unmap_regs(overlay, regs);
 
                ret = intel_overlay_on(overlay);
                if (ret != 0)
                        goto out_unpin;
        }
 
-       regs = intel_overlay_map_regs_atomic(overlay);
+       regs = intel_overlay_map_regs(overlay);
        if (!regs) {
                ret = -ENOMEM;
                goto out_unpin;
@@ -788,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        regs->SWIDTH = params->src_w;
        regs->SWIDTHSW = calc_swidthsw(overlay->dev,
-                       params->offset_Y, tmp_width);
+                                      params->offset_Y, tmp_width);
        regs->SHEIGHT = params->src_h;
        regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
        regs->OSTRIDE = params->stride_Y;
@@ -799,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
                u32 tmp_U, tmp_V;
                regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
                tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
-                               params->src_w/uv_hscale);
+                                     params->src_w/uv_hscale);
                tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
-                               params->src_w/uv_hscale);
+                                     params->src_w/uv_hscale);
                regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
                regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
                regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
@@ -815,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        regs->OCMD = overlay_cmd_reg(params);
 
-       intel_overlay_unmap_regs_atomic(overlay);
+       intel_overlay_unmap_regs(overlay, regs);
 
-       intel_overlay_continue(overlay, scale_changed);
+       ret = intel_overlay_continue(overlay, scale_changed);
+       if (ret)
+               goto out_unpin;
 
        overlay->old_vid_bo = overlay->vid_bo;
        overlay->vid_bo = to_intel_bo(new_bo);
@@ -829,20 +838,19 @@ out_unpin:
        return ret;
 }
 
-int intel_overlay_switch_off(struct intel_overlay *overlay)
+int intel_overlay_switch_off(struct intel_overlay *overlay,
+                            bool interruptible)
 {
-       int ret;
        struct overlay_registers *regs;
        struct drm_device *dev = overlay->dev;
+       int ret;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
-       if (overlay->hw_wedged) {
-               ret = intel_overlay_recover_from_interrupt(overlay, 1);
-               if (ret != 0)
-                       return ret;
-       }
+       ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+       if (ret != 0)
+               return ret;
 
        if (!overlay->active)
                return 0;
@@ -851,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
        if (ret != 0)
                return ret;
 
-       regs = intel_overlay_map_regs_atomic(overlay);
+       regs = intel_overlay_map_regs(overlay);
        regs->OCMD = 0;
-       intel_overlay_unmap_regs_atomic(overlay);
+       intel_overlay_unmap_regs(overlay, regs);
 
-       ret = intel_overlay_off(overlay);
+       ret = intel_overlay_off(overlay, interruptible);
        if (ret != 0)
                return ret;
 
        intel_overlay_off_tail(overlay);
-
        return 0;
 }
 
 static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
                                          struct intel_crtc *crtc)
 {
-        drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-       u32 pipeconf;
-       int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+       drm_i915_private_t *dev_priv = overlay->dev->dev_private;
 
-       if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+       if (!crtc->active)
                return -EINVAL;
 
-       pipeconf = I915_READ(pipeconf_reg);
-
        /* can't use the overlay with double wide pipe */
-       if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+       if (INTEL_INFO(overlay->dev)->gen < 4 &&
+           (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
                return -EINVAL;
 
        return 0;
@@ -886,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
 static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
 {
        struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 ratio;
+       drm_i915_private_t *dev_priv = dev->dev_private;
        u32 pfit_control = I915_READ(PFIT_CONTROL);
+       u32 ratio;
 
        /* XXX: This is not the same logic as in the xorg driver, but more in
-        * line with the intel documentation for the i965 */
-       if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
-               ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
-       } else { /* on i965 use the PGM reg to read out the autoscaler values */
-               ratio = I915_READ(PFIT_PGM_RATIOS);
-               if (IS_I965G(dev))
-                       ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+        * line with the intel documentation for the i965
+        */
+       if (INTEL_INFO(dev)->gen >= 4) {
+               /* on i965 use the PGM reg to read out the autoscaler values */
+               ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+       } else {
+               if (pfit_control & VERT_AUTO_SCALE)
+                       ratio = I915_READ(PFIT_AUTO_RATIOS);
                else
-                       ratio >>= PFIT_VERT_SCALE_SHIFT;
+                       ratio = I915_READ(PFIT_PGM_RATIOS);
+               ratio >>= PFIT_VERT_SCALE_SHIFT;
        }
 
        overlay->pfit_vscale_ratio = ratio;
@@ -910,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
 {
        struct drm_display_mode *mode = &overlay->crtc->base.mode;
 
-       if ((rec->dst_x < mode->crtc_hdisplay)
-           && (rec->dst_x + rec->dst_width
-                   <= mode->crtc_hdisplay)
-           && (rec->dst_y < mode->crtc_vdisplay)
-           && (rec->dst_y + rec->dst_height
-                   <= mode->crtc_vdisplay))
+       if (rec->dst_x < mode->crtc_hdisplay &&
+           rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
+           rec->dst_y < mode->crtc_vdisplay &&
+           rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
                return 0;
        else
                return -EINVAL;
@@ -940,53 +944,57 @@ static int check_overlay_src(struct drm_device *dev,
                             struct drm_intel_overlay_put_image *rec,
                             struct drm_gem_object *new_bo)
 {
-       u32 stride_mask;
-       int depth;
        int uv_hscale = uv_hsubsampling(rec->flags);
        int uv_vscale = uv_vsubsampling(rec->flags);
-       size_t tmp;
+       u32 stride_mask, tmp;
+       int depth;      /* may hold -EINVAL from packed_depth_bytes() */
 
        /* check src dimensions */
        if (IS_845G(dev) || IS_I830(dev)) {
-               if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
-                   || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+               if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+                   rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
                        return -EINVAL;
        } else {
-               if (rec->src_height > IMAGE_MAX_HEIGHT
-                   || rec->src_width > IMAGE_MAX_WIDTH)
+               if (rec->src_height > IMAGE_MAX_HEIGHT ||
+                   rec->src_width  > IMAGE_MAX_WIDTH)
                        return -EINVAL;
        }
+
        /* better safe than sorry, use 4 as the maximal subsampling ratio */
-       if (rec->src_height < N_VERT_Y_TAPS*4
-           || rec->src_width < N_HORIZ_Y_TAPS*4)
+       if (rec->src_height < N_VERT_Y_TAPS*4 ||
+           rec->src_width  < N_HORIZ_Y_TAPS*4)
                return -EINVAL;
 
        /* check alignment constraints */
        switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
-               case I915_OVERLAY_RGB:
-                       /* not implemented */
+       case I915_OVERLAY_RGB:
+               /* not implemented */
+               return -EINVAL;
+
+       case I915_OVERLAY_YUV_PACKED:
+               if (uv_vscale != 1)
                        return -EINVAL;
-               case I915_OVERLAY_YUV_PACKED:
-                       depth = packed_depth_bytes(rec->flags);
-                       if (uv_vscale != 1)
-                               return -EINVAL;
-                       if (depth < 0)
-                               return depth;
-                       /* ignore UV planes */
-                       rec->stride_UV = 0;
-                       rec->offset_U = 0;
-                       rec->offset_V = 0;
-                       /* check pixel alignment */
-                       if (rec->offset_Y % depth)
-                               return -EINVAL;
-                       break;
-               case I915_OVERLAY_YUV_PLANAR:
-                       if (uv_vscale < 0 || uv_hscale < 0)
-                               return -EINVAL;
-                       /* no offset restrictions for planar formats */
-                       break;
-               default:
+
+               depth = packed_depth_bytes(rec->flags);
+               if (depth < 0)
+                       return depth;
+
+               /* ignore UV planes */
+               rec->stride_UV = 0;
+               rec->offset_U = 0;
+               rec->offset_V = 0;
+               /* check pixel alignment */
+               if (rec->offset_Y % depth)
                        return -EINVAL;
+               break;
+
+       case I915_OVERLAY_YUV_PLANAR:
+               if (uv_vscale < 0 || uv_hscale < 0)
+                       return -EINVAL;
+               /* no offset restrictions for planar formats */
+               break;
+
+       default:
+               return -EINVAL;
        }
 
        if (rec->src_width % uv_hscale)
@@ -1000,47 +1008,74 @@ static int check_overlay_src(struct drm_device *dev,
 
        if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
                return -EINVAL;
-       if (IS_I965G(dev) && rec->stride_Y < 512)
+       if (IS_GEN4(dev) && rec->stride_Y < 512)
                return -EINVAL;
 
        tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
-               4 : 8;
-       if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+               4096 : 8192;
+       if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
                return -EINVAL;
 
        /* check buffer dimensions */
        switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
-               case I915_OVERLAY_RGB:
-               case I915_OVERLAY_YUV_PACKED:
-                       /* always 4 Y values per depth pixels */
-                       if (packed_width_bytes(rec->flags, rec->src_width)
-                                       > rec->stride_Y)
-                               return -EINVAL;
-
-                       tmp = rec->stride_Y*rec->src_height;
-                       if (rec->offset_Y + tmp > new_bo->size)
-                               return -EINVAL;
-                       break;
-               case I915_OVERLAY_YUV_PLANAR:
-                       if (rec->src_width > rec->stride_Y)
-                               return -EINVAL;
-                       if (rec->src_width/uv_hscale > rec->stride_UV)
-                               return -EINVAL;
-
-                       tmp = rec->stride_Y*rec->src_height;
-                       if (rec->offset_Y + tmp > new_bo->size)
-                               return -EINVAL;
-                       tmp = rec->stride_UV*rec->src_height;
-                       tmp /= uv_vscale;
-                       if (rec->offset_U + tmp > new_bo->size
-                           || rec->offset_V + tmp > new_bo->size)
-                               return -EINVAL;
-                       break;
+       case I915_OVERLAY_RGB:
+       case I915_OVERLAY_YUV_PACKED:
+               /* always 4 Y values per depth pixels */
+               if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+                       return -EINVAL;
+
+               tmp = rec->stride_Y*rec->src_height;
+               if (rec->offset_Y + tmp > new_bo->size)
+                       return -EINVAL;
+               break;
+
+       case I915_OVERLAY_YUV_PLANAR:
+               if (rec->src_width > rec->stride_Y)
+                       return -EINVAL;
+               if (rec->src_width/uv_hscale > rec->stride_UV)
+                       return -EINVAL;
+
+               tmp = rec->stride_Y * rec->src_height;
+               if (rec->offset_Y + tmp > new_bo->size)
+                       return -EINVAL;
+
+               tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+               if (rec->offset_U + tmp > new_bo->size ||
+                   rec->offset_V + tmp > new_bo->size)
+                       return -EINVAL;
+               break;
        }
 
        return 0;
 }
 
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32  pfit_control;
+
+       /* i830 doesn't have a panel fitter */
+       if (IS_I830(dev))
+               return -1;
+
+       pfit_control = I915_READ(PFIT_CONTROL);
+
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
+
+       /* 965 can place panel fitter on either pipe */
+       if (IS_GEN4(dev))
+               return (pfit_control >> 29) & 0x3;
+
+       /* older chips can only use pipe 1 */
+       return 1;
+}
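
A runnable distillation of intel_panel_fitter_pipe() above; the PFIT_ENABLE
bit position is an assumption here, while the gen4 pipe-select field follows
the (pfit_control >> 29) & 0x3 decode used in the function:

#include <stdio.h>
#include <stdint.h>

#define PFIT_ENABLE (1u << 31)         /* assumed bit position */

static int panel_fitter_pipe(uint32_t pfit_control, int gen4, int i830)
{
        if (i830)
                return -1;                         /* no panel fitter at all */
        if ((pfit_control & PFIT_ENABLE) == 0)
                return -1;                         /* fitter present but idle */
        if (gen4)
                return (pfit_control >> 29) & 0x3; /* 965: either pipe */
        return 1;                                  /* older chips: pipe B only */
}

int main(void)
{
        printf("%d\n", panel_fitter_pipe(PFIT_ENABLE, 1, 0)); /* gen4: pipe 0 */
        printf("%d\n", panel_fitter_pipe(PFIT_ENABLE, 0, 0)); /* pre-965: pipe 1 */
        return 0;
}
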
+
 int intel_overlay_put_image(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
 {
@@ -1068,7 +1103,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
                mutex_lock(&dev->mode_config.mutex);
                mutex_lock(&dev->struct_mutex);
 
-               ret = intel_overlay_switch_off(overlay);
+               ret = intel_overlay_switch_off(overlay, true);
 
                mutex_unlock(&dev->struct_mutex);
                mutex_unlock(&dev->mode_config.mutex);
@@ -1081,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
-                        DRM_MODE_OBJECT_CRTC);
+                                          DRM_MODE_OBJECT_CRTC);
        if (!drmmode_obj) {
                ret = -ENOENT;
                goto out_free;
@@ -1089,7 +1124,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
        crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
 
        new_bo = drm_gem_object_lookup(dev, file_priv,
-                       put_image_rec->bo_handle);
+                                      put_image_rec->bo_handle);
        if (!new_bo) {
                ret = -ENOENT;
                goto out_free;
@@ -1098,15 +1133,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
        mutex_lock(&dev->mode_config.mutex);
        mutex_lock(&dev->struct_mutex);
 
-       if (overlay->hw_wedged) {
-               ret = intel_overlay_recover_from_interrupt(overlay, 1);
-               if (ret != 0)
-                       goto out_unlock;
-       }
+       ret = intel_overlay_recover_from_interrupt(overlay, true);
+       if (ret != 0)
+               goto out_unlock;
 
        if (overlay->crtc != crtc) {
                struct drm_display_mode *mode = &crtc->base.mode;
-               ret = intel_overlay_switch_off(overlay);
+               ret = intel_overlay_switch_off(overlay, true);
                if (ret != 0)
                        goto out_unlock;
 
@@ -1117,9 +1150,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
                overlay->crtc = crtc;
                crtc->overlay = overlay;
 
-               if (intel_panel_fitter_pipe(dev) == crtc->pipe
-                   /* and line to wide, i.e. one-line-mode */
-                   && mode->hdisplay > 1024) {
+               /* line too wide, i.e. one-line-mode */
+               if (mode->hdisplay > 1024 &&
+                   intel_panel_fitter_pipe(dev) == crtc->pipe) {
                        overlay->pfit_active = 1;
                        update_pfit_vscale_ratio(overlay);
                } else
@@ -1132,10 +1165,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 
        if (overlay->pfit_active) {
                params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
-                       overlay->pfit_vscale_ratio);
+                                overlay->pfit_vscale_ratio);
                /* shifting right rounds downwards, so add 1 */
                params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
-                       overlay->pfit_vscale_ratio) + 1;
+                                overlay->pfit_vscale_ratio) + 1;
        } else {
                params->dst_y = put_image_rec->dst_y;
                params->dst_h = put_image_rec->dst_height;
@@ -1147,8 +1180,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
        params->src_h = put_image_rec->src_height;
        params->src_scan_w = put_image_rec->src_scan_width;
        params->src_scan_h = put_image_rec->src_scan_height;
-       if (params->src_scan_h > params->src_h
-           || params->src_scan_w > params->src_w) {
+       if (params->src_scan_h > params->src_h ||
+           params->src_scan_w > params->src_w) {
                ret = -EINVAL;
                goto out_unlock;
        }
@@ -1204,7 +1237,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
                return false;
 
        for (i = 0; i < 3; i++) {
-               if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+               if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
                        return false;
        }
 
@@ -1225,16 +1258,18 @@ static bool check_gamma5_errata(u32 gamma5)
 
 static int check_gamma(struct drm_intel_overlay_attrs *attrs)
 {
-       if (!check_gamma_bounds(0, attrs->gamma0)
-           || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
-           || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
-           || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
-           || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
-           || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
-           || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+       if (!check_gamma_bounds(0, attrs->gamma0) ||
+           !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+           !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+           !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+           !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+           !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+           !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
                return -EINVAL;
+
        if (!check_gamma5_errata(attrs->gamma5))
                return -EINVAL;
+
        return 0;
 }
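
check_gamma() above chains check_gamma_bounds() across the six gamma
registers so that each 8-bit channel lane increases strictly from 0 up to
0x00ffffff. A standalone sketch of that monotonicity test on a sample ramp:

#include <stdio.h>
#include <stdint.h>

static int bounds_ok(uint32_t g1, uint32_t g2)
{
        int i;

        /* every byte lane of g1 must be strictly below the same lane of g2 */
        for (i = 0; i < 3; i++)
                if (((g1 >> i*8) & 0xff) >= ((g2 >> i*8) & 0xff))
                        return 0;
        return 1;
}

int main(void)
{
        uint32_t ramp[6] = { 0x101010, 0x202020, 0x404040,
                             0x808080, 0xa0a0a0, 0xc0c0c0 };
        int i, ok;

        ok = bounds_ok(0, ramp[0]);
        for (i = 0; i < 5 && ok; i++)
                ok = bounds_ok(ramp[i], ramp[i + 1]);
        ok = ok && bounds_ok(ramp[5], 0x00ffffff);

        printf("gamma ramp %s\n", ok ? "valid" : "rejected");
        return 0;
}
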
 
@@ -1261,13 +1296,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
        mutex_lock(&dev->mode_config.mutex);
        mutex_lock(&dev->struct_mutex);
 
+       ret = -EINVAL;
        if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
-               attrs->color_key = overlay->color_key;
+               attrs->color_key  = overlay->color_key;
                attrs->brightness = overlay->brightness;
-               attrs->contrast = overlay->contrast;
+               attrs->contrast   = overlay->contrast;
                attrs->saturation = overlay->saturation;
 
-               if (IS_I9XX(dev)) {
+               if (!IS_GEN2(dev)) {
                        attrs->gamma0 = I915_READ(OGAMC0);
                        attrs->gamma1 = I915_READ(OGAMC1);
                        attrs->gamma2 = I915_READ(OGAMC2);
@@ -1275,29 +1311,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
                        attrs->gamma4 = I915_READ(OGAMC4);
                        attrs->gamma5 = I915_READ(OGAMC5);
                }
-               ret = 0;
        } else {
-               overlay->color_key = attrs->color_key;
-               if (attrs->brightness >= -128 && attrs->brightness <= 127) {
-                       overlay->brightness = attrs->brightness;
-               } else {
-                       ret = -EINVAL;
+               if (attrs->brightness < -128 || attrs->brightness > 127)
                        goto out_unlock;
-               }
-               if (attrs->contrast <= 255) {
-                       overlay->contrast = attrs->contrast;
-               } else {
-                       ret = -EINVAL;
+               if (attrs->contrast > 255)
                        goto out_unlock;
-               }
-               if (attrs->saturation <= 1023) {
-                       overlay->saturation = attrs->saturation;
-               } else {
-                       ret = -EINVAL;
+               if (attrs->saturation > 1023)
                        goto out_unlock;
-               }
 
-               regs = intel_overlay_map_regs_atomic(overlay);
+               overlay->color_key  = attrs->color_key;
+               overlay->brightness = attrs->brightness;
+               overlay->contrast   = attrs->contrast;
+               overlay->saturation = attrs->saturation;
+
+               regs = intel_overlay_map_regs(overlay);
                if (!regs) {
                        ret = -ENOMEM;
                        goto out_unlock;
@@ -1305,13 +1332,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
 
                update_reg_attrs(overlay, regs);
 
-               intel_overlay_unmap_regs_atomic(overlay);
+               intel_overlay_unmap_regs(overlay, regs);
 
                if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
-                       if (!IS_I9XX(dev)) {
-                               ret = -EINVAL;
+                       if (IS_GEN2(dev))
                                goto out_unlock;
-                       }
 
                        if (overlay->active) {
                                ret = -EBUSY;
@@ -1319,7 +1344,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
                        }
 
                        ret = check_gamma(attrs);
-                       if (ret != 0)
+                       if (ret)
                                goto out_unlock;
 
                        I915_WRITE(OGAMC0, attrs->gamma0);
@@ -1329,9 +1354,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
                        I915_WRITE(OGAMC4, attrs->gamma4);
                        I915_WRITE(OGAMC5, attrs->gamma5);
                }
-               ret = 0;
        }
 
+       ret = 0;
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->mode_config.mutex);
@@ -1347,7 +1372,7 @@ void intel_setup_overlay(struct drm_device *dev)
        struct overlay_registers *regs;
        int ret;
 
-       if (!OVERLAY_EXISTS(dev))
+       if (!HAS_OVERLAY(dev))
                return;
 
        overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
@@ -1360,22 +1385,28 @@ void intel_setup_overlay(struct drm_device *dev)
                goto out_free;
        overlay->reg_bo = to_intel_bo(reg_bo);
 
-       if (OVERLAY_NONPHYSICAL(dev)) {
-               ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
-               if (ret) {
-                        DRM_ERROR("failed to pin overlay register bo\n");
-                        goto out_free_bo;
-                }
-               overlay->flip_addr = overlay->reg_bo->gtt_offset;
-       } else {
+       if (OVERLAY_NEEDS_PHYSICAL(dev)) {
                ret = i915_gem_attach_phys_object(dev, reg_bo,
                                                  I915_GEM_PHYS_OVERLAY_REGS,
-                                                 0);
+                                                 PAGE_SIZE);
                 if (ret) {
                         DRM_ERROR("failed to attach phys overlay regs\n");
                         goto out_free_bo;
                 }
                overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+       } else {
+               ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+               if (ret) {
+                        DRM_ERROR("failed to pin overlay register bo\n");
+                        goto out_free_bo;
+                }
+               overlay->flip_addr = overlay->reg_bo->gtt_offset;
+
+               ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+               if (ret) {
+                        DRM_ERROR("failed to move overlay register bo into the GTT\n");
+                        goto out_unpin_bo;
+                }
        }
 
        /* init all values */
@@ -1384,21 +1415,22 @@ void intel_setup_overlay(struct drm_device *dev)
        overlay->contrast = 75;
        overlay->saturation = 146;
 
-       regs = intel_overlay_map_regs_atomic(overlay);
+       regs = intel_overlay_map_regs(overlay);
        if (!regs)
                goto out_free_bo;
 
        memset(regs, 0, sizeof(struct overlay_registers));
        update_polyphase_filter(regs);
-
        update_reg_attrs(overlay, regs);
 
-       intel_overlay_unmap_regs_atomic(overlay);
+       intel_overlay_unmap_regs(overlay, regs);
 
        dev_priv->overlay = overlay;
        DRM_INFO("initialized overlay support\n");
        return;
 
+out_unpin_bo:
+       i915_gem_object_unpin(reg_bo);
 out_free_bo:
        drm_gem_object_unreference(reg_bo);
 out_free:
@@ -1408,18 +1440,23 @@ out_free:
 
 void intel_cleanup_overlay(struct drm_device *dev)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-       if (dev_priv->overlay) {
-               /* The bo's should be free'd by the generic code already.
-                * Furthermore modesetting teardown happens beforehand so the
-                * hardware should be off already */
-               BUG_ON(dev_priv->overlay->active);
+       if (!dev_priv->overlay)
+               return;
 
-               kfree(dev_priv->overlay);
-       }
+       /* The bo's should be free'd by the generic code already.
+        * Furthermore modesetting teardown happens beforehand so the
+        * hardware should be off already */
+       BUG_ON(dev_priv->overlay->active);
+
+       drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+       kfree(dev_priv->overlay);
 }
 
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
 struct intel_overlay_error_state {
        struct overlay_registers regs;
        unsigned long base;
@@ -1427,6 +1464,29 @@ struct intel_overlay_error_state {
        u32 isr;
 };
 
+static struct overlay_registers *
+intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+       drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+       struct overlay_registers *regs;
+
+       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+               regs = overlay->reg_bo->phys_obj->handle->vaddr;
+       else
+               regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+                                               overlay->reg_bo->gtt_offset);
+
+       return regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
+                                           struct overlay_registers *regs)
+{
+       if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+               io_mapping_unmap_atomic(regs);
+}
+
+
 struct intel_overlay_error_state *
 intel_overlay_capture_error_state(struct drm_device *dev)
 {
@@ -1444,17 +1504,17 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 
        error->dovsta = I915_READ(DOVSTA);
        error->isr = I915_READ(ISR);
-       if (OVERLAY_NONPHYSICAL(overlay->dev))
-               error->base = (long) overlay->reg_bo->gtt_offset;
-       else
+       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
                error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+       else
+               error->base = (long) overlay->reg_bo->gtt_offset;
 
        regs = intel_overlay_map_regs_atomic(overlay);
        if (!regs)
                goto err;
 
        memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
-       intel_overlay_unmap_regs_atomic(overlay);
+       intel_overlay_unmap_regs_atomic(overlay, regs);
 
        return error;
 
@@ -1515,3 +1575,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s
        P(UVSCALEV);
 #undef P
 }
+#endif
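
The attrs rework above converts intel_overlay_attrs() to a validate-then-commit shape: every range check now runs before any overlay field is written, and a single ret = 0 after the if/else replaces the per-branch assignments, so a rejected ioctl leaves the previous attributes intact. A minimal standalone sketch of that pattern, using only the ranges visible in the diff; the struct and function names here are invented for illustration.

#include <errno.h>
#include <stdio.h>

struct demo_attrs {
	int brightness;			/* valid range: -128..127 */
	unsigned int contrast;		/* valid range: 0..255 */
	unsigned int saturation;	/* valid range: 0..1023 */
};

/* Illustrative validate-then-commit: reject out-of-range input
 * before any field of 'cur' is touched, mirroring the reordered
 * checks in intel_overlay_attrs() above. */
static int demo_set_attrs(struct demo_attrs *cur, const struct demo_attrs *req)
{
	if (req->brightness < -128 || req->brightness > 127)
		return -EINVAL;
	if (req->contrast > 255)
		return -EINVAL;
	if (req->saturation > 1023)
		return -EINVAL;

	*cur = *req;	/* commit only after every check has passed */
	return 0;
}

int main(void)
{
	struct demo_attrs cur = { .contrast = 75, .saturation = 146 };
	struct demo_attrs bad = { .brightness = 200, .contrast = 300 };

	printf("%d\n", demo_set_attrs(&cur, &bad));	/* -EINVAL */
	printf("%u %u\n", cur.contrast, cur.saturation);	/* still 75 146 */
	return 0;
}
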
index e7f5299d9d5740177ca8722db99892b2a7a856f5..92ff8f38527810657f94ad2f4439732e8dae8fa9 100644 (file)
@@ -30,6 +30,8 @@
 
 #include "intel_drv.h"
 
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
 void
 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
                       struct drm_display_mode *adjusted_mode)
@@ -109,3 +111,110 @@ done:
        dev_priv->pch_pf_pos = (x << 16) | y;
        dev_priv->pch_pf_size = (width << 16) | height;
 }
+
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+       if (IS_GEN2(dev))
+               return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+       return 0;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 max;
+
+       if (HAS_PCH_SPLIT(dev)) {
+               max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+       } else {
+               max = I915_READ(BLC_PWM_CTL);
+               if (IS_PINEVIEW(dev)) {
+                       max >>= 17;
+               } else {
+                       max >>= 16;
+                       if (INTEL_INFO(dev)->gen < 4)
+                               max &= ~1;
+               }
+
+               if (is_backlight_combination_mode(dev))
+                       max *= 0xff;
+       }
+
+       if (max == 0) {
+               /* XXX add code here to query mode clock or hardware clock
+                * and program max PWM appropriately.
+                */
+               DRM_ERROR("fixme: max PWM is zero.\n");
+               max = 1;
+       }
+
+       DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+       return max;
+}
+
+u32 intel_panel_get_backlight(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val;
+
+       if (HAS_PCH_SPLIT(dev)) {
+               val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+       } else {
+               val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+               if (IS_PINEVIEW(dev))
+                       val >>= 1;
+
+               if (is_backlight_combination_mode(dev)) {
+                       u8 lbpc;
+
+                       val &= ~1;
+                       pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+                       val *= lbpc;
+                       val >>= 1;
+               }
+       }
+
+       DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+       return val;
+}
+
+static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+       I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+}
+
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 tmp;
+
+       DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+       if (HAS_PCH_SPLIT(dev))
+               return intel_pch_panel_set_backlight(dev, level);
+
+       if (is_backlight_combination_mode(dev)) {
+               u32 max = intel_panel_get_max_backlight(dev);
+               u8 lbpc;
+
+               lbpc = level * 0xfe / max + 1;
+               level /= lbpc;
+               pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
+       }
+
+       tmp = I915_READ(BLC_PWM_CTL);
+       if (IS_PINEVIEW(dev)) {
+               tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+               level <<= 1;
+       } else
+               tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+       I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
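
The combination-mode path above splits one requested duty cycle across two controls: a coarse multiplier written to the PCI LBPC byte and a fine remainder left in BLC_PWM_CTL, while intel_panel_get_max_backlight() scales max by 0xff to match. A self-contained sketch of just that arithmetic, assuming nothing beyond the formulas shown in the diff (demo_split_level is an invented name, not driver code):

#include <stdio.h>

/* Illustrative split of a backlight level between the LBPC byte and
 * the PWM duty cycle, following intel_panel_set_backlight():
 * lbpc ends up in 1..0xff and level is scaled down to compensate. */
static void demo_split_level(unsigned int level, unsigned int max)
{
	unsigned int lbpc = level * 0xfe / max + 1;
	unsigned int pwm = level / lbpc;

	printf("level %u/%u -> lbpc=%u pwm=%u (lbpc*pwm=%u)\n",
	       level, max, lbpc, pwm, lbpc * pwm);
}

int main(void)
{
	/* max = a 255-step PWM period scaled by 0xff, as
	 * intel_panel_get_max_backlight() does in combination mode */
	unsigned int max = 255 * 0xff;

	demo_split_level(max / 2, max);		/* half brightness */
	demo_split_level(max / 100, max);	/* ~1% brightness */
	return 0;
}
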
index cb3508f78bc350735e16962f8283e598c53deca6..09f2dc353ae239f0d2a6f86a5940c1d7b27656e4 100644 (file)
@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_drm.h"
 #include "i915_trace.h"
+#include "intel_drv.h"
 
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 
 static void
 render_ring_flush(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               u32     invalidate_domains,
-               u32     flush_domains)
+                 struct intel_ring_buffer *ring,
+                 u32   invalidate_domains,
+                 u32   flush_domains)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 cmd;
@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
-               if (!IS_I965G(dev)) {
+               if (INTEL_INFO(dev)->gen < 4) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
        }
 }
 
-static unsigned int render_ring_get_head(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(PRB0_HEAD) & HEAD_ADDR;
-}
-
-static unsigned int render_ring_get_tail(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static void ring_write_tail(struct drm_device *dev,
+                           struct intel_ring_buffer *ring,
+                           u32 value)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+       I915_WRITE_TAIL(ring, value);
 }
 
-static unsigned int render_ring_get_active_head(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct drm_device *dev,
+                              struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+       u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+                       RING_ACTHD(ring->mmio_base) : ACTHD;
 
        return I915_READ(acthd_reg);
 }
 
-static void render_ring_advance_ring(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       I915_WRITE(PRB0_TAIL, ring->tail);
-}
-
 static int init_ring_common(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                           struct intel_ring_buffer *ring)
 {
        u32 head;
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
        obj_priv = to_intel_bo(ring->gem_object);
 
        /* Stop the ring if it's running. */
-       I915_WRITE(ring->regs.ctl, 0);
-       I915_WRITE(ring->regs.head, 0);
-       I915_WRITE(ring->regs.tail, 0);
+       I915_WRITE_CTL(ring, 0);
+       I915_WRITE_HEAD(ring, 0);
+       ring->write_tail(dev, ring, 0);
 
        /* Initialize the ring. */
-       I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
-       head = ring->get_head(dev, ring);
+       I915_WRITE_START(ring, obj_priv->gtt_offset);
+       head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_ERROR("%s head not reset to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
-                               I915_READ(ring->regs.ctl),
-                               I915_READ(ring->regs.head),
-                               I915_READ(ring->regs.tail),
-                               I915_READ(ring->regs.start));
+                               I915_READ_CTL(ring),
+                               I915_READ_HEAD(ring),
+                               I915_READ_TAIL(ring),
+                               I915_READ_START(ring));
 
-               I915_WRITE(ring->regs.head, 0);
+               I915_WRITE_HEAD(ring, 0);
 
                DRM_ERROR("%s head forced to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
-                               I915_READ(ring->regs.ctl),
-                               I915_READ(ring->regs.head),
-                               I915_READ(ring->regs.tail),
-                               I915_READ(ring->regs.start));
+                               I915_READ_CTL(ring),
+                               I915_READ_HEAD(ring),
+                               I915_READ_TAIL(ring),
+                               I915_READ_START(ring));
        }
 
-       I915_WRITE(ring->regs.ctl,
+       I915_WRITE_CTL(ring,
                        ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_NO_REPORT | RING_VALID);
 
-       head = I915_READ(ring->regs.head) & HEAD_ADDR;
+       head = I915_READ_HEAD(ring) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
        if (head != 0) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
-                               I915_READ(ring->regs.ctl),
-                               I915_READ(ring->regs.head),
-                               I915_READ(ring->regs.tail),
-                               I915_READ(ring->regs.start));
+                               I915_READ_CTL(ring),
+                               I915_READ_HEAD(ring),
+                               I915_READ_TAIL(ring),
+                               I915_READ_START(ring));
                return -EIO;
        }
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
-               ring->head = ring->get_head(dev, ring);
-               ring->tail = ring->get_tail(dev, ring);
+               ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+               ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
 }
 
 static int init_render_ring(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                           struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = init_ring_common(dev, ring);
        int mode;
 
-       if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+       if (INTEL_INFO(dev)->gen > 3) {
                mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                if (IS_GEN6(dev))
                        mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
@@ -250,9 +239,8 @@ do {                                                                        \
  */
 static u32
 render_ring_add_request(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               struct drm_file *file_priv,
-               u32 flush_domains)
+                       struct intel_ring_buffer *ring,
+                       u32 flush_domains)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;
@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
 }
 
 static u32
-render_ring_get_gem_seqno(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+render_ring_get_seqno(struct drm_device *dev,
+                     struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        if (HAS_PIPE_CONTROL(dev))
@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
 
 static void
 render_ring_get_user_irq(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                        struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
 
 static void
 render_ring_put_user_irq(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                        struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-static void render_setup_status_page(struct drm_device *dev,
-       struct  intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct drm_device *dev,
+                                 struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        if (IS_GEN6(dev)) {
-               I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
-               I915_READ(HWS_PGA_GEN6); /* posting read */
+               I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
+                          ring->status_page.gfx_addr);
+               I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
        } else {
-               I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-               I915_READ(HWS_PGA); /* posting read */
+               I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+                          ring->status_page.gfx_addr);
+               I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
        }
 
 }
 
-void
+static void
 bsd_ring_flush(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32     invalidate_domains,
@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
        intel_ring_advance(dev, ring);
 }
 
-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       return I915_READ(BSD_RING_ACTHD);
-}
-
-static inline void bsd_ring_advance_ring(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       I915_WRITE(BSD_RING_TAIL, ring->tail);
-}
-
 static int init_bsd_ring(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                        struct intel_ring_buffer *ring)
 {
        return init_ring_common(dev, ring);
 }
 
 static u32
-bsd_ring_add_request(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               struct drm_file *file_priv,
-               u32 flush_domains)
+ring_add_request(struct drm_device *dev,
+                struct intel_ring_buffer *ring,
+                u32 flush_domains)
 {
        u32 seqno;
 
@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
        return seqno;
 }
 
-static void bsd_setup_status_page(struct drm_device *dev,
-               struct  intel_ring_buffer *ring)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
-       I915_READ(BSD_HWS_PGA);
-}
-
 static void
 bsd_ring_get_user_irq(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                     struct intel_ring_buffer *ring)
 {
        /* do nothing */
 }
 static void
 bsd_ring_put_user_irq(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                     struct intel_ring_buffer *ring)
 {
        /* do nothing */
 }
 
 static u32
-bsd_ring_get_gem_seqno(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+ring_status_page_get_seqno(struct drm_device *dev,
+                          struct intel_ring_buffer *ring)
 {
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static int
-bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               struct drm_i915_gem_execbuffer2 *exec,
-               struct drm_clip_rect *cliprects,
-               uint64_t exec_offset)
+ring_dispatch_gem_execbuffer(struct drm_device *dev,
+                            struct intel_ring_buffer *ring,
+                            struct drm_i915_gem_execbuffer2 *exec,
+                            struct drm_clip_rect *cliprects,
+                            uint64_t exec_offset)
 {
        uint32_t exec_start;
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
@@ -488,13 +441,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
        return 0;
 }
 
-
 static int
 render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               struct drm_i915_gem_execbuffer2 *exec,
-               struct drm_clip_rect *cliprects,
-               uint64_t exec_offset)
+                                   struct intel_ring_buffer *ring,
+                                   struct drm_i915_gem_execbuffer2 *exec,
+                                   struct drm_clip_rect *cliprects,
+                                   uint64_t exec_offset)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = exec->num_cliprects;
@@ -523,8 +475,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                        intel_ring_emit(dev, ring, exec_start + exec_len - 4);
                        intel_ring_emit(dev, ring, 0);
                } else {
-                       intel_ring_begin(dev, ring, 4);
-                       if (IS_I965G(dev)) {
+                       intel_ring_begin(dev, ring, 2);
+                       if (INTEL_INFO(dev)->gen >= 4) {
                                intel_ring_emit(dev, ring,
                                                MI_BATCH_BUFFER_START | (2 << 6)
                                                | MI_BATCH_NON_SECURE_I965);
@@ -539,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                intel_ring_advance(dev, ring);
        }
 
-       if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+       if (IS_G4X(dev) || IS_GEN5(dev)) {
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, MI_FLUSH |
                                MI_NO_WRITE_FLUSH |
@@ -553,7 +505,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 }
 
 static void cleanup_status_page(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                               struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
@@ -573,7 +525,7 @@ static void cleanup_status_page(struct drm_device *dev,
 }
 
 static int init_status_page(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                           struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
@@ -603,7 +555,7 @@ static int init_status_page(struct drm_device *dev,
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-       ring->setup_status_page(dev, ring);
+       intel_ring_setup_status_page(dev, ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);
 
@@ -617,15 +569,18 @@ err:
        return ret;
 }
 
-
 int intel_init_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                          struct intel_ring_buffer *ring)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        struct drm_gem_object *obj;
        int ret;
 
        ring->dev = dev;
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&ring->gpu_write_list);
 
        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(dev, ring);
@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 
        ring->gem_object = obj;
 
-       ret = i915_gem_object_pin(obj, ring->alignment);
+       ret = i915_gem_object_pin(obj, PAGE_SIZE);
        if (ret)
                goto err_unref;
 
@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
-               ring->head = ring->get_head(dev, ring);
-               ring->tail = ring->get_tail(dev, ring);
+               ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+               ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
        return ret;
 
 err_unmap:
@@ -691,7 +644,7 @@ err_hws:
 }
 
 void intel_cleanup_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                              struct intel_ring_buffer *ring)
 {
        if (ring->gem_object == NULL)
                return;
@@ -704,8 +657,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
        cleanup_status_page(dev, ring);
 }
 
-int intel_wrap_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct drm_device *dev,
+                                 struct intel_ring_buffer *ring)
 {
        unsigned int *virt;
        int rem;
@@ -731,14 +684,15 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
 }
 
 int intel_wait_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring, int n)
+                          struct intel_ring_buffer *ring, int n)
 {
        unsigned long end;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
        trace_i915_ring_wait_begin (dev);
        end = jiffies + 3 * HZ;
        do {
-               ring->head = ring->get_head(dev, ring);
+               ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
@@ -753,14 +707,15 @@ int intel_wait_ring_buffer(struct drm_device *dev,
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }
 
-               yield();
+               msleep(1);
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end (dev);
        return -EBUSY;
 }
 
 void intel_ring_begin(struct drm_device *dev,
-               struct intel_ring_buffer *ring, int num_dwords)
+                     struct intel_ring_buffer *ring,
+                     int num_dwords)
 {
        int n = 4*num_dwords;
        if (unlikely(ring->tail + n > ring->size))
@@ -772,97 +727,181 @@ void intel_ring_begin(struct drm_device *dev,
 }
 
 void intel_ring_advance(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+                       struct intel_ring_buffer *ring)
 {
        ring->tail &= ring->size - 1;
-       ring->advance_ring(dev, ring);
-}
-
-void intel_fill_struct(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               void *data,
-               unsigned int len)
-{
-       unsigned int *virt = ring->virtual_start + ring->tail;
-       BUG_ON((len&~(4-1)) != 0);
-       intel_ring_begin(dev, ring, len/4);
-       memcpy(virt, data, len);
-       ring->tail += len;
-       ring->tail &= ring->size - 1;
-       ring->space -= len;
-       intel_ring_advance(dev, ring);
+       ring->write_tail(dev, ring, ring->tail);
 }
 
-struct intel_ring_buffer render_ring = {
+static const struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
-       .regs                   = {
-               .ctl = PRB0_CTL,
-               .head = PRB0_HEAD,
-               .tail = PRB0_TAIL,
-               .start = PRB0_START
-       },
-       .ring_flag              = I915_EXEC_RENDER,
+       .id                     = RING_RENDER,
+       .mmio_base              = RENDER_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
-       .alignment              = PAGE_SIZE,
-       .virtual_start          = NULL,
-       .dev                    = NULL,
-       .gem_object             = NULL,
-       .head                   = 0,
-       .tail                   = 0,
-       .space                  = 0,
-       .user_irq_refcount      = 0,
-       .irq_gem_seqno          = 0,
-       .waiting_gem_seqno      = 0,
-       .setup_status_page      = render_setup_status_page,
        .init                   = init_render_ring,
-       .get_head               = render_ring_get_head,
-       .get_tail               = render_ring_get_tail,
-       .get_active_head        = render_ring_get_active_head,
-       .advance_ring           = render_ring_advance_ring,
+       .write_tail             = ring_write_tail,
        .flush                  = render_ring_flush,
        .add_request            = render_ring_add_request,
-       .get_gem_seqno          = render_ring_get_gem_seqno,
+       .get_seqno              = render_ring_get_seqno,
        .user_irq_get           = render_ring_get_user_irq,
        .user_irq_put           = render_ring_put_user_irq,
        .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
-       .status_page            = {NULL, 0, NULL},
-       .map                    = {0,}
 };
 
 /* ring buffer for bit-stream decoder */
 
-struct intel_ring_buffer bsd_ring = {
+static const struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
-       .regs                   = {
-               .ctl = BSD_RING_CTL,
-               .head = BSD_RING_HEAD,
-               .tail = BSD_RING_TAIL,
-               .start = BSD_RING_START
-       },
-       .ring_flag              = I915_EXEC_BSD,
+       .id                     = RING_BSD,
+       .mmio_base              = BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
-       .alignment              = PAGE_SIZE,
-       .virtual_start          = NULL,
-       .dev                    = NULL,
-       .gem_object             = NULL,
-       .head                   = 0,
-       .tail                   = 0,
-       .space                  = 0,
-       .user_irq_refcount      = 0,
-       .irq_gem_seqno          = 0,
-       .waiting_gem_seqno      = 0,
-       .setup_status_page      = bsd_setup_status_page,
        .init                   = init_bsd_ring,
-       .get_head               = bsd_ring_get_head,
-       .get_tail               = bsd_ring_get_tail,
-       .get_active_head        = bsd_ring_get_active_head,
-       .advance_ring           = bsd_ring_advance_ring,
+       .write_tail             = ring_write_tail,
        .flush                  = bsd_ring_flush,
-       .add_request            = bsd_ring_add_request,
-       .get_gem_seqno          = bsd_ring_get_gem_seqno,
+       .add_request            = ring_add_request,
+       .get_seqno              = ring_status_page_get_seqno,
        .user_irq_get           = bsd_ring_get_user_irq,
        .user_irq_put           = bsd_ring_put_user_irq,
-       .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
-       .status_page            = {NULL, 0, NULL},
-       .map                    = {0,}
+       .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
 };
+
+
+static void gen6_bsd_ring_write_tail(struct drm_device *dev,
+                                    struct intel_ring_buffer *ring,
+                                    u32 value)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       /* Every tail move must follow the sequence below */
+       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+              GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+              GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+       I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+                       50))
+               DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+       I915_WRITE_TAIL(ring, value);
+       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+              GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+              GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+}
+
+static void gen6_ring_flush(struct drm_device *dev,
+                           struct intel_ring_buffer *ring,
+                           u32 invalidate_domains,
+                           u32 flush_domains)
+{
+       intel_ring_begin(dev, ring, 4);
+       intel_ring_emit(dev, ring, MI_FLUSH_DW);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_advance(dev, ring);
+}
+
+static int
+gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+                                 struct intel_ring_buffer *ring,
+                                 struct drm_i915_gem_execbuffer2 *exec,
+                                 struct drm_clip_rect *cliprects,
+                                 uint64_t exec_offset)
+{
+       uint32_t exec_start;
+
+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+
+       intel_ring_begin(dev, ring, 2);
+       intel_ring_emit(dev, ring,
+                      MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+       /* bit0-7 is the length on GEN6+ */
+       intel_ring_emit(dev, ring, exec_start);
+       intel_ring_advance(dev, ring);
+
+       return 0;
+}
+
+/* ring buffer for Video Codec for Gen6+ */
+static const struct intel_ring_buffer gen6_bsd_ring = {
+       .name                   = "gen6 bsd ring",
+       .id                     = RING_BSD,
+       .mmio_base              = GEN6_BSD_RING_BASE,
+       .size                   = 32 * PAGE_SIZE,
+       .init                   = init_bsd_ring,
+       .write_tail             = gen6_bsd_ring_write_tail,
+       .flush                  = gen6_ring_flush,
+       .add_request            = ring_add_request,
+       .get_seqno              = ring_status_page_get_seqno,
+       .user_irq_get           = bsd_ring_get_user_irq,
+       .user_irq_put           = bsd_ring_put_user_irq,
+       .dispatch_gem_execbuffer        = gen6_ring_dispatch_gem_execbuffer,
+};
+
+/* Blitter support (SandyBridge+) */
+
+static void
+blt_ring_get_user_irq(struct drm_device *dev,
+                     struct intel_ring_buffer *ring)
+{
+       /* do nothing */
+}
+static void
+blt_ring_put_user_irq(struct drm_device *dev,
+                     struct intel_ring_buffer *ring)
+{
+       /* do nothing */
+}
+
+static const struct intel_ring_buffer gen6_blt_ring = {
+       .name                   = "blt ring",
+       .id                     = RING_BLT,
+       .mmio_base              = BLT_RING_BASE,
+       .size                   = 32 * PAGE_SIZE,
+       .init                   = init_ring_common,
+       .write_tail             = ring_write_tail,
+       .flush                  = gen6_ring_flush,
+       .add_request            = ring_add_request,
+       .get_seqno              = ring_status_page_get_seqno,
+       .user_irq_get           = blt_ring_get_user_irq,
+       .user_irq_put           = blt_ring_put_user_irq,
+       .dispatch_gem_execbuffer        = gen6_ring_dispatch_gem_execbuffer,
+};
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       dev_priv->render_ring = render_ring;
+
+       if (!I915_NEED_GFX_HWS(dev)) {
+               dev_priv->render_ring.status_page.page_addr
+                       = dev_priv->status_page_dmah->vaddr;
+               memset(dev_priv->render_ring.status_page.page_addr,
+                               0, PAGE_SIZE);
+       }
+
+       return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       if (IS_GEN6(dev))
+               dev_priv->bsd_ring = gen6_bsd_ring;
+       else
+               dev_priv->bsd_ring = bsd_ring;
+
+       return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       dev_priv->blt_ring = gen6_blt_ring;
+
+       return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+}
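
Every head/tail refresh in this file now reduces to one shared formula: free space is head minus (tail + 8), wrapped by the ring size, where the eight bytes of slack keep an empty ring (head == tail) distinguishable from a completely full one. A self-contained sketch of that arithmetic, outside the driver:

#include <stdio.h>

/* Illustrative ring free-space math, as computed in init_ring_common(),
 * intel_init_ring_buffer() and intel_wait_ring_buffer() above. */
static int demo_ring_space(unsigned int head, unsigned int tail,
			   unsigned int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;	/* tail is numerically ahead of head */
	return space;
}

int main(void)
{
	printf("%d\n", demo_ring_space(0, 0, 4096));		/* empty ring: 4088 */
	printf("%d\n", demo_ring_space(256, 4000, 4096));	/* wrapped: 344 */
	return 0;
}
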
index 525e7d3edda801582ac5a779de31a8fe43844ae0..a05aff0e5764d67e421a5a4995f2b0b92dfcd925 100644 (file)
@@ -7,25 +7,32 @@ struct  intel_hw_status_page {
        struct          drm_gem_object *obj;
 };
 
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
+#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
+#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
+
 struct drm_i915_gem_execbuffer2;
 struct  intel_ring_buffer {
        const char      *name;
-       struct          ring_regs {
-                       u32 ctl;
-                       u32 head;
-                       u32 tail;
-                       u32 start;
-       } regs;
-       unsigned int    ring_flag;
+       enum intel_ring_id {
+               RING_RENDER = 0x1,
+               RING_BSD = 0x2,
+               RING_BLT = 0x4,
+       } id;
+       u32             mmio_base;
        unsigned long   size;
-       unsigned int    alignment;
        void            *virtual_start;
        struct          drm_device *dev;
        struct          drm_gem_object *gem_object;
 
        unsigned int    head;
        unsigned int    tail;
-       unsigned int    space;
+       int             space;
        struct intel_hw_status_page status_page;
 
        u32             irq_gem_seqno;          /* last seq seem at irq time */
@@ -35,30 +42,22 @@ struct  intel_ring_buffer {
                        struct intel_ring_buffer *ring);
        void            (*user_irq_put)(struct drm_device *dev,
                        struct intel_ring_buffer *ring);
-       void            (*setup_status_page)(struct drm_device *dev,
-                       struct  intel_ring_buffer *ring);
 
        int             (*init)(struct drm_device *dev,
                        struct intel_ring_buffer *ring);
 
-       unsigned int    (*get_head)(struct drm_device *dev,
-                       struct intel_ring_buffer *ring);
-       unsigned int    (*get_tail)(struct drm_device *dev,
-                       struct intel_ring_buffer *ring);
-       unsigned int    (*get_active_head)(struct drm_device *dev,
-                       struct intel_ring_buffer *ring);
-       void            (*advance_ring)(struct drm_device *dev,
-                       struct intel_ring_buffer *ring);
+       void            (*write_tail)(struct drm_device *dev,
+                                     struct intel_ring_buffer *ring,
+                                     u32 value);
        void            (*flush)(struct drm_device *dev,
                        struct intel_ring_buffer *ring,
                        u32     invalidate_domains,
                        u32     flush_domains);
        u32             (*add_request)(struct drm_device *dev,
                        struct intel_ring_buffer *ring,
-                       struct drm_file *file_priv,
                        u32 flush_domains);
-       u32             (*get_gem_seqno)(struct drm_device *dev,
-                       struct intel_ring_buffer *ring);
+       u32             (*get_seqno)(struct drm_device *dev,
+                                    struct intel_ring_buffer *ring);
        int             (*dispatch_gem_execbuffer)(struct drm_device *dev,
                        struct intel_ring_buffer *ring,
                        struct drm_i915_gem_execbuffer2 *exec,
@@ -83,6 +82,20 @@ struct  intel_ring_buffer {
         */
        struct list_head request_list;
 
+       /**
+        * List of objects currently pending a GPU write flush.
+        *
+        * All elements on this list will belong to either the
+        * active_list or flushing_list, last_rendering_seqno can
+        * be used to differentiate between the two elements.
+        */
+       struct list_head gpu_write_list;
+
+       /**
+        * Do we have some not yet emitted requests outstanding?
+        */
+       bool outstanding_lazy_request;
+
        wait_queue_head_t irq_queue;
        drm_local_map_t map;
 };
@@ -96,15 +109,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 }
 
 int intel_init_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring);
+                          struct intel_ring_buffer *ring);
 void intel_cleanup_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring);
+                              struct intel_ring_buffer *ring);
 int intel_wait_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring, int n);
-int intel_wrap_ring_buffer(struct drm_device *dev,
-               struct intel_ring_buffer *ring);
+                          struct intel_ring_buffer *ring, int n);
 void intel_ring_begin(struct drm_device *dev,
-               struct intel_ring_buffer *ring, int n);
+                     struct intel_ring_buffer *ring, int n);
 
 static inline void intel_ring_emit(struct drm_device *dev,
                                   struct intel_ring_buffer *ring,
@@ -115,17 +126,19 @@ static inline void intel_ring_emit(struct drm_device *dev,
        ring->tail += 4;
 }
 
-void intel_fill_struct(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               void *data,
-               unsigned int len);
 void intel_ring_advance(struct drm_device *dev,
                struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct drm_device *dev,
                struct intel_ring_buffer *ring);
 
-extern struct intel_ring_buffer render_ring;
-extern struct intel_ring_buffer bsd_ring;
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct drm_device *dev,
+                              struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct drm_device *dev,
+                                 struct intel_ring_buffer *ring);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
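
The I915_READ_/I915_WRITE_ macros at the top of this header carry the structural change of the patch: instead of a per-ring table of register addresses, each ring stores one mmio_base and every control register lives at a fixed displacement from it. A small sketch of the idea; the 0x30..0x3c displacements and base addresses below are assumptions chosen to echo the old PRB0/BSD register layout, not authoritative values.

#include <stdio.h>

/* Illustrative stand-ins for the RING_TAIL()/RING_HEAD()/RING_START()/
 * RING_CTL() helpers the macros above rely on: one base per ring,
 * fixed per-register displacements. */
#define DEMO_RING_TAIL(base)	((base) + 0x30)
#define DEMO_RING_HEAD(base)	((base) + 0x34)
#define DEMO_RING_START(base)	((base) + 0x38)
#define DEMO_RING_CTL(base)	((base) + 0x3c)

#define DEMO_RENDER_BASE	0x02000	/* assumed render ring base */
#define DEMO_BSD_BASE		0x04000	/* assumed BSD ring base */

int main(void)
{
	printf("render: tail %#x ctl %#x\n",
	       DEMO_RING_TAIL(DEMO_RENDER_BASE),
	       DEMO_RING_CTL(DEMO_RENDER_BASE));
	printf("bsd:    tail %#x ctl %#x\n",
	       DEMO_RING_TAIL(DEMO_BSD_BASE),
	       DEMO_RING_CTL(DEMO_BSD_BASE));
	return 0;
}
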
index ee73e428a84a800dd8d70a1747457033f7e8da8e..de158b76bcd572da1e261713c66d43fa4428f72c 100644 (file)
@@ -65,8 +65,11 @@ static const char *tv_format_names[] = {
 struct intel_sdvo {
        struct intel_encoder base;
 
+       struct i2c_adapter *i2c;
        u8 slave_addr;
 
+       struct i2c_adapter ddc;
+
        /* Register for the SDVO device: SDVOB or SDVOC */
        int sdvo_reg;
 
@@ -104,34 +107,24 @@ struct intel_sdvo {
         * This is set if we treat the device as HDMI, instead of DVI.
         */
        bool is_hdmi;
+       bool has_audio;
 
        /**
-        * This is set if we detect output of sdvo device as LVDS.
+        * This is set if we detect output of sdvo device as LVDS and
+        * have a valid fixed mode to use with the panel.
         */
        bool is_lvds;
 
-       /**
-        * This is sdvo flags for input timing.
-        */
-       uint8_t sdvo_flags;
-
        /**
         * This is sdvo fixed panel mode pointer
         */
        struct drm_display_mode *sdvo_lvds_fixed_mode;
 
-       /*
-        * supported encoding mode, used to determine whether HDMI is
-        * supported
-        */
-       struct intel_sdvo_encode encode;
-
        /* DDC bus used by this SDVO encoder */
        uint8_t ddc_bus;
 
-       /* Mac mini hack -- use the same DDC as the analog connector */
-       struct i2c_adapter *analog_ddc_bus;
-
+       /* Input timings for adjusted_mode */
+       struct intel_sdvo_dtd input_dtd;
 };
 
 struct intel_sdvo_connector {
@@ -140,11 +133,15 @@ struct intel_sdvo_connector {
        /* Mark the type of connector */
        uint16_t output_flag;
 
+       int force_audio;
+
        /* This contains all current supported TV format */
        u8 tv_format_supported[TV_FORMAT_NUM];
        int   format_supported_num;
        struct drm_property *tv_format;
 
+       struct drm_property *force_audio_property;
+
        /* add the property for the SDVO-TV */
        struct drm_property *left;
        struct drm_property *right;
@@ -186,9 +183,15 @@ struct intel_sdvo_connector {
        u32     cur_dot_crawl,  max_dot_crawl;
 };
 
-static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
+static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+{
+       return container_of(encoder, struct intel_sdvo, base.base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
 {
-       return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
+       return container_of(intel_attached_encoder(connector),
+                           struct intel_sdvo, base);
 }
 
 static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -213,7 +216,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
  */
 static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 {
-       struct drm_device *dev = intel_sdvo->base.enc.dev;
+       struct drm_device *dev = intel_sdvo->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 bval = val, cval = val;
        int i;
@@ -245,49 +248,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 
 static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
 {
-       u8 out_buf[2] = { addr, 0 };
-       u8 buf[2];
        struct i2c_msg msgs[] = {
                {
-                       .addr = intel_sdvo->slave_addr >> 1,
+                       .addr = intel_sdvo->slave_addr,
                        .flags = 0,
                        .len = 1,
-                       .buf = out_buf,
+                       .buf = &addr,
                },
                {
-                       .addr = intel_sdvo->slave_addr >> 1,
+                       .addr = intel_sdvo->slave_addr,
                        .flags = I2C_M_RD,
                        .len = 1,
-                       .buf = buf,
+                       .buf = ch,
                }
        };
        int ret;
 
-       if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
-       {
-               *ch = buf[0];
+       if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
                return true;
-       }
 
        DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
        return false;
 }
 
-static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
-{
-       u8 out_buf[2] = { addr, ch };
-       struct i2c_msg msgs[] = {
-               {
-                       .addr = intel_sdvo->slave_addr >> 1,
-                       .flags = 0,
-                       .len = 2,
-                       .buf = out_buf,
-               }
-       };
-
-       return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
-}
-
 #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
 /** Mapping of command numbers to names, for debug output */
 static const struct _sdvo_cmd_name {
@@ -432,22 +415,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
        DRM_LOG_KMS("\n");
 }
 
-static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
-                                const void *args, int args_len)
-{
-       int i;
-
-       intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
-
-       for (i = 0; i < args_len; i++) {
-               if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
-                                          ((u8*)args)[i]))
-                       return false;
-       }
-
-       return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
-}
-
 static const char *cmd_status_names[] = {
        "Power on",
        "Success",
@@ -458,54 +425,115 @@ static const char *cmd_status_names[] = {
        "Scaling not supported"
 };
 
-static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
-                                     void *response, int response_len,
-                                     u8 status)
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+                                const void *args, int args_len)
 {
-       int i;
+       u8 buf[args_len*2 + 2], status;
+       struct i2c_msg msgs[args_len + 3];
+       int i, ret;
 
-       DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
-       for (i = 0; i < response_len; i++)
-               DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
-       for (; i < 8; i++)
-               DRM_LOG_KMS("   ");
-       if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
-               DRM_LOG_KMS("(%s)", cmd_status_names[status]);
-       else
-               DRM_LOG_KMS("(??? %d)", status);
-       DRM_LOG_KMS("\n");
+       intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
+       for (i = 0; i < args_len; i++) {
+               msgs[i].addr = intel_sdvo->slave_addr;
+               msgs[i].flags = 0;
+               msgs[i].len = 2;
+               msgs[i].buf = buf + 2*i;
+               buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+               buf[2*i + 1] = ((u8*)args)[i];
+       }
+       msgs[i].addr = intel_sdvo->slave_addr;
+       msgs[i].flags = 0;
+       msgs[i].len = 2;
+       msgs[i].buf = buf + 2*i;
+       buf[2*i + 0] = SDVO_I2C_OPCODE;
+       buf[2*i + 1] = cmd;
+
+       /* the following two are to read the response */
+       status = SDVO_I2C_CMD_STATUS;
+       msgs[i+1].addr = intel_sdvo->slave_addr;
+       msgs[i+1].flags = 0;
+       msgs[i+1].len = 1;
+       msgs[i+1].buf = &status;
+
+       msgs[i+2].addr = intel_sdvo->slave_addr;
+       msgs[i+2].flags = I2C_M_RD;
+       msgs[i+2].len = 1;
+       msgs[i+2].buf = &status;
+
+       ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+               return false;
+       }
+       if (ret != i+3) {
+               /* failure in I2C transfer */
+               DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+               return false;
+       }
+
+       i = 3;
+       while (status == SDVO_CMD_STATUS_PENDING && i--) {
+               if (!intel_sdvo_read_byte(intel_sdvo,
+                                         SDVO_I2C_CMD_STATUS,
+                                         &status))
+                       return false;
+       }
+       if (status != SDVO_CMD_STATUS_SUCCESS) {
+               DRM_DEBUG_KMS("command returns response %s [%d]\n",
+                             status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
+                             status);
+               return false;
+       }
+
+       return true;
 }
 
 static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
                                     void *response, int response_len)
 {
-       int i;
+       u8 retry = 5;
        u8 status;
-       u8 retry = 50;
-
-       while (retry--) {
-               /* Read the command response */
-               for (i = 0; i < response_len; i++) {
-                       if (!intel_sdvo_read_byte(intel_sdvo,
-                                                 SDVO_I2C_RETURN_0 + i,
-                                                 &((u8 *)response)[i]))
-                               return false;
-               }
+       int i;
 
-               /* read the return status */
-               if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
+       /*
+        * The documentation states that all commands will be
+        * processed within 15µs, and that we need only poll
+        * the status byte a maximum of 3 times in order for the
+        * command to be complete.
+        *
+        * Check 5 times in case the hardware failed to read the docs.
+        */
+       do {
+               if (!intel_sdvo_read_byte(intel_sdvo,
+                                         SDVO_I2C_CMD_STATUS,
                                          &status))
                        return false;
+       } while (status == SDVO_CMD_STATUS_PENDING && --retry);
 
-               intel_sdvo_debug_response(intel_sdvo, response, response_len,
-                                         status);
-               if (status != SDVO_CMD_STATUS_PENDING)
-                       break;
+       DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+       if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+               DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+       else
+               DRM_LOG_KMS("(??? %d)", status);
 
-               mdelay(50);
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               goto log_fail;
+
+       /* Read the command response */
+       for (i = 0; i < response_len; i++) {
+               if (!intel_sdvo_read_byte(intel_sdvo,
+                                         SDVO_I2C_RETURN_0 + i,
+                                         &((u8 *)response)[i]))
+                       goto log_fail;
+               DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
        }
+       DRM_LOG_KMS("\n");
+       return true;
 
-       return status == SDVO_CMD_STATUS_SUCCESS;
+log_fail:
+       DRM_LOG_KMS("\n");
+       return false;
 }
 
 static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -518,71 +546,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
                return 4;
 }
 
-/**
- * Try to read the response after issuie the DDC switch command. But it
- * is noted that we must do the action of reading response and issuing DDC
- * switch command in one I2C transaction. Otherwise when we try to start
- * another I2C transaction after issuing the DDC bus switch, it will be
- * switched to the internal SDVO register.
- */
-static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
-                                             u8 target)
+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+                                             u8 ddc_bus)
 {
-       u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
-       struct i2c_msg msgs[] = {
-               {
-                       .addr = intel_sdvo->slave_addr >> 1,
-                       .flags = 0,
-                       .len = 2,
-                       .buf = out_buf,
-               },
-               /* the following two are to read the response */
-               {
-                       .addr = intel_sdvo->slave_addr >> 1,
-                       .flags = 0,
-                       .len = 1,
-                       .buf = cmd_buf,
-               },
-               {
-                       .addr = intel_sdvo->slave_addr >> 1,
-                       .flags = I2C_M_RD,
-                       .len = 1,
-                       .buf = ret_value,
-               },
-       };
-
-       intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
-                                       &target, 1);
-       /* write the DDC switch command argument */
-       intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
-
-       out_buf[0] = SDVO_I2C_OPCODE;
-       out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
-       cmd_buf[0] = SDVO_I2C_CMD_STATUS;
-       cmd_buf[1] = 0;
-       ret_value[0] = 0;
-       ret_value[1] = 0;
-
-       ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
-       if (ret != 3) {
-               /* failure in I2C transfer */
-               DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
-               return;
-       }
-       if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
-               DRM_DEBUG_KMS("DDC switch command returns response %d\n",
-                                       ret_value[0]);
-               return;
-       }
-       return;
+       return intel_sdvo_write_cmd(intel_sdvo,
+                                   SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+                                   &ddc_bus, 1);
 }
 
 static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
 {
-       if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
-               return false;
-
-       return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+       return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
 }
 
 static bool
@@ -819,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
                mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
-static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
-                                      struct intel_sdvo_encode *encode)
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
 {
-       if (intel_sdvo_get_value(intel_sdvo,
-                                 SDVO_CMD_GET_SUPP_ENCODE,
-                                 encode, sizeof(*encode)))
-               return true;
+       struct intel_sdvo_encode encode;
 
-       /* non-support means DVI */
-       memset(encode, 0, sizeof(*encode));
-       return false;
+       return intel_sdvo_get_value(intel_sdvo,
+                                 SDVO_CMD_GET_SUPP_ENCODE,
+                                 &encode, sizeof(encode));
 }
 
 static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
@@ -874,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
 }
 #endif
 
-static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
-                                   int index,
-                                   uint8_t *data, int8_t size, uint8_t tx_rate)
-{
-    uint8_t set_buf_index[2];
-
-    set_buf_index[0] = index;
-    set_buf_index[1] = 0;
-
-    if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
-                             set_buf_index, 2))
-           return false;
-
-    for (; size > 0; size -= 8) {
-       if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
-               return false;
-
-       data += 8;
-    }
-
-    return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
-}
-
-static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
-{
-       uint8_t csum = 0;
-       int i;
-
-       for (i = 0; i < size; i++)
-               csum += data[i];
-
-       return 0x100 - csum;
-}
-
-#define DIP_TYPE_AVI   0x82
-#define DIP_VERSION_AVI        0x2
-#define DIP_LEN_AVI    13
-
-struct dip_infoframe {
-       uint8_t type;
-       uint8_t version;
-       uint8_t len;
-       uint8_t checksum;
-       union {
-               struct {
-                       /* Packet Byte #1 */
-                       uint8_t S:2;
-                       uint8_t B:2;
-                       uint8_t A:1;
-                       uint8_t Y:2;
-                       uint8_t rsvd1:1;
-                       /* Packet Byte #2 */
-                       uint8_t R:4;
-                       uint8_t M:2;
-                       uint8_t C:2;
-                       /* Packet Byte #3 */
-                       uint8_t SC:2;
-                       uint8_t Q:2;
-                       uint8_t EC:3;
-                       uint8_t ITC:1;
-                       /* Packet Byte #4 */
-                       uint8_t VIC:7;
-                       uint8_t rsvd2:1;
-                       /* Packet Byte #5 */
-                       uint8_t PR:4;
-                       uint8_t rsvd3:4;
-                       /* Packet Byte #6~13 */
-                       uint16_t top_bar_end;
-                       uint16_t bottom_bar_start;
-                       uint16_t left_bar_end;
-                       uint16_t right_bar_start;
-               } avi;
-               struct {
-                       /* Packet Byte #1 */
-                       uint8_t channel_count:3;
-                       uint8_t rsvd1:1;
-                       uint8_t coding_type:4;
-                       /* Packet Byte #2 */
-                       uint8_t sample_size:2; /* SS0, SS1 */
-                       uint8_t sample_frequency:3;
-                       uint8_t rsvd2:3;
-                       /* Packet Byte #3 */
-                       uint8_t coding_type_private:5;
-                       uint8_t rsvd3:3;
-                       /* Packet Byte #4 */
-                       uint8_t channel_allocation;
-                       /* Packet Byte #5 */
-                       uint8_t rsvd4:3;
-                       uint8_t level_shift:4;
-                       uint8_t downmix_inhibit:1;
-               } audio;
-               uint8_t payload[28];
-       } __attribute__ ((packed)) u;
-} __attribute__((packed));
-
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
-                                        struct drm_display_mode * mode)
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
 {
        struct dip_infoframe avi_if = {
                .type = DIP_TYPE_AVI,
-               .version = DIP_VERSION_AVI,
+               .ver = DIP_VERSION_AVI,
                .len = DIP_LEN_AVI,
        };
+       uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+       uint8_t set_buf_index[2] = { 1, 0 };
+       uint64_t *data = (uint64_t *)&avi_if;
+       unsigned i;
+
+       intel_dip_infoframe_csum(&avi_if);
+
+       if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+                                 set_buf_index, 2))
+               return false;
 
-       avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
-                                                   4 + avi_if.len);
-       return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
-                                      4 + avi_if.len,
-                                      SDVO_HBUF_TX_VSYNC);
+       for (i = 0; i < sizeof(avi_if); i += 8) {
+               if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+                                         data, 8))
+                       return false;
+               data++;
+       }
+
+       return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+                                   &tx_rate, 1);
 }
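
The deleted intel_sdvo_calc_hbuf_csum() and its replacement intel_dip_infoframe_csum() implement the same DIP rule: the checksum byte is chosen so that all header and payload bytes sum to zero modulo 256. A standalone model of that arithmetic, using the AVI constants from the removed block (type 0x82, version 0x2, length 13, so 4 + 13 = 17 bytes):

```c
#include <stdint.h>
#include <stdio.h>

/* Choose the checksum byte so all infoframe bytes sum to 0 (mod 256). */
static uint8_t dip_csum(const uint8_t *data, int size)
{
	uint8_t csum = 0;
	int i;

	for (i = 0; i < size; i++)
		csum += data[i];

	return 0x100 - csum;	/* equivalent to -csum in 8-bit arithmetic */
}

int main(void)
{
	/* type, version, len, checksum slot (0), then 13 payload bytes */
	uint8_t frame[17] = { 0x82, 0x02, 0x0d, 0x00 };
	uint8_t total = 0;
	int i;

	frame[3] = dip_csum(frame, sizeof(frame));
	for (i = 0; i < (int)sizeof(frame); i++)
		total += frame[i];
	printf("checksum %#x, total mod 256 = %u\n", frame[3], total);
	return 0;
}
```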
 
 static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
@@ -1022,8 +910,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
                                        struct drm_display_mode *mode,
                                        struct drm_display_mode *adjusted_mode)
 {
-       struct intel_sdvo_dtd input_dtd;
-
        /* Reset the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
                return false;
@@ -1035,14 +921,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
                return false;
 
        if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
-                                                  &input_dtd))
+                                                  &intel_sdvo->input_dtd))
                return false;
 
-       intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
-       intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
+       intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
 
        drm_mode_set_crtcinfo(adjusted_mode, 0);
-       mode->clock = adjusted_mode->clock;
        return true;
 }
 
@@ -1050,7 +934,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
                                  struct drm_display_mode *mode,
                                  struct drm_display_mode *adjusted_mode)
 {
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+       struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+       int multiplier;
 
        /* We need to construct preferred input timings based on our
         * output timings.  To do that, we have to set the output
@@ -1065,10 +950,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
                                                             mode,
                                                             adjusted_mode);
        } else if (intel_sdvo->is_lvds) {
-               drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
-
                if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
-                                                           intel_sdvo->sdvo_lvds_fixed_mode))
+                                                            intel_sdvo->sdvo_lvds_fixed_mode))
                        return false;
 
                (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
@@ -1077,9 +960,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
        }
 
        /* Make the CRTC code factor in the SDVO pixel multiplier.  The
-        * SDVO device will be told of the multiplier during mode_set.
+        * SDVO device will factor out the multiplier during mode_set.
         */
-       adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+       multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
+       intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
 
        return true;
 }
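
mode_fixup now records the multiplier on adjusted_mode through intel_mode_set_pixel_multiplier() instead of scaling adjusted_mode->clock in place, and mode_set reads it back with intel_mode_get_pixel_multiplier(). The selection rule itself is the 1x/2x/4x split whose tail (`return 4;`) is visible at the top of this hunk; a sketch of it, assuming the driver's 100MHz and 50MHz thresholds:

```c
#include <stdio.h>

/* Dot clocks below the SDVO link minimum are sent at 2x or 4x and the
 * device divides them back down; the 100/50 MHz thresholds are assumed. */
static int pixel_multiplier(int clock_khz)
{
	if (clock_khz >= 100000)
		return 1;
	if (clock_khz >= 50000)
		return 2;
	return 4;
}

int main(void)
{
	int clocks[] = { 148500, 74250, 25175 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%d kHz -> %dx\n", clocks[i], pixel_multiplier(clocks[i]));
	return 0;
}
```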
@@ -1092,11 +976,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-       u32 sdvox = 0;
-       int sdvo_pixel_multiply, rate;
+       struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+       u32 sdvox;
        struct intel_sdvo_in_out_map in_out;
        struct intel_sdvo_dtd input_dtd;
+       int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+       int rate;
 
        if (!mode)
                return;
@@ -1114,28 +999,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                             SDVO_CMD_SET_IN_OUT_MAP,
                             &in_out, sizeof(in_out));
 
-       if (intel_sdvo->is_hdmi) {
-               if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
-                       return;
-
-               sdvox |= SDVO_AUDIO_ENABLE;
-       }
+       /* Set the output timings to the screen */
+       if (!intel_sdvo_set_target_output(intel_sdvo,
+                                         intel_sdvo->attached_output))
+               return;
 
        /* We have tried to get the input timing in mode_fixup, and filled it into
-          adjusted_mode */
-       intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
-       if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
-               input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
-
-       /* If it's a TV, we already set the output timing in mode_fixup.
-        * Otherwise, the output timing is equal to the input timing.
+        * adjusted_mode.
         */
-       if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
+       if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
+               input_dtd = intel_sdvo->input_dtd;
+       } else {
                /* Set the output timing to the screen */
                if (!intel_sdvo_set_target_output(intel_sdvo,
                                                  intel_sdvo->attached_output))
                        return;
 
+               intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
                (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
        }
 
@@ -1143,31 +1023,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        if (!intel_sdvo_set_target_input(intel_sdvo))
                return;
 
-       if (intel_sdvo->is_tv) {
-               if (!intel_sdvo_set_tv_format(intel_sdvo))
-                       return;
-       }
+       if (intel_sdvo->is_hdmi &&
+           !intel_sdvo_set_avi_infoframe(intel_sdvo))
+               return;
 
-       /* We would like to use intel_sdvo_create_preferred_input_timing() to
-        * provide the device with a timing it can support, if it supports that
-        * feature.  However, presumably we would need to adjust the CRTC to
-        * output the preferred timing, and we don't support that currently.
-        */
-#if 0
-       success = intel_sdvo_create_preferred_input_timing(encoder, clock,
-                                                          width, height);
-       if (success) {
-               struct intel_sdvo_dtd *input_dtd;
+       if (intel_sdvo->is_tv &&
+           !intel_sdvo_set_tv_format(intel_sdvo))
+               return;
 
-               intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
-               intel_sdvo_set_input_timing(encoder, &input_dtd);
-       }
-#else
        (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
-#endif
 
-       sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
-       switch (sdvo_pixel_multiply) {
+       switch (pixel_multiplier) {
+       default:
        case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
        case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
        case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1176,14 +1043,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                return;
 
        /* Set the SDVO control regs. */
-       if (IS_I965G(dev)) {
-               sdvox |= SDVO_BORDER_ENABLE;
+       if (INTEL_INFO(dev)->gen >= 4) {
+               sdvox = SDVO_BORDER_ENABLE;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
        } else {
-               sdvox |= I915_READ(intel_sdvo->sdvo_reg);
+               sdvox = I915_READ(intel_sdvo->sdvo_reg);
                switch (intel_sdvo->sdvo_reg) {
                case SDVOB:
                        sdvox &= SDVOB_PRESERVE_MASK;
@@ -1196,16 +1063,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        }
        if (intel_crtc->pipe == 1)
                sdvox |= SDVO_PIPE_B_SELECT;
+       if (intel_sdvo->has_audio)
+               sdvox |= SDVO_AUDIO_ENABLE;
 
-       if (IS_I965G(dev)) {
+       if (INTEL_INFO(dev)->gen >= 4) {
                /* done in crtc_mode_set as the dpll_md reg must be written early */
        } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
                /* done in crtc_mode_set as it lives inside the dpll register */
        } else {
-               sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+               sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
        }
 
-       if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
+       if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
                sdvox |= SDVO_STALL_SELECT;
        intel_sdvo_write_sdvox(intel_sdvo, sdvox);
 }
@@ -1214,7 +1083,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 {
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+       struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        u32 temp;
 
@@ -1260,8 +1129,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 static int intel_sdvo_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+       struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
@@ -1285,7 +1153,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
 
 static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
 {
-       return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
+       if (!intel_sdvo_get_value(intel_sdvo,
+                                 SDVO_CMD_GET_DEVICE_CAPS,
+                                 caps, sizeof(*caps)))
+               return false;
+
+       DRM_DEBUG_KMS("SDVO capabilities:\n"
+                     "  vendor_id: %d\n"
+                     "  device_id: %d\n"
+                     "  device_rev_id: %d\n"
+                     "  sdvo_version_major: %d\n"
+                     "  sdvo_version_minor: %d\n"
+                     "  sdvo_inputs_mask: %d\n"
+                     "  smooth_scaling: %d\n"
+                     "  sharp_scaling: %d\n"
+                     "  up_scaling: %d\n"
+                     "  down_scaling: %d\n"
+                     "  stall_support: %d\n"
+                     "  output_flags: %d\n",
+                     caps->vendor_id,
+                     caps->device_id,
+                     caps->device_rev_id,
+                     caps->sdvo_version_major,
+                     caps->sdvo_version_minor,
+                     caps->sdvo_inputs_mask,
+                     caps->smooth_scaling,
+                     caps->sharp_scaling,
+                     caps->up_scaling,
+                     caps->down_scaling,
+                     caps->stall_support,
+                     caps->output_flags);
+
+       return true;
 }
 
 /* No use! */
@@ -1389,22 +1288,33 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
        return (caps > 1);
 }
 
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+       struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+       return drm_get_edid(connector, &sdvo->ddc);
+}
+
 static struct drm_connector *
 intel_find_analog_connector(struct drm_device *dev)
 {
        struct drm_connector *connector;
-       struct drm_encoder *encoder;
-       struct intel_sdvo *intel_sdvo;
-
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               intel_sdvo = enc_to_intel_sdvo(encoder);
-               if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
-                       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                               if (encoder == intel_attached_encoder(connector))
+       struct intel_sdvo *encoder;
+
+       list_for_each_entry(encoder,
+                           &dev->mode_config.encoder_list,
+                           base.base.head) {
+               if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
+                       list_for_each_entry(connector,
+                                           &dev->mode_config.connector_list,
+                                           head) {
+                               if (&encoder->base ==
+                                   intel_attached_encoder(connector))
                                        return connector;
                        }
                }
        }
+
        return NULL;
 }
 
@@ -1424,64 +1334,72 @@ intel_analog_is_connected(struct drm_device *dev)
        return true;
 }
 
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+       if (!intel_analog_is_connected(connector->dev))
+               return NULL;
+
+       return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
 enum drm_connector_status
 intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-       struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-       enum drm_connector_status status = connector_status_connected;
-       struct edid *edid = NULL;
+       struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+       enum drm_connector_status status;
+       struct edid *edid;
 
-       edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+       edid = intel_sdvo_get_edid(connector);
 
-       /* This is only applied to SDVO cards with multiple outputs */
        if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
-               uint8_t saved_ddc, temp_ddc;
-               saved_ddc = intel_sdvo->ddc_bus;
-               temp_ddc = intel_sdvo->ddc_bus >> 1;
+               u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
                /*
                 * Don't use 1 as the argument of the DDC bus switch to get
                 * the EDID; it is used for the SDVO SPD ROM.
                 */
-               while(temp_ddc > 1) {
-                       intel_sdvo->ddc_bus = temp_ddc;
-                       edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
-                       if (edid) {
-                               /*
-                                * When we can get the EDID, maybe it is the
-                                * correct DDC bus. Update it.
-                                */
-                               intel_sdvo->ddc_bus = temp_ddc;
+               for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+                       intel_sdvo->ddc_bus = ddc;
+                       edid = intel_sdvo_get_edid(connector);
+                       if (edid)
                                break;
-                       }
-                       temp_ddc >>= 1;
                }
+               /*
+                * If we found the EDID on the other bus,
+                * assume that is the correct DDC bus.
+                */
                if (edid == NULL)
                        intel_sdvo->ddc_bus = saved_ddc;
        }
-       /* when there is no edid and no monitor is connected with VGA
-        * port, try to use the CRT ddc to read the EDID for DVI-connector
+
+       /*
+        * When there is no EDID and no monitor is connected to the VGA
+        * port, try the CRT DDC to read the EDID for the DVI connector.
         */
-       if (edid == NULL && intel_sdvo->analog_ddc_bus &&
-           !intel_analog_is_connected(connector->dev))
-               edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
+       if (edid == NULL)
+               edid = intel_sdvo_get_analog_edid(connector);
 
+       status = connector_status_unknown;
        if (edid != NULL) {
-               bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-               bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
-
                /* DDC bus is shared, match EDID to connector type */
-               if (is_digital && need_digital)
+               if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+                       status = connector_status_connected;
                        intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
-               else if (is_digital != need_digital)
-                       status = connector_status_disconnected;
-
+                       intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
+               }
                connector->display_info.raw_edid = NULL;
-       } else
-               status = connector_status_disconnected;
-       
-       kfree(edid);
+               kfree(edid);
+       }
+
+       if (status == connector_status_connected) {
+               struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+               if (intel_sdvo_connector->force_audio)
+                       intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+       }
 
        return status;
 }
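
The tidied multifunc fallback walks candidate DDC switch values by shifting the current one right, skips value 1 (the SPD ROM), and restores the saved value if nothing answered. A standalone model of that walk, assuming ddc_bus is a one-hot bus-select value (the probe_edid() stub is hypothetical):

```c
#include <stdbool.h>
#include <stdio.h>

static bool probe_edid(unsigned bus)
{
	return bus == 2;	/* pretend only this bus has a monitor */
}

/* Try each lower one-hot candidate, never 1 (SPD ROM); keep the first
 * bus that yields an EDID, otherwise restore the original selection. */
static unsigned pick_ddc_bus(unsigned current)
{
	unsigned saved = current, ddc;

	for (ddc = current >> 1; ddc > 1; ddc >>= 1)
		if (probe_edid(ddc))
			return ddc;
	return saved;
}

int main(void)
{
	printf("selected bus %u\n", pick_ddc_bus(8));	/* probes 4, then 2 */
	return 0;
}
```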
@@ -1490,13 +1408,12 @@ static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
        uint16_t response;
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+       struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        enum drm_connector_status ret;
 
        if (!intel_sdvo_write_cmd(intel_sdvo,
-                            SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+                                 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
                return connector_status_unknown;
        if (intel_sdvo->is_tv) {
                /* add 30ms delay when the output type is SDVO-TV */
@@ -1505,7 +1422,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
        if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
                return connector_status_unknown;
 
-       DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
+       DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+                     response & 0xff, response >> 8,
+                     intel_sdvo_connector->output_flag);
 
        if (response == 0)
                return connector_status_disconnected;
@@ -1538,12 +1457,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 
 static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-       int num_modes;
+       struct edid *edid;
 
        /* set the bus switch and get the modes */
-       num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+       edid = intel_sdvo_get_edid(connector);
 
        /*
         * Mac mini hack.  On this device, the DVI-I connector shares one DDC
@@ -1551,12 +1468,14 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
         * DDC fails, check to see if the analog output is disconnected, in
         * which case we'll look there for the digital DDC data.
         */
-       if (num_modes == 0 &&
-           intel_sdvo->analog_ddc_bus &&
-           !intel_analog_is_connected(connector->dev)) {
-               /* Switch to the analog ddc bus and try that
-                */
-               (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
+       if (edid == NULL)
+               edid = intel_sdvo_get_analog_edid(connector);
+
+       if (edid != NULL) {
+               drm_mode_connector_update_edid_property(connector, edid);
+               drm_add_edid_modes(connector, edid);
+               connector->display_info.raw_edid = NULL;
+               kfree(edid);
        }
 }
 
@@ -1627,8 +1546,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
 
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+       struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        struct intel_sdvo_sdtv_resolution_request tv_res;
        uint32_t reply = 0, format_map = 0;
        int i;
@@ -1644,7 +1562,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
                return;
 
        BUILD_BUG_ON(sizeof(tv_res) != 3);
-       if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+       if (!intel_sdvo_write_cmd(intel_sdvo,
+                                 SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
                                  &tv_res, sizeof(tv_res)))
                return;
        if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
@@ -1662,8 +1581,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+       struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        struct drm_display_mode *newmode;
 
@@ -1672,7 +1590,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
         * Assume that the preferred modes are
         * arranged in priority order.
         */
-       intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+       intel_ddc_get_modes(connector, intel_sdvo->i2c);
        if (list_empty(&connector->probed_modes) == false)
                goto end;
 
@@ -1693,6 +1611,10 @@ end:
                if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
                        intel_sdvo->sdvo_lvds_fixed_mode =
                                drm_mode_duplicate(connector->dev, newmode);
+
+                       drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
+                                             0);
+
                        intel_sdvo->is_lvds = true;
                        break;
                }
@@ -1775,8 +1697,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
                        struct drm_property *property,
                        uint64_t val)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+       struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        uint16_t temp_value;
        uint8_t cmd;
@@ -1786,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
        if (ret)
                return ret;
 
+       if (property == intel_sdvo_connector->force_audio_property) {
+               if (val == intel_sdvo_connector->force_audio)
+                       return 0;
+
+               intel_sdvo_connector->force_audio = val;
+
+               if (val > 0 && intel_sdvo->has_audio)
+                       return 0;
+               if (val < 0 && !intel_sdvo->has_audio)
+                       return 0;
+
+               intel_sdvo->has_audio = val > 0;
+               goto done;
+       }
+
 #define CHECK_PROPERTY(name, NAME) \
        if (intel_sdvo_connector->name == property) { \
                if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
@@ -1879,9 +1815,8 @@ set_value:
 
 
 done:
-       if (encoder->crtc) {
-               struct drm_crtc *crtc = encoder->crtc;
-
+       if (intel_sdvo->base.base.crtc) {
+               struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
                drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
                                         crtc->y, crtc->fb);
        }
@@ -1909,20 +1844,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
        .get_modes = intel_sdvo_get_modes,
        .mode_valid = intel_sdvo_mode_valid,
-       .best_encoder = intel_attached_encoder,
+       .best_encoder = intel_best_encoder,
 };
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
-       struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-
-       if (intel_sdvo->analog_ddc_bus)
-               intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+       struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
 
        if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
                drm_mode_destroy(encoder->dev,
                                 intel_sdvo->sdvo_lvds_fixed_mode);
 
+       i2c_del_adapter(&intel_sdvo->ddc);
        intel_encoder_destroy(encoder);
 }
 
@@ -1990,53 +1923,48 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
                intel_sdvo_guess_ddc_bus(sdvo);
 }
 
-static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+                         struct intel_sdvo *sdvo, u32 reg)
 {
-       return intel_sdvo_set_target_output(intel_sdvo,
-                                           device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
-               intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
-                                    &intel_sdvo->is_hdmi, 1);
-}
+       struct sdvo_device_mapping *mapping;
+       u8 pin, speed;
 
-static struct intel_sdvo *
-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
-{
-       struct drm_device *dev = chan->drm_dev;
-       struct drm_encoder *encoder;
+       if (IS_SDVOB(reg))
+               mapping = &dev_priv->sdvo_mappings[0];
+       else
+               mapping = &dev_priv->sdvo_mappings[1];
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
-               if (intel_sdvo->base.ddc_bus == &chan->adapter)
-                       return intel_sdvo;
+       pin = GMBUS_PORT_DPB;
+       speed = GMBUS_RATE_1MHZ >> 8;
+       if (mapping->initialized) {
+               pin = mapping->i2c_pin;
+               speed = mapping->i2c_speed;
        }
 
-       return NULL;
+       sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+       intel_gmbus_set_speed(sdvo->i2c, speed);
+       intel_gmbus_force_bit(sdvo->i2c, true);
 }
 
-static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
-                                 struct i2c_msg msgs[], int num)
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 {
-       struct intel_sdvo *intel_sdvo;
-       struct i2c_algo_bit_data *algo_data;
-       const struct i2c_algorithm *algo;
+       int is_hdmi;
 
-       algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
-       intel_sdvo =
-               intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
-                                             (algo_data->data));
-       if (intel_sdvo == NULL)
-               return -EINVAL;
+       if (!intel_sdvo_check_supp_encode(intel_sdvo))
+               return false;
 
-       algo = intel_sdvo->base.i2c_bus->algo;
+       if (!intel_sdvo_set_target_output(intel_sdvo,
+                                         device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
+               return false;
 
-       intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
-       return algo->master_xfer(i2c_adap, msgs, num);
-}
+       is_hdmi = 0;
+       if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
+               return false;
 
-static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
-       .master_xfer    = intel_sdvo_master_xfer,
-};
+       return !!is_hdmi;
+}
 
 static u8
 intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
@@ -2076,26 +2004,44 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
 }
 
 static void
-intel_sdvo_connector_init(struct drm_encoder *encoder,
-                         struct drm_connector *connector)
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+                         struct intel_sdvo *encoder)
 {
-       drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
-                          connector->connector_type);
+       drm_connector_init(encoder->base.base.dev,
+                          &connector->base.base,
+                          &intel_sdvo_connector_funcs,
+                          connector->base.base.connector_type);
+
+       drm_connector_helper_add(&connector->base.base,
+                                &intel_sdvo_connector_helper_funcs);
+
+       connector->base.base.interlace_allowed = 0;
+       connector->base.base.doublescan_allowed = 0;
+       connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
 
-       drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+       intel_connector_attach_encoder(&connector->base, &encoder->base);
+       drm_sysfs_connector_add(&connector->base.base);
+}
 
-       connector->interlace_allowed = 0;
-       connector->doublescan_allowed = 0;
-       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+{
+       struct drm_device *dev = connector->base.base.dev;
 
-       drm_mode_connector_attach_encoder(connector, encoder);
-       drm_sysfs_connector_add(connector);
+       connector->force_audio_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+       if (connector->force_audio_property) {
+               connector->force_audio_property->values[0] = -1;
+               connector->force_audio_property->values[1] = 1;
+               drm_connector_attach_property(&connector->base.base,
+                                             connector->force_audio_property, 0);
+       }
 }
 
 static bool
 intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
-       struct drm_encoder *encoder = &intel_sdvo->base.enc;
+       struct drm_encoder *encoder = &intel_sdvo->base.base;
        struct drm_connector *connector;
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2118,19 +2064,20 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
        encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
        connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
-       if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
-               && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
-               && intel_sdvo->is_hdmi) {
+       if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
                /* enable hdmi encoding mode if supported */
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
                intel_sdvo_set_colorimetry(intel_sdvo,
                                           SDVO_COLORIMETRY_RGB256);
                connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+               intel_sdvo->is_hdmi = true;
        }
        intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
                                       (1 << INTEL_ANALOG_CLONE_BIT));
 
-       intel_sdvo_connector_init(encoder, connector);
+       intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+
+       intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
 
        return true;
 }
@@ -2138,36 +2085,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 static bool
 intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 {
-        struct drm_encoder *encoder = &intel_sdvo->base.enc;
-        struct drm_connector *connector;
-        struct intel_connector *intel_connector;
-        struct intel_sdvo_connector *intel_sdvo_connector;
+       struct drm_encoder *encoder = &intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct intel_connector *intel_connector;
+       struct intel_sdvo_connector *intel_sdvo_connector;
 
        intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
        if (!intel_sdvo_connector)
                return false;
 
        intel_connector = &intel_sdvo_connector->base;
-        connector = &intel_connector->base;
-        encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
-        connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+       connector = &intel_connector->base;
+       encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
 
-        intel_sdvo->controlled_output |= type;
-        intel_sdvo_connector->output_flag = type;
+       intel_sdvo->controlled_output |= type;
+       intel_sdvo_connector->output_flag = type;
 
-        intel_sdvo->is_tv = true;
-        intel_sdvo->base.needs_tv_clock = true;
-        intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+       intel_sdvo->is_tv = true;
+       intel_sdvo->base.needs_tv_clock = true;
+       intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
 
-        intel_sdvo_connector_init(encoder, connector);
+       intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
-        if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+       if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
                goto err;
 
-        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+       if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
                goto err;
 
-        return true;
+       return true;
 
 err:
        intel_sdvo_destroy(connector);
@@ -2177,43 +2124,44 @@ err:
 static bool
 intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 {
-        struct drm_encoder *encoder = &intel_sdvo->base.enc;
-        struct drm_connector *connector;
-        struct intel_connector *intel_connector;
-        struct intel_sdvo_connector *intel_sdvo_connector;
+       struct drm_encoder *encoder = &intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct intel_connector *intel_connector;
+       struct intel_sdvo_connector *intel_sdvo_connector;
 
        intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
        if (!intel_sdvo_connector)
                return false;
 
        intel_connector = &intel_sdvo_connector->base;
-        connector = &intel_connector->base;
+       connector = &intel_connector->base;
        connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-        encoder->encoder_type = DRM_MODE_ENCODER_DAC;
-        connector->connector_type = DRM_MODE_CONNECTOR_VGA;
-
-        if (device == 0) {
-                intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
-                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
-        } else if (device == 1) {
-                intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
-                intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
-        }
-
-        intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+       encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+       if (device == 0) {
+               intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+       } else if (device == 1) {
+               intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+       }
+
+       intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
                                       (1 << INTEL_ANALOG_CLONE_BIT));
 
-        intel_sdvo_connector_init(encoder, connector);
-        return true;
+       intel_sdvo_connector_init(intel_sdvo_connector,
+                                 intel_sdvo);
+       return true;
 }
 
 static bool
 intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 {
-        struct drm_encoder *encoder = &intel_sdvo->base.enc;
-        struct drm_connector *connector;
-        struct intel_connector *intel_connector;
-        struct intel_sdvo_connector *intel_sdvo_connector;
+       struct drm_encoder *encoder = &intel_sdvo->base.base;
+       struct drm_connector *connector;
+       struct intel_connector *intel_connector;
+       struct intel_sdvo_connector *intel_sdvo_connector;
 
        intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
        if (!intel_sdvo_connector)
@@ -2221,22 +2169,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 
        intel_connector = &intel_sdvo_connector->base;
        connector = &intel_connector->base;
-        encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
-        connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
-
-        if (device == 0) {
-                intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
-                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
-        } else if (device == 1) {
-                intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
-                intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
-        }
-
-        intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+       encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+       connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+       if (device == 0) {
+               intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+       } else if (device == 1) {
+               intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+       }
+
+       intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
                                       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
 
-        intel_sdvo_connector_init(encoder, connector);
-        if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+       intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+       if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
                goto err;
 
        return true;
@@ -2307,7 +2255,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
                                          struct intel_sdvo_connector *intel_sdvo_connector,
                                          int type)
 {
-       struct drm_device *dev = intel_sdvo->base.enc.dev;
+       struct drm_device *dev = intel_sdvo->base.base.dev;
        struct intel_sdvo_tv_format format;
        uint32_t format_map, i;
 
@@ -2373,7 +2321,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
                                      struct intel_sdvo_connector *intel_sdvo_connector,
                                      struct intel_sdvo_enhancements_reply enhancements)
 {
-       struct drm_device *dev = intel_sdvo->base.enc.dev;
+       struct drm_device *dev = intel_sdvo->base.base.dev;
        struct drm_connector *connector = &intel_sdvo_connector->base.base;
        uint16_t response, data_value[2];
 
@@ -2502,7 +2450,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
                                        struct intel_sdvo_connector *intel_sdvo_connector,
                                        struct intel_sdvo_enhancements_reply enhancements)
 {
-       struct drm_device *dev = intel_sdvo->base.enc.dev;
+       struct drm_device *dev = intel_sdvo->base.base.dev;
        struct drm_connector *connector = &intel_sdvo_connector->base.base;
        uint16_t response, data_value[2];
 
@@ -2535,7 +2483,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
                return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
        else
                return true;
+}
+
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+                                    struct i2c_msg *msgs,
+                                    int num)
+{
+       struct intel_sdvo *sdvo = adapter->algo_data;
 
+       if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+               return -EIO;
+
+       return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+       struct intel_sdvo *sdvo = adapter->algo_data;
+       return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+       .master_xfer    = intel_sdvo_ddc_proxy_xfer,
+       .functionality  = intel_sdvo_ddc_proxy_func
+};
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+                         struct drm_device *dev)
+{
+       sdvo->ddc.owner = THIS_MODULE;
+       sdvo->ddc.class = I2C_CLASS_DDC;
+       snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+       sdvo->ddc.dev.parent = &dev->pdev->dev;
+       sdvo->ddc.algo_data = sdvo;
+       sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+
+       return i2c_add_adapter(&sdvo->ddc) == 0;
 }
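
This proxy is what replaces the removed master_xfer wrapper and the one-transaction bus-switch helper: every transfer through the DDC adapter first issues the control-bus-switch command and then forwards the messages on the same underlying bus, so no other client can slip in between the switch and the EDID read. A userspace model of the proxy-adapter pattern (all types and names here are illustrative, not the kernel's i2c API):

```c
#include <stdbool.h>
#include <stdio.h>

struct adapter {
	const char *name;
	int (*xfer)(struct adapter *, const char *msg);
	void *priv;
};

struct sdvo {
	struct adapter *real;	/* the underlying GMBUS-like adapter */
	int ddc_bus;
};

static int real_xfer(struct adapter *a, const char *msg)
{
	printf("[%s] %s\n", a->name, msg);
	return 0;
}

static bool set_control_bus_switch(struct sdvo *s)
{
	printf("[sdvo] switch to DDC bus %d\n", s->ddc_bus);
	return true;
}

/* Proxy transfer: select the DDC bus, then forward to the real adapter. */
static int proxy_xfer(struct adapter *a, const char *msg)
{
	struct sdvo *s = a->priv;

	if (!set_control_bus_switch(s))
		return -1;
	return s->real->xfer(s->real, msg);
}

int main(void)
{
	struct adapter gmbus = { "gmbus", real_xfer, NULL };
	struct sdvo sdvo = { &gmbus, 2 };
	struct adapter ddc = { "ddc-proxy", proxy_xfer, &sdvo };

	return ddc.xfer(&ddc, "read EDID block 0");
}
```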
 
 bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
@@ -2543,95 +2527,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *intel_encoder;
        struct intel_sdvo *intel_sdvo;
-       u8 ch[0x40];
        int i;
-       u32 i2c_reg, ddc_reg, analog_ddc_reg;
 
        intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
        if (!intel_sdvo)
                return false;
 
+       if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
+               kfree(intel_sdvo);
+               return false;
+       }
+
        intel_sdvo->sdvo_reg = sdvo_reg;
 
        intel_encoder = &intel_sdvo->base;
        intel_encoder->type = INTEL_OUTPUT_SDVO;
+       /* encoder type will be decided later */
+       drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
 
-       if (HAS_PCH_SPLIT(dev)) {
-               i2c_reg = PCH_GPIOE;
-               ddc_reg = PCH_GPIOE;
-               analog_ddc_reg = PCH_GPIOA;
-       } else {
-               i2c_reg = GPIOE;
-               ddc_reg = GPIOE;
-               analog_ddc_reg = GPIOA;
-       }
-
-       /* setup the DDC bus. */
-       if (IS_SDVOB(sdvo_reg))
-               intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
-       else
-               intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
-
-       if (!intel_encoder->i2c_bus)
-               goto err_inteloutput;
-
-       intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
-
-       /* Save the bit-banging i2c functionality for use by the DDC wrapper */
-       intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
+       intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+       intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
 
        /* Read the regs to test if we can talk to the device */
        for (i = 0; i < 0x40; i++) {
-               if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
+               u8 byte;
+
+               if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
                        DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
                                      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
-                       goto err_i2c;
+                       goto err;
                }
        }
 
-       /* setup the DDC bus. */
-       if (IS_SDVOB(sdvo_reg)) {
-               intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
-               intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
-                                               "SDVOB/VGA DDC BUS");
+       if (IS_SDVOB(sdvo_reg))
                dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
-       } else {
-               intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
-               intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
-                                               "SDVOC/VGA DDC BUS");
+       else
                dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
-       }
-       if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
-               goto err_i2c;
 
-       /* Wrap with our custom algo which switches to DDC mode */
-       intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
-
-       /* encoder type will be decided later */
-       drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
-       drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+       drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
 
        /* In default case sdvo lvds is false */
        if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
-               goto err_enc;
+               goto err;
 
        if (intel_sdvo_output_setup(intel_sdvo,
                                    intel_sdvo->caps.output_flags) != true) {
                DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
                              IS_SDVOB(sdvo_reg) ? 'B' : 'C');
-               goto err_enc;
+               goto err;
        }
 
        intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
        /* Set the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
-               goto err_enc;
+               goto err;
 
        if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
                                                    &intel_sdvo->pixel_clock_min,
                                                    &intel_sdvo->pixel_clock_max))
-               goto err_enc;
+               goto err;
 
        DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
                        "clock range %dMHz - %dMHz, "
@@ -2651,16 +2606,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
                        (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
        return true;
 
-err_enc:
-       drm_encoder_cleanup(&intel_encoder->enc);
-err_i2c:
-       if (intel_sdvo->analog_ddc_bus != NULL)
-               intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
-       if (intel_encoder->ddc_bus != NULL)
-               intel_i2c_destroy(intel_encoder->ddc_bus);
-       if (intel_encoder->i2c_bus != NULL)
-               intel_i2c_destroy(intel_encoder->i2c_bus);
-err_inteloutput:
+err:
+       drm_encoder_cleanup(&intel_encoder->base);
+       i2c_del_adapter(&intel_sdvo->ddc);
        kfree(intel_sdvo);
 
        return false;
index 4a117e318a73a0a44c7ae4cc0be3447d11a403da..2f7681989316643149f2c11de896eef6936d884a 100644 (file)
@@ -48,7 +48,7 @@ struct intel_tv {
        struct intel_encoder base;
 
        int type;
-       char *tv_format;
+       const char *tv_format;
        int margin[4];
        u32 save_TV_H_CTL_1;
        u32 save_TV_H_CTL_2;
@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
 
 
 struct tv_mode {
-       char *name;
+       const char *name;
        int clock;
        int refresh; /* in millihertz (for precision) */
        u32 oversample;
@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
 
 static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
 {
-       return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+       return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+       return container_of(intel_attached_encoder(connector),
+                           struct intel_tv,
+                           base);
 }
 
 static void
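
[Both helpers above are plain container_of() upcasts: intel_tv embeds intel_encoder, which embeds drm_encoder, so the member path from a drm_encoder back to its intel_tv is base.base. A standalone sketch of that layout; the struct contents are stand-ins, not the real driver types:

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct drm_encoder { int id; };
	struct intel_encoder { struct drm_encoder base; };
	struct intel_tv { struct intel_encoder base; int type; };

	static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
	{
		/* two levels of embedding, so the member path is base.base */
		return container_of(encoder, struct intel_tv, base.base);
	}

	int main(void)
	{
		struct intel_tv tv = { .base.base.id = 42, .type = 7 };
		struct drm_encoder *enc = &tv.base.base;

		printf("type=%d id=%d\n", enc_to_intel_tv(enc)->type,
		       enc_to_intel_tv(enc)->base.base.id);
		return 0;
	}
]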
@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
 }
 
 static const struct tv_mode *
-intel_tv_mode_lookup (char *tv_format)
+intel_tv_mode_lookup(const char *tv_format)
 {
        int i;
 
@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format)
 }
 
 static const struct tv_mode *
-intel_tv_mode_find (struct intel_tv *intel_tv)
+intel_tv_mode_find(struct intel_tv *intel_tv)
 {
        return intel_tv_mode_lookup(intel_tv->tv_format);
 }
 
 static enum drm_mode_status
-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+intel_tv_mode_valid(struct drm_connector *connector,
+                   struct drm_display_mode *mode)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+       struct intel_tv *intel_tv = intel_attached_tv(connector);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
        /* Ensure TV refresh is close to desired refresh */
        if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
                                < 1000)
                return MODE_OK;
+
        return MODE_CLOCK_RANGE;
 }
 
@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                           color_conversion->av);
        }
 
-       if (IS_I965G(dev))
+       if (INTEL_INFO(dev)->gen >= 4)
                I915_WRITE(TV_CLR_KNOBS, 0x00404000);
        else
                I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
 
                /* Wait for vblank for the disable to take effect */
-               if (!IS_I9XX(dev))
+               if (IS_GEN2(dev))
                        intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-               I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+               I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
                /* Wait for vblank for the disable to take effect. */
-               intel_wait_for_vblank(dev, intel_crtc->pipe);
+               intel_wait_for_pipe_off(dev, intel_crtc->pipe);
 
                /* Filter ctl must be set before TV_WIN_SIZE */
                I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
        for (i = 0; i < 43; i++)
                I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
-       I915_WRITE(TV_DAC, 0);
+       I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
        I915_WRITE(TV_CTL, tv_ctl);
 }
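
[Two fixes ride along in this hunk: the pipe is disabled with the correctly named PIPECONF_ENABLE bit, and the code now waits for the pipe to actually report off instead of waiting one vblank and hoping. A userspace sketch of the poll-until-off idea; everything here is a stand-in for the real register wait in intel_wait_for_pipe_off():

	#include <stdio.h>
	#include <stdbool.h>

	static int fake_pipe_busy = 3;	/* pretend hardware: counts down */

	static bool pipe_active(void)
	{
		return fake_pipe_busy-- > 0;
	}

	static int wait_for_pipe_off(int max_polls)
	{
		while (max_polls--) {
			if (!pipe_active())
				return 0;	/* pipe reports idle */
		}
		return -1;			/* timed out */
	}

	int main(void)
	{
		printf("wait_for_pipe_off -> %d\n", wait_for_pipe_off(10));
		return 0;
	}
]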
 
@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
 static int
 intel_tv_detect_type (struct intel_tv *intel_tv)
 {
-       struct drm_encoder *encoder = &intel_tv->base.enc;
+       struct drm_encoder *encoder = &intel_tv->base.base;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        u32 tv_ctl, save_tv_ctl;
        u32 tv_dac, save_tv_dac;
-       int type = DRM_MODE_CONNECTOR_Unknown;
-
-       tv_dac = I915_READ(TV_DAC);
+       int type;
 
        /* Disable TV interrupts around load detect or we'll recurse */
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
                              PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 
-       /*
-        * Detect TV by polling)
-        */
-       save_tv_dac = tv_dac;
-       tv_ctl = I915_READ(TV_CTL);
-       save_tv_ctl = tv_ctl;
-       tv_ctl &= ~TV_ENC_ENABLE;
-       tv_ctl &= ~TV_TEST_MODE_MASK;
+       save_tv_dac = tv_dac = I915_READ(TV_DAC);
+       save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+       /* Poll for TV detection */
+       tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
        tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
-       tv_dac &= ~TVDAC_SENSE_MASK;
-       tv_dac &= ~DAC_A_MASK;
-       tv_dac &= ~DAC_B_MASK;
-       tv_dac &= ~DAC_C_MASK;
+
+       tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
        tv_dac |= (TVDAC_STATE_CHG_EN |
                   TVDAC_A_SENSE_CTL |
                   TVDAC_B_SENSE_CTL |
@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
                   DAC_A_0_7_V |
                   DAC_B_0_7_V |
                   DAC_C_0_7_V);
+
        I915_WRITE(TV_CTL, tv_ctl);
        I915_WRITE(TV_DAC, tv_dac);
        POSTING_READ(TV_DAC);
-       msleep(20);
 
-       tv_dac = I915_READ(TV_DAC);
-       I915_WRITE(TV_DAC, save_tv_dac);
-       I915_WRITE(TV_CTL, save_tv_ctl);
-       POSTING_READ(TV_CTL);
-       msleep(20);
+       intel_wait_for_vblank(intel_tv->base.base.dev,
+                             to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
-       /*
-        *  A B C
-        *  0 1 1 Composite
-        *  1 0 X svideo
-        *  0 0 0 Component
-        */
-       if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
-               DRM_DEBUG_KMS("Detected Composite TV connection\n");
-               type = DRM_MODE_CONNECTOR_Composite;
-       } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
-               DRM_DEBUG_KMS("Detected S-Video TV connection\n");
-               type = DRM_MODE_CONNECTOR_SVIDEO;
-       } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
-               DRM_DEBUG_KMS("Detected Component TV connection\n");
-               type = DRM_MODE_CONNECTOR_Component;
-       } else {
-               DRM_DEBUG_KMS("No TV connection detected\n");
-               type = -1;
+       type = -1;
+       if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
+               DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+               /*
+                *  A B C
+                *  0 1 1 Composite
+                *  1 0 X svideo
+                *  0 0 0 Component
+                */
+               if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+                       DRM_DEBUG_KMS("Detected Composite TV connection\n");
+                       type = DRM_MODE_CONNECTOR_Composite;
+               } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+                       DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+                       type = DRM_MODE_CONNECTOR_SVIDEO;
+               } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+                       DRM_DEBUG_KMS("Detected Component TV connection\n");
+                       type = DRM_MODE_CONNECTOR_Component;
+               } else {
+                       DRM_DEBUG_KMS("Unrecognised TV connection\n");
+               }
        }
 
+       I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+       I915_WRITE(TV_CTL, save_tv_ctl);
+
        /* Restore interrupt config */
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
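
[Load detection above now latches the DAC sense bits via TVDAC_STATE_CHG under a bounded wait_for() instead of fixed msleep()s, then decodes the A/B/C sense lines per the truth table in the comment. The decode, extracted into a self-contained sketch with invented bit values:

	#include <stdio.h>

	#define SENSE_A (1 << 0)
	#define SENSE_B (1 << 1)
	#define SENSE_C (1 << 2)
	#define SENSE_MASK (SENSE_A | SENSE_B | SENSE_C)

	static const char *decode(unsigned tv_dac)
	{
		/*  A B C
		 *  0 1 1 Composite
		 *  1 0 X S-Video
		 *  0 0 0 Component */
		if ((tv_dac & SENSE_MASK) == (SENSE_B | SENSE_C))
			return "Composite";
		if ((tv_dac & (SENSE_A | SENSE_B)) == SENSE_A)
			return "S-Video";
		if ((tv_dac & SENSE_MASK) == 0)
			return "Component";
		return "Unrecognised";
	}

	int main(void)
	{
		printf("%s\n", decode(SENSE_B | SENSE_C));	/* Composite */
		printf("%s\n", decode(SENSE_A));		/* S-Video */
		printf("%s\n", decode(0));			/* Component */
		return 0;
	}
]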
@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
  */
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+       struct intel_tv *intel_tv = intel_attached_tv(connector);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
        int i;
 
@@ -1344,14 +1347,13 @@ static enum drm_connector_status
 intel_tv_detect(struct drm_connector *connector, bool force)
 {
        struct drm_display_mode mode;
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+       struct intel_tv *intel_tv = intel_attached_tv(connector);
        int type;
 
        mode = reported_modes[0];
        drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
-       if (encoder->crtc && encoder->crtc->enabled) {
+       if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
                type = intel_tv_detect_type(intel_tv);
        } else if (force) {
                struct drm_crtc *crtc;
@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
        return connector_status_connected;
 }
 
-static struct input_res {
-       char *name;
+static const struct input_res {
+       const char *name;
        int w, h;
-} input_res_table[] =
-{
+} input_res_table[] = {
        {"640x480", 640, 480},
        {"800x600", 800, 600},
        {"1024x768", 1024, 768},
@@ -1396,8 +1397,7 @@ static void
 intel_tv_chose_preferred_modes(struct drm_connector *connector,
                               struct drm_display_mode *mode_ptr)
 {
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+       struct intel_tv *intel_tv = intel_attached_tv(connector);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
        if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1422,15 +1422,14 @@ static int
 intel_tv_get_modes(struct drm_connector *connector)
 {
        struct drm_display_mode *mode_ptr;
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+       struct intel_tv *intel_tv = intel_attached_tv(connector);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
        int j, count = 0;
        u64 tmp;
 
        for (j = 0; j < ARRAY_SIZE(input_res_table);
             j++) {
-               struct input_res *input = &input_res_table[j];
+               const struct input_res *input = &input_res_table[j];
                unsigned int hactive_s = input->w;
                unsigned int vactive_s = input->h;
 
@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
                      uint64_t val)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_encoder *encoder = intel_attached_encoder(connector);
-       struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
-       struct drm_crtc *crtc = encoder->crtc;
+       struct intel_tv *intel_tv = intel_attached_tv(connector);
+       struct drm_crtc *crtc = intel_tv->base.base.crtc;
        int ret = 0;
        bool changed = false;
 
@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
        .mode_valid = intel_tv_mode_valid,
        .get_modes = intel_tv_get_modes,
-       .best_encoder = intel_attached_encoder,
+       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
        struct intel_encoder *intel_encoder;
        struct intel_connector *intel_connector;
        u32 tv_dac_on, tv_dac_off, save_tv_dac;
-       char **tv_format_names;
+       char *tv_format_names[ARRAY_SIZE(tv_modes)];
        int i, initial_mode = 0;
 
        if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
        drm_connector_init(dev, connector, &intel_tv_connector_funcs,
                           DRM_MODE_CONNECTOR_SVIDEO);
 
-       drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
+       drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
                         DRM_MODE_ENCODER_TVDAC);
 
-       drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+       intel_connector_attach_encoder(intel_connector, intel_encoder);
        intel_encoder->type = INTEL_OUTPUT_TVOUT;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
        intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
-       intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
-       intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+       intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+       intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
        intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
 
        /* BIOS margin values */
@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
        intel_tv->margin[TV_MARGIN_RIGHT] = 46;
        intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
 
-       intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+       intel_tv->tv_format = tv_modes[initial_mode].name;
 
-       drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
+       drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
        drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
        connector->interlace_allowed = false;
        connector->doublescan_allowed = false;
 
        /* Create TV properties then attach current values */
-       tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
-                                 GFP_KERNEL);
-       if (!tv_format_names)
-               goto out;
        for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
-               tv_format_names[i] = tv_modes[i].name;
-       drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
+               tv_format_names[i] = (char *)tv_modes[i].name;
+       drm_mode_create_tv_properties(dev,
+                                     ARRAY_SIZE(tv_modes),
+                                     tv_format_names);
 
        drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
                                   initial_mode);
@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
        drm_connector_attach_property(connector,
                                   dev->mode_config.tv_bottom_margin_property,
                                   intel_tv->margin[TV_MARGIN_BOTTOM]);
-out:
        drm_sysfs_connector_add(connector);
 }
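
[With tv_format now pointing at the static mode name (no kstrdup, nothing to free) the property name table can live on the stack; the const is cast away only because the property-creation API takes a plain char **. A userspace sketch of that shape, with the property call stubbed out:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct tv_mode { const char *name; int clock; };

	static const struct tv_mode tv_modes[] = {
		{ "NTSC-M", 108000 },
		{ "PAL",    108000 },
	};

	static void create_tv_properties(int n, char **names)
	{
		for (int i = 0; i < n; i++)
			printf("property %d: %s\n", i, names[i]);
	}

	int main(void)
	{
		char *tv_format_names[ARRAY_SIZE(tv_modes)];

		for (unsigned i = 0; i < ARRAY_SIZE(tv_modes); i++)
			tv_format_names[i] = (char *)tv_modes[i].name;

		create_tv_properties(ARRAY_SIZE(tv_modes), tv_format_names);
		return 0;
	}
]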
index ac64f0b0392edb32e57a3b0da85c4185ad4efa59..0aaf5f67a43647e5cc37a903e4fb46706cb1f90f 100644 (file)
@@ -60,8 +60,6 @@ static struct drm_driver driver = {
        .irq_uninstall = mga_driver_irq_uninstall,
        .irq_handler = mga_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = mga_ioctls,
        .dma_ioctl = mga_dma_buffers,
        .fops = {
index d2d28048efb23e1759c4368a2dbb2a64ac12675e..72730e9ca06c8e30f7f6f415c6074e60941eef18 100644 (file)
@@ -10,6 +10,7 @@ config DRM_NOUVEAU
        select FB
        select FRAMEBUFFER_CONSOLE if !EMBEDDED
        select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+       select ACPI_VIDEO if ACPI
        help
          Choose this option for open-source nVidia support.
 
index e9b06e4ef2a242921bb023f69bf093eb9da68334..23fa82d667d6f060e70eba35a2834e671441c8d7 100644 (file)
@@ -9,7 +9,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-             nouveau_dp.o \
+             nouveau_dp.o nouveau_ramht.o \
+            nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -23,7 +24,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
-            nv50_calc.o
+            nv50_calc.o \
+            nv04_pm.o nv50_pm.o nva3_pm.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
index c17a055ee3e57c3a356f2fd4d3b85d4432f7d4e5..119152606e4c97af9583b451415954637d1a4d74 100644 (file)
@@ -292,6 +292,6 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
        if (ret < 0)
                return ret;
 
-       nv_connector->edid = edid;
+       nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
        return 0;
 }
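
[Duplicating the ACPI-provided EDID matters because the buffer belongs to the ACPI layer; keeping a borrowed pointer risks a dangling reference once that buffer is released. A userspace sketch of the ownership fix, where memdup() stands in for kmemdup():

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define EDID_LENGTH 128

	static void *memdup(const void *src, size_t len)
	{
		void *p = malloc(len);
		if (p)
			memcpy(p, src, len);
		return p;
	}

	int main(void)
	{
		unsigned char acpi_buf[EDID_LENGTH] = { 0x00, 0xff };
		unsigned char *edid = memdup(acpi_buf, EDID_LENGTH);

		memset(acpi_buf, 0, sizeof(acpi_buf));	/* source can go away */
		printf("edid[1]=0x%02x\n", edid ? edid[1] : 0);	/* still 0xff */
		free(edid);
		return 0;
	}
]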
index 974b0f8ae0483cc462b1be9a81795413e7a7a0c1..5f21030a293bbc3f373a76631a5e092a0075ea3f 100644 (file)
@@ -43,9 +43,6 @@
 #define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
 #define LOG_OLD_VALUE(x)
 
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-
 struct init_exec {
        bool execute;
        bool repeat;
@@ -272,12 +269,6 @@ struct init_tbl_entry {
        int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
 };
 
-struct bit_entry {
-       uint8_t id[2];
-       uint16_t length;
-       uint16_t offset;
-};
-
 static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
 
 #define MACRO_INDEX_SIZE       2
@@ -1231,7 +1222,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
                        return 3;
                }
 
-               if (cond & 1)
+               if (!(cond & 1))
                        iexec->execute = false;
        }
                break;
@@ -2167,11 +2158,11 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
 
        if (off < pci_resource_len(dev->pdev, 1)) {
                uint8_t __iomem *p =
-                       io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+                       io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
 
                val = ioread32(p + (off & ~PAGE_MASK));
 
-               io_mapping_unmap_atomic(p, KM_USER0);
+               io_mapping_unmap_atomic(p);
        }
 
        return val;
@@ -2183,12 +2174,12 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
 {
        if (off < pci_resource_len(dev->pdev, 1)) {
                uint8_t __iomem *p =
-                       io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+                       io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
 
                iowrite32(val, p + (off & ~PAGE_MASK));
                wmb();
 
-               io_mapping_unmap_atomic(p, KM_USER0);
+               io_mapping_unmap_atomic(p);
        }
 }
 
@@ -4675,6 +4666,92 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
        return 0;
 }
 
+struct pll_mapping {
+       u8  type;
+       u32 reg;
+};
+
+static struct pll_mapping nv04_pll_mapping[] = {
+       { PLL_CORE  , NV_PRAMDAC_NVPLL_COEFF },
+       { PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF },
+       { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+       { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+       {}
+};
+
+static struct pll_mapping nv40_pll_mapping[] = {
+       { PLL_CORE  , 0x004000 },
+       { PLL_MEMORY, 0x004020 },
+       { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+       { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+       {}
+};
+
+static struct pll_mapping nv50_pll_mapping[] = {
+       { PLL_CORE  , 0x004028 },
+       { PLL_SHADER, 0x004020 },
+       { PLL_UNK03 , 0x004000 },
+       { PLL_MEMORY, 0x004008 },
+       { PLL_UNK40 , 0x00e810 },
+       { PLL_UNK41 , 0x00e818 },
+       { PLL_UNK42 , 0x00e824 },
+       { PLL_VPLL0 , 0x614100 },
+       { PLL_VPLL1 , 0x614900 },
+       {}
+};
+
+static struct pll_mapping nv84_pll_mapping[] = {
+       { PLL_CORE  , 0x004028 },
+       { PLL_SHADER, 0x004020 },
+       { PLL_MEMORY, 0x004008 },
+       { PLL_UNK05 , 0x004030 },
+       { PLL_UNK41 , 0x00e818 },
+       { PLL_VPLL0 , 0x614100 },
+       { PLL_VPLL1 , 0x614900 },
+       {}
+};
+
+u32
+get_pll_register(struct drm_device *dev, enum pll_types type)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct pll_mapping *map;
+       int i;
+
+       if (dev_priv->card_type < NV_40)
+               map = nv04_pll_mapping;
+       else
+       if (dev_priv->card_type < NV_50)
+               map = nv40_pll_mapping;
+       else {
+               u8 *plim = &bios->data[bios->pll_limit_tbl_ptr];
+
+               if (plim[0] >= 0x30) {
+                       u8 *entry = plim + plim[1];
+                       for (i = 0; i < plim[3]; i++, entry += plim[2]) {
+                               if (entry[0] == type)
+                                       return ROM32(entry[3]);
+                       }
+
+                       return 0;
+               }
+
+               if (dev_priv->chipset == 0x50)
+                       map = nv50_pll_mapping;
+               else
+                       map = nv84_pll_mapping;
+       }
+
+       while (map->reg) {
+               if (map->type == type)
+                       return map->reg;
+               map++;
+       }
+
+       return 0;
+}
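
[get_pll_register() above resolves a PLL type to its register: PLL limits tables of version 0x30 and newer carry the register per entry, while older VBIOSes fall back to per-chipset sentinel-terminated static maps. A minimal userspace model of the map walk; types and register values are placeholders:

	#include <stdio.h>
	#include <stdint.h>

	struct pll_mapping { uint8_t type; uint32_t reg; };

	static const struct pll_mapping map[] = {
		{ 0x01 /* PLL_CORE   */, 0x004028 },
		{ 0x04 /* PLL_MEMORY */, 0x004008 },
		{ 0, 0 }	/* reg == 0 terminates the walk */
	};

	static uint32_t lookup(uint8_t type)
	{
		for (const struct pll_mapping *m = map; m->reg; m++)
			if (m->type == type)
				return m->reg;
		return 0;
	}

	int main(void)
	{
		printf("0x%06x\n", (unsigned)lookup(0x04));	/* 0x004008 */
		printf("0x%06x\n", (unsigned)lookup(0x42));	/* not found */
		return 0;
	}
]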
+
 int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
 {
        /*
@@ -4750,6 +4827,17 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
        /* initialize all members to zero */
        memset(pll_lim, 0, sizeof(struct pll_lims));
 
+       /* if we were passed a type rather than a register, figure
+        * out the register and store it
+        */
+       if (limit_match > PLL_MAX)
+               pll_lim->reg = limit_match;
+       else {
+               pll_lim->reg = get_pll_register(dev, limit_match);
+               if (!pll_lim->reg)
+                       return -ENOENT;
+       }
+
        if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
                uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
 
@@ -4785,7 +4873,6 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
                pll_lim->max_usable_log2p = 0x6;
        } else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
                uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
-               uint32_t reg = 0; /* default match */
                uint8_t *pll_rec;
                int i;
 
@@ -4797,37 +4884,22 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
                        NV_WARN(dev, "Default PLL limit entry has non-zero "
                                       "register field\n");
 
-               if (limit_match > MAX_PLL_TYPES)
-                       /* we've been passed a reg as the match */
-                       reg = limit_match;
-               else /* limit match is a pll type */
-                       for (i = 1; i < entries && !reg; i++) {
-                               uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
-
-                               if (limit_match == NVPLL &&
-                                   (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
-                                       reg = cmpreg;
-                               if (limit_match == MPLL &&
-                                   (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
-                                       reg = cmpreg;
-                               if (limit_match == VPLL1 &&
-                                   (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
-                                       reg = cmpreg;
-                               if (limit_match == VPLL2 &&
-                                   (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
-                                       reg = cmpreg;
-                       }
-
                for (i = 1; i < entries; i++)
-                       if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
+                       if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) {
                                pllindex = i;
                                break;
                        }
 
+               if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) {
+                       NV_ERROR(dev, "Register 0x%08x not found in PLL "
+                                "limits table", pll_lim->reg);
+                       return -ENOENT;
+               }
+
                pll_rec = &bios->data[plloffs + recordlen * pllindex];
 
                BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
-                       pllindex ? reg : 0);
+                       pllindex ? pll_lim->reg : 0);
 
                /*
                 * Frequencies are stored in tables in MHz, kHz are more
@@ -4877,8 +4949,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
                if (cv == 0x51 && !pll_lim->refclk) {
                        uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
 
-                       if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
-                           ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
+                       if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) ||
+                           (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) {
                                if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
                                        pll_lim->refclk = 200000;
                                else
@@ -4891,10 +4963,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
                int i;
 
                BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-                       limit_match);
+                       pll_lim->reg);
 
                for (i = 0; i < entries; i++, entry += recordlen) {
-                       if (ROM32(entry[3]) == limit_match) {
+                       if (ROM32(entry[3]) == pll_lim->reg) {
                                record = &bios->data[ROM16(entry[1])];
                                break;
                        }
@@ -4902,7 +4974,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 
                if (!record) {
                        NV_ERROR(dev, "Register 0x%08x not found in PLL "
-                                "limits table", limit_match);
+                                "limits table", pll_lim->reg);
                        return -ENOENT;
                }
 
@@ -4931,10 +5003,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
                int i;
 
                BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-                       limit_match);
+                       pll_lim->reg);
 
                for (i = 0; i < entries; i++, entry += recordlen) {
-                       if (ROM32(entry[3]) == limit_match) {
+                       if (ROM32(entry[3]) == pll_lim->reg) {
                                record = &bios->data[ROM16(entry[1])];
                                break;
                        }
@@ -4942,7 +5014,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 
                if (!record) {
                        NV_ERROR(dev, "Register 0x%08x not found in PLL "
-                                "limits table", limit_match);
+                                "limits table", pll_lim->reg);
                        return -ENOENT;
                }
 
@@ -5293,7 +5365,7 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
        if (bitentry->length < 0x5)
                return 0;
 
-       if (bitentry->id[1] < 2) {
+       if (bitentry->version < 2) {
                bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
                bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
        } else {
@@ -5403,27 +5475,40 @@ struct bit_table {
 
 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
 
+int
+bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvbios *bios = &dev_priv->vbios;
+       u8 entries, *entry;
+
+       entries = bios->data[bios->offset + 10];
+       entry   = &bios->data[bios->offset + 12];
+       while (entries--) {
+               if (entry[0] == id) {
+                       bit->id = entry[0];
+                       bit->version = entry[1];
+                       bit->length = ROM16(entry[2]);
+                       bit->offset = ROM16(entry[4]);
+                       bit->data = ROMPTR(bios, entry[4]);
+                       return 0;
+               }
+
+               entry += bios->data[bios->offset + 9];
+       }
+
+       return -ENOENT;
+}
+
 static int
 parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
                struct bit_table *table)
 {
        struct drm_device *dev = bios->dev;
-       uint8_t maxentries = bios->data[bitoffset + 4];
-       int i, offset;
        struct bit_entry bitentry;
 
-       for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
-               bitentry.id[0] = bios->data[offset];
-
-               if (bitentry.id[0] != table->id)
-                       continue;
-
-               bitentry.id[1] = bios->data[offset + 1];
-               bitentry.length = ROM16(bios->data[offset + 2]);
-               bitentry.offset = ROM16(bios->data[offset + 4]);
-
+       if (bit_table(dev, table->id, &bitentry) == 0)
                return table->parse_fn(dev, bios, &bitentry);
-       }
 
        NV_INFO(dev, "BIT table '%c' not found\n", table->id);
        return -ENOSYS;
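
[The new bit_table() helper walks the BIT structure directly: entry count at header offset 10, record stride at offset 9, records starting at offset 12, each carrying an id byte, a version byte, a 16-bit length and a 16-bit offset; parse_bit_table() above is now a thin wrapper over it. A userspace sketch with fabricated table bytes, not a real VBIOS:

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t rom16(const uint8_t *p) { return p[0] | p[1] << 8; }

	static int bit_table(const uint8_t *bios, uint8_t id,
			     uint16_t *version, uint16_t *offset)
	{
		uint8_t stride  = bios[9];
		uint8_t entries = bios[10];
		const uint8_t *entry = bios + 12;

		while (entries--) {
			if (entry[0] == id) {
				*version = entry[1];
				*offset  = rom16(entry + 4);
				return 0;
			}
			entry += stride;
		}
		return -1;
	}

	int main(void)
	{
		uint8_t bios[32] = {0};
		bios[9] = 6; bios[10] = 1;		/* one 6-byte entry */
		bios[12] = 'M'; bios[13] = 2;		/* id 'M', version 2 */
		bios[16] = 0x34; bios[17] = 0x12;	/* offset 0x1234 */

		uint16_t ver, off;
		if (bit_table(bios, 'M', &ver, &off) == 0)
			printf("M v%u @ 0x%04x\n", (unsigned)ver, (unsigned)off);
		return 0;
	}
]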
@@ -5683,8 +5768,14 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
 static struct dcb_gpio_entry *
 new_gpio_entry(struct nvbios *bios)
 {
+       struct drm_device *dev = bios->dev;
        struct dcb_gpio_table *gpio = &bios->dcb.gpio;
 
+       if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
+               NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
+               return NULL;
+       }
+
        return &gpio->entry[gpio->entries++];
 }
 
@@ -5705,114 +5796,91 @@ nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
        return NULL;
 }
 
-static void
-parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
-       struct dcb_gpio_entry *gpio;
-       uint16_t ent = ROM16(bios->data[offset]);
-       uint8_t line = ent & 0x1f,
-               tag = ent >> 5 & 0x3f,
-               flags = ent >> 11 & 0x1f;
-
-       if (tag == 0x3f)
-               return;
-
-       gpio = new_gpio_entry(bios);
-
-       gpio->tag = tag;
-       gpio->line = line;
-       gpio->invert = flags != 4;
-       gpio->entry = ent;
-}
-
-static void
-parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
-       uint32_t entry = ROM32(bios->data[offset]);
-       struct dcb_gpio_entry *gpio;
-
-       if ((entry & 0x0000ff00) == 0x0000ff00)
-               return;
-
-       gpio = new_gpio_entry(bios);
-       gpio->tag = (entry & 0x0000ff00) >> 8;
-       gpio->line = (entry & 0x0000001f) >> 0;
-       gpio->state_default = (entry & 0x01000000) >> 24;
-       gpio->state[0] = (entry & 0x18000000) >> 27;
-       gpio->state[1] = (entry & 0x60000000) >> 29;
-       gpio->entry = entry;
-}
-
 static void
 parse_dcb_gpio_table(struct nvbios *bios)
 {
        struct drm_device *dev = bios->dev;
-       uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
-       uint8_t *gpio_table = &bios->data[gpio_table_ptr];
-       int header_len = gpio_table[1],
-           entries = gpio_table[2],
-           entry_len = gpio_table[3];
-       void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
+       struct dcb_gpio_entry *e;
+       u8 headerlen, entries, recordlen;
+       u8 *dcb, *gpio = NULL, *entry;
        int i;
 
-       if (bios->dcb.version >= 0x40) {
-               if (gpio_table_ptr && entry_len != 4) {
-                       NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
-                       return;
-               }
+       dcb = ROMPTR(bios, bios->data[0x36]);
+       if (dcb[0] >= 0x30) {
+               gpio = ROMPTR(bios, dcb[10]);
+               if (!gpio)
+                       goto no_table;
 
-               parse_entry = parse_dcb40_gpio_entry;
+               headerlen = gpio[1];
+               entries   = gpio[2];
+               recordlen = gpio[3];
+       } else
+       if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
+               gpio = ROMPTR(bios, dcb[-15]);
+               if (!gpio)
+                       goto no_table;
+
+               headerlen = 3;
+               entries   = gpio[2];
+               recordlen = gpio[1];
+       } else
+       if (dcb[0] >= 0x22) {
+               /* No GPIO table present, parse the TVDAC GPIO data. */
+               uint8_t *tvdac_gpio = &dcb[-5];
 
-       } else if (bios->dcb.version >= 0x30) {
-               if (gpio_table_ptr && entry_len != 2) {
-                       NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
-                       return;
+               if (tvdac_gpio[0] & 1) {
+                       e = new_gpio_entry(bios);
+                       e->tag = DCB_GPIO_TVDAC0;
+                       e->line = tvdac_gpio[1] >> 4;
+                       e->invert = tvdac_gpio[0] & 2;
                }
 
-               parse_entry = parse_dcb30_gpio_entry;
-
-       } else if (bios->dcb.version >= 0x22) {
-               /*
-                * DCBs older than v3.0 don't really have a GPIO
-                * table, instead they keep some GPIO info at fixed
-                * locations.
-                */
-               uint16_t dcbptr = ROM16(bios->data[0x36]);
-               uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
+               goto no_table;
+       } else {
+               NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
+               goto no_table;
+       }
 
-               if (tvdac_gpio[0] & 1) {
-                       struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+       entry = gpio + headerlen;
+       for (i = 0; i < entries; i++, entry += recordlen) {
+               e = new_gpio_entry(bios);
+               if (!e)
+                       break;
 
-                       gpio->tag = DCB_GPIO_TVDAC0;
-                       gpio->line = tvdac_gpio[1] >> 4;
-                       gpio->invert = tvdac_gpio[0] & 2;
-               }
-       } else {
-               /*
-                * No systematic way to store GPIO info on pre-v2.2
-                * DCBs, try to match the PCI device IDs.
-                */
+               if (gpio[0] < 0x40) {
+                       e->entry = ROM16(entry[0]);
+                       e->tag = (e->entry & 0x07e0) >> 5;
+                       if (e->tag == 0x3f) {
+                               bios->dcb.gpio.entries--;
+                               continue;
+                       }
 
-               /* Apple iMac G4 NV18 */
-               if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
-                       struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+                       e->line = (e->entry & 0x001f);
+                       e->invert = ((e->entry & 0xf800) >> 11) != 4;
+               } else {
+                       e->entry = ROM32(entry[0]);
+                       e->tag = (e->entry & 0x0000ff00) >> 8;
+                       if (e->tag == 0xff) {
+                               bios->dcb.gpio.entries--;
+                               continue;
+                       }
 
-                       gpio->tag = DCB_GPIO_TVDAC0;
-                       gpio->line = 4;
+                       e->line = (e->entry & 0x0000001f) >> 0;
+                       e->state_default = (e->entry & 0x01000000) >> 24;
+                       e->state[0] = (e->entry & 0x18000000) >> 27;
+                       e->state[1] = (e->entry & 0x60000000) >> 29;
                }
-
        }
 
-       if (!gpio_table_ptr)
-               return;
-
-       if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
-               NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
-               entries = DCB_MAX_NUM_GPIO_ENTRIES;
+no_table:
+       /* Apple iMac G4 NV18 */
+       if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+               e = new_gpio_entry(bios);
+               if (e) {
+                       e->tag = DCB_GPIO_TVDAC0;
+                       e->line = 4;
+               }
        }
-
-       for (i = 0; i < entries; i++)
-               parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
 }
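
[The rewrite above folds the three DCB-version-specific GPIO parsers into one loop: pre-4.0 tables use 16-bit records (tag in bits 5..10, 0x3f as the skip sentinel), while 4.0+ tables use 32-bit records (tag in bits 8..15, sentinel 0xff). A self-contained sketch of the two decodes, purely illustrative bit-plumbing:

	#include <stdio.h>
	#include <stdint.h>

	static void decode_gpio30(uint16_t e)
	{
		unsigned line = e & 0x1f, tag = (e >> 5) & 0x3f;
		unsigned invert = ((e >> 11) & 0x1f) != 4;

		if (tag == 0x3f)	/* sentinel: skip */
			return;
		printf("dcb30: tag=0x%02x line=%u invert=%u\n", tag, line, invert);
	}

	static void decode_gpio40(uint32_t e)
	{
		unsigned tag = (e >> 8) & 0xff, line = e & 0x1f;

		if (tag == 0xff)	/* sentinel: skip */
			return;
		printf("dcb40: tag=0x%02x line=%u default=%u\n",
		       tag, line, (unsigned)((e >> 24) & 1));
	}

	int main(void)
	{
		decode_gpio30(0x0044);		/* tag 0x02, line 4 */
		decode_gpio40(0x0100020cU);	/* tag 0x02, line 12, default 1 */
		return 0;
	}
]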
 
 struct dcb_connector_table_entry *
@@ -6680,6 +6748,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
                                        bit_signature, sizeof(bit_signature));
        if (offset) {
                NV_TRACE(dev, "BIT BIOS found\n");
+               bios->type = NVBIOS_BIT;
+               bios->offset = offset;
                return parse_bit_structure(bios, offset + 6);
        }
 
@@ -6687,6 +6757,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
                                        bmp_signature, sizeof(bmp_signature));
        if (offset) {
                NV_TRACE(dev, "BMP BIOS found\n");
+               bios->type = NVBIOS_BMP;
+               bios->offset = offset;
                return parse_bmp_structure(dev, bios, offset);
        }
 
@@ -6806,6 +6878,8 @@ nouveau_bios_init(struct drm_device *dev)
                        "running VBIOS init tables.\n");
                bios->execute = true;
        }
+       if (nouveau_force_post)
+               bios->execute = true;
 
        ret = nouveau_run_vbios_init(dev);
        if (ret)
index c1de2f3fcb0ea7e78ec193d88c74683d5090eaba..50a648e01c499a09287b8861865cf8f32d5e196f 100644 (file)
 
 #define DCB_LOC_ON_CHIP 0
 
+#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+
+struct bit_entry {
+       uint8_t  id;
+       uint8_t  version;
+       uint16_t length;
+       uint16_t offset;
+       uint8_t *data;
+};
+
+int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+
 struct dcb_i2c_entry {
        uint32_t entry;
        uint8_t port_type;
@@ -170,16 +184,28 @@ enum LVDS_script {
        LVDS_PANEL_OFF
 };
 
-/* changing these requires matching changes to reg tables in nv_get_clock */
-#define MAX_PLL_TYPES  4
+/* these match types in pll limits table version 0x40,
+ * nouveau uses them on all chipsets internally where a
+ * specific pll needs to be referenced, but the exact
+ * register isn't known.
+ */
 enum pll_types {
-       NVPLL,
-       MPLL,
-       VPLL1,
-       VPLL2
+       PLL_CORE   = 0x01,
+       PLL_SHADER = 0x02,
+       PLL_UNK03  = 0x03,
+       PLL_MEMORY = 0x04,
+       PLL_UNK05  = 0x05,
+       PLL_UNK40  = 0x40,
+       PLL_UNK41  = 0x41,
+       PLL_UNK42  = 0x42,
+       PLL_VPLL0  = 0x80,
+       PLL_VPLL1  = 0x81,
+       PLL_MAX    = 0xff
 };
 
 struct pll_lims {
+       u32 reg;
+
        struct {
                int minfreq;
                int maxfreq;
@@ -212,6 +238,11 @@ struct pll_lims {
 
 struct nvbios {
        struct drm_device *dev;
+       enum {
+               NVBIOS_BMP,
+               NVBIOS_BIT
+       } type;
+       uint16_t offset;
 
        uint8_t chip_version;
 
index f6f44779d82fb801ac738cc9e5f83e4179fd6de3..80353e2b8409fdfe34b620b54ce8fdc119af5acd 100644 (file)
 #include <linux/log2.h>
 #include <linux/slab.h>
 
-int
-nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
-{
-       struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
-       int ret;
-
-       if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
-               return 0;
-
-       spin_lock(&nvbo->bo.lock);
-       ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-       spin_unlock(&nvbo->bo.lock);
-       return ret;
-}
-
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
@@ -58,8 +43,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-       ttm_bo_kunmap(&nvbo->kmap);
-
        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
@@ -164,8 +147,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
        nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
        align >>= PAGE_SHIFT;
 
-       nvbo->placement.fpfn = 0;
-       nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
        nouveau_bo_placement_set(nvbo, flags, 0);
 
        nvbo->channel = chan;
@@ -305,7 +286,8 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 void
 nouveau_bo_unmap(struct nouveau_bo *nvbo)
 {
-       ttm_bo_kunmap(&nvbo->kmap);
+       if (nvbo)
+               ttm_bo_kunmap(&nvbo->kmap);
 }
 
 u16
@@ -399,14 +381,19 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
+               man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
-               man->gpu_offset = dev_priv->vm_vram_base;
+               if (dev_priv->card_type == NV_50)
+                       man->gpu_offset = 0x40000000;
+               else
+                       man->gpu_offset = 0;
                break;
        case TTM_PL_TT:
+               man->func = &ttm_bo_manager_func;
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -469,19 +456,26 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-                                       evict || (nvbo->channel &&
-                                                 nvbo->channel != chan),
+       if (nvbo->channel) {
+               ret = nouveau_fence_sync(fence, nvbo->channel);
+               if (ret)
+                       goto out;
+       }
+
+       ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
                                        no_wait_reserve, no_wait_gpu, new_mem);
+out:
        nouveau_fence_unref((void *)&fence);
        return ret;
 }
 
 static inline uint32_t
-nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
-                     struct ttm_mem_reg *mem)
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+                     struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 {
-       if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
+       struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+       if (nvbo->no_vm) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
@@ -493,86 +487,181 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 }
 
 static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-                    bool no_wait_reserve, bool no_wait_gpu,
-                    struct ttm_mem_reg *new_mem)
+nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+                 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-       struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-       struct ttm_mem_reg *old_mem = &bo->mem;
-       struct nouveau_channel *chan;
-       uint64_t src_offset, dst_offset;
-       uint32_t page_count;
+       struct nouveau_bo *nvbo = nouveau_bo(bo);
+       u64 length = (new_mem->num_pages << PAGE_SHIFT);
+       u64 src_offset, dst_offset;
        int ret;
 
-       chan = nvbo->channel;
-       if (!chan || nvbo->tile_flags || nvbo->no_vm)
-               chan = dev_priv->channel;
-
-       src_offset = old_mem->mm_node->start << PAGE_SHIFT;
-       dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
-       if (chan != dev_priv->channel) {
-               if (old_mem->mem_type == TTM_PL_TT)
-                       src_offset += dev_priv->vm_gart_base;
-               else
+       src_offset = old_mem->start << PAGE_SHIFT;
+       dst_offset = new_mem->start << PAGE_SHIFT;
+       if (!nvbo->no_vm) {
+               if (old_mem->mem_type == TTM_PL_VRAM)
                        src_offset += dev_priv->vm_vram_base;
-
-               if (new_mem->mem_type == TTM_PL_TT)
-                       dst_offset += dev_priv->vm_gart_base;
                else
+                       src_offset += dev_priv->vm_gart_base;
+
+               if (new_mem->mem_type == TTM_PL_VRAM)
                        dst_offset += dev_priv->vm_vram_base;
+               else
+                       dst_offset += dev_priv->vm_gart_base;
        }
 
        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;
-       BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
-       OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
-       OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
 
-       if (dev_priv->card_type >= NV_50) {
-               ret = RING_SPACE(chan, 4);
+       BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
+       OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+       OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
+       while (length) {
+               u32 amount, stride, height;
+
+               amount  = min(length, (u64)(4 * 1024 * 1024));
+               stride  = 16 * 4;
+               height  = amount / stride;
+
+               if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+                       ret = RING_SPACE(chan, 8);
+                       if (ret)
+                               return ret;
+
+                       BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
+                       OUT_RING  (chan, 0);
+                       OUT_RING  (chan, 0);
+                       OUT_RING  (chan, stride);
+                       OUT_RING  (chan, height);
+                       OUT_RING  (chan, 1);
+                       OUT_RING  (chan, 0);
+                       OUT_RING  (chan, 0);
+               } else {
+                       ret = RING_SPACE(chan, 2);
+                       if (ret)
+                               return ret;
+
+                       BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+                       OUT_RING  (chan, 1);
+               }
+               if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+                       ret = RING_SPACE(chan, 8);
+                       if (ret)
+                               return ret;
+
+                       BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
+                       OUT_RING  (chan, 0);
+                       OUT_RING  (chan, 0);
+                       OUT_RING  (chan, stride);
+                       OUT_RING  (chan, height);
+                       OUT_RING  (chan, 1);
+                       OUT_RING  (chan, 0);
+                       OUT_RING  (chan, 0);
+               } else {
+                       ret = RING_SPACE(chan, 2);
+                       if (ret)
+                               return ret;
+
+                       BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+                       OUT_RING  (chan, 1);
+               }
+
+               ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;
-               BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
-               OUT_RING(chan, 1);
-               BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
-               OUT_RING(chan, 1);
+
+               BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+               OUT_RING  (chan, upper_32_bits(src_offset));
+               OUT_RING  (chan, upper_32_bits(dst_offset));
+               BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
+               OUT_RING  (chan, lower_32_bits(src_offset));
+               OUT_RING  (chan, lower_32_bits(dst_offset));
+               OUT_RING  (chan, stride);
+               OUT_RING  (chan, stride);
+               OUT_RING  (chan, stride);
+               OUT_RING  (chan, height);
+               OUT_RING  (chan, 0x00000101);
+               OUT_RING  (chan, 0x00000000);
+               BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+               OUT_RING  (chan, 0);
+
+               length -= amount;
+               src_offset += amount;
+               dst_offset += amount;
        }
 
+       return 0;
+}
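
[The NV50 path above streams the copy in chunks of at most 4 MiB, each submitted as a 2D transfer 64 bytes wide (16 * 4) and amount/stride rows tall, re-emitting the tiling setup per chunk when either end is tiled VRAM. The chunking arithmetic as a standalone sketch:

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

	int main(void)
	{
		uint64_t length = 9ULL << 20;	/* pretend 9 MiB to move */
		uint64_t src = 0, dst = 0;

		while (length) {
			uint32_t amount = min_u64(length, 4ULL << 20);
			uint32_t stride = 16 * 4;	/* 64 bytes per row */
			uint32_t height = amount / stride;

			printf("copy %u bytes (%u rows) src=%llu dst=%llu\n",
			       amount, height, (unsigned long long)src,
			       (unsigned long long)dst);

			length -= amount;
			src += amount;
			dst += amount;
		}
		return 0;
	}
]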
+
+static int
+nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+                 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+       u32 src_offset = old_mem->start << PAGE_SHIFT;
+       u32 dst_offset = new_mem->start << PAGE_SHIFT;
+       u32 page_count = new_mem->num_pages;
+       int ret;
+
+       ret = RING_SPACE(chan, 3);
+       if (ret)
+               return ret;
+
+       BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+       OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+       OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;
 
-               if (dev_priv->card_type >= NV_50) {
-                       ret = RING_SPACE(chan, 3);
-                       if (ret)
-                               return ret;
-                       BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
-                       OUT_RING(chan, upper_32_bits(src_offset));
-                       OUT_RING(chan, upper_32_bits(dst_offset));
-               }
                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;
+
                BEGIN_RING(chan, NvSubM2MF,
                                 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
-               OUT_RING(chan, lower_32_bits(src_offset));
-               OUT_RING(chan, lower_32_bits(dst_offset));
-               OUT_RING(chan, PAGE_SIZE); /* src_pitch */
-               OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
-               OUT_RING(chan, PAGE_SIZE); /* line_length */
-               OUT_RING(chan, line_count);
-               OUT_RING(chan, (1<<8)|(1<<0));
-               OUT_RING(chan, 0);
+               OUT_RING  (chan, src_offset);
+               OUT_RING  (chan, dst_offset);
+               OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
+               OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
+               OUT_RING  (chan, PAGE_SIZE); /* line_length */
+               OUT_RING  (chan, line_count);
+               OUT_RING  (chan, 0x00000101);
+               OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-               OUT_RING(chan, 0);
+               OUT_RING  (chan, 0);
 
                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }
 
+       return 0;
+}
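
[The NV04 path keeps the older scheme: pages go out as PAGE_SIZE-byte lines, at most 2047 per M2MF submission, the limit of the line-count field. A sketch of that loop:

	#include <stdio.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		unsigned page_count = 5000;
		unsigned src = 0, dst = 0;

		while (page_count) {
			int line_count = page_count > 2047 ? 2047 : page_count;

			printf("submit %d lines, src=0x%x dst=0x%x\n",
			       line_count, src, dst);

			page_count -= line_count;
			src += PAGE_SIZE * line_count;
			dst += PAGE_SIZE * line_count;
		}
		return 0;
	}
]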
+
+static int
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+                    bool no_wait_reserve, bool no_wait_gpu,
+                    struct ttm_mem_reg *new_mem)
+{
+       struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+       struct nouveau_bo *nvbo = nouveau_bo(bo);
+       struct nouveau_channel *chan;
+       int ret;
+
+       chan = nvbo->channel;
+       if (!chan || nvbo->no_vm)
+               chan = dev_priv->channel;
+
+       if (dev_priv->card_type < NV_50)
+               ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+       else
+               ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+       if (ret)
+               return ret;
+
        return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
@@ -606,12 +695,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
-       if (tmp_mem.mm_node) {
-               spin_lock(&bo->bdev->glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&bo->bdev->glob->lru_lock);
-       }
-
+       ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
 }
 
@@ -644,12 +728,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                goto out;
 
 out:
-       if (tmp_mem.mm_node) {
-               spin_lock(&bo->bdev->glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&bo->bdev->glob->lru_lock);
-       }
-
+       ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
 }
 
@@ -669,7 +748,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                return 0;
        }
 
-       offset = new_mem->mm_node->start << PAGE_SHIFT;
+       offset = new_mem->start << PAGE_SHIFT;
 
        if (dev_priv->card_type == NV_50) {
                ret = nv50_mem_vm_bind_linear(dev,
@@ -719,12 +798,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                return ret;
 
-       /* Software copy if the card isn't up and running yet. */
-       if (!dev_priv->channel) {
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
-               goto out;
-       }
-
        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
@@ -733,6 +806,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                goto out;
        }
 
+       /* Software copy if the card isn't up and running yet. */
+       if (!dev_priv->channel) {
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+               goto out;
+       }
+
        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
@@ -783,14 +862,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
        case TTM_PL_TT:
 #if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
-                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
 #endif
                break;
        case TTM_PL_VRAM:
-               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
                break;
@@ -808,7 +887,26 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 static int
 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-       return 0;
+       struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+       struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+       /* as long as the bo isn't in vram, and isn't tiled, we've got
+        * nothing to do here.
+        */
+       if (bo->mem.mem_type != TTM_PL_VRAM) {
+               if (dev_priv->card_type < NV_50 || !nvbo->tile_flags)
+                       return 0;
+       }
+
+       /* make sure bo is in mappable vram */
+       if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+               return 0;
+
+       nvbo->placement.fpfn = 0;
+       nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+       nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
+       return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
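The rewritten notify hook only forces a migration when the faulting buffer extends past the CPU-visible VRAM aperture; anything below fb_mappable_pages can be mapped where it sits. A standalone sketch of that bounds check (the helper name is illustrative, not part of the driver):

static bool bo_is_cpu_mappable_sketch(struct ttm_buffer_object *bo,
                                      unsigned long fb_mappable_pages)
{
        /* start and num_pages are in units of pages, as in the hunk above */
        return bo->mem.start + bo->mem.num_pages < fb_mappable_pages;
}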
index ca85da78484653180ef6c03b64655df3e8117202..dad96cce5e390a77ff3de98d63658632c9603cc0 100644 (file)
@@ -198,8 +198,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv_fifo_info fifo_data;
        struct nv_sim_state sim_data;
-       int MClk = nouveau_hw_get_clock(dev, MPLL);
-       int NVClk = nouveau_hw_get_clock(dev, NVPLL);
+       int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
+       int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
        uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1);
 
        sim_data.pclk_khz = VClk;
@@ -234,7 +234,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 }
 
 static void
-nv30_update_arb(int *burst, int *lwm)
+nv20_update_arb(int *burst, int *lwm)
 {
        unsigned int fifo_size, burst_size, graphics_lwm;
 
@@ -251,14 +251,14 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->card_type < NV_30)
+       if (dev_priv->card_type < NV_20)
                nv04_update_arb(dev, vclk, bpp, burst, lwm);
        else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
                 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
                *burst = 128;
                *lwm = 0x0480;
        } else
-               nv30_update_arb(burst, lwm);
+               nv20_update_arb(burst, lwm);
 }
 
 static int
index 0480f064f2c14fd8c4bf41672fa039f9f2bc9029..373950e3481474e72cdc19c7fb858adf3cd8190c 100644 (file)
@@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
                                                  dev_priv->gart_info.aper_size,
                                                  NV_DMA_ACCESS_RO, &pushbuf,
                                                  NULL);
-               chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+               chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else
        if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->fb_available_size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_VIDMEM, &pushbuf);
-               chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+               chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else {
                /* NV04 cmdbuf hack, from original ddx.. not sure of its
                 * exact reason for existing :)  PCI access to cmdbuf in
@@ -67,17 +67,11 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
                                             dev_priv->fb_available_size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_PCI, &pushbuf);
-               chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
-       }
-
-       ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
-       if (ret) {
-               NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
-               if (pushbuf != dev_priv->gart_info.sg_ctxdma)
-                       nouveau_gpuobj_del(dev, &pushbuf);
-               return ret;
+               chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        }
 
+       nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
+       nouveau_gpuobj_ref(NULL, &pushbuf);
        return 0;
 }
 
@@ -229,7 +223,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 
        ret = nouveau_dma_init(chan);
        if (!ret)
-               ret = nouveau_fence_init(chan);
+               ret = nouveau_fence_channel_init(chan);
        if (ret) {
                nouveau_channel_free(chan);
                return ret;
@@ -276,7 +270,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
         * above attempts at idling were OK, but if we failed this'll tell TTM
         * we're done with the buffers.
         */
-       nouveau_fence_fini(chan);
+       nouveau_fence_channel_fini(chan);
 
        /* This will prevent pfifo from switching channels. */
        pfifo->reassign(dev, false);
@@ -308,8 +302,9 @@ nouveau_channel_free(struct nouveau_channel *chan)
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
        /* Release the channel's resources */
-       nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+       nouveau_gpuobj_ref(NULL, &chan->pushbuf);
        if (chan->pushbuf_bo) {
+               nouveau_bo_unmap(chan->pushbuf_bo);
                nouveau_bo_unpin(chan->pushbuf_bo);
                nouveau_bo_ref(NULL, &chan->pushbuf_bo);
        }
index fc737037f751c3690dfb09239e3439df1fa4191c..0871495096fa86f1278f825c8420f8e14f1e6b38 100644 (file)
@@ -76,6 +76,22 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
        return NULL;
 }
 
+/* TODO: This could use improvement, and learn to handle the fixed
+ *      BIOS tables etc.  It's fine currently, for its only user.
+ */
+int
+nouveau_connector_bpp(struct drm_connector *connector)
+{
+       struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+       if (nv_connector->edid && nv_connector->edid->revision >= 4) {
+               u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
+               if (bpc > 4)
+                       return bpc;
+       }
+
+       return 18;
+}
 
 static void
 nouveau_connector_destroy(struct drm_connector *drm_connector)
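For reference, the decode above follows EDID 1.4: bits 6:4 of the video input byte encode color depth as 001b=6bpc, 010b=8bpc, 011b=10bpc and so on, so ((input & 0x70) >> 3) + 4 recovers bits per component, while the undefined 000b pattern decodes to 4 and is filtered out by the bpc > 4 test. A standalone check with a hypothetical input byte:

#include <assert.h>

static void edid_bpc_decode_example(void)
{
        unsigned char input = 0x20;     /* bits 6:4 = 010b -> 8bpc */
        unsigned int bpc = ((input & 0x70) >> 3) + 4;

        assert(bpc == 8);
}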
@@ -130,6 +146,36 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
        return NULL;
 }
 
+static struct nouveau_encoder *
+nouveau_connector_of_detect(struct drm_connector *connector)
+{
+#ifdef __powerpc__
+       struct drm_device *dev = connector->dev;
+       struct nouveau_connector *nv_connector = nouveau_connector(connector);
+       struct nouveau_encoder *nv_encoder;
+       struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
+
+       if (!dn ||
+           !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
+             (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
+               return NULL;
+
+       for_each_child_of_node(dn, cn) {
+               const char *name = of_get_property(cn, "name", NULL);
+               const void *edid = of_get_property(cn, "EDID", NULL);
+               int idx = name ? name[strlen(name) - 1] - 'A' : 0;
+
+               if (nv_encoder->dcb->i2c_index == idx && edid) {
+                       nv_connector->edid =
+                               kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+                       of_node_put(cn);
+                       return nv_encoder;
+               }
+       }
+#endif
+       return NULL;
+}
+
 static void
 nouveau_connector_set_encoder(struct drm_connector *connector,
                              struct nouveau_encoder *nv_encoder)
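The idx computation in this probe assumes Apple device trees name the display child nodes with a trailing port letter (e.g. "NVDA,Display-A", "NVDA,Display-B"), the last character selecting the DCB i2c index. A standalone sketch of that mapping, with an illustrative node name:

#include <string.h>

static int of_name_to_index_sketch(const char *name)
{
        size_t len = strlen(name);

        return len ? name[len - 1] - 'A' : 0;   /* "NVDA,Display-B" -> 1 */
}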
@@ -225,6 +271,12 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
                return connector_status_connected;
        }
 
+       nv_encoder = nouveau_connector_of_detect(connector);
+       if (nv_encoder) {
+               nouveau_connector_set_encoder(connector, nv_encoder);
+               return connector_status_connected;
+       }
+
 detect_analog:
        nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
        if (!nv_encoder && !nouveau_tv_disable)
@@ -630,7 +682,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
                else
                        max_clock = nv_encoder->dp.link_nr * 162000;
 
-               clock *= 3;
+               clock = clock * nouveau_connector_bpp(connector) / 8;
                break;
        default:
                BUG_ON(1);
index 0d2e668ccfe5c52525117ca83a9e3791e0fef936..c21ed6b16f88d41aebdee888997c12a7249fdd03 100644 (file)
@@ -55,4 +55,7 @@ nouveau_connector_create(struct drm_device *, int index);
 void
 nouveau_connector_set_polling(struct drm_connector *);
 
+int
+nouveau_connector_bpp(struct drm_connector *);
+
 #endif /* __NOUVEAU_CONNECTOR_H__ */
index 7933de4aff2ed23745332ca1aeaab7ece4a22fde..8e1592368cce1985d3270f934473a58fb73f6a33 100644 (file)
@@ -157,7 +157,23 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
        return 0;
 }
 
+static int
+nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
+       int ret;
+
+       ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+       if (ret)
+               seq_printf(m, "failed: %d\n", ret);
+       else
+               seq_printf(m, "succeeded\n");
+       return 0;
+}
+
 static struct drm_info_list nouveau_debugfs_list[] = {
+       { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
        { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
        { "memory", nouveau_debugfs_memory_info, 0, NULL },
        { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
index 2e3c6caa97eeefb70eda2b06f633aae520c19ceb..82581e600dcdea09c9b4d7ccf7ee7c0273506cb7 100644 (file)
@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_ramht.h"
 
 void
 nouveau_dma_pre_init(struct nouveau_channel *chan)
@@ -58,26 +59,17 @@ nouveau_dma_init(struct nouveau_channel *chan)
 {
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpuobj *m2mf = NULL;
-       struct nouveau_gpuobj *nvsw = NULL;
+       struct nouveau_gpuobj *obj = NULL;
        int ret, i;
 
        /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
        ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
-                                   0x0039 : 0x5039, &m2mf);
+                                   0x0039 : 0x5039, &obj);
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
-       if (ret)
-               return ret;
-
-       /* Create an NV_SW object for various sync purposes */
-       ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
-       if (ret)
-               return ret;
-
-       ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
+       ret = nouveau_ramht_insert(chan, NvM2MF, obj);
+       nouveau_gpuobj_ref(NULL, &obj);
        if (ret)
                return ret;
 
@@ -91,11 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
        if (ret)
                return ret;
 
-       /* Map M2MF notifier object - fbcon. */
-       ret = nouveau_bo_map(chan->notifier_bo);
-       if (ret)
-               return ret;
-
        /* Insert NOPS for NOUVEAU_DMA_SKIPS */
        ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
        if (ret)
@@ -113,13 +100,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
        OUT_RING(chan, NvNotify0);
 
-       /* Initialise NV_SW */
-       ret = RING_SPACE(chan, 2);
-       if (ret)
-               return ret;
-       BEGIN_RING(chan, NvSubSw, 0, 1);
-       OUT_RING(chan, NvSw);
-
        /* Sit back and pray the channel works.. */
        FIRE_RING(chan);
 
@@ -217,7 +197,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
 
                chan->dma.ib_free = get - chan->dma.ib_put;
                if (chan->dma.ib_free <= 0)
-                       chan->dma.ib_free += chan->dma.ib_max + 1;
+                       chan->dma.ib_free += chan->dma.ib_max;
        }
 
        return 0;
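The off-by-one fix above follows the usual ring-buffer convention: one slot stays unused so that get == put unambiguously means "empty", which caps the in-flight entries at ib_max when ib_max is one less than the ring size. A worked sketch with a hypothetical 8-entry ring:

#include <assert.h>

enum { RING_SLOTS = 8 };        /* hypothetical; ib_max would be 7 */

static int ring_free_sketch(int get, int put)
{
        int free = get - put;

        if (free <= 0)
                free += RING_SLOTS - 1; /* mirrors "+= ib_max" above */
        return free;
}

/* e.g. ring_free_sketch(3, 3) == 7: an empty ring still leaves one slot unused */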
index 8b05c15866d5b9ad83bc52f78378944f90412fe9..d578c21d3c8d7f9e42d7dd079eba74b26e355cd1 100644 (file)
@@ -72,6 +72,7 @@ enum {
        NvGdiRect       = 0x8000000c,
        NvImageBlit     = 0x8000000d,
        NvSw            = 0x8000000e,
+       NvSema          = 0x8000000f,
 
        /* G80+ display objects */
        NvEvoVRAM       = 0x01000000,
index 8a1b188b4cd13bcdff20d41484cd120618216293..4562f309ae3db87852021061a583b27886592c52 100644 (file)
@@ -317,7 +317,8 @@ train:
                return false;
 
        config[0] = nv_encoder->dp.link_nr;
-       if (nv_encoder->dp.dpcd_version >= 0x11)
+       if (nv_encoder->dp.dpcd_version >= 0x11 &&
+           nv_encoder->dp.enhanced_frame)
                config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 
        ret = nouveau_dp_lane_count_set(encoder, config[0]);
@@ -468,10 +469,12 @@ nouveau_dp_detect(struct drm_encoder *encoder)
            !nv_encoder->dcb->dpconf.link_bw)
                nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
 
-       nv_encoder->dp.link_nr = dpcd[2] & 0xf;
+       nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
        if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
                nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
 
+       nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP);
+
        return true;
 }
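Both hunks read DPCD receiver capability byte 2, which packs the maximum lane count into bits 4:0 and the enhanced-framing capability into bit 7; using the named constants instead of a bare 0xf keeps the full field. A standalone decode of an example value:

#include <assert.h>

static void dpcd_lane_decode_example(void)
{
        unsigned char dpcd2 = 0x84;             /* hypothetical sink */
        unsigned int lanes = dpcd2 & 0x1f;      /* DP_MAX_LANE_COUNT_MASK */
        unsigned int efc = !!(dpcd2 & 0x80);    /* DP_ENHANCED_FRAME_CAP */

        assert(lanes == 4 && efc == 1);
}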
 
@@ -524,7 +527,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
                nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
                nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
                nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
-               if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
+               if (!nv_wait(dev, NV50_AUXCH_CTRL(index),
+                            0x00010000, 0x00000000)) {
                        NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
                                 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
                        ret = -EBUSY;
index eb15345162a03af66a556ccd6f083907261abd90..90875494a65ab160933f4aebc8a4c00209792585 100644 (file)
 #include "nouveau_hw.h"
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_pm.h"
 #include "nv50_display.h"
 
 #include "drm_pciids.h"
 
-MODULE_PARM_DESC(noagp, "Disable AGP");
-int nouveau_noagp;
-module_param_named(noagp, nouveau_noagp, int, 0400);
+MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
+int nouveau_agpmode = -1;
+module_param_named(agpmode, nouveau_agpmode, int, 0400);
 
 MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
 static int nouveau_modeset = -1; /* kms */
@@ -79,6 +80,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
 int nouveau_nofbaccel = 0;
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 
+MODULE_PARM_DESC(force_post, "Force POST");
+int nouveau_force_post = 0;
+module_param_named(force_post, nouveau_force_post, int, 0400);
+
 MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
 int nouveau_override_conntype = 0;
 module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
@@ -102,6 +107,14 @@ MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
 int nouveau_reg_debug;
 module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
 
+MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
+char *nouveau_perflvl;
+module_param_named(perflvl, nouveau_perflvl, charp, 0400);
+
+MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
+int nouveau_perflvl_wr;
+module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
+
 int nouveau_fbpercrtc;
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -271,6 +284,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
        if (ret)
                return ret;
 
+       nouveau_pm_resume(dev);
+
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                ret = nouveau_mem_init_agp(dev);
                if (ret) {
@@ -379,8 +394,6 @@ static struct drm_driver driver = {
        .irq_uninstall = nouveau_irq_uninstall,
        .irq_handler = nouveau_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = nouveau_ioctls,
        .fops = {
                .owner = THIS_MODULE,
index b1be617373b63891dc05b09f2dc75807cd8b8472..3a07e580d27af548fe2fd247b4e7df4c67994956 100644 (file)
@@ -133,22 +133,24 @@ enum nouveau_flags {
 #define NVOBJ_ENGINE_DISPLAY   2
 #define NVOBJ_ENGINE_INT       0xdeadbeef
 
-#define NVOBJ_FLAG_ALLOW_NO_REFS       (1 << 0)
 #define NVOBJ_FLAG_ZERO_ALLOC          (1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE           (1 << 2)
-#define NVOBJ_FLAG_FAKE                        (1 << 3)
 struct nouveau_gpuobj {
+       struct drm_device *dev;
+       struct kref refcount;
        struct list_head list;
 
-       struct nouveau_channel *im_channel;
        struct drm_mm_node *im_pramin;
        struct nouveau_bo *im_backing;
-       uint32_t im_backing_start;
        uint32_t *im_backing_suspend;
        int im_bound;
 
        uint32_t flags;
-       int refcount;
+
+       u32 size;
+       u32 pinst;
+       u32 cinst;
+       u64 vinst;
 
        uint32_t engine;
        uint32_t class;
@@ -157,16 +159,6 @@ struct nouveau_gpuobj {
        void *priv;
 };
 
-struct nouveau_gpuobj_ref {
-       struct list_head list;
-
-       struct nouveau_gpuobj *gpuobj;
-       uint32_t instance;
-
-       struct nouveau_channel *channel;
-       int handle;
-};
-
 struct nouveau_channel {
        struct drm_device *dev;
        int id;
@@ -192,33 +184,32 @@ struct nouveau_channel {
        } fence;
 
        /* DMA push buffer */
-       struct nouveau_gpuobj_ref *pushbuf;
-       struct nouveau_bo         *pushbuf_bo;
-       uint32_t                   pushbuf_base;
+       struct nouveau_gpuobj *pushbuf;
+       struct nouveau_bo     *pushbuf_bo;
+       uint32_t               pushbuf_base;
 
        /* Notifier memory */
        struct nouveau_bo *notifier_bo;
        struct drm_mm notifier_heap;
 
        /* PFIFO context */
-       struct nouveau_gpuobj_ref *ramfc;
-       struct nouveau_gpuobj_ref *cache;
+       struct nouveau_gpuobj *ramfc;
+       struct nouveau_gpuobj *cache;
 
        /* PGRAPH context */
        /* XXX may be merge 2 pointers as private data ??? */
-       struct nouveau_gpuobj_ref *ramin_grctx;
+       struct nouveau_gpuobj *ramin_grctx;
        void *pgraph_ctx;
 
        /* NV50 VM */
-       struct nouveau_gpuobj     *vm_pd;
-       struct nouveau_gpuobj_ref *vm_gart_pt;
-       struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
+       struct nouveau_gpuobj *vm_pd;
+       struct nouveau_gpuobj *vm_gart_pt;
+       struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 
        /* Objects */
-       struct nouveau_gpuobj_ref *ramin; /* Private instmem */
-       struct drm_mm              ramin_heap; /* Private PRAMIN heap */
-       struct nouveau_gpuobj_ref *ramht; /* Hash table */
-       struct list_head           ramht_refs; /* Objects referenced by RAMHT */
+       struct nouveau_gpuobj *ramin; /* Private instmem */
+       struct drm_mm          ramin_heap; /* Private PRAMIN heap */
+       struct nouveau_ramht  *ramht; /* Hash table */
 
        /* GPU object info for stuff used in-kernel (mm_enabled) */
        uint32_t m2mf_ntfy;
@@ -296,7 +287,7 @@ struct nouveau_fb_engine {
 struct nouveau_fifo_engine {
        int  channels;
 
-       struct nouveau_gpuobj_ref *playlist[2];
+       struct nouveau_gpuobj *playlist[2];
        int cur_playlist;
 
        int  (*init)(struct drm_device *);
@@ -305,7 +296,6 @@ struct nouveau_fifo_engine {
        void (*disable)(struct drm_device *);
        void (*enable)(struct drm_device *);
        bool (*reassign)(struct drm_device *, bool enable);
-       bool (*cache_flush)(struct drm_device *dev);
        bool (*cache_pull)(struct drm_device *dev, bool enable);
 
        int  (*channel_id)(struct drm_device *);
@@ -334,7 +324,7 @@ struct nouveau_pgraph_engine {
        int grctx_size;
 
        /* NV2x/NV3x context table (0x400780) */
-       struct nouveau_gpuobj_ref *ctx_table;
+       struct nouveau_gpuobj *ctx_table;
 
        int  (*init)(struct drm_device *);
        void (*takedown)(struct drm_device *);
@@ -369,6 +359,91 @@ struct nouveau_gpio_engine {
        void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
 };
 
+struct nouveau_pm_voltage_level {
+       u8 voltage;
+       u8 vid;
+};
+
+struct nouveau_pm_voltage {
+       bool supported;
+       u8 vid_mask;
+
+       struct nouveau_pm_voltage_level *level;
+       int nr_level;
+};
+
+#define NOUVEAU_PM_MAX_LEVEL 8
+struct nouveau_pm_level {
+       struct device_attribute dev_attr;
+       char name[32];
+       int id;
+
+       u32 core;
+       u32 memory;
+       u32 shader;
+       u32 unk05;
+
+       u8 voltage;
+       u8 fanspeed;
+
+       u16 memscript;
+};
+
+struct nouveau_pm_temp_sensor_constants {
+       u16 offset_constant;
+       s16 offset_mult;
+       u16 offset_div;
+       u16 slope_mult;
+       u16 slope_div;
+};
+
+struct nouveau_pm_threshold_temp {
+       s16 critical;
+       s16 down_clock;
+       s16 fan_boost;
+};
+
+struct nouveau_pm_memtiming {
+       u32 reg_100220;
+       u32 reg_100224;
+       u32 reg_100228;
+       u32 reg_10022c;
+       u32 reg_100230;
+       u32 reg_100234;
+       u32 reg_100238;
+       u32 reg_10023c;
+};
+
+struct nouveau_pm_memtimings {
+       bool supported;
+       struct nouveau_pm_memtiming *timing;
+       int nr_timing;
+};
+
+struct nouveau_pm_engine {
+       struct nouveau_pm_voltage voltage;
+       struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
+       int nr_perflvl;
+       struct nouveau_pm_memtimings memtimings;
+       struct nouveau_pm_temp_sensor_constants sensor_constants;
+       struct nouveau_pm_threshold_temp threshold_temp;
+
+       struct nouveau_pm_level boot;
+       struct nouveau_pm_level *cur;
+
+       struct device *hwmon;
+
+       int (*clock_get)(struct drm_device *, u32 id);
+       void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
+                          u32 id, int khz);
+       void (*clock_set)(struct drm_device *, void *);
+       int (*voltage_get)(struct drm_device *);
+       int (*voltage_set)(struct drm_device *, int voltage);
+       int (*fanspeed_get)(struct drm_device *);
+       int (*fanspeed_set)(struct drm_device *, int fanspeed);
+       int (*temp_get)(struct drm_device *);
+};
+
 struct nouveau_engine {
        struct nouveau_instmem_engine instmem;
        struct nouveau_mc_engine      mc;
@@ -378,6 +453,7 @@ struct nouveau_engine {
        struct nouveau_fifo_engine    fifo;
        struct nouveau_display_engine display;
        struct nouveau_gpio_engine    gpio;
+       struct nouveau_pm_engine      pm;
 };
 
 struct nouveau_pll_vals {
@@ -522,8 +598,14 @@ struct drm_nouveau_private {
        int flags;
 
        void __iomem *mmio;
+
+       spinlock_t ramin_lock;
        void __iomem *ramin;
-       uint32_t ramin_size;
+       u32 ramin_size;
+       u32 ramin_base;
+       bool ramin_available;
+       struct drm_mm ramin_heap;
+       struct list_head gpuobj_list;
 
        struct nouveau_bo *vga_ram;
 
@@ -540,6 +622,12 @@ struct drm_nouveau_private {
                atomic_t validate_sequence;
        } ttm;
 
+       struct {
+               spinlock_t lock;
+               struct drm_mm heap;
+               struct nouveau_bo *bo;
+       } fence;
+
        int fifo_alloc_count;
        struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
 
@@ -550,15 +638,11 @@ struct drm_nouveau_private {
        spinlock_t context_switch_lock;
 
        /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
-       struct nouveau_gpuobj *ramht;
+       struct nouveau_ramht  *ramht;
+       struct nouveau_gpuobj *ramfc;
+       struct nouveau_gpuobj *ramro;
+
        uint32_t ramin_rsvd_vram;
-       uint32_t ramht_offset;
-       uint32_t ramht_size;
-       uint32_t ramht_bits;
-       uint32_t ramfc_offset;
-       uint32_t ramfc_size;
-       uint32_t ramro_offset;
-       uint32_t ramro_size;
 
        struct {
                enum {
@@ -576,14 +660,12 @@ struct drm_nouveau_private {
        } gart_info;
 
        /* nv10-nv40 tiling regions */
-       struct {
-               struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
-               spinlock_t lock;
-       } tile;
+       struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
 
        /* VRAM/fb configuration */
        uint64_t vram_size;
        uint64_t vram_sys_base;
+       u32 vram_rblock_size;
 
        uint64_t fb_phys;
        uint64_t fb_available_size;
@@ -600,10 +682,6 @@ struct drm_nouveau_private {
        struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
        int vm_vram_pt_nr;
 
-       struct drm_mm ramin_heap;
-
-       struct list_head gpuobj_list;
-
        struct nvbios vbios;
 
        struct nv04_mode_state mode_reg;
@@ -633,6 +711,12 @@ struct drm_nouveau_private {
        struct apertures_struct *apertures;
 };
 
+static inline struct drm_nouveau_private *
+nouveau_private(struct drm_device *dev)
+{
+       return dev->dev_private;
+}
+
 static inline struct drm_nouveau_private *
 nouveau_bdev(struct ttm_bo_device *bd)
 {
@@ -669,7 +753,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 } while (0)
 
 /* nouveau_drv.c */
-extern int nouveau_noagp;
+extern int nouveau_agpmode;
 extern int nouveau_duallink;
 extern int nouveau_uscript_lvds;
 extern int nouveau_uscript_tmds;
@@ -683,7 +767,10 @@ extern char *nouveau_vbios;
 extern int nouveau_ignorelid;
 extern int nouveau_nofbaccel;
 extern int nouveau_noaccel;
+extern int nouveau_force_post;
 extern int nouveau_override_conntype;
+extern char *nouveau_perflvl;
+extern int nouveau_perflvl_wr;
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -704,8 +791,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
 
 /* nouveau_mem.c */
-extern int  nouveau_mem_detect(struct drm_device *dev);
-extern int  nouveau_mem_init(struct drm_device *);
+extern int  nouveau_mem_vram_init(struct drm_device *);
+extern void nouveau_mem_vram_fini(struct drm_device *);
+extern int  nouveau_mem_gart_init(struct drm_device *);
+extern void nouveau_mem_gart_fini(struct drm_device *);
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern int  nouveau_mem_reset_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
@@ -749,7 +838,6 @@ extern void nouveau_channel_free(struct nouveau_channel *);
 extern int  nouveau_gpuobj_early_init(struct drm_device *);
 extern int  nouveau_gpuobj_init(struct drm_device *);
 extern void nouveau_gpuobj_takedown(struct drm_device *);
-extern void nouveau_gpuobj_late_takedown(struct drm_device *);
 extern int  nouveau_gpuobj_suspend(struct drm_device *dev);
 extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
 extern void nouveau_gpuobj_resume(struct drm_device *dev);
@@ -759,24 +847,11 @@ extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
 extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
                              uint32_t size, int align, uint32_t flags,
                              struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
-                                 uint32_t handle, struct nouveau_gpuobj *,
-                                 struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_ref_del(struct drm_device *,
-                                 struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
-                                  struct nouveau_gpuobj_ref **ref_ret);
-extern int nouveau_gpuobj_new_ref(struct drm_device *,
-                                 struct nouveau_channel *alloc_chan,
-                                 struct nouveau_channel *ref_chan,
-                                 uint32_t handle, uint32_t size, int align,
-                                 uint32_t flags, struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_new_fake(struct drm_device *,
-                                  uint32_t p_offset, uint32_t b_offset,
-                                  uint32_t size, uint32_t flags,
-                                  struct nouveau_gpuobj **,
-                                  struct nouveau_gpuobj_ref**);
+extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
+                              struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
+                                  u32 size, u32 flags,
+                                  struct nouveau_gpuobj **);
 extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
                                  uint64_t offset, uint64_t size, int access,
                                  int target, struct nouveau_gpuobj **);
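With the old ref_add/ref_del pair gone, a single helper covers both directions: nouveau_gpuobj_ref(obj, &ptr) acquires obj and releases whatever ptr previously held, so dropping a reference is spelled nouveau_gpuobj_ref(NULL, &obj). A usage sketch modelled on the nouveau_dma_init() hunk earlier in this patch (error handling trimmed):

static int gpuobj_ref_usage_sketch(struct nouveau_channel *chan)
{
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        ret = nouveau_gpuobj_gr_new(chan, 0x5039, &obj);
        if (ret)
                return ret;

        ret = nouveau_ramht_insert(chan, NvM2MF, obj);  /* RAMHT takes its own ref */
        nouveau_gpuobj_ref(NULL, &obj);                 /* drop the local ref */
        return ret;
}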
@@ -879,6 +954,7 @@ extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
                                                      enum dcb_gpio_tag);
 extern struct dcb_connector_table_entry *
 nouveau_bios_connector_entry(struct drm_device *, int index);
+extern u32 get_pll_register(struct drm_device *, enum pll_types);
 extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
                          struct pll_lims *);
 extern int nouveau_bios_run_display_table(struct drm_device *,
@@ -925,10 +1001,10 @@ extern int  nv40_fb_init(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
 extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
                                      uint32_t, uint32_t);
-
 /* nv50_fb.c */
 extern int  nv50_fb_init(struct drm_device *);
 extern void nv50_fb_takedown(struct drm_device *);
+extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *);
 
 /* nvc0_fb.c */
 extern int  nvc0_fb_init(struct drm_device *);
@@ -939,7 +1015,6 @@ extern int  nv04_fifo_init(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
-extern bool nv04_fifo_cache_flush(struct drm_device *);
 extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
 extern int  nv04_fifo_channel_id(struct drm_device *);
 extern int  nv04_fifo_create_context(struct nouveau_channel *);
@@ -977,7 +1052,6 @@ extern void nvc0_fifo_takedown(struct drm_device *);
 extern void nvc0_fifo_disable(struct drm_device *);
 extern void nvc0_fifo_enable(struct drm_device *);
 extern bool nvc0_fifo_reassign(struct drm_device *, bool);
-extern bool nvc0_fifo_cache_flush(struct drm_device *);
 extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
 extern int  nvc0_fifo_channel_id(struct drm_device *);
 extern int  nvc0_fifo_create_context(struct nouveau_channel *);
@@ -1169,15 +1243,21 @@ extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
-extern int nouveau_fence_init(struct nouveau_channel *);
-extern void nouveau_fence_fini(struct nouveau_channel *);
+extern int nouveau_fence_init(struct drm_device *);
+extern void nouveau_fence_fini(struct drm_device *);
+extern int nouveau_fence_channel_init(struct nouveau_channel *);
+extern void nouveau_fence_channel_fini(struct nouveau_channel *);
 extern void nouveau_fence_update(struct nouveau_channel *);
 extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
                             bool emit);
 extern int nouveau_fence_emit(struct nouveau_fence *);
+extern void nouveau_fence_work(struct nouveau_fence *fence,
+                              void (*work)(void *priv, bool signalled),
+                              void *priv);
 struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
 extern bool nouveau_fence_signalled(void *obj, void *arg);
 extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
 extern int nouveau_fence_flush(void *obj, void *arg);
 extern void nouveau_fence_unref(void **obj);
 extern void *nouveau_fence_ref(void *obj);
@@ -1255,12 +1335,11 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
        iowrite32_native(val, dev_priv->mmio + reg);
 }
 
-static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
+static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
 {
        u32 tmp = nv_rd32(dev, reg);
-       tmp &= ~mask;
-       tmp |= val;
-       nv_wr32(dev, reg, tmp);
+       nv_wr32(dev, reg, (tmp & ~mask) | val);
+       return tmp;
 }
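nv_mask() now hands back the pre-modification register contents, so a caller can save, modify and later restore in a single read. A usage sketch (0x001540 is an illustrative offset, not a documented register):

static void nv_mask_save_restore_sketch(struct drm_device *dev)
{
        u32 old = nv_mask(dev, 0x001540, 0x00000001, 0x00000000);

        /* ... run with bit 0 forced low ... */

        nv_wr32(dev, 0x001540, old);    /* put the register back */
}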
 
 static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
@@ -1275,7 +1354,7 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
        iowrite8(val, dev_priv->mmio + reg);
 }
 
-#define nv_wait(reg, mask, val) \
+#define nv_wait(dev, reg, mask, val) \
        nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
 
 /* PRAMIN access */
@@ -1292,17 +1371,8 @@ static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
 }
 
 /* object access */
-static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
-                               unsigned index)
-{
-       return nv_ri32(dev, obj->im_pramin->start + index * 4);
-}
-
-static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
-                               unsigned index, u32 val)
-{
-       nv_wi32(dev, obj->im_pramin->start + index * 4, val);
-}
+extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
+extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
 
 /*
  * Logging
@@ -1403,6 +1473,7 @@ nv_match_device(struct drm_device *dev, unsigned device,
 #define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
 #define NV_SW_SEMAPHORE_ACQUIRE                                      0x00000068
 #define NV_SW_SEMAPHORE_RELEASE                                      0x0000006c
+#define NV_SW_YIELD                                                  0x00000080
 #define NV_SW_DMA_VBLSEM                                             0x0000018c
 #define NV_SW_VBLSEM_OFFSET                                          0x00000400
 #define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
index 7c82d68bc155d7d8a738d206a1c795b79849cc05..ae69b61d93dbbd2b3e0caf052b4faae4070c5505 100644 (file)
@@ -55,6 +55,7 @@ struct nouveau_encoder {
                        int dpcd_version;
                        int link_nr;
                        int link_bw;
+                       bool enhanced_frame;
                } dp;
        };
 };
index dbd30b2e43fd13b39aaa59eb0abc4fb0332bd1ee..02a4d1fd484560efe66f6262f140b3983f3e6241 100644 (file)
@@ -104,6 +104,8 @@ static struct fb_ops nouveau_fbcon_ops = {
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
+       .fb_debug_enter = drm_fb_helper_debug_enter,
+       .fb_debug_leave = drm_fb_helper_debug_leave,
 };
 
 static struct fb_ops nv04_fbcon_ops = {
@@ -117,6 +119,8 @@ static struct fb_ops nv04_fbcon_ops = {
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
+       .fb_debug_enter = drm_fb_helper_debug_enter,
+       .fb_debug_leave = drm_fb_helper_debug_leave,
 };
 
 static struct fb_ops nv50_fbcon_ops = {
@@ -130,6 +134,8 @@ static struct fb_ops nv50_fbcon_ops = {
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
+       .fb_debug_enter = drm_fb_helper_debug_enter,
+       .fb_debug_leave = drm_fb_helper_debug_leave,
 };
 
 static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
index 87ac21ec23d290db82e90c6fe6257e4cffdb5955..441b12420bb1b713fb56b98edad389d31fe367ea 100644 (file)
 #include "drm.h"
 
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 #include "nouveau_dma.h"
 
-#define USE_REFCNT (dev_priv->card_type >= NV_10)
+#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
 
 struct nouveau_fence {
        struct nouveau_channel *channel;
@@ -39,6 +41,15 @@ struct nouveau_fence {
 
        uint32_t sequence;
        bool signalled;
+
+       void (*work)(void *priv, bool signalled);
+       void *priv;
+};
+
+struct nouveau_semaphore {
+       struct kref ref;
+       struct drm_device *dev;
+       struct drm_mm_node *mem;
 };
 
 static inline struct nouveau_fence *
@@ -59,14 +70,13 @@ nouveau_fence_del(struct kref *ref)
 void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
-       struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-       struct list_head *entry, *tmp;
-       struct nouveau_fence *fence;
+       struct drm_device *dev = chan->dev;
+       struct nouveau_fence *tmp, *fence;
        uint32_t sequence;
 
        spin_lock(&chan->fence.lock);
 
-       if (USE_REFCNT)
+       if (USE_REFCNT(dev))
                sequence = nvchan_rd32(chan, 0x48);
        else
                sequence = atomic_read(&chan->fence.last_sequence_irq);
@@ -75,12 +85,14 @@ nouveau_fence_update(struct nouveau_channel *chan)
                goto out;
        chan->fence.sequence_ack = sequence;
 
-       list_for_each_safe(entry, tmp, &chan->fence.pending) {
-               fence = list_entry(entry, struct nouveau_fence, entry);
-
+       list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
                sequence = fence->sequence;
                fence->signalled = true;
                list_del(&fence->entry);
+
+               if (unlikely(fence->work))
+                       fence->work(fence->priv, true);
+
                kref_put(&fence->refcount, nouveau_fence_del);
 
                if (sequence == chan->fence.sequence_ack)
@@ -121,8 +133,8 @@ nouveau_fence_channel(struct nouveau_fence *fence)
 int
 nouveau_fence_emit(struct nouveau_fence *fence)
 {
-       struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
        struct nouveau_channel *chan = fence->channel;
+       struct drm_device *dev = chan->dev;
        int ret;
 
        ret = RING_SPACE(chan, 2);
@@ -143,13 +155,32 @@ nouveau_fence_emit(struct nouveau_fence *fence)
        list_add_tail(&fence->entry, &chan->fence.pending);
        spin_unlock(&chan->fence.lock);
 
-       BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
+       BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
        OUT_RING(chan, fence->sequence);
        FIRE_RING(chan);
 
        return 0;
 }
 
+void
+nouveau_fence_work(struct nouveau_fence *fence,
+                  void (*work)(void *priv, bool signalled),
+                  void *priv)
+{
+       BUG_ON(fence->work);
+
+       spin_lock(&fence->channel->fence.lock);
+
+       if (fence->signalled) {
+               work(priv, true);
+       } else {
+               fence->work = work;
+               fence->priv = priv;
+       }
+
+       spin_unlock(&fence->channel->fence.lock);
+}
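nouveau_fence_work() is the generic deferral hook the semaphore code further down builds on: the callback runs from nouveau_fence_update() once the fence signals, or with signalled == false if the channel is torn down first. A usage sketch with a hypothetical payload (assumes linux/slab.h for kfree):

static void deferred_release_sketch(void *priv, bool signalled)
{
        kfree(priv);    /* resource that had to outlive the GPU work */
}

static int defer_free_sketch(struct nouveau_channel *chan, void *priv)
{
        struct nouveau_fence *fence;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        nouveau_fence_work(fence, deferred_release_sketch, priv);
        nouveau_fence_unref((void *)&fence);
        return 0;
}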
+
 void
 nouveau_fence_unref(void **sync_obj)
 {
@@ -213,6 +244,162 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
        return ret;
 }
 
+static struct nouveau_semaphore *
+alloc_semaphore(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_semaphore *sema;
+
+       if (!USE_SEMA(dev))
+               return NULL;
+
+       sema = kmalloc(sizeof(*sema), GFP_KERNEL);
+       if (!sema)
+               goto fail;
+
+       spin_lock(&dev_priv->fence.lock);
+       sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+       if (sema->mem)
+               sema->mem = drm_mm_get_block(sema->mem, 4, 0);
+       spin_unlock(&dev_priv->fence.lock);
+
+       if (!sema->mem)
+               goto fail;
+
+       kref_init(&sema->ref);
+       sema->dev = dev;
+       nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+
+       return sema;
+fail:
+       kfree(sema);
+       return NULL;
+}
+
+static void
+free_semaphore(struct kref *ref)
+{
+       struct nouveau_semaphore *sema =
+               container_of(ref, struct nouveau_semaphore, ref);
+       struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+       spin_lock(&dev_priv->fence.lock);
+       drm_mm_put_block(sema->mem);
+       spin_unlock(&dev_priv->fence.lock);
+
+       kfree(sema);
+}
+
+static void
+semaphore_work(void *priv, bool signalled)
+{
+       struct nouveau_semaphore *sema = priv;
+       struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+       if (unlikely(!signalled))
+               nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
+
+       kref_put(&sema->ref, free_semaphore);
+}
+
+static int
+emit_semaphore(struct nouveau_channel *chan, int method,
+              struct nouveau_semaphore *sema)
+{
+       struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+       struct nouveau_fence *fence;
+       bool smart = (dev_priv->card_type >= NV_50);
+       int ret;
+
+       ret = RING_SPACE(chan, smart ? 8 : 4);
+       if (ret)
+               return ret;
+
+       if (smart) {
+               BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+               OUT_RING(chan, NvSema);
+       }
+       BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+       OUT_RING(chan, sema->mem->start);
+
+       if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
+               /*
+                * NV50 tries to be too smart and context-switch
+                * between semaphores instead of doing a "first come,
+                * first served" strategy like previous cards
+                * do.
+                *
+                * That's bad because the ACQUIRE latency can get as
+                * large as the PFIFO context time slice in the
+                * typical DRI2 case where you have several
+                * outstanding semaphores at the same moment.
+                *
+                * If we're going to ACQUIRE, force the card to
+                * context switch before, just in case the matching
+                * RELEASE is already scheduled to be executed in
+                * another channel.
+                */
+               BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
+               OUT_RING(chan, 0);
+       }
+
+       BEGIN_RING(chan, NvSubSw, method, 1);
+       OUT_RING(chan, 1);
+
+       if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
+               /*
+                * Force the card to context switch, there may be
+                * another channel waiting for the semaphore we just
+                * released.
+                */
+               BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
+               OUT_RING(chan, 0);
+       }
+
+       /* Delay semaphore destruction until its work is done */
+       ret = nouveau_fence_new(chan, &fence, true);
+       if (ret)
+               return ret;
+
+       kref_get(&sema->ref);
+       nouveau_fence_work(fence, semaphore_work, sema);
+       nouveau_fence_unref((void *)&fence);
+
+       return 0;
+}
+
+int
+nouveau_fence_sync(struct nouveau_fence *fence,
+                  struct nouveau_channel *wchan)
+{
+       struct nouveau_channel *chan = nouveau_fence_channel(fence);
+       struct drm_device *dev = wchan->dev;
+       struct nouveau_semaphore *sema;
+       int ret;
+
+       if (likely(!fence || chan == wchan ||
+                  nouveau_fence_signalled(fence, NULL)))
+               return 0;
+
+       sema = alloc_semaphore(dev);
+       if (!sema) {
+               /* Early card or broken userspace, fall back to
+                * software sync. */
+               return nouveau_fence_wait(fence, NULL, false, false);
+       }
+
+       /* Make wchan wait until it gets signalled */
+       ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+       if (ret)
+               goto out;
+
+       /* Signal the semaphore from chan */
+       ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
+out:
+       kref_put(&sema->ref, free_semaphore);
+       return ret;
+}
+
 int
 nouveau_fence_flush(void *sync_obj, void *sync_arg)
 {
@@ -220,26 +407,123 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
 }
 
 int
-nouveau_fence_init(struct nouveau_channel *chan)
+nouveau_fence_channel_init(struct nouveau_channel *chan)
 {
+       struct drm_device *dev = chan->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpuobj *obj = NULL;
+       int ret;
+
+       /* Create an NV_SW object for various sync purposes */
+       ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+       if (ret)
+               return ret;
+
+       ret = nouveau_ramht_insert(chan, NvSw, obj);
+       nouveau_gpuobj_ref(NULL, &obj);
+       if (ret)
+               return ret;
+
+       ret = RING_SPACE(chan, 2);
+       if (ret)
+               return ret;
+       BEGIN_RING(chan, NvSubSw, 0, 1);
+       OUT_RING(chan, NvSw);
+
+       /* Create a DMA object for the shared cross-channel sync area. */
+       if (USE_SEMA(dev)) {
+               struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+
+               ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+                                            mem->start << PAGE_SHIFT,
+                                            mem->size << PAGE_SHIFT,
+                                            NV_DMA_ACCESS_RW,
+                                            NV_DMA_TARGET_VIDMEM, &obj);
+               if (ret)
+                       return ret;
+
+               ret = nouveau_ramht_insert(chan, NvSema, obj);
+               nouveau_gpuobj_ref(NULL, &obj);
+               if (ret)
+                       return ret;
+
+               ret = RING_SPACE(chan, 2);
+               if (ret)
+                       return ret;
+               BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+               OUT_RING(chan, NvSema);
+       }
+
+       FIRE_RING(chan);
+
        INIT_LIST_HEAD(&chan->fence.pending);
        spin_lock_init(&chan->fence.lock);
        atomic_set(&chan->fence.last_sequence_irq, 0);
+
        return 0;
 }
 
 void
-nouveau_fence_fini(struct nouveau_channel *chan)
+nouveau_fence_channel_fini(struct nouveau_channel *chan)
 {
-       struct list_head *entry, *tmp;
-       struct nouveau_fence *fence;
-
-       list_for_each_safe(entry, tmp, &chan->fence.pending) {
-               fence = list_entry(entry, struct nouveau_fence, entry);
+       struct nouveau_fence *tmp, *fence;
 
+       list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
                fence->signalled = true;
                list_del(&fence->entry);
+
+               if (unlikely(fence->work))
+                       fence->work(fence->priv, false);
+
                kref_put(&fence->refcount, nouveau_fence_del);
        }
 }
 
+int
+nouveau_fence_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       int ret;
+
+       /* Create a shared VRAM heap for cross-channel sync. */
+       if (USE_SEMA(dev)) {
+               ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
+                                    0, 0, false, true, &dev_priv->fence.bo);
+               if (ret)
+                       return ret;
+
+               ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
+               if (ret)
+                       goto fail;
+
+               ret = nouveau_bo_map(dev_priv->fence.bo);
+               if (ret)
+                       goto fail;
+
+               ret = drm_mm_init(&dev_priv->fence.heap, 0,
+                                 dev_priv->fence.bo->bo.mem.size);
+               if (ret)
+                       goto fail;
+
+               spin_lock_init(&dev_priv->fence.lock);
+       }
+
+       return 0;
+fail:
+       nouveau_bo_unmap(dev_priv->fence.bo);
+       nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+       return ret;
+}
+
+void
+nouveau_fence_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       if (USE_SEMA(dev)) {
+               drm_mm_takedown(&dev_priv->fence.heap);
+               nouveau_bo_unmap(dev_priv->fence.bo);
+               nouveau_bo_unpin(dev_priv->fence.bo);
+               nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+       }
+}
index 19620a6709f55c00e97efd5d2f816705788420f8..5c4c929d7f744d6911c1a16b67924ab770b6de5c 100644 (file)
@@ -362,7 +362,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
-               ret = nouveau_bo_sync_gpu(nvbo, chan);
+               ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail pre-validate sync\n");
                        return ret;
@@ -385,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
-               ret = nouveau_bo_sync_gpu(nvbo, chan);
+               ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail post-validate sync\n");
                        return ret;
index 5d39c4ce8006a03817c017b0a02a30c9f4dfe879..4a8ad1307fa49403f53e5768488afd7e4b49f8c9 100644 (file)
@@ -126,7 +126,7 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
        reg = (reg - 0x00400000) / 4;
        reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
 
-       nv_wo32(ctx->dev, ctx->data, reg, val);
+       nv_wo32(ctx->data, reg * 4, val);
 }
 #endif
 
index 7b613682e400b88cc387c83261987b6259026b98..bed669a54a2da0692af9a9a1cd4a89064f4d0fd0 100644 (file)
@@ -305,7 +305,7 @@ setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
        bool mpll = Preg == 0x4020;
        uint32_t oldPval = nvReadMC(dev, Preg);
        uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
-       uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
+       uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
                        0xc << 28 | pv->log2P << 16;
        uint32_t saved4600 = 0;
        /* some cards have different maskc040s */
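The widened mask reflects log2P being a 3-bit field: 0x7 << 16 clears one whole field, and 0x77 << 16 clears both copies carried by the MPLL-style registers, where the old 0x11 << 16 cleared only a single bit of each and left stale divider bits behind. A quick standalone check with a made-up old value:

#include <assert.h>

static void pll_p_mask_example(void)
{
        unsigned int oldPval = 0x00550000;      /* hypothetical */
        unsigned int cleared = oldPval & ~(0x77 << 16);

        assert((cleared & (0x77 << 16)) == 0);  /* both P fields wiped */
}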
@@ -427,22 +427,12 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
                       struct nouveau_pll_vals *pllvals)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
-                                                   NV_PRAMDAC_MPLL_COEFF,
-                                                   NV_PRAMDAC_VPLL_COEFF,
-                                                   NV_RAMDAC_VPLL2 };
-       const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
-                                                   0x4020,
-                                                   NV_PRAMDAC_VPLL_COEFF,
-                                                   NV_RAMDAC_VPLL2 };
-       uint32_t reg1, pll1, pll2 = 0;
+       uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0;
        struct pll_lims pll_lim;
        int ret;
 
-       if (dev_priv->card_type < NV_40)
-               reg1 = nv04_regs[plltype];
-       else
-               reg1 = nv40_regs[plltype];
+       if (reg1 == 0)
+               return -ENOENT;
 
        pll1 = nvReadMC(dev, reg1);
 
@@ -491,8 +481,10 @@ int
 nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
 {
        struct nouveau_pll_vals pllvals;
+       int ret;
 
-       if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+       if (plltype == PLL_MEMORY &&
+           (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
                uint32_t mpllP;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -501,14 +493,17 @@ nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
 
                return 400000 / mpllP;
        } else
-       if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+       if (plltype == PLL_MEMORY &&
+           (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
                uint32_t clock;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
                return clock;
        }
 
-       nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+       ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+       if (ret)
+               return ret;
 
        return nouveau_hw_pllvals_to_clk(&pllvals);
 }
@@ -526,9 +521,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
        struct nouveau_pll_vals pv;
        uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
 
-       if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
+       if (get_pll_limits(dev, pllreg, &pll_lim))
                return;
-       nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
+       nouveau_hw_get_pllvals(dev, pllreg, &pv);
 
        if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
            pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
@@ -661,7 +656,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
        if (dev_priv->card_type >= NV_10)
                regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
 
-       nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
+       nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
        state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
        if (nv_two_heads(dev))
                state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
@@ -866,10 +861,11 @@ nv_save_state_ext(struct drm_device *dev, int head,
        rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
 
-       if (dev_priv->card_type >= NV_30) {
+       if (dev_priv->card_type >= NV_20)
                rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+       if (dev_priv->card_type >= NV_30)
                rd_cio_state(dev, head, regp, 0x9f);
-       }
 
        rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
@@ -976,10 +972,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
        wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
 
-       if (dev_priv->card_type >= NV_30) {
+       if (dev_priv->card_type >= NV_20)
                wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+       if (dev_priv->card_type >= NV_30)
                wr_cio_state(dev, head, regp, 0x9f);
-       }
 
        wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
index 84614858728ba0f7ab959b54989cf9a512d89d6a..fdd7e3de79c895138c93706bc023048772f93026 100644 (file)
@@ -299,7 +299,10 @@ nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
 
 int
 nouveau_i2c_identify(struct drm_device *dev, const char *what,
-                    struct i2c_board_info *info, int index)
+                    struct i2c_board_info *info,
+                    bool (*match)(struct nouveau_i2c_chan *,
+                                  struct i2c_board_info *),
+                    int index)
 {
        struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
        int i;
@@ -307,7 +310,8 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
        NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
 
        for (i = 0; info[i].addr; i++) {
-               if (nouveau_probe_i2c_addr(i2c, info[i].addr)) {
+               if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
+                   (!match || match(i2c, &info[i]))) {
                        NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
                        return i;
                }
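
The new optional "match" callback lets a caller confirm a detected chip beyond a bare address ACK. A minimal sketch of such a callback, assuming a hypothetical ID register 0x00 that reads back 0x42 (neither value comes from this patch):

	static bool
	example_match(struct nouveau_i2c_chan *i2c, struct i2c_board_info *info)
	{
		u8 reg = 0x00, id = 0;
		struct i2c_msg msgs[] = {
			{ .addr = info->addr, .len = 1, .buf = &reg },
			{ .addr = info->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
		};

		/* write the (assumed) register index, read one byte back */
		if (i2c_transfer(&i2c->adapter, msgs, 2) != 2)
			return false;
		return id == 0x42;
	}

Passing NULL for match preserves the old ACK-only behaviour, as the "!match ||" short-circuit above shows.
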
index cfe7c8426d1d028dc7f3fc6b229463f41c08f306..422b62fd8272487dd78bb3d276dbfe70dd8e80fe 100644 (file)
@@ -43,7 +43,10 @@ void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
 struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
 bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
 int nouveau_i2c_identify(struct drm_device *dev, const char *what,
-                        struct i2c_board_info *info, int index);
+                        struct i2c_board_info *info,
+                        bool (*match)(struct nouveau_i2c_chan *,
+                                      struct i2c_board_info *),
+                        int index);
 
 extern const struct i2c_algorithm nouveau_dp_i2c_algo;
 
index 794b0ee30cf608cd7087a2f4e4055b8702e75e2d..6fd51a51c60861e697f43755593f4ad2a1830c9c 100644 (file)
@@ -35,6 +35,7 @@
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
+#include "nouveau_ramht.h"
 #include <linux/ratelimit.h>
 
 /* needed for hotplug irq */
@@ -106,15 +107,16 @@ nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
        const int mthd = addr & 0x1ffc;
 
        if (mthd == 0x0000) {
-               struct nouveau_gpuobj_ref *ref = NULL;
+               struct nouveau_gpuobj *gpuobj;
 
-               if (nouveau_gpuobj_ref_find(chan, data, &ref))
+               gpuobj = nouveau_ramht_find(chan, data);
+               if (!gpuobj)
                        return false;
 
-               if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
+               if (gpuobj->engine != NVOBJ_ENGINE_SW)
                        return false;
 
-               chan->sw_subchannel[subc] = ref->gpuobj->class;
+               chan->sw_subchannel[subc] = gpuobj->class;
                nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
                        NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
                return true;
@@ -200,16 +202,45 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
                }
 
                if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-                       NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
+                       u32 get = nv_rd32(dev, 0x003244);
+                       u32 put = nv_rd32(dev, 0x003240);
+                       u32 push = nv_rd32(dev, 0x003220);
+                       u32 state = nv_rd32(dev, 0x003228);
+
+                       if (dev_priv->card_type == NV_50) {
+                               u32 ho_get = nv_rd32(dev, 0x003328);
+                               u32 ho_put = nv_rd32(dev, 0x003320);
+                               u32 ib_get = nv_rd32(dev, 0x003334);
+                               u32 ib_put = nv_rd32(dev, 0x003330);
+
+                               NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+                                            "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+                                            "State 0x%08x Push 0x%08x\n",
+                                       chid, ho_get, get, ho_put, put, ib_get, ib_put,
+                                       state, push);
+
+                               /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+                               nv_wr32(dev, 0x003364, 0x00000000);
+                               if (get != put || ho_get != ho_put) {
+                                       nv_wr32(dev, 0x003244, put);
+                                       nv_wr32(dev, 0x003328, ho_put);
+                               } else
+                               if (ib_get != ib_put) {
+                                       nv_wr32(dev, 0x003334, ib_put);
+                               }
+                       } else {
+                               NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+                                            "Put 0x%08x State 0x%08x Push 0x%08x\n",
+                                       chid, get, put, state, push);
 
-                       status &= ~NV_PFIFO_INTR_DMA_PUSHER;
-                       nv_wr32(dev, NV03_PFIFO_INTR_0,
-                                               NV_PFIFO_INTR_DMA_PUSHER);
+                               if (get != put)
+                                       nv_wr32(dev, 0x003244, put);
+                       }
 
-                       nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
-                       if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
-                               nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
-                                                               get + 4);
+                       nv_wr32(dev, 0x003228, 0x00000000);
+                       nv_wr32(dev, 0x003220, 0x00000001);
+                       nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+                       status &= ~NV_PFIFO_INTR_DMA_PUSHER;
                }
 
                if (status & NV_PFIFO_INTR_SEMAPHORE) {
@@ -226,6 +257,14 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
                        nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
                }
 
+               if (dev_priv->card_type == NV_50) {
+                       if (status & 0x00000010) {
+                               nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+                               status &= ~0x00000010;
+                               nv_wr32(dev, 0x002100, 0x00000010);
+                       }
+               }
+
                if (status) {
                        NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
                                status, chid);
@@ -357,7 +396,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
                        if (!chan || !chan->ramin_grctx)
                                continue;
 
-                       if (inst == chan->ramin_grctx->instance)
+                       if (inst == chan->ramin_grctx->pinst)
                                break;
                }
        } else {
@@ -369,7 +408,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
                        if (!chan || !chan->ramin)
                                continue;
 
-                       if (inst == chan->ramin->instance)
+                       if (inst == chan->ramin->vinst)
                                break;
                }
        }
@@ -605,40 +644,6 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
        nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
 }
 
-static void
-nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       uint32_t trap[6];
-       int i, ch;
-       uint32_t idx = nv_rd32(dev, 0x100c90);
-       if (idx & 0x80000000) {
-               idx &= 0xffffff;
-               if (display) {
-                       for (i = 0; i < 6; i++) {
-                               nv_wr32(dev, 0x100c90, idx | i << 24);
-                               trap[i] = nv_rd32(dev, 0x100c94);
-                       }
-                       for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
-                               struct nouveau_channel *chan = dev_priv->fifos[ch];
-
-                               if (!chan || !chan->ramin)
-                                       continue;
-
-                               if (trap[1] == chan->ramin->instance >> 12)
-                                       break;
-                       }
-                       NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
-                                       name, (trap[5]&0x100?"read":"write"),
-                                       trap[5]&0xff, trap[4]&0xffff,
-                                       trap[3]&0xffff, trap[0], trap[2], ch);
-               }
-               nv_wr32(dev, 0x100c90, idx | 0x80000000);
-       } else if (display) {
-               NV_INFO(dev, "%s - no VM fault?\n", name);
-       }
-}
-
 static struct nouveau_enum_names nv50_mp_exec_error_names[] =
 {
        { 3, "STACK_UNDERFLOW" },
@@ -711,7 +716,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
                tps++;
                switch (type) {
                case 6: /* texture error... unknown for now */
-                       nv50_pfb_vm_trap(dev, display, name);
+                       nv50_fb_vm_trap(dev, display, name);
                        if (display) {
                                NV_ERROR(dev, "magic set %d:\n", i);
                                for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
@@ -734,7 +739,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
                        uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
                        uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
                        uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
-                       nv50_pfb_vm_trap(dev, display, name);
+                       nv50_fb_vm_trap(dev, display, name);
                        /* 2d engine destination */
                        if (ustatus & 0x00000010) {
                                if (display) {
@@ -817,7 +822,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
 
                /* Known to be triggered by screwed up NOTIFY and COND... */
                if (ustatus & 0x00000001) {
-                       nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
+                       nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
                        nv_wr32(dev, 0x400500, 0);
                        if (nv_rd32(dev, 0x400808) & 0x80000000) {
                                if (display) {
@@ -842,7 +847,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
                        ustatus &= ~0x00000001;
                }
                if (ustatus & 0x00000002) {
-                       nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
+                       nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
                        nv_wr32(dev, 0x400500, 0);
                        if (nv_rd32(dev, 0x40084c) & 0x80000000) {
                                if (display) {
@@ -884,15 +889,15 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
                        NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
                }
                if (ustatus & 0x00000001) {
-                       nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
+                       nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
                        ustatus &= ~0x00000001;
                }
                if (ustatus & 0x00000002) {
-                       nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
+                       nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
                        ustatus &= ~0x00000002;
                }
                if (ustatus & 0x00000004) {
-                       nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
+                       nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
                        ustatus &= ~0x00000004;
                }
                NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
@@ -917,7 +922,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
                        NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
                }
                if (ustatus & 0x00000001) {
-                       nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
+                       nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
                        NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
                                        nv_rd32(dev, 0x400c00),
                                        nv_rd32(dev, 0x400c08),
@@ -939,7 +944,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
                        NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
                }
                if (ustatus & 0x00000001) {
-                       nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
+                       nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
                        NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
                                        nv_rd32(dev, 0x401804),
                                        nv_rd32(dev, 0x401808),
@@ -964,7 +969,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
                        NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
                }
                if (ustatus & 0x00000001) {
-                       nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
+                       nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
                        NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
                                        nv_rd32(dev, 0x405800),
                                        nv_rd32(dev, 0x405804),
@@ -986,7 +991,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
         * remaining, so try to handle it anyway. Perhaps related to that
         * unknown DMA slot on tesla? */
        if (status & 0x20) {
-               nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
+               nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
                ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
                if (display)
                        NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
index 9689d41476867c82a7be56d92d4a0cfe2aa6e822..a163c7c612e78eb6b6718620ee75773cd49a26f7 100644 (file)
@@ -35,6 +35,8 @@
 #include "drm_sarea.h"
 #include "nouveau_drv.h"
 
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
 /*
  * NV10-NV40 tiling helpers
  */
@@ -47,18 +49,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-       struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+       struct nouveau_tile_reg *tile = &dev_priv->tile[i];
 
        tile->addr = addr;
        tile->size = size;
        tile->used = !!pitch;
        nouveau_fence_unref((void **)&tile->fence);
 
-       if (!pfifo->cache_flush(dev))
-               return;
-
        pfifo->reassign(dev, false);
-       pfifo->cache_flush(dev);
        pfifo->cache_pull(dev, false);
 
        nouveau_wait_for_idle(dev);
@@ -76,34 +74,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-       struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
-       int i;
+       struct nouveau_tile_reg *found = NULL;
+       unsigned long i, flags;
 
-       spin_lock(&dev_priv->tile.lock);
+       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
        for (i = 0; i < pfb->num_tiles; i++) {
-               if (tile[i].used)
+               struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+
+               if (tile->used)
                        /* Tile region in use. */
                        continue;
 
-               if (tile[i].fence &&
-                   !nouveau_fence_signalled(tile[i].fence, NULL))
+               if (tile->fence &&
+                   !nouveau_fence_signalled(tile->fence, NULL))
                        /* Pending tile region. */
                        continue;
 
-               if (max(tile[i].addr, addr) <
-                   min(tile[i].addr + tile[i].size, addr + size))
+               if (max(tile->addr, addr) <
+                   min(tile->addr + tile->size, addr + size))
                        /* Kill an intersecting tile region. */
                        nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
 
                if (pitch && !found) {
                        /* Free tile region. */
                        nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-                       found = &tile[i];
+                       found = tile;
                }
        }
 
-       spin_unlock(&dev_priv->tile.lock);
+       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
        return found;
 }
@@ -169,8 +169,9 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
                        virt  += (end - pte);
 
                        while (pte < end) {
-                               nv_wo32(dev, pgt, pte++, offset_l);
-                               nv_wo32(dev, pgt, pte++, offset_h);
+                               nv_wo32(pgt, (pte * 4) + 0, offset_l);
+                               nv_wo32(pgt, (pte * 4) + 4, offset_h);
+                               pte += 2;
                        }
                }
        }
@@ -203,8 +204,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
                pages -= (end - pte);
                virt  += (end - pte) << 15;
 
-               while (pte < end)
-                       nv_wo32(dev, pgt, pte++, 0);
+               while (pte < end) {
+                       nv_wo32(pgt, (pte * 4), 0);
+                       pte++;
+               }
        }
        dev_priv->engine.instmem.flush(dev);
 
@@ -218,7 +221,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
  * Cleanup everything
  */
 void
-nouveau_mem_close(struct drm_device *dev)
+nouveau_mem_vram_fini(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
@@ -229,6 +232,19 @@ nouveau_mem_close(struct drm_device *dev)
 
        nouveau_ttm_global_release(dev_priv);
 
+       if (dev_priv->fb_mtrr >= 0) {
+               drm_mtrr_del(dev_priv->fb_mtrr,
+                            pci_resource_start(dev->pdev, 1),
+                            pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
+               dev_priv->fb_mtrr = -1;
+       }
+}
+
+void
+nouveau_mem_gart_fini(struct drm_device *dev)
+{
+       nouveau_sgdma_takedown(dev);
+
        if (drm_core_has_AGP(dev) && dev->agp) {
                struct drm_agp_mem *entry, *tempe;
 
@@ -248,13 +264,6 @@ nouveau_mem_close(struct drm_device *dev)
                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
-
-       if (dev_priv->fb_mtrr) {
-               drm_mtrr_del(dev_priv->fb_mtrr,
-                            pci_resource_start(dev->pdev, 1),
-                            pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
-               dev_priv->fb_mtrr = -1;
-       }
 }
 
 static uint32_t
@@ -305,8 +314,62 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
        return 0;
 }
 
-/* returns the amount of FB ram in bytes */
-int
+static void
+nv50_vram_preinit(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       int i, parts, colbits, rowbitsa, rowbitsb, banks;
+       u64 rowsize, predicted;
+       u32 r0, r4, rt, ru;
+
+       r0 = nv_rd32(dev, 0x100200);
+       r4 = nv_rd32(dev, 0x100204);
+       rt = nv_rd32(dev, 0x100250);
+       ru = nv_rd32(dev, 0x001540);
+       NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+       for (i = 0, parts = 0; i < 8; i++) {
+               if (ru & (0x00010000 << i))
+                       parts++;
+       }
+
+       colbits  =  (r4 & 0x0000f000) >> 12;
+       rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+       rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+       banks    = ((r4 & 0x01000000) ? 8 : 4);
+
+       rowsize = parts * banks * (1 << colbits) * 8;
+       predicted = rowsize << rowbitsa;
+       if (r0 & 0x00000004)
+               predicted += rowsize << rowbitsb;
+
+       if (predicted != dev_priv->vram_size) {
+               NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+                       (u32)(dev_priv->vram_size >> 20));
+               NV_WARN(dev, "we calculated %dMiB VRAM\n",
+                       (u32)(predicted >> 20));
+       }
+
+       dev_priv->vram_rblock_size = rowsize >> 12;
+       if (rt & 1)
+               dev_priv->vram_rblock_size *= 3;
+
+       NV_DEBUG(dev, "rblock %lld bytes\n",
+                (u64)dev_priv->vram_rblock_size << 12);
+}
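
For intuition, a worked decode of the arithmetic above under assumed register values (not taken from any real board):

	/* assume parts = 2, banks = 4, colbits = 9, rowbitsa = 12, rt bit 0 clear:
	 *   rowsize   = 2 * 4 * (1 << 9) * 8 = 32768 bytes
	 *   predicted = 32768 << 12          = 128 MiB
	 *   vram_rblock_size = 32768 >> 12   = 8 (tripled when rt & 1)
	 */

A mismatch between the predicted size and the PFB-reported size only warns; the reported size stays authoritative.
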
+
+static void
+nvaa_vram_preinit(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       /* To our knowledge, there's no large scale reordering of pages
+        * that occurs on IGP chipsets.
+        */
+       dev_priv->vram_rblock_size = 1;
+}
+
+static int
 nouveau_mem_detect(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -325,9 +388,18 @@ nouveau_mem_detect(struct drm_device *dev)
                dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
                dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
                dev_priv->vram_size &= 0xffffffff00ll;
-               if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+
+               switch (dev_priv->chipset) {
+               case 0xaa:
+               case 0xac:
+               case 0xaf:
                        dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
                        dev_priv->vram_sys_base <<= 12;
+                       nvaa_vram_preinit(dev);
+                       break;
+               default:
+                       nv50_vram_preinit(dev);
+                       break;
                }
        } else {
                dev_priv->vram_size  = nv_rd32(dev, 0x10f20c) << 20;
@@ -345,6 +417,33 @@ nouveau_mem_detect(struct drm_device *dev)
        return -ENOMEM;
 }
 
+#if __OS_HAS_AGP
+static unsigned long
+get_agp_mode(struct drm_device *dev, unsigned long mode)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       /*
+        * FW seems to be broken on nv18, it makes the card lock up
+        * randomly.
+        */
+       if (dev_priv->chipset == 0x18)
+               mode &= ~PCI_AGP_COMMAND_FW;
+
+       /*
+        * AGP mode set in the command line.
+        */
+       if (nouveau_agpmode > 0) {
+               bool agpv3 = mode & 0x8;
+               int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+
+               mode = (mode & ~0x7) | (rate & 0x7);
+       }
+
+       return mode;
+}
+#endif
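
A note on the rate math above: AGP 3.x encodes 4x/8x as 1 and 2 in the low bits of the mode register, hence the divide-by-four once mode bit 3 flags a v3 bridge. For example, with an assumed module option value of nouveau_agpmode=8:

	/* agpv3 = true, rate = 8 / 4 = 2; (mode & ~0x7) | 2 requests
	 * the AGP 3.0 encoding of 8x */
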
+
 int
 nouveau_mem_reset_agp(struct drm_device *dev)
 {
@@ -355,7 +454,8 @@ nouveau_mem_reset_agp(struct drm_device *dev)
        /* First of all, disable fast writes, otherwise if it's
         * already enabled in the AGP bridge and we disable the card's
         * AGP controller we might be locking ourselves out of it. */
-       if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) {
+       if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
+            dev->agp->mode) & PCI_AGP_COMMAND_FW) {
                struct drm_agp_info info;
                struct drm_agp_mode mode;
 
@@ -363,7 +463,7 @@ nouveau_mem_reset_agp(struct drm_device *dev)
                if (ret)
                        return ret;
 
-               mode.mode = info.mode & ~PCI_AGP_COMMAND_FW;
+               mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
                ret = drm_agp_enable(dev, mode);
                if (ret)
                        return ret;
@@ -418,7 +518,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
        }
 
        /* see agp.h for the AGPSTAT_* modes available */
-       mode.mode = info.mode;
+       mode.mode = get_agp_mode(dev, info.mode);
        ret = drm_agp_enable(dev, mode);
        if (ret) {
                NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
@@ -433,24 +533,27 @@ nouveau_mem_init_agp(struct drm_device *dev)
 }
 
 int
-nouveau_mem_init(struct drm_device *dev)
+nouveau_mem_vram_init(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
-       int ret, dma_bits = 32;
-
-       dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
-       dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+       int ret, dma_bits;
 
        if (dev_priv->card_type >= NV_50 &&
            pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
                dma_bits = 40;
+       else
+               dma_bits = 32;
 
        ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
-       if (ret) {
-               NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
+       if (ret)
                return ret;
-       }
+
+       ret = nouveau_mem_detect(dev);
+       if (ret)
+               return ret;
+
+       dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
 
        ret = nouveau_ttm_global_init(dev_priv);
        if (ret)
@@ -465,8 +568,6 @@ nouveau_mem_init(struct drm_device *dev)
                return ret;
        }
 
-       spin_lock_init(&dev_priv->tile.lock);
-
        dev_priv->fb_available_size = dev_priv->vram_size;
        dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
        if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
@@ -474,7 +575,16 @@ nouveau_mem_init(struct drm_device *dev)
                        pci_resource_len(dev->pdev, 1);
        dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
 
-       /* remove reserved space at end of vram from available amount */
+       /* reserve space at end of VRAM for PRAMIN */
+       if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
+           dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
+               dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
+       else
+       if (dev_priv->card_type >= NV_40)
+               dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+       else
+               dev_priv->ramin_rsvd_vram = (512 * 1024);
+
        dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
        dev_priv->fb_aper_free = dev_priv->fb_available_size;
 
@@ -495,9 +605,23 @@ nouveau_mem_init(struct drm_device *dev)
                nouveau_bo_ref(NULL, &dev_priv->vga_ram);
        }
 
-       /* GART */
+       dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+                                        pci_resource_len(dev->pdev, 1),
+                                        DRM_MTRR_WC);
+       return 0;
+}
+
+int
+nouveau_mem_gart_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+       int ret;
+
+       dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
 #if !defined(__powerpc__) && !defined(__ia64__)
-       if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) {
+       if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
                ret = nouveau_mem_init_agp(dev);
                if (ret)
                        NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -523,11 +647,150 @@ nouveau_mem_init(struct drm_device *dev)
                return ret;
        }
 
-       dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
-                                        pci_resource_len(dev->pdev, 1),
-                                        DRM_MTRR_WC);
-
        return 0;
 }
 
+void
+nouveau_mem_timing_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct bit_entry P;
+       u8 tUNK_0, tUNK_1, tUNK_2;
+       u8 tRP;         /* Byte 3 */
+       u8 tRAS;        /* Byte 5 */
+       u8 tRFC;        /* Byte 7 */
+       u8 tRC;         /* Byte 9 */
+       u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+       u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+       u8 *mem = NULL, *entry;
+       int i, recordlen, entries;
+
+       if (bios->type == NVBIOS_BIT) {
+               if (bit_table(dev, 'P', &P))
+                       return;
+
+               if (P.version == 1)
+                       mem = ROMPTR(bios, P.data[4]);
+               else
+               if (P.version == 2)
+                       mem = ROMPTR(bios, P.data[8]);
+               else {
+                       NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
+               }
+       } else {
+               NV_DEBUG(dev, "BMP version too old for memory\n");
+               return;
+       }
+
+       if (!mem) {
+               NV_DEBUG(dev, "memory timing table pointer invalid\n");
+               return;
+       }
 
+       if (mem[0] != 0x10) {
+               NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
+               return;
+       }
+
+       /* validate record length */
+       entries   = mem[2];
+       recordlen = mem[3];
+       if (recordlen < 15) {
+               NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
+               return;
+       }
+
+       /* parse vbios entries into common format */
+       memtimings->timing =
+               kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+       if (!memtimings->timing)
+               return;
+
+       entry = mem + mem[1];
+       for (i = 0; i < entries; i++, entry += recordlen) {
+               struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
+               if (entry[0] == 0)
+                       continue;
+
+               tUNK_18 = 1;
+               tUNK_19 = 1;
+               tUNK_20 = 0;
+               tUNK_21 = 0;
+               switch (MIN(recordlen, 21)) {
+               case 21:
+                       tUNK_21 = entry[21];
+               case 20:
+                       tUNK_20 = entry[20];
+               case 19:
+                       tUNK_19 = entry[19];
+               case 18:
+                       tUNK_18 = entry[18];
+               default:
+                       tUNK_0  = entry[0];
+                       tUNK_1  = entry[1];
+                       tUNK_2  = entry[2];
+                       tRP     = entry[3];
+                       tRAS    = entry[5];
+                       tRFC    = entry[7];
+                       tRC     = entry[9];
+                       tUNK_10 = entry[10];
+                       tUNK_11 = entry[11];
+                       tUNK_12 = entry[12];
+                       tUNK_13 = entry[13];
+                       tUNK_14 = entry[14];
+                       break;
+               }
+
+               timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
+
+               /* XXX: I don't trust the -1's and +1's... they must come
+                *      from somewhere! */
+               timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+                                     tUNK_18 << 16 |
+                                     (tUNK_1 + tUNK_19 + 1) << 8 |
+                                     (tUNK_2 - 1));
+
+               timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
+               if (recordlen > 19) {
+                       timing->reg_100228 += (tUNK_19 - 1) << 24;
+               } else {
+                       timing->reg_100228 += tUNK_12 << 24;
+               }
+
+               /* XXX: reg_10022c */
+
+               timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+                                     tUNK_13 << 8  | tUNK_13);
+
+               /* XXX: +6? */
+               timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
+               if (tUNK_10 > tUNK_11) {
+                       timing->reg_100234 += tUNK_10 << 16;
+               } else {
+                       timing->reg_100234 += tUNK_11 << 16;
+               }
+
+               /* XXX: reg_100238, reg_10023c */
+               NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
+                        timing->reg_100220, timing->reg_100224,
+                        timing->reg_100228, timing->reg_10022c);
+               NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
+                        timing->reg_100230, timing->reg_100234,
+                        timing->reg_100238, timing->reg_10023c);
+       }
+
+       memtimings->nr_timing  = entries;
+       memtimings->supported = true;
+}
+
+void
+nouveau_mem_timing_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
+
+       kfree(mem->timing);
+}
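
For reference, the version-0x10 timing-table layout the parser above assumes (names beyond the header bytes are this driver's guesses):

	/* mem[0] = table version (0x10)   mem[1] = offset of first entry
	 * mem[2] = entry count            mem[3] = record length (>= 15)
	 * per entry: byte 3 = tRP, 5 = tRAS, 7 = tRFC, 9 = tRC; the
	 * remaining bytes feed the tUNK_* fields read above */
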
index 3ec181ff50cea7a45b22a28a6ed43857b2a841cb..2cc59f8c658bd63b840fbf35c7871d090a587d95 100644 (file)
@@ -28,6 +28,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 
 int
 nouveau_notifier_init_channel(struct nouveau_channel *chan)
@@ -112,7 +113,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
                return -ENOMEM;
        }
 
-       offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+       offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
        if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
                target = NV_DMA_TARGET_VIDMEM;
        } else
@@ -146,11 +147,11 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
        nobj->dtor = nouveau_notifier_gpuobj_dtor;
        nobj->priv = mem;
 
-       ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
+       ret = nouveau_ramht_insert(chan, handle, nobj);
+       nouveau_gpuobj_ref(NULL, &nobj);
        if (ret) {
-               nouveau_gpuobj_del(dev, &nobj);
                drm_mm_put_block(mem);
-               NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
+               NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
                return ret;
        }
 
index b6bcb254f4ab49ab123834d78f4e62e58ac0398a..896cf8634144a2b5cf7f69fdd006c848b4b2eb79 100644 (file)
@@ -34,6 +34,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
+#include "nouveau_ramht.h"
 
 /* NVidia uses context objects to drive drawing operations.
 
    The key into the hash table depends on the object handle and channel id and
    is given as:
 */
-static uint32_t
-nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       uint32_t hash = 0;
-       int i;
-
-       NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
-
-       for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
-               hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
-               handle >>= dev_priv->ramht_bits;
-       }
-
-       if (dev_priv->card_type < NV_50)
-               hash ^= channel << (dev_priv->ramht_bits - 4);
-       hash <<= 3;
-
-       NV_DEBUG(dev, "hash=0x%08x\n", hash);
-       return hash;
-}
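
The comment above introduces the hash this (now relocated) helper computes. A worked trace under assumed parameters, ramht_bits = 9 and channel 1 on pre-NV50 hardware:

	/* handle 0x12345678 folds down 9 bits at a time:
	 *   0x078 ^ 0x02b ^ 0x08d ^ 0x002 = 0x0dc
	 * then ^ (channel << (9 - 4))     = 0x0fc
	 * then << 3                       = 0x7e0, the RAMHT byte offset */
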
-
-static int
-nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
-                         uint32_t offset)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
-
-       if (dev_priv->card_type < NV_40)
-               return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
-       return (ctx != 0);
-}
-
-static int
-nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-       struct nouveau_channel *chan = ref->channel;
-       struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
-       uint32_t ctx, co, ho;
-
-       if (!ramht) {
-               NV_ERROR(dev, "No hash table!\n");
-               return -EINVAL;
-       }
-
-       if (dev_priv->card_type < NV_40) {
-               ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
-                     (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
-                     (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
-       } else
-       if (dev_priv->card_type < NV_50) {
-               ctx = (ref->instance >> 4) |
-                     (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
-                     (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
-       } else {
-               if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
-                       ctx = (ref->instance << 10) | 2;
-               } else {
-                       ctx = (ref->instance >> 4) |
-                             ((ref->gpuobj->engine <<
-                               NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
-               }
-       }
-
-       co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
-       do {
-               if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
-                       NV_DEBUG(dev,
-                                "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
-                                chan->id, co, ref->handle, ctx);
-                       nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
-                       nv_wo32(dev, ramht, (co + 4)/4, ctx);
-
-                       list_add_tail(&ref->list, &chan->ramht_refs);
-                       instmem->flush(dev);
-                       return 0;
-               }
-               NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
-                        chan->id, co, nv_ro32(dev, ramht, co/4));
-
-               co += 8;
-               if (co >= dev_priv->ramht_size)
-                       co = 0;
-       } while (co != ho);
-
-       NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
-       return -ENOMEM;
-}
-
-static void
-nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-       struct nouveau_channel *chan = ref->channel;
-       struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
-       uint32_t co, ho;
-
-       if (!ramht) {
-               NV_ERROR(dev, "No hash table!\n");
-               return;
-       }
-
-       co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
-       do {
-               if (nouveau_ramht_entry_valid(dev, ramht, co) &&
-                   (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
-                       NV_DEBUG(dev,
-                                "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
-                                chan->id, co, ref->handle,
-                                nv_ro32(dev, ramht, (co + 4)));
-                       nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
-                       nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
-
-                       list_del(&ref->list);
-                       instmem->flush(dev);
-                       return;
-               }
-
-               co += 8;
-               if (co >= dev_priv->ramht_size)
-                       co = 0;
-       } while (co != ho);
-       list_del(&ref->list);
-
-       NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
-                chan->id, ref->handle);
-}
 
 int
 nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
@@ -205,7 +75,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;
        struct nouveau_gpuobj *gpuobj;
-       struct drm_mm *pramin = NULL;
+       struct drm_mm_node *ramin = NULL;
        int ret;
 
        NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
@@ -218,69 +88,102 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+       gpuobj->dev = dev;
        gpuobj->flags = flags;
-       gpuobj->im_channel = chan;
+       kref_init(&gpuobj->refcount);
+       gpuobj->size = size;
 
+       spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+       spin_unlock(&dev_priv->ramin_lock);
 
-       /* Choose between global instmem heap, and per-channel private
-        * instmem heap.  On <NV50 allow requests for private instmem
-        * to be satisfied from global heap if no per-channel area
-        * available.
-        */
        if (chan) {
                NV_DEBUG(dev, "channel heap\n");
-               pramin = &chan->ramin_heap;
+
+               ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
+               if (ramin)
+                       ramin = drm_mm_get_block(ramin, size, align);
+
+               if (!ramin) {
+                       nouveau_gpuobj_ref(NULL, &gpuobj);
+                       return -ENOMEM;
+               }
        } else {
                NV_DEBUG(dev, "global heap\n");
-               pramin = &dev_priv->ramin_heap;
 
+               /* allocate backing pages, sets vinst */
                ret = engine->instmem.populate(dev, gpuobj, &size);
                if (ret) {
-                       nouveau_gpuobj_del(dev, &gpuobj);
+                       nouveau_gpuobj_ref(NULL, &gpuobj);
                        return ret;
                }
-       }
 
-       /* Allocate a chunk of the PRAMIN aperture */
-       gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
-       if (gpuobj->im_pramin)
-               gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);
+               /* try and get aperture space */
+               do {
+                       if (drm_mm_pre_get(&dev_priv->ramin_heap))
+                               return -ENOMEM;
+
+                       spin_lock(&dev_priv->ramin_lock);
+                       ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
+                                                  align, 0);
+                       if (ramin == NULL) {
+                               spin_unlock(&dev_priv->ramin_lock);
+                               nouveau_gpuobj_ref(NULL, &gpuobj);
+                               return -ENOMEM;
+                       }
 
-       if (!gpuobj->im_pramin) {
-               nouveau_gpuobj_del(dev, &gpuobj);
-               return -ENOMEM;
+                       ramin = drm_mm_get_block_atomic(ramin, size, align);
+                       spin_unlock(&dev_priv->ramin_lock);
+               } while (ramin == NULL);
+
+               /* on nv50 it's ok to fail, we have a fallback path */
+               if (!ramin && dev_priv->card_type < NV_50) {
+                       nouveau_gpuobj_ref(NULL, &gpuobj);
+                       return -ENOMEM;
+               }
        }
 
-       if (!chan) {
+       /* if we got a chunk of the aperture, map pages into it */
+       gpuobj->im_pramin = ramin;
+       if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
                ret = engine->instmem.bind(dev, gpuobj);
                if (ret) {
-                       nouveau_gpuobj_del(dev, &gpuobj);
+                       nouveau_gpuobj_ref(NULL, &gpuobj);
                        return ret;
                }
        }
 
+       /* calculate the various different addresses for the object */
+       if (chan) {
+               gpuobj->pinst = chan->ramin->pinst;
+               if (gpuobj->pinst != ~0)
+                       gpuobj->pinst += gpuobj->im_pramin->start;
+
+               if (dev_priv->card_type < NV_50) {
+                       gpuobj->cinst = gpuobj->pinst;
+               } else {
+                       gpuobj->cinst = gpuobj->im_pramin->start;
+                       gpuobj->vinst = gpuobj->im_pramin->start +
+                                       chan->ramin->vinst;
+               }
+       } else {
+               if (gpuobj->im_pramin)
+                       gpuobj->pinst = gpuobj->im_pramin->start;
+               else
+                       gpuobj->pinst = ~0;
+               gpuobj->cinst = 0xdeadbeef;
+       }
+
        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                int i;
 
-               for (i = 0; i < gpuobj->im_pramin->size; i += 4)
-                       nv_wo32(dev, gpuobj, i/4, 0);
+               for (i = 0; i < gpuobj->size; i += 4)
+                       nv_wo32(gpuobj, i, 0);
                engine->instmem.flush(dev);
        }
 
-       *gpuobj_ret = gpuobj;
-       return 0;
-}
-
-int
-nouveau_gpuobj_early_init(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-       NV_DEBUG(dev, "\n");
-
-       INIT_LIST_HEAD(&dev_priv->gpuobj_list);
 
+       *gpuobj_ret = gpuobj;
        return 0;
 }
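
To keep the three new instance addresses straight (terminology as used throughout this patch):

	/* pinst - byte offset of the object inside the PRAMIN aperture
	 *         (~0 when it has no PRAMIN mapping)
	 * cinst - offset inside the owning channel's instance block;
	 *         equals pinst before NV50, 0xdeadbeef when channel-less
	 * vinst - absolute VRAM address of the backing memory (NV50+) */
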
 
@@ -288,18 +191,12 @@ int
 nouveau_gpuobj_init(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       int ret;
 
        NV_DEBUG(dev, "\n");
 
-       if (dev_priv->card_type < NV_50) {
-               ret = nouveau_gpuobj_new_fake(dev,
-                       dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
-                       NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
-                                               &dev_priv->ramht, NULL);
-               if (ret)
-                       return ret;
-       }
+       INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+       spin_lock_init(&dev_priv->ramin_lock);
+       dev_priv->ramin_base = ~0;
 
        return 0;
 }
@@ -311,297 +208,89 @@ nouveau_gpuobj_takedown(struct drm_device *dev)
 
        NV_DEBUG(dev, "\n");
 
-       nouveau_gpuobj_del(dev, &dev_priv->ramht);
+       BUG_ON(!list_empty(&dev_priv->gpuobj_list));
 }
 
-void
-nouveau_gpuobj_late_takedown(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpuobj *gpuobj = NULL;
-       struct list_head *entry, *tmp;
-
-       NV_DEBUG(dev, "\n");
-
-       list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
-               gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
-
-               NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
-                        gpuobj, gpuobj->refcount);
-               gpuobj->refcount = 0;
-               nouveau_gpuobj_del(dev, &gpuobj);
-       }
-}
 
-int
-nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
+static void
+nouveau_gpuobj_del(struct kref *ref)
 {
+       struct nouveau_gpuobj *gpuobj =
+               container_of(ref, struct nouveau_gpuobj, refcount);
+       struct drm_device *dev = gpuobj->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;
-       struct nouveau_gpuobj *gpuobj;
        int i;
 
-       NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
-
-       if (!dev_priv || !pgpuobj || !(*pgpuobj))
-               return -EINVAL;
-       gpuobj = *pgpuobj;
-
-       if (gpuobj->refcount != 0) {
-               NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
-               return -EINVAL;
-       }
+       NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
 
        if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
-               for (i = 0; i < gpuobj->im_pramin->size; i += 4)
-                       nv_wo32(dev, gpuobj, i/4, 0);
+               for (i = 0; i < gpuobj->size; i += 4)
+                       nv_wo32(gpuobj, i, 0);
                engine->instmem.flush(dev);
        }
 
        if (gpuobj->dtor)
                gpuobj->dtor(dev, gpuobj);
 
-       if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
+       if (gpuobj->im_backing)
                engine->instmem.clear(dev, gpuobj);
 
-       if (gpuobj->im_pramin) {
-               if (gpuobj->flags & NVOBJ_FLAG_FAKE)
-                       kfree(gpuobj->im_pramin);
-               else
-                       drm_mm_put_block(gpuobj->im_pramin);
-       }
-
+       spin_lock(&dev_priv->ramin_lock);
+       if (gpuobj->im_pramin)
+               drm_mm_put_block(gpuobj->im_pramin);
        list_del(&gpuobj->list);
+       spin_unlock(&dev_priv->ramin_lock);
 
-       *pgpuobj = NULL;
        kfree(gpuobj);
-       return 0;
 }
 
-static int
-nouveau_gpuobj_instance_get(struct drm_device *dev,
-                           struct nouveau_channel *chan,
-                           struct nouveau_gpuobj *gpuobj, uint32_t *inst)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpuobj *cpramin;
-
-       /* <NV50 use PRAMIN address everywhere */
-       if (dev_priv->card_type < NV_50) {
-               *inst = gpuobj->im_pramin->start;
-               return 0;
-       }
-
-       if (chan && gpuobj->im_channel != chan) {
-               NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
-                        gpuobj->im_channel->id, chan->id);
-               return -EINVAL;
-       }
-
-       /* NV50 channel-local instance */
-       if (chan) {
-               cpramin = chan->ramin->gpuobj;
-               *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
-               return 0;
-       }
-
-       /* NV50 global (VRAM) instance */
-       if (!gpuobj->im_channel) {
-               /* ...from global heap */
-               if (!gpuobj->im_backing) {
-                       NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
-                       return -EINVAL;
-               }
-               *inst = gpuobj->im_backing_start;
-               return 0;
-       } else {
-               /* ...from local heap */
-               cpramin = gpuobj->im_channel->ramin->gpuobj;
-               *inst = cpramin->im_backing_start +
-                       (gpuobj->im_pramin->start - cpramin->im_pramin->start);
-               return 0;
-       }
-
-       return -EINVAL;
-}
-
-int
-nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
-                      uint32_t handle, struct nouveau_gpuobj *gpuobj,
-                      struct nouveau_gpuobj_ref **ref_ret)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpuobj_ref *ref;
-       uint32_t instance;
-       int ret;
-
-       NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
-                chan ? chan->id : -1, handle, gpuobj);
-
-       if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
-               return -EINVAL;
-
-       if (!chan && !ref_ret)
-               return -EINVAL;
-
-       if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
-               /* sw object */
-               instance = 0x40;
-       } else {
-               ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
-               if (ret)
-                       return ret;
-       }
-
-       ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-       if (!ref)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&ref->list);
-       ref->gpuobj   = gpuobj;
-       ref->channel  = chan;
-       ref->instance = instance;
-
-       if (!ref_ret) {
-               ref->handle = handle;
-
-               ret = nouveau_ramht_insert(dev, ref);
-               if (ret) {
-                       kfree(ref);
-                       return ret;
-               }
-       } else {
-               ref->handle = ~0;
-               *ref_ret = ref;
-       }
-
-       ref->gpuobj->refcount++;
-       return 0;
-}
-
-int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
-{
-       struct nouveau_gpuobj_ref *ref;
-
-       NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
-
-       if (!dev || !pref || *pref == NULL)
-               return -EINVAL;
-       ref = *pref;
-
-       if (ref->handle != ~0)
-               nouveau_ramht_remove(dev, ref);
-
-       if (ref->gpuobj) {
-               ref->gpuobj->refcount--;
-
-               if (ref->gpuobj->refcount == 0) {
-                       if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
-                               nouveau_gpuobj_del(dev, &ref->gpuobj);
-               }
-       }
-
-       *pref = NULL;
-       kfree(ref);
-       return 0;
-}
-
-int
-nouveau_gpuobj_new_ref(struct drm_device *dev,
-                      struct nouveau_channel *oc, struct nouveau_channel *rc,
-                      uint32_t handle, uint32_t size, int align,
-                      uint32_t flags, struct nouveau_gpuobj_ref **ref)
-{
-       struct nouveau_gpuobj *gpuobj = NULL;
-       int ret;
-
-       ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
-       if (ret)
-               return ret;
-
-       ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
-       if (ret) {
-               nouveau_gpuobj_del(dev, &gpuobj);
-               return ret;
-       }
-
-       return 0;
-}
-
-int
-nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
-                       struct nouveau_gpuobj_ref **ref_ret)
+void
+nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
 {
-       struct nouveau_gpuobj_ref *ref;
-       struct list_head *entry, *tmp;
-
-       list_for_each_safe(entry, tmp, &chan->ramht_refs) {
-               ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+       if (ref)
+               kref_get(&ref->refcount);
 
-               if (ref->handle == handle) {
-                       if (ref_ret)
-                               *ref_ret = ref;
-                       return 0;
-               }
-       }
+       if (*ptr)
+               kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);
 
-       return -EINVAL;
+       *ptr = ref;
 }
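
The kref-based helper above doubles as get, put, and assign in one call. A usage sketch (the holder variable is illustrative):

	struct nouveau_gpuobj *hold = NULL;

	nouveau_gpuobj_ref(gpuobj, &hold);  /* get: hold now pins gpuobj */
	nouveau_gpuobj_ref(NULL, &hold);    /* put: drops the ref, may free it */
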
 
 int
-nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
-                       uint32_t b_offset, uint32_t size,
-                       uint32_t flags, struct nouveau_gpuobj **pgpuobj,
-                       struct nouveau_gpuobj_ref **pref)
+nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
+                       u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        int i;
 
        NV_DEBUG(dev,
-                "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
-                p_offset, b_offset, size, flags);
+                "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
+                pinst, vinst, size, flags);
 
        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
-       gpuobj->im_channel = NULL;
-       gpuobj->flags      = flags | NVOBJ_FLAG_FAKE;
-
-       list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
-
-       if (p_offset != ~0) {
-               gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
-                                           GFP_KERNEL);
-               if (!gpuobj->im_pramin) {
-                       nouveau_gpuobj_del(dev, &gpuobj);
-                       return -ENOMEM;
-               }
-               gpuobj->im_pramin->start = p_offset;
-               gpuobj->im_pramin->size  = size;
-       }
-
-       if (b_offset != ~0) {
-               gpuobj->im_backing = (struct nouveau_bo *)-1;
-               gpuobj->im_backing_start = b_offset;
-       }
+       gpuobj->dev = dev;
+       gpuobj->flags = flags;
+       kref_init(&gpuobj->refcount);
+       gpuobj->size  = size;
+       gpuobj->pinst = pinst;
+       gpuobj->cinst = 0xdeadbeef;
+       gpuobj->vinst = vinst;
 
        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-               for (i = 0; i < gpuobj->im_pramin->size; i += 4)
-                       nv_wo32(dev, gpuobj, i/4, 0);
+               for (i = 0; i < gpuobj->size; i += 4)
+                       nv_wo32(gpuobj, i, 0);
                dev_priv->engine.instmem.flush(dev);
        }
 
-       if (pref) {
-               i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
-               if (i) {
-                       nouveau_gpuobj_del(dev, &gpuobj);
-                       return i;
-               }
-       }
-
-       if (pgpuobj)
-               *pgpuobj = gpuobj;
+       spin_lock(&dev_priv->ramin_lock);
+       list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+       spin_unlock(&dev_priv->ramin_lock);
+       *pgpuobj = gpuobj;
        return 0;
 }
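
A call sketch for the simplified fake-object constructor, with illustrative offsets (the removed variant took separate p_offset/b_offset arguments plus an optional ref):

	/* wrap 64 KiB of PRAMIN at byte offset 0x10000; VRAM address unknown */
	ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 0x10000,
				      NVOBJ_FLAG_ZERO_ALLOC, &gpuobj);
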
 
@@ -685,14 +374,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
                adjust = offset &  0x00000fff;
                frame  = offset & ~0x00000fff;
 
-               nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
-                               (adjust << 20) |
-                                (access << 14) |
-                                (target << 16) |
-                                 class));
-               nv_wo32(dev, *gpuobj, 1, size - 1);
-               nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
-               nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
+               nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
+                                     (access << 14) | (target << 16) |
+                                     class));
+               nv_wo32(*gpuobj,  4, size - 1);
+               nv_wo32(*gpuobj,  8, frame | pte_flags);
+               nv_wo32(*gpuobj, 12, frame | pte_flags);
        } else {
                uint64_t limit = offset + size - 1;
                uint32_t flags0, flags5;
@@ -705,12 +392,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
                        flags5 = 0x00080000;
                }
 
-               nv_wo32(dev, *gpuobj, 0, flags0 | class);
-               nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
-               nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
-               nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
-                                       (upper_32_bits(offset) & 0xff));
-               nv_wo32(dev, *gpuobj, 5, flags5);
+               nv_wo32(*gpuobj,  0, flags0 | class);
+               nv_wo32(*gpuobj,  4, lower_32_bits(limit));
+               nv_wo32(*gpuobj,  8, lower_32_bits(offset));
+               nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
+                                     (upper_32_bits(offset) & 0xff));
+               nv_wo32(*gpuobj, 20, flags5);
        }
 
        instmem->flush(dev);
@@ -741,7 +428,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
                        *o_ret = 0;
        } else
        if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
-               *gpuobj = dev_priv->gart_info.sg_ctxdma;
+               nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
                if (offset & ~0xffffffffULL) {
                        NV_ERROR(dev, "obj offset exceeds 32-bits\n");
                        return -EINVAL;
@@ -829,25 +516,25 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
        }
 
        if (dev_priv->card_type >= NV_50) {
-               nv_wo32(dev, *gpuobj, 0, class);
-               nv_wo32(dev, *gpuobj, 5, 0x00010000);
+               nv_wo32(*gpuobj,  0, class);
+               nv_wo32(*gpuobj, 20, 0x00010000);
        } else {
                switch (class) {
                case NV_CLASS_NULL:
-                       nv_wo32(dev, *gpuobj, 0, 0x00001030);
-                       nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
+                       nv_wo32(*gpuobj, 0, 0x00001030);
+                       nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
                        break;
                default:
                        if (dev_priv->card_type >= NV_40) {
-                               nv_wo32(dev, *gpuobj, 0, class);
+                               nv_wo32(*gpuobj, 0, class);
 #ifdef __BIG_ENDIAN
-                               nv_wo32(dev, *gpuobj, 2, 0x01000000);
+                               nv_wo32(*gpuobj, 8, 0x01000000);
 #endif
                        } else {
 #ifdef __BIG_ENDIAN
-                               nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
+                               nv_wo32(*gpuobj, 0, class | 0x00080000);
 #else
-                               nv_wo32(dev, *gpuobj, 0, class);
+                               nv_wo32(*gpuobj, 0, class);
 #endif
                        }
                }
@@ -873,10 +560,15 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
+       gpuobj->dev = chan->dev;
        gpuobj->engine = NVOBJ_ENGINE_SW;
        gpuobj->class = class;
+       kref_init(&gpuobj->refcount);
+       gpuobj->cinst = 0x40;
 
+       spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+       spin_unlock(&dev_priv->ramin_lock);
        *gpuobj_ret = gpuobj;
        return 0;
 }
@@ -886,7 +578,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 {
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpuobj *pramin = NULL;
        uint32_t size;
        uint32_t base;
        int ret;
@@ -911,18 +602,16 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
                size += 0x1000;
        }
 
-       ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
-                                    &chan->ramin);
+       ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
                return ret;
        }
-       pramin = chan->ramin->gpuobj;
 
-       ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
+       ret = drm_mm_init(&chan->ramin_heap, base, size);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
-               nouveau_gpuobj_ref_del(dev, &chan->ramin);
+               nouveau_gpuobj_ref(NULL, &chan->ramin);
                return ret;
        }
 
@@ -939,8 +628,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
        struct nouveau_gpuobj *vram = NULL, *tt = NULL;
        int ret, i;
 
-       INIT_LIST_HEAD(&chan->ramht_refs);
-
        NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
 
        /* Allocate a chunk of memory for per-channel object storage */
@@ -956,41 +643,38 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
         *    locations determined during init.
         */
        if (dev_priv->card_type >= NV_50) {
-               uint32_t vm_offset, pde;
+               u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
+               u64 vm_vinst = chan->ramin->vinst + pgd_offs;
+               u32 vm_pinst = chan->ramin->pinst;
+               u32 pde;
 
-               vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
-               vm_offset += chan->ramin->gpuobj->im_pramin->start;
+               if (vm_pinst != ~0)
+                       vm_pinst += pgd_offs;
 
-               ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
-                                                       0, &chan->vm_pd, NULL);
+               ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
+                                             0, &chan->vm_pd);
                if (ret)
                        return ret;
                for (i = 0; i < 0x4000; i += 8) {
-                       nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
-                       nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
+                       nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+                       nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
                }
 
-               pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
-               ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
-                                            dev_priv->gart_info.sg_ctxdma,
-                                            &chan->vm_gart_pt);
-               if (ret)
-                       return ret;
-               nv_wo32(dev, chan->vm_pd, pde++,
-                           chan->vm_gart_pt->instance | 0x03);
-               nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+               nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
+                                  &chan->vm_gart_pt);
+               pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
+               nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
+               nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
 
-               pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
+               pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
                for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-                       ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
-                                                    dev_priv->vm_vram_pt[i],
-                                                    &chan->vm_vram_pt[i]);
-                       if (ret)
-                               return ret;
+                       nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
+                                          &chan->vm_vram_pt[i]);
 
-                       nv_wo32(dev, chan->vm_pd, pde++,
-                                   chan->vm_vram_pt[i]->instance | 0x61);
-                       nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+                       nv_wo32(chan->vm_pd, pde + 0,
+                               chan->vm_vram_pt[i]->vinst | 0x61);
+                       nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
+                       pde += 8;
                }
 
                instmem->flush(dev);
@@ -998,15 +682,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 
        /* RAMHT */
        if (dev_priv->card_type < NV_50) {
-               ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
-                                            &chan->ramht);
+               nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
+       } else {
+               struct nouveau_gpuobj *ramht = NULL;
+
+               ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
+                                        NVOBJ_FLAG_ZERO_ALLOC, &ramht);
                if (ret)
                        return ret;
-       } else {
-               ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
-                                            0x8000, 16,
-                                            NVOBJ_FLAG_ZERO_ALLOC,
-                                            &chan->ramht);
+
+               ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
+               nouveau_gpuobj_ref(NULL, &ramht);
                if (ret)
                        return ret;
        }
@@ -1023,24 +709,32 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                }
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-                                               0, dev_priv->fb_available_size,
-                                               NV_DMA_ACCESS_RW,
-                                               NV_DMA_TARGET_VIDMEM, &vram);
+                                            0, dev_priv->fb_available_size,
+                                            NV_DMA_ACCESS_RW,
+                                            NV_DMA_TARGET_VIDMEM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        }
 
-       ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
+       ret = nouveau_ramht_insert(chan, vram_h, vram);
+       nouveau_gpuobj_ref(NULL, &vram);
        if (ret) {
-               NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
+               NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
                return ret;
        }
 
        /* TT memory ctxdma */
        if (dev_priv->card_type >= NV_50) {
-               tt = vram;
+               ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+                                            0, dev_priv->vm_end,
+                                            NV_DMA_ACCESS_RW,
+                                            NV_DMA_TARGET_AGP, &tt);
+               if (ret) {
+                       NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
+                       return ret;
+               }
        } else
        if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
                ret = nouveau_gpuobj_gart_dma_new(chan, 0,
@@ -1056,9 +750,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                return ret;
        }
 
-       ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
+       ret = nouveau_ramht_insert(chan, tt_h, tt);
+       nouveau_gpuobj_ref(NULL, &tt);
        if (ret) {
-               NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
+               NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
                return ret;
        }
 
@@ -1070,33 +765,23 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 {
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
-       struct list_head *entry, *tmp;
-       struct nouveau_gpuobj_ref *ref;
        int i;
 
        NV_DEBUG(dev, "ch%d\n", chan->id);
 
-       if (!chan->ramht_refs.next)
+       if (!chan->ramht)
                return;
 
-       list_for_each_safe(entry, tmp, &chan->ramht_refs) {
-               ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
-
-               nouveau_gpuobj_ref_del(dev, &ref);
-       }
-
-       nouveau_gpuobj_ref_del(dev, &chan->ramht);
+       nouveau_ramht_ref(NULL, &chan->ramht, chan);
 
-       nouveau_gpuobj_del(dev, &chan->vm_pd);
-       nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
+       nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+       nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
        for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
-               nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
+               nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
 
        if (chan->ramin_heap.free_stack.next)
                drm_mm_takedown(&chan->ramin_heap);
-       if (chan->ramin)
-               nouveau_gpuobj_ref_del(dev, &chan->ramin);
-
+       nouveau_gpuobj_ref(NULL, &chan->ramin);
 }
 
 int
@@ -1117,17 +802,17 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
        }
 
        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-               if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
+               if (!gpuobj->im_backing)
                        continue;
 
-               gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
+               gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
                if (!gpuobj->im_backing_suspend) {
                        nouveau_gpuobj_resume(dev);
                        return -ENOMEM;
                }
 
-               for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
-                       gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
+               for (i = 0; i < gpuobj->size; i += 4)
+                       gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
        }
 
        return 0;
@@ -1172,8 +857,8 @@ nouveau_gpuobj_resume(struct drm_device *dev)
                if (!gpuobj->im_backing_suspend)
                        continue;
 
-               for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
-                       nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
+               for (i = 0; i < gpuobj->size; i += 4)
+                       nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
                dev_priv->engine.instmem.flush(dev);
        }
 
@@ -1208,25 +893,24 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
                return -EPERM;
        }
 
-       if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
+       if (nouveau_ramht_find(chan, init->handle))
                return -EEXIST;
 
        if (!grc->software)
                ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
        else
                ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
-
        if (ret) {
                NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
                         ret, init->channel, init->handle);
                return ret;
        }
 
-       ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
+       ret = nouveau_ramht_insert(chan, init->handle, gr);
+       nouveau_gpuobj_ref(NULL, &gr);
        if (ret) {
                NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
                         ret, init->channel, init->handle);
-               nouveau_gpuobj_del(dev, &gr);
                return ret;
        }
 
@@ -1237,16 +921,62 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
 {
        struct drm_nouveau_gpuobj_free *objfree = data;
-       struct nouveau_gpuobj_ref *ref;
+       struct nouveau_gpuobj *gpuobj;
        struct nouveau_channel *chan;
-       int ret;
 
        NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
 
-       ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
-       if (ret)
-               return ret;
-       nouveau_gpuobj_ref_del(dev, &ref);
+       gpuobj = nouveau_ramht_find(chan, objfree->handle);
+       if (!gpuobj)
+               return -ENOENT;
 
+       nouveau_ramht_remove(chan, objfree->handle);
        return 0;
 }
+
+u32
+nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
+{
+       struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+       struct drm_device *dev = gpuobj->dev;
+
+       if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
+               u64  ptr = gpuobj->vinst + offset;
+               u32 base = ptr >> 16;
+               u32  val;
+
+               spin_lock(&dev_priv->ramin_lock);
+               if (dev_priv->ramin_base != base) {
+                       dev_priv->ramin_base = base;
+                       nv_wr32(dev, 0x001700, dev_priv->ramin_base);
+               }
+               val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
+               spin_unlock(&dev_priv->ramin_lock);
+               return val;
+       }
+
+       return nv_ri32(dev, gpuobj->pinst + offset);
+}
+
+void
+nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
+{
+       struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+       struct drm_device *dev = gpuobj->dev;
+
+       if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
+               u64  ptr = gpuobj->vinst + offset;
+               u32 base = ptr >> 16;
+
+               spin_lock(&dev_priv->ramin_lock);
+               if (dev_priv->ramin_base != base) {
+                       dev_priv->ramin_base = base;
+                       nv_wr32(dev, 0x001700, dev_priv->ramin_base);
+               }
+               nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
+               spin_unlock(&dev_priv->ramin_lock);
+               return;
+       }
+
+       nv_wi32(dev, gpuobj->pinst + offset, val);
+}
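
/*
 * Aside: when an object has no PRAMIN mapping (pinst == ~0) or PRAMIN
 * is not yet available, both accessors above go through a sliding
 * 64KiB window: the 64KiB-aligned base (ptr >> 16) is programmed into
 * register 0x001700 and the word is accessed at 0x700000 plus the low
 * 16 bits of the address.  A stand-alone sketch of that window
 * arithmetic; the two register offsets come from the code above, the
 * sample values are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vinst  = 0x12340000ull;	/* object's VRAM address */
	uint32_t offset = 0x24;			/* byte offset within it */
	uint64_t ptr    = vinst + offset;

	uint32_t base = (uint32_t)(ptr >> 16);	/* window selector */
	uint32_t bar  = 0x700000 + (uint32_t)(ptr & 0xffff);

	printf("write 0x%08x to 0x001700, then access 0x%08x\n", base, bar);
	return 0;
}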
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
new file mode 100644 (file)
index 0000000..ac62a1b
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static void
+legacy_perf_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       char *perf, *entry, *bmp = &bios->data[bios->offset];
+       int headerlen, use_straps;
+
+       if (bmp[5] < 0x5 || bmp[6] < 0x14) {
+               NV_DEBUG(dev, "BMP version too old for perf\n");
+               return;
+       }
+
+       perf = ROMPTR(bios, bmp[0x73]);
+       if (!perf) {
+               NV_DEBUG(dev, "No memclock table pointer found.\n");
+               return;
+       }
+
+       switch (perf[0]) {
+       case 0x12:
+       case 0x14:
+       case 0x18:
+               use_straps = 0;
+               headerlen = 1;
+               break;
+       case 0x01:
+               use_straps = perf[1] & 1;
+               headerlen = (use_straps ? 8 : 2);
+               break;
+       default:
+               NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]);
+               return;
+       }
+
+       entry = perf + headerlen;
+       if (use_straps)
+               entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
+
+       sprintf(pm->perflvl[0].name, "performance_level_0");
+       pm->perflvl[0].memory = ROM16(entry[0]) * 20;
+       pm->nr_perflvl = 1;
+}
+
+void
+nouveau_perf_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct bit_entry P;
+       u8 version, headerlen, recordlen, entries;
+       u8 *perf, *entry;
+       int vid, i;
+
+       if (bios->type == NVBIOS_BIT) {
+               if (bit_table(dev, 'P', &P))
+                       return;
+
+               if (P.version != 1 && P.version != 2) {
+                       NV_WARN(dev, "unknown perf for BIT P %d\n", P.version);
+                       return;
+               }
+
+               perf = ROMPTR(bios, P.data[0]);
+               if (!perf) {
+                       NV_DEBUG(dev, "perf table pointer invalid\n");
+                       return;
+               }
+
+               version   = perf[0];
+               headerlen = perf[1];
+               if (version < 0x40) {
+                       recordlen = perf[3] + (perf[4] * perf[5]);
+                       entries   = perf[2];
+               } else {
+                       recordlen = perf[2] + (perf[3] * perf[4]);
+                       entries   = perf[5];
+               }
+       } else {
+               if (bios->data[bios->offset + 6] < 0x25) {
+                       legacy_perf_init(dev);
+                       return;
+               }
+
+               perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+               if (!perf) {
+                       NV_DEBUG(dev, "perf table pointer invalid\n");
+                       return;
+               }
+
+               version   = perf[1];
+               headerlen = perf[0];
+               recordlen = perf[3];
+               entries   = perf[2];
+       }
+
+       entry = perf + headerlen;
+       for (i = 0; i < entries; i++) {
+               struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
+
+               if (entry[0] == 0xff) {
+                       entry += recordlen;
+                       continue;
+               }
+
+               switch (version) {
+               case 0x12:
+               case 0x13:
+               case 0x15:
+                       perflvl->fanspeed = entry[55];
+                       perflvl->voltage = entry[56];
+                       perflvl->core = ROM32(entry[1]) * 10;
+                       perflvl->memory = ROM32(entry[5]) * 20;
+                       break;
+               case 0x21:
+               case 0x23:
+               case 0x24:
+                       perflvl->fanspeed = entry[4];
+                       perflvl->voltage = entry[5];
+                       perflvl->core = ROM16(entry[6]) * 1000;
+
+                       if (dev_priv->chipset == 0x49 ||
+                           dev_priv->chipset == 0x4b)
+                               perflvl->memory = ROM16(entry[11]) * 1000;
+                       else
+                               perflvl->memory = ROM16(entry[11]) * 2000;
+
+                       break;
+               case 0x25:
+                       perflvl->fanspeed = entry[4];
+                       perflvl->voltage = entry[5];
+                       perflvl->core = ROM16(entry[6]) * 1000;
+                       perflvl->shader = ROM16(entry[10]) * 1000;
+                       perflvl->memory = ROM16(entry[12]) * 1000;
+                       break;
+               case 0x30:
+                       perflvl->memscript = ROM16(entry[2]);
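+                       /* fall through */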
+               case 0x35:
+                       perflvl->fanspeed = entry[6];
+                       perflvl->voltage = entry[7];
+                       perflvl->core = ROM16(entry[8]) * 1000;
+                       perflvl->shader = ROM16(entry[10]) * 1000;
+                       perflvl->memory = ROM16(entry[12]) * 1000;
+                       /*XXX: confirm on 0x35 */
+                       perflvl->unk05 = ROM16(entry[16]) * 1000;
+                       break;
+               case 0x40:
+#define subent(n) entry[perf[2] + ((n) * perf[3])]
+                       perflvl->fanspeed = 0; /*XXX*/
+                       perflvl->voltage = entry[2];
+                       perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000;
+                       perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000;
+                       perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000;
+                       break;
+               }
+
+               /* make sure vid is valid */
+               if (pm->voltage.supported && perflvl->voltage) {
+                       vid = nouveau_volt_vid_lookup(dev, perflvl->voltage);
+                       if (vid < 0) {
+                               NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
+                               entry += recordlen;
+                               continue;
+                       }
+               }
+
+               snprintf(perflvl->name, sizeof(perflvl->name),
+                        "performance_level_%d", i);
+               perflvl->id = i;
+               pm->nr_perflvl++;
+
+               entry += recordlen;
+       }
+}
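
/*
 * Aside: the parser above reads multi-byte fields with the driver's
 * ROM16/ROM32 helpers, which fetch little-endian values straight out
 * of the BIOS image.  A stand-alone equivalent, taking a pointer where
 * the driver macros take an lvalue:
 */
#include <stdint.h>

static uint16_t rom16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));		/* LE 16-bit */
}

static uint32_t rom32(const uint8_t *p)
{
	return (uint32_t)rom16(p) | ((uint32_t)rom16(p + 2) << 16);
}

int main(void)
{
	const uint8_t entry[4] = { 0x34, 0x12, 0x78, 0x56 };

	return rom32(entry) == 0x56781234 ? 0 : 1;
}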
+
+void
+nouveau_perf_fini(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
new file mode 100644 (file)
index 0000000..1c99c55
--- /dev/null
@@ -0,0 +1,518 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+static int
+nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+                    u8 id, u32 khz)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       void *pre_state;
+
+       if (khz == 0)
+               return 0;
+
+       pre_state = pm->clock_pre(dev, perflvl, id, khz);
+       if (IS_ERR(pre_state))
+               return PTR_ERR(pre_state);
+
+       if (pre_state)
+               pm->clock_set(dev, pre_state);
+       return 0;
+}
+
+static int
+nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       int ret;
+
+       if (perflvl == pm->cur)
+               return 0;
+
+       if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) {
+               ret = pm->voltage_set(dev, perflvl->voltage);
+               if (ret) {
+                       NV_ERROR(dev, "voltage_set %d failed: %d\n",
+                                perflvl->voltage, ret);
+               }
+       }
+
+       nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
+       nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
+       nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
+       nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
+
+       pm->cur = perflvl;
+       return 0;
+}
+
+static int
+nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_level *perflvl = NULL;
+
+       /* safety precaution, for now */
+       if (nouveau_perflvl_wr != 7777)
+               return -EPERM;
+
+       if (!pm->clock_set)
+               return -EINVAL;
+
+       if (!strncmp(profile, "boot", 4))
+               perflvl = &pm->boot;
+       else {
+               int pl = simple_strtol(profile, NULL, 10);
+               int i;
+
+               for (i = 0; i < pm->nr_perflvl; i++) {
+                       if (pm->perflvl[i].id == pl) {
+                               perflvl = &pm->perflvl[i];
+                               break;
+                       }
+               }
+
+               if (!perflvl)
+                       return -EINVAL;
+       }
+
+       NV_INFO(dev, "setting performance level: %s\n", profile);
+       return nouveau_pm_perflvl_set(dev, perflvl);
+}
+
+static int
+nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       int ret;
+
+       if (!pm->clock_get)
+               return -EINVAL;
+
+       memset(perflvl, 0, sizeof(*perflvl));
+
+       ret = pm->clock_get(dev, PLL_CORE);
+       if (ret > 0)
+               perflvl->core = ret;
+
+       ret = pm->clock_get(dev, PLL_MEMORY);
+       if (ret > 0)
+               perflvl->memory = ret;
+
+       ret = pm->clock_get(dev, PLL_SHADER);
+       if (ret > 0)
+               perflvl->shader = ret;
+
+       ret = pm->clock_get(dev, PLL_UNK05);
+       if (ret > 0)
+               perflvl->unk05 = ret;
+
+       if (pm->voltage.supported && pm->voltage_get) {
+               ret = pm->voltage_get(dev);
+               if (ret > 0)
+                       perflvl->voltage = ret;
+       }
+
+       return 0;
+}
+
+static void
+nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
+{
+       char c[16], s[16], v[16], f[16];
+
+       c[0] = '\0';
+       if (perflvl->core)
+               snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
+
+       s[0] = '\0';
+       if (perflvl->shader)
+               snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
+
+       v[0] = '\0';
+       if (perflvl->voltage)
+               snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10);
+
+       f[0] = '\0';
+       if (perflvl->fanspeed)
+               snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
+
+       snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000,
+                c, s, v, f);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl_info(struct device *d,
+                           struct device_attribute *a, char *buf)
+{
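+       /* this cast relies on dev_attr being the first member of
+        * struct nouveau_pm_level */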
+       struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a;
+       char *ptr = buf;
+       int len = PAGE_SIZE;
+
+       snprintf(ptr, len, "%d: ", perflvl->id);
+       ptr += strlen(buf);
+       len -= strlen(buf);
+
+       nouveau_pm_perflvl_info(perflvl, ptr, len);
+       return strlen(buf);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_level cur;
+       int len = PAGE_SIZE, ret;
+       char *ptr = buf;
+
+       if (!pm->cur)
+               snprintf(ptr, len, "setting: boot\n");
+       else if (pm->cur == &pm->boot)
+               snprintf(ptr, len, "setting: boot\nc: ");
+       else
+               snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id);
+       ptr += strlen(buf);
+       len -= strlen(buf);
+
+       ret = nouveau_pm_perflvl_get(dev, &cur);
+       if (ret == 0)
+               nouveau_pm_perflvl_info(&cur, ptr, len);
+       return strlen(buf);
+}
+
+static ssize_t
+nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
+                      const char *buf, size_t count)
+{
+       struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+       int ret;
+
+       ret = nouveau_pm_profile_set(dev, buf);
+       if (ret)
+               return ret;
+       return strlen(buf);
+}
+
+static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
+                  nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
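
/*
 * Aside: the attribute above lands under the card's PCI device in
 * sysfs, so userspace selects a level by writing its id (or "boot")
 * to performance_level.  A sketch; the device path is illustrative.
 * Per the guard in nouveau_pm_profile_set(), the write is refused
 * with -EPERM unless the nouveau_perflvl_wr parameter is set to 7777.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/performance_level",
			"w");

	if (!f)
		return 1;
	fputs("0", f);		/* request the level with id 0 */
	return fclose(f) ? 1 : 0;
}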
+
+static int
+nouveau_sysfs_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct device *d = &dev->pdev->dev;
+       int ret, i;
+
+       ret = device_create_file(d, &dev_attr_performance_level);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < pm->nr_perflvl; i++) {
+               struct nouveau_pm_level *perflvl = &pm->perflvl[i];
+
+               perflvl->dev_attr.attr.name = perflvl->name;
+               perflvl->dev_attr.attr.mode = S_IRUGO;
+               perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
+               perflvl->dev_attr.store = NULL;
+               sysfs_attr_init(&perflvl->dev_attr.attr);
+
+               ret = device_create_file(d, &perflvl->dev_attr);
+               if (ret) {
+                       NV_ERROR(dev, "failed pervlvl %d sysfs: %d\n",
+                                perflvl->id, i);
+                       perflvl->dev_attr.attr.name = NULL;
+                       nouveau_pm_fini(dev);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static void
+nouveau_sysfs_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct device *d = &dev->pdev->dev;
+       int i;
+
+       device_remove_file(d, &dev_attr_performance_level);
+       for (i = 0; i < pm->nr_perflvl; i++) {
+               struct nouveau_pm_level *pl = &pm->perflvl[i];
+
+               if (!pl->dev_attr.attr.name)
+                       break;
+
+               device_remove_file(d, &pl->dev_attr);
+       }
+}
+
+static ssize_t
+nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
+                                                 NULL, 0);
+
+static ssize_t
+nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
+                                               const char *buf, size_t count)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+       long value;
+
+       if (strict_strtol(buf, 10, &value) == -EINVAL)
+               return count;
+
+       temp->down_clock = value / 1000;
+
+       nouveau_temp_safety_checks(dev);
+
+       return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
+                                                 nouveau_hwmon_set_max_temp,
+                                                 0);
+
+static ssize_t
+nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
+                                                       char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
+                                                           const char *buf,
+                                                               size_t count)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+       long value;
+
+       if (strict_strtol(buf, 10, &value) == -EINVAL)
+               return count;
+
+       temp->critical = value / 1000;
+
+       nouveau_temp_safety_checks(dev);
+
+       return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+                                               nouveau_hwmon_critical_temp,
+                                               nouveau_hwmon_set_critical_temp,
+                                               0);
+
+static ssize_t nouveau_hwmon_show_name(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       return sprintf(buf, "nouveau\n");
+}
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
+
+static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       return sprintf(buf, "1000\n");
+}
+static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
+                                               nouveau_hwmon_show_update_rate,
+                                               NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       &sensor_dev_attr_temp1_max.dev_attr.attr,
+       &sensor_dev_attr_temp1_crit.dev_attr.attr,
+       &sensor_dev_attr_name.dev_attr.attr,
+       &sensor_dev_attr_update_rate.dev_attr.attr,
+       NULL
+};
+
+static const struct attribute_group hwmon_attrgroup = {
+       .attrs = hwmon_attributes,
+};
+
+static int
+nouveau_hwmon_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct device *hwmon_dev;
+       int ret;
+
+       if (!pm->temp_get)
+               return -ENODEV;
+
+       hwmon_dev = hwmon_device_register(&dev->pdev->dev);
+       if (IS_ERR(hwmon_dev)) {
+               ret = PTR_ERR(hwmon_dev);
+               NV_ERROR(dev,
+                       "Unable to register hwmon device: %d\n", ret);
+               return ret;
+       }
+       dev_set_drvdata(hwmon_dev, dev);
+       ret = sysfs_create_group(&hwmon_dev->kobj,
+                                       &hwmon_attrgroup);
+       if (ret) {
+               NV_ERROR(dev,
+                       "Unable to create hwmon sysfs file: %d\n", ret);
+               hwmon_device_unregister(hwmon_dev);
+               return ret;
+       }
+
+       pm->hwmon = hwmon_dev;
+
+       return 0;
+}
+
+static void
+nouveau_hwmon_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+       if (pm->hwmon) {
+               sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+               hwmon_device_unregister(pm->hwmon);
+       }
+}
+
+int
+nouveau_pm_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       char info[256];
+       int ret, i;
+
+       nouveau_volt_init(dev);
+       nouveau_perf_init(dev);
+       nouveau_temp_init(dev);
+       nouveau_mem_timing_init(dev);
+
+       NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
+       for (i = 0; i < pm->nr_perflvl; i++) {
+               nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
+               NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info);
+       }
+
+       /* determine current ("boot") performance level */
+       ret = nouveau_pm_perflvl_get(dev, &pm->boot);
+       if (ret == 0) {
+               pm->cur = &pm->boot;
+
+               nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
+               NV_INFO(dev, "c: %s", info);
+       }
+
+       /* switch performance levels now if requested */
+       if (nouveau_perflvl != NULL) {
+               ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
+               if (ret) {
+                       NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
+                                nouveau_perflvl, ret);
+               }
+       }
+
+       nouveau_sysfs_init(dev);
+       nouveau_hwmon_init(dev);
+
+       return 0;
+}
+
+void
+nouveau_pm_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+       if (pm->cur != &pm->boot)
+               nouveau_pm_perflvl_set(dev, &pm->boot);
+
+       nouveau_mem_timing_fini(dev);
+       nouveau_temp_fini(dev);
+       nouveau_perf_fini(dev);
+       nouveau_volt_fini(dev);
+
+       nouveau_hwmon_fini(dev);
+       nouveau_sysfs_fini(dev);
+}
+
+void
+nouveau_pm_resume(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_level *perflvl;
+
+       if (pm->cur == &pm->boot)
+               return;
+
+       perflvl = pm->cur;
+       pm->cur = &pm->boot;
+       nouveau_pm_perflvl_set(dev, perflvl);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
new file mode 100644 (file)
index 0000000..4a9838d
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_PM_H__
+#define __NOUVEAU_PM_H__
+
+/* nouveau_pm.c */
+int  nouveau_pm_init(struct drm_device *dev);
+void nouveau_pm_fini(struct drm_device *dev);
+void nouveau_pm_resume(struct drm_device *dev);
+
+/* nouveau_volt.c */
+void nouveau_volt_init(struct drm_device *);
+void nouveau_volt_fini(struct drm_device *);
+int  nouveau_volt_vid_lookup(struct drm_device *, int voltage);
+int  nouveau_volt_lvl_lookup(struct drm_device *, int vid);
+int  nouveau_voltage_gpio_get(struct drm_device *);
+int  nouveau_voltage_gpio_set(struct drm_device *, int voltage);
+
+/* nouveau_perf.c */
+void nouveau_perf_init(struct drm_device *);
+void nouveau_perf_fini(struct drm_device *);
+
+/* nouveau_mem.c */
+void nouveau_mem_timing_init(struct drm_device *);
+void nouveau_mem_timing_fini(struct drm_device *);
+
+/* nv04_pm.c */
+int nv04_pm_clock_get(struct drm_device *, u32 id);
+void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+                       u32 id, int khz);
+void nv04_pm_clock_set(struct drm_device *, void *);
+
+/* nv50_pm.c */
+int nv50_pm_clock_get(struct drm_device *, u32 id);
+void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+                       u32 id, int khz);
+void nv50_pm_clock_set(struct drm_device *, void *);
+
+/* nva3_pm.c */
+int nva3_pm_clock_get(struct drm_device *, u32 id);
+void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+                       u32 id, int khz);
+void nva3_pm_clock_set(struct drm_device *, void *);
+
+/* nouveau_temp.c */
+void nouveau_temp_init(struct drm_device *dev);
+void nouveau_temp_fini(struct drm_device *dev);
+void nouveau_temp_safety_checks(struct drm_device *dev);
+int nv40_temp_get(struct drm_device *dev);
+int nv84_temp_get(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
new file mode 100644 (file)
index 0000000..7f16697
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+
+static u32
+nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
+{
+       struct drm_device *dev = chan->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_ramht *ramht = chan->ramht;
+       u32 hash = 0;
+       int i;
+
+       NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
+
+       for (i = 32; i > 0; i -= ramht->bits) {
+               hash ^= (handle & ((1 << ramht->bits) - 1));
+               handle >>= ramht->bits;
+       }
+
+       if (dev_priv->card_type < NV_50)
+               hash ^= chan->id << (ramht->bits - 4);
+       hash <<= 3;
+
+       NV_DEBUG(dev, "hash=0x%08x\n", hash);
+       return hash;
+}
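
/*
 * Aside: a stand-alone copy of the hash above.  The handle is folded
 * down by XORing it in ramht->bits-sized chunks, pre-NV50 the channel
 * id is mixed in, and the final << 3 turns an entry index into a byte
 * offset (8 bytes per RAMHT entry).  The sample values in main() are
 * illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ramht_hash(uint32_t handle, int bits, int chid, int pre_nv50)
{
	uint32_t hash = 0;
	int i;

	for (i = 32; i > 0; i -= bits) {	/* fold 32 bits down */
		hash ^= handle & ((1u << bits) - 1);
		handle >>= bits;
	}
	if (pre_nv50)
		hash ^= (uint32_t)chid << (bits - 4);
	return hash << 3;			/* entry index -> byte offset */
}

int main(void)
{
	/* 512-entry RAMHT (bits = 9), channel 1, arbitrary handle */
	printf("0x%08x\n", ramht_hash(0xbeef0201, 9, 1, 1));
	return 0;
}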
+
+static int
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+                         u32 offset)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 ctx = nv_ro32(ramht, offset + 4);
+
+       if (dev_priv->card_type < NV_40)
+               return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+       return (ctx != 0);
+}
+
+static int
+nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
+                                struct nouveau_gpuobj *ramht, u32 offset)
+{
+       struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+       u32 ctx = nv_ro32(ramht, offset + 4);
+
+       if (dev_priv->card_type >= NV_50)
+               return true;
+       else if (dev_priv->card_type >= NV_40)
+               return chan->id ==
+                       ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+       else
+               return chan->id ==
+                       ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+}
+
+int
+nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
+                    struct nouveau_gpuobj *gpuobj)
+{
+       struct drm_device *dev = chan->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+       struct nouveau_ramht_entry *entry;
+       struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+       unsigned long flags;
+       u32 ctx, co, ho;
+
+       if (nouveau_ramht_find(chan, handle))
+               return -EEXIST;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+       entry->channel = chan;
+       entry->gpuobj = NULL;
+       entry->handle = handle;
+       nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
+
+       if (dev_priv->card_type < NV_40) {
+               ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+                     (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+                     (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+       } else
+       if (dev_priv->card_type < NV_50) {
+               ctx = (gpuobj->cinst >> 4) |
+                     (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+                     (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+       } else {
+               if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
+                       ctx = (gpuobj->cinst << 10) | 2;
+               } else {
+                       ctx = (gpuobj->cinst >> 4) |
+                             ((gpuobj->engine <<
+                               NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
+               }
+       }
+
+       spin_lock_irqsave(&chan->ramht->lock, flags);
+       list_add(&entry->head, &chan->ramht->entries);
+
+       co = ho = nouveau_ramht_hash_handle(chan, handle);
+       do {
+               if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+                       NV_DEBUG(dev,
+                                "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+                                chan->id, co, handle, ctx);
+                       nv_wo32(ramht, co + 0, handle);
+                       nv_wo32(ramht, co + 4, ctx);
+
+                       spin_unlock_irqrestore(&chan->ramht->lock, flags);
+                       instmem->flush(dev);
+                       return 0;
+               }
+               NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
+                        chan->id, co, nv_ro32(ramht, co));
+
+               co += 8;
+               if (co >= ramht->size)
+                       co = 0;
+       } while (co != ho);
+
+       NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
+       list_del(&entry->head);
+       spin_unlock_irqrestore(&chan->ramht->lock, flags);
+       kfree(entry);
+       return -ENOMEM;
+}
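
/*
 * Aside: the loop above is classic open addressing: start at the hash,
 * probe forward one 8-byte entry at a time, wrap at the table end, and
 * fail once the start offset comes around again.  A stand-alone model
 * with an in-memory table; sizes and values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define RAMHT_BYTES 4096			/* 512 8-byte entries */

static int probe_insert(uint32_t *handles, uint32_t ho, uint32_t handle)
{
	uint32_t co = ho;

	do {
		if (handles[co / 8] == 0) {	/* empty slot */
			handles[co / 8] = handle;
			return (int)co;		/* byte offset used */
		}
		co += 8;			/* next entry */
		if (co >= RAMHT_BYTES)
			co = 0;			/* wrap */
	} while (co != ho);

	return -1;				/* table exhausted */
}

int main(void)
{
	static uint32_t handles[RAMHT_BYTES / 8];

	printf("%d\n", probe_insert(handles, 0x48, 0xbeef0201));
	return 0;
}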
+
+static void
+nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
+{
+       struct drm_device *dev = chan->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+       struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+       struct nouveau_ramht_entry *entry, *tmp;
+       u32 co, ho;
+
+       list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) {
+               if (entry->channel != chan || entry->handle != handle)
+                       continue;
+
+               nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+               list_del(&entry->head);
+               kfree(entry);
+               break;
+       }
+
+       co = ho = nouveau_ramht_hash_handle(chan, handle);
+       do {
+               if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+                   nouveau_ramht_entry_same_channel(chan, ramht, co) &&
+                   (handle == nv_ro32(ramht, co))) {
+                       NV_DEBUG(dev,
+                                "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+                                chan->id, co, handle, nv_ro32(ramht, co + 4));
+                       nv_wo32(ramht, co + 0, 0x00000000);
+                       nv_wo32(ramht, co + 4, 0x00000000);
+                       instmem->flush(dev);
+                       return;
+               }
+
+               co += 8;
+               if (co >= ramht->size)
+                       co = 0;
+       } while (co != ho);
+
+       NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
+                chan->id, handle);
+}
+
+void
+nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
+{
+       struct nouveau_ramht *ramht = chan->ramht;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ramht->lock, flags);
+       nouveau_ramht_remove_locked(chan, handle);
+       spin_unlock_irqrestore(&ramht->lock, flags);
+}
+
+struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
+{
+       struct nouveau_ramht *ramht = chan->ramht;
+       struct nouveau_ramht_entry *entry;
+       struct nouveau_gpuobj *gpuobj = NULL;
+       unsigned long flags;
+
+       if (unlikely(!chan->ramht))
+               return NULL;
+
+       spin_lock_irqsave(&ramht->lock, flags);
+       list_for_each_entry(entry, &chan->ramht->entries, head) {
+               if (entry->channel == chan && entry->handle == handle) {
+                       gpuobj = entry->gpuobj;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&ramht->lock, flags);
+
+       return gpuobj;
+}
+
+int
+nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+                 struct nouveau_ramht **pramht)
+{
+       struct nouveau_ramht *ramht;
+
+       ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
+       if (!ramht)
+               return -ENOMEM;
+
+       ramht->dev = dev;
+       kref_init(&ramht->refcount);
+       ramht->bits = drm_order(gpuobj->size / 8);
+       INIT_LIST_HEAD(&ramht->entries);
+       spin_lock_init(&ramht->lock);
+       nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
+
+       *pramht = ramht;
+       return 0;
+}
+
+static void
+nouveau_ramht_del(struct kref *ref)
+{
+       struct nouveau_ramht *ramht =
+               container_of(ref, struct nouveau_ramht, refcount);
+
+       nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
+       kfree(ramht);
+}
+
+void
+nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
+                 struct nouveau_channel *chan)
+{
+       struct nouveau_ramht_entry *entry, *tmp;
+       struct nouveau_ramht *ramht;
+       unsigned long flags;
+
+       if (ref)
+               kref_get(&ref->refcount);
+
+       ramht = *ptr;
+       if (ramht) {
+               spin_lock_irqsave(&ramht->lock, flags);
+               list_for_each_entry_safe(entry, tmp, &ramht->entries, head) {
+                       if (entry->channel != chan)
+                               continue;
+
+                       nouveau_ramht_remove_locked(chan, entry->handle);
+               }
+               spin_unlock_irqrestore(&ramht->lock, flags);
+
+               kref_put(&ramht->refcount, nouveau_ramht_del);
+       }
+       *ptr = ref;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
new file mode 100644 (file)
index 0000000..b79cb5e
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_RAMHT_H__
+#define __NOUVEAU_RAMHT_H__
+
+struct nouveau_ramht_entry {
+       struct list_head head;
+       struct nouveau_channel *channel;
+       struct nouveau_gpuobj *gpuobj;
+       u32 handle;
+};
+
+struct nouveau_ramht {
+       struct drm_device *dev;
+       struct kref refcount;
+       spinlock_t lock;
+       struct nouveau_gpuobj *gpuobj;
+       struct list_head entries;
+       int bits;
+};
+
+extern int  nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
+                             struct nouveau_ramht **);
+extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
+                             struct nouveau_channel *unref_channel);
+
+extern int  nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
+                                struct nouveau_gpuobj *);
+extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
+
+#endif
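Taken together, these declarations give each channel a refcounted handle-to-object map. A minimal usage sketch follows; the handle value is arbitrary, error handling is elided, and insert is assumed to take its own reference on the gpuobj (callers elsewhere in this patch drop theirs immediately after inserting):

    /* Illustrative only: publish a gpuobj under a handle, look it up,
     * then unpublish it again.
     */
    static void ramht_usage_sketch(struct nouveau_channel *chan,
                                   struct nouveau_gpuobj *obj)
    {
            struct nouveau_gpuobj *found;
            int ret;

            ret = nouveau_ramht_insert(chan, 0xbeef0001, obj);
            if (ret)
                    return;

            found = nouveau_ramht_find(chan, 0xbeef0001);   /* -> obj */
            nouveau_ramht_remove(chan, 0xbeef0001);         /* drops ref */
            (void)found;
    }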
index 21a6e453b975afb741a1fdb1f288533b20f3d071..1b42541ca9e5144f3dbf4355a91b1ef12fbf8e14 100644 (file)
 #define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
 #define NV03_PFIFO_CACHE1_PULL0                            0x00003240
 #define NV04_PFIFO_CACHE1_PULL0                            0x00003250
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
 #define NV03_PFIFO_CACHE1_PULL1                            0x00003250
 #define NV04_PFIFO_CACHE1_PULL1                            0x00003254
 #define NV04_PFIFO_CACHE1_HASH                             0x00003258
 #define NV50_PDISPLAY_DAC_MODE_CTRL_C(i)                (0x00610b5c + (i) * 0x8)
 #define NV50_PDISPLAY_SOR_MODE_CTRL_P(i)                (0x00610b70 + (i) * 0x8)
 #define NV50_PDISPLAY_SOR_MODE_CTRL_C(i)                (0x00610b74 + (i) * 0x8)
+#define NV50_PDISPLAY_EXT_MODE_CTRL_P(i)                (0x00610b80 + (i) * 0x8)
+#define NV50_PDISPLAY_EXT_MODE_CTRL_C(i)                (0x00610b84 + (i) * 0x8)
 #define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i)               (0x00610bdc + (i) * 0x8)
 #define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i)               (0x00610be0 + (i) * 0x8)
-
 #define NV90_PDISPLAY_SOR_MODE_CTRL_P(i)                (0x00610794 + (i) * 0x8)
 #define NV90_PDISPLAY_SOR_MODE_CTRL_C(i)                (0x00610798 + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i)                (0x00610b58 + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i)                (0x00610b5c + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i)               (0x00610b80 + (i) * 0x8)
-#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i)               (0x00610b84 + (i) * 0x8)
 
 #define NV50_PDISPLAY_CRTC_CLK                                       0x00614000
 #define NV50_PDISPLAY_CRTC_CLK_CTRL1(i)                 ((i) * 0x800 + 0x614100)
index 6b9187d7f67de4383502973ef5cdfc9259ec6f1a..288bacac7e5aefc2f3c6c9b4f461a7e48d6e9ce7 100644 (file)
@@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;
 
-       NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+       NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-       pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+       pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
        nvbe->pte_start = pte;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];
@@ -105,11 +105,13 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
                uint32_t offset_h = upper_32_bits(dma_offset);
 
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-                       if (dev_priv->card_type < NV_50)
-                               nv_wo32(dev, gpuobj, pte++, offset_l | 3);
-                       else {
-                               nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
-                               nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
+                       if (dev_priv->card_type < NV_50) {
+                               nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
+                               pte += 1;
+                       } else {
+                               nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
+                               nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
+                               pte += 2;
                        }
 
                        dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -145,11 +147,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
                dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
 
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-                       if (dev_priv->card_type < NV_50)
-                               nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
-                       else {
-                               nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
-                               nv_wo32(dev, gpuobj, pte++, 0x00000000);
+                       if (dev_priv->card_type < NV_50) {
+                               nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
+                               pte += 1;
+                       } else {
+                               nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
+                               nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
+                               pte += 2;
                        }
 
                        dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -230,7 +234,6 @@ nouveau_sgdma_init(struct drm_device *dev)
        }
 
        ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-                                     NVOBJ_FLAG_ALLOW_NO_REFS |
                                      NVOBJ_FLAG_ZERO_ALLOC |
                                      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
        if (ret) {
@@ -239,9 +242,9 @@ nouveau_sgdma_init(struct drm_device *dev)
        }
 
        dev_priv->gart_info.sg_dummy_page =
-               alloc_page(GFP_KERNEL|__GFP_DMA32);
+               alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
        if (!dev_priv->gart_info.sg_dummy_page) {
-               nouveau_gpuobj_del(dev, &gpuobj);
+               nouveau_gpuobj_ref(NULL, &gpuobj);
                return -ENOMEM;
        }
 
@@ -250,29 +253,34 @@ nouveau_sgdma_init(struct drm_device *dev)
                pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
-               nouveau_gpuobj_del(dev, &gpuobj);
+               nouveau_gpuobj_ref(NULL, &gpuobj);
                return -EFAULT;
        }
 
        if (dev_priv->card_type < NV_50) {
+               /* Special case: allocated from the global instmem heap,
+                * so cinst is invalid.  We use this object on all
+                * channels, though, so cinst needs to be valid; set it
+                * equal to pinst.
+                */
+               gpuobj->cinst = gpuobj->pinst;
+
                /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
                 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
                 * on those cards? */
-               nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
-                                      (1 << 12) /* PT present */ |
-                                      (0 << 13) /* PT *not* linear */ |
-                                      (NV_DMA_ACCESS_RW  << 14) |
-                                      (NV_DMA_TARGET_PCI << 16));
-               nv_wo32(dev, gpuobj, 1, aper_size - 1);
+               nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+                                  (1 << 12) /* PT present */ |
+                                  (0 << 13) /* PT *not* linear */ |
+                                  (NV_DMA_ACCESS_RW  << 14) |
+                                  (NV_DMA_TARGET_PCI << 16));
+               nv_wo32(gpuobj, 4, aper_size - 1);
                for (i = 2; i < 2 + (aper_size >> 12); i++) {
-                       nv_wo32(dev, gpuobj, i,
-                                   dev_priv->gart_info.sg_dummy_bus | 3);
+                       nv_wo32(gpuobj, i * 4,
+                               dev_priv->gart_info.sg_dummy_bus | 3);
                }
        } else {
                for (i = 0; i < obj_size; i += 8) {
-                       nv_wo32(dev, gpuobj, (i+0)/4,
-                                   dev_priv->gart_info.sg_dummy_bus | 0x21);
-                       nv_wo32(dev, gpuobj, (i+4)/4, 0);
+                       nv_wo32(gpuobj, i + 0, 0x00000000);
+                       nv_wo32(gpuobj, i + 4, 0x00000000);
                }
        }
        dev_priv->engine.instmem.flush(dev);
@@ -298,7 +306,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
                dev_priv->gart_info.sg_dummy_bus = 0;
        }
 
-       nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+       nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
 }
 
 int
@@ -308,9 +316,9 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte;
 
-       pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+       pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
        if (dev_priv->card_type < NV_50) {
-               *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+               *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
                return 0;
        }
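The byte-offset conversion in this file also makes the two GART PTE layouts explicit: pre-NV50 chips use one 32-bit PTE per context-DMA page, NV50+ a 64-bit pair. A sketch of the encodings exactly as this patch writes them (the flag meanings are inferred from the surrounding code, not from documentation):

    /* Illustrative only: SGDMA PTE encodings as written by the
     * bind/init paths above.
     */
    static void sgdma_pte_sketch(struct nouveau_gpuobj *gpuobj, u32 pte,
                                 u64 dma_addr, bool nv50_plus)
    {
            if (!nv50_plus) {
                    /* one dword: page address ORed with flags 0x3 */
                    nv_wo32(gpuobj, pte * 4, lower_32_bits(dma_addr) | 3);
            } else {
                    /* dword pair: low address | 0x21, then high bits */
                    nv_wo32(gpuobj, (pte * 4) + 0,
                            lower_32_bits(dma_addr) | 0x21);
                    nv_wo32(gpuobj, (pte * 4) + 4,
                            upper_32_bits(dma_addr) & 0xff);
            }
    }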
 
index 989322be37287af5f4ba555fd7041889222365fd..ed7757f14083a17e3122250ca83aa8ee2959ddec 100644 (file)
@@ -35,6 +35,8 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_ramht.h"
+#include "nouveau_pm.h"
 #include "nv50_display.h"
 
 static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -78,7 +80,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->fifo.disable            = nv04_fifo_disable;
                engine->fifo.enable             = nv04_fifo_enable;
                engine->fifo.reassign           = nv04_fifo_reassign;
-               engine->fifo.cache_flush        = nv04_fifo_cache_flush;
                engine->fifo.cache_pull         = nv04_fifo_cache_pull;
                engine->fifo.channel_id         = nv04_fifo_channel_id;
                engine->fifo.create_context     = nv04_fifo_create_context;
@@ -95,6 +96,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->gpio.get                = NULL;
                engine->gpio.set                = NULL;
                engine->gpio.irq_enable         = NULL;
+               engine->pm.clock_get            = nv04_pm_clock_get;
+               engine->pm.clock_pre            = nv04_pm_clock_pre;
+               engine->pm.clock_set            = nv04_pm_clock_set;
                break;
        case 0x10:
                engine->instmem.init            = nv04_instmem_init;
@@ -130,7 +134,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->fifo.disable            = nv04_fifo_disable;
                engine->fifo.enable             = nv04_fifo_enable;
                engine->fifo.reassign           = nv04_fifo_reassign;
-               engine->fifo.cache_flush        = nv04_fifo_cache_flush;
                engine->fifo.cache_pull         = nv04_fifo_cache_pull;
                engine->fifo.channel_id         = nv10_fifo_channel_id;
                engine->fifo.create_context     = nv10_fifo_create_context;
@@ -147,6 +150,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->gpio.get                = nv10_gpio_get;
                engine->gpio.set                = nv10_gpio_set;
                engine->gpio.irq_enable         = NULL;
+               engine->pm.clock_get            = nv04_pm_clock_get;
+               engine->pm.clock_pre            = nv04_pm_clock_pre;
+               engine->pm.clock_set            = nv04_pm_clock_set;
                break;
        case 0x20:
                engine->instmem.init            = nv04_instmem_init;
@@ -182,7 +188,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->fifo.disable            = nv04_fifo_disable;
                engine->fifo.enable             = nv04_fifo_enable;
                engine->fifo.reassign           = nv04_fifo_reassign;
-               engine->fifo.cache_flush        = nv04_fifo_cache_flush;
                engine->fifo.cache_pull         = nv04_fifo_cache_pull;
                engine->fifo.channel_id         = nv10_fifo_channel_id;
                engine->fifo.create_context     = nv10_fifo_create_context;
@@ -199,6 +204,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->gpio.get                = nv10_gpio_get;
                engine->gpio.set                = nv10_gpio_set;
                engine->gpio.irq_enable         = NULL;
+               engine->pm.clock_get            = nv04_pm_clock_get;
+               engine->pm.clock_pre            = nv04_pm_clock_pre;
+               engine->pm.clock_set            = nv04_pm_clock_set;
                break;
        case 0x30:
                engine->instmem.init            = nv04_instmem_init;
@@ -234,7 +242,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->fifo.disable            = nv04_fifo_disable;
                engine->fifo.enable             = nv04_fifo_enable;
                engine->fifo.reassign           = nv04_fifo_reassign;
-               engine->fifo.cache_flush        = nv04_fifo_cache_flush;
                engine->fifo.cache_pull         = nv04_fifo_cache_pull;
                engine->fifo.channel_id         = nv10_fifo_channel_id;
                engine->fifo.create_context     = nv10_fifo_create_context;
@@ -251,6 +258,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->gpio.get                = nv10_gpio_get;
                engine->gpio.set                = nv10_gpio_set;
                engine->gpio.irq_enable         = NULL;
+               engine->pm.clock_get            = nv04_pm_clock_get;
+               engine->pm.clock_pre            = nv04_pm_clock_pre;
+               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->pm.voltage_get          = nouveau_voltage_gpio_get;
+               engine->pm.voltage_set          = nouveau_voltage_gpio_set;
                break;
        case 0x40:
        case 0x60:
@@ -287,7 +299,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->fifo.disable            = nv04_fifo_disable;
                engine->fifo.enable             = nv04_fifo_enable;
                engine->fifo.reassign           = nv04_fifo_reassign;
-               engine->fifo.cache_flush        = nv04_fifo_cache_flush;
                engine->fifo.cache_pull         = nv04_fifo_cache_pull;
                engine->fifo.channel_id         = nv10_fifo_channel_id;
                engine->fifo.create_context     = nv40_fifo_create_context;
@@ -304,6 +315,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->gpio.get                = nv10_gpio_get;
                engine->gpio.set                = nv10_gpio_set;
                engine->gpio.irq_enable         = NULL;
+               engine->pm.clock_get            = nv04_pm_clock_get;
+               engine->pm.clock_pre            = nv04_pm_clock_pre;
+               engine->pm.clock_set            = nv04_pm_clock_set;
+               engine->pm.voltage_get          = nouveau_voltage_gpio_get;
+               engine->pm.voltage_set          = nouveau_voltage_gpio_set;
+               engine->pm.temp_get             = nv40_temp_get;
                break;
        case 0x50:
        case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -358,6 +375,27 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->gpio.get                = nv50_gpio_get;
                engine->gpio.set                = nv50_gpio_set;
                engine->gpio.irq_enable         = nv50_gpio_irq_enable;
+               switch (dev_priv->chipset) {
+               case 0xa3:
+               case 0xa5:
+               case 0xa8:
+               case 0xaf:
+                       engine->pm.clock_get    = nva3_pm_clock_get;
+                       engine->pm.clock_pre    = nva3_pm_clock_pre;
+                       engine->pm.clock_set    = nva3_pm_clock_set;
+                       break;
+               default:
+                       engine->pm.clock_get    = nv50_pm_clock_get;
+                       engine->pm.clock_pre    = nv50_pm_clock_pre;
+                       engine->pm.clock_set    = nv50_pm_clock_set;
+                       break;
+               }
+               engine->pm.voltage_get          = nouveau_voltage_gpio_get;
+               engine->pm.voltage_set          = nouveau_voltage_gpio_set;
+               if (dev_priv->chipset >= 0x84)
+                       engine->pm.temp_get     = nv84_temp_get;
+               else
+                       engine->pm.temp_get     = nv40_temp_get;
                break;
        case 0xC0:
                engine->instmem.init            = nvc0_instmem_init;
@@ -437,16 +475,14 @@ static int
 nouveau_card_init_channel(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpuobj *gpuobj;
+       struct nouveau_gpuobj *gpuobj = NULL;
        int ret;
 
        ret = nouveau_channel_alloc(dev, &dev_priv->channel,
-                                   (struct drm_file *)-2,
-                                   NvDmaFB, NvDmaTT);
+                                   (struct drm_file *)-2, NvDmaFB, NvDmaTT);
        if (ret)
                return ret;
 
-       gpuobj = NULL;
        ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
                                     0, dev_priv->vram_size,
                                     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
@@ -454,26 +490,25 @@ nouveau_card_init_channel(struct drm_device *dev)
        if (ret)
                goto out_err;
 
-       ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
-                                    gpuobj, NULL);
+       ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
+       nouveau_gpuobj_ref(NULL, &gpuobj);
        if (ret)
                goto out_err;
 
-       gpuobj = NULL;
        ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
                                          dev_priv->gart_info.aper_size,
                                          NV_DMA_ACCESS_RW, &gpuobj, NULL);
        if (ret)
                goto out_err;
 
-       ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
-                                    gpuobj, NULL);
+       ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
+       nouveau_gpuobj_ref(NULL, &gpuobj);
        if (ret)
                goto out_err;
 
        return 0;
+
 out_err:
-       nouveau_gpuobj_del(dev, &gpuobj);
        nouveau_channel_free(dev_priv->channel);
        dev_priv->channel = NULL;
        return ret;
@@ -534,35 +569,28 @@ nouveau_card_init(struct drm_device *dev)
        if (ret)
                goto out_display_early;
 
-       ret = nouveau_mem_detect(dev);
+       nouveau_pm_init(dev);
+
+       ret = nouveau_mem_vram_init(dev);
        if (ret)
                goto out_bios;
 
-       ret = nouveau_gpuobj_early_init(dev);
+       ret = nouveau_gpuobj_init(dev);
        if (ret)
-               goto out_bios;
+               goto out_vram;
 
-       /* Initialise instance memory, must happen before mem_init so we
-        * know exactly how much VRAM we're able to use for "normal"
-        * purposes.
-        */
        ret = engine->instmem.init(dev);
        if (ret)
-               goto out_gpuobj_early;
+               goto out_gpuobj;
 
-       /* Setup the memory manager */
-       ret = nouveau_mem_init(dev);
+       ret = nouveau_mem_gart_init(dev);
        if (ret)
                goto out_instmem;
 
-       ret = nouveau_gpuobj_init(dev);
-       if (ret)
-               goto out_mem;
-
        /* PMC */
        ret = engine->mc.init(dev);
        if (ret)
-               goto out_gpuobj;
+               goto out_gart;
 
        /* PGPIO */
        ret = engine->gpio.init(dev);
@@ -611,9 +639,13 @@ nouveau_card_init(struct drm_device *dev)
        /* what about PVIDEO/PCRTC/PRAMDAC etc? */
 
        if (!engine->graph.accel_blocked) {
-               ret = nouveau_card_init_channel(dev);
+               ret = nouveau_fence_init(dev);
                if (ret)
                        goto out_irq;
+
+               ret = nouveau_card_init_channel(dev);
+               if (ret)
+                       goto out_fence;
        }
 
        ret = nouveau_backlight_init(dev);
@@ -624,6 +656,8 @@ nouveau_card_init(struct drm_device *dev)
        drm_kms_helper_poll_init(dev);
        return 0;
 
+out_fence:
+       nouveau_fence_fini(dev);
 out_irq:
        drm_irq_uninstall(dev);
 out_display:
@@ -642,16 +676,16 @@ out_gpio:
        engine->gpio.takedown(dev);
 out_mc:
        engine->mc.takedown(dev);
-out_gpuobj:
-       nouveau_gpuobj_takedown(dev);
-out_mem:
-       nouveau_sgdma_takedown(dev);
-       nouveau_mem_close(dev);
+out_gart:
+       nouveau_mem_gart_fini(dev);
 out_instmem:
        engine->instmem.takedown(dev);
-out_gpuobj_early:
-       nouveau_gpuobj_late_takedown(dev);
+out_gpuobj:
+       nouveau_gpuobj_takedown(dev);
+out_vram:
+       nouveau_mem_vram_fini(dev);
 out_bios:
+       nouveau_pm_fini(dev);
        nouveau_bios_takedown(dev);
 out_display_early:
        engine->display.late_takedown(dev);
@@ -667,7 +701,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
 
        nouveau_backlight_exit(dev);
 
-       if (dev_priv->channel) {
+       if (!engine->graph.accel_blocked) {
+               nouveau_fence_fini(dev);
                nouveau_channel_free(dev_priv->channel);
                dev_priv->channel = NULL;
        }
@@ -686,15 +721,15 @@ static void nouveau_card_takedown(struct drm_device *dev)
        ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
        mutex_unlock(&dev->struct_mutex);
-       nouveau_sgdma_takedown(dev);
+       nouveau_mem_gart_fini(dev);
 
-       nouveau_gpuobj_takedown(dev);
-       nouveau_mem_close(dev);
        engine->instmem.takedown(dev);
+       nouveau_gpuobj_takedown(dev);
+       nouveau_mem_vram_fini(dev);
 
        drm_irq_uninstall(dev);
 
-       nouveau_gpuobj_late_takedown(dev);
+       nouveau_pm_fini(dev);
        nouveau_bios_takedown(dev);
 
        vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -1057,7 +1092,7 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
 /* Waits for PGRAPH to go completely idle */
 bool nouveau_wait_for_idle(struct drm_device *dev)
 {
-       if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+       if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
                NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
                         nv_rd32(dev, NV04_PGRAPH_STATUS));
                return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
new file mode 100644 (file)
index 0000000..16bbbf1
--- /dev/null
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2010 PathScale inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static void
+nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
+       struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
+       int i, headerlen, recordlen, entries;
+
+       if (!temp) {
+               NV_DEBUG(dev, "temperature table pointer invalid\n");
+               return;
+       }
+
+       /* Set the default sensor constants */
+       sensor->offset_constant = 0;
+       sensor->offset_mult = 1;
+       sensor->offset_div = 1;
+       sensor->slope_mult = 1;
+       sensor->slope_div = 1;
+
+       /* Set the default temperature thresholds */
+       temps->critical = 110;
+       temps->down_clock = 100;
+       temps->fan_boost = 90;
+
+       /* Set the known default values to set up the temperature sensor */
+       if (dev_priv->card_type >= NV_40) {
+               switch (dev_priv->chipset) {
+               case 0x43:
+                       sensor->offset_mult = 32060;
+                       sensor->offset_div = 1000;
+                       sensor->slope_mult = 792;
+                       sensor->slope_div = 1000;
+                       break;
+
+               case 0x44:
+               case 0x47:
+               case 0x4a:
+                       sensor->offset_mult = 27839;
+                       sensor->offset_div = 1000;
+                       sensor->slope_mult = 780;
+                       sensor->slope_div = 1000;
+                       break;
+
+               case 0x46:
+                       sensor->offset_mult = -24775;
+                       sensor->offset_div = 100;
+                       sensor->slope_mult = 467;
+                       sensor->slope_div = 10000;
+                       break;
+
+               case 0x49:
+                       sensor->offset_mult = -25051;
+                       sensor->offset_div = 100;
+                       sensor->slope_mult = 458;
+                       sensor->slope_div = 10000;
+                       break;
+
+               case 0x4b:
+                       sensor->offset_mult = -24088;
+                       sensor->offset_div = 100;
+                       sensor->slope_mult = 442;
+                       sensor->slope_div = 10000;
+                       break;
+
+               case 0x50:
+                       sensor->offset_mult = -22749;
+                       sensor->offset_div = 100;
+                       sensor->slope_mult = 431;
+                       sensor->slope_div = 10000;
+                       break;
+               }
+       }
+
+       headerlen = temp[1];
+       recordlen = temp[2];
+       entries = temp[3];
+       temp = temp + headerlen;
+
+       /* Read the entries from the table */
+       for (i = 0; i < entries; i++) {
+               u16 value = ROM16(temp[1]);
+
+               switch (temp[0]) {
+               case 0x01:
+                       if ((value & 0x8f) == 0)
+                               sensor->offset_constant = (value >> 9) & 0x7f;
+                       break;
+
+               case 0x04:
+                       if ((value & 0xf00f) == 0xa000) /* core */
+                               temps->critical = (value & 0x0ff0) >> 4;
+                       break;
+
+               case 0x07:
+                       if ((value & 0xf00f) == 0xa000) /* core */
+                               temps->down_clock = (value & 0x0ff0) >> 4;
+                       break;
+
+               case 0x08:
+                       if ((value & 0xf00f) == 0xa000) /* core */
+                               temps->fan_boost = (value & 0x0ff0) >> 4;
+                       break;
+
+               case 0x10:
+                       sensor->offset_mult = value;
+                       break;
+
+               case 0x11:
+                       sensor->offset_div = value;
+                       break;
+
+               case 0x12:
+                       sensor->slope_mult = value;
+                       break;
+
+               case 0x13:
+                       sensor->slope_div = value;
+                       break;
+               }
+               temp += recordlen;
+       }
+
+       nouveau_temp_safety_checks(dev);
+}
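To make the record decoding concrete: a hypothetical id-0x04 record carrying the value 0xa6e0 passes the (value & 0xf00f) == 0xa000 core-sensor check and yields a critical threshold of (0xa6e0 & 0x0ff0) >> 4 = 0x6e = 110 degrees, which nouveau_temp_safety_checks() then leaves untouched since it already sits inside the 80-120 clamp range.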
+
+static int
+nv40_sensor_setup(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
+       u32 offset = sensor->offset_mult / sensor->offset_div;
+       u32 sensor_calibration;
+
+       /* set up the sensors */
+       sensor_calibration = 120 - offset - sensor->offset_constant;
+       sensor_calibration = sensor_calibration * sensor->slope_div /
+                               sensor->slope_mult;
+
+       if (dev_priv->chipset >= 0x46)
+               sensor_calibration |= 0x80000000;
+       else
+               sensor_calibration |= 0x10000000;
+
+       nv_wr32(dev, 0x0015b0, sensor_calibration);
+
+       /* Wait for the sensor to update */
+       msleep(5);
+
+       /* read */
+       return nv_rd32(dev, 0x0015b4) & 0x1fff;
+}
+
+int
+nv40_temp_get(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
+       int offset = sensor->offset_mult / sensor->offset_div;
+       int core_temp;
+
+       if (dev_priv->chipset >= 0x50) {
+               core_temp = nv_rd32(dev, 0x20008);
+       } else {
+               core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
+               /* Set up the sensor if the temperature is 0 */
+               if (core_temp == 0)
+                       core_temp = nv40_sensor_setup(dev);
+       }
+
+       core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
+       core_temp = core_temp + offset + sensor->offset_constant;
+
+       return core_temp;
+}
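A worked example with the NV43 constants from the table above (offset_mult = 32060, offset_div = 1000, slope 792/1000, offset_constant = 0) and a hypothetical raw reading of 50: offset = 32060 / 1000 = 32 by integer division, so core_temp = 50 * 792 / 1000 + 32 + 0 = 39 + 32 = 71 degrees. Only the raw reading is invented here; the constants come from this patch.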
+
+int
+nv84_temp_get(struct drm_device *dev)
+{
+       return nv_rd32(dev, 0x20400);
+}
+
+void
+nouveau_temp_safety_checks(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
+
+       if (temps->critical > 120)
+               temps->critical = 120;
+       else if (temps->critical < 80)
+               temps->critical = 80;
+
+       if (temps->down_clock > 110)
+               temps->down_clock = 110;
+       else if (temps->down_clock < 60)
+               temps->down_clock = 60;
+
+       if (temps->fan_boost > 100)
+               temps->fan_boost = 100;
+       else if (temps->fan_boost < 40)
+               temps->fan_boost = 40;
+}
+
+static bool
+probe_monitoring_device(struct nouveau_i2c_chan *i2c,
+                       struct i2c_board_info *info)
+{
+       char modalias[16] = "i2c:";
+       struct i2c_client *client;
+
+       strlcat(modalias, info->type, sizeof(modalias));
+       request_module(modalias);
+
+       client = i2c_new_device(&i2c->adapter, info);
+       if (!client)
+               return false;
+
+       if (!client->driver || client->driver->detect(client, info)) {
+               i2c_unregister_device(client);
+               return false;
+       }
+
+       return true;
+}
+
+static void
+nouveau_temp_probe_i2c(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct dcb_table *dcb = &dev_priv->vbios.dcb;
+       struct i2c_board_info info[] = {
+               { I2C_BOARD_INFO("w83l785ts", 0x2d) },
+               { I2C_BOARD_INFO("w83781d", 0x2d) },
+               { I2C_BOARD_INFO("f75375", 0x2e) },
+               { I2C_BOARD_INFO("adt7473", 0x2e) },
+               { I2C_BOARD_INFO("lm99", 0x4c) },
+               { }
+       };
+       int idx = (dcb->version >= 0x40 ?
+                  dcb->i2c_default_indices & 0xf : 2);
+
+       nouveau_i2c_identify(dev, "monitoring device", info,
+                            probe_monitoring_device, idx);
+}
+
+void
+nouveau_temp_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct bit_entry P;
+       u8 *temp = NULL;
+
+       if (bios->type == NVBIOS_BIT) {
+               if (bit_table(dev, 'P', &P))
+                       return;
+
+               if (P.version == 1)
+                       temp = ROMPTR(bios, P.data[12]);
+               else if (P.version == 2)
+                       temp = ROMPTR(bios, P.data[16]);
+               else
+                       NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
+
+               nouveau_temp_vbios_parse(dev, temp);
+       }
+
+       nouveau_temp_probe_i2c(dev);
+}
+
+void
+nouveau_temp_fini(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
new file mode 100644 (file)
index 0000000..04fdc00
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a };
+static const int nr_vidtag = ARRAY_SIZE(vidtag);
+
+int
+nouveau_voltage_gpio_get(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+       struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+       u8 vid = 0;
+       int i;
+
+       for (i = 0; i < nr_vidtag; i++) {
+               if (!(volt->vid_mask & (1 << i)))
+                       continue;
+
+               vid |= gpio->get(dev, vidtag[i]) << i;
+       }
+
+       return nouveau_volt_lvl_lookup(dev, vid);
+}
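For example, with vid_mask = 0x3 only GPIO tags 0x04 and 0x05 participate; if the first line reads 1 and the second 0, the assembled vid is 0b01 = 1, and nouveau_volt_lvl_lookup() maps that VID to whatever voltage value the VBIOS table recorded for it. The GPIO readings here are hypothetical.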
+
+int
+nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+       struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+       int vid, i;
+
+       vid = nouveau_volt_vid_lookup(dev, voltage);
+       if (vid < 0)
+               return vid;
+
+       for (i = 0; i < nr_vidtag; i++) {
+               if (!(volt->vid_mask & (1 << i)))
+                       continue;
+
+               gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
+       }
+
+       return 0;
+}
+
+int
+nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+       int i;
+
+       for (i = 0; i < volt->nr_level; i++) {
+               if (volt->level[i].voltage == voltage)
+                       return volt->level[i].vid;
+       }
+
+       return -ENOENT;
+}
+
+int
+nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+       int i;
+
+       for (i = 0; i < volt->nr_level; i++) {
+               if (volt->level[i].vid == vid)
+                       return volt->level[i].voltage;
+       }
+
+       return -ENOENT;
+}
+
+void
+nouveau_volt_init(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+       struct nouveau_pm_voltage *voltage = &pm->voltage;
+       struct nvbios *bios = &dev_priv->vbios;
+       struct bit_entry P;
+       u8 *volt = NULL, *entry;
+       int i, headerlen, recordlen, entries, vidmask, vidshift;
+
+       if (bios->type == NVBIOS_BIT) {
+               if (bit_table(dev, 'P', &P))
+                       return;
+
+               if (P.version == 1)
+                       volt = ROMPTR(bios, P.data[16]);
+               else if (P.version == 2)
+                       volt = ROMPTR(bios, P.data[12]);
+               else
+                       NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
+       } else {
+               if (bios->data[bios->offset + 6] < 0x27) {
+                       NV_DEBUG(dev, "BMP version too old for voltage\n");
+                       return;
+               }
+
+               volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
+       }
+
+       if (!volt) {
+               NV_DEBUG(dev, "voltage table pointer invalid\n");
+               return;
+       }
+
+       switch (volt[0]) {
+       case 0x10:
+       case 0x11:
+       case 0x12:
+               headerlen = 5;
+               recordlen = volt[1];
+               entries   = volt[2];
+               vidshift  = 0;
+               vidmask   = volt[4];
+               break;
+       case 0x20:
+               headerlen = volt[1];
+               recordlen = volt[3];
+               entries   = volt[2];
+               vidshift  = 0; /* could there be a vidshift here, as in 0x30? */
+               vidmask   = volt[5];
+               break;
+       case 0x30:
+               headerlen = volt[1];
+               recordlen = volt[2];
+               entries   = volt[3];
+               vidshift  = hweight8(volt[5]);
+               vidmask   = volt[4];
+               break;
+       default:
+               NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
+               return;
+       }
+
+       /* validate vid mask */
+       voltage->vid_mask = vidmask;
+       if (!voltage->vid_mask)
+               return;
+
+       i = 0;
+       while (vidmask) {
+               if (i >= nr_vidtag) {
+                       NV_DEBUG(dev, "vid bit %d unknown\n", i);
+                       return;
+               }
+
+               if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
+                       NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
+                       return;
+               }
+
+               vidmask >>= 1;
+               i++;
+       }
+
+       /* parse vbios entries into common format */
+       voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
+       if (!voltage->level)
+               return;
+
+       entry = volt + headerlen;
+       for (i = 0; i < entries; i++, entry += recordlen) {
+               voltage->level[i].voltage = entry[0];
+               voltage->level[i].vid     = entry[1] >> vidshift;
+       }
+       voltage->nr_level  = entries;
+       voltage->supported = true;
+}
+
+void
+nouveau_volt_fini(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+
+       kfree(volt->level);
+}
index 497df8765f28489c0dbaf34f0cdde137de46c80c..c71abc2a34d5d9cf22a01d3c3bf455a29c5d1dbf 100644 (file)
@@ -33,6 +33,7 @@
 #include "nouveau_fb.h"
 #include "nouveau_hw.h"
 #include "nvreg.h"
+#include "nouveau_fbcon.h"
 
 static int
 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -109,7 +110,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
        struct nouveau_pll_vals *pv = &regp->pllvals;
        struct pll_lims pll_lim;
 
-       if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
+       if (get_pll_limits(dev, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, &pll_lim))
                return;
 
        /* NM2 == 0 is used to determine single stage mode on two stage plls */
@@ -718,6 +719,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 
        drm_crtc_cleanup(crtc);
 
+       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
        kfree(nv_crtc);
 }
@@ -768,8 +770,9 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
 }
 
 static int
-nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-                       struct drm_framebuffer *old_fb)
+nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
+                          struct drm_framebuffer *passed_fb,
+                          int x, int y, bool atomic)
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct drm_device *dev = crtc->dev;
@@ -780,13 +783,26 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        int arb_burst, arb_lwm;
        int ret;
 
-       ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-       if (ret)
-               return ret;
+       /* If atomic, we want to switch to the fb we were passed, so
+        * now we update pointers to do that.  (We don't pin; just
+        * assume we're already pinned and update the base address.)
+        */
+       if (atomic) {
+               drm_fb = passed_fb;
+               fb = nouveau_framebuffer(passed_fb);
+       } else {
+               /* If not atomic, we can go ahead and pin, and unpin the
+                * old fb we were passed.
+                */
+               ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+               if (ret)
+                       return ret;
 
-       if (old_fb) {
-               struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
-               nouveau_bo_unpin(ofb->nvbo);
+               if (passed_fb) {
+                       struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
+                       nouveau_bo_unpin(ofb->nvbo);
+               }
        }
 
        nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -826,7 +842,7 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
        crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
 
-       if (dev_priv->card_type >= NV_30) {
+       if (dev_priv->card_type >= NV_20) {
                regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
                crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
        }
@@ -834,6 +850,29 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        return 0;
 }
 
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+                       struct drm_framebuffer *old_fb)
+{
+       return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+}
+
+static int
+nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+                              struct drm_framebuffer *fb,
+                              int x, int y, enum mode_set_atomic state)
+{
+       struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
+
+       if (state == ENTER_ATOMIC_MODE_SET)
+               nouveau_fbcon_save_disable_accel(dev);
+       else
+               nouveau_fbcon_restore_accel(dev);
+
+       return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true);
+}
+
 static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
                               struct nouveau_bo *dst)
 {
@@ -962,6 +1001,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
        .mode_fixup = nv_crtc_mode_fixup,
        .mode_set = nv_crtc_mode_set,
        .mode_set_base = nv04_crtc_mode_set_base,
+       .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
        .load_lut = nv_crtc_gamma_load,
 };
 
index ea3627041ecf8e7eb4a59199d8c1c7e1e64fd437..ba6423f2ffccb2e5e7f2a8c9ea431c4aa087cec2 100644 (file)
@@ -291,6 +291,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
        msleep(5);
 
        sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+       /* do it again just in case it's a residual current */
+       sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
 
        temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -343,22 +345,13 @@ static void nv04_dac_prepare(struct drm_encoder *encoder)
 {
        struct drm_encoder_helper_funcs *helper = encoder->helper_private;
        struct drm_device *dev = encoder->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
        int head = nouveau_crtc(encoder->crtc)->index;
-       struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
 
        helper->dpms(encoder, DRM_MODE_DPMS_OFF);
 
        nv04_dfp_disable(dev, head);
-
-       /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
-        * at LCD__INDEX which we don't alter
-        */
-       if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
-               crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
 }
 
-
 static void nv04_dac_mode_set(struct drm_encoder *encoder,
                              struct drm_display_mode *mode,
                              struct drm_display_mode *adjusted_mode)
index 0d3206a7046cb008814237250d5fa9fe2502ac2c..c936403b26e22f7b4bcc1b4fee4fa6809498689b 100644 (file)
@@ -104,6 +104,8 @@ void nv04_dfp_disable(struct drm_device *dev, int head)
        }
        /* don't inadvertently turn it on when state written later */
        crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
+       crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &=
+               ~NV_CIO_CRE_LCD_ROUTE_MASK;
 }
 
 void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
@@ -253,26 +255,21 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
 
        nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
 
-       /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
-        * at LCD__INDEX which we don't alter
-        */
-       if (!(*cr_lcd & 0x44)) {
-               *cr_lcd = 0x3;
-
-               if (nv_two_heads(dev)) {
-                       if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
-                               *cr_lcd |= head ? 0x0 : 0x8;
-                       else {
-                               *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
-                               if (nv_encoder->dcb->type == OUTPUT_LVDS)
-                                       *cr_lcd |= 0x30;
-                               if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
-                                       /* avoid being connected to both crtcs */
-                                       *cr_lcd_oth &= ~0x30;
-                                       NVWriteVgaCrtc(dev, head ^ 1,
-                                                      NV_CIO_CRE_LCD__INDEX,
-                                                      *cr_lcd_oth);
-                               }
+       *cr_lcd = (*cr_lcd & ~NV_CIO_CRE_LCD_ROUTE_MASK) | 0x3;
+
+       if (nv_two_heads(dev)) {
+               if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
+                       *cr_lcd |= head ? 0x0 : 0x8;
+               else {
+                       *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
+                       if (nv_encoder->dcb->type == OUTPUT_LVDS)
+                               *cr_lcd |= 0x30;
+                       if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
+                               /* avoid being connected to both crtcs */
+                               *cr_lcd_oth &= ~0x30;
+                               NVWriteVgaCrtc(dev, head ^ 1,
+                                              NV_CIO_CRE_LCD__INDEX,
+                                              *cr_lcd_oth);
                        }
                }
        }
@@ -640,7 +637,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
            get_tmds_slave(encoder))
                return;
 
-       type = nouveau_i2c_identify(dev, "TMDS transmitter", info, 2);
+       type = nouveau_i2c_identify(dev, "TMDS transmitter", info, NULL, 2);
        if (type < 0)
                return;
 
index 1eeac4fae73dc244b252d43976b2d8d5ccf8ce2e..33e4c9388bc1b8f787f5bc89e7350670098a7722 100644 (file)
@@ -25,6 +25,7 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
 
 void
@@ -169,11 +170,9 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
-       if (ret)
-               return ret;
-
-       return 0;
+       ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
+       nouveau_gpuobj_ref(NULL, &obj);
+       return ret;
 }
 
 int
index 06cedd99c26a3a72e4fc18d1f743a19a374a8706..708293b7ddcd5ad36fc184271b274753e29d71b8 100644 (file)
@@ -27,8 +27,9 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 
-#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
+#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
 #define NV04_RAMFC__SIZE 32
 #define NV04_RAMFC_DMA_PUT                                       0x00
 #define NV04_RAMFC_DMA_GET                                       0x04
 #define NV04_RAMFC_ENGINE                                        0x14
 #define NV04_RAMFC_PULL1_ENGINE                                  0x18
 
-#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
-                                        NV04_RAMFC_##offset/4, (val))
-#define RAMFC_RD(offset)      nv_ro32(dev, chan->ramfc->gpuobj, \
-                                        NV04_RAMFC_##offset/4)
+#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
+#define RAMFC_RD(offset)      nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
 
 void
 nv04_fifo_disable(struct drm_device *dev)
@@ -71,38 +70,33 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
        return (reassign == 1);
 }
 
-bool
-nv04_fifo_cache_flush(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-       uint64_t start = ptimer->read(dev);
-
-       do {
-               if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
-                   nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
-                       return true;
-
-       } while (ptimer->read(dev) - start < 100000000);
-
-       NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
-
-       return false;
-}
-
 bool
 nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
 {
-       uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
+       int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
+
+       if (!enable) {
+               /* In some cases the PFIFO puller may be left in an
+                * inconsistent state if you try to stop it when it's
+                * busy translating handles. Sometimes you get a
+                * PFIFO_CACHE_ERROR, sometimes it just fails silently,
+                * sending incorrect instance offsets to PGRAPH after
+                * it's started up again. To avoid the latter we
+                * invalidate the most recently calculated instance.
+                */
+               if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
+                            NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
+                       NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
+
+               if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
+                   NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+                       nv_wr32(dev, NV03_PFIFO_INTR_0,
+                               NV_PFIFO_INTR_CACHE_ERROR);
 
-       if (enable) {
-               nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
-       } else {
-               nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
                nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
        }
 
-       return !!(pull & 1);
+       return pull & 1;
 }
 
 int
@@ -130,7 +124,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
                                                NV04_RAMFC__SIZE,
                                                NVOBJ_FLAG_ZERO_ALLOC |
                                                NVOBJ_FLAG_ZERO_FREE,
-                                               NULL, &chan->ramfc);
+                                               &chan->ramfc);
        if (ret)
                return ret;
 
@@ -139,7 +133,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
        /* Setup initial state */
        RAMFC_WR(DMA_PUT, chan->pushbuf_base);
        RAMFC_WR(DMA_GET, chan->pushbuf_base);
-       RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
+       RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
        RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
                             NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
                             NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -161,7 +155,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan)
        nv_wr32(dev, NV04_PFIFO_MODE,
                nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
 
-       nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+       nouveau_gpuobj_ref(NULL, &chan->ramfc);
 }
 
 static void
@@ -264,10 +258,10 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
        nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-                                      ((dev_priv->ramht_bits - 9) << 16) |
-                                      (dev_priv->ramht_offset >> 8));
-       nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
-       nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+                                      ((dev_priv->ramht->bits - 9) << 16) |
+                                      (dev_priv->ramht->gpuobj->pinst >> 8));
+       nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+       nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
 }
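Restating what the RAMHT write above encodes, field by field (taken from the expression and its in-line comment, not from hardware documentation):

    /* (0x03 << 24)                 search depth: 128 bytes
     * ((ramht->bits - 9) << 16)    log2(number of entries) - 9
     * (ramht->gpuobj->pinst >> 8)  RAMHT base within RAMIN, in 256-byte units
     */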
 
 static void
index 4408232d33f179805205763974beede957ddd6f1..0b5ae297abdecb0d4403bedf8b4d84ad75940ef9 100644 (file)
@@ -1,6 +1,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 
 /* returns the size of fifo context */
 static int
@@ -17,102 +18,51 @@ nouveau_fifo_ctx_size(struct drm_device *dev)
        return 32;
 }
 
-static void
-nv04_instmem_determine_amount(struct drm_device *dev)
+int nv04_instmem_init(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       int i;
+       struct nouveau_gpuobj *ramht = NULL;
+       u32 offset, length;
+       int ret;
 
-       /* Figure out how much instance memory we need */
-       if (dev_priv->card_type >= NV_40) {
-               /* We'll want more instance memory than this on some NV4x cards.
-                * There's a 16MB aperture to play with that maps onto the end
-                * of vram.  For now, only reserve a small piece until we know
-                * more about what each chipset requires.
-                */
-               switch (dev_priv->chipset) {
-               case 0x40:
-               case 0x47:
-               case 0x49:
-               case 0x4b:
-                       dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
-                       break;
-               default:
-                       dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
-                       break;
-               }
-       } else {
-               /*XXX: what *are* the limits on <NV40 cards?
-                */
-               dev_priv->ramin_rsvd_vram = (512 * 1024);
-       }
-       NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
+       /* RAMIN always available */
+       dev_priv->ramin_available = true;
 
-       /* Clear all of it, except the BIOS image that's in the first 64KiB */
-       for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
-               nv_wi32(dev, i, 0x00000000);
-}
+       /* Setup shared RAMHT */
+       ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
+                                     NVOBJ_FLAG_ZERO_ALLOC, &ramht);
+       if (ret)
+               return ret;
 
-static void
-nv04_instmem_configure_fixed_tables(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_engine *engine = &dev_priv->engine;
+       ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
+       nouveau_gpuobj_ref(NULL, &ramht);
+       if (ret)
+               return ret;
 
-       /* FIFO hash table (RAMHT)
-        *   use 4k hash table at RAMIN+0x10000
-        *   TODO: extend the hash table
-        */
-       dev_priv->ramht_offset = 0x10000;
-       dev_priv->ramht_bits   = 9;
-       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits); /* nr entries */
-       dev_priv->ramht_size  *= 8; /* 2 32-bit values per entry in RAMHT */
-       NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
-                                                     dev_priv->ramht_size);
-
-       /* FIFO runout table (RAMRO) - 512k at 0x11200 */
-       dev_priv->ramro_offset = 0x11200;
-       dev_priv->ramro_size   = 512;
-       NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
-                                                     dev_priv->ramro_size);
-
-       /* FIFO context table (RAMFC)
-        *   NV40  : Not sure exactly how to position RAMFC on some cards,
-        *           0x30002 seems to position it at RAMIN+0x20000 on these
-        *           cards.  RAMFC is 4kb (32 fifos, 128byte entries).
-        *   Others: Position RAMFC at RAMIN+0x11400
-        */
-       dev_priv->ramfc_size = engine->fifo.channels *
-                                               nouveau_fifo_ctx_size(dev);
+       /* And RAMRO */
+       ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
+                                     NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
+       if (ret)
+               return ret;
+
+       /* And RAMFC */
+       length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
        switch (dev_priv->card_type) {
        case NV_40:
-               dev_priv->ramfc_offset = 0x20000;
+               offset = 0x20000;
                break;
-       case NV_30:
-       case NV_20:
-       case NV_10:
-       case NV_04:
        default:
-               dev_priv->ramfc_offset = 0x11400;
+               offset = 0x11400;
                break;
        }
-       NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
-                                                     dev_priv->ramfc_size);
-}
 
-int nv04_instmem_init(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       uint32_t offset;
-       int ret;
-
-       nv04_instmem_determine_amount(dev);
-       nv04_instmem_configure_fixed_tables(dev);
+       ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
+                                     NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
+       if (ret)
+               return ret;
 
-       /* Create a heap to manage RAMIN allocations, we don't allocate
-        * the space that was reserved for RAMHT/FC/RO.
-        */
-       offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
+       /* Only allow space after RAMFC to be used for object allocation */
+       offset += length;
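For reference, the fixed RAMIN layout the three nouveau_gpuobj_new_fake() calls above establish (offsets and sizes taken from the call sites):

    /* 0x10000 + 4096 bytes          RAMHT (shared hash table)
     * 0x11200 + 512 bytes           RAMRO (FIFO runout)
     * 0x11400 (0x20000 on NV40)     RAMFC, channels * ctx-size bytes
     * offset + length onward        general object allocation
     */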
 
        /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
         * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
@@ -140,46 +90,34 @@ int nv04_instmem_init(struct drm_device *dev)
 void
 nv04_instmem_takedown(struct drm_device *dev)
 {
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
+       nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
+       nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
 }
 
 int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+                     uint32_t *sz)
 {
-       if (gpuobj->im_backing)
-               return -EINVAL;
-
        return 0;
 }
 
 void
 nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-       if (gpuobj && gpuobj->im_backing) {
-               if (gpuobj->im_bound)
-                       dev_priv->engine.instmem.unbind(dev, gpuobj);
-               gpuobj->im_backing = NULL;
-       }
 }
 
 int
 nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
-       if (!gpuobj->im_pramin || gpuobj->im_bound)
-               return -EINVAL;
-
-       gpuobj->im_bound = 1;
        return 0;
 }
 
 int
 nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
-       if (gpuobj->im_bound == 0)
-               return -EINVAL;
-
-       gpuobj->im_bound = 0;
        return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
new file mode 100644 (file)
index 0000000..6a6eb69
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_pm.h"
+
+struct nv04_pm_state {
+       struct pll_lims pll;
+       struct nouveau_pll_vals calc;
+};
+
+int
+nv04_pm_clock_get(struct drm_device *dev, u32 id)
+{
+       return nouveau_hw_get_clock(dev, id);
+}
+
+void *
+nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+                 u32 id, int khz)
+{
+       struct nv04_pm_state *state;
+       int ret;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+
+       ret = get_pll_limits(dev, id, &state->pll);
+       if (ret) {
+               kfree(state);
+               return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+       }
+
+       ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
+       if (!ret) {
+               kfree(state);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return state;
+}
+
+void
+nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nv04_pm_state *state = pre_state;
+       u32 reg = state->pll.reg;
+
+       /* thank the insane nouveau_hw_setpll() interface for this */
+       if (dev_priv->card_type >= NV_40)
+               reg += 4;
+
+       nouveau_hw_setpll(dev, reg, &state->calc);
+       kfree(state);
+}
+
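The two hooks form a prepare/commit pair; a minimal sketch of a caller (the PLL_CORE id and the perflvl field name here are assumptions, not defined in this patch):

    void *state = nv04_pm_clock_pre(dev, perflvl, PLL_CORE, perflvl->core);
    if (state && !IS_ERR(state))            /* NULL means no PLL limits found */
            nv04_pm_clock_set(dev, state);  /* programs the PLL, frees state */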
index 0b5d012d7c28d4aebacf8aa15576cebe71b36c84..3eb605ddfd031bd4ceb74c532597a00177e5a380 100644 (file)
@@ -49,8 +49,8 @@ static struct i2c_board_info nv04_tv_encoder_info[] = {
 
 int nv04_tv_identify(struct drm_device *dev, int i2c_index)
 {
-       return nouveau_i2c_identify(dev, "TV encoder",
-                                   nv04_tv_encoder_info, i2c_index);
+       return nouveau_i2c_identify(dev, "TV encoder", nv04_tv_encoder_info,
+                                   NULL, i2c_index);
 }
 
 
@@ -99,12 +99,10 @@ static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
 
        state->tv_setup = 0;
 
-       if (bind) {
-               state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
+       if (bind)
                state->CRTC[NV_CIO_CRE_49] |= 0x10;
-       } else {
+       else
                state->CRTC[NV_CIO_CRE_49] &= ~0x10;
-       }
 
        NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
                       state->CRTC[NV_CIO_CRE_LCD__INDEX]);
index 7a4069cf5d0b835caccd983722bb640df7f6e351..f1b03ad58fd5459f928ad6b54d97a45dce417648 100644 (file)
@@ -27,8 +27,9 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 
-#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
+#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
 #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
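Each channel's context thus lives at a fixed offset from the RAMFC gpuobj's physical instance address; e.g. for channel 3 on a chipset >= 0x17 (64-byte entries):

    /* NV10_RAMFC(3) == dev_priv->ramfc->pinst + 3 * 64
     *               == dev_priv->ramfc->pinst + 0xc0 */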
 
 int
@@ -48,7 +49,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
 
        ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
                                      NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
-                                     NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+                                     NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
        if (ret)
                return ret;
 
@@ -57,7 +58,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
         */
        nv_wi32(dev, fc +  0, chan->pushbuf_base);
        nv_wi32(dev, fc +  4, chan->pushbuf_base);
-       nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+       nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
        nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
                              NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
                              NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -80,7 +81,7 @@ nv10_fifo_destroy_context(struct nouveau_channel *chan)
        nv_wr32(dev, NV04_PFIFO_MODE,
                        nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
 
-       nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+       nouveau_gpuobj_ref(NULL, &chan->ramfc);
 }
 
 static void
@@ -202,14 +203,14 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
        nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-                                      ((dev_priv->ramht_bits - 9) << 16) |
-                                      (dev_priv->ramht_offset >> 8));
-       nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+                                      ((dev_priv->ramht->bits - 9) << 16) |
+                                      (dev_priv->ramht->gpuobj->pinst >> 8));
+       nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
 
        if (dev_priv->chipset < 0x17) {
-               nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+               nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
        } else {
-               nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
+               nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
                                               (1 << 16) /* 64-byte entries */);
                /* XXX nvidia blob sets bits 18, 21, 23 for nv20 & nv30 */
        }
index b2f6a57c0cc5f1d5c0f879268d6468a8f77014ba..8e68c97311597ebe2c8b8a9b58536e0df3f81eb3 100644 (file)
@@ -803,7 +803,7 @@ nv10_graph_context_switch(struct drm_device *dev)
        /* Load context for next channel */
        chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
        chan = dev_priv->fifos[chid];
-       if (chan)
+       if (chan && chan->pgraph_ctx)
                nv10_graph_load_context(chan);
 
        pgraph->fifo_access(dev, true);
index 13cdc05b7c2d0f4e7785cbe428ce4ab86205bd67..28119fd19d03c7974451c131898b38261eabcfbe 100644 (file)
@@ -193,55 +193,56 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
        }
 }
 
-static const struct {
-       int hdisplay;
-       int vdisplay;
-} modes[] = {
-       { 640, 400 },
-       { 640, 480 },
-       { 720, 480 },
-       { 720, 576 },
-       { 800, 600 },
-       { 1024, 768 },
-       { 1280, 720 },
-       { 1280, 1024 },
-       { 1920, 1080 }
-};
-
-static int nv17_tv_get_modes(struct drm_encoder *encoder,
-                            struct drm_connector *connector)
+static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
+                               struct drm_connector *connector)
 {
        struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
-       struct drm_display_mode *mode;
-       struct drm_display_mode *output_mode;
+       struct drm_display_mode *mode, *tv_mode;
        int n = 0;
-       int i;
-
-       if (tv_norm->kind != CTV_ENC_MODE) {
-               struct drm_display_mode *tv_mode;
 
-               for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
-                       mode = drm_mode_duplicate(encoder->dev, tv_mode);
+       for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+               mode = drm_mode_duplicate(encoder->dev, tv_mode);
 
-                       mode->clock = tv_norm->tv_enc_mode.vrefresh *
-                                               mode->htotal / 1000 *
-                                               mode->vtotal / 1000;
+               mode->clock = tv_norm->tv_enc_mode.vrefresh *
+                       mode->htotal / 1000 *
+                       mode->vtotal / 1000;
 
-                       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-                               mode->clock *= 2;
+               if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+                       mode->clock *= 2;
 
-                       if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
-                           mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
-                               mode->type |= DRM_MODE_TYPE_PREFERRED;
+               if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
+                   mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
+                       mode->type |= DRM_MODE_TYPE_PREFERRED;
 
-                       drm_mode_probed_add(connector, mode);
-                       n++;
-               }
-               return n;
+               drm_mode_probed_add(connector, mode);
+               n++;
        }
 
-       /* tv_norm->kind == CTV_ENC_MODE */
-       output_mode = &tv_norm->ctv_enc_mode.mode;
+       return n;
+}
+
+static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+                               struct drm_connector *connector)
+{
+       struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+       struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode;
+       struct drm_display_mode *mode;
+       const struct {
+               int hdisplay;
+               int vdisplay;
+       } modes[] = {
+               { 640, 400 },
+               { 640, 480 },
+               { 720, 480 },
+               { 720, 576 },
+               { 800, 600 },
+               { 1024, 768 },
+               { 1280, 720 },
+               { 1280, 1024 },
+               { 1920, 1080 }
+       };
+       int i, n = 0;
+
        for (i = 0; i < ARRAY_SIZE(modes); i++) {
                if (modes[i].hdisplay > output_mode->hdisplay ||
                    modes[i].vdisplay > output_mode->vdisplay)
@@ -251,11 +252,12 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
                    modes[i].vdisplay == output_mode->vdisplay) {
                        mode = drm_mode_duplicate(encoder->dev, output_mode);
                        mode->type |= DRM_MODE_TYPE_PREFERRED;
+
                } else {
                        mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
-                               modes[i].vdisplay, 60, false,
-                               output_mode->flags & DRM_MODE_FLAG_INTERLACE,
-                               false);
+                                           modes[i].vdisplay, 60, false,
+                                           (output_mode->flags &
+                                            DRM_MODE_FLAG_INTERLACE), false);
                }
 
                /* CVT modes are sometimes unsuitable... */
@@ -266,6 +268,7 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
                                             - mode->hdisplay) * 9 / 10) & ~7;
                        mode->hsync_end = mode->hsync_start + 8;
                }
+
                if (output_mode->vdisplay >= 1024) {
                        mode->vtotal = output_mode->vtotal;
                        mode->vsync_start = output_mode->vsync_start;
@@ -276,9 +279,21 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder,
                drm_mode_probed_add(connector, mode);
                n++;
        }
+
        return n;
 }
 
+static int nv17_tv_get_modes(struct drm_encoder *encoder,
+                            struct drm_connector *connector)
+{
+       struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+       if (tv_norm->kind == CTV_ENC_MODE)
+               return nv17_tv_get_hd_modes(encoder, connector);
+       else
+               return nv17_tv_get_ld_modes(encoder, connector);
+}
+
 static int nv17_tv_mode_valid(struct drm_encoder *encoder,
                              struct drm_display_mode *mode)
 {
@@ -408,15 +423,8 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
 
        }
 
-       /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
-        * at LCD__INDEX which we don't alter
-        */
-       if (!(*cr_lcd & 0x44)) {
-               if (tv_norm->kind == CTV_ENC_MODE)
-                       *cr_lcd = 0x1 | (head ? 0x0 : 0x8);
-               else
-                       *cr_lcd = 0;
-       }
+       if (tv_norm->kind == CTV_ENC_MODE)
+               *cr_lcd |= 0x1 | (head ? 0x0 : 0x8);
 
        /* Set the DACCLK register */
        dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
index c00977cedabd6654e446965fbff7b94d782a9670..6bf03840f9ebddf9b8ff661e229c52e3432330a8 100644 (file)
@@ -127,7 +127,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
 
 /* TV hardware access functions */
 
-static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
+static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
+                               uint32_t val)
 {
        nv_wr32(dev, reg, val);
 }
@@ -137,7 +138,8 @@ static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
        return nv_rd32(dev, reg);
 }
 
-static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
+static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
+                                  uint8_t val)
 {
        nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
        nv_write_ptv(dev, NV_PTV_TV_DATA, val);
@@ -149,8 +151,11 @@ static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
        return nv_read_ptv(dev, NV_PTV_TV_DATA);
 }
 
-#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
-#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
-#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
+#define nv_load_ptv(dev, state, reg) \
+       nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
+#define nv_save_ptv(dev, state, reg) \
+       state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
+#define nv_load_tv_enc(dev, state, reg) \
+       nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
 
 #endif
index d64683d97e0d044a192a8a42146a13aa4a08a4b4..9d3893c50a419b15bbedcc9e3c8dbd1800ded066 100644 (file)
@@ -336,12 +336,17 @@ static void tv_setup_filter(struct drm_encoder *encoder)
                        struct filter_params *p = &fparams[k][j];
 
                        for (i = 0; i < 7; i++) {
-                               int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
-                                       + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
-                                       + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
-                                       + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];
-
-                               (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9);
+                               int64_t c = (p->k1 + p->ki*i + p->ki2*i*i +
+                                            p->ki3*i*i*i)
+                                       + (p->kr + p->kir*i + p->ki2r*i*i +
+                                          p->ki3r*i*i*i) * rs[k]
+                                       + (p->kf + p->kif*i + p->ki2f*i*i +
+                                          p->ki3f*i*i*i) * flicker
+                                       + (p->krf + p->kirf*i + p->ki2rf*i*i +
+                                          p->ki3rf*i*i*i) * flicker * rs[k];
+
+                               (*filters[k])[j][i] = (c + id5/2) >> 39
+                                       & (0x1 << 31 | 0x7f << 9);
                        }
                }
        }
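The re-wrapped expression is easier to read as four cubics in the tap index i (a restatement of the existing arithmetic, not a behavioural change):

    /* c(i) = P(i) + Pr(i)*rs[k] + Pf(i)*flicker + Prf(i)*flicker*rs[k],
     * where each P(i) = k + ki*i + ki2*i*i + ki3*i*i*i, evaluated in 64 bits;
     * (c + id5/2) rounds before the >> 39 shift, and the mask keeps bit 31
     * plus the 7-bit field at bits 9..15 of the packed register value.
     */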
@@ -349,7 +354,8 @@ static void tv_setup_filter(struct drm_encoder *encoder)
 
 /* Hardware state saving/restoring */
 
-static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+static void tv_save_filter(struct drm_device *dev, uint32_t base,
+                          uint32_t regs[4][7])
 {
        int i, j;
        uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
@@ -360,7 +366,8 @@ static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[
        }
 }
 
-static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+static void tv_load_filter(struct drm_device *dev, uint32_t base,
+                          uint32_t regs[4][7])
 {
        int i, j;
        uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
@@ -504,10 +511,10 @@ void nv17_tv_update_properties(struct drm_encoder *encoder)
                break;
        }
 
-       regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
-                                        tv_enc->saturation);
-       regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
-                                        tv_enc->saturation);
+       regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20],
+                                        255, tv_enc->saturation);
+       regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22],
+                                        255, tv_enc->saturation);
        regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
 
        nv_load_ptv(dev, regs, 204);
@@ -541,7 +548,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
        int head = nouveau_crtc(encoder->crtc)->index;
        struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
        struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
-       struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
+       struct drm_display_mode *output_mode =
+               &get_tv_norm(encoder)->ctv_enc_mode.mode;
        int overscan, hmargin, vmargin, hratio, vratio;
 
        /* The rescaler doesn't do the right thing for interlaced modes. */
@@ -553,13 +561,15 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
        hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
        vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
 
-       hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
-                             overscan);
-       vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
-                             overscan);
+       hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20),
+                             hmargin, overscan);
+       vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20),
+                             vmargin, overscan);
 
-       hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
-       vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;
+       hratio = crtc_mode->hdisplay * 0x800 /
+               (output_mode->hdisplay - 2*hmargin);
+       vratio = crtc_mode->vdisplay * 0x800 /
+               (output_mode->vdisplay - 2*vmargin) & ~3;
 
        regs->fp_horiz_regs[FP_VALID_START] = hmargin;
        regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
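The 0x800 factor makes both ratios fixed point with 0x800 representing 1:1 (vratio is additionally rounded down to a multiple of 4 by the & ~3). A worked example with assumed mode sizes:

    /* 720-wide CRTC mode on a 1440-wide output with zero margins:
     *   hratio = 720 * 0x800 / 1440 = 0x400, i.e. half of 1:1 */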
index 17f309b36c910060a355af6d1ada2e6ca6fc7cfd..12ab9cd56ecad286440d9d60d9fe97dd3be0fb7c 100644 (file)
@@ -37,49 +37,49 @@ nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
        int i;
 
-       nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
-       nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
-       nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
-       nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
-       nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
-       nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+       nv_wo32(ctx, 0x033c, 0xffff0000);
+       nv_wo32(ctx, 0x03a0, 0x0fff0000);
+       nv_wo32(ctx, 0x03a4, 0x0fff0000);
+       nv_wo32(ctx, 0x047c, 0x00000101);
+       nv_wo32(ctx, 0x0490, 0x00000111);
+       nv_wo32(ctx, 0x04a8, 0x44400000);
        for (i = 0x04d4; i <= 0x04e0; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00030303);
+               nv_wo32(ctx, i, 0x00030303);
        for (i = 0x04f4; i <= 0x0500; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00080000);
+               nv_wo32(ctx, i, 0x00080000);
        for (i = 0x050c; i <= 0x0518; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x01012000);
+               nv_wo32(ctx, i, 0x01012000);
        for (i = 0x051c; i <= 0x0528; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x000105b8);
+               nv_wo32(ctx, i, 0x000105b8);
        for (i = 0x052c; i <= 0x0538; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00080008);
+               nv_wo32(ctx, i, 0x00080008);
        for (i = 0x055c; i <= 0x0598; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x07ff0000);
-       nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
-       nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
-       nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
-       nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+               nv_wo32(ctx, i, 0x07ff0000);
+       nv_wo32(ctx, 0x05a4, 0x4b7fffff);
+       nv_wo32(ctx, 0x05fc, 0x00000001);
+       nv_wo32(ctx, 0x0604, 0x00004000);
+       nv_wo32(ctx, 0x0610, 0x00000001);
+       nv_wo32(ctx, 0x0618, 0x00040000);
+       nv_wo32(ctx, 0x061c, 0x00010000);
        for (i = 0x1c1c; i <= 0x248c; i += 16) {
-               nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
-               nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
-               nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+               nv_wo32(ctx, (i + 0), 0x10700ff9);
+               nv_wo32(ctx, (i + 4), 0x0436086c);
+               nv_wo32(ctx, (i + 8), 0x000c001b);
        }
-       nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
-       nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
-       nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
-       nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
-       nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
-       nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
+       nv_wo32(ctx, 0x281c, 0x3f800000);
+       nv_wo32(ctx, 0x2830, 0x3f800000);
+       nv_wo32(ctx, 0x285c, 0x40000000);
+       nv_wo32(ctx, 0x2860, 0x3f800000);
+       nv_wo32(ctx, 0x2864, 0x3f000000);
+       nv_wo32(ctx, 0x286c, 0x40000000);
+       nv_wo32(ctx, 0x2870, 0x3f800000);
+       nv_wo32(ctx, 0x2878, 0xbf800000);
+       nv_wo32(ctx, 0x2880, 0xbf800000);
+       nv_wo32(ctx, 0x34a4, 0x000fe000);
+       nv_wo32(ctx, 0x3530, 0x000003f8);
+       nv_wo32(ctx, 0x3540, 0x002fe000);
        for (i = 0x355c; i <= 0x3578; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x001c527c);
+               nv_wo32(ctx, i, 0x001c527c);
 }
 
 static void
@@ -87,58 +87,58 @@ nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
        int i;
 
-       nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
-       nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
-       nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
-       nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
-       nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
-       nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
-       nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
-       nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
-       nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
+       nv_wo32(ctx, 0x035c, 0xffff0000);
+       nv_wo32(ctx, 0x03c0, 0x0fff0000);
+       nv_wo32(ctx, 0x03c4, 0x0fff0000);
+       nv_wo32(ctx, 0x049c, 0x00000101);
+       nv_wo32(ctx, 0x04b0, 0x00000111);
+       nv_wo32(ctx, 0x04c8, 0x00000080);
+       nv_wo32(ctx, 0x04cc, 0xffff0000);
+       nv_wo32(ctx, 0x04d0, 0x00000001);
+       nv_wo32(ctx, 0x04e4, 0x44400000);
+       nv_wo32(ctx, 0x04fc, 0x4b800000);
        for (i = 0x0510; i <= 0x051c; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00030303);
+               nv_wo32(ctx, i, 0x00030303);
        for (i = 0x0530; i <= 0x053c; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00080000);
+               nv_wo32(ctx, i, 0x00080000);
        for (i = 0x0548; i <= 0x0554; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x01012000);
+               nv_wo32(ctx, i, 0x01012000);
        for (i = 0x0558; i <= 0x0564; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x000105b8);
+               nv_wo32(ctx, i, 0x000105b8);
        for (i = 0x0568; i <= 0x0574; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00080008);
+               nv_wo32(ctx, i, 0x00080008);
        for (i = 0x0598; i <= 0x05d4; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x07ff0000);
-       nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
-       nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
-       nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
-       nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
-       nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
-       nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
-       nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
-       nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
-       nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
+               nv_wo32(ctx, i, 0x07ff0000);
+       nv_wo32(ctx, 0x05e0, 0x4b7fffff);
+       nv_wo32(ctx, 0x0620, 0x00000080);
+       nv_wo32(ctx, 0x0624, 0x30201000);
+       nv_wo32(ctx, 0x0628, 0x70605040);
+       nv_wo32(ctx, 0x062c, 0xb0a09080);
+       nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
+       nv_wo32(ctx, 0x0664, 0x00000001);
+       nv_wo32(ctx, 0x066c, 0x00004000);
+       nv_wo32(ctx, 0x0678, 0x00000001);
+       nv_wo32(ctx, 0x0680, 0x00040000);
+       nv_wo32(ctx, 0x0684, 0x00010000);
        for (i = 0x1b04; i <= 0x2374; i += 16) {
-               nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
-               nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
-               nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+               nv_wo32(ctx, (i + 0), 0x10700ff9);
+               nv_wo32(ctx, (i + 4), 0x0436086c);
+               nv_wo32(ctx, (i + 8), 0x000c001b);
        }
-       nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
-       nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
-       nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
-       nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
-       nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
-       nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
+       nv_wo32(ctx, 0x2704, 0x3f800000);
+       nv_wo32(ctx, 0x2718, 0x3f800000);
+       nv_wo32(ctx, 0x2744, 0x40000000);
+       nv_wo32(ctx, 0x2748, 0x3f800000);
+       nv_wo32(ctx, 0x274c, 0x3f000000);
+       nv_wo32(ctx, 0x2754, 0x40000000);
+       nv_wo32(ctx, 0x2758, 0x3f800000);
+       nv_wo32(ctx, 0x2760, 0xbf800000);
+       nv_wo32(ctx, 0x2768, 0xbf800000);
+       nv_wo32(ctx, 0x308c, 0x000fe000);
+       nv_wo32(ctx, 0x3108, 0x000003f8);
+       nv_wo32(ctx, 0x3468, 0x002fe000);
        for (i = 0x3484; i <= 0x34a0; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x001c527c);
+               nv_wo32(ctx, i, 0x001c527c);
 }
 
 static void
@@ -146,49 +146,49 @@ nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
        int i;
 
-       nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
-       nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
-       nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
-       nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
-       nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
-       nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+       nv_wo32(ctx, 0x033c, 0xffff0000);
+       nv_wo32(ctx, 0x03a0, 0x0fff0000);
+       nv_wo32(ctx, 0x03a4, 0x0fff0000);
+       nv_wo32(ctx, 0x047c, 0x00000101);
+       nv_wo32(ctx, 0x0490, 0x00000111);
+       nv_wo32(ctx, 0x04a8, 0x44400000);
        for (i = 0x04d4; i <= 0x04e0; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00030303);
+               nv_wo32(ctx, i, 0x00030303);
        for (i = 0x04f4; i <= 0x0500; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00080000);
+               nv_wo32(ctx, i, 0x00080000);
        for (i = 0x050c; i <= 0x0518; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x01012000);
+               nv_wo32(ctx, i, 0x01012000);
        for (i = 0x051c; i <= 0x0528; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x000105b8);
+               nv_wo32(ctx, i, 0x000105b8);
        for (i = 0x052c; i <= 0x0538; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00080008);
+               nv_wo32(ctx, i, 0x00080008);
        for (i = 0x055c; i <= 0x0598; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x07ff0000);
-       nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
-       nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
-       nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
-       nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+               nv_wo32(ctx, i, 0x07ff0000);
+       nv_wo32(ctx, 0x05a4, 0x4b7fffff);
+       nv_wo32(ctx, 0x05fc, 0x00000001);
+       nv_wo32(ctx, 0x0604, 0x00004000);
+       nv_wo32(ctx, 0x0610, 0x00000001);
+       nv_wo32(ctx, 0x0618, 0x00040000);
+       nv_wo32(ctx, 0x061c, 0x00010000);
        for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
-               nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
-               nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
-               nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+               nv_wo32(ctx, (i + 0), 0x10700ff9);
+               nv_wo32(ctx, (i + 4), 0x0436086c);
+               nv_wo32(ctx, (i + 8), 0x000c001b);
        }
-       nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
-       nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
-       nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
-       nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
-       nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
-       nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
+       nv_wo32(ctx, 0x269c, 0x3f800000);
+       nv_wo32(ctx, 0x26b0, 0x3f800000);
+       nv_wo32(ctx, 0x26dc, 0x40000000);
+       nv_wo32(ctx, 0x26e0, 0x3f800000);
+       nv_wo32(ctx, 0x26e4, 0x3f000000);
+       nv_wo32(ctx, 0x26ec, 0x40000000);
+       nv_wo32(ctx, 0x26f0, 0x3f800000);
+       nv_wo32(ctx, 0x26f8, 0xbf800000);
+       nv_wo32(ctx, 0x2700, 0xbf800000);
+       nv_wo32(ctx, 0x3024, 0x000fe000);
+       nv_wo32(ctx, 0x30a0, 0x000003f8);
+       nv_wo32(ctx, 0x33fc, 0x002fe000);
        for (i = 0x341c; i <= 0x3438; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x001c527c);
+               nv_wo32(ctx, i, 0x001c527c);
 }
 
 static void
@@ -196,57 +196,57 @@ nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
        int i;
 
-       nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
-       nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
-       nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
-       nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
-       nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
-       nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
-       nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
+       nv_wo32(ctx, 0x0410, 0x00000101);
+       nv_wo32(ctx, 0x0424, 0x00000111);
+       nv_wo32(ctx, 0x0428, 0x00000060);
+       nv_wo32(ctx, 0x0444, 0x00000080);
+       nv_wo32(ctx, 0x0448, 0xffff0000);
+       nv_wo32(ctx, 0x044c, 0x00000001);
+       nv_wo32(ctx, 0x0460, 0x44400000);
+       nv_wo32(ctx, 0x048c, 0xffff0000);
        for (i = 0x04e0; i < 0x04e8; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x0fff0000);
-       nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
+               nv_wo32(ctx, i, 0x0fff0000);
+       nv_wo32(ctx, 0x04ec, 0x00011100);
        for (i = 0x0508; i < 0x0548; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x07ff0000);
-       nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
-       nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
-       nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
-       nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
-       nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
-       nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
-       nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
+               nv_wo32(ctx, i, 0x07ff0000);
+       nv_wo32(ctx, 0x0550, 0x4b7fffff);
+       nv_wo32(ctx, 0x058c, 0x00000080);
+       nv_wo32(ctx, 0x0590, 0x30201000);
+       nv_wo32(ctx, 0x0594, 0x70605040);
+       nv_wo32(ctx, 0x0598, 0xb8a89888);
+       nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
+       nv_wo32(ctx, 0x05b0, 0xb0000000);
        for (i = 0x0600; i < 0x0640; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00010588);
+               nv_wo32(ctx, i, 0x00010588);
        for (i = 0x0640; i < 0x0680; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00030303);
+               nv_wo32(ctx, i, 0x00030303);
        for (i = 0x06c0; i < 0x0700; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x0008aae4);
+               nv_wo32(ctx, i, 0x0008aae4);
        for (i = 0x0700; i < 0x0740; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x01012000);
+               nv_wo32(ctx, i, 0x01012000);
        for (i = 0x0740; i < 0x0780; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00080008);
-       nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
-       nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
+               nv_wo32(ctx, i, 0x00080008);
+       nv_wo32(ctx, 0x085c, 0x00040000);
+       nv_wo32(ctx, 0x0860, 0x00010000);
        for (i = 0x0864; i < 0x0874; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00040004);
+               nv_wo32(ctx, i, 0x00040004);
        for (i = 0x1f18; i <= 0x3088 ; i += 16) {
-               nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
-               nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
-               nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+               nv_wo32(ctx, i + 0, 0x10700ff9);
+               nv_wo32(ctx, i + 4, 0x0436086c);
+               nv_wo32(ctx, i + 8, 0x000c001b);
        }
        for (i = 0x30b8; i < 0x30c8; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x0000ffff);
-       nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
-       nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
-       nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
+               nv_wo32(ctx, i, 0x0000ffff);
+       nv_wo32(ctx, 0x344c, 0x3f800000);
+       nv_wo32(ctx, 0x3808, 0x3f800000);
+       nv_wo32(ctx, 0x381c, 0x3f800000);
+       nv_wo32(ctx, 0x3848, 0x40000000);
+       nv_wo32(ctx, 0x384c, 0x3f800000);
+       nv_wo32(ctx, 0x3850, 0x3f000000);
+       nv_wo32(ctx, 0x3858, 0x40000000);
+       nv_wo32(ctx, 0x385c, 0x3f800000);
+       nv_wo32(ctx, 0x3864, 0xbf800000);
+       nv_wo32(ctx, 0x386c, 0xbf800000);
 }
 
 static void
@@ -254,57 +254,57 @@ nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
        int i;
 
-       nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
-       nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
-       nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
-       nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
-       nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
-       nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
-       nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
+       nv_wo32(ctx, 0x040c, 0x01000101);
+       nv_wo32(ctx, 0x0420, 0x00000111);
+       nv_wo32(ctx, 0x0424, 0x00000060);
+       nv_wo32(ctx, 0x0440, 0x00000080);
+       nv_wo32(ctx, 0x0444, 0xffff0000);
+       nv_wo32(ctx, 0x0448, 0x00000001);
+       nv_wo32(ctx, 0x045c, 0x44400000);
+       nv_wo32(ctx, 0x0480, 0xffff0000);
        for (i = 0x04d4; i < 0x04dc; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x0fff0000);
-       nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
+               nv_wo32(ctx, i, 0x0fff0000);
+       nv_wo32(ctx, 0x04e0, 0x00011100);
        for (i = 0x04fc; i < 0x053c; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x07ff0000);
-       nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
-       nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
-       nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
-       nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
-       nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
-       nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
-       nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
+               nv_wo32(ctx, i, 0x07ff0000);
+       nv_wo32(ctx, 0x0544, 0x4b7fffff);
+       nv_wo32(ctx, 0x057c, 0x00000080);
+       nv_wo32(ctx, 0x0580, 0x30201000);
+       nv_wo32(ctx, 0x0584, 0x70605040);
+       nv_wo32(ctx, 0x0588, 0xb8a89888);
+       nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
+       nv_wo32(ctx, 0x05a0, 0xb0000000);
        for (i = 0x05f0; i < 0x0630; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00010588);
+               nv_wo32(ctx, i, 0x00010588);
        for (i = 0x0630; i < 0x0670; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00030303);
+               nv_wo32(ctx, i, 0x00030303);
        for (i = 0x06b0; i < 0x06f0; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x0008aae4);
+               nv_wo32(ctx, i, 0x0008aae4);
        for (i = 0x06f0; i < 0x0730; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x01012000);
+               nv_wo32(ctx, i, 0x01012000);
        for (i = 0x0730; i < 0x0770; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00080008);
-       nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
-       nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
+               nv_wo32(ctx, i, 0x00080008);
+       nv_wo32(ctx, 0x0850, 0x00040000);
+       nv_wo32(ctx, 0x0854, 0x00010000);
        for (i = 0x0858; i < 0x0868; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00040004);
+               nv_wo32(ctx, i, 0x00040004);
        for (i = 0x15ac; i <= 0x271c ; i += 16) {
-               nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
-               nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
-               nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+               nv_wo32(ctx, i + 0, 0x10700ff9);
+               nv_wo32(ctx, i + 4, 0x0436086c);
+               nv_wo32(ctx, i + 8, 0x000c001b);
        }
        for (i = 0x274c; i < 0x275c; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x0000ffff);
-       nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
-       nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
-       nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
+               nv_wo32(ctx, i, 0x0000ffff);
+       nv_wo32(ctx, 0x2ae0, 0x3f800000);
+       nv_wo32(ctx, 0x2e9c, 0x3f800000);
+       nv_wo32(ctx, 0x2eb0, 0x3f800000);
+       nv_wo32(ctx, 0x2edc, 0x40000000);
+       nv_wo32(ctx, 0x2ee0, 0x3f800000);
+       nv_wo32(ctx, 0x2ee4, 0x3f000000);
+       nv_wo32(ctx, 0x2eec, 0x40000000);
+       nv_wo32(ctx, 0x2ef0, 0x3f800000);
+       nv_wo32(ctx, 0x2ef8, 0xbf800000);
+       nv_wo32(ctx, 0x2f00, 0xbf800000);
 }
 
 static void
@@ -312,57 +312,57 @@ nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
        int i;
 
-       nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
-       nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
-       nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
-       nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
-       nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
-       nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
-       nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
-       nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
+       nv_wo32(ctx, 0x040c, 0x00000101);
+       nv_wo32(ctx, 0x0420, 0x00000111);
+       nv_wo32(ctx, 0x0424, 0x00000060);
+       nv_wo32(ctx, 0x0440, 0x00000080);
+       nv_wo32(ctx, 0x0444, 0xffff0000);
+       nv_wo32(ctx, 0x0448, 0x00000001);
+       nv_wo32(ctx, 0x045c, 0x44400000);
+       nv_wo32(ctx, 0x0488, 0xffff0000);
        for (i = 0x04dc; i < 0x04e4; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x0fff0000);
-       nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
+               nv_wo32(ctx, i, 0x0fff0000);
+       nv_wo32(ctx, 0x04e8, 0x00011100);
        for (i = 0x0504; i < 0x0544; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x07ff0000);
-       nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
-       nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
-       nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
-       nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
-       nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
-       nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
-       nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
+               nv_wo32(ctx, i, 0x07ff0000);
+       nv_wo32(ctx, 0x054c, 0x4b7fffff);
+       nv_wo32(ctx, 0x0588, 0x00000080);
+       nv_wo32(ctx, 0x058c, 0x30201000);
+       nv_wo32(ctx, 0x0590, 0x70605040);
+       nv_wo32(ctx, 0x0594, 0xb8a89888);
+       nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
+       nv_wo32(ctx, 0x05ac, 0xb0000000);
        for (i = 0x0604; i < 0x0644; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00010588);
+               nv_wo32(ctx, i, 0x00010588);
        for (i = 0x0644; i < 0x0684; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00030303);
+               nv_wo32(ctx, i, 0x00030303);
        for (i = 0x06c4; i < 0x0704; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x0008aae4);
+               nv_wo32(ctx, i, 0x0008aae4);
        for (i = 0x0704; i < 0x0744; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x01012000);
+               nv_wo32(ctx, i, 0x01012000);
        for (i = 0x0744; i < 0x0784; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00080008);
-       nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
-       nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
+               nv_wo32(ctx, i, 0x00080008);
+       nv_wo32(ctx, 0x0860, 0x00040000);
+       nv_wo32(ctx, 0x0864, 0x00010000);
        for (i = 0x0868; i < 0x0878; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x00040004);
+               nv_wo32(ctx, i, 0x00040004);
        for (i = 0x1f1c; i <= 0x308c ; i += 16) {
-               nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
-               nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
-               nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+               nv_wo32(ctx, i + 0, 0x10700ff9);
+               nv_wo32(ctx, i + 4, 0x0436086c);
+               nv_wo32(ctx, i + 8, 0x000c001b);
        }
        for (i = 0x30bc; i < 0x30cc; i += 4)
-               nv_wo32(dev, ctx, i/4, 0x0000ffff);
-       nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
-       nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
-       nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
-       nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
-       nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
+               nv_wo32(ctx, i, 0x0000ffff);
+       nv_wo32(ctx, 0x3450, 0x3f800000);
+       nv_wo32(ctx, 0x380c, 0x3f800000);
+       nv_wo32(ctx, 0x3820, 0x3f800000);
+       nv_wo32(ctx, 0x384c, 0x40000000);
+       nv_wo32(ctx, 0x3850, 0x3f800000);
+       nv_wo32(ctx, 0x3854, 0x3f000000);
+       nv_wo32(ctx, 0x385c, 0x40000000);
+       nv_wo32(ctx, 0x3860, 0x3f800000);
+       nv_wo32(ctx, 0x3868, 0xbf800000);
+       nv_wo32(ctx, 0x3870, 0xbf800000);
 }
 
 int
@@ -372,7 +372,7 @@ nv20_graph_create_context(struct nouveau_channel *chan)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
-       unsigned int idoffs = 0x28/4;
+       unsigned int idoffs = 0x28;
        int ret;
 
        switch (dev_priv->chipset) {
@@ -403,21 +403,19 @@ nv20_graph_create_context(struct nouveau_channel *chan)
                BUG_ON(1);
        }
 
-       ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
-                                    16, NVOBJ_FLAG_ZERO_ALLOC,
-                                    &chan->ramin_grctx);
+       ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
+                                NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
        if (ret)
                return ret;
 
        /* Initialise default context values */
-       ctx_init(dev, chan->ramin_grctx->gpuobj);
+       ctx_init(dev, chan->ramin_grctx);
 
        /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
-       nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
-                                       (chan->id << 24) | 0x1); /* CTX_USER */
+       nv_wo32(chan->ramin_grctx, idoffs,
+               (chan->id << 24) | 0x1); /* CTX_USER */
 
-       nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id,
-                    chan->ramin_grctx->instance >> 4);
+       nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
        return 0;
 }
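The chan->id * 4 follows the new byte-offset convention: one u32 slot per channel in the 32-entry context pointer table, each holding the context's instance address >> 4. A hypothetical helper making that explicit:

    static inline void
    nv20_ctx_table_set(struct nouveau_gpuobj *tbl, int chid, u32 inst)
    {
            nv_wo32(tbl, chid * 4, inst >> 4);   /* one dword per channel */
    }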
 
@@ -428,10 +426,8 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
-       if (chan->ramin_grctx)
-               nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
-
-       nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0);
+       nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+       nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
 }
 
 int
@@ -442,7 +438,7 @@ nv20_graph_load_context(struct nouveau_channel *chan)
 
        if (!chan->ramin_grctx)
                return -EINVAL;
-       inst = chan->ramin_grctx->instance >> 4;
+       inst = chan->ramin_grctx->pinst >> 4;
 
        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -465,7 +461,7 @@ nv20_graph_unload_context(struct drm_device *dev)
        chan = pgraph->channel(dev);
        if (!chan)
                return 0;
-       inst = chan->ramin_grctx->instance >> 4;
+       inst = chan->ramin_grctx->pinst >> 4;
 
        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -552,15 +548,15 @@ nv20_graph_init(struct drm_device *dev)
 
        if (!pgraph->ctx_table) {
                /* Create Context Pointer Table */
-               ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
-                                                 NVOBJ_FLAG_ZERO_ALLOC,
-                                                 &pgraph->ctx_table);
+               ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
+                                        NVOBJ_FLAG_ZERO_ALLOC,
+                                        &pgraph->ctx_table);
                if (ret)
                        return ret;
        }
 
        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
-                    pgraph->ctx_table->instance >> 4);
+                    pgraph->ctx_table->pinst >> 4);
 
        nv20_graph_rdi(dev);
 
@@ -646,7 +642,7 @@ nv20_graph_takedown(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
-       nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table);
+       nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
 }
 
 int
@@ -681,15 +677,15 @@ nv30_graph_init(struct drm_device *dev)
 
        if (!pgraph->ctx_table) {
                /* Create Context Pointer Table */
-               ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
-                                                 NVOBJ_FLAG_ZERO_ALLOC,
-                                                 &pgraph->ctx_table);
+               ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
+                                        NVOBJ_FLAG_ZERO_ALLOC,
+                                        &pgraph->ctx_table);
                if (ret)
                        return ret;
        }
 
        nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
-                    pgraph->ctx_table->instance >> 4);
+                    pgraph->ctx_table->pinst >> 4);
 
        nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
        nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
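
The hunks above are one instance of a conversion repeated throughout this patch: the old nouveau_gpuobj_new_ref()/nouveau_gpuobj_ref_del() pair is replaced by plain nouveau_gpuobj_new() plus reference moves through nouveau_gpuobj_ref(), where passing NULL as the source drops the held reference (freeing the object once the last reference goes away). A minimal sketch of the new lifecycle, assuming the usual nouveau driver context; "size" is a stand-in for the caller's allocation size, not a name from this patch:

	struct nouveau_gpuobj *obj = NULL;
	int ret;

	/* zeroed, 16-byte aligned allocation in the channel's instance memory */
	ret = nouveau_gpuobj_new(dev, chan, size, 16, NVOBJ_FLAG_ZERO_ALLOC, &obj);
	if (ret)
		return ret;

	/* ... use obj ... */

	nouveau_gpuobj_ref(NULL, &obj);	/* drop our reference; obj becomes NULL */
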
index 2b67f1835c3942c4fddbe63b2b84a9050df81134..d337b8b28cdd6f669c315fc677f32a8078428d57 100644
@@ -27,8 +27,9 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
+#include "nouveau_ramht.h"
 
-#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
+#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE))
 #define NV40_RAMFC__SIZE 128
 
 int
@@ -42,7 +43,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
 
        ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
                                      NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
-                                     NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+                                     NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
        if (ret)
                return ret;
 
@@ -50,7 +51,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
 
        nv_wi32(dev, fc +  0, chan->pushbuf_base);
        nv_wi32(dev, fc +  4, chan->pushbuf_base);
-       nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+       nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
        nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
                              NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
                              NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -58,7 +59,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
                              NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
                              0x30000000 /* no idea.. */);
-       nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
+       nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
        nv_wi32(dev, fc + 60, 0x0001FFFF);
 
        /* enable the fifo dma operation */
@@ -77,8 +78,7 @@ nv40_fifo_destroy_context(struct nouveau_channel *chan)
        nv_wr32(dev, NV04_PFIFO_MODE,
                nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
 
-       if (chan->ramfc)
-               nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+       nouveau_gpuobj_ref(NULL, &chan->ramfc);
 }
 
 static void
@@ -241,9 +241,9 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
        nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-                                      ((dev_priv->ramht_bits - 9) << 16) |
-                                      (dev_priv->ramht_offset >> 8));
-       nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+                                      ((dev_priv->ramht->bits - 9) << 16) |
+                                      (dev_priv->ramht->gpuobj->pinst >> 8));
+       nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
 
        switch (dev_priv->chipset) {
        case 0x47:
@@ -271,7 +271,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
                nv_wr32(dev, 0x2230, 0);
                nv_wr32(dev, NV40_PFIFO_RAMFC,
                        ((dev_priv->vram_size - 512 * 1024 +
-                         dev_priv->ramfc_offset) >> 16) | (3 << 16));
+                         dev_priv->ramfc->pinst) >> 16) | (3 << 16));
                break;
        }
 }
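
Worth noting for the ->instance conversions above and below: the single instance field is split into pinst and vinst, and the hunks pick one or the other depending on whether the register being programmed wants a PRAMIN-relative offset or a VRAM address. This reading is inferred from usage in this patch rather than stated anywhere in it; both lines below are taken verbatim from the surrounding hunks:

	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);	/* PRAMIN offset */
	inst = chan->ramin_grctx->pinst >> 4;				/* likewise */
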
index fd7d2b5013167f58e92b0c5859fd965564f2e53d..7ee1b91569b8ad29b22437d21ee915e56c123c6d 100644
@@ -45,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
                struct nouveau_channel *chan = dev_priv->fifos[i];
 
                if (chan && chan->ramin_grctx &&
-                   chan->ramin_grctx->instance == inst)
+                   chan->ramin_grctx->pinst == inst)
                        return chan;
        }
 
@@ -61,27 +61,25 @@ nv40_graph_create_context(struct nouveau_channel *chan)
        struct nouveau_grctx ctx = {};
        int ret;
 
-       ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
-                                    16, NVOBJ_FLAG_ZERO_ALLOC,
-                                    &chan->ramin_grctx);
+       ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
+                                NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
        if (ret)
                return ret;
 
        /* Initialise default context values */
        ctx.dev = chan->dev;
        ctx.mode = NOUVEAU_GRCTX_VALS;
-       ctx.data = chan->ramin_grctx->gpuobj;
+       ctx.data = chan->ramin_grctx;
        nv40_grctx_init(&ctx);
 
-       nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
-                    chan->ramin_grctx->gpuobj->im_pramin->start);
+       nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
        return 0;
 }
 
 void
 nv40_graph_destroy_context(struct nouveau_channel *chan)
 {
-       nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
+       nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
 }
 
 static int
@@ -135,7 +133,7 @@ nv40_graph_load_context(struct nouveau_channel *chan)
 
        if (!chan->ramin_grctx)
                return -EINVAL;
-       inst = chan->ramin_grctx->instance >> 4;
+       inst = chan->ramin_grctx->pinst >> 4;
 
        ret = nv40_graph_transfer_context(dev, inst, 0);
        if (ret)
index 9b5c9746958868b98152446a03cacb78b3d3d010..ce585093264e69af22d0689efb2c3c4a8298c201 100644
@@ -596,13 +596,13 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
 
        offset += 0x0280/4;
        for (i = 0; i < 16; i++, offset += 2)
-               nv_wo32(dev, obj, offset, 0x3f800000);
+               nv_wo32(obj, offset * 4, 0x3f800000);
 
        for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
                for (i = 0; i < vs_nr_b0 * 6; i += 6)
-                       nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
+                       nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
                for (i = 0; i < vs_nr_b1 * 4; i += 4)
-                       nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
+                       nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
        }
 }
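
The second half of the interface change shows up in the hunk above: nv_wo32()/nv_ro32() lose their drm_device argument and now take a byte offset instead of a 32-bit word index, so what used to be index i becomes offset i * 4. A sketch of the equivalence, with the old call shown only in the comment:

	/* old: nv_wo32(dev, obj, i, val);   -- i counted 32-bit words */
	nv_wo32(obj, i * 4, val);	/* new: offset is in bytes */
	val = nv_ro32(obj, i * 4);
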
 
index bfd4ca2fe7ef3de146accfe37895bb1e5a760784..16380d52cd885599f349219c903f12cf7b9c6159 100644
@@ -104,8 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
                OUT_RING(evo, nv_crtc->lut.depth == 8 ?
                                NV50_EVO_CRTC_CLUT_MODE_OFF :
                                NV50_EVO_CRTC_CLUT_MODE_ON);
-               OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
-                                PAGE_SHIFT) >> 8);
+               OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
                if (dev_priv->chipset != 0x50) {
                        BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
                        OUT_RING(evo, NvEvoVRAM);
@@ -266,15 +265,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct pll_lims pll;
-       uint32_t reg, reg1, reg2;
+       uint32_t reg1, reg2;
        int ret, N1, M1, N2, M2, P;
 
-       if (dev_priv->chipset < NV_C0)
-               reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
-       else
-               reg = 0x614140 + (head * 0x800);
-
-       ret = get_pll_limits(dev, reg, &pll);
+       ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll);
        if (ret)
                return ret;
 
@@ -286,11 +280,11 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
                NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
                         pclk, ret, N1, M1, N2, M2, P);
 
-               reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
-               reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
-               nv_wr32(dev, reg, 0x10000611);
-               nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
-               nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
+               reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00;
+               reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00;
+               nv_wr32(dev, pll.reg + 0, 0x10000611);
+               nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1);
+               nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
        } else
        if (dev_priv->chipset < NV_C0) {
                ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
@@ -300,10 +294,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
                NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
                         pclk, ret, N1, N2, M1, P);
 
-               reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
-               nv_wr32(dev, reg, 0x50000610);
-               nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
-               nv_wr32(dev, reg + 8, N2);
+               reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000;
+               nv_wr32(dev, pll.reg + 0, 0x50000610);
+               nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+               nv_wr32(dev, pll.reg + 8, N2);
        } else {
                ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
                if (ret <= 0)
@@ -312,9 +306,9 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
                NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
                         pclk, ret, N1, N2, M1, P);
 
-               nv_mask(dev, reg + 0x0c, 0x00000000, 0x00000100);
-               nv_wr32(dev, reg + 0x04, (P << 16) | (N1 << 8) | M1);
-               nv_wr32(dev, reg + 0x10, N2 << 16);
+               nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100);
+               nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1);
+               nv_wr32(dev, pll.reg + 0x10, N2 << 16);
        }
 
        return 0;
@@ -338,7 +332,9 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
 
        nv50_cursor_fini(nv_crtc);
 
+       nouveau_bo_unmap(nv_crtc->lut.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
        kfree(nv_crtc->mode);
        kfree(nv_crtc);
@@ -491,8 +487,9 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
 }
 
 static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
-                          struct drm_framebuffer *old_fb, bool update)
+nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
+                          struct drm_framebuffer *passed_fb,
+                          int x, int y, bool update, bool atomic)
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct drm_device *dev = nv_crtc->base.dev;
@@ -504,6 +501,27 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
 
        NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
+       /* If atomic, we want to switch to the fb we were passed, so
+        * now we update pointers to do that.  (We don't pin; just
+        * assume we're already pinned and update the base address.)
+        */
+       if (atomic) {
+               drm_fb = passed_fb;
+               fb = nouveau_framebuffer(passed_fb);
+       } else {
+               /* If not atomic, we can go ahead and pin, and unpin the
+                * old fb we were passed.
+                */
+               ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+               if (ret)
+                       return ret;
+
+               if (passed_fb) {
+                       struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
+                       nouveau_bo_unpin(ofb->nvbo);
+               }
+       }
+
        switch (drm_fb->depth) {
        case  8:
                format = NV50_EVO_CRTC_FB_DEPTH_8;
@@ -526,15 +545,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
                 return -EINVAL;
        }
 
-       ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-       if (ret)
-               return ret;
-
-       if (old_fb) {
-               struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
-               nouveau_bo_unpin(ofb->nvbo);
-       }
-
        nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
        nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
        nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
@@ -685,14 +695,22 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
        nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
        nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
 
-       return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
+       return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false);
 }
 
 static int
 nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
                        struct drm_framebuffer *old_fb)
 {
-       return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
+       return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false);
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+                              struct drm_framebuffer *fb,
+                              int x, int y, enum mode_set_atomic state)
+{
+       return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true);
 }
 
 static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -702,6 +720,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
        .mode_fixup = nv50_crtc_mode_fixup,
        .mode_set = nv50_crtc_mode_set,
        .mode_set_base = nv50_crtc_mode_set_base,
+       .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
        .load_lut = nv50_crtc_lut_load,
 };
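
The atomic entry point added above exists so that a caller which cannot sleep can still repoint the scanout buffer: per the comment in the helper, the atomic branch skips the pin/unpin dance and only updates the base address. A hypothetical caller might look like this; the function name is illustrative and not part of this patch, and ENTER_ATOMIC_MODE_SET is assumed from the drm mode_set_atomic enum:

	static int example_show_debug_fb(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb)
	{
		/* assumes fb's buffer object is already pinned */
		return nv50_crtc_mode_set_base_atomic(crtc, fb, 0, 0,
						      ENTER_ATOMIC_MODE_SET);
	}
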
 
index 03ad7ab14f0960e1729674cd637a15ff325ae169..1b9ce3021aa3ae4ea3329a882be9dffebb1acaee 100644
@@ -147,7 +147,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
        NV_DEBUG_KMS(dev, "\n");
 
        nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
-       if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
+       if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
                     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
                NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
                NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
index 1bc085962945056aba2e5ea5acfc8c72a5ca5da2..875414b09adea8bfd3ae46842b1447314ca2cd3f 100644
@@ -79,7 +79,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 
        nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
                0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-       if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+       if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
                     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
                NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
                NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
@@ -130,7 +130,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
        NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
 
        /* wait for it to be done */
-       if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+       if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
                     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
                NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
                NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
index 612fa6d6a0cba9d415b9277308969043fe65a2d8..55c9663ef2bf8ddd4df2cf15b452daed66abc583 100644
 #include "nouveau_connector.h"
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_ramht.h"
 #include "drm_crtc_helper.h"
 
+static inline int
+nv50_sor_nr(struct drm_device *dev)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->chipset  < 0x90 ||
+           dev_priv->chipset == 0x92 ||
+           dev_priv->chipset == 0xa0)
+               return 2;
+
+       return 4;
+}
+
 static void
 nv50_evo_channel_del(struct nouveau_channel **pchan)
 {
@@ -42,6 +56,7 @@ nv50_evo_channel_del(struct nouveau_channel **pchan)
        *pchan = NULL;
 
        nouveau_gpuobj_channel_takedown(chan);
+       nouveau_bo_unmap(chan->pushbuf_bo);
        nouveau_bo_ref(NULL, &chan->pushbuf_bo);
 
        if (chan->user)
@@ -65,23 +80,22 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
                return ret;
        obj->engine = NVOBJ_ENGINE_DISPLAY;
 
-       ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
-       if (ret) {
-               nouveau_gpuobj_del(dev, &obj);
-               return ret;
-       }
-
-       nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
-       nv_wo32(dev, obj, 1, limit);
-       nv_wo32(dev, obj, 2, offset);
-       nv_wo32(dev, obj, 3, 0x00000000);
-       nv_wo32(dev, obj, 4, 0x00000000);
+       nv_wo32(obj,  0, (tile_flags << 22) | (magic_flags << 16) | class);
+       nv_wo32(obj,  4, limit);
+       nv_wo32(obj,  8, offset);
+       nv_wo32(obj, 12, 0x00000000);
+       nv_wo32(obj, 16, 0x00000000);
        if (dev_priv->card_type < NV_C0)
-               nv_wo32(dev, obj, 5, 0x00010000);
+               nv_wo32(obj, 20, 0x00010000);
        else
-               nv_wo32(dev, obj, 5, 0x00020000);
+               nv_wo32(obj, 20, 0x00020000);
        dev_priv->engine.instmem.flush(dev);
 
+       ret = nouveau_ramht_insert(evo, name, obj);
+       nouveau_gpuobj_ref(NULL, &obj);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -89,6 +104,7 @@ static int
 nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpuobj *ramht = NULL;
        struct nouveau_channel *chan;
        int ret;
 
@@ -102,32 +118,35 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
        chan->user_get = 4;
        chan->user_put = 0;
 
-       INIT_LIST_HEAD(&chan->ramht_refs);
-
-       ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
-                                    NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
+       ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
+                                NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }
 
-       ret = drm_mm_init(&chan->ramin_heap,
-                         chan->ramin->gpuobj->im_pramin->start, 32768);
+       ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
        if (ret) {
                NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }
 
-       ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
-                                    0, &chan->ramht);
+       ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
        if (ret) {
                NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }
 
+       ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
+       nouveau_gpuobj_ref(NULL, &ramht);
+       if (ret) {
+               nv50_evo_channel_del(pchan);
+               return ret;
+       }
+
        if (dev_priv->chipset != 0x50) {
                ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
                                          0, 0xffffffff);
@@ -227,11 +246,11 @@ nv50_display_init(struct drm_device *dev)
                nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
        }
        /* SOR */
-       for (i = 0; i < 4; i++) {
+       for (i = 0; i < nv50_sor_nr(dev); i++) {
                val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
                nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
        }
-       /* Something not yet in use, tv-out maybe. */
+       /* EXT */
        for (i = 0; i < 3; i++) {
                val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
                nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
@@ -260,7 +279,7 @@ nv50_display_init(struct drm_device *dev)
        if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
                nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
                nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
-               if (!nv_wait(0x006194e8, 2, 0)) {
+               if (!nv_wait(dev, 0x006194e8, 2, 0)) {
                        NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
                        NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
                                                nv_rd32(dev, 0x6194e8));
@@ -291,7 +310,8 @@ nv50_display_init(struct drm_device *dev)
 
        nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
-       if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
+       if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+                    0x40000000, 0x40000000)) {
                NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
                NV_ERROR(dev, "0x610200 = 0x%08x\n",
                          nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
@@ -300,7 +320,7 @@ nv50_display_init(struct drm_device *dev)
 
        for (i = 0; i < 2; i++) {
                nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
-               if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+               if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
                             NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
                        NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
                        NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
@@ -310,7 +330,7 @@ nv50_display_init(struct drm_device *dev)
 
                nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
                        NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
-               if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+               if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
                             NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
                             NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
                        NV_ERROR(dev, "timeout: "
@@ -321,16 +341,16 @@ nv50_display_init(struct drm_device *dev)
                }
        }
 
-       nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
+       nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
 
        /* initialise fifo */
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
-               ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+               ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
                NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
                NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
-       if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
+       if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
                NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
                NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
                return -EBUSY;
@@ -370,7 +390,7 @@ nv50_display_init(struct drm_device *dev)
        BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
        OUT_RING(evo, 0);
        FIRE_RING(evo);
-       if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
+       if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
                NV_ERROR(dev, "evo pushbuf stalled\n");
 
        /* enable clock change interrupts. */
@@ -424,7 +444,7 @@ static int nv50_display_disable(struct drm_device *dev)
                        continue;
 
                nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
-               if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
+               if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) {
                        NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
                                      "0x%08x\n", mask, mask);
                        NV_ERROR(dev, "0x610024 = 0x%08x\n",
@@ -434,14 +454,14 @@ static int nv50_display_disable(struct drm_device *dev)
 
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
        nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
-       if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
+       if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
                NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
                NV_ERROR(dev, "0x610200 = 0x%08x\n",
                          nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
        }
 
        for (i = 0; i < 3; i++) {
-               if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
+               if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
                             NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
                        NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
                        NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
@@ -710,7 +730,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
                or = i;
        }
 
-       for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+       for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
                if (dev_priv->chipset  < 0x90 ||
                    dev_priv->chipset == 0x92 ||
                    dev_priv->chipset == 0xa0)
@@ -841,7 +861,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
                or = i;
        }
 
-       for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+       for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
                if (dev_priv->chipset  < 0x90 ||
                    dev_priv->chipset == 0x92 ||
                    dev_priv->chipset == 0xa0)
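
Much of the remaining noise in this file is another mechanical signature change: nv_wait() now takes the drm_device as its first argument. Its contract is unchanged, judging by the call sites above: poll a register until the masked value matches, returning false on timeout. In sketch form, where reg, mask and val are placeholders rather than names from this patch:

	/* poll until (nv_rd32(dev, reg) & mask) == val, false on timeout */
	if (!nv_wait(dev, reg, mask, val))
		NV_ERROR(dev, "timeout waiting on 0x%08x\n", reg);
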
index 32611bd30e6db6524a44588f06602c33266fb4bc..cd1988b15d2c9f3c2318bd6eba07bf935e3086eb 100644
@@ -20,6 +20,7 @@ nv50_fb_init(struct drm_device *dev)
        case 0x50:
                nv_wr32(dev, 0x100c90, 0x0707ff);
                break;
+       case 0xa3:
        case 0xa5:
        case 0xa8:
                nv_wr32(dev, 0x100c90, 0x0d0fff);
@@ -36,3 +37,42 @@ void
 nv50_fb_takedown(struct drm_device *dev)
 {
 }
+
+void
+nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 trap[6], idx, chinst;
+       int i, ch;
+
+       idx = nv_rd32(dev, 0x100c90);
+       if (!(idx & 0x80000000))
+               return;
+       idx &= 0x00ffffff;
+
+       for (i = 0; i < 6; i++) {
+               nv_wr32(dev, 0x100c90, idx | i << 24);
+               trap[i] = nv_rd32(dev, 0x100c94);
+       }
+       nv_wr32(dev, 0x100c90, idx | 0x80000000);
+
+       if (!display)
+               return;
+
+       chinst = (trap[2] << 16) | trap[1];
+       for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+               struct nouveau_channel *chan = dev_priv->fifos[ch];
+
+               if (!chan || !chan->ramin)
+                       continue;
+
+               if (chinst == chan->ramin->vinst >> 12)
+                       break;
+       }
+
+       NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
+                    "channel %d (0x%08x)\n",
+               name, (trap[5] & 0x100 ? "read" : "write"),
+               trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
+               trap[0], ch, chinst);
+}
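
nv50_fb_vm_trap() is new here: it reads the six VM trap status words through the 0x100c90 indirection window, writes bit 31 back (presumably to re-arm the trap), matches the trapped instance against the live channels, and optionally prints a decoded report. This patch only adds the helper; a caller in a fault or interrupt path would plausibly look like the following, which is illustrative and not taken from this patch:

	nv50_fb_vm_trap(dev, 1 /* print the decoded report */, "PGRAPH");
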
index 6bf025c6fc6fc14b5fff7a94938a20cc2426a102..6dcf048eddbc7130b753b0e64f6de65d7f10887d 100644
@@ -1,6 +1,7 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
 
 void
@@ -193,7 +194,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
+       ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
+       nouveau_gpuobj_ref(NULL, &eng2d);
        if (ret)
                return ret;
 
index fb0281ae8f90569508711a5edd275cea064da6df..a46a961102f39052832856cac762e3cfe8ae1f88 100644
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 
 static void
 nv50_fifo_playlist_update(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-       struct nouveau_gpuobj_ref *cur;
+       struct nouveau_gpuobj *cur;
        int i, nr;
 
        NV_DEBUG(dev, "\n");
@@ -43,12 +44,14 @@ nv50_fifo_playlist_update(struct drm_device *dev)
 
        /* We never schedule channel 0 or 127 */
        for (i = 1, nr = 0; i < 127; i++) {
-               if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
-                       nv_wo32(dev, cur->gpuobj, nr++, i);
+               if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+                       nv_wo32(cur, (nr * 4), i);
+                       nr++;
+               }
        }
        dev_priv->engine.instmem.flush(dev);
 
-       nv_wr32(dev, 0x32f4, cur->instance >> 12);
+       nv_wr32(dev, 0x32f4, cur->vinst >> 12);
        nv_wr32(dev, 0x32ec, nr);
        nv_wr32(dev, 0x2500, 0x101);
 }
@@ -63,9 +66,9 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel)
        NV_DEBUG(dev, "ch%d\n", channel);
 
        if (dev_priv->chipset == 0x50)
-               inst = chan->ramfc->instance >> 12;
+               inst = chan->ramfc->vinst >> 12;
        else
-               inst = chan->ramfc->instance >> 8;
+               inst = chan->ramfc->vinst >> 8;
 
        nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
                     NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
@@ -163,19 +166,19 @@ nv50_fifo_init(struct drm_device *dev)
                goto just_reset;
        }
 
-       ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
-                                    NVOBJ_FLAG_ZERO_ALLOC,
-                                    &pfifo->playlist[0]);
+       ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
+                                NVOBJ_FLAG_ZERO_ALLOC,
+                                &pfifo->playlist[0]);
        if (ret) {
                NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
                return ret;
        }
 
-       ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
-                                    NVOBJ_FLAG_ZERO_ALLOC,
-                                    &pfifo->playlist[1]);
+       ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
+                                NVOBJ_FLAG_ZERO_ALLOC,
+                                &pfifo->playlist[1]);
        if (ret) {
-               nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
+               nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
                NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
                return ret;
        }
@@ -203,8 +206,8 @@ nv50_fifo_takedown(struct drm_device *dev)
        if (!pfifo->playlist[0])
                return;
 
-       nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
-       nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]);
+       nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
+       nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
 }
 
 int
@@ -226,59 +229,54 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
        NV_DEBUG(dev, "ch%d\n", chan->id);
 
        if (dev_priv->chipset == 0x50) {
-               uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
-               uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
-
-               ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
-                                             0x100, NVOBJ_FLAG_ZERO_ALLOC |
-                                             NVOBJ_FLAG_ZERO_FREE, &ramfc,
+               ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
+                                             chan->ramin->vinst, 0x100,
+                                             NVOBJ_FLAG_ZERO_ALLOC |
+                                             NVOBJ_FLAG_ZERO_FREE,
                                              &chan->ramfc);
                if (ret)
                        return ret;
 
-               ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
-                                             ramin_voffset + 0x0400, 4096,
-                                             0, NULL, &chan->cache);
+               ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
+                                             chan->ramin->vinst + 0x0400,
+                                             4096, 0, &chan->cache);
                if (ret)
                        return ret;
        } else {
-               ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
-                                            NVOBJ_FLAG_ZERO_ALLOC |
-                                            NVOBJ_FLAG_ZERO_FREE,
-                                            &chan->ramfc);
+               ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
+                                        NVOBJ_FLAG_ZERO_ALLOC |
+                                        NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
                if (ret)
                        return ret;
-               ramfc = chan->ramfc->gpuobj;
 
-               ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
-                                            0, &chan->cache);
+               ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
+                                        0, &chan->cache);
                if (ret)
                        return ret;
        }
+       ramfc = chan->ramfc;
 
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-       nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
-       nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ |
-                                   (4 << 24) /* SEARCH_FULL */ |
-                                   (chan->ramht->instance >> 4));
-       nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
-       nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
-       nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
-       nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
-       nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
-       nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
-       nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
-                                   chan->dma.ib_base * 4);
-       nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
+       nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
+       nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+                            (4 << 24) /* SEARCH_FULL */ |
+                            (chan->ramht->gpuobj->cinst >> 4));
+       nv_wo32(ramfc, 0x44, 0x2101ffff);
+       nv_wo32(ramfc, 0x60, 0x7fffffff);
+       nv_wo32(ramfc, 0x40, 0x00000000);
+       nv_wo32(ramfc, 0x7c, 0x30000001);
+       nv_wo32(ramfc, 0x78, 0x00000000);
+       nv_wo32(ramfc, 0x3c, 0x403f6078);
+       nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
+       nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
 
        if (dev_priv->chipset != 0x50) {
-               nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
-               nv_wo32(dev, chan->ramin->gpuobj, 1,
-                                               chan->ramfc->instance >> 8);
+               nv_wo32(chan->ramin, 0, chan->id);
+               nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
 
-               nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
-               nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
+               nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
+               nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
        }
 
        dev_priv->engine.instmem.flush(dev);
@@ -293,12 +291,13 @@ void
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
        struct drm_device *dev = chan->dev;
-       struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
+       struct nouveau_gpuobj *ramfc = NULL;
 
        NV_DEBUG(dev, "ch%d\n", chan->id);
 
        /* This will ensure the channel is seen as disabled. */
-       chan->ramfc = NULL;
+       nouveau_gpuobj_ref(chan->ramfc, &ramfc);
+       nouveau_gpuobj_ref(NULL, &chan->ramfc);
        nv50_fifo_channel_disable(dev, chan->id);
 
        /* Dummy channel, also used on ch 127 */
@@ -306,8 +305,8 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
                nv50_fifo_channel_disable(dev, 127);
        nv50_fifo_playlist_update(dev);
 
-       nouveau_gpuobj_ref_del(dev, &ramfc);
-       nouveau_gpuobj_ref_del(dev, &chan->cache);
+       nouveau_gpuobj_ref(NULL, &ramfc);
+       nouveau_gpuobj_ref(NULL, &chan->cache);
 }
 
 int
@@ -315,63 +314,63 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 {
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
-       struct nouveau_gpuobj *cache = chan->cache->gpuobj;
+       struct nouveau_gpuobj *ramfc = chan->ramfc;
+       struct nouveau_gpuobj *cache = chan->cache;
        int ptr, cnt;
 
        NV_DEBUG(dev, "ch%d\n", chan->id);
 
-       nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
-       nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
-       nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
-       nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
-       nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
-       nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
-       nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
-       nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
-       nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
-       nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
-       nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
-       nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
-       nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
-       nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
-       nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
-       nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
-       nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
-       nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
-       nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
-       nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
-       nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
-       nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
-       nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
-       nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
-       nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
-       nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
-       nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
-       nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
-       nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
-       nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
-       nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
-       nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
-       nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
-
-       cnt = nv_ro32(dev, ramfc, 0x84/4);
+       nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
+       nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
+       nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
+       nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
+       nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
+       nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
+       nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
+       nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
+       nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
+       nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
+       nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
+       nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
+       nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
+       nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
+       nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
+       nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
+       nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
+       nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
+       nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
+       nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
+       nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
+       nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
+       nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
+       nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
+       nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
+       nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
+       nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
+       nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
+       nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
+       nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
+       nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
+       nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
+       nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
+
+       cnt = nv_ro32(ramfc, 0x84);
        for (ptr = 0; ptr < cnt; ptr++) {
                nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
-                       nv_ro32(dev, cache, (ptr * 2) + 0));
+                       nv_ro32(cache, (ptr * 8) + 0));
                nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
-                       nv_ro32(dev, cache, (ptr * 2) + 1));
+                       nv_ro32(cache, (ptr * 8) + 4));
        }
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 
        /* guessing that all the 0x34xx regs aren't on NV50 */
        if (dev_priv->chipset != 0x50) {
-               nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
-               nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
-               nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
-               nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
-               nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
+               nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
+               nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
+               nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
+               nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
+               nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
        }
 
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
@@ -399,62 +398,63 @@ nv50_fifo_unload_context(struct drm_device *dev)
                return -EINVAL;
        }
        NV_DEBUG(dev, "ch%d\n", chan->id);
-       ramfc = chan->ramfc->gpuobj;
-       cache = chan->cache->gpuobj;
-
-       nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
-       nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
-       nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
-       nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
-       nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
-       nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
-       nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
-       nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
-       nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
-       nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
-       nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
-       nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
-       nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
-       nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
-       nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
-       nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
-       nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
-       nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
-       nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
-       nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
-       nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
-       nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
-       nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
-       nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
-       nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
-       nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
-       nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
-       nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
-       nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
-       nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
-       nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
-       nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
-       nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
+       ramfc = chan->ramfc;
+       cache = chan->cache;
+
+       nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
+       nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
+       nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
+       nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
+       nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
+       nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
+       nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
+       nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
+       nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
+       nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
+       nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
+       nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
+       nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
+       nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
+       nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
+       nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
+       nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
+       nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
+       nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
+       nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
+       nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
+       nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
+       nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
+       nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
+       nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
+       nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
+       nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
+       nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
+       nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
+       nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
+       nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
+       nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
+       nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
 
        put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
        get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
        ptr = 0;
        while (put != get) {
-               nv_wo32(dev, cache, ptr++,
-                           nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
-               nv_wo32(dev, cache, ptr++,
-                           nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
+               nv_wo32(cache, ptr + 0,
+                       nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
+               nv_wo32(cache, ptr + 4,
+                       nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
                get = (get + 1) & 0x1ff;
+               ptr += 8;
        }
 
        /* guessing that all the 0x34xx regs aren't on NV50 */
        if (dev_priv->chipset != 0x50) {
-               nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
-               nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
-               nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
-               nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
-               nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
-               nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
+               nv_wo32(ramfc, 0x84, ptr >> 3);
+               nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
+               nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
+               nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
+               nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
+               nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
        }
 
        dev_priv->engine.instmem.flush(dev);
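
One subtle unit change in the load/unload hunks above: the CACHE1 shadow in RAMFC now stores each saved method/data pair at an 8-byte stride (method at +0, data at +4), so the byte cursor converts to an entry count as ptr >> 3 where the old word-indexed code used ptr >> 1. A sketch of the layout the new code assumes:

	/* entry i of the saved puller state, byte-addressed layout */
	nv_wo32(cache, (i * 8) + 0, method);
	nv_wo32(cache, (i * 8) + 4, data);
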
index 1413028e15808b8f9ea476bd7e0d81f948801b40..cbf5ae2f67d4a110de1bb9d9c129224fbcdd6c3c 100644
@@ -27,7 +27,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
-
+#include "nouveau_ramht.h"
 #include "nouveau_grctx.h"
 
 static void
@@ -181,7 +181,7 @@ nv50_graph_channel(struct drm_device *dev)
        /* Be sure we're not in the middle of a context switch or bad things
         * will happen, such as unloading the wrong pgraph context.
         */
-       if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+       if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
                NV_ERROR(dev, "Ctxprog is still running\n");
 
        inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
@@ -192,7 +192,7 @@ nv50_graph_channel(struct drm_device *dev)
        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                struct nouveau_channel *chan = dev_priv->fifos[i];
 
-               if (chan && chan->ramin && chan->ramin->instance == inst)
+               if (chan && chan->ramin && chan->ramin->vinst == inst)
                        return chan;
        }
 
@@ -204,36 +204,34 @@ nv50_graph_create_context(struct nouveau_channel *chan)
 {
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
-       struct nouveau_gpuobj *obj;
+       struct nouveau_gpuobj *ramin = chan->ramin;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        struct nouveau_grctx ctx = {};
        int hdr, ret;
 
        NV_DEBUG(dev, "ch%d\n", chan->id);
 
-       ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
-                                    0x1000, NVOBJ_FLAG_ZERO_ALLOC |
-                                    NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+       ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+                                NVOBJ_FLAG_ZERO_ALLOC |
+                                NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
        if (ret)
                return ret;
-       obj = chan->ramin_grctx->gpuobj;
 
        hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
-       nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
-       nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
-                                          pgraph->grctx_size - 1);
-       nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
-       nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
-       nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
-       nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
+       nv_wo32(ramin, hdr + 0x00, 0x00190002);
+       nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
+                                  pgraph->grctx_size - 1);
+       nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
+       nv_wo32(ramin, hdr + 0x0c, 0);
+       nv_wo32(ramin, hdr + 0x10, 0);
+       nv_wo32(ramin, hdr + 0x14, 0x00010000);
 
        ctx.dev = chan->dev;
        ctx.mode = NOUVEAU_GRCTX_VALS;
-       ctx.data = obj;
+       ctx.data = chan->ramin_grctx;
        nv50_grctx_init(&ctx);
 
-       nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12);
+       nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
 
        dev_priv->engine.instmem.flush(dev);
        return 0;
@@ -248,14 +246,14 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
 
        NV_DEBUG(dev, "ch%d\n", chan->id);
 
-       if (!chan->ramin || !chan->ramin->gpuobj)
+       if (!chan->ramin)
                return;
 
        for (i = hdr; i < hdr + 24; i += 4)
-               nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
+               nv_wo32(chan->ramin, i, 0);
        dev_priv->engine.instmem.flush(dev);
 
-       nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+       nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
 }
 
 static int
@@ -282,7 +280,7 @@ nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
 int
 nv50_graph_load_context(struct nouveau_channel *chan)
 {
-       uint32_t inst = chan->ramin->instance >> 12;
+       uint32_t inst = chan->ramin->vinst >> 12;
 
        NV_DEBUG(chan->dev, "ch%d\n", chan->id);
        return nv50_graph_do_load_context(chan->dev, inst);
@@ -327,15 +325,16 @@ static int
 nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
                           int mthd, uint32_t data)
 {
-       struct nouveau_gpuobj_ref *ref = NULL;
+       struct nouveau_gpuobj *gpuobj;
 
-       if (nouveau_gpuobj_ref_find(chan, data, &ref))
+       gpuobj = nouveau_ramht_find(chan, data);
+       if (!gpuobj)
                return -ENOENT;
 
-       if (nouveau_notifier_offset(ref->gpuobj, NULL))
+       if (nouveau_notifier_offset(gpuobj, NULL))
                return -EINVAL;
 
-       chan->nvsw.vblsem = ref->gpuobj;
+       chan->nvsw.vblsem = gpuobj;
        chan->nvsw.vblsem_offset = ~0;
        return 0;
 }
index 42a8fb20c1e6359d698107654b46ee015c7fb36a..336aab2a24a66ff030b7af629db43f53aaa9ef8e 100644
 #include "nouveau_drv.h"
 #include "nouveau_grctx.h"
 
+#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
+#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
+
 /*
  * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
  * the GPU itself that does context-switching, but it needs a special
@@ -182,6 +185,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
        case 0xa8:
        case 0xaa:
        case 0xac:
+       case 0xaf:
                break;
        default:
                NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
@@ -267,6 +271,9 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
  * registers to save/restore and the default values for them.
  */
 
+static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
+
 static void
 nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 {
@@ -286,7 +293,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
                gr_def(ctx, 0x400840, 0xffe806a8);
        }
        gr_def(ctx, 0x400844, 0x00000002);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+       if (IS_NVA3F(dev_priv->chipset))
                gr_def(ctx, 0x400894, 0x00001000);
        gr_def(ctx, 0x4008e8, 0x00000003);
        gr_def(ctx, 0x4008ec, 0x00001000);
@@ -299,13 +306,15 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
        if (dev_priv->chipset >= 0xa0)
                cp_ctx(ctx, 0x400b00, 0x1);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+       if (IS_NVA3F(dev_priv->chipset)) {
                cp_ctx(ctx, 0x400b10, 0x1);
                gr_def(ctx, 0x400b10, 0x0001629d);
                cp_ctx(ctx, 0x400b20, 0x1);
                gr_def(ctx, 0x400b20, 0x0001629d);
        }
 
+       nv50_graph_construct_mmio_ddata(ctx);
+
        /* 0C00: VFETCH */
        cp_ctx(ctx, 0x400c08, 0x2);
        gr_def(ctx, 0x400c08, 0x0000fe0c);
@@ -314,7 +323,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
        if (dev_priv->chipset < 0xa0) {
                cp_ctx(ctx, 0x401008, 0x4);
                gr_def(ctx, 0x401014, 0x00001000);
-       } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
+       } else if (!IS_NVA3F(dev_priv->chipset)) {
                cp_ctx(ctx, 0x401008, 0x5);
                gr_def(ctx, 0x401018, 0x00001000);
        } else {
@@ -368,10 +377,13 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
        case 0xa3:
        case 0xa5:
        case 0xa8:
+       case 0xaf:
                gr_def(ctx, 0x401c00, 0x142500df);
                break;
        }
 
+       /* 2000 */
+
        /* 2400 */
        cp_ctx(ctx, 0x402400, 0x1);
        if (dev_priv->chipset == 0x50)
@@ -380,12 +392,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
                cp_ctx(ctx, 0x402408, 0x2);
        gr_def(ctx, 0x402408, 0x00000600);
 
-       /* 2800 */
+       /* 2800: CSCHED */
        cp_ctx(ctx, 0x402800, 0x1);
        if (dev_priv->chipset == 0x50)
                gr_def(ctx, 0x402800, 0x00000006);
 
-       /* 2C00 */
+       /* 2C00: ZCULL */
        cp_ctx(ctx, 0x402c08, 0x6);
        if (dev_priv->chipset != 0x50)
                gr_def(ctx, 0x402c14, 0x01000000);
@@ -396,23 +408,23 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
                cp_ctx(ctx, 0x402ca0, 0x2);
        if (dev_priv->chipset < 0xa0)
                gr_def(ctx, 0x402ca0, 0x00000400);
-       else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+       else if (!IS_NVA3F(dev_priv->chipset))
                gr_def(ctx, 0x402ca0, 0x00000800);
        else
                gr_def(ctx, 0x402ca0, 0x00000400);
        cp_ctx(ctx, 0x402cac, 0x4);
 
-       /* 3000 */
+       /* 3000: ENG2D */
        cp_ctx(ctx, 0x403004, 0x1);
        gr_def(ctx, 0x403004, 0x00000001);
 
-       /* 3404 */
+       /* 3400 */
        if (dev_priv->chipset >= 0xa0) {
                cp_ctx(ctx, 0x403404, 0x1);
                gr_def(ctx, 0x403404, 0x00000001);
        }
 
-       /* 5000 */
+       /* 5000: CCACHE */
        cp_ctx(ctx, 0x405000, 0x1);
        switch (dev_priv->chipset) {
        case 0x50:
@@ -425,6 +437,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
        case 0xa8:
        case 0xaa:
        case 0xac:
+       case 0xaf:
                gr_def(ctx, 0x405000, 0x000e0080);
                break;
        case 0x86:
@@ -441,210 +454,6 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
        cp_ctx(ctx, 0x405024, 0x1);
        cp_ctx(ctx, 0x40502c, 0x1);
 
-       /* 5400 or maybe 4800 */
-       if (dev_priv->chipset == 0x50) {
-               offset = 0x405400;
-               cp_ctx(ctx, 0x405400, 0xea);
-       } else if (dev_priv->chipset < 0x94) {
-               offset = 0x405400;
-               cp_ctx(ctx, 0x405400, 0xcb);
-       } else if (dev_priv->chipset < 0xa0) {
-               offset = 0x405400;
-               cp_ctx(ctx, 0x405400, 0xcc);
-       } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               offset = 0x404800;
-               cp_ctx(ctx, 0x404800, 0xda);
-       } else {
-               offset = 0x405400;
-               cp_ctx(ctx, 0x405400, 0xd4);
-       }
-       gr_def(ctx, offset + 0x0c, 0x00000002);
-       gr_def(ctx, offset + 0x10, 0x00000001);
-       if (dev_priv->chipset >= 0x94)
-               offset += 4;
-       gr_def(ctx, offset + 0x1c, 0x00000001);
-       gr_def(ctx, offset + 0x20, 0x00000100);
-       gr_def(ctx, offset + 0x38, 0x00000002);
-       gr_def(ctx, offset + 0x3c, 0x00000001);
-       gr_def(ctx, offset + 0x40, 0x00000001);
-       gr_def(ctx, offset + 0x50, 0x00000001);
-       gr_def(ctx, offset + 0x54, 0x003fffff);
-       gr_def(ctx, offset + 0x58, 0x00001fff);
-       gr_def(ctx, offset + 0x60, 0x00000001);
-       gr_def(ctx, offset + 0x64, 0x00000001);
-       gr_def(ctx, offset + 0x6c, 0x00000001);
-       gr_def(ctx, offset + 0x70, 0x00000001);
-       gr_def(ctx, offset + 0x74, 0x00000001);
-       gr_def(ctx, offset + 0x78, 0x00000004);
-       gr_def(ctx, offset + 0x7c, 0x00000001);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               offset += 4;
-       gr_def(ctx, offset + 0x80, 0x00000001);
-       gr_def(ctx, offset + 0x84, 0x00000001);
-       gr_def(ctx, offset + 0x88, 0x00000007);
-       gr_def(ctx, offset + 0x8c, 0x00000001);
-       gr_def(ctx, offset + 0x90, 0x00000007);
-       gr_def(ctx, offset + 0x94, 0x00000001);
-       gr_def(ctx, offset + 0x98, 0x00000001);
-       gr_def(ctx, offset + 0x9c, 0x00000001);
-       if (dev_priv->chipset == 0x50) {
-                gr_def(ctx, offset + 0xb0, 0x00000001);
-                gr_def(ctx, offset + 0xb4, 0x00000001);
-                gr_def(ctx, offset + 0xbc, 0x00000001);
-                gr_def(ctx, offset + 0xc0, 0x0000000a);
-                gr_def(ctx, offset + 0xd0, 0x00000040);
-                gr_def(ctx, offset + 0xd8, 0x00000002);
-                gr_def(ctx, offset + 0xdc, 0x00000100);
-                gr_def(ctx, offset + 0xe0, 0x00000001);
-                gr_def(ctx, offset + 0xe4, 0x00000100);
-                gr_def(ctx, offset + 0x100, 0x00000001);
-                gr_def(ctx, offset + 0x124, 0x00000004);
-                gr_def(ctx, offset + 0x13c, 0x00000001);
-                gr_def(ctx, offset + 0x140, 0x00000100);
-                gr_def(ctx, offset + 0x148, 0x00000001);
-                gr_def(ctx, offset + 0x154, 0x00000100);
-                gr_def(ctx, offset + 0x158, 0x00000001);
-                gr_def(ctx, offset + 0x15c, 0x00000100);
-                gr_def(ctx, offset + 0x164, 0x00000001);
-                gr_def(ctx, offset + 0x170, 0x00000100);
-                gr_def(ctx, offset + 0x174, 0x00000001);
-                gr_def(ctx, offset + 0x17c, 0x00000001);
-                gr_def(ctx, offset + 0x188, 0x00000002);
-                gr_def(ctx, offset + 0x190, 0x00000001);
-                gr_def(ctx, offset + 0x198, 0x00000001);
-                gr_def(ctx, offset + 0x1ac, 0x00000003);
-                offset += 0xd0;
-       } else {
-               gr_def(ctx, offset + 0xb0, 0x00000001);
-               gr_def(ctx, offset + 0xb4, 0x00000100);
-               gr_def(ctx, offset + 0xbc, 0x00000001);
-               gr_def(ctx, offset + 0xc8, 0x00000100);
-               gr_def(ctx, offset + 0xcc, 0x00000001);
-               gr_def(ctx, offset + 0xd0, 0x00000100);
-               gr_def(ctx, offset + 0xd8, 0x00000001);
-               gr_def(ctx, offset + 0xe4, 0x00000100);
-       }
-       gr_def(ctx, offset + 0xf8, 0x00000004);
-       gr_def(ctx, offset + 0xfc, 0x00000070);
-       gr_def(ctx, offset + 0x100, 0x00000080);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               offset += 4;
-       gr_def(ctx, offset + 0x114, 0x0000000c);
-       if (dev_priv->chipset == 0x50)
-               offset -= 4;
-       gr_def(ctx, offset + 0x11c, 0x00000008);
-       gr_def(ctx, offset + 0x120, 0x00000014);
-       if (dev_priv->chipset == 0x50) {
-               gr_def(ctx, offset + 0x124, 0x00000026);
-               offset -= 0x18;
-       } else {
-               gr_def(ctx, offset + 0x128, 0x00000029);
-               gr_def(ctx, offset + 0x12c, 0x00000027);
-               gr_def(ctx, offset + 0x130, 0x00000026);
-               gr_def(ctx, offset + 0x134, 0x00000008);
-               gr_def(ctx, offset + 0x138, 0x00000004);
-               gr_def(ctx, offset + 0x13c, 0x00000027);
-       }
-       gr_def(ctx, offset + 0x148, 0x00000001);
-       gr_def(ctx, offset + 0x14c, 0x00000002);
-       gr_def(ctx, offset + 0x150, 0x00000003);
-       gr_def(ctx, offset + 0x154, 0x00000004);
-       gr_def(ctx, offset + 0x158, 0x00000005);
-       gr_def(ctx, offset + 0x15c, 0x00000006);
-       gr_def(ctx, offset + 0x160, 0x00000007);
-       gr_def(ctx, offset + 0x164, 0x00000001);
-       gr_def(ctx, offset + 0x1a8, 0x000000cf);
-       if (dev_priv->chipset == 0x50)
-               offset -= 4;
-       gr_def(ctx, offset + 0x1d8, 0x00000080);
-       gr_def(ctx, offset + 0x1dc, 0x00000004);
-       gr_def(ctx, offset + 0x1e0, 0x00000004);
-       if (dev_priv->chipset == 0x50)
-               offset -= 4;
-       else
-               gr_def(ctx, offset + 0x1e4, 0x00000003);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               gr_def(ctx, offset + 0x1ec, 0x00000003);
-               offset += 8;
-       }
-       gr_def(ctx, offset + 0x1e8, 0x00000001);
-       if (dev_priv->chipset == 0x50)
-               offset -= 4;
-       gr_def(ctx, offset + 0x1f4, 0x00000012);
-       gr_def(ctx, offset + 0x1f8, 0x00000010);
-       gr_def(ctx, offset + 0x1fc, 0x0000000c);
-       gr_def(ctx, offset + 0x200, 0x00000001);
-       gr_def(ctx, offset + 0x210, 0x00000004);
-       gr_def(ctx, offset + 0x214, 0x00000002);
-       gr_def(ctx, offset + 0x218, 0x00000004);
-       if (dev_priv->chipset >= 0xa0)
-               offset += 4;
-       gr_def(ctx, offset + 0x224, 0x003fffff);
-       gr_def(ctx, offset + 0x228, 0x00001fff);
-       if (dev_priv->chipset == 0x50)
-               offset -= 0x20;
-       else if (dev_priv->chipset >= 0xa0) {
-               gr_def(ctx, offset + 0x250, 0x00000001);
-               gr_def(ctx, offset + 0x254, 0x00000001);
-               gr_def(ctx, offset + 0x258, 0x00000002);
-               offset += 0x10;
-       }
-       gr_def(ctx, offset + 0x250, 0x00000004);
-       gr_def(ctx, offset + 0x254, 0x00000014);
-       gr_def(ctx, offset + 0x258, 0x00000001);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               offset += 4;
-       gr_def(ctx, offset + 0x264, 0x00000002);
-       if (dev_priv->chipset >= 0xa0)
-               offset += 8;
-       gr_def(ctx, offset + 0x270, 0x00000001);
-       gr_def(ctx, offset + 0x278, 0x00000002);
-       gr_def(ctx, offset + 0x27c, 0x00001000);
-       if (dev_priv->chipset == 0x50)
-               offset -= 0xc;
-       else {
-               gr_def(ctx, offset + 0x280, 0x00000e00);
-               gr_def(ctx, offset + 0x284, 0x00001000);
-               gr_def(ctx, offset + 0x288, 0x00001e00);
-       }
-       gr_def(ctx, offset + 0x290, 0x00000001);
-       gr_def(ctx, offset + 0x294, 0x00000001);
-       gr_def(ctx, offset + 0x298, 0x00000001);
-       gr_def(ctx, offset + 0x29c, 0x00000001);
-       gr_def(ctx, offset + 0x2a0, 0x00000001);
-       gr_def(ctx, offset + 0x2b0, 0x00000200);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               gr_def(ctx, offset + 0x2b4, 0x00000200);
-               offset += 4;
-       }
-       if (dev_priv->chipset < 0xa0) {
-               gr_def(ctx, offset + 0x2b8, 0x00000001);
-               gr_def(ctx, offset + 0x2bc, 0x00000070);
-               gr_def(ctx, offset + 0x2c0, 0x00000080);
-               gr_def(ctx, offset + 0x2cc, 0x00000001);
-               gr_def(ctx, offset + 0x2d0, 0x00000070);
-               gr_def(ctx, offset + 0x2d4, 0x00000080);
-       } else {
-               gr_def(ctx, offset + 0x2b8, 0x00000001);
-               gr_def(ctx, offset + 0x2bc, 0x000000f0);
-               gr_def(ctx, offset + 0x2c0, 0x000000ff);
-               gr_def(ctx, offset + 0x2cc, 0x00000001);
-               gr_def(ctx, offset + 0x2d0, 0x000000f0);
-               gr_def(ctx, offset + 0x2d4, 0x000000ff);
-               gr_def(ctx, offset + 0x2dc, 0x00000009);
-               offset += 4;
-       }
-       gr_def(ctx, offset + 0x2e4, 0x00000001);
-       gr_def(ctx, offset + 0x2e8, 0x000000cf);
-       gr_def(ctx, offset + 0x2f0, 0x00000001);
-       gr_def(ctx, offset + 0x300, 0x000000cf);
-       gr_def(ctx, offset + 0x308, 0x00000002);
-       gr_def(ctx, offset + 0x310, 0x00000001);
-       gr_def(ctx, offset + 0x318, 0x00000001);
-       gr_def(ctx, offset + 0x320, 0x000000cf);
-       gr_def(ctx, offset + 0x324, 0x000000cf);
-       gr_def(ctx, offset + 0x328, 0x00000001);
-
        /* 6000? */
        if (dev_priv->chipset == 0x50)
                cp_ctx(ctx, 0x4063e0, 0x1);
@@ -661,7 +470,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
                        gr_def(ctx, 0x406818, 0x00000f80);
                else
                        gr_def(ctx, 0x406818, 0x00001f80);
-               if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+               if (IS_NVA3F(dev_priv->chipset))
                        gr_def(ctx, 0x40681c, 0x00000030);
                cp_ctx(ctx, 0x406830, 0x3);
        }
@@ -706,7 +515,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
                        if (dev_priv->chipset < 0xa0)
                                cp_ctx(ctx, 0x407094 + (i<<8), 1);
-                       else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+                       else if (!IS_NVA3F(dev_priv->chipset))
                                cp_ctx(ctx, 0x407094 + (i<<8), 3);
                        else {
                                cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -799,6 +608,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
                                case 0xa8:
                                case 0xaa:
                                case 0xac:
+                               case 0xaf:
                                        gr_def(ctx, offset + 0x1c, 0x300c0000);
                                        break;
                                }
@@ -825,7 +635,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
                                gr_def(ctx, base + 0x304, 0x00007070);
                        else if (dev_priv->chipset < 0xa0)
                                gr_def(ctx, base + 0x304, 0x00027070);
-                       else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+                       else if (!IS_NVA3F(dev_priv->chipset))
                                gr_def(ctx, base + 0x304, 0x01127070);
                        else
                                gr_def(ctx, base + 0x304, 0x05127070);
@@ -849,7 +659,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
                        if (dev_priv->chipset < 0xa0) {
                                cp_ctx(ctx, base + 0x340, 9);
                                offset = base + 0x340;
-                       } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+                       } else if (!IS_NVA3F(dev_priv->chipset)) {
                                cp_ctx(ctx, base + 0x33c, 0xb);
                                offset = base + 0x344;
                        } else {
@@ -880,7 +690,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
                        gr_def(ctx, offset + 0x0, 0x000001f0);
                        gr_def(ctx, offset + 0x4, 0x00000001);
                        gr_def(ctx, offset + 0x8, 0x00000003);
-                       if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
+                       if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
                                gr_def(ctx, offset + 0xc, 0x00008000);
                        gr_def(ctx, offset + 0x14, 0x00039e00);
                        cp_ctx(ctx, offset + 0x1c, 2);
@@ -892,7 +702,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
                        if (dev_priv->chipset >= 0xa0) {
                                cp_ctx(ctx, base + 0x54c, 2);
-                               if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+                               if (!IS_NVA3F(dev_priv->chipset))
                                        gr_def(ctx, base + 0x54c, 0x003fe006);
                                else
                                        gr_def(ctx, base + 0x54c, 0x003fe007);
@@ -948,6 +758,336 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
        }
 }
 
+static void
+dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
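+       /* nonzero values are only written in VALS mode (the area presumably
+        * starts out zeroed); ctxvals_pos advances either way, so PROG mode
+        * sizes the area identically */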
+       int i;
+       if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+               for (i = 0; i < num; i++)
+                       nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+       ctx->ctxvals_pos += num;
+}
+
+static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
+{
+       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+       int base, num;
+       base = ctx->ctxvals_pos;
+
+       /* tesla state */
+       dd_emit(ctx, 1, 0);     /* 00000001 UNK0F90 */
+       dd_emit(ctx, 1, 0);     /* 00000001 UNK135C */
+
+       /* SRC_TIC state */
+       dd_emit(ctx, 1, 0);     /* 00000007 SRC_TILE_MODE_Z */
+       dd_emit(ctx, 1, 2);     /* 00000007 SRC_TILE_MODE_Y */
+       dd_emit(ctx, 1, 1);     /* 00000001 SRC_LINEAR #1 */
+       dd_emit(ctx, 1, 0);     /* 000000ff SRC_ADDRESS_HIGH */
+       dd_emit(ctx, 1, 0);     /* 00000001 SRC_SRGB */
+       if (dev_priv->chipset >= 0x94)
+               dd_emit(ctx, 1, 0);     /* 00000003 eng2d UNK0258 */
+       dd_emit(ctx, 1, 1);     /* 00000fff SRC_DEPTH */
+       dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */
+
+       /* turing state */
+       dd_emit(ctx, 1, 0);             /* 0000000f TEXTURES_LOG2 */
+       dd_emit(ctx, 1, 0);             /* 0000000f SAMPLERS_LOG2 */
+       dd_emit(ctx, 1, 0);             /* 000000ff CB_DEF_ADDRESS_HIGH */
+       dd_emit(ctx, 1, 0);             /* ffffffff CB_DEF_ADDRESS_LOW */
+       dd_emit(ctx, 1, 0);             /* ffffffff SHARED_SIZE */
+       dd_emit(ctx, 1, 2);             /* ffffffff REG_MODE */
+       dd_emit(ctx, 1, 1);             /* 0000ffff BLOCK_ALLOC_THREADS */
+       dd_emit(ctx, 1, 1);             /* 00000001 LANES32 */
+       dd_emit(ctx, 1, 0);             /* 000000ff UNK370 */
+       dd_emit(ctx, 1, 0);             /* 000000ff USER_PARAM_UNK */
+       dd_emit(ctx, 1, 0);             /* 000000ff USER_PARAM_COUNT */
+       dd_emit(ctx, 1, 1);             /* 000000ff UNK384 bits 8-15 */
+       dd_emit(ctx, 1, 0x3fffff);      /* 003fffff TIC_LIMIT */
+       dd_emit(ctx, 1, 0x1fff);        /* 000fffff TSC_LIMIT */
+       dd_emit(ctx, 1, 0);             /* 0000ffff CB_ADDR_INDEX */
+       dd_emit(ctx, 1, 1);             /* 000007ff BLOCKDIM_X */
+       dd_emit(ctx, 1, 1);             /* 000007ff BLOCKDIM_XMY */
+       dd_emit(ctx, 1, 0);             /* 00000001 BLOCKDIM_XMY_OVERFLOW */
+       dd_emit(ctx, 1, 1);             /* 0003ffff BLOCKDIM_XMYMZ */
+       dd_emit(ctx, 1, 1);             /* 000007ff BLOCKDIM_Y */
+       dd_emit(ctx, 1, 1);             /* 0000007f BLOCKDIM_Z */
+       dd_emit(ctx, 1, 4);             /* 000000ff CP_REG_ALLOC_TEMP */
+       dd_emit(ctx, 1, 1);             /* 00000001 BLOCKDIM_DIRTY */
+       if (IS_NVA3F(dev_priv->chipset))
+               dd_emit(ctx, 1, 0);     /* 00000003 UNK03E8 */
+       dd_emit(ctx, 1, 1);             /* 0000007f BLOCK_ALLOC_HALFWARPS */
+       dd_emit(ctx, 1, 1);             /* 00000007 LOCAL_WARPS_NO_CLAMP */
+       dd_emit(ctx, 1, 7);             /* 00000007 LOCAL_WARPS_LOG_ALLOC */
+       dd_emit(ctx, 1, 1);             /* 00000007 STACK_WARPS_NO_CLAMP */
+       dd_emit(ctx, 1, 7);             /* 00000007 STACK_WARPS_LOG_ALLOC */
+       dd_emit(ctx, 1, 1);             /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */
+       dd_emit(ctx, 1, 1);             /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */
+       dd_emit(ctx, 1, 1);             /* 000007ff BLOCK_ALLOC_THREADS */
+
+       /* compat 2d state */
+       if (dev_priv->chipset == 0x50) {
+               dd_emit(ctx, 4, 0);             /* 0000ffff clip X, Y, W, H */
+
+               dd_emit(ctx, 1, 1);             /* ffffffff chroma COLOR_FORMAT */
+
+               dd_emit(ctx, 1, 1);             /* ffffffff pattern COLOR_FORMAT */
+               dd_emit(ctx, 1, 0);             /* ffffffff pattern SHAPE */
+               dd_emit(ctx, 1, 1);             /* ffffffff pattern PATTERN_SELECT */
+
+               dd_emit(ctx, 1, 0xa);           /* ffffffff surf2d SRC_FORMAT */
+               dd_emit(ctx, 1, 0);             /* ffffffff surf2d DMA_SRC */
+               dd_emit(ctx, 1, 0);             /* 000000ff surf2d SRC_ADDRESS_HIGH */
+               dd_emit(ctx, 1, 0);             /* ffffffff surf2d SRC_ADDRESS_LOW */
+               dd_emit(ctx, 1, 0x40);          /* 0000ffff surf2d SRC_PITCH */
+               dd_emit(ctx, 1, 0);             /* 0000000f surf2d SRC_TILE_MODE_Z */
+               dd_emit(ctx, 1, 2);             /* 0000000f surf2d SRC_TILE_MODE_Y */
+               dd_emit(ctx, 1, 0x100);         /* ffffffff surf2d SRC_HEIGHT */
+               dd_emit(ctx, 1, 1);             /* 00000001 surf2d SRC_LINEAR */
+               dd_emit(ctx, 1, 0x100);         /* ffffffff surf2d SRC_WIDTH */
+
+               dd_emit(ctx, 1, 0);             /* 0000ffff gdirect CLIP_B_X */
+               dd_emit(ctx, 1, 0);             /* 0000ffff gdirect CLIP_B_Y */
+               dd_emit(ctx, 1, 0);             /* 0000ffff gdirect CLIP_C_X */
+               dd_emit(ctx, 1, 0);             /* 0000ffff gdirect CLIP_C_Y */
+               dd_emit(ctx, 1, 0);             /* 0000ffff gdirect CLIP_D_X */
+               dd_emit(ctx, 1, 0);             /* 0000ffff gdirect CLIP_D_Y */
+               dd_emit(ctx, 1, 1);             /* ffffffff gdirect COLOR_FORMAT */
+               dd_emit(ctx, 1, 0);             /* ffffffff gdirect OPERATION */
+               dd_emit(ctx, 1, 0);             /* 0000ffff gdirect POINT_X */
+               dd_emit(ctx, 1, 0);             /* 0000ffff gdirect POINT_Y */
+
+               dd_emit(ctx, 1, 0);             /* 0000ffff blit SRC_Y */
+               dd_emit(ctx, 1, 0);             /* ffffffff blit OPERATION */
+
+               dd_emit(ctx, 1, 0);             /* ffffffff ifc OPERATION */
+
+               dd_emit(ctx, 1, 0);             /* ffffffff iifc INDEX_FORMAT */
+               dd_emit(ctx, 1, 0);             /* ffffffff iifc LUT_OFFSET */
+               dd_emit(ctx, 1, 4);             /* ffffffff iifc COLOR_FORMAT */
+               dd_emit(ctx, 1, 0);             /* ffffffff iifc OPERATION */
+       }
+
+       /* m2mf state */
+       dd_emit(ctx, 1, 0);             /* ffffffff m2mf LINE_COUNT */
+       dd_emit(ctx, 1, 0);             /* ffffffff m2mf LINE_LENGTH_IN */
+       dd_emit(ctx, 2, 0);             /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */
+       dd_emit(ctx, 1, 1);             /* ffffffff m2mf TILING_DEPTH_OUT */
+       dd_emit(ctx, 1, 0x100);         /* ffffffff m2mf TILING_HEIGHT_OUT */
+       dd_emit(ctx, 1, 0);             /* ffffffff m2mf TILING_POSITION_OUT_Z */
+       dd_emit(ctx, 1, 1);             /* 00000001 m2mf LINEAR_OUT */
+       dd_emit(ctx, 2, 0);             /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */
+       dd_emit(ctx, 1, 0x100);         /* ffffffff m2mf TILING_PITCH_OUT */
+       dd_emit(ctx, 1, 1);             /* ffffffff m2mf TILING_DEPTH_IN */
+       dd_emit(ctx, 1, 0x100);         /* ffffffff m2mf TILING_HEIGHT_IN */
+       dd_emit(ctx, 1, 0);             /* ffffffff m2mf TILING_POSITION_IN_Z */
+       dd_emit(ctx, 1, 1);             /* 00000001 m2mf LINEAR_IN */
+       dd_emit(ctx, 2, 0);             /* 0000ffff m2mf TILING_POSITION_IN_X, Y */
+       dd_emit(ctx, 1, 0x100);         /* ffffffff m2mf TILING_PITCH_IN */
+
+       /* more compat 2d state */
+       if (dev_priv->chipset == 0x50) {
+               dd_emit(ctx, 1, 1);             /* ffffffff line COLOR_FORMAT */
+               dd_emit(ctx, 1, 0);             /* ffffffff line OPERATION */
+
+               dd_emit(ctx, 1, 1);             /* ffffffff triangle COLOR_FORMAT */
+               dd_emit(ctx, 1, 0);             /* ffffffff triangle OPERATION */
+
+               dd_emit(ctx, 1, 0);             /* 0000000f sifm TILE_MODE_Z */
+               dd_emit(ctx, 1, 2);             /* 0000000f sifm TILE_MODE_Y */
+               dd_emit(ctx, 1, 0);             /* 000000ff sifm FORMAT_FILTER */
+               dd_emit(ctx, 1, 1);             /* 000000ff sifm FORMAT_ORIGIN */
+               dd_emit(ctx, 1, 0);             /* 0000ffff sifm SRC_PITCH */
+               dd_emit(ctx, 1, 1);             /* 00000001 sifm SRC_LINEAR */
+               dd_emit(ctx, 1, 0);             /* 000000ff sifm SRC_OFFSET_HIGH */
+               dd_emit(ctx, 1, 0);             /* ffffffff sifm SRC_OFFSET */
+               dd_emit(ctx, 1, 0);             /* 0000ffff sifm SRC_HEIGHT */
+               dd_emit(ctx, 1, 0);             /* 0000ffff sifm SRC_WIDTH */
+               dd_emit(ctx, 1, 3);             /* ffffffff sifm COLOR_FORMAT */
+               dd_emit(ctx, 1, 0);             /* ffffffff sifm OPERATION */
+
+               dd_emit(ctx, 1, 0);             /* ffffffff sifc OPERATION */
+       }
+
+       /* tesla state */
+       dd_emit(ctx, 1, 0);             /* 0000000f GP_TEXTURES_LOG2 */
+       dd_emit(ctx, 1, 0);             /* 0000000f GP_SAMPLERS_LOG2 */
+       dd_emit(ctx, 1, 0);             /* 000000ff */
+       dd_emit(ctx, 1, 0);             /* ffffffff */
+       dd_emit(ctx, 1, 4);             /* 000000ff UNK12B0_0 */
+       dd_emit(ctx, 1, 0x70);          /* 000000ff UNK12B0_1 */
+       dd_emit(ctx, 1, 0x80);          /* 000000ff UNK12B0_3 */
+       dd_emit(ctx, 1, 0);             /* 000000ff UNK12B0_2 */
+       dd_emit(ctx, 1, 0);             /* 0000000f FP_TEXTURES_LOG2 */
+       dd_emit(ctx, 1, 0);             /* 0000000f FP_SAMPLERS_LOG2 */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               dd_emit(ctx, 1, 0);     /* ffffffff */
+               dd_emit(ctx, 1, 0);     /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
+       } else {
+               dd_emit(ctx, 1, 0);     /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
+       }
+       dd_emit(ctx, 1, 0xc);           /* 000000ff SEMANTIC_COLOR.BFC0_ID */
+       if (dev_priv->chipset != 0x50)
+               dd_emit(ctx, 1, 0);     /* 00000001 SEMANTIC_COLOR.CLMP_EN */
+       dd_emit(ctx, 1, 8);             /* 000000ff SEMANTIC_COLOR.COLR_NR */
+       dd_emit(ctx, 1, 0x14);          /* 000000ff SEMANTIC_COLOR.FFC0_ID */
+       if (dev_priv->chipset == 0x50) {
+               dd_emit(ctx, 1, 0);     /* 000000ff SEMANTIC_LAYER */
+               dd_emit(ctx, 1, 0);     /* 00000001 */
+       } else {
+               dd_emit(ctx, 1, 0);     /* 00000001 SEMANTIC_PTSZ.ENABLE */
+               dd_emit(ctx, 1, 0x29);  /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */
+               dd_emit(ctx, 1, 0x27);  /* 000000ff SEMANTIC_PRIM */
+               dd_emit(ctx, 1, 0x26);  /* 000000ff SEMANTIC_LAYER */
+       dd_emit(ctx, 1, 8);     /* 0000000f SEMANTIC_CLIP.CLIP_HIGH */
+               dd_emit(ctx, 1, 4);     /* 000000ff SEMANTIC_CLIP.CLIP_LO */
+               dd_emit(ctx, 1, 0x27);  /* 000000ff UNK0FD4 */
+               dd_emit(ctx, 1, 0);     /* 00000001 UNK1900 */
+       }
+       dd_emit(ctx, 1, 0);             /* 00000007 RT_CONTROL_MAP0 */
+       dd_emit(ctx, 1, 1);             /* 00000007 RT_CONTROL_MAP1 */
+       dd_emit(ctx, 1, 2);             /* 00000007 RT_CONTROL_MAP2 */
+       dd_emit(ctx, 1, 3);             /* 00000007 RT_CONTROL_MAP3 */
+       dd_emit(ctx, 1, 4);             /* 00000007 RT_CONTROL_MAP4 */
+       dd_emit(ctx, 1, 5);             /* 00000007 RT_CONTROL_MAP5 */
+       dd_emit(ctx, 1, 6);             /* 00000007 RT_CONTROL_MAP6 */
+       dd_emit(ctx, 1, 7);             /* 00000007 RT_CONTROL_MAP7 */
+       dd_emit(ctx, 1, 1);             /* 0000000f RT_CONTROL_COUNT */
+       dd_emit(ctx, 8, 0);             /* 00000001 RT_HORIZ_UNK */
+       dd_emit(ctx, 8, 0);             /* ffffffff RT_ADDRESS_LOW */
+       dd_emit(ctx, 1, 0xcf);          /* 000000ff RT_FORMAT */
+       dd_emit(ctx, 7, 0);             /* 000000ff RT_FORMAT */
+       if (dev_priv->chipset != 0x50)
+               dd_emit(ctx, 3, 0);     /* 1, 1, 1 */
+       else
+               dd_emit(ctx, 2, 0);     /* 1, 1 */
+       dd_emit(ctx, 1, 0);             /* ffffffff GP_ENABLE */
+       dd_emit(ctx, 1, 0x80);          /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+       dd_emit(ctx, 1, 4);             /* 000000ff GP_REG_ALLOC_RESULT */
+       dd_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               dd_emit(ctx, 1, 3);     /* 00000003 */
+               dd_emit(ctx, 1, 0);     /* 00000001 UNK1418. Alone. */
+       }
+       if (dev_priv->chipset != 0x50)
+               dd_emit(ctx, 1, 3);     /* 00000003 UNK15AC */
+       dd_emit(ctx, 1, 1);             /* ffffffff RASTERIZE_ENABLE */
+       dd_emit(ctx, 1, 0);             /* 00000001 FP_CONTROL.EXPORTS_Z */
+       if (dev_priv->chipset != 0x50)
+               dd_emit(ctx, 1, 0);     /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
+       dd_emit(ctx, 1, 0x12);          /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
+       dd_emit(ctx, 1, 0x10);          /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
+       dd_emit(ctx, 1, 0xc);           /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */
+       dd_emit(ctx, 1, 1);             /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */
+       dd_emit(ctx, 1, 0);             /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */
+       dd_emit(ctx, 1, 0);             /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */
+       dd_emit(ctx, 1, 0);             /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */
+       dd_emit(ctx, 1, 4);             /* 000000ff FP_RESULT_COUNT */
+       dd_emit(ctx, 1, 2);             /* ffffffff REG_MODE */
+       dd_emit(ctx, 1, 4);             /* 000000ff FP_REG_ALLOC_TEMP */
+       if (dev_priv->chipset >= 0xa0)
+               dd_emit(ctx, 1, 0);     /* ffffffff */
+       dd_emit(ctx, 1, 0);             /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
+       dd_emit(ctx, 1, 0);             /* ffffffff STRMOUT_ENABLE */
+       dd_emit(ctx, 1, 0x3fffff);      /* 003fffff TIC_LIMIT */
+       dd_emit(ctx, 1, 0x1fff);        /* 000fffff TSC_LIMIT */
+       dd_emit(ctx, 1, 0);             /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+       if (dev_priv->chipset != 0x50)
+               dd_emit(ctx, 8, 0);     /* 00000001 */
+       if (dev_priv->chipset >= 0xa0) {
+               dd_emit(ctx, 1, 1);     /* 00000007 VTX_ATTR_DEFINE.COMP */
+               dd_emit(ctx, 1, 1);     /* 00000007 VTX_ATTR_DEFINE.SIZE */
+               dd_emit(ctx, 1, 2);     /* 00000007 VTX_ATTR_DEFINE.TYPE */
+               dd_emit(ctx, 1, 0);     /* 000000ff VTX_ATTR_DEFINE.ATTR */
+       }
+       dd_emit(ctx, 1, 4);             /* 0000007f VP_RESULT_MAP_SIZE */
+       dd_emit(ctx, 1, 0x14);          /* 0000001f ZETA_FORMAT */
+       dd_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       dd_emit(ctx, 1, 0);             /* 0000000f VP_TEXTURES_LOG2 */
+       dd_emit(ctx, 1, 0);             /* 0000000f VP_SAMPLERS_LOG2 */
+       if (IS_NVA3F(dev_priv->chipset))
+               dd_emit(ctx, 1, 0);     /* 00000001 */
+       dd_emit(ctx, 1, 2);             /* 00000003 POLYGON_MODE_BACK */
+       if (dev_priv->chipset >= 0xa0)
+               dd_emit(ctx, 1, 0);     /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
+       dd_emit(ctx, 1, 0);             /* 0000ffff CB_ADDR_INDEX */
+       if (dev_priv->chipset >= 0xa0)
+               dd_emit(ctx, 1, 0);     /* 00000003 */
+       dd_emit(ctx, 1, 0);             /* 00000001 CULL_FACE_ENABLE */
+       dd_emit(ctx, 1, 1);             /* 00000003 CULL_FACE */
+       dd_emit(ctx, 1, 0);             /* 00000001 FRONT_FACE */
+       dd_emit(ctx, 1, 2);             /* 00000003 POLYGON_MODE_FRONT */
+       dd_emit(ctx, 1, 0x1000);        /* 00007fff UNK141C */
+       if (dev_priv->chipset != 0x50) {
+               dd_emit(ctx, 1, 0xe00);         /* 7fff */
+               dd_emit(ctx, 1, 0x1000);        /* 7fff */
+               dd_emit(ctx, 1, 0x1e00);        /* 7fff */
+       }
+       dd_emit(ctx, 1, 0);             /* 00000001 BEGIN_END_ACTIVE */
+       dd_emit(ctx, 1, 1);             /* 00000001 POLYGON_MODE_??? */
+       dd_emit(ctx, 1, 1);             /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */
+       dd_emit(ctx, 1, 1);             /* 000000ff FP_REG_ALLOC_TEMP... without /4? */
+       dd_emit(ctx, 1, 1);             /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */
+       dd_emit(ctx, 1, 1);             /* 00000001 */
+       dd_emit(ctx, 1, 0);             /* 00000001 */
+       dd_emit(ctx, 1, 0);             /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
+       dd_emit(ctx, 1, 0);             /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
+       dd_emit(ctx, 1, 0x200);         /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
+       if (IS_NVA3F(dev_priv->chipset))
+               dd_emit(ctx, 1, 0x200);
+       dd_emit(ctx, 1, 0);             /* 00000001 */
+       if (dev_priv->chipset < 0xa0) {
+               dd_emit(ctx, 1, 1);     /* 00000001 */
+               dd_emit(ctx, 1, 0x70);  /* 000000ff */
+               dd_emit(ctx, 1, 0x80);  /* 000000ff */
+               dd_emit(ctx, 1, 0);     /* 000000ff */
+               dd_emit(ctx, 1, 0);     /* 00000001 */
+               dd_emit(ctx, 1, 1);     /* 00000001 */
+               dd_emit(ctx, 1, 0x70);  /* 000000ff */
+               dd_emit(ctx, 1, 0x80);  /* 000000ff */
+               dd_emit(ctx, 1, 0);     /* 000000ff */
+       } else {
+               dd_emit(ctx, 1, 1);     /* 00000001 */
+               dd_emit(ctx, 1, 0xf0);  /* 000000ff */
+               dd_emit(ctx, 1, 0xff);  /* 000000ff */
+               dd_emit(ctx, 1, 0);     /* 000000ff */
+               dd_emit(ctx, 1, 0);     /* 00000001 */
+               dd_emit(ctx, 1, 1);     /* 00000001 */
+               dd_emit(ctx, 1, 0xf0);  /* 000000ff */
+               dd_emit(ctx, 1, 0xff);  /* 000000ff */
+               dd_emit(ctx, 1, 0);     /* 000000ff */
+               dd_emit(ctx, 1, 9);     /* 0000003f UNK114C.COMP,SIZE */
+       }
+
+       /* eng2d state */
+       dd_emit(ctx, 1, 0);             /* 00000001 eng2d COLOR_KEY_ENABLE */
+       dd_emit(ctx, 1, 0);             /* 00000007 eng2d COLOR_KEY_FORMAT */
+       dd_emit(ctx, 1, 1);             /* ffffffff eng2d DST_DEPTH */
+       dd_emit(ctx, 1, 0xcf);          /* 000000ff eng2d DST_FORMAT */
+       dd_emit(ctx, 1, 0);             /* ffffffff eng2d DST_LAYER */
+       dd_emit(ctx, 1, 1);             /* 00000001 eng2d DST_LINEAR */
+       dd_emit(ctx, 1, 0);             /* 00000007 eng2d PATTERN_COLOR_FORMAT */
+       dd_emit(ctx, 1, 0);             /* 00000007 eng2d OPERATION */
+       dd_emit(ctx, 1, 0);             /* 00000003 eng2d PATTERN_SELECT */
+       dd_emit(ctx, 1, 0xcf);          /* 000000ff eng2d SIFC_FORMAT */
+       dd_emit(ctx, 1, 0);             /* 00000001 eng2d SIFC_BITMAP_ENABLE */
+       dd_emit(ctx, 1, 2);             /* 00000003 eng2d SIFC_BITMAP_UNK808 */
+       dd_emit(ctx, 1, 0);             /* ffffffff eng2d BLIT_DU_DX_FRACT */
+       dd_emit(ctx, 1, 1);             /* ffffffff eng2d BLIT_DU_DX_INT */
+       dd_emit(ctx, 1, 0);             /* ffffffff eng2d BLIT_DV_DY_FRACT */
+       dd_emit(ctx, 1, 1);             /* ffffffff eng2d BLIT_DV_DY_INT */
+       dd_emit(ctx, 1, 0);             /* 00000001 eng2d BLIT_CONTROL_FILTER */
+       dd_emit(ctx, 1, 0xcf);          /* 000000ff eng2d DRAW_COLOR_FORMAT */
+       dd_emit(ctx, 1, 0xcf);          /* 000000ff eng2d SRC_FORMAT */
+       dd_emit(ctx, 1, 1);             /* 00000001 eng2d SRC_LINEAR #2 */
+
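+       /* register the block just sized: the ddata area sits at 0x404800 on
+        * a3/a5/a8/af and at 0x405400 on everything else, same as the
+        * open-coded table this replaces */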
+       num = ctx->ctxvals_pos - base;
+       ctx->ctxvals_pos = base;
+       if (IS_NVA3F(dev_priv->chipset))
+               cp_ctx(ctx, 0x404800, num);
+       else
+               cp_ctx(ctx, 0x405400, num);
+}
+
 /*
  * xfer areas. These are a pain.
  *
@@ -990,28 +1130,33 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
  * without the help of ctxprog.
  */
 
-static inline void
+static void
 xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
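+       /* strands are interleaved 8 words apart within a ctxvals area, hence
+        * the i << 3 stride here and the (ctxvals_pos - offset)/8 size
+        * computations in the callers */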
        int i;
        if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
                for (i = 0; i < num; i++)
-                       nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
+                       nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
        ctx->ctxvals_pos += num << 3;
 }
 
 /* Gene declarations... */
 
+static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx);
 static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx);
 static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
 static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
 
@@ -1030,102 +1175,32 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
        if (dev_priv->chipset < 0xa0) {
                /* Strand 0 */
                ctx->ctxvals_pos = offset;
-               switch (dev_priv->chipset) {
-               case 0x50:
-                       xf_emit(ctx, 0x99, 0);
-                       break;
-               case 0x84:
-               case 0x86:
-                       xf_emit(ctx, 0x384, 0);
-                       break;
-               case 0x92:
-               case 0x94:
-               case 0x96:
-               case 0x98:
-                       xf_emit(ctx, 0x380, 0);
-                       break;
-               }
-               nv50_graph_construct_gene_m2mf (ctx);
-               switch (dev_priv->chipset) {
-               case 0x50:
-               case 0x84:
-               case 0x86:
-               case 0x98:
-                       xf_emit(ctx, 0x4c4, 0);
-                       break;
-               case 0x92:
-               case 0x94:
-               case 0x96:
-                       xf_emit(ctx, 0x984, 0);
-                       break;
-               }
-               nv50_graph_construct_gene_unk5(ctx);
-               if (dev_priv->chipset == 0x50)
-                       xf_emit(ctx, 0xa, 0);
-               else
-                       xf_emit(ctx, 0xb, 0);
-               nv50_graph_construct_gene_unk4(ctx);
-               nv50_graph_construct_gene_unk3(ctx);
+               nv50_graph_construct_gene_dispatch(ctx);
+               nv50_graph_construct_gene_m2mf(ctx);
+               nv50_graph_construct_gene_unk24xx(ctx);
+               nv50_graph_construct_gene_clipid(ctx);
+               nv50_graph_construct_gene_zcull(ctx);
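+               /* keep track of the longest strand; lengths are in 8-word units */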
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
                /* Strand 1 */
                ctx->ctxvals_pos = offset + 0x1;
-               nv50_graph_construct_gene_unk6(ctx);
-               nv50_graph_construct_gene_unk7(ctx);
-               nv50_graph_construct_gene_unk8(ctx);
-               switch (dev_priv->chipset) {
-               case 0x50:
-               case 0x92:
-                       xf_emit(ctx, 0xfb, 0);
-                       break;
-               case 0x84:
-                       xf_emit(ctx, 0xd3, 0);
-                       break;
-               case 0x94:
-               case 0x96:
-                       xf_emit(ctx, 0xab, 0);
-                       break;
-               case 0x86:
-               case 0x98:
-                       xf_emit(ctx, 0x6b, 0);
-                       break;
-               }
-               xf_emit(ctx, 2, 0x4e3bfdf);
-               xf_emit(ctx, 4, 0);
-               xf_emit(ctx, 1, 0x0fac6881);
-               xf_emit(ctx, 0xb, 0);
-               xf_emit(ctx, 2, 0x4e3bfdf);
+               nv50_graph_construct_gene_vfetch(ctx);
+               nv50_graph_construct_gene_eng2d(ctx);
+               nv50_graph_construct_gene_csched(ctx);
+               nv50_graph_construct_gene_ropm1(ctx);
+               nv50_graph_construct_gene_ropm2(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
                /* Strand 2 */
                ctx->ctxvals_pos = offset + 0x2;
-               switch (dev_priv->chipset) {
-               case 0x50:
-               case 0x92:
-                       xf_emit(ctx, 0xa80, 0);
-                       break;
-               case 0x84:
-                       xf_emit(ctx, 0xa7e, 0);
-                       break;
-               case 0x94:
-               case 0x96:
-                       xf_emit(ctx, 0xa7c, 0);
-                       break;
-               case 0x86:
-               case 0x98:
-                       xf_emit(ctx, 0xa7a, 0);
-                       break;
-               }
-               xf_emit(ctx, 1, 0x3fffff);
-               xf_emit(ctx, 2, 0);
-               xf_emit(ctx, 1, 0x1fff);
-               xf_emit(ctx, 0xe, 0);
-               nv50_graph_construct_gene_unk9(ctx);
-               nv50_graph_construct_gene_unk2(ctx);
-               nv50_graph_construct_gene_unk1(ctx);
-               nv50_graph_construct_gene_unk10(ctx);
+               nv50_graph_construct_gene_ccache(ctx);
+               nv50_graph_construct_gene_unk1cxx(ctx);
+               nv50_graph_construct_gene_strmout(ctx);
+               nv50_graph_construct_gene_unk14xx(ctx);
+               nv50_graph_construct_gene_unk10xx(ctx);
+               nv50_graph_construct_gene_unk34xx(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
@@ -1150,86 +1225,46 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
        } else {
                /* Strand 0 */
                ctx->ctxvals_pos = offset;
-               if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-                       xf_emit(ctx, 0x385, 0);
-               else
-                       xf_emit(ctx, 0x384, 0);
+               nv50_graph_construct_gene_dispatch(ctx);
                nv50_graph_construct_gene_m2mf(ctx);
-               xf_emit(ctx, 0x950, 0);
-               nv50_graph_construct_gene_unk10(ctx);
-               xf_emit(ctx, 1, 0x0fac6881);
-               if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-                       xf_emit(ctx, 1, 1);
-                       xf_emit(ctx, 3, 0);
-               }
-               nv50_graph_construct_gene_unk8(ctx);
-               if (dev_priv->chipset == 0xa0)
-                       xf_emit(ctx, 0x189, 0);
-               else if (dev_priv->chipset == 0xa3)
-                       xf_emit(ctx, 0xd5, 0);
-               else if (dev_priv->chipset == 0xa5)
-                       xf_emit(ctx, 0x99, 0);
-               else if (dev_priv->chipset == 0xaa)
-                       xf_emit(ctx, 0x65, 0);
-               else
-                       xf_emit(ctx, 0x6d, 0);
-               nv50_graph_construct_gene_unk9(ctx);
+               nv50_graph_construct_gene_unk34xx(ctx);
+               nv50_graph_construct_gene_csched(ctx);
+               nv50_graph_construct_gene_unk1cxx(ctx);
+               nv50_graph_construct_gene_strmout(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
                /* Strand 1 */
                ctx->ctxvals_pos = offset + 1;
-               nv50_graph_construct_gene_unk1(ctx);
+               nv50_graph_construct_gene_unk10xx(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
                /* Strand 2 */
                ctx->ctxvals_pos = offset + 2;
-               if (dev_priv->chipset == 0xa0) {
-                       nv50_graph_construct_gene_unk2(ctx);
-               }
-               xf_emit(ctx, 0x36, 0);
-               nv50_graph_construct_gene_unk5(ctx);
+               if (dev_priv->chipset == 0xa0)
+                       nv50_graph_construct_gene_unk14xx(ctx);
+               nv50_graph_construct_gene_unk24xx(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
                /* Strand 3 */
                ctx->ctxvals_pos = offset + 3;
-               xf_emit(ctx, 1, 0);
-               xf_emit(ctx, 1, 1);
-               nv50_graph_construct_gene_unk6(ctx);
+               nv50_graph_construct_gene_vfetch(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
                /* Strand 4 */
                ctx->ctxvals_pos = offset + 4;
-               if (dev_priv->chipset == 0xa0)
-                       xf_emit(ctx, 0xa80, 0);
-               else if (dev_priv->chipset == 0xa3)
-                       xf_emit(ctx, 0xa7c, 0);
-               else
-                       xf_emit(ctx, 0xa7a, 0);
-               xf_emit(ctx, 1, 0x3fffff);
-               xf_emit(ctx, 2, 0);
-               xf_emit(ctx, 1, 0x1fff);
+               nv50_graph_construct_gene_ccache(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
                /* Strand 5 */
                ctx->ctxvals_pos = offset + 5;
-               xf_emit(ctx, 1, 0);
-               xf_emit(ctx, 1, 0x0fac6881);
-               xf_emit(ctx, 0xb, 0);
-               xf_emit(ctx, 2, 0x4e3bfdf);
-               xf_emit(ctx, 3, 0);
-               if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-                       xf_emit(ctx, 1, 0x11);
-               xf_emit(ctx, 1, 0);
-               xf_emit(ctx, 2, 0x4e3bfdf);
-               xf_emit(ctx, 2, 0);
-               if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-                       xf_emit(ctx, 1, 0x11);
-               xf_emit(ctx, 1, 0);
+               nv50_graph_construct_gene_ropm2(ctx);
+               nv50_graph_construct_gene_ropm1(ctx);
+               /* per-ROP context */
                for (i = 0; i < 8; i++)
                        if (units & (1<<(i+16)))
                                nv50_graph_construct_gene_ropc(ctx);
@@ -1238,10 +1273,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
 
                /* Strand 6 */
                ctx->ctxvals_pos = offset + 6;
-               nv50_graph_construct_gene_unk3(ctx);
-               xf_emit(ctx, 0xb, 0);
-               nv50_graph_construct_gene_unk4(ctx);
-               nv50_graph_construct_gene_unk7(ctx);
+               nv50_graph_construct_gene_zcull(ctx);
+               nv50_graph_construct_gene_clipid(ctx);
+               nv50_graph_construct_gene_eng2d(ctx);
                if (units & (1 << 0))
                        nv50_graph_construct_xfer_tp(ctx);
                if (units & (1 << 1))
@@ -1269,7 +1303,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
                        if (units & (1 << 9))
                                nv50_graph_construct_xfer_tp(ctx);
                } else {
-                       nv50_graph_construct_gene_unk2(ctx);
+                       nv50_graph_construct_gene_unk14xx(ctx);
                }
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
@@ -1289,10 +1323,71 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
  * non-trivial demagiced parts of ctx init go here
  */
 
+static void
+nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
+{
+       /* start of strand 0 */
+       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+       /* SEEK */
+       if (dev_priv->chipset == 0x50)
+               xf_emit(ctx, 5, 0);
+       else if (!IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 6, 0);
+       else
+               xf_emit(ctx, 4, 0);
+       /* SEEK */
+       /* the PGRAPH's internal FIFO */
+       if (dev_priv->chipset == 0x50)
+               xf_emit(ctx, 8*3, 0);
+       else
+               xf_emit(ctx, 0x100*3, 0);
+       /* and another bonus slot?!? */
+       xf_emit(ctx, 3, 0);
+       /* and YET ANOTHER bonus slot? */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 3, 0);
+       /* SEEK */
+       /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
+       xf_emit(ctx, 9, 0);
+       /* SEEK */
+       xf_emit(ctx, 9, 0);
+       /* SEEK */
+       xf_emit(ctx, 9, 0);
+       /* SEEK */
+       xf_emit(ctx, 9, 0);
+       /* SEEK */
+       if (dev_priv->chipset < 0x90)
+               xf_emit(ctx, 4, 0);
+       /* SEEK */
+       xf_emit(ctx, 2, 0);
+       /* SEEK */
+       xf_emit(ctx, 6*2, 0);
+       xf_emit(ctx, 2, 0);
+       /* SEEK */
+       xf_emit(ctx, 2, 0);
+       /* SEEK */
+       xf_emit(ctx, 6*2, 0);
+       xf_emit(ctx, 2, 0);
+       /* SEEK */
+       if (dev_priv->chipset == 0x50)
+               xf_emit(ctx, 0x1c, 0);
+       else if (dev_priv->chipset < 0xa0)
+               xf_emit(ctx, 0x1e, 0);
+       else
+               xf_emit(ctx, 0x22, 0);
+       /* SEEK */
+       xf_emit(ctx, 0x15, 0);
+}
+
 static void
 nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
 {
-       /* m2mf state */
+       /* Strand 0, right after dispatch */
+       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+       int smallm2mf = 0;
+       if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
+               smallm2mf = 1;
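+       /* pre-92 chips and 98 get the small m2mf: the two scratch areas below
+        * shrink from 0x100/0x800 words to 0x40/0x400 */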
+       /* SEEK */
        xf_emit (ctx, 1, 0);            /* DMA_NOTIFY instance >> 4 */
        xf_emit (ctx, 1, 0);            /* DMA_BUFFER_IN instance >> 4 */
        xf_emit (ctx, 1, 0);            /* DMA_BUFFER_OUT instance >> 4 */
@@ -1319,427 +1414,975 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
        xf_emit (ctx, 1, 0);            /* TILING_POSITION_OUT */
        xf_emit (ctx, 1, 0);            /* OFFSET_IN_HIGH */
        xf_emit (ctx, 1, 0);            /* OFFSET_OUT_HIGH */
+       /* SEEK */
+       if (smallm2mf)
+               xf_emit(ctx, 0x40, 0);  /* 20 * ffffffff, 3ffff */
+       else
+               xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */
+       xf_emit(ctx, 4, 0);             /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */
+       /* SEEK */
+       if (smallm2mf)
+               xf_emit(ctx, 0x400, 0); /* ffffffff */
+       else
+               xf_emit(ctx, 0x800, 0); /* ffffffff */
+       xf_emit(ctx, 4, 0);             /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */
+       /* SEEK */
+       xf_emit(ctx, 0x40, 0);          /* 20 * ffffffff, 3ffff */
+       xf_emit(ctx, 0x6, 0);           /* 1f, 0, 1f, 0, 1f, 0 */
 }
 
 static void
-nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       /* end of area 2 on pre-NVA0, area 1 on NVAx */
-       xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x80);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 0x80c14);
-       xf_emit(ctx, 1, 0);
-       if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 1, 0x3ff);
-       else
-               xf_emit(ctx, 1, 0x7ff);
+       xf_emit(ctx, 2, 0);             /* RO */
+       xf_emit(ctx, 0x800, 0);         /* ffffffff */
        switch (dev_priv->chipset) {
        case 0x50:
-       case 0x86:
-       case 0x98:
-       case 0xaa:
-       case 0xac:
-               xf_emit(ctx, 0x542, 0);
+       case 0x92:
+       case 0xa0:
+               xf_emit(ctx, 0x2b, 0);
                break;
        case 0x84:
-       case 0x92:
+               xf_emit(ctx, 0x29, 0);
+               break;
        case 0x94:
        case 0x96:
-               xf_emit(ctx, 0x942, 0);
-               break;
-       case 0xa0:
        case 0xa3:
-               xf_emit(ctx, 0x2042, 0);
+               xf_emit(ctx, 0x27, 0);
                break;
+       case 0x86:
+       case 0x98:
        case 0xa5:
        case 0xa8:
-               xf_emit(ctx, 0x842, 0);
+       case 0xaa:
+       case 0xac:
+       case 0xaf:
+               xf_emit(ctx, 0x25, 0);
                break;
        }
-       xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x80);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x27);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x26);
-       xf_emit(ctx, 3, 0);
+       /* CB bindings, 0x80 of them. first word is address >> 8, second is
+        * size >> 4 | valid << 24 */
+       xf_emit(ctx, 0x100, 0);         /* ffffffff CB_DEF */
+       xf_emit(ctx, 1, 0);             /* 0000007f CB_ADDR_BUFFER */
+       xf_emit(ctx, 1, 0);             /* 0 */
+       xf_emit(ctx, 0x30, 0);          /* ff SET_PROGRAM_CB */
+       xf_emit(ctx, 1, 0);             /* 3f last SET_PROGRAM_CB */
+       xf_emit(ctx, 4, 0);             /* RO */
+       xf_emit(ctx, 0x100, 0);         /* ffffffff */
+       xf_emit(ctx, 8, 0);             /* 1f, 0, 0, ... */
+       xf_emit(ctx, 8, 0);             /* ffffffff */
+       xf_emit(ctx, 4, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 3 */
+       xf_emit(ctx, 1, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_CODE_CB */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_TIC */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_TSC */
+       xf_emit(ctx, 1, 0);             /* 00000001 LINKED_TSC */
+       xf_emit(ctx, 1, 0);             /* 000000ff TIC_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff TIC_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0x3fffff);      /* 003fffff TIC_LIMIT */
+       xf_emit(ctx, 1, 0);             /* 000000ff TSC_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff TSC_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0x1fff);        /* 000fffff TSC_LIMIT */
+       xf_emit(ctx, 1, 0);             /* 000000ff VP_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff VP_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0);             /* 00ffffff VP_START_ID */
+       xf_emit(ctx, 1, 0);             /* 000000ff CB_DEF_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff CB_DEF_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 000000ff GP_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff GP_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0);             /* 00ffffff GP_START_ID */
+       xf_emit(ctx, 1, 0);             /* 000000ff FP_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff FP_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0);             /* 00ffffff FP_START_ID */
 }
 
 static void
-nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
 {
+       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+       int i;
        /* end of area 2 on pre-NVA0, area 1 on NVAx */
-       xf_emit(ctx, 0x10, 0x04000000);
-       xf_emit(ctx, 0x24, 0);
-       xf_emit(ctx, 2, 0x04e3bfdf);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0x1fe21);
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 4);             /* 0000007f VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0x80);          /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_REG_ALLOC_RESULT */
+       xf_emit(ctx, 1, 0x80c14);       /* 01ffffff SEMANTIC_COLOR */
+       xf_emit(ctx, 1, 0);             /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+       if (dev_priv->chipset == 0x50)
+               xf_emit(ctx, 1, 0x3ff);
+       else
+               xf_emit(ctx, 1, 0x7ff); /* 000007ff */
+       xf_emit(ctx, 1, 0);             /* 111/113 */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       for (i = 0; i < 8; i++) {
+               switch (dev_priv->chipset) {
+               case 0x50:
+               case 0x86:
+               case 0x98:
+               case 0xaa:
+               case 0xac:
+                       xf_emit(ctx, 0xa0, 0);  /* ffffffff */
+                       break;
+               case 0x84:
+               case 0x92:
+               case 0x94:
+               case 0x96:
+                       xf_emit(ctx, 0x120, 0);
+                       break;
+               case 0xa5:
+               case 0xa8:
+                       xf_emit(ctx, 0x100, 0); /* ffffffff */
+                       break;
+               case 0xa0:
+               case 0xa3:
+               case 0xaf:
+                       xf_emit(ctx, 0x400, 0); /* ffffffff */
+                       break;
+               }
+               xf_emit(ctx, 4, 0);     /* 3f, 0, 0, 0 */
+               xf_emit(ctx, 4, 0);     /* ffffffff */
+       }
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 4);             /* 0000007f VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0x80);          /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_REG_ALLOC_TEMP */
+       xf_emit(ctx, 1, 1);             /* 00000001 RASTERIZE_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1900 */
+       xf_emit(ctx, 1, 0x27);          /* 000000ff UNK0FD4 */
+       xf_emit(ctx, 1, 0);             /* 0001ffff GP_BUILTIN_RESULT_EN */
+       xf_emit(ctx, 1, 0x26);          /* 000000ff SEMANTIC_LAYER */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+}
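
Every gene_* constructor here is a sequence of xf_emit(ctx, count, value)
calls. As a rough standalone model of what that primitive does -- the
8-slot stride and the values-vs-sizing mode split are assumptions from
reading nouveau_grctx.h, not a documented contract:

	#include <stdint.h>

	struct grctx_model {
		int gen_values;          /* filling values, not just sizing */
		unsigned int pos;        /* current slot in context image */
		uint32_t image[0x10000];
	};

	/* Advance the cursor by num slots; when generating values, store
	 * val into each slot touched (zero values are left as-is). */
	static void xf_emit_model(struct grctx_model *ctx, int num, uint32_t val)
	{
		int i;

		if (ctx->gen_values && val)
			for (i = 0; i < num; i++)
				ctx->image[ctx->pos + i * 8] = val;
		ctx->pos += num * 8;
	}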
+
+static void
+nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
+{
+       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+       /* end of area 2 on pre-NVA0, area 1 on NVAx */
+       xf_emit(ctx, 1, 0);             /* 00000001 VIEWPORT_CLIP_RECTS_EN */
+       xf_emit(ctx, 1, 0);             /* 00000003 VIEWPORT_CLIP_MODE */
+       xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */
+       xf_emit(ctx, 1, 0);             /* 00000001 POLYGON_STIPPLE_ENABLE */
+       xf_emit(ctx, 0x20, 0);          /* ffffffff POLYGON_STIPPLE */
+       xf_emit(ctx, 2, 0);             /* 00007fff WINDOW_OFFSET_XY */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 1, 0x04e3bfdf);    /* ffffffff UNK0D64 */
+       xf_emit(ctx, 1, 0x04e3bfdf);    /* ffffffff UNK0DF4 */
+       xf_emit(ctx, 1, 0);             /* 00000003 WINDOW_ORIGIN */
+       xf_emit(ctx, 1, 0);             /* 00000007 */
+       xf_emit(ctx, 1, 0x1fe21);       /* 0001ffff tesla UNK0FAC */
+       if (dev_priv->chipset >= 0xa0)
+               xf_emit(ctx, 1, 0x0fac6881);
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 1, 1);
+               xf_emit(ctx, 3, 0);
+       }
 }
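
IS_NVA3F() makes its first appearance above but is defined outside this
hunk. Inferring from the open-coded tests it replaces later in this diff
(chipset > 0xa0 && chipset < 0xaa) and the IS_NVAAF() use further down,
a plausible reconstruction -- an assumption, not a quote of the source:

	/* Assumed definitions; the real macros live elsewhere in
	 * nv50_grctx.c and may differ, e.g. in whether 0xaf is included. */
	#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
	#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)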
 
 static void
-nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
        /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
        if (dev_priv->chipset != 0x50) {
-               xf_emit(ctx, 5, 0);
-               xf_emit(ctx, 1, 0x80c14);
-               xf_emit(ctx, 2, 0);
-               xf_emit(ctx, 1, 0x804);
-               xf_emit(ctx, 1, 0);
-               xf_emit(ctx, 2, 4);
-               xf_emit(ctx, 1, 0x8100c12);
+               xf_emit(ctx, 5, 0);             /* ffffffff */
+               xf_emit(ctx, 1, 0x80c14);       /* 01ffffff SEMANTIC_COLOR */
+               xf_emit(ctx, 1, 0);             /* 00000001 */
+               xf_emit(ctx, 1, 0);             /* 000003ff */
+               xf_emit(ctx, 1, 0x804);         /* 00000fff SEMANTIC_CLIP */
+               xf_emit(ctx, 1, 0);             /* 00000001 */
+               xf_emit(ctx, 2, 4);             /* 7f, ff */
+               xf_emit(ctx, 1, 0x8100c12);     /* 1fffffff FP_INTERPOLANT_CTRL */
        }
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x10);
-       if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 3, 0);
-       else
-               xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 0x804);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x1a);
+       xf_emit(ctx, 1, 0);                     /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 4);                     /* 0000007f VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 4);                     /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);                     /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0x10);                  /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+       xf_emit(ctx, 1, 0);                     /* 000000ff VP_CLIP_DISTANCE_ENABLE */
        if (dev_priv->chipset != 0x50)
-               xf_emit(ctx, 1, 0x7f);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x80c14);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x8100c12);
-       xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x10);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x8100c12);
-       xf_emit(ctx, 6, 0);
-       if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 1, 0x3ff);
-       else
-               xf_emit(ctx, 1, 0x7ff);
-       xf_emit(ctx, 1, 0x80c14);
-       xf_emit(ctx, 0x38, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0x10);
-       xf_emit(ctx, 0x38, 0);
-       xf_emit(ctx, 2, 0x88);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 0x16, 0);
-       xf_emit(ctx, 1, 0x26);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0x3f800000);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 4, 0);
-       else
-               xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0x1a);
-       xf_emit(ctx, 1, 0x10);
+               xf_emit(ctx, 1, 0);             /* 3ff */
+       xf_emit(ctx, 1, 0);                     /* 000000ff tesla UNK1940 */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK0D7C */
+       xf_emit(ctx, 1, 0x804);                 /* 00000fff SEMANTIC_CLIP */
+       xf_emit(ctx, 1, 1);                     /* 00000001 VIEWPORT_TRANSFORM_EN */
+       xf_emit(ctx, 1, 0x1a);                  /* 0000001f POLYGON_MODE */
        if (dev_priv->chipset != 0x50)
-               xf_emit(ctx, 0x28, 0);
+               xf_emit(ctx, 1, 0x7f);          /* 000000ff tesla UNK0FFC */
+       xf_emit(ctx, 1, 0);                     /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 1);                     /* 00000001 SHADE_MODEL */
+       xf_emit(ctx, 1, 0x80c14);               /* 01ffffff SEMANTIC_COLOR */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK1900 */
+       xf_emit(ctx, 1, 0x8100c12);             /* 1fffffff FP_INTERPOLANT_CTRL */
+       xf_emit(ctx, 1, 4);                     /* 0000007f VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 4);                     /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);                     /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0x10);                  /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK0D7C */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK0F8C */
+       xf_emit(ctx, 1, 0);                     /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 1);                     /* 00000001 VIEWPORT_TRANSFORM_EN */
+       xf_emit(ctx, 1, 0x8100c12);             /* 1fffffff FP_INTERPOLANT_CTRL */
+       xf_emit(ctx, 4, 0);                     /* ffffffff NOPERSPECTIVE_BITMAP */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK1900 */
+       xf_emit(ctx, 1, 0);                     /* 0000000f */
+       if (dev_priv->chipset == 0x50)
+               xf_emit(ctx, 1, 0x3ff);         /* 000003ff tesla UNK0D68 */
        else
-               xf_emit(ctx, 0x25, 0);
-       xf_emit(ctx, 1, 0x52);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x26);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x1a);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0x00ffff00);
-       xf_emit(ctx, 1, 0);
+               xf_emit(ctx, 1, 0x7ff);         /* 000007ff tesla UNK0D68 */
+       xf_emit(ctx, 1, 0x80c14);               /* 01ffffff SEMANTIC_COLOR */
+       xf_emit(ctx, 1, 0);                     /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+       xf_emit(ctx, 0x30, 0);                  /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */
+       xf_emit(ctx, 3, 0);                     /* f, 0, 0 */
+       xf_emit(ctx, 3, 0);                     /* ffffffff last VIEWPORT_SCALE? */
+       xf_emit(ctx, 1, 0);                     /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 1);                     /* 00000001 VIEWPORT_TRANSFORM_EN */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK1900 */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK1924 */
+       xf_emit(ctx, 1, 0x10);                  /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+       xf_emit(ctx, 1, 0);                     /* 00000001 */
+       xf_emit(ctx, 0x30, 0);                  /* ffffffff VIEWPORT_TRANSLATE */
+       xf_emit(ctx, 3, 0);                     /* f, 0, 0 */
+       xf_emit(ctx, 3, 0);                     /* ffffffff */
+       xf_emit(ctx, 1, 0);                     /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 2, 0x88);                  /* 000001ff tesla UNK19D8 */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK1924 */
+       xf_emit(ctx, 1, 0);                     /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 4);                     /* 0000000f CULL_MODE */
+       xf_emit(ctx, 2, 0);                     /* 07ffffff SCREEN_SCISSOR */
+       xf_emit(ctx, 2, 0);                     /* 00007fff WINDOW_OFFSET_XY */
+       xf_emit(ctx, 1, 0);                     /* 00000003 WINDOW_ORIGIN */
+       xf_emit(ctx, 0x10, 0);                  /* 00000001 SCISSOR_ENABLE */
+       xf_emit(ctx, 1, 0);                     /* 0001ffff GP_BUILTIN_RESULT_EN */
+       xf_emit(ctx, 1, 0x26);                  /* 000000ff SEMANTIC_LAYER */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK1900 */
+       xf_emit(ctx, 1, 0);                     /* 0000000f */
+       xf_emit(ctx, 1, 0x3f800000);            /* ffffffff LINE_WIDTH */
+       xf_emit(ctx, 1, 0);                     /* 00000001 LINE_STIPPLE_ENABLE */
+       xf_emit(ctx, 1, 0);                     /* 00000001 LINE_SMOOTH_ENABLE */
+       xf_emit(ctx, 1, 0);                     /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 0);             /* 00000001 */
+       xf_emit(ctx, 1, 0x1a);                  /* 0000001f POLYGON_MODE */
+       xf_emit(ctx, 1, 0x10);                  /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+       if (dev_priv->chipset != 0x50) {
+               xf_emit(ctx, 1, 0);             /* ffffffff */
+               xf_emit(ctx, 1, 0);             /* 00000001 */
+               xf_emit(ctx, 1, 0);             /* 000003ff */
+       }
+       xf_emit(ctx, 0x20, 0);                  /* 10xbits ffffffff, 3fffff. SCISSOR_* */
+       xf_emit(ctx, 1, 0);                     /* f */
+       xf_emit(ctx, 1, 0);                     /* 0? */
+       xf_emit(ctx, 1, 0);                     /* ffffffff */
+       xf_emit(ctx, 1, 0);                     /* 003fffff */
+       xf_emit(ctx, 1, 0);                     /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 0x52);                  /* 000001ff SEMANTIC_PTSZ */
+       xf_emit(ctx, 1, 0);                     /* 0001ffff GP_BUILTIN_RESULT_EN */
+       xf_emit(ctx, 1, 0x26);                  /* 000000ff SEMANTIC_LAYER */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK1900 */
+       xf_emit(ctx, 1, 4);                     /* 0000007f VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 4);                     /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);                     /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0x1a);                  /* 0000001f POLYGON_MODE */
+       xf_emit(ctx, 1, 0);                     /* 00000001 LINE_SMOOTH_ENABLE */
+       xf_emit(ctx, 1, 0);                     /* 00000001 LINE_STIPPLE_ENABLE */
+       xf_emit(ctx, 1, 0x00ffff00);            /* 00ffffff LINE_STIPPLE_PATTERN */
+       xf_emit(ctx, 1, 0);                     /* 0000000f */
 }
 
 static void
-nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
-       xf_emit(ctx, 1, 0x3f);
-       xf_emit(ctx, 0xa, 0);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 2, 0x04000000);
-       xf_emit(ctx, 8, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 4);
-       if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 0x10, 0);
-       else
-               xf_emit(ctx, 0x11, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x1001);
-       xf_emit(ctx, 4, 0xffff);
-       xf_emit(ctx, 0x20, 0);
-       xf_emit(ctx, 0x10, 0x3f800000);
-       xf_emit(ctx, 1, 0x10);
-       if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 1, 0);
-       else
-               xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 3);
-       xf_emit(ctx, 2, 0);
+       /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
+       /* SEEK */
+       xf_emit(ctx, 1, 0x3f);          /* 0000003f UNK1590 */
+       xf_emit(ctx, 1, 0);             /* 00000001 ALPHA_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_BACK_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_FUNC_REF */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_MASK */
+       xf_emit(ctx, 3, 0);             /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+       xf_emit(ctx, 1, 2);             /* 00000003 tesla UNK143C */
+       xf_emit(ctx, 2, 0x04000000);    /* 07ffffff tesla UNK0D6C */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 1, 0);             /* 00000001 CLIPID_ENABLE */
+       xf_emit(ctx, 2, 0);             /* ffffffff DEPTH_BOUNDS */
+       xf_emit(ctx, 1, 0);             /* 00000001 */
+       xf_emit(ctx, 1, 0);             /* 00000007 DEPTH_TEST_FUNC */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 0000000f CULL_MODE */
+       xf_emit(ctx, 1, 0);             /* 0000ffff */
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK0FB0 */
+       xf_emit(ctx, 1, 0);             /* 00000001 POLYGON_STIPPLE_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 00000007 FP_CONTROL */
+       xf_emit(ctx, 1, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 0001ffff GP_BUILTIN_RESULT_EN */
+       xf_emit(ctx, 1, 0);             /* 000000ff CLEAR_STENCIL */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_FUNC_REF */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_MASK */
+       xf_emit(ctx, 3, 0);             /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_BACK_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffffffff CLEAR_DEPTH */
+       xf_emit(ctx, 1, 0);             /* 00000007 */
+       if (dev_priv->chipset != 0x50)
+               xf_emit(ctx, 1, 0);     /* 00000003 tesla UNK1108 */
+       xf_emit(ctx, 1, 0);             /* 00000001 SAMPLECNT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       xf_emit(ctx, 1, 0x1001);        /* 00001fff ZETA_ARRAY_MODE */
+       /* SEEK */
+       xf_emit(ctx, 4, 0xffff);        /* 0000ffff MSAA_MASK */
+       xf_emit(ctx, 0x10, 0);          /* 00000001 SCISSOR_ENABLE */
+       xf_emit(ctx, 0x10, 0);          /* ffffffff DEPTH_RANGE_NEAR */
+       xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+       xf_emit(ctx, 1, 0x10);          /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */
+       xf_emit(ctx, 1, 0);             /* 00000001 VIEWPORT_CLIP_RECTS_EN */
+       xf_emit(ctx, 1, 3);             /* 00000003 FP_CTRL_UNK196C */
+       xf_emit(ctx, 1, 0);             /* 00000003 tesla UNK1968 */
+       if (dev_priv->chipset != 0x50)
+               xf_emit(ctx, 1, 0);     /* 0fffffff tesla UNK1104 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK151C */
 }
 
 static void
-nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
 {
-       /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
-       xf_emit(ctx, 2, 0x04000000);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x80);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0x80);
-       xf_emit(ctx, 1, 0);
+       /* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 00000007 UNK0FB4 */
+       /* SEEK */
+       xf_emit(ctx, 4, 0);             /* 07ffffff CLIPID_REGION_HORIZ */
+       xf_emit(ctx, 4, 0);             /* 07ffffff CLIPID_REGION_VERT */
+       xf_emit(ctx, 2, 0);             /* 07ffffff SCREEN_SCISSOR */
+       xf_emit(ctx, 2, 0x04000000);    /* 07ffffff UNK1508 */
+       xf_emit(ctx, 1, 0);             /* 00000001 CLIPID_ENABLE */
+       xf_emit(ctx, 1, 0x80);          /* 00003fff CLIPID_WIDTH */
+       xf_emit(ctx, 1, 0);             /* 000000ff CLIPID_ID */
+       xf_emit(ctx, 1, 0);             /* 000000ff CLIPID_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff CLIPID_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0x80);          /* 00003fff CLIPID_HEIGHT */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_CLIPID */
 }
 
 static void
-nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
-       xf_emit(ctx, 2, 4);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 0x1c4d, 0);
+       int i;
+       /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
+       /* SEEK */
+       xf_emit(ctx, 0x33, 0);
+       /* SEEK */
+       xf_emit(ctx, 2, 0);
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 0000007f VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       /* SEEK */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 4, 0);     /* RO */
+               xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+               xf_emit(ctx, 1, 0);     /* 1ff */
+               xf_emit(ctx, 8, 0);     /* 0? */
+               xf_emit(ctx, 9, 0);     /* ffffffff, 7ff */
+
+               xf_emit(ctx, 4, 0);     /* RO */
+               xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+               xf_emit(ctx, 1, 0);     /* 1ff */
+               xf_emit(ctx, 8, 0);     /* 0? */
+               xf_emit(ctx, 9, 0);     /* ffffffff, 7ff */
-       else
-               xf_emit(ctx, 0x1c4b, 0);
-       xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0x8100c12);
+       } else {
+               xf_emit(ctx, 0xc, 0);   /* RO */
+               /* SEEK */
+               xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+               xf_emit(ctx, 1, 0);     /* 1ff */
+               xf_emit(ctx, 8, 0);     /* 0? */
+
+               /* SEEK */
+               xf_emit(ctx, 0xc, 0);   /* RO */
+               /* SEEK */
+               xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+               xf_emit(ctx, 1, 0);     /* 1ff */
+               xf_emit(ctx, 8, 0);     /* 0? */
+       }
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 4);             /* 0000007f VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0x8100c12);     /* 1fffffff FP_INTERPOLANT_CTRL */
        if (dev_priv->chipset != 0x50)
-               xf_emit(ctx, 1, 3);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x8100c12);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x80c14);
-       xf_emit(ctx, 1, 1);
+               xf_emit(ctx, 1, 3);     /* 00000003 tesla UNK1100 */
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0x8100c12);     /* 1fffffff FP_INTERPOLANT_CTRL */
+       xf_emit(ctx, 1, 0);             /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+       xf_emit(ctx, 1, 0x80c14);       /* 01ffffff SEMANTIC_COLOR */
+       xf_emit(ctx, 1, 1);             /* 00000001 */
+       /* SEEK */
        if (dev_priv->chipset >= 0xa0)
-               xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0x80c14);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0x8100c12);
-       xf_emit(ctx, 1, 0x27);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 0x3c1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 0x16, 0);
-       xf_emit(ctx, 1, 0x8100c12);
-       xf_emit(ctx, 1, 0);
+               xf_emit(ctx, 2, 4);     /* 000000ff */
+       xf_emit(ctx, 1, 0x80c14);       /* 01ffffff SEMANTIC_COLOR */
+       xf_emit(ctx, 1, 0);             /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 POINT_SPRITE_ENABLE */
+       xf_emit(ctx, 1, 0x8100c12);     /* 1fffffff FP_INTERPOLANT_CTRL */
+       xf_emit(ctx, 1, 0x27);          /* 000000ff SEMANTIC_PRIM_ID */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 0000000f */
+       xf_emit(ctx, 1, 1);             /* 00000001 */
+       for (i = 0; i < 10; i++) {
+               /* SEEK */
+               xf_emit(ctx, 0x40, 0);          /* ffffffff */
+               xf_emit(ctx, 0x10, 0);          /* 3, 0, 0.... */
+               xf_emit(ctx, 0x10, 0);          /* ffffffff */
+       }
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 00000001 POINT_SPRITE_CTRL */
+       xf_emit(ctx, 1, 1);             /* 00000001 */
+       xf_emit(ctx, 1, 0);             /* ffffffff */
+       xf_emit(ctx, 4, 0);             /* ffffffff NOPERSPECTIVE_BITMAP */
+       xf_emit(ctx, 0x10, 0);          /* 00ffffff POINT_COORD_REPLACE_MAP */
+       xf_emit(ctx, 1, 0);             /* 00000003 WINDOW_ORIGIN */
+       xf_emit(ctx, 1, 0x8100c12);     /* 1fffffff FP_INTERPOLANT_CTRL */
+       if (dev_priv->chipset != 0x50)
+               xf_emit(ctx, 1, 0);     /* 000003ff */
 }
 
 static void
-nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 0xf);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 8, 0);
-       else
-               xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 0x20);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 0x11, 0);
+       int acnt = 0x10, rep, i;
+       /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
+       if (IS_NVA3F(dev_priv->chipset))
+               acnt = 0x20;
+       /* SEEK */
+       if (dev_priv->chipset >= 0xa0) {
+               xf_emit(ctx, 1, 0);     /* ffffffff tesla UNK13A4 */
+               xf_emit(ctx, 1, 1);     /* 00000fff tesla UNK1318 */
+       }
+       xf_emit(ctx, 1, 0);             /* ffffffff VERTEX_BUFFER_FIRST */
+       xf_emit(ctx, 1, 0);             /* 00000001 PRIMITIVE_RESTART_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK0DE8 */
+       xf_emit(ctx, 1, 0);             /* ffffffff PRIMITIVE_RESTART_INDEX */
+       xf_emit(ctx, 1, 0xf);           /* ffffffff VP_ATTR_EN */
+       xf_emit(ctx, (acnt/8)-1, 0);    /* ffffffff VP_ATTR_EN */
+       xf_emit(ctx, acnt/8, 0);        /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+       xf_emit(ctx, 1, 0);             /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+       xf_emit(ctx, 1, 0x20);          /* 0000ffff tesla UNK129C */
+       xf_emit(ctx, 1, 0);             /* 000000ff turing UNK370??? */
+       xf_emit(ctx, 1, 0);             /* 0000ffff turing USER_PARAM_COUNT */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       /* SEEK */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 0xb, 0);   /* RO */
        else if (dev_priv->chipset >= 0xa0)
-               xf_emit(ctx, 0xf, 0);
+               xf_emit(ctx, 0x9, 0);   /* RO */
        else
-               xf_emit(ctx, 0xe, 0);
-       xf_emit(ctx, 1, 0x1a);
-       xf_emit(ctx, 0xd, 0);
-       xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 8);
-       xf_emit(ctx, 1, 0);
+               xf_emit(ctx, 0x8, 0);   /* RO */
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 00000001 EDGE_FLAG */
+       xf_emit(ctx, 1, 0);             /* 00000001 PROVOKING_VERTEX_LAST */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0x1a);          /* 0000001f POLYGON_MODE */
+       /* SEEK */
+       xf_emit(ctx, 0xc, 0);           /* RO */
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 7f/ff */
+       xf_emit(ctx, 1, 4);             /* 7f/ff VP_REG_ALLOC_RESULT */
+       xf_emit(ctx, 1, 4);             /* 7f/ff VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);             /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+       xf_emit(ctx, 1, 4);             /* 000001ff UNK1A28 */
+       xf_emit(ctx, 1, 8);             /* 000001ff UNK0DF0 */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
        if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 1, 0x3ff);
+               xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
        else
-               xf_emit(ctx, 1, 0x7ff);
+               xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
        if (dev_priv->chipset == 0xa8)
-               xf_emit(ctx, 1, 0x1e00);
-       xf_emit(ctx, 0xc, 0);
-       xf_emit(ctx, 1, 0xf);
-       if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 0x125, 0);
-       else if (dev_priv->chipset < 0xa0)
-               xf_emit(ctx, 0x126, 0);
-       else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
-               xf_emit(ctx, 0x124, 0);
+               xf_emit(ctx, 1, 0x1e00);        /* 7fff */
+       /* SEEK */
+       xf_emit(ctx, 0xc, 0);           /* RO or close */
+       /* SEEK */
+       xf_emit(ctx, 1, 0xf);           /* ffffffff VP_ATTR_EN */
+       xf_emit(ctx, (acnt/8)-1, 0);    /* ffffffff VP_ATTR_EN */
+       xf_emit(ctx, 1, 0);             /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+       if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
+               xf_emit(ctx, 2, 0);     /* ffffffff */
        else
-               xf_emit(ctx, 0x1f7, 0);
-       xf_emit(ctx, 1, 0xf);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 3, 0);
+               xf_emit(ctx, 1, 0);     /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 00000003 tesla UNK0FD8 */
+       /* SEEK */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 0x10, 0);  /* 0? */
+               xf_emit(ctx, 2, 0);     /* weird... */
+               xf_emit(ctx, 2, 0);     /* RO */
+       } else {
+               xf_emit(ctx, 8, 0);     /* 0? */
+               xf_emit(ctx, 1, 0);     /* weird... */
+               xf_emit(ctx, 2, 0);     /* RO */
+       }
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* ffffffff VB_ELEMENT_BASE */
+       xf_emit(ctx, 1, 0);             /* ffffffff UNK1438 */
+       xf_emit(ctx, acnt, 0);          /* 1 tesla UNK1000 */
+       if (dev_priv->chipset >= 0xa0)
+               xf_emit(ctx, 1, 0);     /* ffffffff tesla UNK1118? */
+       /* SEEK */
+       xf_emit(ctx, acnt, 0);          /* ffffffff VERTEX_ARRAY_UNK90C */
+       xf_emit(ctx, 1, 0);             /* f/1f */
+       /* SEEK */
+       xf_emit(ctx, acnt, 0);          /* ffffffff VERTEX_ARRAY_UNK90C */
+       xf_emit(ctx, 1, 0);             /* f/1f */
+       /* SEEK */
+       xf_emit(ctx, acnt, 0);          /* RO */
+       xf_emit(ctx, 2, 0);             /* RO */
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK111C? */
+       xf_emit(ctx, 1, 0);             /* RO */
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 000000ff UNK15F4_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff UNK15F4_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0);             /* 000000ff UNK0F84_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff UNK0F84_ADDRESS_LOW */
+       /* SEEK */
+       xf_emit(ctx, acnt, 0);          /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */
+       xf_emit(ctx, 3, 0);             /* f/1f */
+       /* SEEK */
+       xf_emit(ctx, acnt, 0);          /* 00000fff VERTEX_ARRAY_STRIDE */
+       xf_emit(ctx, 3, 0);             /* f/1f */
+       /* SEEK */
+       xf_emit(ctx, acnt, 0);          /* ffffffff VERTEX_ARRAY_LOW */
+       xf_emit(ctx, 3, 0);             /* f/1f */
+       /* SEEK */
+       xf_emit(ctx, acnt, 0);          /* 000000ff VERTEX_ARRAY_HIGH */
+       xf_emit(ctx, 3, 0);             /* f/1f */
+       /* SEEK */
+       xf_emit(ctx, acnt, 0);          /* ffffffff VERTEX_LIMIT_LOW */
+       xf_emit(ctx, 3, 0);             /* f/1f */
+       /* SEEK */
+       xf_emit(ctx, acnt, 0);          /* 000000ff VERTEX_LIMIT_HIGH */
+       xf_emit(ctx, 3, 0);             /* f/1f */
+       /* SEEK */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, acnt, 0);          /* f */
+               xf_emit(ctx, 3, 0);             /* f/1f */
+       }
+       /* SEEK */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 2, 0);     /* RO */
+       else
+               xf_emit(ctx, 5, 0);     /* RO */
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* ffff DMA_VTXBUF */
+       /* SEEK */
+       if (dev_priv->chipset < 0xa0) {
+               xf_emit(ctx, 0x41, 0);  /* RO */
+               /* SEEK */
+               xf_emit(ctx, 0x11, 0);  /* RO */
+       } else if (!IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 0x50, 0);  /* RO */
        else
-               xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 0xa1, 0);
+               xf_emit(ctx, 0x58, 0);  /* RO */
+       /* SEEK */
+       xf_emit(ctx, 1, 0xf);           /* ffffffff VP_ATTR_EN */
+       xf_emit(ctx, (acnt/8)-1, 0);    /* ffffffff VP_ATTR_EN */
+       xf_emit(ctx, 1, 1);             /* 1 UNK0DEC */
+       /* SEEK */
+       xf_emit(ctx, acnt*4, 0);        /* ffffffff VTX_ATTR */
+       xf_emit(ctx, 4, 0);             /* f/1f, 0, 0, 0 */
+       /* SEEK */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 0x1d, 0);  /* RO */
        else
-               xf_emit(ctx, 0x5a, 0);
-       xf_emit(ctx, 1, 0xf);
+               xf_emit(ctx, 0x16, 0);  /* RO */
+       /* SEEK */
+       xf_emit(ctx, 1, 0xf);           /* ffffffff VP_ATTR_EN */
+       xf_emit(ctx, (acnt/8)-1, 0);    /* ffffffff VP_ATTR_EN */
+       /* SEEK */
        if (dev_priv->chipset < 0xa0)
-               xf_emit(ctx, 0x834, 0);
-       else if (dev_priv->chipset == 0xa0)
-               xf_emit(ctx, 0x1873, 0);
-       else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 0x8ba, 0);
+               xf_emit(ctx, 8, 0);     /* RO */
+       else if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 0xc, 0);   /* RO */
+       else
+               xf_emit(ctx, 7, 0);     /* RO */
+       /* SEEK */
+       xf_emit(ctx, 0xa, 0);           /* RO */
+       if (dev_priv->chipset == 0xa0)
+               rep = 0xc;
+       else
+               rep = 4;
+       for (i = 0; i < rep; i++) {
+               /* SEEK */
+               if (IS_NVA3F(dev_priv->chipset))
+                       xf_emit(ctx, 0x20, 0);  /* ffffffff */
+               xf_emit(ctx, 0x200, 0); /* ffffffff */
+               xf_emit(ctx, 4, 0);     /* 7f/ff, 0, 0, 0 */
+               xf_emit(ctx, 4, 0);     /* ffffffff */
+       }
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 113/111 */
+       xf_emit(ctx, 1, 0xf);           /* ffffffff VP_ATTR_EN */
+       xf_emit(ctx, (acnt/8)-1, 0);    /* ffffffff VP_ATTR_EN */
+       xf_emit(ctx, acnt/8, 0);        /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+       xf_emit(ctx, 1, 0);             /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       /* SEEK */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 7, 0);     /* weird... */
        else
-               xf_emit(ctx, 0x833, 0);
-       xf_emit(ctx, 1, 0xf);
-       xf_emit(ctx, 0xf, 0);
+               xf_emit(ctx, 5, 0);     /* weird... */
 }
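
vfetch emits VP_ATTR_EN several times as one 0xf word followed by
(acnt/8)-1 zero words, i.e. the enable mask spans acnt/8 words for acnt
attributes, eight per 32-bit word. A sketch of that default under the
assumption of a 4-bit enable field per attribute:

	#include <stdint.h>

	/* Mirror of xf_emit(ctx, 1, 0xf) + xf_emit(ctx, (acnt/8)-1, 0):
	 * only attribute 0 is enabled in the default context. */
	static void default_vp_attr_en(uint32_t *words, int acnt)
	{
		int i;

		words[0] = 0xf;
		for (i = 1; i < acnt / 8; i++)
			words[i] = 0;
	}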
 
 static void
-nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
-       xf_emit(ctx, 2, 0);
-       if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 2, 1);
-       else
-               xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 2, 0x100);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 8);
-       xf_emit(ctx, 5, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 3, 1);
-       xf_emit(ctx, 1, 0xcf);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 6, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 3, 1);
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x15);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0x4444480);
-       xf_emit(ctx, 0x37, 0);
+       /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
+       /* SEEK */
+       xf_emit(ctx, 2, 0);             /* 0001ffff CLIP_X, CLIP_Y */
+       xf_emit(ctx, 2, 0);             /* 0000ffff CLIP_W, CLIP_H */
+       xf_emit(ctx, 1, 0);             /* 00000001 CLIP_ENABLE */
+       if (dev_priv->chipset < 0xa0) {
+               /* this is useless on everything but the original NV50;
+                * guess they forgot to nuke it, or just didn't bother. */
+               xf_emit(ctx, 2, 0);     /* 0000ffff IFC_CLIP_X, Y */
+               xf_emit(ctx, 2, 1);     /* 0000ffff IFC_CLIP_W, H */
+               xf_emit(ctx, 1, 0);     /* 00000001 IFC_CLIP_ENABLE */
+       }
+       xf_emit(ctx, 1, 1);             /* 00000001 DST_LINEAR */
+       xf_emit(ctx, 1, 0x100);         /* 0001ffff DST_WIDTH */
+       xf_emit(ctx, 1, 0x100);         /* 0001ffff DST_HEIGHT */
+       xf_emit(ctx, 1, 0x11);          /* 3f[NV50]/7f[NV84+] DST_FORMAT */
+       xf_emit(ctx, 1, 0);             /* 0001ffff DRAW_POINT_X */
+       xf_emit(ctx, 1, 8);             /* 0000000f DRAW_UNK58C */
+       xf_emit(ctx, 1, 0);             /* 000fffff SIFC_DST_X_FRACT */
+       xf_emit(ctx, 1, 0);             /* 0001ffff SIFC_DST_X_INT */
+       xf_emit(ctx, 1, 0);             /* 000fffff SIFC_DST_Y_FRACT */
+       xf_emit(ctx, 1, 0);             /* 0001ffff SIFC_DST_Y_INT */
+       xf_emit(ctx, 1, 0);             /* 000fffff SIFC_DX_DU_FRACT */
+       xf_emit(ctx, 1, 1);             /* 0001ffff SIFC_DX_DU_INT */
+       xf_emit(ctx, 1, 0);             /* 000fffff SIFC_DY_DV_FRACT */
+       xf_emit(ctx, 1, 1);             /* 0001ffff SIFC_DY_DV_INT */
+       xf_emit(ctx, 1, 1);             /* 0000ffff SIFC_WIDTH */
+       xf_emit(ctx, 1, 1);             /* 0000ffff SIFC_HEIGHT */
+       xf_emit(ctx, 1, 0xcf);          /* 000000ff SIFC_FORMAT */
+       xf_emit(ctx, 1, 2);             /* 00000003 SIFC_BITMAP_UNK808 */
+       xf_emit(ctx, 1, 0);             /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */
+       xf_emit(ctx, 1, 0);             /* 00000001 SIFC_BITMAP_LSB_FIRST */
+       xf_emit(ctx, 1, 0);             /* 00000001 SIFC_BITMAP_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 0000ffff BLIT_DST_X */
+       xf_emit(ctx, 1, 0);             /* 0000ffff BLIT_DST_Y */
+       xf_emit(ctx, 1, 0);             /* 000fffff BLIT_DU_DX_FRACT */
+       xf_emit(ctx, 1, 1);             /* 0001ffff BLIT_DU_DX_INT */
+       xf_emit(ctx, 1, 0);             /* 000fffff BLIT_DV_DY_FRACT */
+       xf_emit(ctx, 1, 1);             /* 0001ffff BLIT_DV_DY_INT */
+       xf_emit(ctx, 1, 1);             /* 0000ffff BLIT_DST_W */
+       xf_emit(ctx, 1, 1);             /* 0000ffff BLIT_DST_H */
+       xf_emit(ctx, 1, 0);             /* 000fffff BLIT_SRC_X_FRACT */
+       xf_emit(ctx, 1, 0);             /* 0001ffff BLIT_SRC_X_INT */
+       xf_emit(ctx, 1, 0);             /* 000fffff BLIT_SRC_Y_FRACT */
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK888 */
+       xf_emit(ctx, 1, 4);             /* 0000003f UNK884 */
+       xf_emit(ctx, 1, 0);             /* 00000007 UNK880 */
+       xf_emit(ctx, 1, 1);             /* 0000001f tesla UNK0FB8 */
+       xf_emit(ctx, 1, 0x15);          /* 000000ff tesla UNK128C */
+       xf_emit(ctx, 2, 0);             /* 00000007, ffff0ff3 */
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK260 */
+       xf_emit(ctx, 1, 0x4444480);     /* 1fffffff UNK870 */
+       /* SEEK */
+       xf_emit(ctx, 0x10, 0);
+       /* SEEK */
+       xf_emit(ctx, 0x27, 0);
 }
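
The SIFC and BLIT scale registers above come in FRACT/INT pairs whose
masks (000fffff and 0001ffff) read as a fixed-point split with 20
fractional bits. A hedged conversion under that reading; the rounding
the hardware expects is an assumption:

	#include <stdint.h>

	/* Split a scale ratio into the INT/FRACT pair used by SIFC_DX_DU
	 * and BLIT_DU_DX above; ratio 1.0 gives INT=1, FRACT=0, matching
	 * the context defaults. */
	static void ratio_to_int_fract(double ratio,
				       uint32_t *ipart, uint32_t *fract)
	{
		uint32_t whole = (uint32_t)ratio;

		*ipart = whole & 0x1ffff;
		*fract = (uint32_t)((ratio - whole) * (1 << 20)) & 0xfffff;
	}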
 
 static void
-nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 {
-       /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 0x8100c12);
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 0x100);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0x10001);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x10001);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x10001);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 2);
+       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+       /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
+       /* SEEK */
+       xf_emit(ctx, 2, 0);             /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1924 */
+       xf_emit(ctx, 1, 0);             /* 00000003 WINDOW_ORIGIN */
+       xf_emit(ctx, 1, 0x8100c12);     /* 1fffffff FP_INTERPOLANT_CTRL */
+       xf_emit(ctx, 1, 0);             /* 000003ff */
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* ffffffff turing UNK364 */
+       xf_emit(ctx, 1, 0);             /* 0000000f turing UNK36C */
+       xf_emit(ctx, 1, 0);             /* 0000ffff USER_PARAM_COUNT */
+       xf_emit(ctx, 1, 0x100);         /* 00ffffff turing UNK384 */
+       xf_emit(ctx, 1, 0);             /* 0000000f turing UNK2A0 */
+       xf_emit(ctx, 1, 0);             /* 0000ffff GRIDID */
+       xf_emit(ctx, 1, 0x10001);       /* ffffffff GRIDDIM_XY */
+       xf_emit(ctx, 1, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0x10001);       /* ffffffff BLOCKDIM_XY */
+       xf_emit(ctx, 1, 1);             /* 0000ffff BLOCKDIM_Z */
+       xf_emit(ctx, 1, 0x10001);       /* 00ffffff BLOCK_ALLOC */
+       xf_emit(ctx, 1, 1);             /* 00000001 LANES32 */
+       xf_emit(ctx, 1, 4);             /* 000000ff FP_REG_ALLOC_TEMP */
+       xf_emit(ctx, 1, 2);             /* 00000003 REG_MODE */
+       /* SEEK */
+       xf_emit(ctx, 0x40, 0);          /* ffffffff USER_PARAM */
+       switch (dev_priv->chipset) {
+       case 0x50:
+       case 0x92:
+               xf_emit(ctx, 8, 0);     /* 7, 0, 0, 0, ... */
+               xf_emit(ctx, 0x80, 0);  /* fff */
+               xf_emit(ctx, 2, 0);     /* ff, fff */
+               xf_emit(ctx, 0x10*2, 0);        /* ffffffff, 1f */
+               break;
+       case 0x84:
+               xf_emit(ctx, 8, 0);     /* 7, 0, 0, 0, ... */
+               xf_emit(ctx, 0x60, 0);  /* fff */
+               xf_emit(ctx, 2, 0);     /* ff, fff */
+               xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
+               break;
+       case 0x94:
+       case 0x96:
+               xf_emit(ctx, 8, 0);     /* 7, 0, 0, 0, ... */
+               xf_emit(ctx, 0x40, 0);  /* fff */
+               xf_emit(ctx, 2, 0);     /* ff, fff */
+               xf_emit(ctx, 8*2, 0);   /* ffffffff, 1f */
+               break;
+       case 0x86:
+       case 0x98:
+               xf_emit(ctx, 4, 0);     /* f, 0, 0, 0 */
+               xf_emit(ctx, 0x10, 0);  /* fff */
+               xf_emit(ctx, 2, 0);     /* ff, fff */
+               xf_emit(ctx, 2*2, 0);   /* ffffffff, 1f */
+               break;
+       case 0xa0:
+               xf_emit(ctx, 8, 0);     /* 7, 0, 0, 0, ... */
+               xf_emit(ctx, 0xf0, 0);  /* fff */
+               xf_emit(ctx, 2, 0);     /* ff, fff */
+               xf_emit(ctx, 0x1e*2, 0);        /* ffffffff, 1f */
+               break;
+       case 0xa3:
+               xf_emit(ctx, 8, 0);     /* 7, 0, 0, 0, ... */
+               xf_emit(ctx, 0x60, 0);  /* fff */
+               xf_emit(ctx, 2, 0);     /* ff, fff */
+               xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
+               break;
+       case 0xa5:
+       case 0xaf:
+               xf_emit(ctx, 8, 0);     /* 7, 0, 0, 0, ... */
+               xf_emit(ctx, 0x30, 0);  /* fff */
+               xf_emit(ctx, 2, 0);     /* ff, fff */
+               xf_emit(ctx, 6*2, 0);   /* ffffffff, 1f */
+               break;
+       case 0xaa:
+               xf_emit(ctx, 0x12, 0);
+               break;
+       case 0xa8:
+       case 0xac:
+               xf_emit(ctx, 4, 0);     /* f, 0, 0, 0 */
+               xf_emit(ctx, 0x10, 0);  /* fff */
+               xf_emit(ctx, 2, 0);     /* ff, fff */
+               xf_emit(ctx, 2*2, 0);   /* ffffffff, 1f */
+               break;
+       }
+       xf_emit(ctx, 1, 0);             /* 0000000f */
+       xf_emit(ctx, 1, 0);             /* 00000000 */
+       xf_emit(ctx, 1, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 0000001f */
+       xf_emit(ctx, 4, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 00000003 turing UNK35C */
+       xf_emit(ctx, 1, 0);             /* ffffffff */
+       xf_emit(ctx, 4, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 00000003 turing UNK35C */
+       xf_emit(ctx, 1, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 000000ff */
 }
 
 static void
-nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
+nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
-       xf_emit(ctx, 1, 0x3f800000);
-       xf_emit(ctx, 6, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 0x1a);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 0x12, 0);
-       xf_emit(ctx, 1, 0x00ffff00);
-       xf_emit(ctx, 6, 0);
-       xf_emit(ctx, 1, 0xf);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0x0fac6881);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 0xf, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 2, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 1, 3);
+       xf_emit(ctx, 2, 0);             /* 00007fff WINDOW_OFFSET_XY */
+       xf_emit(ctx, 1, 0x3f800000);    /* ffffffff LINE_WIDTH */
+       xf_emit(ctx, 1, 0);             /* 00000001 LINE_SMOOTH_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1658 */
+       xf_emit(ctx, 1, 0);             /* 00000001 POLYGON_SMOOTH_ENABLE */
+       xf_emit(ctx, 3, 0);             /* 00000001 POLYGON_OFFSET_*_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 0000000f CULL_MODE */
+       xf_emit(ctx, 1, 0x1a);          /* 0000001f POLYGON_MODE */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 0);             /* 00000001 POINT_SPRITE_ENABLE */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK165C */
+       xf_emit(ctx, 0x10, 0);          /* 00000001 SCISSOR_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 1, 0);             /* 00000001 LINE_STIPPLE_ENABLE */
+       xf_emit(ctx, 1, 0x00ffff00);    /* 00ffffff LINE_STIPPLE_PATTERN */
+       xf_emit(ctx, 1, 0);             /* ffffffff POLYGON_OFFSET_UNITS */
+       xf_emit(ctx, 1, 0);             /* ffffffff POLYGON_OFFSET_FACTOR */
+       xf_emit(ctx, 1, 0);             /* 00000003 tesla UNK1668 */
+       xf_emit(ctx, 2, 0);             /* 07ffffff SCREEN_SCISSOR */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1900 */
+       xf_emit(ctx, 1, 0xf);           /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 7, 0);             /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 1, 0x0fac6881);    /* 0fffffff RT_CONTROL */
+       xf_emit(ctx, 1, 0x11);          /* 0000007f RT_FORMAT */
+       xf_emit(ctx, 7, 0);             /* 0000007f RT_FORMAT */
+       xf_emit(ctx, 8, 0);             /* 00000001 RT_HORIZ_LINEAR */
+       xf_emit(ctx, 1, 4);             /* 00000007 FP_CONTROL */
+       xf_emit(ctx, 1, 0);             /* 00000001 ALPHA_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000007 ALPHA_TEST_FUNC */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 3);     /* 00000003 UNK16B4 */
        else if (dev_priv->chipset >= 0xa0)
-               xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 2, 0x04000000);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 5);
-       xf_emit(ctx, 1, 0x52);
-       if (dev_priv->chipset == 0x50) {
-               xf_emit(ctx, 0x13, 0);
-       } else {
-               xf_emit(ctx, 4, 0);
-               xf_emit(ctx, 1, 1);
-               if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-                       xf_emit(ctx, 0x11, 0);
-               else
-                       xf_emit(ctx, 0x10, 0);
+               xf_emit(ctx, 1, 1);     /* 00000001 UNK16B4 */
+       xf_emit(ctx, 1, 0);             /* 00000003 MULTISAMPLE_CTRL */
+       xf_emit(ctx, 1, 0);             /* 00000003 tesla UNK0F90 */
+       xf_emit(ctx, 1, 2);             /* 00000003 tesla UNK143C */
+       xf_emit(ctx, 2, 0x04000000);    /* 07ffffff tesla UNK0D6C */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_MASK */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 SAMPLECNT_ENABLE */
+       xf_emit(ctx, 1, 5);             /* 0000000f UNK1408 */
+       xf_emit(ctx, 1, 0x52);          /* 000001ff SEMANTIC_PTSZ */
+       xf_emit(ctx, 1, 0);             /* ffffffff POINT_SIZE */
+       xf_emit(ctx, 1, 0);             /* 00000001 */
+       xf_emit(ctx, 1, 0);             /* 00000007 tesla UNK0FB4 */
+       if (dev_priv->chipset != 0x50) {
+               xf_emit(ctx, 1, 0);     /* 3ff */
+               xf_emit(ctx, 1, 1);     /* 00000001 tesla UNK1110 */
        }
-       xf_emit(ctx, 0x10, 0x3f800000);
-       xf_emit(ctx, 1, 0x10);
-       xf_emit(ctx, 0x26, 0);
-       xf_emit(ctx, 1, 0x8100c12);
-       xf_emit(ctx, 1, 5);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 4, 0xffff);
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 0);     /* 00000003 tesla UNK1928 */
+       xf_emit(ctx, 0x10, 0);          /* ffffffff DEPTH_RANGE_NEAR */
+       xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+       xf_emit(ctx, 1, 0x10);          /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+       xf_emit(ctx, 0x20, 0);          /* 07ffffff VIEWPORT_HORIZ*8, then VIEWPORT_VERT*8. (W&0x3fff)<<13 | (X&0x1fff). */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK187C */
+       xf_emit(ctx, 1, 0);             /* 00000003 WINDOW_ORIGIN */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_BACK_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_MASK */
+       xf_emit(ctx, 1, 0x8100c12);     /* 1fffffff FP_INTERPOLANT_CTRL */
+       xf_emit(ctx, 1, 5);             /* 0000000f tesla UNK1220 */
+       xf_emit(ctx, 1, 0);             /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 000000ff tesla UNK1A20 */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+       xf_emit(ctx, 4, 0xffff);        /* 0000ffff MSAA_MASK */
        if (dev_priv->chipset != 0x50)
-               xf_emit(ctx, 1, 3);
+               xf_emit(ctx, 1, 3);     /* 00000003 tesla UNK1100 */
        if (dev_priv->chipset < 0xa0)
-               xf_emit(ctx, 0x1f, 0);
-       else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 0xc, 0);
-       else
-               xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0x00ffff00);
-       xf_emit(ctx, 1, 0x1a);
+               xf_emit(ctx, 0x1c, 0);  /* RO */
+       else if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 0x9, 0);
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK1534 */
+       xf_emit(ctx, 1, 0);             /* 00000001 LINE_SMOOTH_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 LINE_STIPPLE_ENABLE */
+       xf_emit(ctx, 1, 0x00ffff00);    /* 00ffffff LINE_STIPPLE_PATTERN */
+       xf_emit(ctx, 1, 0x1a);          /* 0000001f POLYGON_MODE */
+       xf_emit(ctx, 1, 0);             /* 00000003 WINDOW_ORIGIN */
        if (dev_priv->chipset != 0x50) {
-               xf_emit(ctx, 1, 0);
-               xf_emit(ctx, 1, 3);
+               xf_emit(ctx, 1, 3);     /* 00000003 tesla UNK1100 */
+               xf_emit(ctx, 1, 0);     /* 3ff */
        }
+       /* XXX: the following block could belong either to unk1cxx, or
+        * to STRMOUT. Rather hard to tell. */
        if (dev_priv->chipset < 0xa0)
-               xf_emit(ctx, 0x26, 0);
+               xf_emit(ctx, 0x25, 0);
        else
-               xf_emit(ctx, 0x3c, 0);
-       xf_emit(ctx, 1, 0x102);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 4, 4);
-       if (dev_priv->chipset >= 0xa0)
-               xf_emit(ctx, 8, 0);
-       xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0);
+               xf_emit(ctx, 0x3b, 0);
+}
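
The VIEWPORT_HORIZ/VERT encoding is spelled out in the comment above:
(W & 0x3fff) << 13 | (X & 0x1fff). Transcribed directly as a helper;
behaviour for out-of-range values is not implied:

	#include <stdint.h>

	/* Pack one viewport word: 14-bit extent above a 13-bit origin. */
	static uint32_t pack_viewport(uint32_t pos, uint32_t extent)
	{
		return ((extent & 0x3fff) << 13) | (pos & 0x1fff);
	}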
+
+static void
+nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
+{
+       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+       xf_emit(ctx, 1, 0x102);         /* 0000ffff STRMOUT_BUFFER_CTRL */
+       xf_emit(ctx, 1, 0);             /* ffffffff STRMOUT_PRIMITIVE_COUNT */
+       xf_emit(ctx, 4, 4);             /* 000000ff STRMOUT_NUM_ATTRIBS */
+       if (dev_priv->chipset >= 0xa0) {
+               xf_emit(ctx, 4, 0);     /* ffffffff UNK1A8C */
+               xf_emit(ctx, 4, 0);     /* ffffffff UNK1780 */
+       }
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 4);             /* 0000007f VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
        if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 1, 0x3ff);
+               xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
        else
-               xf_emit(ctx, 1, 0x7ff);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x102);
-       xf_emit(ctx, 9, 0);
-       xf_emit(ctx, 4, 4);
-       xf_emit(ctx, 0x2c, 0);
+               xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       /* SEEK */
+       xf_emit(ctx, 1, 0x102);         /* 0000ffff STRMOUT_BUFFER_CTRL */
+       xf_emit(ctx, 1, 0);             /* ffffffff STRMOUT_PRIMITIVE_COUNT */
+       xf_emit(ctx, 4, 0);             /* 000000ff STRMOUT_ADDRESS_HIGH */
+       xf_emit(ctx, 4, 0);             /* ffffffff STRMOUT_ADDRESS_LOW */
+       xf_emit(ctx, 4, 4);             /* 000000ff STRMOUT_NUM_ATTRIBS */
+       if (dev_priv->chipset >= 0xa0) {
+               xf_emit(ctx, 4, 0);     /* ffffffff UNK1A8C */
+               xf_emit(ctx, 4, 0);     /* ffffffff UNK1780 */
+       }
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_STRMOUT */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_QUERY */
+       xf_emit(ctx, 1, 0);             /* 000000ff QUERY_ADDRESS_HIGH */
+       xf_emit(ctx, 2, 0);             /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */
+       xf_emit(ctx, 2, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       /* SEEK */
+       xf_emit(ctx, 0x20, 0);          /* ffffffff STRMOUT_MAP */
+       xf_emit(ctx, 1, 0);             /* 0000000f */
+       xf_emit(ctx, 1, 0);             /* 00000000? */
+       xf_emit(ctx, 2, 0);             /* ffffffff */
+}
+
+static void
+nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
+{
+       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+       xf_emit(ctx, 1, 0x4e3bfdf);     /* ffffffff UNK0D64 */
+       xf_emit(ctx, 1, 0x4e3bfdf);     /* ffffffff UNK0DF4 */
+       xf_emit(ctx, 1, 0);             /* 00000007 */
+       xf_emit(ctx, 1, 0);             /* 000003ff */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 0x11);  /* 000000ff tesla UNK1968 */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
+}
+
+static void
+nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
+{
+       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_QUERY */
+       xf_emit(ctx, 1, 0x0fac6881);    /* 0fffffff RT_CONTROL */
+       xf_emit(ctx, 2, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 000000ff QUERY_ADDRESS_HIGH */
+       xf_emit(ctx, 2, 0);             /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+       xf_emit(ctx, 1, 0);             /* 00000001 SAMPLECNT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 7 */
+       /* SEEK */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_QUERY */
+       xf_emit(ctx, 1, 0);             /* 000000ff QUERY_ADDRESS_HIGH */
+       xf_emit(ctx, 2, 0);             /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+       xf_emit(ctx, 1, 0x4e3bfdf);     /* ffffffff UNK0D64 */
+       xf_emit(ctx, 1, 0x4e3bfdf);     /* ffffffff UNK0DF4 */
+       xf_emit(ctx, 1, 0);             /* 00000001 eng2d UNK260 */
+       xf_emit(ctx, 1, 0);             /* ff/3ff */
+       xf_emit(ctx, 1, 0);             /* 00000007 */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 0x11);  /* 000000ff tesla UNK1968 */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
 }
 
 static void
@@ -1749,443 +2392,709 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
        int magic2;
        if (dev_priv->chipset == 0x50) {
                magic2 = 0x00003e60;
-       } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+       } else if (!IS_NVA3F(dev_priv->chipset)) {
                magic2 = 0x001ffe67;
        } else {
                magic2 = 0x00087e67;
        }
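+       /* magic2 is a reverse-engineered per-chipset default; it lands in
+        * tesla UNK0F78 below. */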
-       xf_emit(ctx, 8, 0);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, magic2);
-       xf_emit(ctx, 4, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 7, 0);
-       if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 1, 0x15);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x10);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 4, 0);
+       xf_emit(ctx, 1, 0);             /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_BACK_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_MASK */
+       xf_emit(ctx, 3, 0);             /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+       xf_emit(ctx, 1, 2);             /* 00000003 tesla UNK143C */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 1, magic2);        /* 001fffff tesla UNK0F78 */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_BOUNDS_EN */
+       xf_emit(ctx, 1, 0);             /* 00000007 DEPTH_TEST_FUNC */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_MASK */
+       xf_emit(ctx, 3, 0);             /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
+               xf_emit(ctx, 1, 0x15);  /* 000000ff */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_BACK_ENABLE */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK15B4 */
+       xf_emit(ctx, 1, 0x10);          /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */
+       xf_emit(ctx, 1, 0);             /* ffffffff CLEAR_DEPTH */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
        if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
-               xf_emit(ctx, 1, 4);
-               xf_emit(ctx, 1, 0x400);
-               xf_emit(ctx, 1, 0x300);
-               xf_emit(ctx, 1, 0x1001);
+               xf_emit(ctx, 3, 0);     /* ff, ffffffff, ffffffff */
+               xf_emit(ctx, 1, 4);     /* 7 */
+               xf_emit(ctx, 1, 0x400); /* fffffff */
+               xf_emit(ctx, 1, 0x300); /* ffff */
+               xf_emit(ctx, 1, 0x1001);        /* 1fff */
                if (dev_priv->chipset != 0xa0) {
-                       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-                               xf_emit(ctx, 1, 0);
+                       if (IS_NVA3F(dev_priv->chipset))
+                               xf_emit(ctx, 1, 0);     /* 0000000f UNK15C8 */
                        else
-                               xf_emit(ctx, 1, 0x15);
+                               xf_emit(ctx, 1, 0x15);  /* ff */
                }
-               xf_emit(ctx, 3, 0);
        }
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 8, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x10);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 0x13, 0);
-       xf_emit(ctx, 1, 0x10);
-       xf_emit(ctx, 0x10, 0);
-       xf_emit(ctx, 0x10, 0x3f800000);
-       xf_emit(ctx, 0x19, 0);
-       xf_emit(ctx, 1, 0x10);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x3f);
-       xf_emit(ctx, 6, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
+       xf_emit(ctx, 1, 0);             /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_BACK_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 1, 2);             /* 00000003 tesla UNK143C */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_BOUNDS_EN */
+       xf_emit(ctx, 1, 0);             /* 00000007 DEPTH_TEST_FUNC */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_BACK_ENABLE */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK15B4 */
+       xf_emit(ctx, 1, 0x10);          /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1900 */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_BACK_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_FUNC_REF */
+       xf_emit(ctx, 2, 0);             /* ffffffff DEPTH_BOUNDS */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_BOUNDS_EN */
+       xf_emit(ctx, 1, 0);             /* 00000007 DEPTH_TEST_FUNC */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 0000000f */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK0FB0 */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_FUNC_REF */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_BACK_ENABLE */
+       xf_emit(ctx, 1, 0x10);          /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+       xf_emit(ctx, 0x10, 0);          /* ffffffff DEPTH_RANGE_NEAR */
+       xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 0);             /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_BACK_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_FUNC_REF */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_MASK */
+       xf_emit(ctx, 3, 0);             /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+       xf_emit(ctx, 2, 0);             /* ffffffff DEPTH_BOUNDS */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_BOUNDS_EN */
+       xf_emit(ctx, 1, 0);             /* 00000007 DEPTH_TEST_FUNC */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 000000ff CLEAR_STENCIL */
+       xf_emit(ctx, 1, 0);             /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_FUNC_MASK */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_FUNC_REF */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_MASK */
+       xf_emit(ctx, 3, 0);             /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_BACK_ENABLE */
+       xf_emit(ctx, 1, 0x10);          /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 0x3f);          /* 0000003f UNK1590 */
+       xf_emit(ctx, 1, 0);             /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 2, 0);             /* ffff0ff3, ffff */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK0FB0 */
+       xf_emit(ctx, 1, 0);             /* 0001ffff GP_BUILTIN_RESULT_EN */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK15B4 */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffffffff CLEAR_DEPTH */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK19CC */
        if (dev_priv->chipset >= 0xa0) {
                xf_emit(ctx, 2, 0);
                xf_emit(ctx, 1, 0x1001);
                xf_emit(ctx, 0xb, 0);
        } else {
-               xf_emit(ctx, 0xc, 0);
+               xf_emit(ctx, 1, 0);     /* 00000007 */
+               xf_emit(ctx, 1, 0);     /* 00000001 tesla UNK1534 */
+               xf_emit(ctx, 1, 0);     /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+               xf_emit(ctx, 8, 0);     /* 00000001 BLEND_ENABLE */
+               xf_emit(ctx, 1, 0);     /* ffff0ff3 */
        }
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0xf);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0x11);
-       if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 4, 0);
-       else
-               xf_emit(ctx, 6, 0);
-       xf_emit(ctx, 3, 1);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, magic2);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x0fac6881);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               xf_emit(ctx, 1, 0);
-               xf_emit(ctx, 0x18, 1);
-               xf_emit(ctx, 8, 2);
-               xf_emit(ctx, 8, 1);
-               xf_emit(ctx, 8, 2);
-               xf_emit(ctx, 8, 1);
-               xf_emit(ctx, 3, 0);
-               xf_emit(ctx, 1, 1);
-               xf_emit(ctx, 5, 0);
-               xf_emit(ctx, 1, 1);
-               xf_emit(ctx, 0x16, 0);
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 7, 0);             /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 1, 0xf);           /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 7, 0);             /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f */
+       xf_emit(ctx, 1, 0);             /* 00000001 LOGIC_OP_ENABLE */
+       if (dev_priv->chipset != 0x50) {
+               xf_emit(ctx, 1, 0);     /* 0000000f LOGIC_OP */
+               xf_emit(ctx, 1, 0);     /* 000000ff */
+       }
+       xf_emit(ctx, 1, 0);             /* 00000007 OPERATION */
+       xf_emit(ctx, 1, 0);             /* ff/3ff */
+       xf_emit(ctx, 1, 0);             /* 00000003 UNK0F90 */
+       xf_emit(ctx, 2, 1);             /* 00000007 BLEND_EQUATION_RGB, ALPHA */
+       xf_emit(ctx, 1, 1);             /* 00000001 UNK133C */
+       xf_emit(ctx, 1, 2);             /* 0000001f BLEND_FUNC_SRC_RGB */
+       xf_emit(ctx, 1, 1);             /* 0000001f BLEND_FUNC_DST_RGB */
+       xf_emit(ctx, 1, 2);             /* 0000001f BLEND_FUNC_SRC_ALPHA */
+       xf_emit(ctx, 1, 1);             /* 0000001f BLEND_FUNC_DST_ALPHA */
+       xf_emit(ctx, 1, 0);             /* 00000001 */
+       xf_emit(ctx, 1, magic2);        /* 001fffff tesla UNK0F78 */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
+       xf_emit(ctx, 1, 0x0fac6881);    /* 0fffffff RT_CONTROL */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 1, 0);     /* 00000001 tesla UNK12E4 */
+               xf_emit(ctx, 8, 1);     /* 00000007 IBLEND_EQUATION_RGB */
+               xf_emit(ctx, 8, 1);     /* 00000007 IBLEND_EQUATION_ALPHA */
+               xf_emit(ctx, 8, 1);     /* 00000001 IBLEND_UNK00 */
+               xf_emit(ctx, 8, 2);     /* 0000001f IBLEND_FUNC_SRC_RGB */
+               xf_emit(ctx, 8, 1);     /* 0000001f IBLEND_FUNC_DST_RGB */
+               xf_emit(ctx, 8, 2);     /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+               xf_emit(ctx, 8, 1);     /* 0000001f IBLEND_FUNC_DST_ALPHA */
+               xf_emit(ctx, 1, 0);     /* 00000001 tesla UNK1140 */
+               xf_emit(ctx, 2, 0);     /* 00000001 */
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+               xf_emit(ctx, 1, 0);     /* 0000000f */
+               xf_emit(ctx, 1, 0);     /* 00000003 */
+               xf_emit(ctx, 1, 0);     /* ffffffff */
+               xf_emit(ctx, 2, 0);     /* 00000001 */
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+               xf_emit(ctx, 1, 0);     /* 00000001 */
+               xf_emit(ctx, 1, 0);     /* 000003ff */
+       } else if (dev_priv->chipset >= 0xa0) {
+               xf_emit(ctx, 2, 0);     /* 00000001 */
+               xf_emit(ctx, 1, 0);     /* 00000007 */
+               xf_emit(ctx, 1, 0);     /* 00000003 */
+               xf_emit(ctx, 1, 0);     /* ffffffff */
+               xf_emit(ctx, 2, 0);     /* 00000001 */
        } else {
-               if (dev_priv->chipset >= 0xa0)
-                       xf_emit(ctx, 0x1b, 0);
-               else
-                       xf_emit(ctx, 0x15, 0);
+               xf_emit(ctx, 1, 0);     /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+               xf_emit(ctx, 1, 0);     /* 00000003 tesla UNK1430 */
+               xf_emit(ctx, 1, 0);     /* ffffffff tesla UNK1A3C */
        }
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 2, 1);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 2, 1);
+       xf_emit(ctx, 4, 0);             /* ffffffff CLEAR_COLOR */
+       xf_emit(ctx, 4, 0);             /* ffffffff BLEND_COLOR A R G B */
+       xf_emit(ctx, 1, 0);             /* 00000fff eng2d UNK2B0 */
        if (dev_priv->chipset >= 0xa0)
-               xf_emit(ctx, 4, 0);
-       else
-               xf_emit(ctx, 3, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               xf_emit(ctx, 0x10, 1);
-               xf_emit(ctx, 8, 2);
-               xf_emit(ctx, 0x10, 1);
-               xf_emit(ctx, 8, 2);
-               xf_emit(ctx, 8, 1);
-               xf_emit(ctx, 3, 0);
+               xf_emit(ctx, 2, 0);     /* 00000001 */
+       xf_emit(ctx, 1, 0);             /* 000003ff */
+       xf_emit(ctx, 8, 0);             /* 00000001 BLEND_ENABLE */
+       xf_emit(ctx, 1, 1);             /* 00000001 UNK133C */
+       xf_emit(ctx, 1, 2);             /* 0000001f BLEND_FUNC_SRC_RGB */
+       xf_emit(ctx, 1, 1);             /* 0000001f BLEND_FUNC_DST_RGB */
+       xf_emit(ctx, 1, 1);             /* 00000007 BLEND_EQUATION_RGB */
+       xf_emit(ctx, 1, 2);             /* 0000001f BLEND_FUNC_SRC_ALPHA */
+       xf_emit(ctx, 1, 1);             /* 0000001f BLEND_FUNC_DST_ALPHA */
+       xf_emit(ctx, 1, 1);             /* 00000007 BLEND_EQUATION_ALPHA */
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK19C0 */
+       xf_emit(ctx, 1, 0);             /* 00000001 LOGIC_OP_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 0000000f LOGIC_OP */
+       if (dev_priv->chipset >= 0xa0)
+               xf_emit(ctx, 1, 0);     /* 00000001 UNK12E4? NVA3+ only? */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 8, 1);     /* 00000001 IBLEND_UNK00 */
+               xf_emit(ctx, 8, 1);     /* 00000007 IBLEND_EQUATION_RGB */
+               xf_emit(ctx, 8, 2);     /* 0000001f IBLEND_FUNC_SRC_RGB */
+               xf_emit(ctx, 8, 1);     /* 0000001f IBLEND_FUNC_DST_RGB */
+               xf_emit(ctx, 8, 1);     /* 00000007 IBLEND_EQUATION_ALPHA */
+               xf_emit(ctx, 8, 2);     /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+               xf_emit(ctx, 8, 1);     /* 0000001f IBLEND_FUNC_DST_ALPHA */
+               xf_emit(ctx, 1, 0);     /* 00000001 tesla UNK15C4 */
+               xf_emit(ctx, 1, 0);     /* 00000001 */
+               xf_emit(ctx, 1, 0);     /* 00000001 tesla UNK1140 */
        }
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 0x5b, 0);
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f DST_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 DST_LINEAR */
+       xf_emit(ctx, 1, 0);             /* 00000007 PATTERN_COLOR_FORMAT */
+       xf_emit(ctx, 2, 0);             /* ffffffff PATTERN_MONO_COLOR */
+       xf_emit(ctx, 1, 0);             /* 00000001 PATTERN_MONO_FORMAT */
+       xf_emit(ctx, 2, 0);             /* ffffffff PATTERN_MONO_BITMAP */
+       xf_emit(ctx, 1, 0);             /* 00000003 PATTERN_SELECT */
+       xf_emit(ctx, 1, 0);             /* 000000ff ROP */
+       xf_emit(ctx, 1, 0);             /* ffffffff BETA1 */
+       xf_emit(ctx, 1, 0);             /* ffffffff BETA4 */
+       xf_emit(ctx, 1, 0);             /* 00000007 OPERATION */
+       xf_emit(ctx, 0x50, 0);          /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */
 }
 
 static void
-nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
        int magic3;
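+       /* magic3, when nonzero, is emitted into tesla UNK141C below; the
+        * per-chipset values are reverse-engineered. */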
-       if (dev_priv->chipset == 0x50)
+       switch (dev_priv->chipset) {
+       case 0x50:
                magic3 = 0x1000;
-       else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8)
+               break;
+       case 0x86:
+       case 0x98:
+       case 0xa8:
+       case 0xaa:
+       case 0xac:
+       case 0xaf:
                magic3 = 0x1e00;
-       else
+               break;
+       default:
                magic3 = 0;
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 4);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 0x24, 0);
+       }
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 0);             /* 111/113[NVA0+] */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 0x1f, 0);  /* ffffffff */
        else if (dev_priv->chipset >= 0xa0)
-               xf_emit(ctx, 0x14, 0);
+               xf_emit(ctx, 0x0f, 0);  /* ffffffff */
        else
-               xf_emit(ctx, 0x15, 0);
-       xf_emit(ctx, 2, 4);
+               xf_emit(ctx, 0x10, 0);  /* fffffff VP_RESULT_MAP_1 up */
+       xf_emit(ctx, 2, 0);             /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
+       xf_emit(ctx, 1, 4);             /* 7f/ff VP_REG_ALLOC_RESULT */
+       xf_emit(ctx, 1, 4);             /* 7f/ff VP_RESULT_MAP_SIZE */
        if (dev_priv->chipset >= 0xa0)
-               xf_emit(ctx, 1, 0x03020100);
+               xf_emit(ctx, 1, 0x03020100);    /* ffffffff */
        else
-               xf_emit(ctx, 1, 0x00608080);
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 2, 4);
-       xf_emit(ctx, 1, 0x80);
+               xf_emit(ctx, 1, 0x00608080);    /* fffffff VP_RESULT_MAP_0 */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 2, 0);             /* 111/113, 7f/ff */
+       xf_emit(ctx, 1, 4);             /* 7f/ff VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_REG_ALLOC_RESULT */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0x80);          /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
        if (magic3)
-               xf_emit(ctx, 1, magic3);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 0x24, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 0x80);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 0x03020100);
-       xf_emit(ctx, 1, 3);
+               xf_emit(ctx, 1, magic3);        /* 00007fff tesla UNK141C */
+       xf_emit(ctx, 1, 4);             /* 7f/ff VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 0);             /* 111/113 */
+       xf_emit(ctx, 0x1f, 0);          /* ffffffff GP_RESULT_MAP_1 up */
+       xf_emit(ctx, 1, 0);             /* 0000001f */
+       xf_emit(ctx, 1, 0);             /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_REG_ALLOC_RESULT */
+       xf_emit(ctx, 1, 0x80);          /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0x03020100);    /* ffffffff GP_RESULT_MAP_0 */
+       xf_emit(ctx, 1, 3);             /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
        if (magic3)
-               xf_emit(ctx, 1, magic3);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 3);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 4);
+               xf_emit(ctx, 1, magic3);        /* 00007fff tesla UNK141C */
+       xf_emit(ctx, 1, 4);             /* 7f/ff VP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 0);             /* 00000001 PROVOKING_VERTEX_LAST */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 0);             /* 111/113 */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 3);             /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+       xf_emit(ctx, 1, 0);             /* 00000001 PROVOKING_VERTEX_LAST */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 0);             /* 00000003 tesla UNK13A0 */
+       xf_emit(ctx, 1, 4);             /* 7f/ff VP_REG_ALLOC_RESULT */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+       xf_emit(ctx, 1, 0);             /* 111/113 */
        if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
-               xf_emit(ctx, 0x1024, 0);
+               xf_emit(ctx, 0x1020, 0);        /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
        else if (dev_priv->chipset < 0xa0)
-               xf_emit(ctx, 0xa24, 0);
-       else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
-               xf_emit(ctx, 0x214, 0);
+               xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
+       else if (!IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 0x210, 0); /* ffffffff */
        else
-               xf_emit(ctx, 0x414, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 3);
-       xf_emit(ctx, 2, 0);
+               xf_emit(ctx, 0x410, 0); /* ffffffff */
+       xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+       xf_emit(ctx, 1, 4);             /* 000000ff GP_RESULT_MAP_SIZE */
+       xf_emit(ctx, 1, 3);             /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+       xf_emit(ctx, 1, 0);             /* 00000001 PROVOKING_VERTEX_LAST */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
 }
 
 static void
-nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
        int magic1, magic2;
        if (dev_priv->chipset == 0x50) {
                magic1 = 0x3ff;
                magic2 = 0x00003e60;
-       } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+       } else if (!IS_NVA3F(dev_priv->chipset)) {
                magic1 = 0x7ff;
                magic2 = 0x001ffe67;
        } else {
                magic1 = 0x7ff;
                magic2 = 0x00087e67;
        }
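+       /* as in the ropc gene, magic2 goes to tesla UNK0F78; magic1 is the
+        * tesla UNK0D68 limit (0x3ff on NV50, 0x7ff on NV84+). */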
-       xf_emit(ctx, 3, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 0xc, 0);
-       xf_emit(ctx, 1, 0xf);
-       xf_emit(ctx, 0xb, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 4, 0xffff);
-       xf_emit(ctx, 8, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 5, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 2, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               xf_emit(ctx, 1, 3);
-               xf_emit(ctx, 1, 0);
-       } else if (dev_priv->chipset >= 0xa0)
-               xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 0xa, 0);
-       xf_emit(ctx, 2, 1);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 2, 1);
-       xf_emit(ctx, 1, 2);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               xf_emit(ctx, 1, 0);
-               xf_emit(ctx, 0x18, 1);
-               xf_emit(ctx, 8, 2);
-               xf_emit(ctx, 8, 1);
-               xf_emit(ctx, 8, 2);
-               xf_emit(ctx, 8, 1);
-               xf_emit(ctx, 1, 0);
+       xf_emit(ctx, 1, 0);             /* 00000007 ALPHA_TEST_FUNC */
+       xf_emit(ctx, 1, 0);             /* ffffffff ALPHA_TEST_REF */
+       xf_emit(ctx, 1, 0);             /* 00000001 ALPHA_TEST_ENABLE */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 1);     /* 0000000f UNK16A0 */
+       xf_emit(ctx, 1, 0);             /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_BACK_MASK */
+       xf_emit(ctx, 3, 0);             /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+       xf_emit(ctx, 4, 0);             /* ffffffff BLEND_COLOR */
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK19C0 */
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK0FDC */
+       xf_emit(ctx, 1, 0xf);           /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 7, 0);             /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 LOGIC_OP_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ff[NV50]/3ff[NV84+] */
+       xf_emit(ctx, 1, 4);             /* 00000007 FP_CONTROL */
+       xf_emit(ctx, 4, 0xffff);        /* 0000ffff MSAA_MASK */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_MASK */
+       xf_emit(ctx, 3, 0);             /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_BACK_ENABLE */
+       xf_emit(ctx, 2, 0);             /* 00007fff WINDOW_OFFSET_XY */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK19CC */
+       xf_emit(ctx, 1, 0);             /* 7 */
+       xf_emit(ctx, 1, 0);             /* 00000001 SAMPLECNT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffffffff COLOR_KEY */
+       xf_emit(ctx, 1, 0);             /* 00000001 COLOR_KEY_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000007 COLOR_KEY_FORMAT */
+       xf_emit(ctx, 2, 0);             /* ffffffff SIFC_BITMAP_COLOR */
+       xf_emit(ctx, 1, 1);             /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000007 ALPHA_TEST_FUNC */
+       xf_emit(ctx, 1, 0);             /* 00000001 ALPHA_TEST_ENABLE */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 1, 3);     /* 00000003 tesla UNK16B4 */
+               xf_emit(ctx, 1, 0);     /* 00000003 */
+               xf_emit(ctx, 1, 0);     /* 00000003 tesla UNK1298 */
+       } else if (dev_priv->chipset >= 0xa0) {
+               xf_emit(ctx, 1, 1);     /* 00000001 tesla UNK16B4 */
+               xf_emit(ctx, 1, 0);     /* 00000003 */
+       } else {
+               xf_emit(ctx, 1, 0);     /* 00000003 MULTISAMPLE_CTRL */
        }
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0x0fac6881);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 3, 0xcf);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 0xa, 0);
-       xf_emit(ctx, 2, 1);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 2, 1);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 8, 1);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0x0fac6881);
-       xf_emit(ctx, 1, 0xf);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, magic2);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0x11);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 2, 1);
-       else
-               xf_emit(ctx, 1, 1);
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 8, 0);             /* 00000001 BLEND_ENABLE */
+       xf_emit(ctx, 1, 1);             /* 0000001f BLEND_FUNC_DST_ALPHA */
+       xf_emit(ctx, 1, 1);             /* 00000007 BLEND_EQUATION_ALPHA */
+       xf_emit(ctx, 1, 2);             /* 0000001f BLEND_FUNC_SRC_ALPHA */
+       xf_emit(ctx, 1, 1);             /* 0000001f BLEND_FUNC_DST_RGB */
+       xf_emit(ctx, 1, 1);             /* 00000007 BLEND_EQUATION_RGB */
+       xf_emit(ctx, 1, 2);             /* 0000001f BLEND_FUNC_SRC_RGB */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 1, 0);     /* 00000001 UNK12E4 */
+               xf_emit(ctx, 8, 1);     /* 00000007 IBLEND_EQUATION_RGB */
+               xf_emit(ctx, 8, 1);     /* 00000007 IBLEND_EQUATION_ALPHA */
+               xf_emit(ctx, 8, 1);     /* 00000001 IBLEND_UNK00 */
+               xf_emit(ctx, 8, 2);     /* 0000001f IBLEND_SRC_RGB */
+               xf_emit(ctx, 8, 1);     /* 0000001f IBLEND_DST_RGB */
+               xf_emit(ctx, 8, 2);     /* 0000001f IBLEND_SRC_ALPHA */
+               xf_emit(ctx, 8, 1);     /* 0000001f IBLEND_DST_ALPHA */
+               xf_emit(ctx, 1, 0);     /* 00000001 UNK1140 */
+       }
+       xf_emit(ctx, 1, 1);             /* 00000001 UNK133C */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 7, 0);             /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 1, 0x0fac6881);    /* 0fffffff RT_CONTROL */
+       xf_emit(ctx, 1, 0);             /* 00000001 LOGIC_OP_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ff/3ff */
+       xf_emit(ctx, 1, 4);             /* 00000007 FP_CONTROL */
+       xf_emit(ctx, 1, 0);             /* 00000003 UNK0F90 */
+       xf_emit(ctx, 1, 0);             /* 00000001 FRAMEBUFFER_SRGB */
+       xf_emit(ctx, 1, 0);             /* 7 */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f DST_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 DST_LINEAR */
+       xf_emit(ctx, 1, 0);             /* 00000007 OPERATION */
+       xf_emit(ctx, 1, 0xcf);          /* 000000ff SIFC_FORMAT */
+       xf_emit(ctx, 1, 0xcf);          /* 000000ff DRAW_COLOR_FORMAT */
+       xf_emit(ctx, 1, 0xcf);          /* 000000ff SRC_FORMAT */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
+       xf_emit(ctx, 1, 0);             /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 8, 0);             /* 00000001 BLEND_ENABLE */
+       xf_emit(ctx, 1, 1);             /* 0000001f BLEND_FUNC_DST_ALPHA */
+       xf_emit(ctx, 1, 1);             /* 00000007 BLEND_EQUATION_ALPHA */
+       xf_emit(ctx, 1, 2);             /* 0000001f BLEND_FUNC_SRC_ALPHA */
+       xf_emit(ctx, 1, 1);             /* 0000001f BLEND_FUNC_DST_RGB */
+       xf_emit(ctx, 1, 1);             /* 00000007 BLEND_EQUATION_RGB */
+       xf_emit(ctx, 1, 2);             /* 0000001f BLEND_FUNC_SRC_RGB */
+       xf_emit(ctx, 1, 1);             /* 00000001 UNK133C */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 8, 1);             /* 00000001 UNK19E0 */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 7, 0);             /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 1, 0x0fac6881);    /* 0fffffff RT_CONTROL */
+       xf_emit(ctx, 1, 0xf);           /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 7, 0);             /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 1, magic2);        /* 001fffff tesla UNK0F78 */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_BOUNDS_EN */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f DST_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 DST_LINEAR */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
        if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 1, 0);
+               xf_emit(ctx, 1, 0);     /* ff */
        else
-               xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 5, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0x0fac6881);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, magic1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 2, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 0x28, 0);
-       xf_emit(ctx, 8, 8);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0x0fac6881);
-       xf_emit(ctx, 8, 0x400);
-       xf_emit(ctx, 8, 0x300);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0xf);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0x20);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 1, 0x100);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0x40);
-       xf_emit(ctx, 1, 0x100);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 3);
-       xf_emit(ctx, 4, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, magic2);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 1, 0x0fac6881);
-       xf_emit(ctx, 9, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x400);
-       xf_emit(ctx, 1, 0x300);
-       xf_emit(ctx, 1, 0x1001);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 4, 0);
-       else
-               xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0x0fac6881);
-       xf_emit(ctx, 1, 0xf);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               xf_emit(ctx, 0x15, 0);
-               xf_emit(ctx, 1, 1);
-               xf_emit(ctx, 3, 0);
-       } else
-               xf_emit(ctx, 0x17, 0);
+               xf_emit(ctx, 3, 0);     /* 1, 7, 3ff */
+       xf_emit(ctx, 1, 4);             /* 00000007 FP_CONTROL */
+       xf_emit(ctx, 1, 0);             /* 00000003 UNK0F90 */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000007 */
+       xf_emit(ctx, 1, 0);             /* 00000001 SAMPLECNT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
+       xf_emit(ctx, 1, 0);             /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 7, 0);             /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 1, 0x0fac6881);    /* 0fffffff RT_CONTROL */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_BOUNDS_EN */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f DST_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 DST_LINEAR */
+       xf_emit(ctx, 1, 0);             /* 000fffff BLIT_DU_DX_FRACT */
+       xf_emit(ctx, 1, 1);             /* 0001ffff BLIT_DU_DX_INT */
+       xf_emit(ctx, 1, 0);             /* 000fffff BLIT_DV_DY_FRACT */
+       xf_emit(ctx, 1, 1);             /* 0001ffff BLIT_DV_DY_INT */
+       xf_emit(ctx, 1, 0);             /* ff/3ff */
+       xf_emit(ctx, 1, magic1);        /* 3ff/7ff tesla UNK0D68 */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK15B4 */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000007 */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+       xf_emit(ctx, 8, 0);             /* 0000ffff DMA_COLOR */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_GLOBAL */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_LOCAL */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_STACK */
+       xf_emit(ctx, 1, 0);             /* ff/3ff */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_DST */
+       xf_emit(ctx, 1, 0);             /* 7 */
+       xf_emit(ctx, 1, 0);             /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 8, 0);             /* 000000ff RT_ADDRESS_HIGH */
+       xf_emit(ctx, 8, 0);             /* ffffffff RT_LAYER_STRIDE */
+       xf_emit(ctx, 8, 0);             /* ffffffff RT_ADDRESS_LOW */
+       xf_emit(ctx, 8, 8);             /* 0000007f RT_TILE_MODE */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 7, 0);             /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 1, 0x0fac6881);    /* 0fffffff RT_CONTROL */
+       xf_emit(ctx, 8, 0x400);         /* 0fffffff RT_HORIZ */
+       xf_emit(ctx, 8, 0x300);         /* 0000ffff RT_VERT */
+       xf_emit(ctx, 1, 1);             /* 00001fff RT_ARRAY_MODE */
+       xf_emit(ctx, 1, 0xf);           /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 7, 0);             /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 1, 0x20);          /* 00000fff DST_TILE_MODE */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f DST_FORMAT */
+       xf_emit(ctx, 1, 0x100);         /* 0001ffff DST_HEIGHT */
+       xf_emit(ctx, 1, 0);             /* 000007ff DST_LAYER */
+       xf_emit(ctx, 1, 1);             /* 00000001 DST_LINEAR */
+       xf_emit(ctx, 1, 0);             /* ffffffff DST_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0);             /* 000000ff DST_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0x40);          /* 0007ffff DST_PITCH */
+       xf_emit(ctx, 1, 0x100);         /* 0001ffff DST_WIDTH */
+       xf_emit(ctx, 1, 0);             /* 0000ffff */
+       xf_emit(ctx, 1, 3);             /* 00000003 tesla UNK15AC */
+       xf_emit(ctx, 1, 0);             /* ff/3ff */
+       xf_emit(ctx, 1, 0);             /* 0001ffff GP_BUILTIN_RESULT_EN */
+       xf_emit(ctx, 1, 0);             /* 00000003 UNK0F90 */
+       xf_emit(ctx, 1, 0);             /* 00000007 */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+       xf_emit(ctx, 1, magic2);        /* 001fffff tesla UNK0F78 */
+       xf_emit(ctx, 1, 0);             /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 1, 2);             /* 00000003 tesla UNK143C */
+       xf_emit(ctx, 1, 0x0fac6881);    /* 0fffffff RT_CONTROL */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_ZETA */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_BOUNDS_EN */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       xf_emit(ctx, 2, 0);             /* ffff, ff/3ff */
+       xf_emit(ctx, 1, 0);             /* 0001ffff GP_BUILTIN_RESULT_EN */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 000000ff STENCIL_FRONT_MASK */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK15B4 */
+       xf_emit(ctx, 1, 0);             /* 00000007 */
+       xf_emit(ctx, 1, 0);             /* ffffffff ZETA_LAYER_STRIDE */
+       xf_emit(ctx, 1, 0);             /* 000000ff ZETA_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);             /* ffffffff ZETA_ADDRESS_LOW */
+       xf_emit(ctx, 1, 4);             /* 00000007 ZETA_TILE_MODE */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       xf_emit(ctx, 1, 0x400);         /* 0fffffff ZETA_HORIZ */
+       xf_emit(ctx, 1, 0x300);         /* 0000ffff ZETA_VERT */
+       xf_emit(ctx, 1, 0x1001);        /* 00001fff ZETA_ARRAY_MODE */
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
+       xf_emit(ctx, 1, 0);             /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 0);     /* 00000001 */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 7, 0);             /* 3f/7f RT_FORMAT */
+       xf_emit(ctx, 1, 0x0fac6881);    /* 0fffffff RT_CONTROL */
+       xf_emit(ctx, 1, 0xf);           /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 7, 0);             /* 0000000f COLOR_MASK */
+       xf_emit(ctx, 1, 0);             /* ff/3ff */
+       xf_emit(ctx, 8, 0);             /* 00000001 BLEND_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000003 UNK0F90 */
+       xf_emit(ctx, 1, 0);             /* 00000001 FRAMEBUFFER_SRGB */
+       xf_emit(ctx, 1, 0);             /* 7 */
+       xf_emit(ctx, 1, 0);             /* 00000001 LOGIC_OP_ENABLE */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 1, 0);     /* 00000001 UNK1140 */
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+       }
+       xf_emit(ctx, 1, 0);             /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK1534 */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
        if (dev_priv->chipset >= 0xa0)
-               xf_emit(ctx, 1, 0x0fac6881);
-       xf_emit(ctx, 1, magic2);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 2, 1);
-       xf_emit(ctx, 3, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 2, 1);
-       else
-               xf_emit(ctx, 1, 1);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 2, 0);
-       else if (dev_priv->chipset != 0x50)
-               xf_emit(ctx, 1, 0);
+               xf_emit(ctx, 1, 0x0fac6881);    /* fffffff */
+       xf_emit(ctx, 1, magic2);        /* 001fffff tesla UNK0F78 */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_BOUNDS_EN */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE_ENABLE */
+       xf_emit(ctx, 1, 0x11);          /* 3f/7f DST_FORMAT */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK0FB0 */
+       xf_emit(ctx, 1, 0);             /* ff/3ff */
+       xf_emit(ctx, 1, 4);             /* 00000007 FP_CONTROL */
+       xf_emit(ctx, 1, 0);             /* 00000001 STENCIL_FRONT_ENABLE */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK15B4 */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK19CC */
+       xf_emit(ctx, 1, 0);             /* 00000007 */
+       xf_emit(ctx, 1, 0);             /* 00000001 SAMPLECNT_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 0000000f ZETA_FORMAT */
+       xf_emit(ctx, 1, 1);             /* 00000001 ZETA_ENABLE */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+               xf_emit(ctx, 1, 0);     /* 0000000f tesla UNK15C8 */
+       }
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A3C */
+       if (dev_priv->chipset >= 0xa0) {
+               xf_emit(ctx, 3, 0);             /* 7/f, 1, ffff0ff3 */
+               xf_emit(ctx, 1, 0xfac6881);     /* fffffff */
+               xf_emit(ctx, 4, 0);             /* 1, 1, 1, 3ff */
+               xf_emit(ctx, 1, 4);             /* 7 */
+               xf_emit(ctx, 1, 0);             /* 1 */
+               xf_emit(ctx, 2, 1);             /* 1 */
+               xf_emit(ctx, 2, 0);             /* 7, f */
+               xf_emit(ctx, 1, 1);             /* 1 */
+               xf_emit(ctx, 1, 0);             /* 7/f */
+               if (IS_NVA3F(dev_priv->chipset))
+                       xf_emit(ctx, 0x9, 0);   /* 1 */
+               else
+                       xf_emit(ctx, 0x8, 0);   /* 1 */
+               xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+               xf_emit(ctx, 8, 1);             /* 1 */
+               xf_emit(ctx, 1, 0x11);          /* 7f */
+               xf_emit(ctx, 7, 0);             /* 7f */
+               xf_emit(ctx, 1, 0xfac6881);     /* fffffff */
+               xf_emit(ctx, 1, 0xf);           /* f */
+               xf_emit(ctx, 7, 0);             /* f */
+               xf_emit(ctx, 1, 0x11);          /* 7f */
+               xf_emit(ctx, 1, 1);             /* 1 */
+               xf_emit(ctx, 5, 0);             /* 1, 7, 3ff, 3, 7 */
+               if (IS_NVA3F(dev_priv->chipset)) {
+                       xf_emit(ctx, 1, 0);     /* 00000001 UNK1140 */
+                       xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+               }
+       }
 }
 
 static void
-nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
+       xf_emit(ctx, 2, 0);             /* 1 LINKED_TSC. yes, 2. */
+       if (dev_priv->chipset != 0x50)
+               xf_emit(ctx, 1, 0);     /* 3 */
+       xf_emit(ctx, 1, 1);             /* 1ffff BLIT_DU_DX_INT */
+       xf_emit(ctx, 1, 0);             /* fffff BLIT_DU_DX_FRACT */
+       xf_emit(ctx, 1, 1);             /* 1ffff BLIT_DV_DY_INT */
+       xf_emit(ctx, 1, 0);             /* fffff BLIT_DV_DY_FRACT */
        if (dev_priv->chipset == 0x50)
-               xf_emit(ctx, 2, 0);
+               xf_emit(ctx, 1, 0);     /* 3 BLIT_CONTROL */
        else
-               xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0x2a712488);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x4085c000);
-       xf_emit(ctx, 1, 0x40);
-       xf_emit(ctx, 1, 0x100);
-       xf_emit(ctx, 1, 0x10100);
-       xf_emit(ctx, 1, 0x02800000);
+               xf_emit(ctx, 2, 0);     /* 3ff, 1 */
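+       /* default SRC TIC (texture image control) words, presumably for the
+        * 2D blit source; reverse-engineered defaults. */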
+       xf_emit(ctx, 1, 0x2a712488);    /* ffffffff SRC_TIC_0 */
+       xf_emit(ctx, 1, 0);             /* ffffffff SRC_TIC_1 */
+       xf_emit(ctx, 1, 0x4085c000);    /* ffffffff SRC_TIC_2 */
+       xf_emit(ctx, 1, 0x40);          /* ffffffff SRC_TIC_3 */
+       xf_emit(ctx, 1, 0x100);         /* ffffffff SRC_TIC_4 */
+       xf_emit(ctx, 1, 0x10100);       /* ffffffff SRC_TIC_5 */
+       xf_emit(ctx, 1, 0x02800000);    /* ffffffff SRC_TIC_6 */
+       xf_emit(ctx, 1, 0);             /* ffffffff SRC_TIC_7 */
+       if (dev_priv->chipset == 0x50) {
+               xf_emit(ctx, 1, 0);     /* 00000001 turing UNK358 */
+               xf_emit(ctx, 1, 0);     /* ffffffff tesla UNK1A34? */
+               xf_emit(ctx, 1, 0);     /* 00000003 turing UNK37C tesla UNK1690 */
+               xf_emit(ctx, 1, 0);     /* 00000003 BLIT_CONTROL */
+               xf_emit(ctx, 1, 0);     /* 00000001 turing UNK32C tesla UNK0F94 */
+       } else if (!IS_NVAAF(dev_priv->chipset)) {
+               xf_emit(ctx, 1, 0);     /* ffffffff tesla UNK1A34? */
+               xf_emit(ctx, 1, 0);     /* 00000003 */
+               xf_emit(ctx, 1, 0);     /* 000003ff */
+               xf_emit(ctx, 1, 0);     /* 00000003 */
+               xf_emit(ctx, 1, 0);     /* 000003ff */
+               xf_emit(ctx, 1, 0);     /* 00000003 tesla UNK1664 / turing UNK03E8 */
+               xf_emit(ctx, 1, 0);     /* 00000003 */
+               xf_emit(ctx, 1, 0);     /* 000003ff */
+       } else {
+               xf_emit(ctx, 0x6, 0);
+       }
+       xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A34 */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_TEXTURE */
+       xf_emit(ctx, 1, 0);             /* 0000ffff DMA_SRC */
 }
 
 static void
-nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       xf_emit(ctx, 2, 0x04e3bfdf);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x00ffff00);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 2, 1);
-       else
-               xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 0x00ffff00);
-       xf_emit(ctx, 8, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0x30201000);
-       xf_emit(ctx, 1, 0x70605040);
-       xf_emit(ctx, 1, 0xb8a89888);
-       xf_emit(ctx, 1, 0xf8e8d8c8);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x1a);
-}
-
-static void
-nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
-{
-       struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 0xfac6881);
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 2, 1);
-       xf_emit(ctx, 2, 0);
-       xf_emit(ctx, 1, 1);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 0xb, 0);
-       else
-               xf_emit(ctx, 0xa, 0);
-       xf_emit(ctx, 8, 1);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0xfac6881);
-       xf_emit(ctx, 1, 0xf);
-       xf_emit(ctx, 7, 0);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 1, 1);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               xf_emit(ctx, 6, 0);
-               xf_emit(ctx, 1, 1);
-               xf_emit(ctx, 6, 0);
-       } else {
-               xf_emit(ctx, 0xb, 0);
-       }
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK1534 */
+       xf_emit(ctx, 1, 0);             /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 2, 0);             /* 7, ffff0ff3 */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE */
+       xf_emit(ctx, 1, 0x04e3bfdf);    /* ffffffff UNK0D64 */
+       xf_emit(ctx, 1, 0x04e3bfdf);    /* ffffffff UNK0DF4 */
+       xf_emit(ctx, 1, 1);             /* 00000001 UNK15B4 */
+       xf_emit(ctx, 1, 0);             /* 00000001 LINE_STIPPLE_ENABLE */
+       xf_emit(ctx, 1, 0x00ffff00);    /* 00ffffff LINE_STIPPLE_PATTERN */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK0F98 */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 1);     /* 0000001f tesla UNK169C */
+       xf_emit(ctx, 1, 0);             /* 00000003 tesla UNK1668 */
+       xf_emit(ctx, 1, 0);             /* 00000001 LINE_STIPPLE_ENABLE */
+       xf_emit(ctx, 1, 0x00ffff00);    /* 00ffffff LINE_STIPPLE_PATTERN */
+       xf_emit(ctx, 1, 0);             /* 00000001 POLYGON_SMOOTH_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 UNK1534 */
+       xf_emit(ctx, 1, 0);             /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 1, 0);             /* 00000001 tesla UNK1658 */
+       xf_emit(ctx, 1, 0);             /* 00000001 LINE_SMOOTH_ENABLE */
+       xf_emit(ctx, 1, 0);             /* ffff0ff3 */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);             /* 00000001 DEPTH_WRITE */
+       xf_emit(ctx, 1, 1);             /* 00000001 UNK15B4 */
+       xf_emit(ctx, 1, 0);             /* 00000001 POINT_SPRITE_ENABLE */
+       xf_emit(ctx, 1, 1);             /* 00000001 tesla UNK165C */
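+       /* tesla UNK1670 takes a packed table of 16 ascending byte values,
+        * four per word; purpose unknown. */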
+       xf_emit(ctx, 1, 0x30201000);    /* ffffffff tesla UNK1670 */
+       xf_emit(ctx, 1, 0x70605040);    /* ffffffff tesla UNK1670 */
+       xf_emit(ctx, 1, 0xb8a89888);    /* ffffffff tesla UNK1670 */
+       xf_emit(ctx, 1, 0xf8e8d8c8);    /* ffffffff tesla UNK1670 */
+       xf_emit(ctx, 1, 0);             /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+       xf_emit(ctx, 1, 0x1a);          /* 0000001f POLYGON_MODE */
 }
 
 static void
@@ -2193,108 +3102,136 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
        if (dev_priv->chipset < 0xa0) {
-               nv50_graph_construct_xfer_tp_x1(ctx);
-               nv50_graph_construct_xfer_tp_x2(ctx);
-               nv50_graph_construct_xfer_tp_x3(ctx);
-               if (dev_priv->chipset == 0x50)
-                       xf_emit(ctx, 0xf, 0);
-               else
-                       xf_emit(ctx, 0x12, 0);
-               nv50_graph_construct_xfer_tp_x4(ctx);
+               nv50_graph_construct_xfer_unk84xx(ctx);
+               nv50_graph_construct_xfer_tprop(ctx);
+               nv50_graph_construct_xfer_tex(ctx);
+               nv50_graph_construct_xfer_unk8cxx(ctx);
        } else {
-               nv50_graph_construct_xfer_tp_x3(ctx);
-               if (dev_priv->chipset < 0xaa)
-                       xf_emit(ctx, 0xc, 0);
-               else
-                       xf_emit(ctx, 0xa, 0);
-               nv50_graph_construct_xfer_tp_x2(ctx);
-               nv50_graph_construct_xfer_tp_x5(ctx);
-               nv50_graph_construct_xfer_tp_x4(ctx);
-               nv50_graph_construct_xfer_tp_x1(ctx);
+               nv50_graph_construct_xfer_tex(ctx);
+               nv50_graph_construct_xfer_tprop(ctx);
+               nv50_graph_construct_xfer_unk8cxx(ctx);
+               nv50_graph_construct_xfer_unk84xx(ctx);
        }
 }
 
 static void
-nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
+nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 {
        struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-       int i, mpcnt;
-       if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
-               mpcnt = 1;
-       else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
-               mpcnt = 2;
-       else
-               mpcnt = 3;
+       int i, mpcnt = 2;
+       switch (dev_priv->chipset) {
+               case 0x98:
+               case 0xaa:
+                       mpcnt = 1;
+                       break;
+               case 0x50:
+               case 0x84:
+               case 0x86:
+               case 0x92:
+               case 0x94:
+               case 0x96:
+               case 0xa8:
+               case 0xac:
+                       mpcnt = 2;
+                       break;
+               case 0xa0:
+               case 0xa3:
+               case 0xa5:
+               case 0xaf:
+                       mpcnt = 3;
+                       break;
+       }
        for (i = 0; i < mpcnt; i++) {
-               xf_emit(ctx, 1, 0);
-               xf_emit(ctx, 1, 0x80);
-               xf_emit(ctx, 1, 0x80007004);
-               xf_emit(ctx, 1, 0x04000400);
+               xf_emit(ctx, 1, 0);             /* ff */
+               xf_emit(ctx, 1, 0x80);          /* ffffffff tesla UNK1404 */
+               xf_emit(ctx, 1, 0x80007004);    /* ffffffff tesla UNK12B0 */
+               xf_emit(ctx, 1, 0x04000400);    /* ffffffff */
                if (dev_priv->chipset >= 0xa0)
-                       xf_emit(ctx, 1, 0xc0);
-               xf_emit(ctx, 1, 0x1000);
-               xf_emit(ctx, 2, 0);
-               if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) {
-                       xf_emit(ctx, 1, 0xe00);
-                       xf_emit(ctx, 1, 0x1e00);
+                       xf_emit(ctx, 1, 0xc0);  /* 00007fff tesla UNK152C */
+               xf_emit(ctx, 1, 0x1000);        /* 0000ffff tesla UNK0D60 */
+               xf_emit(ctx, 1, 0);             /* ff/3ff */
+               xf_emit(ctx, 1, 0);             /* ffffffff tesla UNK1A30 */
+               if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
+                       xf_emit(ctx, 1, 0xe00);         /* 7fff */
+                       xf_emit(ctx, 1, 0x1e00);        /* 7fff */
                }
-               xf_emit(ctx, 1, 1);
-               xf_emit(ctx, 2, 0);
+               xf_emit(ctx, 1, 1);             /* 000000ff VP_REG_ALLOC_TEMP */
+               xf_emit(ctx, 1, 0);             /* 00000001 LINKED_TSC */
+               xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
                if (dev_priv->chipset == 0x50)
-                       xf_emit(ctx, 2, 0x1000);
-               xf_emit(ctx, 1, 1);
-               xf_emit(ctx, 1, 0);
-               xf_emit(ctx, 1, 4);
-               xf_emit(ctx, 1, 2);
-               if (dev_priv->chipset >= 0xaa)
-                       xf_emit(ctx, 0xb, 0);
+                       xf_emit(ctx, 2, 0x1000);        /* 7fff tesla UNK141C */
+               xf_emit(ctx, 1, 1);             /* 000000ff GP_REG_ALLOC_TEMP */
+               xf_emit(ctx, 1, 0);             /* 00000001 GP_ENABLE */
+               xf_emit(ctx, 1, 4);             /* 000000ff FP_REG_ALLOC_TEMP */
+               xf_emit(ctx, 1, 2);             /* 00000003 REG_MODE */
+               if (IS_NVAAF(dev_priv->chipset))
+                       xf_emit(ctx, 0xb, 0);   /* RO */
                else if (dev_priv->chipset >= 0xa0)
-                       xf_emit(ctx, 0xc, 0);
+                       xf_emit(ctx, 0xc, 0);   /* RO */
                else
-                       xf_emit(ctx, 0xa, 0);
+                       xf_emit(ctx, 0xa, 0);   /* RO */
        }
-       xf_emit(ctx, 1, 0x08100c12);
-       xf_emit(ctx, 1, 0);
+       xf_emit(ctx, 1, 0x08100c12);            /* 1fffffff FP_INTERPOLANT_CTRL */
+       xf_emit(ctx, 1, 0);                     /* ff/3ff */
        if (dev_priv->chipset >= 0xa0) {
-               xf_emit(ctx, 1, 0x1fe21);
+               xf_emit(ctx, 1, 0x1fe21);       /* 0003ffff tesla UNK0FAC */
        }
-       xf_emit(ctx, 5, 0);
-       xf_emit(ctx, 4, 0xffff);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 2, 0x10001);
-       xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 0x1fe21);
-       xf_emit(ctx, 1, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 1, 1);
-       xf_emit(ctx, 4, 0);
-       xf_emit(ctx, 1, 0x08100c12);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 8, 0);
-       xf_emit(ctx, 1, 0xfac6881);
-       xf_emit(ctx, 1, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
-               xf_emit(ctx, 1, 3);
-       xf_emit(ctx, 3, 0);
-       xf_emit(ctx, 1, 4);
-       xf_emit(ctx, 9, 0);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 2, 1);
-       xf_emit(ctx, 1, 2);
-       xf_emit(ctx, 3, 1);
-       xf_emit(ctx, 1, 0);
-       if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
-               xf_emit(ctx, 8, 2);
-               xf_emit(ctx, 0x10, 1);
-               xf_emit(ctx, 8, 2);
-               xf_emit(ctx, 0x18, 1);
-               xf_emit(ctx, 3, 0);
+       xf_emit(ctx, 3, 0);                     /* 7fff, 0, 0 */
+       xf_emit(ctx, 1, 0);                     /* 00000001 tesla UNK1534 */
+       xf_emit(ctx, 1, 0);                     /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+       xf_emit(ctx, 4, 0xffff);                /* 0000ffff MSAA_MASK */
+       xf_emit(ctx, 1, 1);                     /* 00000001 LANES32 */
+       xf_emit(ctx, 1, 0x10001);               /* 00ffffff BLOCK_ALLOC */
+       xf_emit(ctx, 1, 0x10001);               /* ffffffff BLOCKDIM_XY */
+       xf_emit(ctx, 1, 1);                     /* 0000ffff BLOCKDIM_Z */
+       xf_emit(ctx, 1, 0);                     /* ffffffff SHARED_SIZE */
+       xf_emit(ctx, 1, 0x1fe21);               /* 1ffff/3ffff[NVA0+] tesla UNK0FAC */
+       xf_emit(ctx, 1, 0);                     /* ffffffff tesla UNK1A34 */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 1);             /* 0000001f tesla UNK169C */
+       xf_emit(ctx, 1, 0);                     /* ff/3ff */
+       xf_emit(ctx, 1, 0);                     /* 1 LINKED_TSC */
+       xf_emit(ctx, 1, 0);                     /* ff FP_ADDRESS_HIGH */
+       xf_emit(ctx, 1, 0);                     /* ffffffff FP_ADDRESS_LOW */
+       xf_emit(ctx, 1, 0x08100c12);            /* 1fffffff FP_INTERPOLANT_CTRL */
+       xf_emit(ctx, 1, 4);                     /* 00000007 FP_CONTROL */
+       xf_emit(ctx, 1, 0);                     /* 000000ff FRAG_COLOR_CLAMP_EN */
+       xf_emit(ctx, 1, 2);                     /* 00000003 REG_MODE */
+       xf_emit(ctx, 1, 0x11);                  /* 0000007f RT_FORMAT */
+       xf_emit(ctx, 7, 0);                     /* 0000007f RT_FORMAT */
+       xf_emit(ctx, 1, 0);                     /* 00000007 */
+       xf_emit(ctx, 1, 0xfac6881);             /* 0fffffff RT_CONTROL */
+       xf_emit(ctx, 1, 0);                     /* 00000003 MULTISAMPLE_CTRL */
+       if (IS_NVA3F(dev_priv->chipset))
+               xf_emit(ctx, 1, 3);             /* 00000003 tesla UNK16B4 */
+       xf_emit(ctx, 1, 0);                     /* 00000001 ALPHA_TEST_ENABLE */
+       xf_emit(ctx, 1, 0);                     /* 00000007 ALPHA_TEST_FUNC */
+       xf_emit(ctx, 1, 0);                     /* 00000001 FRAMEBUFFER_SRGB */
+       xf_emit(ctx, 1, 4);                     /* ffffffff tesla UNK1400 */
+       xf_emit(ctx, 8, 0);                     /* 00000001 BLEND_ENABLE */
+       xf_emit(ctx, 1, 0);                     /* 00000001 LOGIC_OP_ENABLE */
+       xf_emit(ctx, 1, 2);                     /* 0000001f BLEND_FUNC_SRC_RGB */
+       xf_emit(ctx, 1, 1);                     /* 0000001f BLEND_FUNC_DST_RGB */
+       xf_emit(ctx, 1, 1);                     /* 00000007 BLEND_EQUATION_RGB */
+       xf_emit(ctx, 1, 2);                     /* 0000001f BLEND_FUNC_SRC_ALPHA */
+       xf_emit(ctx, 1, 1);                     /* 0000001f BLEND_FUNC_DST_ALPHA */
+       xf_emit(ctx, 1, 1);                     /* 00000007 BLEND_EQUATION_ALPHA */
+       xf_emit(ctx, 1, 1);                     /* 00000001 UNK133C */
+       if (IS_NVA3F(dev_priv->chipset)) {
+               xf_emit(ctx, 1, 0);             /* 00000001 UNK12E4 */
+               xf_emit(ctx, 8, 2);             /* 0000001f IBLEND_FUNC_SRC_RGB */
+               xf_emit(ctx, 8, 1);             /* 0000001f IBLEND_FUNC_DST_RGB */
+               xf_emit(ctx, 8, 1);             /* 00000007 IBLEND_EQUATION_RGB */
+               xf_emit(ctx, 8, 2);             /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+               xf_emit(ctx, 8, 1);             /* 0000001f IBLEND_FUNC_DST_ALPHA */
+               xf_emit(ctx, 8, 1);             /* 00000007 IBLEND_EQUATION_ALPHA */
+               xf_emit(ctx, 8, 1);             /* 00000001 IBLEND_UNK00 */
+               xf_emit(ctx, 1, 0);             /* 00000003 tesla UNK1928 */
+               xf_emit(ctx, 1, 0);             /* 00000001 UNK1140 */
        }
-       xf_emit(ctx, 1, 4);
+       xf_emit(ctx, 1, 0);                     /* 00000003 tesla UNK0F90 */
+       xf_emit(ctx, 1, 4);                     /* 000000ff FP_RESULT_COUNT */
+       /* XXX: demagic this part some day */
        if (dev_priv->chipset == 0x50)
                xf_emit(ctx, 0x3a0, 0);
        else if (dev_priv->chipset < 0x94)
@@ -2303,9 +3240,9 @@ nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
                xf_emit(ctx, 0x39f, 0);
        else
                xf_emit(ctx, 0x3a3, 0);
-       xf_emit(ctx, 1, 0x11);
-       xf_emit(ctx, 1, 0);
-       xf_emit(ctx, 1, 1);
+       xf_emit(ctx, 1, 0x11);                  /* 3f/7f DST_FORMAT */
+       xf_emit(ctx, 1, 0);                     /* 7 OPERATION */
+       xf_emit(ctx, 1, 1);                     /* 1 DST_LINEAR */
        xf_emit(ctx, 0x2d, 0);
 }
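
    The IS_NVA3F()/IS_NVAAF() tests above replace the old open-coded chipset range
    checks. Reconstructed from the conditions they stand in for (an assumption, not
    quoted from the header), they amount to:

        /* Assumed definitions, inferred from the range checks they replace:
         * NVA3F covers the 0xa3/0xa5/0xa8/0xaf family, NVAAF covers 0xaa-0xac. */
        #define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
        #define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)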
 
@@ -2323,52 +3260,56 @@ nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
        if (dev_priv->chipset < 0xa0) {
                for (i = 0; i < 8; i++) {
                        ctx->ctxvals_pos = offset + i;
+                       /* that little bugger belongs to csched. No idea
+                        * what it's doing here. */
                        if (i == 0)
-                               xf_emit(ctx, 1, 0x08100c12);
+                               xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
                        if (units & (1 << i))
-                               nv50_graph_construct_xfer_tp2(ctx);
+                               nv50_graph_construct_xfer_mpc(ctx);
                        if ((ctx->ctxvals_pos-offset)/8 > size)
                                size = (ctx->ctxvals_pos-offset)/8;
                }
        } else {
                /* Strand 0: TPs 0, 1 */
                ctx->ctxvals_pos = offset;
-               xf_emit(ctx, 1, 0x08100c12);
+               /* that little bugger belongs to csched. No idea
+                * what it's doing here. */
+               xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
                if (units & (1 << 0))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if (units & (1 << 1))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
-               /* Strand 0: TPs 2, 3 */
+               /* Strand 1: TPs 2, 3 */
                ctx->ctxvals_pos = offset + 1;
                if (units & (1 << 2))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if (units & (1 << 3))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
-               /* Strand 0: TPs 4, 5, 6 */
+               /* Strand 2: TPs 4, 5, 6 */
                ctx->ctxvals_pos = offset + 2;
                if (units & (1 << 4))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if (units & (1 << 5))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if (units & (1 << 6))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
 
-               /* Strand 0: TPs 7, 8, 9 */
+               /* Strand 3: TPs 7, 8, 9 */
                ctx->ctxvals_pos = offset + 3;
                if (units & (1 << 7))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if (units & (1 << 8))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if (units & (1 << 9))
-                       nv50_graph_construct_xfer_tp2(ctx);
+                       nv50_graph_construct_xfer_mpc(ctx);
                if ((ctx->ctxvals_pos-offset)/8 > size)
                        size = (ctx->ctxvals_pos-offset)/8;
        }
index 91ef93cf1f352f89553b7dab4ab9fb4f219326ad..a53fc974332b38ec659513e91a8421017e43c411 100644 (file)
 struct nv50_instmem_priv {
        uint32_t save1700[5]; /* 0x1700->0x1710 */
 
-       struct nouveau_gpuobj_ref *pramin_pt;
-       struct nouveau_gpuobj_ref *pramin_bar;
-       struct nouveau_gpuobj_ref *fb_bar;
+       struct nouveau_gpuobj *pramin_pt;
+       struct nouveau_gpuobj *pramin_bar;
+       struct nouveau_gpuobj *fb_bar;
 };
 
-#define NV50_INSTMEM_PAGE_SHIFT 12
-#define NV50_INSTMEM_PAGE_SIZE  (1 << NV50_INSTMEM_PAGE_SHIFT)
-#define NV50_INSTMEM_PT_SIZE(a)        (((a) >> 12) << 3)
+static void
+nv50_channel_del(struct nouveau_channel **pchan)
+{
+       struct nouveau_channel *chan;
 
-/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
- */
-#define BAR0_WI32(g, o, v) do {                                   \
-       uint32_t offset;                                          \
-       if ((g)->im_backing) {                                    \
-               offset = (g)->im_backing_start;                   \
-       } else {                                                  \
-               offset  = chan->ramin->gpuobj->im_backing_start;  \
-               offset += (g)->im_pramin->start;                  \
-       }                                                         \
-       offset += (o);                                            \
-       nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v));              \
-} while (0)
+       chan = *pchan;
+       *pchan = NULL;
+       if (!chan)
+               return;
+
+       nouveau_gpuobj_ref(NULL, &chan->ramfc);
+       nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+       if (chan->ramin_heap.free_stack.next)
+               drm_mm_takedown(&chan->ramin_heap);
+       nouveau_gpuobj_ref(NULL, &chan->ramin);
+       kfree(chan);
+}
+
+static int
+nv50_channel_new(struct drm_device *dev, u32 size,
+                struct nouveau_channel **pchan)
+{
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
+       u32  fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
+       struct nouveau_channel *chan;
+       int ret;
+
+       chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+       if (!chan)
+               return -ENOMEM;
+       chan->dev = dev;
+
+       ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+       if (ret) {
+               nv50_channel_del(&chan);
+               return ret;
+       }
+
+       ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
+       if (ret) {
+               nv50_channel_del(&chan);
+               return ret;
+       }
+
+       ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
+                                     chan->ramin->pinst + pgd,
+                                     chan->ramin->vinst + pgd,
+                                     0x4000, NVOBJ_FLAG_ZERO_ALLOC,
+                                     &chan->vm_pd);
+       if (ret) {
+               nv50_channel_del(&chan);
+               return ret;
+       }
+
+       ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
+                                     chan->ramin->pinst + fc,
+                                     chan->ramin->vinst + fc, 0x100,
+                                     NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
+       if (ret) {
+               nv50_channel_del(&chan);
+               return ret;
+       }
+
+       *pchan = chan;
+       return 0;
+}
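
    Because nv50_channel_del() clears the caller's pointer before freeing (and
    tolerates NULL), the new/del pair composes cleanly on error paths. A minimal
    usage sketch:

        struct nouveau_channel *chan = NULL;
        int ret;

        ret = nv50_channel_new(dev, 128 * 1024, &chan);
        if (ret)
                return ret;             /* failure paths already cleaned up */
        /* ... use chan->ramin, chan->vm_pd, chan->ramfc ... */
        nv50_channel_del(&chan);        /* chan is NULL afterwards; safe to repeat */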
 
 int
 nv50_instmem_init(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_channel *chan;
-       uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
-       uint32_t save_nv001700;
-       uint64_t v;
        struct nv50_instmem_priv *priv;
+       struct nouveau_channel *chan;
        int ret, i;
+       u32 tmp;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -75,212 +123,115 @@ nv50_instmem_init(struct drm_device *dev)
        for (i = 0x1700; i <= 0x1710; i += 4)
                priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
 
-       /* Reserve the last MiB of VRAM, we should probably try to avoid
-        * setting up the below tables over the top of the VBIOS image at
-        * some point.
-        */
-       dev_priv->ramin_rsvd_vram = 1 << 20;
-       c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
-       c_size   = 128 << 10;
-       c_vmpd   = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
-       c_ramfc  = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
-       c_base   = c_vmpd + 0x4000;
-       pt_size  = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
-
-       NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
-       NV_DEBUG(dev, "    VBIOS image: 0x%08x\n",
-                               (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
-       NV_DEBUG(dev, "  Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
-       NV_DEBUG(dev, "        PT size: %d KiB\n", pt_size >> 10);
-
-       /* Determine VM layout, we need to do this first to make sure
-        * we allocate enough memory for all the page tables.
-        */
-       dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
-       dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
-       dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
-       dev_priv->vm_vram_size = dev_priv->vram_size;
-       if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
-               dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
-       dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
-       dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
-       dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
-       NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
-                dev_priv->vm_gart_base,
-                dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
-       NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
-                dev_priv->vm_vram_base,
-                dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
-       c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
-
-       /* Map BAR0 PRAMIN aperture over the memory we want to use */
-       save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
-       nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
-
-       /* Create a fake channel, and use it as our "dummy" channels 0/127.
-        * The main reason for creating a channel is so we can use the gpuobj
-        * code.  However, it's probably worth noting that NVIDIA also setup
-        * their channels 0/127 with the same values they configure here.
-        * So, there may be some other reason for doing this.
-        *
-        * Have to create the entire channel manually, as the real channel
-        * creation code assumes we have PRAMIN access, and we don't until
-        * we're done here.
-        */
-       chan = kzalloc(sizeof(*chan), GFP_KERNEL);
-       if (!chan)
+       /* Global PRAMIN heap */
+       ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
+       if (ret) {
+               NV_ERROR(dev, "Failed to init RAMIN heap\n");
                return -ENOMEM;
-       chan->id = 0;
-       chan->dev = dev;
-       chan->file_priv = (struct drm_file *)-2;
-       dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
-
-       INIT_LIST_HEAD(&chan->ramht_refs);
+       }
 
-       /* Channel's PRAMIN object + heap */
-       ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
-                                                       NULL, &chan->ramin);
+       /* we need a channel to plug into the hw to control the BARs */
+       ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
        if (ret)
                return ret;
+       chan = dev_priv->fifos[127] = dev_priv->fifos[0];
 
-       if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base))
-               return -ENOMEM;
-
-       /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
-       ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
-                                               0x4000, 0, NULL, &chan->ramfc);
+       /* allocate page table for PRAMIN BAR */
+       ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
+                                0x1000, NVOBJ_FLAG_ZERO_ALLOC,
+                                &priv->pramin_pt);
        if (ret)
                return ret;
 
-       for (i = 0; i < c_vmpd; i += 4)
-               BAR0_WI32(chan->ramin->gpuobj, i, 0);
+       nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
+       nv_wo32(chan->vm_pd, 0x0004, 0);
 
-       /* VM page directory */
-       ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
-                                          0x4000, 0, &chan->vm_pd, NULL);
+       /* DMA object for PRAMIN BAR */
+       ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
        if (ret)
                return ret;
-       for (i = 0; i < 0x4000; i += 8) {
-               BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
-               BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
-       }
-
-       /* PRAMIN page table, cheat and map into VM at 0x0000000000.
-        * We map the entire fake channel into the start of the PRAMIN BAR
-        */
-       ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
-                                    0, &priv->pramin_pt);
+       nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
+       nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
+       nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
+       nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
+       nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
+       nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
+
+       /* map the channel into PRAMIN; the gpuobj code didn't do it for us */
+       ret = nv50_instmem_bind(dev, chan->ramin);
        if (ret)
                return ret;
 
-       v = c_offset | 1;
-       if (dev_priv->vram_sys_base) {
-               v += dev_priv->vram_sys_base;
-               v |= 0x30;
-       }
+       /* poke regs... */
+       nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
+       nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
+       nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
 
-       i = 0;
-       while (v < dev_priv->vram_sys_base + c_offset + c_size) {
-               BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v));
-               BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v));
-               v += 0x1000;
-               i += 8;
+       tmp = nv_ri32(dev, 0);
+       nv_wi32(dev, 0, ~tmp);
+       if (nv_ri32(dev, 0) != ~tmp) {
+               NV_ERROR(dev, "PRAMIN readback failed\n");
+               return -EIO;
        }
+       nv_wi32(dev, 0, tmp);
 
-       while (i < pt_size) {
-               BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
-               BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
-               i += 8;
-       }
+       dev_priv->ramin_available = true;
 
-       BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
-       BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
+       /* Determine VM layout */
+       dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
+       dev_priv->vm_gart_size = NV50_VM_BLOCK;
+
+       dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
+       dev_priv->vm_vram_size = dev_priv->vram_size;
+       if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
+               dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
+       dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
+       dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
+
+       dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
+
+       NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
+                dev_priv->vm_gart_base,
+                dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
+       NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
+                dev_priv->vm_vram_base,
+                dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
 
        /* VRAM page table(s), mapped into VM at +1GiB  */
        for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-               ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
-                                            NV50_VM_BLOCK/65536*8, 0, 0,
-                                            &chan->vm_vram_pt[i]);
+               ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
+                                        0, NVOBJ_FLAG_ZERO_ALLOC,
+                                        &chan->vm_vram_pt[i]);
                if (ret) {
-                       NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
-                                                                       ret);
+                       NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
                        dev_priv->vm_vram_pt_nr = i;
                        return ret;
                }
-               dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
+               dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
 
-               for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
-                                                               v += 4)
-                       BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);
-
-               BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
-                         chan->vm_vram_pt[i]->instance | 0x61);
-               BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
+               nv_wo32(chan->vm_pd, 0x10 + (i*8),
+                       chan->vm_vram_pt[i]->vinst | 0x61);
+               nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
        }
 
-       /* DMA object for PRAMIN BAR */
-       ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
-                                                       &priv->pramin_bar);
-       if (ret)
-               return ret;
-       BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
-       BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
-       BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
-       BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
-       BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
-       BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
-
        /* DMA object for FB BAR */
-       ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
-                                                       &priv->fb_bar);
+       ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
        if (ret)
                return ret;
-       BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
-       BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
-                                             pci_resource_len(dev->pdev, 1) - 1);
-       BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
-       BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
-       BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
-       BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
+       nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
+       nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
+                                   pci_resource_len(dev->pdev, 1) - 1);
+       nv_wo32(priv->fb_bar, 0x08, 0x40000000);
+       nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
+       nv_wo32(priv->fb_bar, 0x10, 0x00000000);
+       nv_wo32(priv->fb_bar, 0x14, 0x00000000);
 
-       /* Poke the relevant regs, and pray it works :) */
-       nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
-       nv_wr32(dev, NV50_PUNK_UNK1710, 0);
-       nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
-                                        NV50_PUNK_BAR_CFG_BASE_VALID);
-       nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
-                                       NV50_PUNK_BAR1_CTXDMA_VALID);
-       nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
-                                       NV50_PUNK_BAR3_CTXDMA_VALID);
+       dev_priv->engine.instmem.flush(dev);
 
+       nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
        for (i = 0; i < 8; i++)
                nv_wr32(dev, 0x1900 + (i*4), 0);
 
-       /* Assume that praying isn't enough, check that we can re-read the
-        * entire fake channel back from the PRAMIN BAR */
-       for (i = 0; i < c_size; i += 4) {
-               if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
-                       NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
-                                                                       i);
-                       return -EINVAL;
-               }
-       }
-
-       nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
-
-       /* Global PRAMIN heap */
-       if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) {
-               NV_ERROR(dev, "Failed to init RAMIN heap\n");
-       }
-
-       /*XXX: incorrect, but needed to make hash func "work" */
-       dev_priv->ramht_offset = 0x10000;
-       dev_priv->ramht_bits   = 9;
-       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits) * 8;
        return 0;
 }
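
    The invert-and-restore readback above (write ~tmp, verify, write tmp back)
    replaces the old full-channel re-read as the PRAMIN sanity check. Factored out,
    the probe looks like this sketch:

        /* Sketch: verify offset 0 of the PRAMIN window is writable,
         * restoring the original contents afterwards. */
        static bool pramin_writable(struct drm_device *dev)
        {
                u32 tmp = nv_ri32(dev, 0);
                bool ok;

                nv_wi32(dev, 0, ~tmp);
                ok = (nv_ri32(dev, 0) == ~tmp);
                nv_wi32(dev, 0, tmp);
                return ok;
        }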
 
@@ -297,29 +248,24 @@ nv50_instmem_takedown(struct drm_device *dev)
        if (!priv)
                return;
 
+       dev_priv->ramin_available = false;
+
        /* Restore state from before init */
        for (i = 0x1700; i <= 0x1710; i += 4)
                nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
 
-       nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
-       nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
-       nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
+       nouveau_gpuobj_ref(NULL, &priv->fb_bar);
+       nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
+       nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
 
        /* Destroy dummy channel */
        if (chan) {
-               for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-                       nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
-                       dev_priv->vm_vram_pt[i] = NULL;
-               }
+               for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
+                       nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
                dev_priv->vm_vram_pt_nr = 0;
 
-               nouveau_gpuobj_del(dev, &chan->vm_pd);
-               nouveau_gpuobj_ref_del(dev, &chan->ramfc);
-               nouveau_gpuobj_ref_del(dev, &chan->ramin);
-               drm_mm_takedown(&chan->ramin_heap);
-
-               dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
-               kfree(chan);
+               nv50_channel_del(&dev_priv->fifos[0]);
+               dev_priv->fifos[127] = NULL;
        }
 
        dev_priv->engine.instmem.priv = NULL;
@@ -331,14 +277,14 @@ nv50_instmem_suspend(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = dev_priv->fifos[0];
-       struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+       struct nouveau_gpuobj *ramin = chan->ramin;
        int i;
 
-       ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
+       ramin->im_backing_suspend = vmalloc(ramin->size);
        if (!ramin->im_backing_suspend)
                return -ENOMEM;
 
-       for (i = 0; i < ramin->im_pramin->size; i += 4)
+       for (i = 0; i < ramin->size; i += 4)
                ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
        return 0;
 }
@@ -349,23 +295,25 @@ nv50_instmem_resume(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
        struct nouveau_channel *chan = dev_priv->fifos[0];
-       struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+       struct nouveau_gpuobj *ramin = chan->ramin;
        int i;
 
-       nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
-       for (i = 0; i < ramin->im_pramin->size; i += 4)
-               BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
+       dev_priv->ramin_available = false;
+       dev_priv->ramin_base = ~0;
+       for (i = 0; i < ramin->size; i += 4)
+               nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
+       dev_priv->ramin_available = true;
        vfree(ramin->im_backing_suspend);
        ramin->im_backing_suspend = NULL;
 
        /* Poke the relevant regs, and pray it works :) */
-       nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+       nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
        nv_wr32(dev, NV50_PUNK_UNK1710, 0);
-       nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+       nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
                                         NV50_PUNK_BAR_CFG_BASE_VALID);
-       nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
+       nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
                                        NV50_PUNK_BAR1_CTXDMA_VALID);
-       nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+       nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
                                        NV50_PUNK_BAR3_CTXDMA_VALID);
 
        for (i = 0; i < 8; i++)
@@ -381,7 +329,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
        if (gpuobj->im_backing)
                return -EINVAL;
 
-       *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
+       *sz = ALIGN(*sz, 4096);
        if (*sz == 0)
                return -EINVAL;
 
@@ -399,9 +347,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
                return ret;
        }
 
-       gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
-       gpuobj->im_backing_start <<= PAGE_SHIFT;
-
+       gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
        return 0;
 }
 
@@ -424,7 +370,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-       struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+       struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
        uint32_t pte, pte_end;
        uint64_t vram;
 
@@ -436,11 +382,11 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 
        pte     = (gpuobj->im_pramin->start >> 12) << 1;
        pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-       vram    = gpuobj->im_backing_start;
+       vram    = gpuobj->vinst;
 
        NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
                 gpuobj->im_pramin->start, pte, pte_end);
-       NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+       NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
 
        vram |= 1;
        if (dev_priv->vram_sys_base) {
@@ -449,9 +395,10 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
        }
 
        while (pte < pte_end) {
-               nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
-               nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
-               vram += NV50_INSTMEM_PAGE_SIZE;
+               nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
+               nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
+               vram += 0x1000;
+               pte += 2;
        }
        dev_priv->engine.instmem.flush(dev);
 
@@ -472,12 +419,17 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
        if (gpuobj->im_bound == 0)
                return -EINVAL;
 
+       /* can happen during late takedown */
+       if (unlikely(!dev_priv->ramin_available))
+               return 0;
+
        pte     = (gpuobj->im_pramin->start >> 12) << 1;
        pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
        while (pte < pte_end) {
-               nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
-               nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+               nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
+               nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
+               pte += 2;
        }
        dev_priv->engine.instmem.flush(dev);
 
@@ -489,7 +441,7 @@ void
 nv50_instmem_flush(struct drm_device *dev)
 {
        nv_wr32(dev, 0x00330c, 0x00000001);
-       if (!nv_wait(0x00330c, 0x00000002, 0x00000000))
+       if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
                NV_ERROR(dev, "PRAMIN flush timeout\n");
 }
 
@@ -497,7 +449,7 @@ void
 nv84_instmem_flush(struct drm_device *dev)
 {
        nv_wr32(dev, 0x070000, 0x00000001);
-       if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+       if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
                NV_ERROR(dev, "PRAMIN flush timeout\n");
 }
 
@@ -505,7 +457,7 @@ void
 nv50_vm_flush(struct drm_device *dev, int engine)
 {
        nv_wr32(dev, 0x100c80, (engine << 16) | 1);
-       if (!nv_wait(0x100c80, 0x00000001, 0x00000000))
+       if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
                NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
 }
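
    Every nv_wait() caller now passes the drm_device explicitly. Assuming the usual
    masked-poll contract (the timeout budget below is hypothetical), the helper
    behaves roughly like:

        /* Sketch of the assumed nv_wait() semantics: poll `reg` until
         * (value & mask) == val, or fail after a timeout. */
        static bool nv_wait_sketch(struct drm_device *dev,
                                   u32 reg, u32 mask, u32 val)
        {
                int us = 2000000;               /* hypothetical ~2s budget */

                while (us--) {
                        if ((nv_rd32(dev, reg) & mask) == val)
                                return true;
                        udelay(1);
                }
                return false;
        }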
 
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
new file mode 100644 (file)
index 0000000..7dbb305
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+
+struct nv50_pm_state {
+       struct nouveau_pm_level *perflvl;
+       struct pll_lims pll;
+       enum pll_types type;
+       int N, M, P;
+};
+
+int
+nv50_pm_clock_get(struct drm_device *dev, u32 id)
+{
+       struct pll_lims pll;
+       int P, N, M, ret;
+       u32 reg0, reg1;
+
+       ret = get_pll_limits(dev, id, &pll);
+       if (ret)
+               return ret;
+
+       reg0 = nv_rd32(dev, pll.reg + 0);
+       reg1 = nv_rd32(dev, pll.reg + 4);
+       P = (reg0 & 0x00070000) >> 16;
+       N = (reg1 & 0x0000ff00) >> 8;
+       M = (reg1 & 0x000000ff);
+
+       return ((pll.refclk * N / M) >> P);
+}
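
    The return value follows the usual PLL relation, clock = (refclk * N / M) >> P,
    with P acting as a power-of-two post-divider. A worked example with hypothetical
    register values (kHz, as elsewhere in the file):

        /* Hypothetical decode: refclk = 27000 kHz, N = 100, M = 4, P = 1
         *   (27000 * 100 / 4) >> 1  =  675000 >> 1  =  337500 kHz (337.5 MHz) */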
+
+void *
+nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+                 u32 id, int khz)
+{
+       struct nv50_pm_state *state;
+       int dummy, ret;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+       state->type = id;
+       state->perflvl = perflvl;
+
+       ret = get_pll_limits(dev, id, &state->pll);
+       if (ret < 0) {
+               kfree(state);
+               return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+       }
+
+       ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
+                           &dummy, &dummy, &state->P);
+       if (ret < 0) {
+               kfree(state);
+               return ERR_PTR(ret);
+       }
+
+       return state;
+}
+
+void
+nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
+{
+       struct nv50_pm_state *state = pre_state;
+       struct nouveau_pm_level *perflvl = state->perflvl;
+       u32 reg = state->pll.reg, tmp;
+       struct bit_entry BIT_M;
+       u16 script;
+       int N = state->N;
+       int M = state->M;
+       int P = state->P;
+
+       if (state->type == PLL_MEMORY && perflvl->memscript &&
+           bit_table(dev, 'M', &BIT_M) == 0 &&
+           BIT_M.version == 1 && BIT_M.length >= 0x0b) {
+               script = ROM16(BIT_M.data[0x05]);
+               if (script)
+                       nouveau_bios_run_init_table(dev, script, NULL);
+               script = ROM16(BIT_M.data[0x07]);
+               if (script)
+                       nouveau_bios_run_init_table(dev, script, NULL);
+               script = ROM16(BIT_M.data[0x09]);
+               if (script)
+                       nouveau_bios_run_init_table(dev, script, NULL);
+
+               nouveau_bios_run_init_table(dev, perflvl->memscript, NULL);
+       }
+
+       if (state->type == PLL_MEMORY) {
+               nv_wr32(dev, 0x100210, 0);
+               nv_wr32(dev, 0x1002dc, 1);
+       }
+
+       tmp  = nv_rd32(dev, reg + 0) & 0xfff8ffff;
+       tmp |= 0x80000000 | (P << 16);
+       nv_wr32(dev, reg + 0, tmp);
+       nv_wr32(dev, reg + 4, (N << 8) | M);
+
+       if (state->type == PLL_MEMORY) {
+               nv_wr32(dev, 0x1002dc, 0);
+               nv_wr32(dev, 0x100210, 0x80000000);
+       }
+
+       kfree(state);
+}
+
index bcd4cf84a7e641cb0d79d5b1d67a8604110a9af3..b4a5ecb199f925646ca10c4364ebe05696786a7d 100644 (file)
@@ -92,7 +92,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
        }
 
        /* wait for it to be done */
-       if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
+       if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
                     NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
                NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
                NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
@@ -108,7 +108,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 
        nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
                NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
-       if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
+       if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or),
                     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
                NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
                NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
new file mode 100644 (file)
index 0000000..dbbafed
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+
+/*XXX: boards using limits 0x40 need fixing: the register layout
+ *     is correct here, but some other funny magic modifies things,
+ *     so it's unlikely we set/read the correct timings yet.
+ *     Working on it...
+ */
+
+struct nva3_pm_state {
+       struct pll_lims pll;
+       int N, M, P;
+};
+
+int
+nva3_pm_clock_get(struct drm_device *dev, u32 id)
+{
+       struct pll_lims pll;
+       int P, N, M, ret;
+       u32 reg;
+
+       ret = get_pll_limits(dev, id, &pll);
+       if (ret)
+               return ret;
+
+       reg = nv_rd32(dev, pll.reg + 4);
+       P = (reg & 0x003f0000) >> 16;
+       N = (reg & 0x0000ff00) >> 8;
+       M = (reg & 0x000000ff);
+       return pll.refclk * N / M / P;
+}
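
    Unlike the nv50 path, P here is a plain divider rather than a shift. A worked
    example with hypothetical register values:

        /* Hypothetical decode: refclk = 27000 kHz, N = 125, M = 3, P = 2
         *   27000 * 125 / 3 / 2 = 562500 kHz (562.5 MHz) */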
+
+void *
+nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+                 u32 id, int khz)
+{
+       struct nva3_pm_state *state;
+       int dummy, ret;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+
+       ret = get_pll_limits(dev, id, &state->pll);
+       if (ret < 0) {
+               kfree(state);
+               return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+       }
+
+       ret = nv50_calc_pll2(dev, &state->pll, khz, &state->N, &dummy,
+                            &state->M, &state->P);
+       if (ret < 0) {
+               kfree(state);
+               return ERR_PTR(ret);
+       }
+
+       return state;
+}
+
+void
+nva3_pm_clock_set(struct drm_device *dev, void *pre_state)
+{
+       struct nva3_pm_state *state = pre_state;
+       u32 reg = state->pll.reg;
+
+       nv_wr32(dev, reg + 4, (state->P << 16) | (state->N << 8) | state->M);
+       kfree(state);
+}
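
    The set path is a single register write that packs the three dividers into the
    same fields the get path decodes (P in bits 21:16, N in 15:8, M in 7:0):

        /* Sketch: field packing mirroring the decode in nva3_pm_clock_get(). */
        static inline u32 nva3_pll_pack(int N, int M, int P)
        {
                return (P << 16) | (N << 8) | M;
        }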
+
index d64375871979d5bf06147ca65b3afe4644dfb095..890c2b95fbc12751c4954d384dc56d0ccb19dc06 100644 (file)
@@ -42,12 +42,6 @@ nvc0_fifo_reassign(struct drm_device *dev, bool enable)
        return false;
 }
 
-bool
-nvc0_fifo_cache_flush(struct drm_device *dev)
-{
-       return true;
-}
-
 bool
 nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
 {
index 6b451f864783e5c1ed3610c13c86adb5588a5783..13a0f78a9088c3b57c25ab148819ef6e47b914cb 100644 (file)
@@ -50,8 +50,7 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
                return ret;
        }
 
-       gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
-       gpuobj->im_backing_start <<= PAGE_SHIFT;
+       gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
        return 0;
 }
 
@@ -84,11 +83,11 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 
        pte     = gpuobj->im_pramin->start >> 12;
        pte_end = (gpuobj->im_pramin->size >> 12) + pte;
-       vram    = gpuobj->im_backing_start;
+       vram    = gpuobj->vinst;
 
        NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
                 gpuobj->im_pramin->start, pte, pte_end);
-       NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+       NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
 
        while (pte < pte_end) {
                nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
@@ -134,7 +133,7 @@ void
 nvc0_instmem_flush(struct drm_device *dev)
 {
        nv_wr32(dev, 0x070000, 1);
-       if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+       if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
                NV_ERROR(dev, "PRAMIN flush timeout\n");
 }
 
@@ -221,10 +220,6 @@ nvc0_instmem_init(struct drm_device *dev)
                return -ENOMEM;
        }
 
-       /*XXX: incorrect, but needed to make hash func "work" */
-       dev_priv->ramht_offset = 0x10000;
-       dev_priv->ramht_bits   = 9;
-       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits) * 8;
        return 0;
 }
 
index ad64673ace1fc955d650a2c6f8645ec0aba2a402..881f8a585613a82f190349d60f4a2dd2b6815adb 100644 (file)
 #              define NV_CIO_CRE_HCUR_ADDR1_ADR        7:2
 #      define NV_CIO_CRE_LCD__INDEX            0x33
 #              define NV_CIO_CRE_LCD_LCD_SELECT        0:0
+#              define NV_CIO_CRE_LCD_ROUTE_MASK        0x3b
 #      define NV_CIO_CRE_DDC0_STATUS__INDEX    0x36
 #      define NV_CIO_CRE_DDC0_WR__INDEX        0x37
 #      define NV_CIO_CRE_ILACE__INDEX          0x39    /* interlace */
index d42c76c237142225adf95332a3e4ca2c2dee62ea..18c3c71e41b1cc37c94477d052b7959085fe0ebd 100644 (file)
@@ -56,8 +56,6 @@ static struct drm_driver driver = {
        .irq_uninstall = r128_driver_irq_uninstall,
        .irq_handler = r128_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = r128_ioctls,
        .dma_ioctl = r128_cce_buffers,
        .fops = {
index aebe00875041448351bb129b7a131baab66f4e9e..6cae4f2028d27544467f2c2c3d605788b9f60df8 100644 (file)
@@ -65,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
        rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
        r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
        r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
-       evergreen.o evergreen_cs.o
+       evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
index cd0290f946cff51e8aa8c702dda5e650ec5f9af0..df2b6f2b35f893d00b475147f8d995a7b3c15bc2 100644 (file)
@@ -398,65 +398,76 @@ static void atombios_disable_ss(struct drm_crtc *crtc)
 
 
 union atom_enable_ss {
-       ENABLE_LVDS_SS_PARAMETERS legacy;
+       ENABLE_LVDS_SS_PARAMETERS lvds_ss;
+       ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
        ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+       ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
 };
 
-static void atombios_enable_ss(struct drm_crtc *crtc)
+static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+                                    int enable,
+                                    int pll_id,
+                                    struct radeon_atom_ss *ss)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
-       struct drm_encoder *encoder = NULL;
-       struct radeon_encoder *radeon_encoder = NULL;
-       struct radeon_encoder_atom_dig *dig = NULL;
        int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
        union atom_enable_ss args;
-       uint16_t percentage = 0;
-       uint8_t type = 0, step = 0, delay = 0, range = 0;
 
-       /* XXX add ss support for DCE4 */
-       if (ASIC_IS_DCE4(rdev))
-               return;
+       memset(&args, 0, sizeof(args));
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               if (encoder->crtc == crtc) {
-                       radeon_encoder = to_radeon_encoder(encoder);
-                       /* only enable spread spectrum on LVDS */
-                       if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                               dig = radeon_encoder->enc_priv;
-                               if (dig && dig->ss) {
-                                       percentage = dig->ss->percentage;
-                                       type = dig->ss->type;
-                                       step = dig->ss->step;
-                                       delay = dig->ss->delay;
-                                       range = dig->ss->range;
-                               } else
-                                       return;
-                       } else
-                               return;
+       if (ASIC_IS_DCE4(rdev)) {
+               args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+               args.v2.ucSpreadSpectrumType = ss->type;
+               switch (pll_id) {
+               case ATOM_PPLL1:
+                       args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
+                       args.v2.usSpreadSpectrumAmount = ss->amount;
+                       args.v2.usSpreadSpectrumStep = ss->step;
+                       break;
+               case ATOM_PPLL2:
+                       args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
+                       args.v2.usSpreadSpectrumAmount = ss->amount;
+                       args.v2.usSpreadSpectrumStep = ss->step;
                        break;
+               case ATOM_DCPLL:
+                       args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
+                       args.v2.usSpreadSpectrumAmount = 0;
+                       args.v2.usSpreadSpectrumStep = 0;
+                       break;
+               case ATOM_PPLL_INVALID:
+                       return;
                }
-       }
-
-       if (!radeon_encoder)
-               return;
-
-       memset(&args, 0, sizeof(args));
-       if (ASIC_IS_AVIVO(rdev)) {
-               args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
-               args.v1.ucSpreadSpectrumType = type;
-               args.v1.ucSpreadSpectrumStep = step;
-               args.v1.ucSpreadSpectrumDelay = delay;
-               args.v1.ucSpreadSpectrumRange = range;
-               args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
-               args.v1.ucEnable = ATOM_ENABLE;
+               args.v2.ucEnable = enable;
+       } else if (ASIC_IS_DCE3(rdev)) {
+               args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+               args.v1.ucSpreadSpectrumType = ss->type;
+               args.v1.ucSpreadSpectrumStep = ss->step;
+               args.v1.ucSpreadSpectrumDelay = ss->delay;
+               args.v1.ucSpreadSpectrumRange = ss->range;
+               args.v1.ucPpll = pll_id;
+               args.v1.ucEnable = enable;
+       } else if (ASIC_IS_AVIVO(rdev)) {
+               if (enable == ATOM_DISABLE) {
+                       atombios_disable_ss(crtc);
+                       return;
+               }
+               args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+               args.lvds_ss_2.ucSpreadSpectrumType = ss->type;
+               args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
+               args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
+               args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
+               args.lvds_ss_2.ucEnable = enable;
        } else {
-               args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
-               args.legacy.ucSpreadSpectrumType = type;
-               args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
-               args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
-               args.legacy.ucEnable = ATOM_ENABLE;
+               if (enable == ATOM_DISABLE) {
+                       atombios_disable_ss(crtc);
+                       return;
+               }
+               args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+               args.lvds_ss.ucSpreadSpectrumType = ss->type;
+               args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
+               args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
+               args.lvds_ss.ucEnable = enable;
        }
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -468,7 +479,9 @@ union adjust_pixel_clock {
 
 static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                               struct drm_display_mode *mode,
-                              struct radeon_pll *pll)
+                              struct radeon_pll *pll,
+                              bool ss_enabled,
+                              struct radeon_atom_ss *ss)
 {
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
@@ -482,19 +495,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
        /* reset the pll flags */
        pll->flags = 0;
 
-       /* select the PLL algo */
-       if (ASIC_IS_AVIVO(rdev)) {
-               if (radeon_new_pll == 0)
-                       pll->algo = PLL_ALGO_LEGACY;
-               else
-                       pll->algo = PLL_ALGO_NEW;
-       } else {
-               if (radeon_new_pll == 1)
-                       pll->algo = PLL_ALGO_NEW;
-               else
-                       pll->algo = PLL_ALGO_LEGACY;
-       }
-
        if (ASIC_IS_AVIVO(rdev)) {
                if ((rdev->family == CHIP_RS600) ||
                    (rdev->family == CHIP_RS690) ||
@@ -531,29 +531,22 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                }
                        }
 
+                       /* use recommended ref_div for ss */
+                       if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+                               if (ss_enabled) {
+                                       if (ss->refdiv) {
+                                               pll->flags |= RADEON_PLL_USE_REF_DIV;
+                                               pll->reference_div = ss->refdiv;
+                                       }
+                               }
+                       }
+
                        if (ASIC_IS_AVIVO(rdev)) {
                                /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
                                if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
                                        adjusted_clock = mode->clock * 2;
-                               if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
-                                       pll->algo = PLL_ALGO_LEGACY;
+                               if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
                                        pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
-                               }
-                               /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
-                                * (on some boards at least) prefers the legacy algo.  I'm not
-                                * sure whether this should handled generically or on a
-                                * case-by-case quirk basis.  Both algos should work fine in the
-                                * majority of cases.
-                                */
-                               if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
-                                   ((rdev->family == CHIP_RV515) ||
-                                    (rdev->family == CHIP_RV620))) {
-                                       /* allow the user to overrride just in case */
-                                       if (radeon_new_pll == 1)
-                                               pll->algo = PLL_ALGO_NEW;
-                                       else
-                                               pll->algo = PLL_ALGO_LEGACY;
-                               }
                        } else {
                                if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
                                        pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -589,9 +582,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                args.v1.ucTransmitterID = radeon_encoder->encoder_id;
                                args.v1.ucEncodeMode = encoder_mode;
                                if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-                                       /* may want to enable SS on DP eventually */
-                                       /* args.v1.ucConfig |=
-                                          ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+                                       if (ss_enabled)
+                                               args.v1.ucConfig |=
+                                                       ADJUST_DISPLAY_CONFIG_SS_ENABLE;
                                } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
                                        args.v1.ucConfig |=
                                                ADJUST_DISPLAY_CONFIG_SS_ENABLE;
@@ -608,11 +601,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                args.v3.sInput.ucDispPllConfig = 0;
                                if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                                        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
                                        if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-                                               /* may want to enable SS on DP/eDP eventually */
-                                               /*args.v3.sInput.ucDispPllConfig |=
-                                                 DISPPLL_CONFIG_SS_ENABLE;*/
+                                               if (ss_enabled)
+                                                       args.v3.sInput.ucDispPllConfig |=
+                                                               DISPPLL_CONFIG_SS_ENABLE;
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_COHERENT_MODE;
                                                /* 16200 or 27000 */
@@ -632,17 +624,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                        }
                                } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                                        if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-                                               /* may want to enable SS on DP/eDP eventually */
-                                               /*args.v3.sInput.ucDispPllConfig |=
-                                                 DISPPLL_CONFIG_SS_ENABLE;*/
+                                               if (ss_enabled)
+                                                       args.v3.sInput.ucDispPllConfig |=
+                                                               DISPPLL_CONFIG_SS_ENABLE;
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_COHERENT_MODE;
                                                /* 16200 or 27000 */
                                                args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
                                        } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
-                                               /* want to enable SS on LVDS eventually */
-                                               /*args.v3.sInput.ucDispPllConfig |=
-                                                 DISPPLL_CONFIG_SS_ENABLE;*/
+                                               if (ss_enabled)
+                                                       args.v3.sInput.ucDispPllConfig |=
+                                                               DISPPLL_CONFIG_SS_ENABLE;
                                        } else {
                                                if (mode->clock > 165000)
                                                        args.v3.sInput.ucDispPllConfig |=
@@ -816,6 +808,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
        struct radeon_pll *pll;
        u32 adjusted_clock;
        int encoder_mode = 0;
+       struct radeon_atom_ss ss;
+       bool ss_enabled = false;
 
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->crtc == crtc) {
@@ -842,25 +836,123 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
                break;
        }
 
+       if (radeon_encoder->active_device &
+           (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+               struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+               struct drm_connector *connector =
+                       radeon_get_connector_for_encoder(encoder);
+               struct radeon_connector *radeon_connector =
+                       to_radeon_connector(connector);
+               struct radeon_connector_atom_dig *dig_connector =
+                       radeon_connector->con_priv;
+               int dp_clock;
+
+               switch (encoder_mode) {
+               case ATOM_ENCODER_MODE_DP:
+                       /* DP/eDP */
+                       dp_clock = dig_connector->dp_clock / 10;
+                       if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+                               if (ASIC_IS_DCE4(rdev))
+                                       ss_enabled =
+                                               radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                                                dig->lcd_ss_id,
+                                                                                dp_clock);
+                               else
+                                       ss_enabled =
+                                               radeon_atombios_get_ppll_ss_info(rdev, &ss,
+                                                                                dig->lcd_ss_id);
+                       } else {
+                               if (ASIC_IS_DCE4(rdev))
+                                       ss_enabled =
+                                               radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                                                ASIC_INTERNAL_SS_ON_DP,
+                                                                                dp_clock);
+                               else {
+                                       if (dp_clock == 16200) {
+                                               ss_enabled =
+                                                       radeon_atombios_get_ppll_ss_info(rdev, &ss,
+                                                                                        ATOM_DP_SS_ID2);
+                                               if (!ss_enabled)
+                                                       ss_enabled =
+                                                               radeon_atombios_get_ppll_ss_info(rdev, &ss,
+                                                                                                ATOM_DP_SS_ID1);
+                                       } else
+                                               ss_enabled =
+                                                       radeon_atombios_get_ppll_ss_info(rdev, &ss,
+                                                                                        ATOM_DP_SS_ID1);
+                               }
+                       }
+                       break;
+               case ATOM_ENCODER_MODE_LVDS:
+                       if (ASIC_IS_DCE4(rdev))
+                               ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                                             dig->lcd_ss_id,
+                                                                             mode->clock / 10);
+                       else
+                               ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss,
+                                                                             dig->lcd_ss_id);
+                       break;
+               case ATOM_ENCODER_MODE_DVI:
+                       if (ASIC_IS_DCE4(rdev))
+                               ss_enabled =
+                                       radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                                        ASIC_INTERNAL_SS_ON_TMDS,
+                                                                        mode->clock / 10);
+                       break;
+               case ATOM_ENCODER_MODE_HDMI:
+                       if (ASIC_IS_DCE4(rdev))
+                               ss_enabled =
+                                       radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                                        ASIC_INTERNAL_SS_ON_HDMI,
+                                                                        mode->clock / 10);
+                       break;
+               default:
+                       break;
+               }
+       }
+
        /* adjust pixel clock as needed */
-       adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
+       adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
 
        radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
                           &ref_div, &post_div);
 
+       atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
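+       /* SS stays off across the PLL programming below and is re-enabled
+        * once the new dividers are in place */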
+
        atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
                                  encoder_mode, radeon_encoder->encoder_id, mode->clock,
                                  ref_div, fb_div, frac_fb_div, post_div);
 
+       if (ss_enabled) {
+               /* calculate ss amount and step size */
+               if (ASIC_IS_DCE4(rdev)) {
+                       u32 step_size;
+                       u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
+                       ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+                       ss.amount |= ((amount - (ss.amount * 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+                               ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
+                       if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+                               step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
+                                       (125 * 25 * pll->reference_freq / 100);
+                       else
+                               step_size = (2 * amount * ref_div * (ss.rate * 2048)) /
+                                       (125 * 25 * pll->reference_freq / 100);
+                       ss.step = step_size;
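+                       /* illustrative numbers (not from a real board):
+                        * fb_div = 100, frac_fb_div = 5, ss.percentage = 25
+                        * (0.25%) gives amount = (1005 * 25) / 10000 = 2,
+                        * i.e. 0 in the FBDIV field and 2 in the NFRAC field */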
+               }
+
+               atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+       }
 }
 
-static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
-                                  struct drm_framebuffer *old_fb)
+static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
+                                     struct drm_framebuffer *fb,
+                                     int x, int y, int atomic)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_framebuffer *radeon_fb;
+       struct drm_framebuffer *target_fb;
        struct drm_gem_object *obj;
        struct radeon_bo *rbo;
        uint64_t fb_location;
@@ -868,28 +960,43 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        int r;
 
        /* no fb bound */
-       if (!crtc->fb) {
+       if (!atomic && !crtc->fb) {
                DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }
 
-       radeon_fb = to_radeon_framebuffer(crtc->fb);
+       if (atomic) {
+               radeon_fb = to_radeon_framebuffer(fb);
+               target_fb = fb;
+       } else {
+               radeon_fb = to_radeon_framebuffer(crtc->fb);
+               target_fb = crtc->fb;
+       }
 
-       /* Pin framebuffer & get tilling informations */
+       /* If atomic, assume fb object is pinned & idle & fenced and
+        * just update base pointers
+        */
        obj = radeon_fb->obj;
        rbo = obj->driver_private;
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                return r;
-       r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
-       if (unlikely(r != 0)) {
-               radeon_bo_unreserve(rbo);
-               return -EINVAL;
+
+       if (atomic)
+               fb_location = radeon_bo_gpu_offset(rbo);
+       else {
+               r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+               if (unlikely(r != 0)) {
+                       radeon_bo_unreserve(rbo);
+                       return -EINVAL;
+               }
        }
+
        radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
        radeon_bo_unreserve(rbo);
 
-       switch (crtc->fb->bits_per_pixel) {
+       switch (target_fb->bits_per_pixel) {
        case 8:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
@@ -909,7 +1016,7 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                break;
        default:
                DRM_ERROR("Unsupported screen depth %d\n",
-                         crtc->fb->bits_per_pixel);
+                         target_fb->bits_per_pixel);
                return -EINVAL;
        }
 
@@ -955,10 +1062,10 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
        WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
        WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
-       WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
-       WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+       WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+       WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
-       fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+       fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
        WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
        WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
@@ -977,8 +1084,8 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        else
                WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
 
-       if (old_fb && old_fb != crtc->fb) {
-               radeon_fb = to_radeon_framebuffer(old_fb);
+       if (!atomic && fb && fb != crtc->fb) {
+               radeon_fb = to_radeon_framebuffer(fb);
                rbo = radeon_fb->obj->driver_private;
                r = radeon_bo_reserve(rbo, false);
                if (unlikely(r != 0))
@@ -993,8 +1100,9 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        return 0;
 }
 
-static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
-                              struct drm_framebuffer *old_fb)
+static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 int x, int y, int atomic)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
@@ -1002,33 +1110,48 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        struct radeon_framebuffer *radeon_fb;
        struct drm_gem_object *obj;
        struct radeon_bo *rbo;
+       struct drm_framebuffer *target_fb;
        uint64_t fb_location;
        uint32_t fb_format, fb_pitch_pixels, tiling_flags;
        int r;
 
        /* no fb bound */
-       if (!crtc->fb) {
+       if (!atomic && !crtc->fb) {
                DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }
 
-       radeon_fb = to_radeon_framebuffer(crtc->fb);
+       if (atomic) {
+               radeon_fb = to_radeon_framebuffer(fb);
+               target_fb = fb;
+       } else {
+               radeon_fb = to_radeon_framebuffer(crtc->fb);
+               target_fb = crtc->fb;
+       }
 
-       /* Pin framebuffer & get tilling informations */
        obj = radeon_fb->obj;
        rbo = obj->driver_private;
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                return r;
-       r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
-       if (unlikely(r != 0)) {
-               radeon_bo_unreserve(rbo);
-               return -EINVAL;
+
+       /* If atomic, assume fb object is pinned & idle & fenced and
+        * just update base pointers
+        */
+       if (atomic)
+               fb_location = radeon_bo_gpu_offset(rbo);
+       else {
+               r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+               if (unlikely(r != 0)) {
+                       radeon_bo_unreserve(rbo);
+                       return -EINVAL;
+               }
        }
        radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
        radeon_bo_unreserve(rbo);
 
-       switch (crtc->fb->bits_per_pixel) {
+       switch (target_fb->bits_per_pixel) {
        case 8:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
@@ -1052,7 +1175,7 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                break;
        default:
                DRM_ERROR("Unsupported screen depth %d\n",
-                         crtc->fb->bits_per_pixel);
+                         target_fb->bits_per_pixel);
                return -EINVAL;
        }
 
@@ -1093,10 +1216,10 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
        WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
        WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
-       WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
-       WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+       WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+       WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
-       fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+       fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
        WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
        WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
@@ -1115,8 +1238,8 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        else
                WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
 
-       if (old_fb && old_fb != crtc->fb) {
-               radeon_fb = to_radeon_framebuffer(old_fb);
+       if (!atomic && fb && fb != crtc->fb) {
+               radeon_fb = to_radeon_framebuffer(fb);
                rbo = radeon_fb->obj->driver_private;
                r = radeon_bo_reserve(rbo, false);
                if (unlikely(r != 0))
@@ -1138,11 +1261,26 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        struct radeon_device *rdev = dev->dev_private;
 
        if (ASIC_IS_DCE4(rdev))
-               return evergreen_crtc_set_base(crtc, x, y, old_fb);
+               return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
        else if (ASIC_IS_AVIVO(rdev))
-               return avivo_crtc_set_base(crtc, x, y, old_fb);
+               return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
        else
-               return radeon_crtc_set_base(crtc, x, y, old_fb);
+               return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                  struct drm_framebuffer *fb,
+                                  int x, int y, enum mode_set_atomic state)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
+       if (ASIC_IS_DCE4(rdev))
+               return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+       else if (ASIC_IS_AVIVO(rdev))
+               return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
+       else
+               return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
 }
 
 /* properly set additional regs when using atombios */
@@ -1230,12 +1368,19 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
                }
        }
 
-       atombios_disable_ss(crtc);
        /* always set DCPLL */
-       if (ASIC_IS_DCE4(rdev))
+       if (ASIC_IS_DCE4(rdev)) {
+               struct radeon_atom_ss ss;
+               bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                                  ASIC_INTERNAL_SS_ON_DCPLL,
+                                                                  rdev->clock.default_dispclk);
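+               /* toggle SS around the DCPLL setup so the spread is applied
+                * to the freshly programmed display clock */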
+               if (ss_enabled)
+                       atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
                atombios_crtc_set_dcpll(crtc);
+               if (ss_enabled)
+                       atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
+       }
        atombios_crtc_set_pll(crtc, adjusted_mode);
-       atombios_enable_ss(crtc);
 
        if (ASIC_IS_DCE4(rdev))
                atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@@ -1311,6 +1456,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
        .mode_fixup = atombios_crtc_mode_fixup,
        .mode_set = atombios_crtc_mode_set,
        .mode_set_base = atombios_crtc_set_base,
+       .mode_set_base_atomic = atombios_crtc_set_base_atomic,
        .prepare = atombios_crtc_prepare,
        .commit = atombios_crtc_commit,
        .load_lut = radeon_crtc_load_lut,
index 2f93d46ae69ad58dfb90ea5db02021a402506ae5..f12a5b3ec050320505c6dc0b1cb44dcdde3fbeed 100644 (file)
@@ -32,6 +32,7 @@
 #include "atom.h"
 #include "avivod.h"
 #include "evergreen_reg.h"
+#include "evergreen_blit_shaders.h"
 
 #define EVERGREEN_PFP_UCODE_SIZE 1120
 #define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -284,9 +285,444 @@ void evergreen_hpd_fini(struct radeon_device *rdev)
        }
 }
 
+/* watermark setup */
+
+static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+                                       struct radeon_crtc *radeon_crtc,
+                                       struct drm_display_mode *mode,
+                                       struct drm_display_mode *other_mode)
+{
+       u32 tmp = 0;
+       /*
+        * Line Buffer Setup
+        * There are 3 line buffers, each one shared by 2 display controllers.
+        * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+        * the display controllers.  The partitioning is done via one of four
+        * preset allocations specified in bits 2:0:
+        * first display controller
+        *  0 - first half of lb (3840 * 2)
+        *  1 - first 3/4 of lb (5760 * 2)
+        *  2 - whole lb (7680 * 2)
+        *  3 - first 1/4 of lb (1920 * 2)
+        * second display controller
+        *  4 - second half of lb (3840 * 2)
+        *  5 - second 3/4 of lb (5760 * 2)
+        *  6 - whole lb (7680 * 2)
+        *  7 - last 1/4 of lb (1920 * 2)
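+        *
+        * For example (illustrative): two heads of equal width both take
+        * the 1/2 allocations (cases 0 and 4, 3840 * 2 entries each),
+        * while a single active head gets the whole buffer (7680 * 2).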
+        */
+       if (mode && other_mode) {
+               if (mode->hdisplay > other_mode->hdisplay) {
+                       if (mode->hdisplay > 2560)
+                               tmp = 1; /* 3/4 */
+                       else
+                               tmp = 0; /* 1/2 */
+               } else if (other_mode->hdisplay > mode->hdisplay) {
+                       if (other_mode->hdisplay > 2560)
+                               tmp = 3; /* 1/4 */
+                       else
+                               tmp = 0; /* 1/2 */
+               } else
+                       tmp = 0; /* 1/2 */
+       } else if (mode)
+               tmp = 2; /* whole */
+       else if (other_mode)
+               tmp = 3; /* 1/4 */
+
+       /* second controller of the pair uses second half of the lb */
+       if (radeon_crtc->crtc_id % 2)
+               tmp += 4;
+       WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
+       switch (tmp) {
+       case 0:
+       case 4:
+       default:
+               return 3840 * 2;
+       case 1:
+       case 5:
+               return 5760 * 2;
+       case 2:
+       case 6:
+               return 7680 * 2;
+       case 3:
+       case 7:
+               return 1920 * 2;
+       }
+}
+
+static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+       u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+       case 0:
+       default:
+               return 1;
+       case 1:
+               return 2;
+       case 2:
+               return 4;
+       case 3:
+               return 8;
+       }
+}
+
+struct evergreen_wm_params {
+       u32 dram_channels; /* number of dram channels */
+       u32 yclk;          /* bandwidth per dram data pin in kHz */
+       u32 sclk;          /* engine clock in kHz */
+       u32 disp_clk;      /* display clock in kHz */
+       u32 src_width;     /* viewport width */
+       u32 active_time;   /* active display time in ns */
+       u32 blank_time;    /* blank time in ns */
+       bool interlaced;    /* mode is interlaced */
+       fixed20_12 vsc;    /* vertical scale ratio */
+       u32 num_heads;     /* number of active crtcs */
+       u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+       u32 lb_size;       /* line buffer allocated to pipe */
+       u32 vtaps;         /* vertical scaler taps */
+};
+
+static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
+{
+       /* Calculate the raw DRAM Bandwidth, derated by a 0.7 efficiency factor. */
+       fixed20_12 dram_efficiency; /* 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       dram_efficiency.full = dfixed_const(7);
+       dram_efficiency.full = dfixed_div(dram_efficiency, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
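+       /* illustrative example: 4 channels (16 bytes/clk) at yclk =
+        * 1000000 kHz works out to 16 * 1000 MHz * 0.7 = ~11200 MB/s */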
+
+       return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+       /* Calculate DRAM Bandwidth and the part allocated to display. */
+       fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+       disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+       return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
+{
+       /* Calculate the display Data return Bandwidth */
+       fixed20_12 return_efficiency; /* 0.8 */
+       fixed20_12 sclk, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       sclk.full = dfixed_const(wm->sclk);
+       sclk.full = dfixed_div(sclk, a);
+       a.full = dfixed_const(10);
+       return_efficiency.full = dfixed_const(8);
+       return_efficiency.full = dfixed_div(return_efficiency, a);
+       a.full = dfixed_const(32);
+       bandwidth.full = dfixed_mul(a, sclk);
+       bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
+{
+       /* Calculate the DMIF Request Bandwidth */
+       fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+       fixed20_12 disp_clk, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       disp_clk.full = dfixed_const(wm->disp_clk);
+       disp_clk.full = dfixed_div(disp_clk, a);
+       a.full = dfixed_const(10);
+       disp_clk_request_efficiency.full = dfixed_const(8);
+       disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+       a.full = dfixed_const(32);
+       bandwidth.full = dfixed_mul(a, disp_clk);
+       bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
+{
+       /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+       u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
+       u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
+       u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
+
+       return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
+{
+       /* Calculate the display mode Average Bandwidth
+        * DisplayMode should contain the source and destination dimensions,
+        * timing, etc.
+        */
+       fixed20_12 bpp;
+       fixed20_12 line_time;
+       fixed20_12 src_width;
+       fixed20_12 bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+       line_time.full = dfixed_div(line_time, a);
+       bpp.full = dfixed_const(wm->bytes_per_pixel);
+       src_width.full = dfixed_const(wm->src_width);
+       bandwidth.full = dfixed_mul(src_width, bpp);
+       bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+       bandwidth.full = dfixed_div(bandwidth, line_time);
+
+       return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
+{
+       /* First calculate the latency in ns */
+       u32 mc_latency = 2000; /* 2000 ns. */
+       u32 available_bandwidth = evergreen_available_bandwidth(wm);
+       u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+       u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+       u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+       u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+               (wm->num_heads * cursor_line_pair_return_time);
+       u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
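+       /* rough illustrative magnitudes: at ~10000 MB/s available
+        * bandwidth, worst_chunk_return_time = 4096000 / 10000 = ~410 ns,
+        * so the fixed 2000 ns MC latency dominates for a single head */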
+       u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+       fixed20_12 a, b, c;
+
+       if (wm->num_heads == 0)
+               return 0;
+
+       a.full = dfixed_const(2);
+       b.full = dfixed_const(1);
+       if ((wm->vsc.full > a.full) ||
+           ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+           (wm->vtaps >= 5) ||
+           ((wm->vsc.full >= a.full) && wm->interlaced))
+               max_src_lines_per_dst_line = 4;
+       else
+               max_src_lines_per_dst_line = 2;
+
+       a.full = dfixed_const(available_bandwidth);
+       b.full = dfixed_const(wm->num_heads);
+       a.full = dfixed_div(a, b);
+
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(wm->disp_clk);
+       b.full = dfixed_div(c, b);
+       c.full = dfixed_const(wm->bytes_per_pixel);
+       b.full = dfixed_mul(b, c);
+
+       lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+
+       a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(lb_fill_bw);
+       b.full = dfixed_div(c, b);
+       a.full = dfixed_div(a, b);
+       line_fill_time = dfixed_trunc(a);
+
+       if (line_fill_time < wm->active_time)
+               return latency;
+       else
+               return latency + (line_fill_time - wm->active_time);
+}
+
+static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+       if (evergreen_average_bandwidth(wm) <=
+           (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
+{
+       if (evergreen_average_bandwidth(wm) <=
+           (evergreen_available_bandwidth(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
+{
+       u32 lb_partitions = wm->lb_size / wm->src_width;
+       u32 line_time = wm->active_time + wm->blank_time;
+       u32 latency_tolerant_lines;
+       u32 latency_hiding;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1);
+       if (wm->vsc.full > a.full)
+               latency_tolerant_lines = 1;
+       else {
+               if (lb_partitions <= (wm->vtaps + 1))
+                       latency_tolerant_lines = 1;
+               else
+                       latency_tolerant_lines = 2;
+       }
+
+       latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+       if (evergreen_latency_watermark(wm) <= latency_hiding)
+               return true;
+       else
+               return false;
+}
+
+static void evergreen_program_watermarks(struct radeon_device *rdev,
+                                        struct radeon_crtc *radeon_crtc,
+                                        u32 lb_size, u32 num_heads)
+{
+       struct drm_display_mode *mode = &radeon_crtc->base.mode;
+       struct evergreen_wm_params wm;
+       u32 pixel_period;
+       u32 line_time = 0;
+       u32 latency_watermark_a = 0, latency_watermark_b = 0;
+       u32 priority_a_mark = 0, priority_b_mark = 0;
+       u32 priority_a_cnt = PRIORITY_OFF;
+       u32 priority_b_cnt = PRIORITY_OFF;
+       u32 pipe_offset = radeon_crtc->crtc_id * 16;
+       u32 tmp, arb_control3;
+       fixed20_12 a, b, c;
+
+       if (radeon_crtc->base.enabled && num_heads && mode) {
+               pixel_period = 1000000 / (u32)mode->clock;
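+               /* mode->clock is in kHz, so this yields the pixel period
+                * in ns (e.g. ~6 ns for a 162000 kHz pixel clock) */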
+               line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+               priority_a_cnt = 0;
+               priority_b_cnt = 0;
+
+               wm.yclk = rdev->pm.current_mclk * 10;
+               wm.sclk = rdev->pm.current_sclk * 10;
+               wm.disp_clk = mode->clock;
+               wm.src_width = mode->crtc_hdisplay;
+               wm.active_time = mode->crtc_hdisplay * pixel_period;
+               wm.blank_time = line_time - wm.active_time;
+               wm.interlaced = false;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       wm.interlaced = true;
+               wm.vsc = radeon_crtc->vsc;
+               wm.vtaps = 1;
+               if (radeon_crtc->rmx_type != RMX_OFF)
+                       wm.vtaps = 2;
+               wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+               wm.lb_size = lb_size;
+               wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+               wm.num_heads = num_heads;
+
+               /* set for high clocks */
+               latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
+               /* set for low clocks */
+               /* wm.yclk = low clk; wm.sclk = low clk */
+               latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
+
+               /* possibly force display priority to high */
+               /* should really do this at mode validation time... */
+               if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+                   !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
+                   !evergreen_check_latency_hiding(&wm) ||
+                   (rdev->disp_priority == 2)) {
+                       DRM_INFO("force priority to high\n");
+                       priority_a_cnt |= PRIORITY_ALWAYS_ON;
+                       priority_b_cnt |= PRIORITY_ALWAYS_ON;
+               }
+
+               a.full = dfixed_const(1000);
+               b.full = dfixed_const(mode->clock);
+               b.full = dfixed_div(b, a);
+               c.full = dfixed_const(latency_watermark_a);
+               c.full = dfixed_mul(c, b);
+               c.full = dfixed_mul(c, radeon_crtc->hsc);
+               c.full = dfixed_div(c, a);
+               a.full = dfixed_const(16);
+               c.full = dfixed_div(c, a);
+               priority_a_mark = dfixed_trunc(c);
+               priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+               a.full = dfixed_const(1000);
+               b.full = dfixed_const(mode->clock);
+               b.full = dfixed_div(b, a);
+               c.full = dfixed_const(latency_watermark_b);
+               c.full = dfixed_mul(c, b);
+               c.full = dfixed_mul(c, radeon_crtc->hsc);
+               c.full = dfixed_div(c, a);
+               a.full = dfixed_const(16);
+               c.full = dfixed_div(c, a);
+               priority_b_mark = dfixed_trunc(c);
+               priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+       }
+
+       /* select wm A */
+       arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+       tmp = arb_control3;
+       tmp &= ~LATENCY_WATERMARK_MASK(3);
+       tmp |= LATENCY_WATERMARK_MASK(1);
+       WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+       WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+              (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+               LATENCY_HIGH_WATERMARK(line_time)));
+       /* select wm B */
+       tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+       tmp &= ~LATENCY_WATERMARK_MASK(3);
+       tmp |= LATENCY_WATERMARK_MASK(2);
+       WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+       WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+              (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+               LATENCY_HIGH_WATERMARK(line_time)));
+       /* restore original selection */
+       WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
+
+       /* write the priority marks */
+       WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+       WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+}
+
 void evergreen_bandwidth_update(struct radeon_device *rdev)
 {
-       /* XXX */
+       struct drm_display_mode *mode0 = NULL;
+       struct drm_display_mode *mode1 = NULL;
+       u32 num_heads = 0, lb_size;
+       int i;
+
+       radeon_update_display_priority(rdev);
+
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (rdev->mode_info.crtcs[i]->base.enabled)
+                       num_heads++;
+       }
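+       /* each line buffer is shared by a crtc pair (0/1, 2/3, 4/5),
+        * so split the lb and program the watermarks pairwise */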
+       for (i = 0; i < rdev->num_crtc; i += 2) {
+               mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+               mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+               lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+               evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+               lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+               evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+       }
 }
 
 static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
@@ -677,7 +1113,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 
 static int evergreen_cp_start(struct radeon_device *rdev)
 {
-       int r;
+       int r, i;
        uint32_t cp_me;
 
        r = radeon_ring_lock(rdev, 7);
@@ -697,16 +1133,39 @@ static int evergreen_cp_start(struct radeon_device *rdev)
        cp_me = 0xff;
        WREG32(CP_ME_CNTL, cp_me);
 
-       r = radeon_ring_lock(rdev, 4);
+       r = radeon_ring_lock(rdev, evergreen_default_size + 15);
        if (r) {
                DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
                return r;
        }
-       /* init some VGT regs */
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-       radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-       radeon_ring_write(rdev, 0xe);
-       radeon_ring_write(rdev, 0x10);
+
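+       /* the 15 extra dwords cover the packets below: two PREAMBLE_CNTL
+        * packets (4), CLEAR_STATE (2), SQ_VTX_BASE_VTX_LOC (4) and the
+        * const clear (5) */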
+       /* setup clear context state */
+       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+       for (i = 0; i < evergreen_default_size; i++)
+               radeon_ring_write(rdev, evergreen_default_state[i]);
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+       /* set clear context state */
+       radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+       radeon_ring_write(rdev, 0);
+
+       /* SQ_VTX_BASE_VTX_LOC */
+       radeon_ring_write(rdev, 0xc0026f00);
+       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(rdev, 0x00000000);
+
+       /* Clear consts */
+       radeon_ring_write(rdev, 0xc0036f00);
+       radeon_ring_write(rdev, 0x00000bc4);
+       radeon_ring_write(rdev, 0xffffffff);
+       radeon_ring_write(rdev, 0xffffffff);
+       radeon_ring_write(rdev, 0xffffffff);
+
        radeon_ring_unlock_commit(rdev);
 
        return 0;
@@ -731,7 +1190,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 
        /* Set ring buffer size */
        rb_bufsz = drm_order(rdev->cp.ring_size / 8);
-       tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -745,8 +1204,19 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
        WREG32(CP_RB_WPTR, 0);
-       WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
-       WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+
+       /* set the wb address whether it's enabled or not */
+       WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+       WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+       WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+       if (rdev->wb.enabled)
+               WREG32(SCRATCH_UMSK, 0xff);
+       else {
+               tmp |= RB_NO_UPDATE;
+               WREG32(SCRATCH_UMSK, 0);
+       }
+
        mdelay(1);
        WREG32(CP_RB_CNTL, tmp);
 
@@ -1584,6 +2054,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
        if (rdev->irq.sw_int) {
                DRM_DEBUG("evergreen_irq_set: sw int\n");
                cp_int_cntl |= RB_INT_ENABLE;
+               cp_int_cntl |= TIME_STAMP_INT_ENABLE;
        }
        if (rdev->irq.crtc_vblank_int[0]) {
                DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -1760,8 +2231,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
 {
        u32 wptr, tmp;
 
-       /* XXX use writeback */
-       wptr = RREG32(IH_RB_WPTR);
+       if (rdev->wb.enabled)
+               wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+       else
+               wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
                /* When a ring buffer overflow happen start parsing interrupt
@@ -2000,6 +2473,7 @@ restart_ih:
                        break;
                case 181: /* CP EOP event */
                        DRM_DEBUG("IH: CP EOP\n");
+                       radeon_fence_process(rdev);
                        break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: CP EOP\n");
@@ -2048,26 +2522,18 @@ static int evergreen_startup(struct radeon_device *rdev)
                        return r;
        }
        evergreen_gpu_init(rdev);
-#if 0
-       if (!rdev->r600_blit.shader_obj) {
-               r = r600_blit_init(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed blitter (%d).\n", r);
-                       return r;
-               }
-       }
 
-       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-       if (unlikely(r != 0))
-               return r;
-       r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-                       &rdev->r600_blit.shader_gpu_addr);
-       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+       r = evergreen_blit_init(rdev);
        if (r) {
-               DRM_ERROR("failed to pin blit object %d\n", r);
-               return r;
+               evergreen_blit_fini(rdev);
+               rdev->asic->copy = NULL;
+               dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }
-#endif
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
 
        /* Enable IRQ */
        r = r600_irq_init(rdev);
@@ -2087,8 +2553,6 @@ static int evergreen_startup(struct radeon_device *rdev)
        r = evergreen_cp_resume(rdev);
        if (r)
                return r;
-       /* write back buffer are not vital so don't worry about failure */
-       r600_wb_enable(rdev);
 
        return 0;
 }
@@ -2122,23 +2586,43 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
-#if 0
        int r;
-#endif
+
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
        rdev->cp.ready = false;
        evergreen_irq_suspend(rdev);
-       r600_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
-#if 0
+
        /* unpin shaders bo */
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (likely(r == 0)) {
                radeon_bo_unpin(rdev->r600_blit.shader_obj);
                radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        }
-#endif
+
+       return 0;
+}
+
+int evergreen_copy_blit(struct radeon_device *rdev,
+                       uint64_t src_offset, uint64_t dst_offset,
+                       unsigned num_pages, struct radeon_fence *fence)
+{
+       int r;
+
+       mutex_lock(&rdev->r600_blit.mutex);
+       rdev->r600_blit.vb_ib = NULL;
+       r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       if (r) {
+               if (rdev->r600_blit.vb_ib)
+                       radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+               mutex_unlock(&rdev->r600_blit.mutex);
+               return r;
+       }
+       evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+       evergreen_blit_done_copy(rdev, fence);
+       mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
 }
 
@@ -2246,8 +2730,8 @@ int evergreen_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
-               r600_wb_fini(rdev);
                r600_irq_fini(rdev);
+               radeon_wb_fini(rdev);
                radeon_irq_kms_fini(rdev);
                evergreen_pcie_gart_fini(rdev);
                rdev->accel_working = false;
@@ -2269,10 +2753,10 @@ int evergreen_init(struct radeon_device *rdev)
 
 void evergreen_fini(struct radeon_device *rdev)
 {
-       /*r600_blit_fini(rdev);*/
+       evergreen_blit_fini(rdev);
        r700_cp_fini(rdev);
-       r600_wb_fini(rdev);
        r600_irq_fini(rdev);
+       radeon_wb_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
        radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
new file mode 100644 (file)
index 0000000..086b9b0
--- /dev/null
@@ -0,0 +1,772 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "evergreend.h"
+#include "evergreen_blit_shaders.h"
+
+#define DI_PT_RECTLIST        0x11
+#define DI_INDEX_SIZE_16_BIT  0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8                 0x1
+#define FMT_5_6_5             0x8
+#define FMT_8_8_8_8           0x1a
+#define COLOR_8               0x1
+#define COLOR_5_6_5           0x8
+#define COLOR_8_8_8_8         0x1a
+
+/* emits 17 */
+static void
+set_render_target(struct radeon_device *rdev, int format,
+                 int w, int h, u64 gpu_addr)
+{
+       u32 cb_color_info;
+       int pitch, slice;
+
+       h = ALIGN(h, 8);
+       if (h < 8)
+               h = 8;
+
+       cb_color_info = ((format << 2) | (1 << 24));
+       pitch = (w / 8) - 1;
+       slice = ((w * h) / 64) - 1;
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+       radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(rdev, pitch);
+       radeon_ring_write(rdev, slice);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, cb_color_info);
+       radeon_ring_write(rdev, (1 << 4));
+       radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+}
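
The pitch and slice fields above are encoded in hardware units: pitch in 8-pixel groups minus one, slice in 64-pixel tiles minus one. A standalone sketch (illustrative values only, not driver code) reproduces the math:

#include <stdio.h>

/* userspace sketch of the CB pitch/slice encoding used by
 * set_render_target(), for a hypothetical 4096x128 8bpp target */
int main(void)
{
	int w = 4096, h = 128;
	int pitch = (w / 8) - 1;        /* 8-pixel units, minus one -> 511 */
	int slice = ((w * h) / 64) - 1; /* 64-pixel tiles, minus one -> 8191 */
	printf("pitch=%d slice=%d\n", pitch, slice);
	return 0;
}
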
+
+/* emits 5dw */
+static void
+cp_set_surface_sync(struct radeon_device *rdev,
+                   u32 sync_type, u32 size,
+                   u64 mc_addr)
+{
+       u32 cp_coher_size;
+
+       if (size == 0xffffffff)
+               cp_coher_size = 0xffffffff;
+       else
+               cp_coher_size = ((size + 255) >> 8);
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       radeon_ring_write(rdev, sync_type);
+       radeon_ring_write(rdev, cp_coher_size);
+       radeon_ring_write(rdev, mc_addr >> 8);
+       radeon_ring_write(rdev, 10); /* poll interval */
+}
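
The coherence size written above is counted in 256-byte blocks, rounded up, which is exactly what ((size + 255) >> 8) computes; a minimal standalone check (not driver code):

/* standalone sketch: CP_COHER_SIZE block rounding */
unsigned int coher_blocks(unsigned int size_bytes)
{
	return (size_bytes + 255) >> 8;  /* 48 -> 1, 512 -> 2, 513 -> 3 */
}
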
+
+/* emits 11dw + 1 surface sync = 16dw */
+static void
+set_shaders(struct radeon_device *rdev)
+{
+       u64 gpu_addr;
+
+       /* VS */
+       gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+       radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(rdev, 2);
+       radeon_ring_write(rdev, 0);
+
+       /* PS */
+       gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+       radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(rdev, 1);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 2);
+
+       gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+       cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+}
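
One detail worth noting (our reading of the code, not stated in the patch): SQ_PGM_START_VS/PS take the shader address as gpu_addr >> 8, i.e. in 256-byte units, which is why evergreen_blit_init() below rounds vs_offset and ps_offset up with ALIGN(obj_size, 256).
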
+
+/* emits 10 + 1 sync (5) = 15 */
+static void
+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+{
+       u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
+
+       /* high addr, stride */
+       sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+       /* xyzw swizzles */
+       sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+       radeon_ring_write(rdev, 0x580);
+       radeon_ring_write(rdev, gpu_addr & 0xffffffff);
+       radeon_ring_write(rdev, 48 - 1); /* size */
+       radeon_ring_write(rdev, sq_vtx_constant_word2);
+       radeon_ring_write(rdev, sq_vtx_constant_word3);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+
+       if (rdev->family == CHIP_CEDAR)
+               cp_set_surface_sync(rdev,
+                                   PACKET3_TC_ACTION_ENA, 48, gpu_addr);
+       else
+               cp_set_surface_sync(rdev,
+                                   PACKET3_VC_ACTION_ENA, 48, gpu_addr);
+}
+
+/* emits 10 */
+static void
+set_tex_resource(struct radeon_device *rdev,
+                int format, int w, int h, int pitch,
+                u64 gpu_addr)
+{
+       u32 sq_tex_resource_word0, sq_tex_resource_word1;
+       u32 sq_tex_resource_word4, sq_tex_resource_word7;
+
+       if (h < 1)
+               h = 1;
+
+       sq_tex_resource_word0 = (1 << 0); /* 2D */
+       sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
+                                 ((w - 1) << 18));
+       sq_tex_resource_word1 = ((h - 1) << 0);
+       /* xyzw swizzles */
+       sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
+
+       sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30);
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, sq_tex_resource_word0);
+       radeon_ring_write(rdev, sq_tex_resource_word1);
+       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(rdev, gpu_addr >> 8);
+       radeon_ring_write(rdev, sq_tex_resource_word4);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, sq_tex_resource_word7);
+}
+
+/* emits 12 */
+static void
+set_scissors(struct radeon_device *rdev, int x1, int y1,
+            int x2, int y2)
+{
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
+       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+       radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+}
+
+/* emits 10 */
+static void
+draw_auto(struct radeon_device *rdev)
+{
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+       radeon_ring_write(rdev, DI_PT_RECTLIST);
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
+       radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
+       radeon_ring_write(rdev, 1);
+
+       radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+       radeon_ring_write(rdev, 3);
+       radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+}
+
+/* emits 30 */
+static void
+set_default_state(struct radeon_device *rdev)
+{
+       u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
+       u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
+       u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
+       int num_ps_gprs, num_vs_gprs, num_temp_gprs;
+       int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
+       int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+       int num_hs_threads, num_ls_threads;
+       int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+       int num_hs_stack_entries, num_ls_stack_entries;
+
+       switch (rdev->family) {
+       case CHIP_CEDAR:
+       default:
+               num_ps_gprs = 93;
+               num_vs_gprs = 46;
+               num_temp_gprs = 4;
+               num_gs_gprs = 31;
+               num_es_gprs = 31;
+               num_hs_gprs = 23;
+               num_ls_gprs = 23;
+               num_ps_threads = 96;
+               num_vs_threads = 16;
+               num_gs_threads = 16;
+               num_es_threads = 16;
+               num_hs_threads = 16;
+               num_ls_threads = 16;
+               num_ps_stack_entries = 42;
+               num_vs_stack_entries = 42;
+               num_gs_stack_entries = 42;
+               num_es_stack_entries = 42;
+               num_hs_stack_entries = 42;
+               num_ls_stack_entries = 42;
+               break;
+       case CHIP_REDWOOD:
+               num_ps_gprs = 93;
+               num_vs_gprs = 46;
+               num_temp_gprs = 4;
+               num_gs_gprs = 31;
+               num_es_gprs = 31;
+               num_hs_gprs = 23;
+               num_ls_gprs = 23;
+               num_ps_threads = 128;
+               num_vs_threads = 20;
+               num_gs_threads = 20;
+               num_es_threads = 20;
+               num_hs_threads = 20;
+               num_ls_threads = 20;
+               num_ps_stack_entries = 42;
+               num_vs_stack_entries = 42;
+               num_gs_stack_entries = 42;
+               num_es_stack_entries = 42;
+               num_hs_stack_entries = 42;
+               num_ls_stack_entries = 42;
+               break;
+       case CHIP_JUNIPER:
+               num_ps_gprs = 93;
+               num_vs_gprs = 46;
+               num_temp_gprs = 4;
+               num_gs_gprs = 31;
+               num_es_gprs = 31;
+               num_hs_gprs = 23;
+               num_ls_gprs = 23;
+               num_ps_threads = 128;
+               num_vs_threads = 20;
+               num_gs_threads = 20;
+               num_es_threads = 20;
+               num_hs_threads = 20;
+               num_ls_threads = 20;
+               num_ps_stack_entries = 85;
+               num_vs_stack_entries = 85;
+               num_gs_stack_entries = 85;
+               num_es_stack_entries = 85;
+               num_hs_stack_entries = 85;
+               num_ls_stack_entries = 85;
+               break;
+       case CHIP_CYPRESS:
+       case CHIP_HEMLOCK:
+               num_ps_gprs = 93;
+               num_vs_gprs = 46;
+               num_temp_gprs = 4;
+               num_gs_gprs = 31;
+               num_es_gprs = 31;
+               num_hs_gprs = 23;
+               num_ls_gprs = 23;
+               num_ps_threads = 128;
+               num_vs_threads = 20;
+               num_gs_threads = 20;
+               num_es_threads = 20;
+               num_hs_threads = 20;
+               num_ls_threads = 20;
+               num_ps_stack_entries = 85;
+               num_vs_stack_entries = 85;
+               num_gs_stack_entries = 85;
+               num_es_stack_entries = 85;
+               num_hs_stack_entries = 85;
+               num_ls_stack_entries = 85;
+               break;
+       }
+
+       if (rdev->family == CHIP_CEDAR)
+               sq_config = 0;
+       else
+               sq_config = VC_ENABLE;
+
+       sq_config |= (EXPORT_SRC_C |
+                     CS_PRIO(0) |
+                     LS_PRIO(0) |
+                     HS_PRIO(0) |
+                     PS_PRIO(0) |
+                     VS_PRIO(1) |
+                     GS_PRIO(2) |
+                     ES_PRIO(3));
+
+       sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+                                 NUM_VS_GPRS(num_vs_gprs) |
+                                 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+       sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+                                 NUM_ES_GPRS(num_es_gprs));
+       sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
+                                 NUM_LS_GPRS(num_ls_gprs));
+       sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+                                  NUM_VS_THREADS(num_vs_threads) |
+                                  NUM_GS_THREADS(num_gs_threads) |
+                                  NUM_ES_THREADS(num_es_threads));
+       sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
+                                    NUM_LS_THREADS(num_ls_threads));
+       sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+                                   NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+       sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+                                   NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+       sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
+                                   NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
+
+       /* set clear context state */
+       radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+       radeon_ring_write(rdev, 0);
+
+       /* disable dyn gprs */
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+       radeon_ring_write(rdev, 0);
+
+       /* SQ config */
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+       radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+       radeon_ring_write(rdev, sq_config);
+       radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
+       radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
+       radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, sq_thread_resource_mgmt);
+       radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
+       radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
+       radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+       radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+
+       /* CONTEXT_CONTROL */
+       radeon_ring_write(rdev, 0xc0012800);
+       radeon_ring_write(rdev, 0x80000000);
+       radeon_ring_write(rdev, 0x80000000);
+
+       /* SQ_VTX_BASE_VTX_LOC */
+       radeon_ring_write(rdev, 0xc0026f00);
+       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(rdev, 0x00000000);
+
+       /* SET_SAMPLER */
+       radeon_ring_write(rdev, 0xc0036e00);
+       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(rdev, 0x00000012);
+       radeon_ring_write(rdev, 0x00000000);
+       radeon_ring_write(rdev, 0x00000000);
+}
+
+static inline uint32_t i2f(uint32_t input)
+{
+       u32 result, i, exponent, fraction;
+
+       if ((input & 0x3fff) == 0)
+               result = 0; /* 0 is a special case */
+       else {
+               exponent = 140; /* exponent biased by 127 */
+               fraction = (input & 0x3fff) << 10; /* cheat and only
+                                                     handle numbers below 2^15 */
+               for (i = 0; i < 14; i++) {
+                       if (fraction & 0x800000)
+                               break;
+                       else {
+                               fraction = fraction << 1; /* keep
+                                                            shifting left until top bit = 1 */
+                               exponent = exponent - 1;
+                       }
+               }
+               result = exponent << 23 | (fraction & 0x7fffff); /* mask
+                                                                   off top bit; assumed 1 */
+       }
+       return result;
+}
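
Since kernel code cannot use the FPU, i2f() builds the IEEE-754 single-precision bit pattern by hand. In a userspace test program the loop can be cross-checked against the compiler's int-to-float conversion for inputs 0..16383 (hypothetical helper below, not driver code):

#include <stdint.h>
#include <string.h>

/* userspace-only reference: reinterpret the compiler's float conversion */
static uint32_t i2f_ref(uint32_t v)
{
	float f = (float)(v & 0x3fff);  /* same input range as i2f() */
	uint32_t bits;
	memcpy(&bits, &f, sizeof(bits));
	return bits;                    /* should equal i2f(v) */
}
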
+
+int evergreen_blit_init(struct radeon_device *rdev)
+{
+       u32 obj_size;
+       int r;
+       void *ptr;
+
+       /* pin copy shader into vram if already initialized */
+       if (rdev->r600_blit.shader_obj)
+               goto done;
+
+       mutex_init(&rdev->r600_blit.mutex);
+       rdev->r600_blit.state_offset = 0;
+       rdev->r600_blit.state_len = 0;
+       obj_size = 0;
+
+       rdev->r600_blit.vs_offset = obj_size;
+       obj_size += evergreen_vs_size * 4;
+       obj_size = ALIGN(obj_size, 256);
+
+       rdev->r600_blit.ps_offset = obj_size;
+       obj_size += evergreen_ps_size * 4;
+       obj_size = ALIGN(obj_size, 256);
+
+       r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+                               &rdev->r600_blit.shader_obj);
+       if (r) {
+               DRM_ERROR("evergreen failed to allocate shader\n");
+               return r;
+       }
+
+       DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
+                 obj_size,
+                 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+
+       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+       if (r) {
+               DRM_ERROR("failed to map blit object %d\n", r);
+               return r;
+       }
+
+       memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
+       memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+       radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+done:
+       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+                         &rdev->r600_blit.shader_gpu_addr);
+       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+       if (r) {
+               dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+               return r;
+       }
+       return 0;
+}
+
+void evergreen_blit_fini(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->r600_blit.shader_obj == NULL)
+               return;
+       /* If we can't reserve the bo, unref should be enough to destroy
+        * it when it becomes idle.
+        */
+       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+       if (!r) {
+               radeon_bo_unpin(rdev->r600_blit.shader_obj);
+               radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+       }
+       radeon_bo_unref(&rdev->r600_blit.shader_obj);
+}
+
+static int evergreen_vb_ib_get(struct radeon_device *rdev)
+{
+       int r;
+       r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+       if (r) {
+               DRM_ERROR("failed to get IB for vertex buffer\n");
+               return r;
+       }
+
+       rdev->r600_blit.vb_total = 64*1024;
+       rdev->r600_blit.vb_used = 0;
+       return 0;
+}
+
+static void evergreen_vb_ib_put(struct radeon_device *rdev)
+{
+       radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
+       radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+}
+
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
+{
+       int r;
+       int ring_size, line_size;
+       int max_size;
+       /* loops of emits + fence emit possible */
+       int dwords_per_loop = 74, num_loops;
+
+       r = evergreen_vb_ib_get(rdev);
+       if (r)
+               return r;
+
+       /* 8 bpp vs 32 bpp for xfer unit */
+       if (size_bytes & 3)
+               line_size = 8192;
+       else
+               line_size = 8192 * 4;
+
+       max_size = 8192 * line_size;
+
+       /* major loops cover the max size transfer */
+       num_loops = ((size_bytes + max_size) / max_size);
+       num_loops += ((size_bytes % line_size) ? 1 : 0);        /* minor loops cover the extra non-aligned bits */
+       num_loops += ((size_bytes % line_size) ? 1 : 0);
+       /* calculate number of loops correctly */
+       ring_size = num_loops * dwords_per_loop;
+       /* set default state + shaders */
+       ring_size += 46; /* shaders + def state */
+       ring_size += 10; /* fence emit for VB IB */
+       ring_size += 5; /* done copy */
+       ring_size += 10; /* fence emit for done copy */
+       r = radeon_ring_lock(rdev, ring_size);
+       if (r)
+               return r;
+
+       set_default_state(rdev); /* 30 */
+       set_shaders(rdev); /* 16 */
+       return 0;
+}
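
A worked example of the sizing above (illustrative numbers): a 1 MiB dword-aligned copy gives line_size = 8192 * 4 and max_size = 8192 * line_size, so num_loops = (1 MiB + max_size) / max_size = 1 with no extra minor loop (1 MiB is a multiple of line_size), and ring_size = 1 * 74 + 46 + 10 + 5 + 10 = 145 dwords requested from radeon_ring_lock().
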
+
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+{
+       int r;
+
+       if (rdev->r600_blit.vb_ib)
+               evergreen_vb_ib_put(rdev);
+
+       if (fence)
+               r = radeon_fence_emit(rdev, fence);
+
+       radeon_ring_unlock_commit(rdev);
+}
+
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+                            u64 src_gpu_addr, u64 dst_gpu_addr,
+                            int size_bytes)
+{
+       int max_bytes;
+       u64 vb_gpu_addr;
+       u32 *vb;
+
+       DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
+                 size_bytes, rdev->r600_blit.vb_used);
+       vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
+       if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
+               max_bytes = 8192;
+
+               while (size_bytes) {
+                       int cur_size = size_bytes;
+                       int src_x = src_gpu_addr & 255;
+                       int dst_x = dst_gpu_addr & 255;
+                       int h = 1;
+                       src_gpu_addr = src_gpu_addr & ~255ULL;
+                       dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+                       if (!src_x && !dst_x) {
+                               h = (cur_size / max_bytes);
+                               if (h > 8192)
+                                       h = 8192;
+                               if (h == 0)
+                                       h = 1;
+                               else
+                                       cur_size = max_bytes;
+                       } else {
+                               if (cur_size > max_bytes)
+                                       cur_size = max_bytes;
+                               if (cur_size > (max_bytes - dst_x))
+                                       cur_size = (max_bytes - dst_x);
+                               if (cur_size > (max_bytes - src_x))
+                                       cur_size = (max_bytes - src_x);
+                       }
+
+                       if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+                               WARN_ON(1);
+                       }
+
+                       vb[0] = i2f(dst_x);
+                       vb[1] = 0;
+                       vb[2] = i2f(src_x);
+                       vb[3] = 0;
+
+                       vb[4] = i2f(dst_x);
+                       vb[5] = i2f(h);
+                       vb[6] = i2f(src_x);
+                       vb[7] = i2f(h);
+
+                       vb[8] = i2f(dst_x + cur_size);
+                       vb[9] = i2f(h);
+                       vb[10] = i2f(src_x + cur_size);
+                       vb[11] = i2f(h);
+
+                       /* src 10 */
+                       set_tex_resource(rdev, FMT_8,
+                                        src_x + cur_size, h, src_x + cur_size,
+                                        src_gpu_addr);
+
+                       /* 5 */
+                       cp_set_surface_sync(rdev,
+                                           PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+
+                       /* dst 17 */
+                       set_render_target(rdev, COLOR_8,
+                                         dst_x + cur_size, h,
+                                         dst_gpu_addr);
+
+                       /* scissors 12 */
+                       set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
+
+                       /* 15 */
+                       vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+                       set_vtx_resource(rdev, vb_gpu_addr);
+
+                       /* draw 10 */
+                       draw_auto(rdev);
+
+                       /* 5 */
+                       cp_set_surface_sync(rdev,
+                                           PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+                                           cur_size * h, dst_gpu_addr);
+
+                       vb += 12;
+                       rdev->r600_blit.vb_used += 12 * 4;
+
+                       src_gpu_addr += cur_size * h;
+                       dst_gpu_addr += cur_size * h;
+                       size_bytes -= cur_size * h;
+               }
+       } else {
+               max_bytes = 8192 * 4;
+
+               while (size_bytes) {
+                       int cur_size = size_bytes;
+                       int src_x = (src_gpu_addr & 255);
+                       int dst_x = (dst_gpu_addr & 255);
+                       int h = 1;
+                       src_gpu_addr = src_gpu_addr & ~255ULL;
+                       dst_gpu_addr = dst_gpu_addr & ~255ULL;
+
+                       if (!src_x && !dst_x) {
+                               h = (cur_size / max_bytes);
+                               if (h > 8192)
+                                       h = 8192;
+                               if (h == 0)
+                                       h = 1;
+                               else
+                                       cur_size = max_bytes;
+                       } else {
+                               if (cur_size > max_bytes)
+                                       cur_size = max_bytes;
+                               if (cur_size > (max_bytes - dst_x))
+                                       cur_size = (max_bytes - dst_x);
+                               if (cur_size > (max_bytes - src_x))
+                                       cur_size = (max_bytes - src_x);
+                       }
+
+                       if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
+                               WARN_ON(1);
+                       }
+
+                       vb[0] = i2f(dst_x / 4);
+                       vb[1] = 0;
+                       vb[2] = i2f(src_x / 4);
+                       vb[3] = 0;
+
+                       vb[4] = i2f(dst_x / 4);
+                       vb[5] = i2f(h);
+                       vb[6] = i2f(src_x / 4);
+                       vb[7] = i2f(h);
+
+                       vb[8] = i2f((dst_x + cur_size) / 4);
+                       vb[9] = i2f(h);
+                       vb[10] = i2f((src_x + cur_size) / 4);
+                       vb[11] = i2f(h);
+
+                       /* src 10 */
+                       set_tex_resource(rdev, FMT_8_8_8_8,
+                                        (src_x + cur_size) / 4,
+                                        h, (src_x + cur_size) / 4,
+                                        src_gpu_addr);
+                       /* 5 */
+                       cp_set_surface_sync(rdev,
+                                           PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+                       /* dst 17 */
+                       set_render_target(rdev, COLOR_8_8_8_8,
+                                         (dst_x + cur_size) / 4, h,
+                                         dst_gpu_addr);
+
+                       /* scissors 12  */
+                       set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
+
+                       /* Vertex buffer setup 15 */
+                       vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
+                       set_vtx_resource(rdev, vb_gpu_addr);
+
+                       /* draw 10 */
+                       draw_auto(rdev);
+
+                       /* 5 */
+                       cp_set_surface_sync(rdev,
+                                           PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+                                           cur_size * h, dst_gpu_addr);
+
+                       /* 74 ring dwords per loop */
+                       vb += 12;
+                       rdev->r600_blit.vb_used += 12 * 4;
+
+                       src_gpu_addr += cur_size * h;
+                       dst_gpu_addr += cur_size * h;
+                       size_bytes -= cur_size * h;
+               }
+       }
+}
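
To make the chunking concrete, consider one illustrative first iteration of the 8 bpp path: size_bytes = 10001 (odd, so the byte path is taken) with src_gpu_addr = 0x1008 and dst_gpu_addr = 0x2000 gives src_x = 8 and dst_x = 0, both base addresses align down to a 256-byte boundary, and cur_size clamps to 8192 - 8 = 8184 with h = 1; that is, one 8184x1 rectangle is blitted from texel column 8 to column 0 before the loop advances all three counters by cur_size * h.
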
+
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
new file mode 100644 (file)
index 0000000..ef1d28c
--- /dev/null
@@ -0,0 +1,348 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
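
The raw dwords that follow are PM4 type-3 packet streams. Our reading of the header layout, matching the PACKET3(PACKET3_SET_CONTEXT_REG, n) calls in evergreen_blit_kms.c (treat the decode as illustrative):

/* illustrative decode: bits 31:30 = 3 (type-3), 29:16 = count, 15:8 = opcode */
#define PKT3(op, n) ((3u << 30) | (((n) & 0x3fff) << 16) | (((op) & 0xff) << 8))
/* PKT3(0x69, 1) == 0xc0016900: SET_CONTEXT_REG, one value after the offset;
 * PKT3(0x69, 6) == 0xc0066900: same opcode carrying six register values */
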
+
+const u32 evergreen_default_state[] =
+{
+       0xc0016900,
+       0x0000023b,
+       0x00000000, /* SQ_LDS_ALLOC_PS */
+
+       0xc0066900,
+       0x00000240,
+       0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+
+       0xc0046900,
+       0x00000247,
+       0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+       0x00000000,
+       0x00000000,
+       0x00000000,
+
+       0xc0026900,
+       0x00000010,
+       0x00000000, /* DB_Z_INFO */
+       0x00000000, /* DB_STENCIL_INFO */
+
+       0xc0016900,
+       0x00000200,
+       0x00000000, /* DB_DEPTH_CONTROL */
+
+       0xc0066900,
+       0x00000000,
+       0x00000060, /* DB_RENDER_CONTROL */
+       0x00000000, /* DB_COUNT_CONTROL */
+       0x00000000, /* DB_DEPTH_VIEW */
+       0x0000002a, /* DB_RENDER_OVERRIDE */
+       0x00000000, /* DB_RENDER_OVERRIDE2 */
+       0x00000000, /* DB_HTILE_DATA_BASE */
+
+       0xc0026900,
+       0x0000000a,
+       0x00000000, /* DB_STENCIL_CLEAR */
+       0x00000000, /* DB_DEPTH_CLEAR */
+
+       0xc0016900,
+       0x000002dc,
+       0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+       0xc0016900,
+       0x00000080,
+       0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+       0xc00d6900,
+       0x00000083,
+       0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+       0x00000000, /* PA_SC_CLIPRECT_0_TL */
+       0x20002000, /* PA_SC_CLIPRECT_0_BR */
+       0x00000000,
+       0x20002000,
+       0x00000000,
+       0x20002000,
+       0x00000000,
+       0x20002000,
+       0xaaaaaaaa, /* PA_SC_EDGERULE */
+       0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+       0x0000000f, /* CB_TARGET_MASK */
+       0x0000000f, /* CB_SHADER_MASK */
+
+       0xc0226900,
+       0x00000094,
+       0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+       0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x80000000,
+       0x20002000,
+       0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+       0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+       0xc0016900,
+       0x000000d4,
+       0x00000000, /* SX_MISC */
+
+       0xc0026900,
+       0x00000292,
+       0x00000000, /* PA_SC_MODE_CNTL_0 */
+       0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+       0xc0106900,
+       0x00000300,
+       0x00000000, /* PA_SC_LINE_CNTL */
+       0x00000000, /* PA_SC_AA_CONFIG */
+       0x00000005, /* PA_SU_VTX_CNTL */
+       0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+       0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+       0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+       0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+       0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
+       0xffffffff, /* PA_SC_AA_MASK */
+
+       0xc00d6900,
+       0x00000202,
+       0x00cc0010, /* CB_COLOR_CONTROL */
+       0x00000210, /* DB_SHADER_CONTROL */
+       0x00010000, /* PA_CL_CLIP_CNTL */
+       0x00000004, /* PA_SU_SC_MODE_CNTL */
+       0x00000100, /* PA_CL_VTE_CNTL */
+       0x00000000, /* PA_CL_VS_OUT_CNTL */
+       0x00000000, /* PA_CL_NANINF_CNTL */
+       0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+       0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+       0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
+
+       0xc0066900,
+       0x000002de,
+       0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+
+       0xc0016900,
+       0x00000229,
+       0x00000000, /* SQ_PGM_START_FS */
+
+       0xc0016900,
+       0x0000022a,
+       0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+       0xc0096900,
+       0x00000100,
+       0x00ffffff, /* VGT_MAX_VTX_INDX */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /* SX_ALPHA_TEST_CONTROL */
+       0x00000000, /* CB_BLEND_RED */
+       0x00000000, /* CB_BLEND_GREEN */
+       0x00000000, /* CB_BLEND_BLUE */
+       0x00000000, /* CB_BLEND_ALPHA */
+
+       0xc0026900,
+       0x000002a8,
+       0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+       0x00000000, /*  */
+
+       0xc0026900,
+       0x000002ad,
+       0x00000000, /* VGT_REUSE_OFF */
+       0x00000000, /*  */
+
+       0xc0116900,
+       0x00000280,
+       0x00000000, /* PA_SU_POINT_SIZE */
+       0x00000000, /* PA_SU_POINT_MINMAX */
+       0x00000008, /* PA_SU_LINE_CNTL */
+       0x00000000, /* PA_SC_LINE_STIPPLE */
+       0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+       0x00000000, /* VGT_HOS_CNTL */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /* VGT_GS_MODE */
+
+       0xc0016900,
+       0x000002a1,
+       0x00000000, /* VGT_PRIMITIVEID_EN */
+
+       0xc0016900,
+       0x000002a5,
+       0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+       0xc0016900,
+       0x000002d5,
+       0x00000000, /* VGT_SHADER_STAGES_EN */
+
+       0xc0026900,
+       0x000002e5,
+       0x00000000, /* VGT_STRMOUT_CONFIG */
+       0x00000000, /*  */
+
+       0xc0016900,
+       0x000001e0,
+       0x00000000, /* CB_BLEND0_CONTROL */
+
+       0xc0016900,
+       0x000001b1,
+       0x00000000, /* SPI_VS_OUT_CONFIG */
+
+       0xc0016900,
+       0x00000187,
+       0x00000000, /* SPI_VS_OUT_ID_0 */
+
+       0xc0016900,
+       0x00000191,
+       0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+
+       0xc00b6900,
+       0x000001b3,
+       0x20000001, /* SPI_PS_IN_CONTROL_0 */
+       0x00000000, /* SPI_PS_IN_CONTROL_1 */
+       0x00000000, /* SPI_INTERP_CONTROL_0 */
+       0x00000000, /* SPI_INPUT_Z */
+       0x00000000, /* SPI_FOG_CNTL */
+       0x00100000, /* SPI_BARYC_CNTL */
+       0x00000000, /* SPI_PS_IN_CONTROL_2 */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+       0x00000000, /*  */
+
+       0xc0026900,
+       0x00000316,
+       0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+       0x00000010, /*  */
+};
+
+const u32 evergreen_vs[] =
+{
+       0x00000004,
+       0x80800400,
+       0x0000a03c,
+       0x95000688,
+       0x00004000,
+       0x15200688,
+       0x00000000,
+       0x00000000,
+       0x3c000000,
+       0x67961001,
+       0x00080000,
+       0x00000000,
+       0x1c000000,
+       0x67961000,
+       0x00000008,
+       0x00000000,
+};
+
+const u32 evergreen_ps[] =
+{
+       0x00000003,
+       0xa00c0000,
+       0x00000008,
+       0x80400000,
+       0x00000000,
+       0x95200688,
+       0x00380400,
+       0x00146b10,
+       0x00380000,
+       0x20146b10,
+       0x00380400,
+       0x40146b00,
+       0x80380000,
+       0x60146b00,
+       0x00000000,
+       0x00000000,
+       0x00000010,
+       0x000d1000,
+       0xb0800000,
+       0x00000000,
+};
+
+const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
+const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
+const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
new file mode 100644 (file)
index 0000000..bb8d6c7
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef EVERGREEN_BLIT_SHADERS_H
+#define EVERGREEN_BLIT_SHADERS_H
+
+extern const u32 evergreen_ps[];
+extern const u32 evergreen_vs[];
+extern const u32 evergreen_default_state[];
+
+extern const u32 evergreen_ps_size, evergreen_vs_size;
+extern const u32 evergreen_default_size;
+
+#endif
index 9b7532dd30f754d4ad2e3e02915006ae823538f2..113c70cc8b3930eba7170a94f2c75b9f62e07c1d 100644 (file)
 #define                SOFT_RESET_REGBB                        (1 << 22)
 #define                SOFT_RESET_ORB                          (1 << 23)
 
+/* display watermarks */
+#define        DC_LB_MEMORY_SPLIT                                0x6b0c
+#define        PRIORITY_A_CNT                                    0x6b18
+#define                PRIORITY_MARK_MASK                        0x7fff
+#define                PRIORITY_OFF                              (1 << 16)
+#define                PRIORITY_ALWAYS_ON                        (1 << 20)
+#define        PRIORITY_B_CNT                                    0x6b1c
+#define        PIPE0_ARBITRATION_CONTROL3                        0x0bf0
+#       define LATENCY_WATERMARK_MASK(x)                  ((x) << 16)
+#define        PIPE0_LATENCY_CONTROL                             0x0bf4
+#       define LATENCY_LOW_WATERMARK(x)                   ((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)                  ((x) << 16)
+
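
A hedged sketch of how the new watermark fields compose (the macros come from this header; the write itself and the values are illustrative, not part of this patch):

/* illustrative only: pack a low/high latency watermark pair for pipe 0 */
u32 latency = LATENCY_LOW_WATERMARK(40) | LATENCY_HIGH_WATERMARK(80);
WREG32(PIPE0_LATENCY_CONTROL, latency);
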
 #define IH_RB_CNTL                                        0x3e00
 #       define IH_RB_ENABLE                               (1 << 0)
 #       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
 #define        PACKET3_EVENT_WRITE_EOP                         0x47
 #define        PACKET3_EVENT_WRITE_EOS                         0x48
 #define        PACKET3_PREAMBLE_CNTL                           0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
 #define        PACKET3_RB_OFFSET                               0x4B
 #define        PACKET3_ALU_PS_CONST_BUFFER_COPY                0x4C
 #define        PACKET3_ALU_VS_CONST_BUFFER_COPY                0x4D
 #define SQ_ALU_CONST_CACHE_LS_14                       0x28f78
 #define SQ_ALU_CONST_CACHE_LS_15                       0x28f7c
 
+#define PA_SC_SCREEN_SCISSOR_TL                         0x28030
+#define PA_SC_GENERIC_SCISSOR_TL                        0x28240
+#define PA_SC_WINDOW_SCISSOR_TL                         0x28204
+#define VGT_PRIMITIVE_TYPE                              0x8958
+
 #define DB_DEPTH_CONTROL                               0x28800
 #define DB_DEPTH_VIEW                                  0x28008
 #define DB_HTILE_DATA_BASE                             0x28014
index e59422320bb6df9873fbf88f9e29d34fdc412110..6d1540c0bfed1c192e258bb2a4c65b74e559b2a0 100644 (file)
@@ -675,67 +675,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
 }
 
-int r100_wb_init(struct radeon_device *rdev)
-{
-       int r;
-
-       if (rdev->wb.wb_obj == NULL) {
-               r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
-                                       RADEON_GEM_DOMAIN_GTT,
-                                       &rdev->wb.wb_obj);
-               if (r) {
-                       dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
-                       return r;
-               }
-               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-               if (unlikely(r != 0))
-                       return r;
-               r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-                                       &rdev->wb.gpu_addr);
-               if (r) {
-                       dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
-                       radeon_bo_unreserve(rdev->wb.wb_obj);
-                       return r;
-               }
-               r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-               radeon_bo_unreserve(rdev->wb.wb_obj);
-               if (r) {
-                       dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
-                       return r;
-               }
-       }
-       WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
-       WREG32(R_00070C_CP_RB_RPTR_ADDR,
-               S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
-       WREG32(R_000770_SCRATCH_UMSK, 0xff);
-       return 0;
-}
-
-void r100_wb_disable(struct radeon_device *rdev)
-{
-       WREG32(R_000770_SCRATCH_UMSK, 0);
-}
-
-void r100_wb_fini(struct radeon_device *rdev)
-{
-       int r;
-
-       r100_wb_disable(rdev);
-       if (rdev->wb.wb_obj) {
-               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-               if (unlikely(r != 0)) {
-                       dev_err(rdev->dev, "(%d) can't finish WB\n", r);
-                       return;
-               }
-               radeon_bo_kunmap(rdev->wb.wb_obj);
-               radeon_bo_unpin(rdev->wb.wb_obj);
-               radeon_bo_unreserve(rdev->wb.wb_obj);
-               radeon_bo_unref(&rdev->wb.wb_obj);
-               rdev->wb.wb = NULL;
-               rdev->wb.wb_obj = NULL;
-       }
-}
-
 int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
@@ -996,20 +935,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
        tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
               REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
-              REG_SET(RADEON_MAX_FETCH, max_fetch) |
-              RADEON_RB_NO_UPDATE);
+              REG_SET(RADEON_MAX_FETCH, max_fetch));
 #ifdef __BIG_ENDIAN
        tmp |= RADEON_BUF_SWAP_32BIT;
 #endif
-       WREG32(RADEON_CP_RB_CNTL, tmp);
+       WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
 
        /* Set ring address */
        DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
        WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
        /* Force read & write ptr to 0 */
-       WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+       WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
        WREG32(RADEON_CP_RB_WPTR, 0);
+
+       /* set the wb address whether it's enabled or not */
+       WREG32(R_00070C_CP_RB_RPTR_ADDR,
+               S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
+       WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
+
+       if (rdev->wb.enabled)
+               WREG32(R_000770_SCRATCH_UMSK, 0xff);
+       else {
+               tmp |= RADEON_RB_NO_UPDATE;
+               WREG32(R_000770_SCRATCH_UMSK, 0);
+       }
+
        WREG32(RADEON_CP_RB_CNTL, tmp);
        udelay(10);
        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
@@ -1052,6 +1003,7 @@ void r100_cp_disable(struct radeon_device *rdev)
        rdev->cp.ready = false;
        WREG32(RADEON_CP_CSQ_MODE, 0);
        WREG32(RADEON_CP_CSQ_CNTL, 0);
+       WREG32(R_000770_SCRATCH_UMSK, 0);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
@@ -2318,6 +2270,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
                /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
                 * Novell bug 204882, along with lots of Ubuntu ones
                 */
+               if (rdev->mc.aper_size > config_aper_size)
+                       config_aper_size = rdev->mc.aper_size;
+
                if (config_aper_size > rdev->mc.real_vram_size)
                        rdev->mc.mc_vram_size = config_aper_size;
                else
@@ -3737,6 +3692,12 @@ static int r100_startup(struct radeon_device *rdev)
                if (r)
                        return r;
        }
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3746,9 +3707,6 @@ static int r100_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_wb_init(rdev);
-       if (r)
-               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
@@ -3782,7 +3740,7 @@ int r100_resume(struct radeon_device *rdev)
 int r100_suspend(struct radeon_device *rdev)
 {
        r100_cp_disable(rdev);
-       r100_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
@@ -3792,7 +3750,7 @@ int r100_suspend(struct radeon_device *rdev)
 void r100_fini(struct radeon_device *rdev)
 {
        r100_cp_fini(rdev);
-       r100_wb_fini(rdev);
+       radeon_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
@@ -3905,7 +3863,7 @@ int r100_init(struct radeon_device *rdev)
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
-               r100_wb_fini(rdev);
+               radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
index c827738ad7ddbe620323937f97c5cc8953365338..34527e600fe94f91ab33da78285b695afbb6cd6e 100644 (file)
@@ -1332,6 +1332,12 @@ static int r300_startup(struct radeon_device *rdev)
                if (r)
                        return r;
        }
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1341,9 +1347,6 @@ static int r300_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_wb_init(rdev);
-       if (r)
-               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
@@ -1379,7 +1382,7 @@ int r300_resume(struct radeon_device *rdev)
 int r300_suspend(struct radeon_device *rdev)
 {
        r100_cp_disable(rdev);
-       r100_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
@@ -1391,7 +1394,7 @@ int r300_suspend(struct radeon_device *rdev)
 void r300_fini(struct radeon_device *rdev)
 {
        r100_cp_fini(rdev);
-       r100_wb_fini(rdev);
+       radeon_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
@@ -1484,7 +1487,7 @@ int r300_init(struct radeon_device *rdev)
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
-               r100_wb_fini(rdev);
+               radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
index 59f7bccc5be0a291dcf0fcfb9487080ea93d5693..c387346f93a9d5a924bc6aab769e92aebba8fe91 100644 (file)
@@ -248,6 +248,12 @@ static int r420_startup(struct radeon_device *rdev)
                        return r;
        }
        r420_pipes_init(rdev);
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -258,10 +264,6 @@ static int r420_startup(struct radeon_device *rdev)
                return r;
        }
        r420_cp_errata_init(rdev);
-       r = r100_wb_init(rdev);
-       if (r) {
-               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
-       }
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
@@ -302,7 +304,7 @@ int r420_suspend(struct radeon_device *rdev)
 {
        r420_cp_errata_fini(rdev);
        r100_cp_disable(rdev);
-       r100_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
@@ -314,7 +316,7 @@ int r420_suspend(struct radeon_device *rdev)
 void r420_fini(struct radeon_device *rdev)
 {
        r100_cp_fini(rdev);
-       r100_wb_fini(rdev);
+       radeon_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
@@ -418,7 +420,7 @@ int r420_init(struct radeon_device *rdev)
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
-               r100_wb_fini(rdev);
+               radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
index 1458dee902dd6124db7c11d3c74911220c7648de..3c8677f9e38550f05e9940fd12a728d8178a58c8 100644 (file)
@@ -181,6 +181,12 @@ static int r520_startup(struct radeon_device *rdev)
                if (r)
                        return r;
        }
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -190,9 +196,6 @@ static int r520_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_wb_init(rdev);
-       if (r)
-               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
@@ -295,7 +298,7 @@ int r520_init(struct radeon_device *rdev)
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
-               r100_wb_fini(rdev);
+               radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rv370_pcie_gart_fini(rdev);
index 7b65e4efe8af61e2df5404ea52c468fe8ee564db..33952a12f0a31a49986a248a39c38cb48cdf618c 100644 (file)
@@ -1608,8 +1608,11 @@ void r600_gpu_init(struct radeon_device *rdev)
        rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
        rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
-       tiling_config |= GROUP_SIZE(0);
-       rdev->config.r600.tiling_group_size = 256;
+       tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+       if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+               rdev->config.r600.tiling_group_size = 512;
+       else
+               rdev->config.r600.tiling_group_size = 256;
        tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
        if (tmp > 3) {
                tiling_config |= ROW_TILING(3);
@@ -1920,6 +1923,7 @@ void r600_cp_stop(struct radeon_device *rdev)
 {
        rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+       WREG32(SCRATCH_UMSK, 0);
 }
 
 int r600_init_microcode(struct radeon_device *rdev)
@@ -2152,7 +2156,7 @@ int r600_cp_resume(struct radeon_device *rdev)
 
        /* Set ring buffer size */
        rb_bufsz = drm_order(rdev->cp.ring_size / 8);
-       tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -2166,8 +2170,19 @@ int r600_cp_resume(struct radeon_device *rdev)
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
        WREG32(CP_RB_WPTR, 0);
-       WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
-       WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+
+       /* set the wb address whether it's enabled or not */
+       WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+       WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+       WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+       if (rdev->wb.enabled)
+               WREG32(SCRATCH_UMSK, 0xff);
+       else {
+               tmp |= RB_NO_UPDATE;
+               WREG32(SCRATCH_UMSK, 0);
+       }
+
        mdelay(1);
        WREG32(CP_RB_CNTL, tmp);
 
@@ -2219,9 +2234,10 @@ void r600_scratch_init(struct radeon_device *rdev)
        int i;
 
        rdev->scratch.num_reg = 7;
+       rdev->scratch.reg_base = SCRATCH_REG0;
        for (i = 0; i < rdev->scratch.num_reg; i++) {
                rdev->scratch.free[i] = true;
-               rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
+               rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
        }
 }
 
@@ -2265,88 +2281,34 @@ int r600_ring_test(struct radeon_device *rdev)
        return r;
 }
 
-void r600_wb_disable(struct radeon_device *rdev)
-{
-       int r;
-
-       WREG32(SCRATCH_UMSK, 0);
-       if (rdev->wb.wb_obj) {
-               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-               if (unlikely(r != 0))
-                       return;
-               radeon_bo_kunmap(rdev->wb.wb_obj);
-               radeon_bo_unpin(rdev->wb.wb_obj);
-               radeon_bo_unreserve(rdev->wb.wb_obj);
-       }
-}
-
-void r600_wb_fini(struct radeon_device *rdev)
-{
-       r600_wb_disable(rdev);
-       if (rdev->wb.wb_obj) {
-               radeon_bo_unref(&rdev->wb.wb_obj);
-               rdev->wb.wb = NULL;
-               rdev->wb.wb_obj = NULL;
-       }
-}
-
-int r600_wb_enable(struct radeon_device *rdev)
-{
-       int r;
-
-       if (rdev->wb.wb_obj == NULL) {
-               r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
-                               RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
-               if (r) {
-                       dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
-                       return r;
-               }
-               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-               if (unlikely(r != 0)) {
-                       r600_wb_fini(rdev);
-                       return r;
-               }
-               r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-                               &rdev->wb.gpu_addr);
-               if (r) {
-                       radeon_bo_unreserve(rdev->wb.wb_obj);
-                       dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-                       r600_wb_fini(rdev);
-                       return r;
-               }
-               r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-               radeon_bo_unreserve(rdev->wb.wb_obj);
-               if (r) {
-                       dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-                       r600_wb_fini(rdev);
-                       return r;
-               }
-       }
-       WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
-       WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
-       WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
-       WREG32(SCRATCH_UMSK, 0xff);
-       return 0;
-}
-
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
-       /* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */
-
-       radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
-       radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
-       /* wait for 3D idle clean */
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-       radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
-       /* Emit fence sequence & fire IRQ */
-       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-       radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
-       radeon_ring_write(rdev, fence->seq);
-       /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
-       radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
-       radeon_ring_write(rdev, RB_INT_STAT);
+       if (rdev->wb.use_event) {
+               u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
+                       (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+               /* EVENT_WRITE_EOP - flush caches, send int */
+               radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+               radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+               radeon_ring_write(rdev, addr & 0xffffffff);
+               radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+               radeon_ring_write(rdev, fence->seq);
+               radeon_ring_write(rdev, 0);
+       } else {
+               radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+               radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+               /* wait for 3D idle clean */
+               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+               radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+               /* Emit fence sequence & fire IRQ */
+               radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+               radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+               radeon_ring_write(rdev, fence->seq);
+               /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+               radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+               radeon_ring_write(rdev, RB_INT_STAT);
+       }
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
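With use_event set, the EVENT_WRITE_EOP packet above has the CP flush caches, write fence->seq into the writeback page and raise the CP EOP interrupt (source 181, which now calls radeon_fence_process() in the IH loop below). The matching read side could look like this sketch, assuming a hypothetical helper; the index mirrors the addr computation in r600_fence_ring_emit():

/* Hypothetical helper, not part of this patch. */
static u32 r600_fence_read(struct radeon_device *rdev)
{
	if (rdev->wb.use_event) {
		u32 index = R600_WB_EVENT_OFFSET +
			(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
		return le32_to_cpu(rdev->wb.wb[index / 4]);
	}
	return RREG32(rdev->fence_drv.scratch_reg);
}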
@@ -2428,19 +2390,12 @@ int r600_startup(struct radeon_device *rdev)
                rdev->asic->copy = NULL;
                dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }
-       /* pin copy shader into vram */
-       if (rdev->r600_blit.shader_obj) {
-               r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-               if (unlikely(r != 0))
-                       return r;
-               r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-                               &rdev->r600_blit.shader_gpu_addr);
-               radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-               if (r) {
-                       dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
-                       return r;
-               }
-       }
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -2459,8 +2414,7 @@ int r600_startup(struct radeon_device *rdev)
        r = r600_cp_resume(rdev);
        if (r)
                return r;
-       /* write back buffer are not vital so don't worry about failure */
-       r600_wb_enable(rdev);
+
        return 0;
 }
 
@@ -2519,7 +2473,7 @@ int r600_suspend(struct radeon_device *rdev)
        r600_cp_stop(rdev);
        rdev->cp.ready = false;
        r600_irq_suspend(rdev);
-       r600_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
        /* unpin shaders bo */
        if (rdev->r600_blit.shader_obj) {
@@ -2616,8 +2570,8 @@ int r600_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r600_cp_fini(rdev);
-               r600_wb_fini(rdev);
                r600_irq_fini(rdev);
+               radeon_wb_fini(rdev);
                radeon_irq_kms_fini(rdev);
                r600_pcie_gart_fini(rdev);
                rdev->accel_working = false;
@@ -2647,8 +2601,8 @@ void r600_fini(struct radeon_device *rdev)
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r600_cp_fini(rdev);
-       r600_wb_fini(rdev);
        r600_irq_fini(rdev);
+       radeon_wb_fini(rdev);
        radeon_irq_kms_fini(rdev);
        r600_pcie_gart_fini(rdev);
        radeon_agp_fini(rdev);
@@ -2983,10 +2937,13 @@ int r600_irq_init(struct radeon_device *rdev)
        ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
                      IH_WPTR_OVERFLOW_CLEAR |
                      (rb_bufsz << 1));
-       /* WPTR writeback, not yet */
-       /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
-       WREG32(IH_RB_WPTR_ADDR_LO, 0);
-       WREG32(IH_RB_WPTR_ADDR_HI, 0);
+
+       if (rdev->wb.enabled)
+               ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+       /* set the writeback address whether it's enabled or not */
+       WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+       WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
 
        WREG32(IH_RB_CNTL, ih_rb_cntl);
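Note that the WPTR address registers are programmed unconditionally so the hardware always has a valid target; only the IH_WPTR_WRITEBACK_ENABLE bit is gated on rdev->wb.enabled. Every writeback consumer in this series follows the same pattern, sketched here with a hypothetical helper:

/* Hypothetical helper, not in this patch: the GPU DMAs a 32-bit value
 * to wb.gpu_addr + byte_offset; the CPU polls the kmapped copy. */
static u32 radeon_wb_read(struct radeon_device *rdev, unsigned byte_offset)
{
	return le32_to_cpu(rdev->wb.wb[byte_offset / 4]);
}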
 
@@ -3070,6 +3027,7 @@ int r600_irq_set(struct radeon_device *rdev)
        if (rdev->irq.sw_int) {
                DRM_DEBUG("r600_irq_set: sw int\n");
                cp_int_cntl |= RB_INT_ENABLE;
+               cp_int_cntl |= TIME_STAMP_INT_ENABLE;
        }
        if (rdev->irq.crtc_vblank_int[0]) {
                DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3244,8 +3202,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
 {
        u32 wptr, tmp;
 
-       /* XXX use writeback */
-       wptr = RREG32(IH_RB_WPTR);
+       if (rdev->wb.enabled)
+               wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+       else
+               wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
                /* When a ring buffer overflow happens, start parsing interrupt
@@ -3433,6 +3393,7 @@ restart_ih:
                        break;
                case 181: /* CP EOP event */
                        DRM_DEBUG("IH: CP EOP\n");
+                       radeon_fence_process(rdev);
                        break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: CP EOP\n");
index 3473c00781ffaaac06cab0c520231a5a66a21111..8362974ef41ac9eac713f1e66377661aa7106c0b 100644 (file)
@@ -472,9 +472,10 @@ int r600_blit_init(struct radeon_device *rdev)
        u32 packet2s[16];
        int num_packet2s = 0;
 
-       /* don't reinitialize blit */
+       /* pin copy shader into vram if already initialized */
        if (rdev->r600_blit.shader_obj)
-               return 0;
+               goto done;
+
        mutex_init(&rdev->r600_blit.mutex);
        rdev->r600_blit.state_offset = 0;
 
@@ -532,6 +533,18 @@ int r600_blit_init(struct radeon_device *rdev)
        memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
        radeon_bo_kunmap(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+done:
+       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+                         &rdev->r600_blit.shader_gpu_addr);
+       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+       if (r) {
+               dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+               return r;
+       }
        rdev->mc.active_vram_size = rdev->mc.real_vram_size;
        return 0;
 }
@@ -554,7 +567,7 @@ void r600_blit_fini(struct radeon_device *rdev)
        radeon_bo_unref(&rdev->r600_blit.shader_obj);
 }
 
-int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev)
 {
        int r;
        r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
@@ -568,7 +581,7 @@ int r600_vb_ib_get(struct radeon_device *rdev)
        return 0;
 }
 
-void r600_vb_ib_put(struct radeon_device *rdev)
+static void r600_vb_ib_put(struct radeon_device *rdev)
 {
        radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
@@ -650,8 +663,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
                        int src_x = src_gpu_addr & 255;
                        int dst_x = dst_gpu_addr & 255;
                        int h = 1;
-                       src_gpu_addr = src_gpu_addr & ~255;
-                       dst_gpu_addr = dst_gpu_addr & ~255;
+                       src_gpu_addr = src_gpu_addr & ~255ULL;
+                       dst_gpu_addr = dst_gpu_addr & ~255ULL;
 
                        if (!src_x && !dst_x) {
                                h = (cur_size / max_bytes);
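The ULL suffix on these masks is a 64-bit correctness detail worth spelling out: a plain ~255 is a signed int that happens to sign-extend into the intended 64-bit mask, while an unsigned 32-bit variant would silently clear the upper address bits. Illustrative values only:

u64 addr = 0x123456789ULL;
u64 a = addr & ~255;    /* ~255 is int -256: sign-extends, happens to work */
u64 b = addr & ~255U;   /* 0x00000000FFFFFF00: clears bits 63:32, broken */
u64 c = addr & ~255ULL; /* explicit 64-bit mask: always correct */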
@@ -672,17 +685,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 
                        if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
                                WARN_ON(1);
-
-#if 0
-                               r600_vb_ib_put(rdev);
-
-                               r600_nomm_put_vb(dev);
-                               r600_nomm_get_vb(dev);
-                               if (!dev_priv->blit_vb)
-                                       return;
-                               set_shaders(dev);
-                               vb = r600_nomm_get_vb_ptr(dev);
-#endif
                        }
 
                        vb[0] = i2f(dst_x);
@@ -744,8 +746,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
                        int src_x = (src_gpu_addr & 255);
                        int dst_x = (dst_gpu_addr & 255);
                        int h = 1;
-                       src_gpu_addr = src_gpu_addr & ~255;
-                       dst_gpu_addr = dst_gpu_addr & ~255;
+                       src_gpu_addr = src_gpu_addr & ~255ULL;
+                       dst_gpu_addr = dst_gpu_addr & ~255ULL;
 
                        if (!src_x && !dst_x) {
                                h = (cur_size / max_bytes);
@@ -767,17 +769,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
                        if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
                                WARN_ON(1);
                        }
-#if 0
-                       if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
-                               r600_nomm_put_vb(dev);
-                               r600_nomm_get_vb(dev);
-                               if (!rdev->blit_vb)
-                                       return;
-
-                               set_shaders(dev);
-                               vb = r600_nomm_get_vb_ptr(dev);
-                       }
-#endif
 
                        vb[0] = i2f(dst_x / 4);
                        vb[1] = 0;
index 250a3a918193e9821aa6f5836d463553cf6f903d..7b294c127c5ff7c5000174390bb600767870eff5 100644 (file)
@@ -170,6 +170,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
        struct r600_cs_track *track = p->track;
        u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
        volatile u32 *ib = p->ib->ptr;
+       unsigned array_mode;
 
        if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
                dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
@@ -185,12 +186,12 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
        /* pitch is the number of 8x8 tiles per row */
        pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
        slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
-       height = size / (pitch * 8 * bpe);
+       slice_tile_max *= 64;
+       height = slice_tile_max / (pitch * 8);
        if (height > 8192)
                height = 8192;
-       if (height > 7)
-               height &= ~0x7;
-       switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
+       array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+       switch (array_mode) {
        case V_0280A0_ARRAY_LINEAR_GENERAL:
                /* technically height & 0x7 */
                break;
@@ -214,6 +215,9 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
                                 __func__, __LINE__, pitch);
                        return -EINVAL;
                }
+               /* avoid breaking userspace */
+               if (height > 7)
+                       height &= ~0x7;
                if (!IS_ALIGNED(height, 8)) {
                        dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
                                 __func__, __LINE__, height);
@@ -222,13 +226,13 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
                break;
        case V_0280A0_ARRAY_2D_TILED_THIN1:
                pitch_align = max((u32)track->nbanks,
-                                 (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks));
+                                 (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8;
                if (!IS_ALIGNED(pitch, pitch_align)) {
                        dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
                                __func__, __LINE__, pitch);
                        return -EINVAL;
                }
-               if (!IS_ALIGNED((height / 8), track->nbanks)) {
+               if (!IS_ALIGNED((height / 8), track->npipes)) {
                        dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
                                 __func__, __LINE__, height);
                        return -EINVAL;
@@ -243,8 +247,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
        /* check offset */
        tmp = height * pitch * 8 * bpe;
        if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
-               dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
-               return -EINVAL;
+               if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
+                       /* the initial DDX occasionally does bad things with the CB size:
+                        * it rounds height up too far for slice tile max, but the BO is smaller */
+                       tmp = (height - 7) * 8 * bpe;
+                       if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+                               dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+                               return -EINVAL;
+                       }
+               } else {
+                       dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+                       return -EINVAL;
+               }
        }
        if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
                dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
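For reference, the size checked above is rebuilt from a tile-counted pitch: pitch is stored as 8x8-pixel tiles per row, so the byte size is height * pitch * 8 * bpe. A worked example with illustrative numbers:

/* Illustrative only: a 1024x768 linear colour buffer, 4 bytes/element. */
u32 pitch  = 1024 / 8;                  /* 128 tiles per row */
u32 height = 768, bpe = 4;
u32 bytes  = height * pitch * 8 * bpe;  /* 768 * 128 * 8 * 4 = 3145728 */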
@@ -361,13 +375,13 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
                                break;
                        case V_028010_ARRAY_2D_TILED_THIN1:
                                pitch_align = max((u32)track->nbanks,
-                                                 (u32)(((track->group_size / 8) / bpe) * track->nbanks));
+                                                 (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
                                if (!IS_ALIGNED(pitch, pitch_align)) {
                                        dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
                                                 __func__, __LINE__, pitch);
                                        return -EINVAL;
                                }
-                               if ((height / 8) & (track->nbanks - 1)) {
+                               if (!IS_ALIGNED((height / 8), track->npipes)) {
                                        dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
                                                 __func__, __LINE__, height);
                                        return -EINVAL;
@@ -1138,7 +1152,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 i
                break;
        case V_038000_ARRAY_2D_TILED_THIN1:
                pitch_align = max((u32)track->nbanks,
-                                 (u32)(((track->group_size / 8) / bpe) * track->nbanks));
+                                 (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
                if (!IS_ALIGNED(pitch, pitch_align)) {
                        dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
                                __func__, __LINE__, pitch);
index 858a1920c0d76bb2c916c847c8b2751058d7e9d5..966a793e225b79a5bfdf3f8bf4c1f413ee2b3dc5 100644 (file)
 #define        VGT_VERTEX_REUSE_BLOCK_CNTL                     0x28C58
 #define                VTX_REUSE_DEPTH_MASK                            0x000000FF
 #define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
 #       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
 
 #define VM_CONTEXT0_CNTL                               0x1410
 #define                PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
 #define        PACKET3_COND_WRITE                              0x45
 #define        PACKET3_EVENT_WRITE                             0x46
+#define                EVENT_TYPE(x)                           ((x) << 0)
+#define                EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+                * 1 - ZPASS_DONE
+                * 2 - SAMPLE_PIPELINESTAT
+                * 3 - SAMPLE_STREAMOUTSTAT*
+                * 4 - *S_PARTIAL_FLUSH
+                * 5 - TS events
+                */
 #define        PACKET3_EVENT_WRITE_EOP                         0x47
+#define                DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+                * 1 - send low 32bit data
+                * 2 - send 64bit data
+                * 3 - send 64bit counter value
+                */
+#define                INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+                * 1 - interrupt only (DATA_SEL = 0)
+                * 2 - interrupt when data write is confirmed
+                */
 #define        PACKET3_ONE_REG_WRITE                           0x57
 #define        PACKET3_SET_CONFIG_REG                          0x68
 #define                PACKET3_SET_CONFIG_REG_OFFSET                   0x00008000
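Putting the new macros together: an EVENT_WRITE_EOP packet carries five payload dwords. This sketch simply mirrors the fence path added by this patch in r600_fence_ring_emit(); the function name is hypothetical and addr/seq stand in for the writeback address and fence sequence:

static void r600_emit_eop_fence(struct radeon_device *rdev, u64 addr, u32 seq)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(rdev, addr & 0xffffffff);             /* ADDRESS_LO */
	radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) |
				DATA_SEL(1) | INT_SEL(2));      /* ADDRESS_HI + selects */
	radeon_ring_write(rdev, seq);                           /* DATA_LO */
	radeon_ring_write(rdev, 0);                             /* DATA_HI */
}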
index 9ff38c99a6ea0e568f2567c0a7ad34b06e60e512..73f600d39ad4bb79c4dfdeddee97972a8caeaec0 100644 (file)
@@ -88,7 +88,6 @@ extern int radeon_benchmarking;
 extern int radeon_testing;
 extern int radeon_connector_table;
 extern int radeon_tv;
-extern int radeon_new_pll;
 extern int radeon_audio;
 extern int radeon_disp_priority;
 extern int radeon_hw_i2c;
@@ -366,6 +365,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev);
  */
 struct radeon_scratch {
        unsigned                num_reg;
+       uint32_t                reg_base;
        bool                    free[32];
        uint32_t                reg[32];
 };
@@ -594,8 +594,15 @@ struct radeon_wb {
        struct radeon_bo        *wb_obj;
        volatile uint32_t       *wb;
        uint64_t                gpu_addr;
+       bool                    enabled;
+       bool                    use_event;
 };
 
+#define RADEON_WB_SCRATCH_OFFSET 0
+#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define R600_WB_IH_WPTR_OFFSET   2048
+#define R600_WB_EVENT_OFFSET     3072
+
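The four offsets partition the single writeback page (RADEON_GPU_PAGE_SIZE, 4096 bytes); consumers index wb.wb, a u32 array, at byte_offset / 4. Sketch of the resulting layout:

/*
 *     0 +----------------------------+ RADEON_WB_SCRATCH_OFFSET
 *       | CP scratch register mirror | (registers selected by SCRATCH_UMSK)
 *  1024 +----------------------------+ RADEON_WB_CP_RPTR_OFFSET
 *       | CP ring read pointer       |
 *  2048 +----------------------------+ R600_WB_IH_WPTR_OFFSET
 *       | IH ring write pointer      |
 *  3072 +----------------------------+ R600_WB_EVENT_OFFSET
 *       | EVENT_WRITE_EOP fence data |
 *  4096 +----------------------------+
 */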
 /**
  * struct radeon_pm - power management data
  * @max_bandwidth:      maximum bandwidth the gpu has (MByte/s)
@@ -1124,6 +1131,12 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
 void r600_kms_blit_copy(struct radeon_device *rdev,
                        u64 src_gpu_addr, u64 dst_gpu_addr,
                        int size_bytes);
+/* evergreen blit */
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+                            u64 src_gpu_addr, u64 dst_gpu_addr,
+                            int size_bytes);
 
 static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
 {
@@ -1341,6 +1354,9 @@ extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
 extern void radeon_update_display_priority(struct radeon_device *rdev);
 extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
+extern void radeon_wb_fini(struct radeon_device *rdev);
+extern int radeon_wb_init(struct radeon_device *rdev);
+extern void radeon_wb_disable(struct radeon_device *rdev);
 extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
@@ -1425,9 +1441,6 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev);
 extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
 extern int r600_ib_test(struct radeon_device *rdev);
 extern int r600_ring_test(struct radeon_device *rdev);
-extern void r600_wb_fini(struct radeon_device *rdev);
-extern int r600_wb_enable(struct radeon_device *rdev);
-extern void r600_wb_disable(struct radeon_device *rdev);
 extern void r600_scratch_init(struct radeon_device *rdev);
 extern int r600_blit_init(struct radeon_device *rdev);
 extern void r600_blit_fini(struct radeon_device *rdev);
@@ -1465,6 +1478,8 @@ extern void r700_cp_stop(struct radeon_device *rdev);
 extern void r700_cp_fini(struct radeon_device *rdev);
 extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
 extern int evergreen_irq_set(struct radeon_device *rdev);
+extern int evergreen_blit_init(struct radeon_device *rdev);
+extern void evergreen_blit_fini(struct radeon_device *rdev);
 
 /* radeon_acpi.c */ 
 #if defined(CONFIG_ACPI) 
index 25e1dd1977917ad4151cdaf599add440ef14d625..64fb89ecbf74728893bccb0dfcf442688c904688 100644 (file)
@@ -726,9 +726,9 @@ static struct radeon_asic evergreen_asic = {
        .get_vblank_counter = &evergreen_get_vblank_counter,
        .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &evergreen_cs_parse,
-       .copy_blit = NULL,
-       .copy_dma = NULL,
-       .copy = NULL,
+       .copy_blit = &evergreen_copy_blit,
+       .copy_dma = &evergreen_copy_blit,
+       .copy = &evergreen_copy_blit,
        .get_engine_clock = &radeon_atom_get_engine_clock,
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .get_memory_clock = &radeon_atom_get_memory_clock,
index a5aff755f0d2e185dbdd4a1efd541dea5dae78fa..740988244143ce2105ba6fb04964a87c9eff644e 100644 (file)
@@ -108,9 +108,6 @@ void r100_irq_disable(struct radeon_device *rdev);
 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
 void r100_vram_init_sizes(struct radeon_device *rdev);
-void r100_wb_disable(struct radeon_device *rdev);
-void r100_wb_fini(struct radeon_device *rdev);
-int r100_wb_init(struct radeon_device *rdev);
 int r100_cp_reset(struct radeon_device *rdev);
 void r100_vga_render_disable(struct radeon_device *rdev);
 void r100_restore_sanity(struct radeon_device *rdev);
@@ -257,11 +254,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int r600_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
-int r600_copy_dma(struct radeon_device *rdev,
-                 uint64_t src_offset,
-                 uint64_t dst_offset,
-                 unsigned num_pages,
-                 struct radeon_fence *fence);
 int r600_irq_process(struct radeon_device *rdev);
 int r600_irq_set(struct radeon_device *rdev);
 bool r600_gpu_is_lockup(struct radeon_device *rdev);
@@ -307,6 +299,9 @@ int evergreen_resume(struct radeon_device *rdev);
 bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
 int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
+int evergreen_copy_blit(struct radeon_device *rdev,
+                       uint64_t src_offset, uint64_t dst_offset,
+                       unsigned num_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
index 8e43ddae70cc27d3c37d472561527432cc51dd53..04cac7ec90397fa7f4f46f9d966fb41b2d3c5368 100644 (file)
@@ -1112,8 +1112,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                         * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
                         * family.
                         */
-                       if (!radeon_new_pll)
-                               p1pll->pll_out_min = 64800;
+                       p1pll->pll_out_min = 64800;
                }
 
                p1pll->pll_in_min =
@@ -1277,36 +1276,27 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
        return false;
 }
 
-static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
-                                                         radeon_encoder
-                                                         *encoder,
-                                                         int id)
+bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+                                     struct radeon_atom_ss *ss,
+                                     int id)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct radeon_device *rdev = dev->dev_private;
        struct radeon_mode_info *mode_info = &rdev->mode_info;
        int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
-       uint16_t data_offset;
+       uint16_t data_offset, size;
        struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
        uint8_t frev, crev;
-       struct radeon_atom_ss *ss = NULL;
-       int i;
-
-       if (id > ATOM_MAX_SS_ENTRY)
-               return NULL;
+       int i, num_indices;
 
-       if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+       memset(ss, 0, sizeof(struct radeon_atom_ss));
+       if (atom_parse_data_header(mode_info->atom_context, index, &size,
                                   &frev, &crev, &data_offset)) {
                ss_info =
                        (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
 
-               ss =
-                   kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
-
-               if (!ss)
-                       return NULL;
+               num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+                       sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
 
-               for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+               for (i = 0; i < num_indices; i++) {
                        if (ss_info->asSS_Info[i].ucSS_Id == id) {
                                ss->percentage =
                                        le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
@@ -1315,11 +1305,88 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
                                ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
                                ss->range = ss_info->asSS_Info[i].ucSS_Range;
                                ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
-                               break;
+                               return true;
+                       }
+               }
+       }
+       return false;
+}
+
+union asic_ss_info {
+       struct _ATOM_ASIC_INTERNAL_SS_INFO info;
+       struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
+       struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
+};
+
+bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+                                     struct radeon_atom_ss *ss,
+                                     int id, u32 clock)
+{
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+       uint16_t data_offset, size;
+       union asic_ss_info *ss_info;
+       uint8_t frev, crev;
+       int i, num_indices;
+
+       memset(ss, 0, sizeof(struct radeon_atom_ss));
+       if (atom_parse_data_header(mode_info->atom_context, index, &size,
+                                  &frev, &crev, &data_offset)) {
+
+               ss_info =
+                       (union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
+
+               switch (frev) {
+               case 1:
+                       num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+                               sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+
+                       for (i = 0; i < num_indices; i++) {
+                               if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
+                                   (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
+                                       ss->percentage =
+                                               le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+                                       ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+                                       ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+                                       return true;
+                               }
+                       }
+                       break;
+               case 2:
+                       num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+                               sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+                       for (i = 0; i < num_indices; i++) {
+                               if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
+                                   (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
+                                       ss->percentage =
+                                               le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+                                       ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+                                       ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+                                       return true;
+                               }
                        }
+                       break;
+               case 3:
+                       num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+                               sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+                       for (i = 0; i < num_indices; i++) {
+                               if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
+                                   (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
+                                       ss->percentage =
+                                               le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+                                       ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+                                       ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+                                       return true;
+                               }
+                       }
+                       break;
+               default:
+                       DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
+                       break;
                }
+
        }
-       return ss;
+       return false;
 }
 
 union lvds_info {
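The spread spectrum refactor replaces the kzalloc-and-return-pointer pattern with a caller-owned struct plus a boolean result, and sizes the table walk from the header instead of the fixed ATOM_MAX_SS_ENTRY cap. A usage sketch (the lvds pointer is illustrative; the id is the lcd_ss_id saved further below):

struct radeon_atom_ss ss;

if (radeon_atombios_get_ppll_ss_info(rdev, &ss, lvds->lcd_ss_id)) {
	/* ss.percentage, ss.type, ss.step, ss.delay, ss.range and
	 * ss.refdiv are valid here; nothing to kfree() on either path. */
}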
@@ -1371,7 +1438,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                        le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
                lvds->panel_pwr_delay =
                    le16_to_cpu(lvds_info->info.usOffDelayInMs);
-               lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+               lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
 
                misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
                if (misc & ATOM_VSYNC_POLARITY)
@@ -1388,19 +1455,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                /* set crtc values */
                drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
 
-               lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
-
-               if (ASIC_IS_AVIVO(rdev)) {
-                       if (radeon_new_pll == 0)
-                               lvds->pll_algo = PLL_ALGO_LEGACY;
-                       else
-                               lvds->pll_algo = PLL_ALGO_NEW;
-               } else {
-                       if (radeon_new_pll == 1)
-                               lvds->pll_algo = PLL_ALGO_NEW;
-                       else
-                               lvds->pll_algo = PLL_ALGO_LEGACY;
-               }
+               lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
 
                encoder->native_mode = lvds->native_mode;
 
index ecc1a8fafbfd3eb3c12c0c4d45b4b091a1bee03b..4dac4b0a02eea1928837c7009f88e84f227a81f5 100644 (file)
@@ -326,6 +326,34 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
                }
        }
 
+       if (property == rdev->mode_info.underscan_hborder_property) {
+               /* need to find digital encoder on connector */
+               encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+               if (!encoder)
+                       return 0;
+
+               radeon_encoder = to_radeon_encoder(encoder);
+
+               if (radeon_encoder->underscan_hborder != val) {
+                       radeon_encoder->underscan_hborder = val;
+                       radeon_property_change_mode(&radeon_encoder->base);
+               }
+       }
+
+       if (property == rdev->mode_info.underscan_vborder_property) {
+               /* need to find digital encoder on connector */
+               encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+               if (!encoder)
+                       return 0;
+
+               radeon_encoder = to_radeon_encoder(encoder);
+
+               if (radeon_encoder->underscan_vborder != val) {
+                       radeon_encoder->underscan_vborder = val;
+                       radeon_property_change_mode(&radeon_encoder->base);
+               }
+       }
+
        if (property == rdev->mode_info.tv_std_property) {
                encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
                if (!encoder) {
@@ -635,6 +663,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
                                ret = connector_status_connected;
                }
        } else {
+
+               /* if we aren't forcing don't do destructive polling */
+               if (!force)
+                       return connector->status;
+
                if (radeon_connector->dac_load_detect && encoder) {
                        encoder_funcs = encoder->helper_private;
                        ret = encoder_funcs->detect(encoder, connector);
@@ -822,6 +855,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
        if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
                goto out;
 
+       if (!force) {
+               ret = connector->status;
+               goto out;
+       }
+
        /* find analog encoder */
        if (radeon_connector->dac_load_detect) {
                for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
@@ -1153,10 +1191,17 @@ radeon_add_atom_connector(struct drm_device *dev,
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.coherent_mode_property,
                                              1);
-               if (ASIC_IS_AVIVO(rdev))
+               if (ASIC_IS_AVIVO(rdev)) {
                        drm_connector_attach_property(&radeon_connector->base,
                                                      rdev->mode_info.underscan_property,
                                                      UNDERSCAN_AUTO);
+                       drm_connector_attach_property(&radeon_connector->base,
+                                                     rdev->mode_info.underscan_hborder_property,
+                                                     0);
+                       drm_connector_attach_property(&radeon_connector->base,
+                                                     rdev->mode_info.underscan_vborder_property,
+                                                     0);
+               }
                if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                        radeon_connector->dac_load_detect = true;
                        drm_connector_attach_property(&radeon_connector->base,
@@ -1181,10 +1226,17 @@ radeon_add_atom_connector(struct drm_device *dev,
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.coherent_mode_property,
                                              1);
-               if (ASIC_IS_AVIVO(rdev))
+               if (ASIC_IS_AVIVO(rdev)) {
                        drm_connector_attach_property(&radeon_connector->base,
                                                      rdev->mode_info.underscan_property,
                                                      UNDERSCAN_AUTO);
+                       drm_connector_attach_property(&radeon_connector->base,
+                                                     rdev->mode_info.underscan_hborder_property,
+                                                     0);
+                       drm_connector_attach_property(&radeon_connector->base,
+                                                     rdev->mode_info.underscan_vborder_property,
+                                                     0);
+               }
                subpixel_order = SubPixelHorizontalRGB;
                break;
        case DRM_MODE_CONNECTOR_DisplayPort:
@@ -1212,10 +1264,17 @@ radeon_add_atom_connector(struct drm_device *dev,
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.coherent_mode_property,
                                              1);
-               if (ASIC_IS_AVIVO(rdev))
+               if (ASIC_IS_AVIVO(rdev)) {
                        drm_connector_attach_property(&radeon_connector->base,
                                                      rdev->mode_info.underscan_property,
                                                      UNDERSCAN_AUTO);
+                       drm_connector_attach_property(&radeon_connector->base,
+                                                     rdev->mode_info.underscan_hborder_property,
+                                                     0);
+                       drm_connector_attach_property(&radeon_connector->base,
+                                                     rdev->mode_info.underscan_vborder_property,
+                                                     0);
+               }
                break;
        case DRM_MODE_CONNECTOR_SVIDEO:
        case DRM_MODE_CONNECTOR_Composite:
index 3eef567b0421ae71826abd77ac3bc035a5ec1c33..017ac54920fb4f3fb58f7c0f9a57e1efb46450f0 100644 (file)
@@ -118,22 +118,25 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
 }
 
 static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                             uint32_t gpu_addr)
+                             uint64_t gpu_addr)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct radeon_device *rdev = crtc->dev->dev_private;
 
        if (ASIC_IS_DCE4(rdev)) {
-               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
-               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+                      upper_32_bits(gpu_addr));
+               WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+                      gpu_addr & 0xffffffff);
        } else if (ASIC_IS_AVIVO(rdev)) {
                if (rdev->family >= CHIP_RV770) {
                        if (radeon_crtc->crtc_id)
-                               WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
+                               WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
                        else
-                               WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0);
+                               WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
                }
-               WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+               WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+                      gpu_addr & 0xffffffff);
        } else {
                radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
                /* offset is from DISP(2)_BASE_ADDRESS */
index 256d204a6d24226a93f68b5d4d097fc4b52739c7..8adfedfe547f6b6455676deaa26dada2592ff6a5 100644 (file)
@@ -117,9 +117,10 @@ void radeon_scratch_init(struct radeon_device *rdev)
        } else {
                rdev->scratch.num_reg = 7;
        }
+       rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
        for (i = 0; i < rdev->scratch.num_reg; i++) {
                rdev->scratch.free[i] = true;
-               rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
+               rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
        }
 }
 
@@ -149,6 +150,86 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
        }
 }
 
+void radeon_wb_disable(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->wb.wb_obj) {
+               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+               if (unlikely(r != 0))
+                       return;
+               radeon_bo_kunmap(rdev->wb.wb_obj);
+               radeon_bo_unpin(rdev->wb.wb_obj);
+               radeon_bo_unreserve(rdev->wb.wb_obj);
+       }
+       rdev->wb.enabled = false;
+}
+
+void radeon_wb_fini(struct radeon_device *rdev)
+{
+       radeon_wb_disable(rdev);
+       if (rdev->wb.wb_obj) {
+               radeon_bo_unref(&rdev->wb.wb_obj);
+               rdev->wb.wb = NULL;
+               rdev->wb.wb_obj = NULL;
+       }
+}
+
+int radeon_wb_init(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->wb.wb_obj == NULL) {
+               r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+                               RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
+               if (r) {
+                       dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+                       return r;
+               }
+       }
+       r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+       if (unlikely(r != 0)) {
+               radeon_wb_fini(rdev);
+               return r;
+       }
+       r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+                         &rdev->wb.gpu_addr);
+       if (r) {
+               radeon_bo_unreserve(rdev->wb.wb_obj);
+               dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+               radeon_wb_fini(rdev);
+               return r;
+       }
+       r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+       radeon_bo_unreserve(rdev->wb.wb_obj);
+       if (r) {
+               dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+               radeon_wb_fini(rdev);
+               return r;
+       }
+
+       /* disable event_write fences */
+       rdev->wb.use_event = false;
+       /* disabled via module param */
+       if (radeon_no_wb == 1)
+               rdev->wb.enabled = false;
+       else {
+               /* often unreliable on AGP */
+               if (rdev->flags & RADEON_IS_AGP) {
+                       rdev->wb.enabled = false;
+               } else {
+                       rdev->wb.enabled = true;
+                       /* event_write fences are only available on r600+ */
+                       if (rdev->family >= CHIP_R600)
+                               rdev->wb.use_event = true;
+               }
+       }
+
+       dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
+
+       return 0;
+}
+
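Because the CP resume paths set SCRATCH_UMSK to 0xff when writeback is on, the CP mirrors SCRATCH_REG0..7 into the page at RADEON_WB_SCRATCH_OFFSET, so scratch-based values can also be fetched from memory. A sketch, assuming a hypothetical helper:

/* Hypothetical helper, not part of this patch: read a CP-mirrored
 * scratch register from the writeback page. */
static u32 radeon_scratch_read_wb(struct radeon_device *rdev, u32 reg)
{
	u32 index = RADEON_WB_SCRATCH_OFFSET + (reg - rdev->scratch.reg_base);
	return le32_to_cpu(rdev->wb.wb[index / 4]);
}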
 /**
  * radeon_vram_location - try to find VRAM location
  * @rdev: radeon device structure holding all necessary information
index b92d2f2fcbed6a8bd472ce9f4b936aa82f309278..0383631da69c42db417f27f6b87ddf58f19eb24e 100644 (file)
@@ -454,13 +454,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
        return n;
 }
 
-static void radeon_compute_pll_legacy(struct radeon_pll *pll,
-                                     uint64_t freq,
-                                     uint32_t *dot_clock_p,
-                                     uint32_t *fb_div_p,
-                                     uint32_t *frac_fb_div_p,
-                                     uint32_t *ref_div_p,
-                                     uint32_t *post_div_p)
+void radeon_compute_pll(struct radeon_pll *pll,
+                       uint64_t freq,
+                       uint32_t *dot_clock_p,
+                       uint32_t *fb_div_p,
+                       uint32_t *frac_fb_div_p,
+                       uint32_t *ref_div_p,
+                       uint32_t *post_div_p)
 {
        uint32_t min_ref_div = pll->min_ref_div;
        uint32_t max_ref_div = pll->max_ref_div;
@@ -513,7 +513,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
                max_fractional_feed_div = pll->max_frac_feedback_div;
        }
 
-       for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+       for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
                uint32_t ref_div;
 
                if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
@@ -631,214 +631,6 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
        *post_div_p = best_post_div;
 }
 
-static bool
-calc_fb_div(struct radeon_pll *pll,
-           uint32_t freq,
-            uint32_t post_div,
-            uint32_t ref_div,
-            uint32_t *fb_div,
-            uint32_t *fb_div_frac)
-{
-       fixed20_12 feedback_divider, a, b;
-       u32 vco_freq;
-
-       vco_freq = freq * post_div;
-       /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
-       a.full = dfixed_const(pll->reference_freq);
-       feedback_divider.full = dfixed_const(vco_freq);
-       feedback_divider.full = dfixed_div(feedback_divider, a);
-       a.full = dfixed_const(ref_div);
-       feedback_divider.full = dfixed_mul(feedback_divider, a);
-
-       if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
-               /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
-               a.full = dfixed_const(10);
-               feedback_divider.full = dfixed_mul(feedback_divider, a);
-               feedback_divider.full += dfixed_const_half(0);
-               feedback_divider.full = dfixed_floor(feedback_divider);
-               feedback_divider.full = dfixed_div(feedback_divider, a);
-
-               /* *fb_div = floor(feedback_divider); */
-               a.full = dfixed_floor(feedback_divider);
-               *fb_div = dfixed_trunc(a);
-               /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
-               a.full = dfixed_const(10);
-               b.full = dfixed_mul(feedback_divider, a);
-
-               feedback_divider.full = dfixed_floor(feedback_divider);
-               feedback_divider.full = dfixed_mul(feedback_divider, a);
-               feedback_divider.full = b.full - feedback_divider.full;
-               *fb_div_frac = dfixed_trunc(feedback_divider);
-       } else {
-               /* *fb_div = floor(feedback_divider + 0.5); */
-               feedback_divider.full += dfixed_const_half(0);
-               feedback_divider.full = dfixed_floor(feedback_divider);
-
-               *fb_div = dfixed_trunc(feedback_divider);
-               *fb_div_frac = 0;
-       }
-
-       if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
-               return false;
-       else
-               return true;
-}
-
-static bool
-calc_fb_ref_div(struct radeon_pll *pll,
-               uint32_t freq,
-               uint32_t post_div,
-               uint32_t *fb_div,
-                uint32_t *fb_div_frac,
-                uint32_t *ref_div)
-{
-       fixed20_12 ffreq, max_error, error, pll_out, a;
-       u32 vco;
-       u32 pll_out_min, pll_out_max;
-
-       if (pll->flags & RADEON_PLL_IS_LCD) {
-               pll_out_min = pll->lcd_pll_out_min;
-               pll_out_max = pll->lcd_pll_out_max;
-       } else {
-               pll_out_min = pll->pll_out_min;
-               pll_out_max = pll->pll_out_max;
-       }
-
-       ffreq.full = dfixed_const(freq);
-       /* max_error = ffreq * 0.0025; */
-       a.full = dfixed_const(400);
-       max_error.full = dfixed_div(ffreq, a);
-
-       for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
-               if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
-                       vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
-                       vco = vco / ((*ref_div) * 10);
-
-                       if ((vco < pll_out_min) || (vco > pll_out_max))
-                               continue;
-
-                       /* pll_out = vco / post_div; */
-                       a.full = dfixed_const(post_div);
-                       pll_out.full = dfixed_const(vco);
-                       pll_out.full = dfixed_div(pll_out, a);
-
-                       if (pll_out.full >= ffreq.full) {
-                               error.full = pll_out.full - ffreq.full;
-                               if (error.full <= max_error.full)
-                                       return true;
-                       }
-               }
-       }
-       return false;
-}
-
-static void radeon_compute_pll_new(struct radeon_pll *pll,
-                                  uint64_t freq,
-                                  uint32_t *dot_clock_p,
-                                  uint32_t *fb_div_p,
-                                  uint32_t *frac_fb_div_p,
-                                  uint32_t *ref_div_p,
-                                  uint32_t *post_div_p)
-{
-       u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
-       u32 best_freq = 0, vco_frequency;
-       u32 pll_out_min, pll_out_max;
-
-       if (pll->flags & RADEON_PLL_IS_LCD) {
-               pll_out_min = pll->lcd_pll_out_min;
-               pll_out_max = pll->lcd_pll_out_max;
-       } else {
-               pll_out_min = pll->pll_out_min;
-               pll_out_max = pll->pll_out_max;
-       }
-
-       /* freq = freq / 10; */
-       do_div(freq, 10);
-
-       if (pll->flags & RADEON_PLL_USE_POST_DIV) {
-               post_div = pll->post_div;
-               if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
-                       goto done;
-
-               vco_frequency = freq * post_div;
-               if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
-                       goto done;
-
-               if (pll->flags & RADEON_PLL_USE_REF_DIV) {
-                       ref_div = pll->reference_div;
-                       if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
-                               goto done;
-                       if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
-                               goto done;
-               }
-       } else {
-               for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
-                       if (pll->flags & RADEON_PLL_LEGACY) {
-                               if ((post_div == 5) ||
-                                   (post_div == 7) ||
-                                   (post_div == 9) ||
-                                   (post_div == 10) ||
-                                   (post_div == 11))
-                                       continue;
-                       }
-
-                       if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
-                               continue;
-
-                       vco_frequency = freq * post_div;
-                       if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
-                               continue;
-                       if (pll->flags & RADEON_PLL_USE_REF_DIV) {
-                               ref_div = pll->reference_div;
-                               if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
-                                       goto done;
-                               if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
-                                       break;
-                       } else {
-                               if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
-                                       break;
-                       }
-               }
-       }
-
-       best_freq = pll->reference_freq * 10 * fb_div;
-       best_freq += pll->reference_freq * fb_div_frac;
-       best_freq = best_freq / (ref_div * post_div);
-
-done:
-       if (best_freq == 0)
-               DRM_ERROR("Couldn't find valid PLL dividers\n");
-
-       *dot_clock_p = best_freq / 10;
-       *fb_div_p = fb_div;
-       *frac_fb_div_p = fb_div_frac;
-       *ref_div_p = ref_div;
-       *post_div_p = post_div;
-
-       DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
-}
-
-void radeon_compute_pll(struct radeon_pll *pll,
-                       uint64_t freq,
-                       uint32_t *dot_clock_p,
-                       uint32_t *fb_div_p,
-                       uint32_t *frac_fb_div_p,
-                       uint32_t *ref_div_p,
-                       uint32_t *post_div_p)
-{
-       switch (pll->algo) {
-       case PLL_ALGO_NEW:
-               radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
-                                      frac_fb_div_p, ref_div_p, post_div_p);
-               break;
-       case PLL_ALGO_LEGACY:
-       default:
-               radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
-                                         frac_fb_div_p, ref_div_p, post_div_p);
-               break;
-       }
-}
-
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -1002,6 +794,24 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
                                      radeon_underscan_enum_list[i].name);
        }
 
+       rdev->mode_info.underscan_hborder_property =
+               drm_property_create(rdev->ddev,
+                                       DRM_MODE_PROP_RANGE,
+                                       "underscan hborder", 2);
+       if (!rdev->mode_info.underscan_hborder_property)
+               return -ENOMEM;
+       rdev->mode_info.underscan_hborder_property->values[0] = 0;
+       rdev->mode_info.underscan_hborder_property->values[1] = 128;
+
+       rdev->mode_info.underscan_vborder_property =
+               drm_property_create(rdev->ddev,
+                                       DRM_MODE_PROP_RANGE,
+                                       "underscan vborder", 2);
+       if (!rdev->mode_info.underscan_vborder_property)
+               return -ENOMEM;
+       rdev->mode_info.underscan_vborder_property->values[0] = 0;
+       rdev->mode_info.underscan_vborder_property->values[1] = 128;
+
        return 0;
 }
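
Both properties above are plain DRM range properties: the two values act as [min, max], so userspace may request any border width between 0 and 128 pixels. A hedged sketch of how a connector would expose one of them (the attach call is assumed from the DRM core of this era and is not part of this hunk):

	/* expose the horizontal border property on a connector, default 0 */
	drm_connector_attach_property(connector,
				      rdev->mode_info.underscan_hborder_property,
				      0);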
 
@@ -1159,8 +969,14 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                             ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
                              drm_detect_hdmi_monitor(radeon_connector->edid) &&
                              is_hdtv_mode(mode)))) {
-                               radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
-                               radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
+                               if (radeon_encoder->underscan_hborder != 0)
+                                       radeon_crtc->h_border = radeon_encoder->underscan_hborder;
+                               else
+                                       radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
+                               if (radeon_encoder->underscan_vborder != 0)
+                                       radeon_crtc->v_border = radeon_encoder->underscan_vborder;
+                               else
+                                       radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
                                radeon_crtc->rmx_type = RMX_FULL;
                                src_v = crtc->mode.vdisplay;
                                dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
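
When no explicit border is set, the fallback above derives one from the mode geometry. A minimal sketch of that arithmetic (hypothetical helper, not part of the patch):

	/* (pixels / 32) + 16 per side: 1920 -> 76, 1080 -> 49, so a
	 * 1920x1080 HDMI mode gets scaled down to 1768x982 */
	static unsigned int default_underscan_border(unsigned int pixels)
	{
		return (pixels >> 5) + 16;
	}
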
@@ -1195,3 +1011,156 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
        }
        return true;
 }
+
+/*
+ * Retrieve current video scanout position of crtc on a given gpu.
+ *
+ * \param rdev Device to query.
+ * \param crtc Crtc to query.
+ * \param *vpos Location where vertical scanout position should be stored.
+ * \param *hpos Location where horizontal scanout position should go.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * RADEON_SCANOUTPOS_VALID = Query successful.
+ * RADEON_SCANOUTPOS_INVBL = Inside vblank.
+ * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that the returned position may be offset by a constant
+ * but unknown small number of scanlines w.r.t. the real scanout position.
+ *
+ */
+int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos)
+{
+       u32 stat_crtc = 0, vbl = 0, position = 0;
+       int vbl_start, vbl_end, vtotal, ret = 0;
+       bool in_vbl = true;
+
+       if (ASIC_IS_DCE4(rdev)) {
+               if (crtc == 0) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC0_REGISTER_OFFSET);
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC0_REGISTER_OFFSET);
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+               if (crtc == 1) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC1_REGISTER_OFFSET);
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC1_REGISTER_OFFSET);
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+               if (crtc == 2) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC2_REGISTER_OFFSET);
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC2_REGISTER_OFFSET);
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+               if (crtc == 3) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC3_REGISTER_OFFSET);
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC3_REGISTER_OFFSET);
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+               if (crtc == 4) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC4_REGISTER_OFFSET);
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC4_REGISTER_OFFSET);
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+               if (crtc == 5) {
+                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+                                    EVERGREEN_CRTC5_REGISTER_OFFSET);
+                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+                                         EVERGREEN_CRTC5_REGISTER_OFFSET);
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+       } else if (ASIC_IS_AVIVO(rdev)) {
+               if (crtc == 0) {
+                       vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
+                       position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+               if (crtc == 1) {
+                       vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
+                       position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+       } else {
+               /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
+               if (crtc == 0) {
+                       /* Assume vbl_end == 0, get vbl_start from
+                        * upper 16 bits.
+                        */
+                       vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
+                               RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+                       /* Only retrieve vpos from upper 16 bits, set hpos == 0. */
+                       position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+                       stat_crtc = RREG32(RADEON_CRTC_STATUS);
+                       if (!(stat_crtc & 1))
+                               in_vbl = false;
+
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+               if (crtc == 1) {
+                       vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
+                               RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+                       position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+                       stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+                       if (!(stat_crtc & 1))
+                               in_vbl = false;
+
+                       ret |= RADEON_SCANOUTPOS_VALID;
+               }
+       }
+
+       /* Decode into vertical and horizontal scanout position. */
+       *vpos = position & 0x1fff;
+       *hpos = (position >> 16) & 0x1fff;
+
+       /* Valid vblank area boundaries from gpu retrieved? */
+       if (vbl > 0) {
+               /* Yes: Decode. */
+               ret |= RADEON_SCANOUTPOS_ACCURATE;
+               vbl_start = vbl & 0x1fff;
+               vbl_end = (vbl >> 16) & 0x1fff;
+       } else {
+               /* No: Fake something reasonable which gives at least ok results. */
+               vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay;
+               vbl_end = 0;
+       }
+
+       /* Test scanout position against vblank region. */
+       if ((*vpos < vbl_start) && (*vpos >= vbl_end))
+               in_vbl = false;
+
+       /* Check if inside vblank area and apply corrective offsets:
+        * vpos will then be >=0 in video scanout area, but negative
+        * within vblank area, counting down the number of lines until
+        * start of scanout.
+        */
+
+       /* Inside "upper part" of vblank area? Apply corrective offset if so: */
+       if (in_vbl && (*vpos >= vbl_start)) {
+               vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal;
+               *vpos = *vpos - vtotal;
+       }
+
+       /* Correct for shifted end of vbl at vbl_end. */
+       *vpos = *vpos - vbl_end;
+
+       /* In vblank? */
+       if (in_vbl)
+               ret |= RADEON_SCANOUTPOS_INVBL;
+
+       return ret;
+}
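
The sign convention above is easiest to see with concrete numbers. A worked example, assuming a 1080p-like timing with vbl_start = 1080, vbl_end = 0 and vtotal = 1125 (values illustrative, not taken from the patch):

	/*
	 * raw vpos  500 -> active scanout, returned unchanged as +500
	 * raw vpos 1100 -> >= vbl_start, so 1100 - 1125 - 0 = -25,
	 *                  i.e. 25 scanlines until active scanout resumes
	 * raw vpos 1124 -> returned as -1, one scanline to go, matching
	 *                  the "-1 means one scanline" comment above
	 */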
index 29c1237c2e7b8357d7b72d07cf76a99ed9375869..88e4ea925900ef3aecf71aa17744d3f9d0d8c445 100644 (file)
  * - 2.4.0 - add crtc id query
  * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
  * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
+ * - 2.7.0 - fixups for r600 2D tiling support (no external ABI change), add eg dyn gpr regs
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       6
+#define KMS_DRIVER_MINOR       7
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -93,7 +94,6 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_new_pll = -1;
 int radeon_audio = 1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
@@ -131,9 +131,6 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
-MODULE_PARM_DESC(new_pll, "Select new PLL code");
-module_param_named(new_pll, radeon_new_pll, int, 0444);
-
 MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
 module_param_named(audio, radeon_audio, int, 0444);
 
@@ -203,8 +200,6 @@ static struct drm_driver driver_old = {
        .irq_uninstall = radeon_driver_irq_uninstall,
        .irq_handler = radeon_driver_irq_handler,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = radeon_ioctls,
        .dma_ioctl = radeon_cp_buffers,
        .fops = {
@@ -291,8 +286,6 @@ static struct drm_driver kms_driver = {
        .irq_uninstall = radeon_driver_irq_uninstall_kms,
        .irq_handler = radeon_driver_irq_handler_kms,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = radeon_ioctls_kms,
        .gem_init_object = radeon_gem_object_init,
        .gem_free_object = radeon_gem_object_free,
index 2c293e8304d657d047144708bff75c6474d560f0..ae58b6849a2eaa4fd259b88f3d43d2497733161b 100644 (file)
@@ -529,9 +529,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
                                args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
                        args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
                        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                               if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
+                               if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
                                        args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
-                               if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+                               if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
                                        args.v1.ucMisc |= (1 << 1);
                        } else {
                                if (dig->linkb)
@@ -558,18 +558,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
                        args.v2.ucTemporal = 0;
                        args.v2.ucFRC = 0;
                        if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                               if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
+                               if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
                                        args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
-                               if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
+                               if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
                                        args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
-                                       if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+                                       if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
                                                args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
                                }
-                               if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
+                               if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
                                        args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
-                                       if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+                                       if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
                                                args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
-                                       if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+                                       if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
                                                args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
                                }
                        } else {
index 40b0c087b5921384d46bf7f745cf93f2b391015b..efa211898fe60204a424b98cdb9295ea2ee99356 100644 (file)
@@ -59,6 +59,8 @@ static struct fb_ops radeonfb_ops = {
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
        .fb_setcmap = drm_fb_helper_setcmap,
+       .fb_debug_enter = drm_fb_helper_debug_enter,
+       .fb_debug_leave = drm_fb_helper_debug_leave,
 };
 
 
index b1f9a81b5d1dabcf37da7de738817a6d8db5f84a..216392d0353bcace87cd2a80d06dda358e285ee4 100644 (file)
@@ -72,7 +72,15 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
        bool wake = false;
        unsigned long cjiffies;
 
-       seq = RREG32(rdev->fence_drv.scratch_reg);
+       if (rdev->wb.enabled) {
+               u32 scratch_index;
+               if (rdev->wb.use_event)
+                       scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+               else
+                       scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+               seq = rdev->wb.wb[scratch_index/4];
+       } else
+               seq = RREG32(rdev->fence_drv.scratch_reg);
        if (seq != rdev->fence_drv.last_seq) {
                rdev->fence_drv.last_seq = seq;
                rdev->fence_drv.last_jiffies = jiffies;
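
The branch above exists to avoid an MMIO register read on every fence poll: with writeback enabled, the GPU mirrors the fence scratch register into a CPU-visible page, and the register's byte offset from the scratch base doubles as the index into that page. A condensed sketch of the event-path lookup (offsets as named in the hunk; exact values live in the driver headers):

	u32 byte_off = rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
	/* wb.wb[] holds 32-bit words, hence the divide by 4 */
	seq = rdev->wb.wb[(R600_WB_EVENT_OFFSET + byte_off) / 4];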
index 305049afde15235e558dc22f3992543508f362e6..ace2e6384d4093a688b856c7a5369a9b4124fe94 100644 (file)
@@ -347,11 +347,26 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
 
 int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                         struct drm_framebuffer *old_fb)
+{
+       return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+                               struct drm_framebuffer *fb,
+                               int x, int y, enum mode_set_atomic state)
+{
+       return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+                           struct drm_framebuffer *fb,
+                           int x, int y, int atomic)
 {
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct radeon_framebuffer *radeon_fb;
+       struct drm_framebuffer *target_fb;
        struct drm_gem_object *obj;
        struct radeon_bo *rbo;
        uint64_t base;
@@ -364,14 +379,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
        DRM_DEBUG_KMS("\n");
        /* no fb bound */
-       if (!crtc->fb) {
+       if (!atomic && !crtc->fb) {
                DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }
 
-       radeon_fb = to_radeon_framebuffer(crtc->fb);
+       if (atomic) {
+               radeon_fb = to_radeon_framebuffer(fb);
+               target_fb = fb;
+       } else {
+               radeon_fb = to_radeon_framebuffer(crtc->fb);
+               target_fb = crtc->fb;
+       }
 
-       switch (crtc->fb->bits_per_pixel) {
+       switch (target_fb->bits_per_pixel) {
        case 8:
                format = 2;
                break;
@@ -415,10 +437,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
        crtc_offset_cntl = 0;
 
-       pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
-       crtc_pitch  = (((pitch_pixels * crtc->fb->bits_per_pixel) +
-                       ((crtc->fb->bits_per_pixel * 8) - 1)) /
-                      (crtc->fb->bits_per_pixel * 8));
+       pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+       crtc_pitch  = (((pitch_pixels * target_fb->bits_per_pixel) +
+                       ((target_fb->bits_per_pixel * 8) - 1)) /
+                      (target_fb->bits_per_pixel * 8));
        crtc_pitch |= crtc_pitch << 16;
 
 
@@ -443,14 +465,14 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                        crtc_tile_x0_y0 = x | (y << 16);
                        base &= ~0x7ff;
                } else {
-                       int byteshift = crtc->fb->bits_per_pixel >> 4;
+                       int byteshift = target_fb->bits_per_pixel >> 4;
                        int tile_addr = (((y >> 3) * pitch_pixels +  x) >> (8 - byteshift)) << 11;
                        base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
                        crtc_offset_cntl |= (y % 16);
                }
        } else {
                int offset = y * pitch_pixels + x;
-               switch (crtc->fb->bits_per_pixel) {
+               switch (target_fb->bits_per_pixel) {
                case 8:
                        offset *= 1;
                        break;
@@ -496,8 +518,8 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
        WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
        WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
 
-       if (old_fb && old_fb != crtc->fb) {
-               radeon_fb = to_radeon_framebuffer(old_fb);
+       if (!atomic && fb && fb != crtc->fb) {
+               radeon_fb = to_radeon_framebuffer(fb);
                rbo = radeon_fb->obj->driver_private;
                r = radeon_bo_reserve(rbo, false);
                if (unlikely(r != 0))
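
Both entry points now funnel into one worker keyed on an atomic flag; the atomic variant exists for contexts such as the fb_debug_enter/leave hooks added earlier in this series, where sleeping or trusting crtc->fb is not allowed. A minimal, self-contained illustration of the pattern (names hypothetical, not radeon code):

	static int do_set_base(int x, int y, int atomic)
	{
		if (!atomic) {
			/* may sleep: reserve and unpin the old buffer here */
		}
		return 0;
	}

	static int set_base(int x, int y)        { return do_set_base(x, y, 0); }
	static int set_base_atomic(int x, int y) { return do_set_base(x, y, 1); }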
@@ -717,10 +739,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
                pll = &rdev->clock.p1pll;
 
        pll->flags = RADEON_PLL_LEGACY;
-       if (radeon_new_pll == 1)
-               pll->algo = PLL_ALGO_NEW;
-       else
-               pll->algo = PLL_ALGO_LEGACY;
 
        if (mode->clock > 200000) /* range limits??? */
                pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
@@ -1040,6 +1058,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
        .mode_fixup = radeon_crtc_mode_fixup,
        .mode_set = radeon_crtc_mode_set,
        .mode_set_base = radeon_crtc_set_base,
+       .mode_set_base_atomic = radeon_crtc_set_base_atomic,
        .prepare = radeon_crtc_prepare,
        .commit = radeon_crtc_commit,
        .load_lut = radeon_crtc_load_lut,
index 454c1dc7ea45ea5975647680b15191d15f2bb470..92457163d07094dba9bd5975dc63dfb4968e92a6 100644 (file)
@@ -35,6 +35,7 @@
 #include <drm_edid.h>
 #include <drm_dp_helper.h>
 #include <drm_fixed.h>
+#include <drm_crtc_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 
@@ -149,12 +150,6 @@ struct radeon_tmds_pll {
 #define RADEON_PLL_USE_POST_DIV         (1 << 12)
 #define RADEON_PLL_IS_LCD               (1 << 13)
 
-/* pll algo */
-enum radeon_pll_algo {
-       PLL_ALGO_LEGACY,
-       PLL_ALGO_NEW
-};
-
 struct radeon_pll {
        /* reference frequency */
        uint32_t reference_freq;
@@ -187,8 +182,6 @@ struct radeon_pll {
 
        /* pll id */
        uint32_t id;
-       /* pll algo */
-       enum radeon_pll_algo algo;
 };
 
 struct radeon_i2c_chan {
@@ -240,6 +233,8 @@ struct radeon_mode_info {
        struct drm_property *tmds_pll_property;
        /* underscan */
        struct drm_property *underscan_property;
+       struct drm_property *underscan_hborder_property;
+       struct drm_property *underscan_vborder_property;
        /* hardcoded DFP edid from BIOS */
        struct edid *bios_hardcoded_edid;
 
@@ -335,22 +330,24 @@ struct radeon_encoder_ext_tmds {
 struct radeon_atom_ss {
        uint16_t percentage;
        uint8_t type;
-       uint8_t step;
+       uint16_t step;
        uint8_t delay;
        uint8_t range;
        uint8_t refdiv;
+       /* asic_ss */
+       uint16_t rate;
+       uint16_t amount;
 };
 
 struct radeon_encoder_atom_dig {
        bool linkb;
        /* atom dig */
        bool coherent_mode;
-       int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
-       /* atom lvds */
-       uint32_t lvds_misc;
+       int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
+       /* atom lvds/edp */
+       uint32_t lcd_misc;
        uint16_t panel_pwr_delay;
-       enum radeon_pll_algo pll_algo;
-       struct radeon_atom_ss *ss;
+       uint32_t lcd_ss_id;
        /* panel mode */
        struct drm_display_mode native_mode;
 };
@@ -369,6 +366,8 @@ struct radeon_encoder {
        uint32_t pixel_clock;
        enum radeon_rmx_type rmx_type;
        enum radeon_underscan_type underscan_type;
+       uint32_t underscan_hborder;
+       uint32_t underscan_vborder;
        struct drm_display_mode native_mode;
        void *enc_priv;
        int audio_polling_active;
@@ -435,6 +434,11 @@ struct radeon_framebuffer {
        struct drm_gem_object *obj;
 };
 
+/* radeon_get_crtc_scanoutpos() return flags */
+#define RADEON_SCANOUTPOS_VALID        (1 << 0)
+#define RADEON_SCANOUTPOS_INVBL        (1 << 1)
+#define RADEON_SCANOUTPOS_ACCURATE     (1 << 2)
+
 extern enum radeon_tv_std
 radeon_combios_get_tv_info(struct radeon_device *rdev);
 extern enum radeon_tv_std
@@ -490,6 +494,13 @@ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
 extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
 
+extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+                                            struct radeon_atom_ss *ss,
+                                            int id);
+extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+                                            struct radeon_atom_ss *ss,
+                                            int id, u32 clock);
+
 extern void radeon_compute_pll(struct radeon_pll *pll,
                               uint64_t freq,
                               uint32_t *dot_clock_p,
@@ -513,6 +524,10 @@ extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
 extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
 extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                                   struct drm_framebuffer *old_fb);
+extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y,
+                                        enum mode_set_atomic state);
 extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode,
@@ -522,7 +537,13 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
 
 extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                                 struct drm_framebuffer *old_fb);
-
+extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                      struct drm_framebuffer *fb,
+                                      int x, int y,
+                                      enum mode_set_atomic state);
+extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+                                  struct drm_framebuffer *fb,
+                                  int x, int y, int atomic);
 extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
                                  struct drm_file *file_priv,
                                  uint32_t handle,
@@ -531,6 +552,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
 extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                                   int x, int y);
 
+extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
+
 extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
 extern struct edid *
 radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
index b3b5306bb578bf88547e4078fe48f59d9e0ea720..d7ab914164103bdda7bf9ea72e8e35a2c78a5f37 100644 (file)
@@ -435,7 +435,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 
 out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-                              bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+                              bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
 }
@@ -532,7 +532,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        rdev = rbo->rdev;
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                size = bo->mem.num_pages << PAGE_SHIFT;
-               offset = bo->mem.mm_node->start << PAGE_SHIFT;
+               offset = bo->mem.start << PAGE_SHIFT;
                if ((offset + size) > rdev->mc.visible_vram_size) {
                        /* hurrah the memory is not visible ! */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -540,7 +540,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
                        r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
                        if (unlikely(r != 0))
                                return r;
-                       offset = bo->mem.mm_node->start << PAGE_SHIFT;
+                       offset = bo->mem.start << PAGE_SHIFT;
                        /* this should not happen */
                        if ((offset + size) > rdev->mc.visible_vram_size)
                                return -EINVAL;
index f87efec76236966ac790ff0709f5cf2ae3c57563..8c9b2ef32c681229aa9b21d5d2bce87b7bfae6fa 100644 (file)
@@ -712,73 +712,21 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
 
 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
 {
-       u32 stat_crtc = 0, vbl = 0, position = 0;
+       int  crtc, vpos, hpos, vbl_status;
        bool in_vbl = true;
 
-       if (ASIC_IS_DCE4(rdev)) {
-               if (rdev->pm.active_crtcs & (1 << 0)) {
-                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
-                                    EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
-                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
-                                         EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
-               }
-               if (rdev->pm.active_crtcs & (1 << 1)) {
-                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
-                                    EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
-                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
-                                         EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
-               }
-               if (rdev->pm.active_crtcs & (1 << 2)) {
-                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
-                                    EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
-                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
-                                         EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
-               }
-               if (rdev->pm.active_crtcs & (1 << 3)) {
-                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
-                                    EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
-                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
-                                         EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
-               }
-               if (rdev->pm.active_crtcs & (1 << 4)) {
-                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
-                                    EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
-                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
-                                         EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
-               }
-               if (rdev->pm.active_crtcs & (1 << 5)) {
-                       vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
-                                    EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
-                       position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
-                                         EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
-               }
-       } else if (ASIC_IS_AVIVO(rdev)) {
-               if (rdev->pm.active_crtcs & (1 << 0)) {
-                       vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
-                       position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
-               }
-               if (rdev->pm.active_crtcs & (1 << 1)) {
-                       vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
-                       position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
-               }
-               if (position < vbl && position > 1)
-                       in_vbl = false;
-       } else {
-               if (rdev->pm.active_crtcs & (1 << 0)) {
-                       stat_crtc = RREG32(RADEON_CRTC_STATUS);
-                       if (!(stat_crtc & 1))
-                               in_vbl = false;
-               }
-               if (rdev->pm.active_crtcs & (1 << 1)) {
-                       stat_crtc = RREG32(RADEON_CRTC2_STATUS);
-                       if (!(stat_crtc & 1))
+       /* Iterate over all active crtcs. All crtcs must be in vblank,
+        * otherwise return in_vbl == false.
+        */
+       for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
+               if (rdev->pm.active_crtcs & (1 << crtc)) {
+                       vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
+                       if ((vbl_status & RADEON_SCANOUTPOS_VALID) &&
+                           !(vbl_status & RADEON_SCANOUTPOS_INVBL))
                                in_vbl = false;
                }
        }
 
-       if (position < vbl && position > 1)
-               in_vbl = false;
-
        return in_vbl;
 }
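
Note the polarity of the check above: only a successful query that reports active scanout clears in_vbl; a failed query (no VALID bit) leaves it untouched, so it does not veto reclocking. An equivalent single-crtc predicate (sketch only, not part of the patch):

	static bool crtc_vetoes_reclock(int vbl_status)
	{
		/* true only when the query succeeded and the crtc is
		 * actively scanning out */
		return (vbl_status & RADEON_SCANOUTPOS_VALID) &&
		       !(vbl_status & RADEON_SCANOUTPOS_INVBL);
	}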
 
index 261e98a276dbb813631e56bf48358e003491c32f..6ea798ce821896dd4b2704461b260578a63a2516 100644 (file)
@@ -247,10 +247,14 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
  */
 void radeon_ring_free_size(struct radeon_device *rdev)
 {
-       if (rdev->family >= CHIP_R600)
-               rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
-       else
-               rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       if (rdev->wb.enabled)
+               rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
+       else {
+               if (rdev->family >= CHIP_R600)
+                       rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+               else
+                       rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+       }
        /* This works because ring_size is a power of 2 */
        rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
        rdev->cp.ring_free_dw -= rdev->cp.wptr;
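
The trailing comment relies on the ring size being a power of two: free space can then be computed with a wrap-around subtraction and a mask instead of a branch. An illustrative helper (not radeon code; the masking and the empty-ring special case are assumed to follow just below this hunk):

	static unsigned int ring_free_dw(unsigned int rptr, unsigned int wptr,
					 unsigned int ring_dw /* power of 2 */)
	{
		/* e.g. ring_dw = 8, rptr = 2, wptr = 6 -> 4 dwords free */
		return (rptr + ring_dw - wptr) & (ring_dw - 1);
	}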
index a823d8fe54c2ccaac8e63256a75e249bce542994..fe95bb35317ea26c2cabe33fc0bae285bfc32cef 100644 (file)
@@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
+               man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.gtt_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
@@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
+               man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                return r;
        }
-       old_start = old_mem->mm_node->start << PAGE_SHIFT;
-       new_start = new_mem->mm_node->start << PAGE_SHIFT;
+       old_start = old_mem->start << PAGE_SHIFT;
+       new_start = new_mem->start << PAGE_SHIFT;
 
        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
@@ -326,14 +328,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        }
        r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
-       if (tmp_mem.mm_node) {
-               struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
-               spin_lock(&glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&glob->lru_lock);
-               return r;
-       }
+       ttm_bo_mem_put(bo, &tmp_mem);
        return r;
 }
 
@@ -372,14 +367,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                goto out_cleanup;
        }
 out_cleanup:
-       if (tmp_mem.mm_node) {
-               struct ttm_bo_global *glob = rdev->mman.bdev.glob;
-
-               spin_lock(&glob->lru_lock);
-               drm_mm_put_block(tmp_mem.mm_node);
-               spin_unlock(&glob->lru_lock);
-               return r;
-       }
+       ttm_bo_mem_put(bo, &tmp_mem);
        return r;
 }
 
@@ -449,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
 #if __OS_HAS_AGP
                if (rdev->flags & RADEON_IS_AGP) {
                        /* RADEON_IS_AGP is set only if AGP is active */
-                       mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                       mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = rdev->mc.agp_base;
                        mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
                }
 #endif
                break;
        case TTM_PL_VRAM:
-               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               mem->bus.offset = mem->start << PAGE_SHIFT;
                /* check if it's visible */
                if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
                        return -EINVAL;
@@ -699,7 +687,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
        int r;
 
        gtt = container_of(backend, struct radeon_ttm_backend, backend);
-       gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+       gtt->offset = bo_mem->start << PAGE_SHIFT;
        if (!gtt->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
        }
@@ -798,9 +786,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
                radeon_mem_types_list[i].show = &radeon_mm_dump_table;
                radeon_mem_types_list[i].driver_features = 0;
                if (i == 0)
-                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
+                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
                else
-                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
+                       radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
 
        }
        /* Add ttm page pool to debugfs */
index f78fd592544d67a58e15016487d4d8b47ee38bf7..ac40fd39d7873f23fd48235d676c5e59ddd9a4a2 100644 (file)
@@ -22,6 +22,10 @@ evergreen 0x9400
 0x00008B10 PA_SC_LINE_STIPPLE_STATE
 0x00008BF0 PA_SC_ENHANCE
 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D90 SQ_DYN_GPR_OPTIMIZATION
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008D98 SQ_DYN_GPR_THREAD_LIMIT
+0x00008D9C SQ_DYN_GPR_LDS_LIMIT
 0x00008C00 SQ_CONFIG
 0x00008C04 SQ_GPR_RESOURCE_MGMT_1
 0x00008C08 SQ_GPR_RESOURCE_MGMT_2
@@ -34,6 +38,10 @@ evergreen 0x9400
 0x00008C24 SQ_STACK_RESOURCE_MGMT_2
 0x00008C28 SQ_STACK_RESOURCE_MGMT_3
 0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E2C SQ_LDS_RESOURCE_MGMT
 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
 0x00009100 SPI_CONFIG_CNTL
 0x0000913C SPI_CONFIG_CNTL_1
index ae2b76b9a388ae78da7e9d923a14de2d19c956e8..f683e51a2a0674518b476d6a2157b39a303170a6 100644 (file)
@@ -397,6 +397,12 @@ static int rs400_startup(struct radeon_device *rdev)
        r = rs400_gart_enable(rdev);
        if (r)
                return r;
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -406,9 +412,6 @@ static int rs400_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_wb_init(rdev);
-       if (r)
-               dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -443,7 +446,7 @@ int rs400_resume(struct radeon_device *rdev)
 int rs400_suspend(struct radeon_device *rdev)
 {
        r100_cp_disable(rdev);
-       r100_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
        rs400_gart_disable(rdev);
        return 0;
@@ -452,7 +455,7 @@ int rs400_suspend(struct radeon_device *rdev)
 void rs400_fini(struct radeon_device *rdev)
 {
        r100_cp_fini(rdev);
-       r100_wb_fini(rdev);
+       radeon_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs400_gart_fini(rdev);
@@ -526,7 +529,7 @@ int rs400_init(struct radeon_device *rdev)
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
-               r100_wb_fini(rdev);
+               radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs400_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
index 51d5f7b5ab21b40a6e34d2fd286f28da91e4f0b0..b091a1f6fa4ed2b86dc1975717abbf017667c251 100644 (file)
@@ -796,6 +796,12 @@ static int rs600_startup(struct radeon_device *rdev)
        r = rs600_gart_enable(rdev);
        if (r)
                return r;
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -805,9 +811,6 @@ static int rs600_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_wb_init(rdev);
-       if (r)
-               dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -848,7 +851,7 @@ int rs600_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        r100_cp_disable(rdev);
-       r100_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        rs600_irq_disable(rdev);
        rs600_gart_disable(rdev);
        return 0;
@@ -858,7 +861,7 @@ void rs600_fini(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        r100_cp_fini(rdev);
-       r100_wb_fini(rdev);
+       radeon_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs600_gart_fini(rdev);
@@ -932,7 +935,7 @@ int rs600_init(struct radeon_device *rdev)
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
-               r100_wb_fini(rdev);
+               radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs600_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
index 4dc2a87ea68018f0292cc0724d6ef4868c00e8ac..0137d3e3728d950734fdbe9e3489a9f2f44847f1 100644 (file)
@@ -616,6 +616,12 @@ static int rs690_startup(struct radeon_device *rdev)
        r = rs400_gart_enable(rdev);
        if (r)
                return r;
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -625,9 +631,6 @@ static int rs690_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_wb_init(rdev);
-       if (r)
-               dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -668,7 +671,7 @@ int rs690_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        r100_cp_disable(rdev);
-       r100_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        rs600_irq_disable(rdev);
        rs400_gart_disable(rdev);
        return 0;
@@ -678,7 +681,7 @@ void rs690_fini(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        r100_cp_fini(rdev);
-       r100_wb_fini(rdev);
+       radeon_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs400_gart_fini(rdev);
@@ -753,7 +756,7 @@ int rs690_init(struct radeon_device *rdev)
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
-               r100_wb_fini(rdev);
+               radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs400_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
index 4d6e86041a9fde07f8caa56c4f90ba75c05a9f9e..5d569f41f4aeadbe04ee1d1c21cb165d4bf8c338 100644 (file)
@@ -386,6 +386,12 @@ static int rv515_startup(struct radeon_device *rdev)
                if (r)
                        return r;
        }
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        rs600_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -395,9 +401,6 @@ static int rv515_startup(struct radeon_device *rdev)
                dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
                return r;
        }
-       r = r100_wb_init(rdev);
-       if (r)
-               dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
@@ -431,7 +434,7 @@ int rv515_resume(struct radeon_device *rdev)
 int rv515_suspend(struct radeon_device *rdev)
 {
        r100_cp_disable(rdev);
-       r100_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        rs600_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
@@ -447,7 +450,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
 void rv515_fini(struct radeon_device *rdev)
 {
        r100_cp_fini(rdev);
-       r100_wb_fini(rdev);
+       radeon_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rv370_pcie_gart_fini(rdev);
@@ -527,7 +530,7 @@ int rv515_init(struct radeon_device *rdev)
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
-               r100_wb_fini(rdev);
+               radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rv370_pcie_gart_fini(rdev);
index 9490da700749487c00fe9c57671ec89727653136..245374e2b778b7706e9bf828c8a65876c1e11a00 100644 (file)
@@ -269,6 +269,7 @@ void r700_cp_stop(struct radeon_device *rdev)
 {
        rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+       WREG32(SCRATCH_UMSK, 0);
 }
 
 static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -643,10 +644,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        else
                gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
        rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
-
-       gb_tiling_config |= GROUP_SIZE(0);
-       rdev->config.rv770.tiling_group_size = 256;
-
+       gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+       if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+               rdev->config.rv770.tiling_group_size = 512;
+       else
+               rdev->config.rv770.tiling_group_size = 256;
        if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
                gb_tiling_config |= ROW_TILING(3);
                gb_tiling_config |= SAMPLE_SPLIT(3);
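
Rather than hardcoding a 256-byte tiling group, the code above now derives the group size from the memory controller's burst length, keeping GB_TILING_CONFIG and the driver's bookkeeping in sync. Condensed restatement of the same logic:

	u32 burst = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
	rdev->config.rv770.tiling_group_size = burst ? 512 : 256;
	gb_tiling_config |= GROUP_SIZE(burst);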
@@ -1030,19 +1032,12 @@ static int rv770_startup(struct radeon_device *rdev)
                rdev->asic->copy = NULL;
                dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }
-       /* pin copy shader into vram */
-       if (rdev->r600_blit.shader_obj) {
-               r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-               if (unlikely(r != 0))
-                       return r;
-               r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-                               &rdev->r600_blit.shader_gpu_addr);
-               radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-               if (r) {
-                       DRM_ERROR("failed to pin blit object %d\n", r);
-                       return r;
-               }
-       }
+
+       /* allocate wb buffer */
+       r = radeon_wb_init(rdev);
+       if (r)
+               return r;
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -1061,8 +1056,7 @@ static int rv770_startup(struct radeon_device *rdev)
        r = r600_cp_resume(rdev);
        if (r)
                return r;
-       /* write back buffer are not vital so don't worry about failure */
-       r600_wb_enable(rdev);
+
        return 0;
 }
 
@@ -1108,7 +1102,7 @@ int rv770_suspend(struct radeon_device *rdev)
        r700_cp_stop(rdev);
        rdev->cp.ready = false;
        r600_irq_suspend(rdev);
-       r600_wb_disable(rdev);
+       radeon_wb_disable(rdev);
        rv770_pcie_gart_disable(rdev);
        /* unpin shaders bo */
        if (rdev->r600_blit.shader_obj) {
@@ -1203,8 +1197,8 @@ int rv770_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
-               r600_wb_fini(rdev);
                r600_irq_fini(rdev);
+               radeon_wb_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rv770_pcie_gart_fini(rdev);
                rdev->accel_working = false;
@@ -1236,8 +1230,8 @@ void rv770_fini(struct radeon_device *rdev)
 {
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
-       r600_wb_fini(rdev);
        r600_irq_fini(rdev);
+       radeon_wb_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
        rv770_vram_scratch_fini(rdev);
index 2a2830f5a8409b802a000b663faa1580d9a20815..fa64d25d4248d6e361ace5c8ea920b322a65f0c5 100644 (file)
@@ -42,8 +42,6 @@ static struct drm_driver driver = {
        .lastclose = savage_driver_lastclose,
        .unload = savage_driver_unload,
        .reclaim_buffers = savage_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = savage_ioctls,
        .dma_ioctl = savage_bci_buffers,
        .fops = {
index 4bb10ef6676a711b795346006f2e97ebdce51e42..4caf5d01cfd3feb5ae76e3fefc7cbe0bea8c20f8 100644 (file)
@@ -67,13 +67,10 @@ static struct drm_driver driver = {
        .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
        .load = sis_driver_load,
        .unload = sis_driver_unload,
-       .context_dtor = NULL,
        .dma_quiescent = sis_idle,
        .reclaim_buffers = NULL,
        .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
        .lastclose = sis_lastclose,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = sis_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
index 640567ef713dbe97e9f22c710a7d951cc73c46b5..b70fa91d761a950ef163877327c31254cfa1ce32 100644 (file)
@@ -42,8 +42,6 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
        .driver_features = DRIVER_USE_MTRR,
        .reclaim_buffers = drm_core_reclaim_buffers,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
index b256d4adfafe76710cf1dd6e3b03371e03f7fd46..f3cf6f02c9970da645280df0f12622a12a3e0d7e 100644 (file)
@@ -4,6 +4,7 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
        ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-       ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
+       ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+       ttm_bo_manager.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
index 4bf69c4044913d7e890f06a93aa2e5d25ca40fb9..f999e36f30b4c184fd5d04aa2f34bf085eba89ab 100644 (file)
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
        struct ttm_agp_backend *agp_be =
            container_of(backend, struct ttm_agp_backend, backend);
+       struct drm_mm_node *node = bo_mem->mm_node;
        struct agp_memory *mem = agp_be->mem;
        int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
        int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
        mem->is_flushed = 1;
        mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
 
-       ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+       ret = agp_bind_memory(mem, node->start);
        if (ret)
                printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
 
index db809e034cc48b6d8c246cba1ede0660113e30de..a1cb783c7131c56de8aa19cf5afb6a668e5e4345 100644 (file)
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
                man->available_caching);
        printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
                man->default_caching);
-       if (mem_type != TTM_PL_SYSTEM) {
-               spin_lock(&bdev->glob->lru_lock);
-               drm_mm_debug_table(&man->manager, TTM_PFX);
-               spin_unlock(&bdev->glob->lru_lock);
-       }
+       if (mem_type != TTM_PL_SYSTEM)
+               (*man->func->debug)(man, TTM_PFX);
 }
 
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -169,18 +166,13 @@ static void ttm_bo_release_list(struct kref *list_kref)
 
 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 {
-
        if (interruptible) {
-               int ret = 0;
-
-               ret = wait_event_interruptible(bo->event_queue,
+               return wait_event_interruptible(bo->event_queue,
                                               atomic_read(&bo->reserved) == 0);
-               if (unlikely(ret != 0))
-                       return ret;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+               return 0;
        }
-       return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
@@ -421,7 +413,7 @@ moved:
 
        if (bo->mem.mm_node) {
                spin_lock(&bo->lock);
-               bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+               bo->offset = (bo->mem.start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
@@ -442,135 +434,144 @@ out_err:
 }
 
 /**
- * Call bo::reserved and with the lru lock held.
+ * Called with the bo::reserved lock held.
  * Will release GPU memory type usage on destruction.
- * This is the place to put in driver specific hooks.
- * Will release the bo::reserved lock and the
- * lru lock on exit.
+ * This is the place to add driver-specific hooks to release
+ * driver-private resources.
+ * Will release the bo::reserved lock.
  */
 
 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 {
-       struct ttm_bo_global *glob = bo->glob;
-
        if (bo->ttm) {
-
-               /**
-                * Release the lru_lock, since we don't want to have
-                * an atomic requirement on ttm_tt[unbind|destroy].
-                */
-
-               spin_unlock(&glob->lru_lock);
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
-               spin_lock(&glob->lru_lock);
        }
 
-       if (bo->mem.mm_node) {
-               drm_mm_put_block(bo->mem.mm_node);
-               bo->mem.mm_node = NULL;
-       }
+       ttm_bo_mem_put(bo, &bo->mem);
 
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
-       spin_unlock(&glob->lru_lock);
 }
 
-
-/**
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, and already on delayed list, do nothing.
- * If not idle, and not on delayed list, put on delayed list,
- *   up the list_kref and schedule a delayed list check.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
-       struct ttm_bo_driver *driver = bdev->driver;
+       struct ttm_bo_driver *driver;
+       void *sync_obj;
+       void *sync_obj_arg;
+       int put_count;
        int ret;
 
        spin_lock(&bo->lock);
-retry:
-       (void) ttm_bo_wait(bo, false, false, !remove_all);
-
+       (void) ttm_bo_wait(bo, false, false, true);
        if (!bo->sync_obj) {
-               int put_count;
-
-               spin_unlock(&bo->lock);
 
                spin_lock(&glob->lru_lock);
-               ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
 
                /**
-                * Someone else has the object reserved. Bail and retry.
+                * Lock inversion between bo::reserve and bo::lock here,
+                * but that's OK, since we're only trylocking.
                 */
 
-               if (unlikely(ret == -EBUSY)) {
-                       spin_unlock(&glob->lru_lock);
-                       spin_lock(&bo->lock);
-                       goto requeue;
-               }
-
-               /**
-                * We can re-check for sync object without taking
-                * the bo::lock since setting the sync object requires
-                * also bo::reserved. A busy object at this point may
-                * be caused by another thread starting an accelerated
-                * eviction.
-                */
+               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-               if (unlikely(bo->sync_obj)) {
-                       atomic_set(&bo->reserved, 0);
-                       wake_up_all(&bo->event_queue);
-                       spin_unlock(&glob->lru_lock);
-                       spin_lock(&bo->lock);
-                       if (remove_all)
-                               goto retry;
-                       else
-                               goto requeue;
-               }
+               if (unlikely(ret == -EBUSY))
+                       goto queue;
 
+               spin_unlock(&bo->lock);
                put_count = ttm_bo_del_from_lru(bo);
 
-               if (!list_empty(&bo->ddestroy)) {
-                       list_del_init(&bo->ddestroy);
-                       ++put_count;
-               }
-
+               spin_unlock(&glob->lru_lock);
                ttm_bo_cleanup_memtype_use(bo);
 
                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-               return 0;
+               return;
+       } else {
+               spin_lock(&glob->lru_lock);
        }
-requeue:
+queue:
+       sync_obj = bo->sync_obj;
+       sync_obj_arg = bo->sync_obj_arg;
+       driver = bdev->driver;
+
+       kref_get(&bo->list_kref);
+       list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+       spin_unlock(&glob->lru_lock);
+       spin_unlock(&bo->lock);
+
+       if (sync_obj)
+               driver->sync_obj_flush(sync_obj, sync_obj_arg);
+       schedule_delayed_work(&bdev->wq,
+                             ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+
+/**
+ * ttm_bo_cleanup_refs -
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, do nothing.
+ *
+ * @interruptible:        Any sleeps should occur interruptibly.
+ * @no_wait_reserve:      Never wait for reserve. Return -EBUSY instead.
+ * @no_wait_gpu:          Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+                              bool interruptible,
+                              bool no_wait_reserve,
+                              bool no_wait_gpu)
+{
+       struct ttm_bo_global *glob = bo->glob;
+       int put_count;
+       int ret = 0;
+
+retry:
+       spin_lock(&bo->lock);
+       ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+       spin_unlock(&bo->lock);
+
+       if (unlikely(ret != 0))
+               return ret;
+
        spin_lock(&glob->lru_lock);
-       if (list_empty(&bo->ddestroy)) {
-               void *sync_obj = bo->sync_obj;
-               void *sync_obj_arg = bo->sync_obj_arg;
+       ret = ttm_bo_reserve_locked(bo, interruptible,
+                                   no_wait_reserve, false, 0);
 
-               kref_get(&bo->list_kref);
-               list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+       if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
                spin_unlock(&glob->lru_lock);
-               spin_unlock(&bo->lock);
+               return ret;
+       }
 
-               if (sync_obj)
-                       driver->sync_obj_flush(sync_obj, sync_obj_arg);
-               schedule_delayed_work(&bdev->wq,
-                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
-               ret = 0;
+       /**
+        * We can re-check for sync object without taking
+        * the bo::lock since setting the sync object also requires
+        * bo::reserved. A busy object at this point may
+        * be caused by another thread recently starting an accelerated
+        * eviction.
+        */
 
-       } else {
+       if (unlikely(bo->sync_obj)) {
+               atomic_set(&bo->reserved, 0);
+               wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
-               spin_unlock(&bo->lock);
-               ret = -EBUSY;
+               goto retry;
        }
 
-       return ret;
+       put_count = ttm_bo_del_from_lru(bo);
+       list_del_init(&bo->ddestroy);
+       ++put_count;
+
+       spin_unlock(&glob->lru_lock);
+       ttm_bo_cleanup_memtype_use(bo);
+
+       while (put_count--)
+               kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+       return 0;
 }
 
 /**
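The requeue path above re-arms the delayed-destroy scan roughly every 10 ms; the conditional guards HZ < 100 configurations, where HZ / 100 would truncate to zero jiffies. An equivalent, arguably clearer spelling (a sketch, not what the commit uses):

	schedule_delayed_work(&bdev->wq, max_t(unsigned long, 1, HZ / 100));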
@@ -602,7 +603,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                }
 
                spin_unlock(&glob->lru_lock);
-               ret = ttm_bo_cleanup_refs(entry, remove_all);
+               ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
+                                         !remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;
 
@@ -645,7 +647,7 @@ static void ttm_bo_release(struct kref *kref)
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
-       ttm_bo_cleanup_refs(bo, false);
+       ttm_bo_cleanup_refs_or_queue(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
 }
@@ -680,7 +682,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait_reserve, bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;
@@ -726,12 +727,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
-               spin_lock(&glob->lru_lock);
-               if (evict_mem.mm_node) {
-                       drm_mm_put_block(evict_mem.mm_node);
-                       evict_mem.mm_node = NULL;
-               }
-               spin_unlock(&glob->lru_lock);
+               ttm_bo_mem_put(bo, &evict_mem);
                goto out;
        }
        bo->evicted = true;
@@ -759,6 +755,18 @@ retry:
        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);
 
+       if (!list_empty(&bo->ddestroy)) {
+               spin_unlock(&glob->lru_lock);
+               ret = ttm_bo_cleanup_refs(bo, interruptible,
+                                         no_wait_reserve, no_wait_gpu);
+               kref_put(&bo->list_kref, ttm_bo_release_list);
+
+               if (likely(ret == 0 || ret == -ERESTARTSYS))
+                       return ret;
+
+               goto retry;
+       }
+
        ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
        if (unlikely(ret == -EBUSY)) {
@@ -792,41 +800,14 @@ retry:
        return ret;
 }
 
-static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
-                               struct ttm_mem_type_manager *man,
-                               struct ttm_placement *placement,
-                               struct ttm_mem_reg *mem,
-                               struct drm_mm_node **node)
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
-       struct ttm_bo_global *glob = bo->glob;
-       unsigned long lpfn;
-       int ret;
+       struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 
-       lpfn = placement->lpfn;
-       if (!lpfn)
-               lpfn = man->size;
-       *node = NULL;
-       do {
-               ret = drm_mm_pre_get(&man->manager);
-               if (unlikely(ret))
-                       return ret;
-
-               spin_lock(&glob->lru_lock);
-               *node = drm_mm_search_free_in_range(&man->manager,
-                                       mem->num_pages, mem->page_alignment,
-                                       placement->fpfn, lpfn, 1);
-               if (unlikely(*node == NULL)) {
-                       spin_unlock(&glob->lru_lock);
-                       return 0;
-               }
-               *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
-                                                       mem->page_alignment,
-                                                       placement->fpfn,
-                                                       lpfn);
-               spin_unlock(&glob->lru_lock);
-       } while (*node == NULL);
-       return 0;
+       if (mem->mm_node)
+               (*man->func->put_node)(man, mem);
 }
+EXPORT_SYMBOL(ttm_bo_mem_put);
 
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
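For reference, the contract the new (*man->func->get_node)() hook follows, which the force-space loop in the next hunk relies on: a non-zero return is a hard error, while a zero return that leaves mem->mm_node NULL means the manager is simply out of space and the caller should evict. A hedged sketch of a caller honoring that contract (example_try_alloc and the -EBUSY mapping are invented here):

static int example_try_alloc(struct ttm_mem_type_manager *man,
			     struct ttm_buffer_object *bo,
			     struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int ret = (*man->func->get_node)(man, bo, placement, mem);

	if (unlikely(ret != 0))
		return ret;		/* hard failure, e.g. -ENOMEM */
	if (mem->mm_node == NULL)
		return -EBUSY;		/* no space; evict and retry */
	return 0;			/* success; release via ttm_bo_mem_put() */
}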
@@ -843,14 +824,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-       struct drm_mm_node *node;
        int ret;
 
        do {
-               ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+               ret = (*man->func->get_node)(man, bo, placement, mem);
                if (unlikely(ret != 0))
                        return ret;
-               if (node)
+               if (mem->mm_node)
                        break;
                spin_lock(&glob->lru_lock);
                if (list_empty(&man->lru)) {
@@ -863,9 +843,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
-       if (node == NULL)
+       if (mem->mm_node == NULL)
                return -ENOMEM;
-       mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
 }
@@ -939,7 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
-       struct drm_mm_node *node = NULL;
        int i, ret;
 
        mem->mm_node = NULL;
@@ -973,17 +951,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
                if (man->has_type && man->use_type) {
                        type_found = true;
-                       ret = ttm_bo_man_get_node(bo, man, placement, mem,
-                                                       &node);
+                       ret = (*man->func->get_node)(man, bo, placement, mem);
                        if (unlikely(ret))
                                return ret;
                }
-               if (node)
+               if (mem->mm_node)
                        break;
        }
 
-       if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
-               mem->mm_node = node;
+       if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
@@ -1053,7 +1029,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        bool interruptible, bool no_wait_reserve,
                        bool no_wait_gpu)
 {
-       struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        struct ttm_mem_reg mem;
 
@@ -1081,11 +1056,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
-       if (ret && mem.mm_node) {
-               spin_lock(&glob->lru_lock);
-               drm_mm_put_block(mem.mm_node);
-               spin_unlock(&glob->lru_lock);
-       }
+       if (ret && mem.mm_node)
+               ttm_bo_mem_put(bo, &mem);
        return ret;
 }
 
@@ -1093,11 +1065,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
 {
        int i;
-       struct drm_mm_node *node = mem->mm_node;
 
-       if (node && placement->lpfn != 0 &&
-           (node->start < placement->fpfn ||
-            node->start + node->size > placement->lpfn))
+       if (mem->mm_node && placement->lpfn != 0 &&
+           (mem->start < placement->fpfn ||
+            mem->start + mem->num_pages > placement->lpfn))
                return -1;
 
        for (i = 0; i < placement->num_placement; i++) {
@@ -1341,7 +1312,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
-       struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man;
        int ret = -EINVAL;
 
@@ -1364,13 +1334,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, mem_type, false);
 
-               spin_lock(&glob->lru_lock);
-               if (drm_mm_clean(&man->manager))
-                       drm_mm_takedown(&man->manager);
-               else
-                       ret = -EBUSY;
-
-               spin_unlock(&glob->lru_lock);
+               ret = (*man->func->takedown)(man);
        }
 
        return ret;
@@ -1421,6 +1385,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;
+       man->bdev = bdev;
 
        ret = 0;
        if (type != TTM_PL_SYSTEM) {
@@ -1430,7 +1395,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                               type);
                        return ret;
                }
-               ret = drm_mm_init(&man->manager, 0, p_size);
+
+               ret = (*man->func->init)(man, p_size);
                if (ret)
                        return ret;
        }
@@ -1824,6 +1790,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);
 
+               if (!list_empty(&bo->ddestroy)) {
+                       spin_unlock(&glob->lru_lock);
+                       (void) ttm_bo_cleanup_refs(bo, false, false, false);
+                       kref_put(&bo->list_kref, ttm_bo_release_list);
+                       continue;
+               }
+
                /**
                 * Reserve buffer. Since we unlock while sleeping, we need
                 * to re-check that nobody removed us from the swap-list while
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644 (file)
index 0000000..7410c19
--- /dev/null
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+                              struct ttm_buffer_object *bo,
+                              struct ttm_placement *placement,
+                              struct ttm_mem_reg *mem)
+{
+       struct ttm_bo_global *glob = man->bdev->glob;
+       struct drm_mm *mm = man->priv;
+       struct drm_mm_node *node = NULL;
+       unsigned long lpfn;
+       int ret;
+
+       lpfn = placement->lpfn;
+       if (!lpfn)
+               lpfn = man->size;
+       do {
+               ret = drm_mm_pre_get(mm);
+               if (unlikely(ret))
+                       return ret;
+
+               spin_lock(&glob->lru_lock);
+               node = drm_mm_search_free_in_range(mm,
+                                       mem->num_pages, mem->page_alignment,
+                                       placement->fpfn, lpfn, 1);
+               if (unlikely(node == NULL)) {
+                       spin_unlock(&glob->lru_lock);
+                       return 0;
+               }
+               node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+                                                       mem->page_alignment,
+                                                       placement->fpfn,
+                                                       lpfn);
+               spin_unlock(&glob->lru_lock);
+       } while (node == NULL);
+
+       mem->mm_node = node;
+       mem->start = node->start;
+       return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+                               struct ttm_mem_reg *mem)
+{
+       struct ttm_bo_global *glob = man->bdev->glob;
+
+       if (mem->mm_node) {
+               spin_lock(&glob->lru_lock);
+               drm_mm_put_block(mem->mm_node);
+               spin_unlock(&glob->lru_lock);
+               mem->mm_node = NULL;
+       }
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+                          unsigned long p_size)
+{
+       struct drm_mm *mm;
+       int ret;
+
+       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+       if (!mm)
+               return -ENOMEM;
+
+       ret = drm_mm_init(mm, 0, p_size);
+       if (ret) {
+               kfree(mm);
+               return ret;
+       }
+
+       man->priv = mm;
+       return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+       struct ttm_bo_global *glob = man->bdev->glob;
+       struct drm_mm *mm = man->priv;
+       int ret = 0;
+
+       spin_lock(&glob->lru_lock);
+       if (drm_mm_clean(mm)) {
+               drm_mm_takedown(mm);
+               kfree(mm);
+               man->priv = NULL;
+       } else
+               ret = -EBUSY;
+       spin_unlock(&glob->lru_lock);
+       return ret;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+                            const char *prefix)
+{
+       struct ttm_bo_global *glob = man->bdev->glob;
+       struct drm_mm *mm = man->priv;
+
+       spin_lock(&glob->lru_lock);
+       drm_mm_debug_table(mm, prefix);
+       spin_unlock(&glob->lru_lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+       .init = ttm_bo_man_init,
+       .takedown = ttm_bo_man_takedown,
+       .get_node = ttm_bo_man_get_node,
+       .put_node = ttm_bo_man_put_node,
+       .debug = ttm_bo_man_debug
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
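A driver opts into this generic range manager from its init_mem_type() hook; ttm_bo_init_mm() then calls man->func->init(man, p_size), as the ttm_bo.c hunk above shows. A minimal sketch modeled on the vmwgfx hunk further down (flags and caching values are illustrative only):

static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		/* drm_mm-backed range allocation, sized in pages */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}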
index 3451a82adba76c31672ee96f086146f5da1ab12b..3106d5bcce32d908f0991ad07eca6936c5c01cde 100644 (file)
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
-
-       if (old_mem->mm_node) {
-               spin_lock(&bo->glob->lru_lock);
-               drm_mm_put_block(old_mem->mm_node);
-               spin_unlock(&bo->glob->lru_lock);
-       }
-       old_mem->mm_node = NULL;
+       ttm_bo_mem_put(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -170,7 +163,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
 
 #ifdef CONFIG_X86
-       dst = kmap_atomic_prot(d, KM_USER0, prot);
+       dst = kmap_atomic_prot(d, prot);
 #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
@@ -183,7 +176,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
        memcpy_fromio(dst, src, PAGE_SIZE);
 
 #ifdef CONFIG_X86
-       kunmap_atomic(dst, KM_USER0);
+       kunmap_atomic(dst);
 #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
@@ -206,7 +199,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 
        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
 #ifdef CONFIG_X86
-       src = kmap_atomic_prot(s, KM_USER0, prot);
+       src = kmap_atomic_prot(s, prot);
 #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
@@ -219,7 +212,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
        memcpy_toio(dst, src, PAGE_SIZE);
 
 #ifdef CONFIG_X86
-       kunmap_atomic(src, KM_USER0);
+       kunmap_atomic(src);
 #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
@@ -263,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
        dir = 1;
 
        if ((old_mem->mem_type == new_mem->mem_type) &&
-           (new_mem->mm_node->start <
-            old_mem->mm_node->start + old_mem->mm_node->size)) {
+           (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }
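The KM_USER0 removals in this file track the kernel-wide move to stacked atomic kmaps: kmap_atomic_prot() and kunmap_atomic() no longer take a km_type slot, and mappings nest in LIFO order instead. A minimal sketch of the new form (the helper name is invented):

static void example_copy_into_page(struct page *page, const void *src,
				   pgprot_t prot)
{
	void *dst = kmap_atomic_prot(page, prot);

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(dst);	/* unmaps the most recently mapped page */
}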
index b8984a5ae5218b375764fe2845d6c8cb5febc548..e1ff4e7a6eb02ee72d09762c0ea70f7afa3bac28 100644 (file)
@@ -51,8 +51,6 @@ static struct drm_driver driver = {
        .reclaim_buffers_locked = NULL,
        .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
        .lastclose = via_lastclose,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = via_ioctls,
        .fops = {
                .owner = THIS_MODULE,
index 4505e17df3f557e21beb14581e72f3356985e345..c9281a1b1d3b27c82c0c2c769745cff5edc1604f 100644 (file)
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
            vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
            vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-           vmwgfx_overlay.o vmwgfx_fence.o
+           vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
index c4f5114aee7c8bf956867dd45da9ddb1f4236b00..80bc37b274e71b29c6b8c5622a020b945c915410 100644 (file)
@@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED;
 
+static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
+       TTM_PL_FLAG_CACHED;
+
 struct ttm_placement vmw_vram_placement = {
        .fpfn = 0,
        .lpfn = 0,
@@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = {
        .busy_placement = &vram_placement_flags
 };
 
+static uint32_t vram_gmr_placement_flags[] = {
+       TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+       VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .num_placement = 2,
+       .placement = vram_gmr_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &gmr_placement_flags
+};
+
 struct ttm_placement vmw_vram_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
@@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = {
 
 struct vmw_ttm_backend {
        struct ttm_backend backend;
+       struct page **pages;
+       unsigned long num_pages;
+       struct vmw_private *dev_priv;
+       int gmr_id;
 };
 
 static int vmw_ttm_populate(struct ttm_backend *backend,
                            unsigned long num_pages, struct page **pages,
                            struct page *dummy_read_page)
 {
+       struct vmw_ttm_backend *vmw_be =
+           container_of(backend, struct vmw_ttm_backend, backend);
+
+       vmw_be->pages = pages;
+       vmw_be->num_pages = num_pages;
+
        return 0;
 }
 
 static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
-       return 0;
+       struct vmw_ttm_backend *vmw_be =
+           container_of(backend, struct vmw_ttm_backend, backend);
+
+       vmw_be->gmr_id = bo_mem->start;
+
+       return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
+                           vmw_be->num_pages, vmw_be->gmr_id);
 }
 
 static int vmw_ttm_unbind(struct ttm_backend *backend)
 {
+       struct vmw_ttm_backend *vmw_be =
+           container_of(backend, struct vmw_ttm_backend, backend);
+
+       vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
        return 0;
 }
 
 static void vmw_ttm_clear(struct ttm_backend *backend)
 {
+       struct vmw_ttm_backend *vmw_be =
+               container_of(backend, struct vmw_ttm_backend, backend);
+
+       vmw_be->pages = NULL;
+       vmw_be->num_pages = 0;
 }
 
 static void vmw_ttm_destroy(struct ttm_backend *backend)
@@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
                return NULL;
 
        vmw_be->backend.func = &vmw_ttm_func;
+       vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
        return &vmw_be->backend;
 }
@@ -142,15 +185,28 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                /* System memory */
 
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->available_caching = TTM_PL_MASK_CACHING;
+               man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
+               man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->available_caching = TTM_PL_MASK_CACHING;
-               man->default_caching = TTM_PL_FLAG_WC;
+               man->available_caching = TTM_PL_FLAG_CACHED;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       case VMW_PL_GMR:
+               /*
+                * "Guest Memory Regions" is an aperture-like feature with
+                * one slot per bo. There is an upper limit on both the
+                * number of slots and the bo size.
+                */
+               man->func = &vmw_gmrid_manager_func;
+               man->gpu_offset = 0;
+               man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_FLAG_CACHED;
+               man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -174,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
        return 0;
 }
 
-static void vmw_move_notify(struct ttm_buffer_object *bo,
-                    struct ttm_mem_reg *new_mem)
-{
-       if (new_mem->mem_type != TTM_PL_SYSTEM)
-               vmw_dmabuf_gmr_unbind(bo);
-}
-
-static void vmw_swap_notify(struct ttm_buffer_object *bo)
-{
-       vmw_dmabuf_gmr_unbind(bo);
-}
-
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -200,10 +244,10 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
-               /* System memory */
+       case VMW_PL_GMR:
                return 0;
        case TTM_PL_VRAM:
-               mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+               mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
@@ -276,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = {
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
-       .move_notify = vmw_move_notify,
-       .swap_notify = vmw_swap_notify,
+       .move_notify = NULL,
+       .swap_notify = NULL,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
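The vmw_vram_gmr_placement added above encodes a preference order for ttm_bo_validate(): try VRAM first, then a GMR slot, with the single busy_placement entry directing the allocator to GMRs once eviction becomes necessary. A hedged sketch of building such a list (all example_* identifiers are invented):

static uint32_t example_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,	/* preferred */
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED	/* fallback */
};

static struct ttm_placement example_placement = {
	.num_placement = 2,
	.placement = example_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &example_placement_flags[1],
};

The execbuf hunk below passes exactly such a list to ttm_bo_validate() and only falls back to plain VRAM when both options are exhausted.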
index 2ef93df9e8ae39f8c9e267a500ee5157f2fda420..10ca97ee02063c6c83c56b27f2d5ac895382dad1 100644 (file)
@@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
-       ida_init(&dev_priv->gmr_ida);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        atomic_set(&dev_priv->fence_queue_waiters, 0);
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
-       INIT_LIST_HEAD(&dev_priv->gmr_lru);
 
        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                goto out_err2;
        }
 
+       dev_priv->has_gmr = true;
+       if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+                          dev_priv->max_gmr_ids) != 0) {
+               DRM_INFO("No GMR memory available. "
+                        "Graphics memory resources are very limited.\n");
+               dev_priv->has_gmr = false;
+       }
+
        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);
 
@@ -440,13 +446,14 @@ out_err4:
 out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
+       if (dev_priv->has_gmr)
+               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
 out_err1:
        vmw_ttm_global_release(dev_priv);
 out_err0:
-       ida_destroy(&dev_priv->gmr_ida);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
@@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev)
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
+       if (dev_priv->has_gmr)
+               (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
-       ida_destroy(&dev_priv->gmr_ida);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
@@ -597,6 +605,8 @@ static void vmw_lastclose(struct drm_device *dev)
 static void vmw_master_init(struct vmw_master *vmaster)
 {
        ttm_lock_init(&vmaster->lock);
+       INIT_LIST_HEAD(&vmaster->fb_surf);
+       mutex_init(&vmaster->fb_surf_mutex);
 }
 
 static int vmw_master_create(struct drm_device *dev,
@@ -608,7 +618,7 @@ static int vmw_master_create(struct drm_device *dev,
        if (unlikely(vmaster == NULL))
                return -ENOMEM;
 
-       ttm_lock_init(&vmaster->lock);
+       vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;
 
@@ -699,6 +709,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+       vmw_kms_idle_workqueues(vmaster);
 
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -751,15 +762,16 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                 * Buffer contents is moved to swappable memory.
                 */
                ttm_bo_swapout_all(&dev_priv->bdev);
+
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
+       case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);
+
                break;
        case PM_RESTORE_PREPARE:
                break;
-       case PM_POST_RESTORE:
-               break;
        default:
                break;
        }
@@ -770,21 +782,98 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
  * These might not be needed with the virtual SVGA device.
  */
 
-int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct vmw_private *dev_priv = vmw_priv(dev);
+
+       if (dev_priv->num_3d_resources != 0) {
+               DRM_INFO("Can't suspend or hibernate "
+                        "while 3D resources are active.\n");
+               return -EBUSY;
+       }
+
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
 }
 
-int vmw_pci_resume(struct pci_dev *pdev)
+static int vmw_pci_resume(struct pci_dev *pdev)
 {
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
 }
 
+static int vmw_pm_suspend(struct device *kdev)
+{
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct pm_message dummy;
+
+       dummy.event = 0;
+
+       return vmw_pci_suspend(pdev, dummy);
+}
+
+static int vmw_pm_resume(struct device *kdev)
+{
+       struct pci_dev *pdev = to_pci_dev(kdev);
+
+       return vmw_pci_resume(pdev);
+}
+
+static int vmw_pm_prepare(struct device *kdev)
+{
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct vmw_private *dev_priv = vmw_priv(dev);
+
+       /**
+        * Release 3d reference held by fbdev and potentially
+        * stop fifo.
+        */
+       dev_priv->suspended = true;
+       if (dev_priv->enable_fb)
+               vmw_3d_resource_dec(dev_priv);
+
+       if (dev_priv->num_3d_resources != 0) {
+
+               DRM_INFO("Can't suspend or hibernate "
+                        "while 3D resources are active.\n");
+
+               if (dev_priv->enable_fb)
+                       vmw_3d_resource_inc(dev_priv);
+               dev_priv->suspended = false;
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static void vmw_pm_complete(struct device *kdev)
+{
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct vmw_private *dev_priv = vmw_priv(dev);
+
+       /**
+        * Reclaim 3d reference held by fbdev and potentially
+        * start fifo.
+        */
+       if (dev_priv->enable_fb)
+               vmw_3d_resource_inc(dev_priv);
+
+       dev_priv->suspended = false;
+}
+
+static const struct dev_pm_ops vmw_pm_ops = {
+       .prepare = vmw_pm_prepare,
+       .complete = vmw_pm_complete,
+       .suspend = vmw_pm_suspend,
+       .resume = vmw_pm_resume,
+};
+
 static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
@@ -798,8 +887,6 @@ static struct drm_driver driver = {
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .reclaim_buffers_locked = NULL,
-       .get_map_ofs = drm_core_get_map_ofs,
-       .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
@@ -821,15 +908,16 @@ static struct drm_driver driver = {
                 .compat_ioctl = drm_compat_ioctl,
 #endif
                 .llseek = noop_llseek,
-                },
+       },
        .pci_driver = {
-                      .name = VMWGFX_DRIVER_NAME,
-                      .id_table = vmw_pci_id_list,
-                      .probe = vmw_probe,
-                      .remove = vmw_remove,
-                      .suspend = vmw_pci_suspend,
-                      .resume = vmw_pci_resume
-                      },
+                .name = VMWGFX_DRIVER_NAME,
+                .id_table = vmw_pci_id_list,
+                .probe = vmw_probe,
+                .remove = vmw_remove,
+                .driver = {
+                        .pm = &vmw_pm_ops
+                }
+        },
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
@@ -863,3 +951,7 @@ module_exit(vmwgfx_exit);
 MODULE_AUTHOR("VMware Inc. and others");
 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
 MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
+              __stringify(VMWGFX_DRIVER_MINOR) "."
+              __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
+              "0");
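The driver registration above drops the legacy pci_driver .suspend/.resume pair in favor of a dev_pm_ops table reached through .driver.pm, which also gives the driver prepare/complete callbacks around the whole transition. A minimal sketch of the same wiring for a hypothetical device (all example_* identifiers invented; probe/remove and id_table elided):

static int example_suspend(struct device *kdev)
{
	/* quiesce the device; used for both suspend and hibernate */
	return 0;
}

static int example_resume(struct device *kdev)
{
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume = example_resume,
};

static struct pci_driver example_pci_driver = {
	.name = "example",
	.driver = {
		.pm = &example_pm_ops,
	},
};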
index 58de6393f611dd79fbbebeab81102dcf0f4abfcb..e7a58d055041bfacd5e4af4b5f6f055dd6214a5b 100644 (file)
@@ -39,9 +39,9 @@
 #include "ttm/ttm_execbuf_util.h"
 #include "ttm/ttm_module.h"
 
-#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_DATE "20100927"
 #define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 2
+#define VMWGFX_DRIVER_MINOR 4
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -49,6 +49,9 @@
 #define VMWGFX_MAX_GMRS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 
+#define VMW_PL_GMR TTM_PL_PRIV0
+#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+
 struct vmw_fpriv {
        struct drm_master *locked_master;
        struct ttm_object_file *tfile;
@@ -57,8 +60,6 @@ struct vmw_fpriv {
 struct vmw_dma_buffer {
        struct ttm_buffer_object base;
        struct list_head validate_list;
-       struct list_head gmr_lru;
-       uint32_t gmr_id;
        bool gmr_bound;
        uint32_t cur_validate_node;
        bool on_validate_list;
@@ -151,6 +152,8 @@ struct vmw_overlay;
 
 struct vmw_master {
        struct ttm_lock lock;
+       struct mutex fb_surf_mutex;
+       struct list_head fb_surf;
 };
 
 struct vmw_vga_topology_state {
@@ -182,6 +185,7 @@ struct vmw_private {
        uint32_t capabilities;
        uint32_t max_gmr_descriptors;
        uint32_t max_gmr_ids;
+       bool has_gmr;
        struct mutex hw_mutex;
 
        /*
@@ -263,14 +267,6 @@ struct vmw_private {
        uint32_t val_seq;
        struct mutex cmdbuf_mutex;
 
-       /**
-        * GMR management. Protected by the lru spinlock.
-        */
-
-       struct ida gmr_ida;
-       struct list_head gmr_lru;
-
-
        /**
         * Operating mode.
         */
@@ -286,6 +282,7 @@ struct vmw_private {
        struct vmw_master *active_master;
        struct vmw_master fbdev_master;
        struct notifier_block pm_nb;
+       bool suspended;
 
        struct mutex release_mutex;
        uint32_t num_3d_resources;
@@ -331,7 +328,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv);
  */
 
 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
-                       struct ttm_buffer_object *bo);
+                       struct page *pages[],
+                       unsigned long num_pages,
+                       int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 
 /**
@@ -380,14 +379,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                                  uint32_t id, struct vmw_dma_buffer **out);
-extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
-extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
-extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
 extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
                                       struct vmw_dma_buffer *bo);
 extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
                                struct vmw_dma_buffer *bo);
-extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -439,6 +434,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_sys_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
@@ -518,6 +514,10 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
                        unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+                               uint32_t pitch,
+                               uint32_t height);
 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
@@ -536,6 +536,12 @@ int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
 int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 
+/**
+ * GMR Id manager
+ */
+
+extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
 /**
  * Inline helper functions
  */
index 8e396850513cab2b6abebf3858227a7463001121..51d9f9f1d7f2aa1b2ea04f309569d71bd9244600 100644 (file)
@@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
                reloc = &sw_context->relocs[i];
                validate = &sw_context->val_bufs[reloc->index];
                bo = validate->bo;
-               reloc->location->offset += bo->offset;
-               reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+               if (bo->mem.mem_type == TTM_PL_VRAM) {
+                       reloc->location->offset += bo->offset;
+                       reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
+               } else
+                       reloc->location->gmrId = bo->mem.start;
        }
        vmw_free_relocations(sw_context);
 }
@@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
        int ret;
 
-       if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-               return 0;
-
        /**
-        * Put BO in VRAM, only if there is space.
+        * Put BO in VRAM if there is space, otherwise as a GMR.
+        * If there is no space in VRAM and GMR ids are all used up,
+        * start evicting GMRs to make room. If the DMA buffer can't be
+        * used as a GMR, this will return -ENOMEM.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
-       if (unlikely(ret == -ERESTARTSYS))
-               return ret;
-
-       /**
-        * Otherwise, set it up as GMR.
-        */
-
-       if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-               return 0;
-
-       ret = vmw_gmr_bind(dev_priv, bo);
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;
 
@@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * previous contents.
         */
 
+       DRM_INFO("Falling through to VRAM.\n");
        ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
        return ret;
 }
index 409e172f4abfe94502be96502e251b5d6b2e54c9..41d9a5b73c03ebae477655eeff56d24e4766d1a0 100644 (file)
@@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
                return -EINVAL;
        }
 
+       if (!vmw_kms_validate_mode_vram(vmw_priv,
+                                       info->fix.line_length,
+                                       var->yoffset + var->yres)) {
+               DRM_ERROR("Requested geometry does not fit in framebuffer\n");
+               return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -205,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
                SVGAFifoCmdUpdate body;
        } *cmd;
 
+       if (vmw_priv->suspended)
+               return;
+
        spin_lock_irqsave(&par->dirty.lock, flags);
        if (!par->dirty.active) {
                spin_unlock_irqrestore(&par->dirty.lock, flags);
@@ -616,7 +626,8 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
                goto err_unlock;
 
        if (bo->mem.mem_type == TTM_PL_VRAM &&
-           bo->mem.mm_node->start < bo->num_pages)
+           bo->mem.start < bo->num_pages &&
+           bo->mem.start > 0)
                (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
                                       false, false);
 
index 5f8908a5d7fd49edd8d2f375ee67fec4fb01586a..de0c5948521dcab39bef89190ca19750a738b983 100644 (file)
@@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
  */
 
 static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
-                                              unsigned long num_pages)
+                                       unsigned long num_pages)
 {
        unsigned long prev_pfn = ~(0UL);
        unsigned long pfn;
@@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
 }
 
 int vmw_gmr_bind(struct vmw_private *dev_priv,
-                struct ttm_buffer_object *bo)
+                struct page *pages[],
+                unsigned long num_pages,
+                int gmr_id)
 {
-       struct ttm_tt *ttm = bo->ttm;
-       unsigned long descriptors;
-       int ret;
-       uint32_t id;
        struct list_head desc_pages;
+       int ret;
 
-       if (!(dev_priv->capabilities & SVGA_CAP_GMR))
+       if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
                return -EINVAL;
 
-       ret = ttm_tt_populate(ttm);
-       if (unlikely(ret != 0))
-               return ret;
-
-       descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
-       if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
+       if (vmw_gmr_count_descriptors(pages, num_pages) >
+           dev_priv->max_gmr_descriptors)
                return -EINVAL;
 
        INIT_LIST_HEAD(&desc_pages);
-       ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
-                                       ttm->num_pages);
-       if (unlikely(ret != 0))
-               return ret;
 
-       ret = vmw_gmr_id_alloc(dev_priv, &id);
+       ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
        if (unlikely(ret != 0))
-               goto out_no_id;
+               return ret;
 
-       vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
+       vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
        vmw_gmr_free_descriptors(&desc_pages);
-       vmw_dmabuf_set_gmr(bo, id);
-       return 0;
 
-out_no_id:
-       vmw_gmr_free_descriptors(&desc_pages);
-       return ret;
+       return 0;
 }
 
+
 void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
 {
        mutex_lock(&dev_priv->hw_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
new file mode 100644 (file)
index 0000000..ac6e0d1
--- /dev/null
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+
+struct vmwgfx_gmrid_man {
+       spinlock_t lock;
+       struct ida gmr_ida;
+       uint32_t max_gmr_ids;
+};
+
+static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+                                 struct ttm_buffer_object *bo,
+                                 struct ttm_placement *placement,
+                                 struct ttm_mem_reg *mem)
+{
+       struct vmwgfx_gmrid_man *gman =
+               (struct vmwgfx_gmrid_man *)man->priv;
+       int ret;
+       int id;
+
+       mem->mm_node = NULL;
+
+       do {
+               if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
+                       return -ENOMEM;
+
+               spin_lock(&gman->lock);
+               ret = ida_get_new(&gman->gmr_ida, &id);
+
+               if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
+                       ida_remove(&gman->gmr_ida, id);
+                       spin_unlock(&gman->lock);
+                       return 0;
+               }
+
+               spin_unlock(&gman->lock);
+
+       } while (ret == -EAGAIN);
+
+       if (likely(ret == 0)) {
+               mem->mm_node = gman;
+               mem->start = id;
+       }
+
+       return ret;
+}
+
+static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
+                                  struct ttm_mem_reg *mem)
+{
+       struct vmwgfx_gmrid_man *gman =
+               (struct vmwgfx_gmrid_man *)man->priv;
+
+       if (mem->mm_node) {
+               spin_lock(&gman->lock);
+               ida_remove(&gman->gmr_ida, mem->start);
+               spin_unlock(&gman->lock);
+               mem->mm_node = NULL;
+       }
+}
+
+static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
+                             unsigned long p_size)
+{
+       struct vmwgfx_gmrid_man *gman =
+               kzalloc(sizeof(*gman), GFP_KERNEL);
+
+       if (unlikely(gman == NULL))
+               return -ENOMEM;
+
+       spin_lock_init(&gman->lock);
+       ida_init(&gman->gmr_ida);
+       gman->max_gmr_ids = p_size;
+       man->priv = (void *) gman;
+       return 0;
+}
+
+static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+{
+       struct vmwgfx_gmrid_man *gman =
+               (struct vmwgfx_gmrid_man *)man->priv;
+
+       if (gman) {
+               ida_destroy(&gman->gmr_ida);
+               kfree(gman);
+       }
+       return 0;
+}
+
+static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+                               const char *prefix)
+{
+       printk(KERN_INFO "%s: No debug info available for the GMR "
+              "id manager.\n", prefix);
+}
+
+const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+       .init = vmw_gmrid_man_init,
+       .takedown = vmw_gmrid_man_takedown,
+       .get_node = vmw_gmrid_man_get_node,
+       .put_node = vmw_gmrid_man_put_node,
+       .debug = vmw_gmrid_man_debug
+};
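The get_node path above uses the classic two-phase ida idiom so that nothing sleeps while the spinlock is held: preload with GFP_KERNEL outside the lock, then retry as long as ida_get_new() reports -EAGAIN. The same idiom in isolation (the helper name is invented):

static int example_alloc_id(struct ida *ida, spinlock_t *lock, int *p_id)
{
	int ret;

	do {
		if (ida_pre_get(ida, GFP_KERNEL) == 0)
			return -ENOMEM;	/* preallocation failed */

		spin_lock(lock);
		ret = ida_get_new(ida, p_id);	/* lowest free id */
		spin_unlock(lock);
	} while (ret == -EAGAIN);	/* preload raced away; retry */

	return ret;
}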
index 1c7a316454d80ceb9aaeb6c83d6ac6a82f1cae1f..570d57775a58de0eebf9574f5ce3782fafcae508 100644 (file)
@@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        case DRM_VMW_PARAM_FIFO_CAPS:
                param->value = dev_priv->fifo.capabilities;
                break;
+       case DRM_VMW_PARAM_MAX_FB_SIZE:
+               param->value = dev_priv->vram_size;
+               break;
        default:
                DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                          param->param);
index e882ba099f0c33dab30f12f7b9328b3b628ac712..87c6e6156d7de8bc809a62bab06dbf64a7da512b 100644 (file)
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface {
        struct delayed_work d_work;
        struct mutex work_lock;
        bool present_fs;
+       struct list_head head;
+       struct drm_master *master;
 };
 
+/**
+ * vmw_kms_idle_workqueues - Flush workqueues on this master
+ *
+ * @vmaster: Pointer identifying the master whose surfaces' dirty work
+ * queues we idle.
+ *
+ * This function should be called with the ttm lock held in exclusive mode
+ * to idle all dirty work queues before the fifo is taken down.
+ *
+ * The work task may requeue itself, but once the flush returns we know
+ * there is nothing left to present: the ttm lock is held in exclusive
+ * mode, so the fifo will never be used.
+ */
+
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
+{
+       struct vmw_framebuffer_surface *entry;
+
+       mutex_lock(&vmaster->fb_surf_mutex);
+       list_for_each_entry(entry, &vmaster->fb_surf, head) {
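+               /*
+                * If work was pending, run it synchronously once; the
+                * handler may requeue itself, so cancel a second time to
+                * leave the queue idle.
+                */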
+               if (cancel_delayed_work_sync(&entry->d_work))
+                       (void) entry->d_work.work.func(&entry->d_work.work);
+
+               (void) cancel_delayed_work_sync(&entry->d_work);
+       }
+       mutex_unlock(&vmaster->fb_surf_mutex);
+}
+
 void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
-       struct vmw_framebuffer_surface *vfb =
+       struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(framebuffer);
+       struct vmw_master *vmaster = vmw_master(vfbs->master);
+
 
-       cancel_delayed_work_sync(&vfb->d_work);
+       mutex_lock(&vmaster->fb_surf_mutex);
+       list_del(&vfbs->head);
+       mutex_unlock(&vmaster->fb_surf_mutex);
+
+       cancel_delayed_work_sync(&vfbs->d_work);
+       drm_master_put(&vfbs->master);
        drm_framebuffer_cleanup(framebuffer);
-       vmw_surface_unreference(&vfb->surface);
+       vmw_surface_unreference(&vfbs->surface);
 
-       kfree(framebuffer);
+       kfree(vfbs);
 }
 
 static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
                SVGA3dCopyRect cr;
        } *cmd;
 
+       /*
+        * Strictly we should take the ttm_lock in read mode before accessing
+        * the fifo, to make sure the fifo is present and up. However,
+        * instead we flush all workqueues under the ttm lock in exclusive mode
+        * before taking down the fifo.
+        */
        mutex_lock(&vfbs->work_lock);
        if (!vfbs->present_fs)
                goto out_unlock;
@@ -392,17 +435,20 @@ out_unlock:
 
 
 int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+                                 struct drm_file *file_priv,
                                  unsigned flags, unsigned color,
                                  struct drm_clip_rect *clips,
                                  unsigned num_clips)
 {
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(framebuffer);
        struct vmw_surface *surf = vfbs->surface;
        struct drm_clip_rect norect;
        SVGA3dCopyRect *cr;
        int i, inc = 1;
+       int ret;
 
        struct {
                SVGA3dCmdHeader header;
@@ -410,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
                SVGA3dCopyRect cr;
        } *cmd;
 
+       if (unlikely(vfbs->master != file_priv->master))
+               return -EINVAL;
+
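+       /*
+        * Take the per-master ttm lock in read mode (interruptible) so
+        * the fifo cannot be taken down while we submit to it.
+        */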
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
+
        if (!num_clips ||
            !(dev_priv->fifo.capabilities &
              SVGA_FIFO_CAP_SCREEN_OBJECT)) {
@@ -425,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
                         */
                        vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
                }
+               ttm_read_unlock(&vmaster->lock);
                return 0;
        }
 
@@ -442,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
+               ttm_read_unlock(&vmaster->lock);
                return -ENOMEM;
        }
 
@@ -461,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
        }
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
-
+       ttm_read_unlock(&vmaster->lock);
        return 0;
 }
 
@@ -471,16 +526,57 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
        .create_handle = vmw_framebuffer_create_handle,
 };
 
-int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
-                                   struct vmw_surface *surface,
-                                   struct vmw_framebuffer **out,
-                                   unsigned width, unsigned height)
+static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+                                          struct drm_file *file_priv,
+                                          struct vmw_surface *surface,
+                                          struct vmw_framebuffer **out,
+                                          const struct drm_mode_fb_cmd
+                                          *mode_cmd)
 
 {
        struct drm_device *dev = dev_priv->dev;
        struct vmw_framebuffer_surface *vfbs;
+       enum SVGA3dSurfaceFormat format;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;
 
+       /*
+        * Sanity checks.
+        */
+
+       if (unlikely(surface->mip_levels[0] != 1 ||
+                    surface->num_sizes != 1 ||
+                    surface->sizes[0].width < mode_cmd->width ||
+                    surface->sizes[0].height < mode_cmd->height ||
+                    surface->sizes[0].depth != 1)) {
+               DRM_ERROR("Incompatible surface dimensions "
+                         "for requested mode.\n");
+               return -EINVAL;
+       }
+
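+       /* Translate the requested depth to the matching SVGA3D format. */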
+       switch (mode_cmd->depth) {
+       case 32:
+               format = SVGA3D_A8R8G8B8;
+               break;
+       case 24:
+               format = SVGA3D_X8R8G8B8;
+               break;
+       case 16:
+               format = SVGA3D_R5G6B5;
+               break;
+       case 15:
+               format = SVGA3D_A1R5G5B5;
+               break;
+       default:
+               DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+               return -EINVAL;
+       }
+
+       if (unlikely(format != surface->format)) {
+               DRM_ERROR("Invalid surface format for requested mode.\n");
+               return -EINVAL;
+       }
+
        vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
        if (!vfbs) {
                ret = -ENOMEM;
@@ -498,16 +594,22 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
        }
 
        /* XXX get the first 3 from the surface info */
-       vfbs->base.base.bits_per_pixel = 32;
-       vfbs->base.base.pitch = width * 32 / 4;
-       vfbs->base.base.depth = 24;
-       vfbs->base.base.width = width;
-       vfbs->base.base.height = height;
+       vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
+       vfbs->base.base.pitch = mode_cmd->pitch;
+       vfbs->base.base.depth = mode_cmd->depth;
+       vfbs->base.base.width = mode_cmd->width;
+       vfbs->base.base.height = mode_cmd->height;
        vfbs->base.pin = &vmw_surface_dmabuf_pin;
        vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
        vfbs->surface = surface;
+       vfbs->master = drm_master_get(file_priv->master);
        mutex_init(&vfbs->work_lock);
+
+       mutex_lock(&vmaster->fb_surf_mutex);
        INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
+       list_add_tail(&vfbs->head, &vmaster->fb_surf);
+       mutex_unlock(&vmaster->fb_surf_mutex);
+
        *out = &vfbs->base;
 
        return 0;
@@ -544,18 +646,25 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 }
 
 int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+                                struct drm_file *file_priv,
                                 unsigned flags, unsigned color,
                                 struct drm_clip_rect *clips,
                                 unsigned num_clips)
 {
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct drm_clip_rect norect;
+       int ret;
        struct {
                uint32_t header;
                SVGAFifoCmdUpdate body;
        } *cmd;
        int i, increment = 1;
 
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
+
        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
@@ -570,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
+               ttm_read_unlock(&vmaster->lock);
                return -ENOMEM;
        }
 
@@ -582,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
        }
 
        vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+       ttm_read_unlock(&vmaster->lock);
 
        return 0;
 }
@@ -659,16 +770,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
        return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
 }
 
-int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
-                                  struct vmw_dma_buffer *dmabuf,
-                                  struct vmw_framebuffer **out,
-                                  unsigned width, unsigned height)
+static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+                                         struct vmw_dma_buffer *dmabuf,
+                                         struct vmw_framebuffer **out,
+                                         const struct drm_mode_fb_cmd
+                                         *mode_cmd)
 
 {
        struct drm_device *dev = dev_priv->dev;
        struct vmw_framebuffer_dmabuf *vfbd;
+       unsigned int requested_size;
        int ret;
 
+       requested_size = mode_cmd->height * mode_cmd->pitch;
+       if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+               DRM_ERROR("Screen buffer object size is too small "
+                         "for requested mode.\n");
+               return -EINVAL;
+       }
+
        vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
        if (!vfbd) {
                ret = -ENOMEM;
@@ -685,12 +805,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
                goto out_err3;
        }
 
-       /* XXX get the first 3 from the surface info */
-       vfbd->base.base.bits_per_pixel = 32;
-       vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
-       vfbd->base.base.depth = 24;
-       vfbd->base.base.width = width;
-       vfbd->base.base.height = height;
+       vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
+       vfbd->base.base.pitch = mode_cmd->pitch;
+       vfbd->base.base.depth = mode_cmd->depth;
+       vfbd->base.base.width = mode_cmd->width;
+       vfbd->base.base.height = mode_cmd->height;
        vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
        vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
        vfbd->buffer = dmabuf;
@@ -719,8 +838,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct vmw_framebuffer *vfb = NULL;
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
+       u64 required_size;
        int ret;
 
+       /*
+        * This code should be conditioned on Screen Objects not being used.
+        * If screen objects are used, we can allocate a GMR to hold the
+        * requested framebuffer.
+        */
+
+       required_size = mode_cmd->pitch * mode_cmd->height;
+       if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+               DRM_ERROR("VRAM size is too small for requested mode.\n");
+               return NULL;
+       }
+
+       /*
+        * End conditioned code.
+        */
+
        ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
                                             mode_cmd->handle, &surface);
        if (ret)
@@ -729,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        if (!surface->scanout)
                goto err_not_scanout;
 
-       ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
-                                             mode_cmd->width, mode_cmd->height);
+       ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
+                                             &vfb, mode_cmd);
 
        /* vmw_user_surface_lookup takes one ref so does new_fb */
        vmw_surface_unreference(&surface);
@@ -751,7 +887,7 @@ try_dmabuf:
        }
 
        ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-                                            mode_cmd->width, mode_cmd->height);
+                                            mode_cmd);
 
        /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
        vmw_dmabuf_unreference(&bo);
@@ -889,6 +1025,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
        vmw_priv->num_displays = vmw_read(vmw_priv,
                                          SVGA_REG_NUM_GUEST_DISPLAYS);
 
+       if (vmw_priv->num_displays == 0)
+               vmw_priv->num_displays = 1;
+
        for (i = 0; i < vmw_priv->num_displays; ++i) {
                save = &vmw_priv->vga_save[i];
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
@@ -997,6 +1136,13 @@ out_unlock:
        return ret;
 }
 
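+/*
+ * Check whether a mode's framebuffer (pitch * height bytes) fits in
+ * VRAM. Used by the display code below to filter mode lists.
+ */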
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+                               uint32_t pitch,
+                               uint32_t height)
+{
+       return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+}
+
 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
 {
        return 0;
index 11cb39e3accbfa9581801095ab0398952d2313f1..a01c47ddb5bc724617b74c15ad3bffc2ed63067f 100644 (file)
@@ -427,7 +427,9 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
 {
        struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
        struct drm_device *dev = connector->dev;
+       struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_display_mode *mode = NULL;
+       struct drm_display_mode *bmode;
        struct drm_display_mode prefmode = { DRM_MODE("preferred",
                DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -443,22 +445,30 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
                mode->hdisplay = ldu->pref_width;
                mode->vdisplay = ldu->pref_height;
                mode->vrefresh = drm_mode_vrefresh(mode);
-               drm_mode_probed_add(connector, mode);
+               if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+                                              mode->vdisplay)) {
+                       drm_mode_probed_add(connector, mode);
 
-               if (ldu->pref_mode) {
-                       list_del_init(&ldu->pref_mode->head);
-                       drm_mode_destroy(dev, ldu->pref_mode);
-               }
+                       if (ldu->pref_mode) {
+                               list_del_init(&ldu->pref_mode->head);
+                               drm_mode_destroy(dev, ldu->pref_mode);
+                       }
 
-               ldu->pref_mode = mode;
+                       ldu->pref_mode = mode;
+               }
        }
 
        for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
-               if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
-                   vmw_ldu_connector_builtin[i].vdisplay > max_height)
+               bmode = &vmw_ldu_connector_builtin[i];
+               if (bmode->hdisplay > max_width ||
+                   bmode->vdisplay > max_height)
+                       continue;
+
+               if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+                                               bmode->vdisplay))
                        continue;
 
-               mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
+               mode = drm_mode_duplicate(dev, bmode);
                if (!mode)
                        return 0;
                mode->vrefresh = drm_mode_vrefresh(mode);
index c8c40e9979dbd21442a5d87cf6ac4cdcad08cfad..36e129f0023ffc3d6407b9c0251376aa3fc9f5c0 100644 (file)
@@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
        return bo_user_size + page_array_size;
 }
 
-void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-       struct ttm_bo_global *glob = bo->glob;
-       struct vmw_private *dev_priv =
-               container_of(bo->bdev, struct vmw_private, bdev);
-
-       if (vmw_bo->gmr_bound) {
-               vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
-               spin_lock(&glob->lru_lock);
-               ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
-               spin_unlock(&glob->lru_lock);
-               vmw_bo->gmr_bound = false;
-       }
-}
-
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;
 
-       vmw_dmabuf_gmr_unbind(bo);
        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_bo);
 }
@@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 
        memset(vmw_bo, 0, sizeof(*vmw_bo));
 
-       INIT_LIST_HEAD(&vmw_bo->gmr_lru);
        INIT_LIST_HEAD(&vmw_bo->validate_list);
-       vmw_bo->gmr_id = 0;
-       vmw_bo->gmr_bound = false;
 
        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
@@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;
 
-       vmw_dmabuf_gmr_unbind(bo);
        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_user_bo);
 }
@@ -938,25 +917,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
        vmw_bo->on_validate_list = false;
 }
 
-uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
-{
-       struct vmw_dma_buffer *vmw_bo;
-
-       if (bo->mem.mem_type == TTM_PL_VRAM)
-               return SVGA_GMR_FRAMEBUFFER;
-
-       vmw_bo = vmw_dma_buffer(bo);
-
-       return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
-}
-
-void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-       vmw_bo->gmr_bound = true;
-       vmw_bo->gmr_id = id;
-}
-
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
 {
@@ -985,41 +945,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
        return 0;
 }
 
-/**
- * TODO: Implement a gmr id eviction mechanism. Currently we just fail
- * when we're out of ids, causing GMR space to be allocated
- * out of VRAM.
- */
-
-int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
-{
-       struct ttm_bo_global *glob = dev_priv->bdev.glob;
-       int id;
-       int ret;
-
-       do {
-               if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
-                       return -ENOMEM;
-
-               spin_lock(&glob->lru_lock);
-               ret = ida_get_new(&dev_priv->gmr_ida, &id);
-               spin_unlock(&glob->lru_lock);
-       } while (ret == -EAGAIN);
-
-       if (unlikely(ret != 0))
-               return ret;
-
-       if (unlikely(id >= dev_priv->max_gmr_ids)) {
-               spin_lock(&glob->lru_lock);
-               ida_remove(&dev_priv->gmr_ida, id);
-               spin_unlock(&glob->lru_lock);
-               return -EBUSY;
-       }
-
-       *p_id = (uint32_t) id;
-       return 0;
-}
-
 /*
  * Stream management
  */
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
new file mode 100644 (file)
index 0000000..742c423
--- /dev/null
@@ -0,0 +1,13 @@
+config STUB_POULSBO
+       tristate "Intel GMA500 Stub Driver"
+       depends on PCI
+       # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
+       # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+       select ACPI_VIDEO if ACPI
+       help
+         Choose this option if you have a system that has Intel GMA500
+         (Poulsbo) integrated graphics. If M is selected, the module will
+         be called poulsbo. This is a stub driver for Poulsbo that
+         registers the ACPI video driver to provide the backlight control
+         sysfs entry, since there is no native Poulsbo driver that
+         supports the Intel opregion.
diff --git a/drivers/gpu/stub/Makefile b/drivers/gpu/stub/Makefile
new file mode 100644 (file)
index 0000000..cd940cc
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_STUB_POULSBO) += poulsbo.o
diff --git a/drivers/gpu/stub/poulsbo.c b/drivers/gpu/stub/poulsbo.c
new file mode 100644 (file)
index 0000000..7edfd27
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Intel Poulsbo Stub driver
+ *
+ * Copyright (C) 2010 Novell <jlee@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/video.h>
+
+#define DRIVER_NAME "poulsbo"
+
+enum {
+       CHIP_PSB_8108 = 0,
+       CHIP_PSB_8109 = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+       {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108},
+       {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109},
+       {0, 0, 0}
+};
+
+static int poulsbo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
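+       /* No device setup needed; just expose the ACPI backlight interface. */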
+       return acpi_video_register();
+}
+
+static void poulsbo_remove(struct pci_dev *pdev)
+{
+       acpi_video_unregister();
+}
+
+static struct pci_driver poulsbo_driver = {
+       .name = DRIVER_NAME,
+       .id_table = pciidlist,
+       .probe = poulsbo_probe,
+       .remove = poulsbo_remove,
+};
+
+static int __init poulsbo_init(void)
+{
+       return pci_register_driver(&poulsbo_driver);
+}
+
+static void __exit poulsbo_exit(void)
+{
+       pci_unregister_driver(&poulsbo_driver);
+}
+
+module_init(poulsbo_init);
+module_exit(poulsbo_exit);
+
+MODULE_AUTHOR("Lee, Chun-Yi <jlee@novell.com>");
+MODULE_DESCRIPTION("Poulsbo Stub Driver");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
index cb3ccf3ed221ecef98c996fc50206b17ba1f1c1a..41665d2f9f93c1fa585533184468da72039fe7cf 100644 (file)
@@ -74,7 +74,7 @@ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
 static unsigned int mwait_substates;
 
 /* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
-static unsigned int lapic_timer_reliable_states;
+static unsigned int lapic_timer_reliable_states = (1 << 1);     /* Default to only C1 */
 
 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
@@ -94,7 +94,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .driver_data = (void *) 0x00,
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 3,
-               .power_usage = 1000,
                .target_residency = 6,
                .enter = &intel_idle },
        { /* MWAIT C2 */
@@ -103,7 +102,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .driver_data = (void *) 0x10,
                .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 20,
-               .power_usage = 500,
                .target_residency = 80,
                .enter = &intel_idle },
        { /* MWAIT C3 */
@@ -112,11 +110,46 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .driver_data = (void *) 0x20,
                .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 200,
-               .power_usage = 350,
                .target_residency = 800,
                .enter = &intel_idle },
 };
 
+static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
+       { /* MWAIT C0 */ },
+       { /* MWAIT C1 */
+               .name = "SNB-C1",
+               .desc = "MWAIT 0x00",
+               .driver_data = (void *) 0x00,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 1,
+               .target_residency = 4,
+               .enter = &intel_idle },
+       { /* MWAIT C2 */
+               .name = "SNB-C3",
+               .desc = "MWAIT 0x10",
+               .driver_data = (void *) 0x10,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 80,
+               .target_residency = 160,
+               .enter = &intel_idle },
+       { /* MWAIT C3 */
+               .name = "SNB-C6",
+               .desc = "MWAIT 0x20",
+               .driver_data = (void *) 0x20,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 104,
+               .target_residency = 208,
+               .enter = &intel_idle },
+       { /* MWAIT C4 */
+               .name = "SNB-C7",
+               .desc = "MWAIT 0x30",
+               .driver_data = (void *) 0x30,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 109,
+               .target_residency = 300,
+               .enter = &intel_idle },
+};
+
 static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
        { /* MWAIT C0 */ },
        { /* MWAIT C1 */
@@ -125,7 +158,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .driver_data = (void *) 0x00,
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 1,
-               .power_usage = 1000,
                .target_residency = 4,
                .enter = &intel_idle },
        { /* MWAIT C2 */
@@ -134,7 +166,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .driver_data = (void *) 0x10,
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 20,
-               .power_usage = 500,
                .target_residency = 80,
                .enter = &intel_idle },
        { /* MWAIT C3 */ },
@@ -144,7 +175,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .driver_data = (void *) 0x30,
                .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 100,
-               .power_usage = 250,
                .target_residency = 400,
                .enter = &intel_idle },
        { /* MWAIT C5 */ },
@@ -154,7 +184,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .driver_data = (void *) 0x52,
                .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
                .exit_latency = 140,
-               .power_usage = 150,
                .target_residency = 560,
                .enter = &intel_idle },
 };
@@ -179,13 +208,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
        local_irq_disable();
 
        /*
-        * If the state flag indicates that the TLB will be flushed or if this
-        * is the deepest c-state supported, do a voluntary leave mm to avoid
-        * costly and mostly unnecessary wakeups for flushing the user TLB's
-        * associated with the active mm.
+        * leave_mm() to avoid costly and often unnecessary wakeups
+        * for flushing the user TLB's associated with the active mm.
         */
-       if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
-           (&dev->states[dev->state_count - 1] == state))
+       if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
                leave_mm(cpu);
 
        if (!(lapic_timer_reliable_states & (1 << (cstate))))
@@ -269,9 +295,14 @@ static int intel_idle_probe(void)
 
        case 0x1C:      /* 28 - Atom Processor */
        case 0x26:      /* 38 - Lincroft Atom Processor */
-               lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+               lapic_timer_reliable_states = (1 << 1); /* C1 */
                cpuidle_state_table = atom_cstates;
                break;
+
+       case 0x2A:      /* SNB */
+       case 0x2D:      /* SNB Xeon */
+               cpuidle_state_table = snb_cstates;
+               break;
 #ifdef FUTURE_USE
        case 0x17:      /* 23 - Core 2 Duo */
                lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
index ae7c2880e6248fff4746613d69330f0f07e55fce..91916a8d5de408b239050f614f86ed09d4717433 100644 (file)
@@ -59,8 +59,8 @@ __ib_get_agent_port(struct ib_device *device, int port_num)
        struct ib_agent_port_private *entry;
 
        list_for_each_entry(entry, &ib_agent_port_list, port_list) {
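+               /*
+                * Key the lookup off agent[1] (the GSI agent), which is
+                * always registered; agent[0] (SMI) may now be NULL on
+                * Ethernet (RoCE) ports.
+                */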
-               if (entry->agent[0]->device == device &&
-                   entry->agent[0]->port_num == port_num)
+               if (entry->agent[1]->device == device &&
+                   entry->agent[1]->port_num == port_num)
                        return entry;
        }
        return NULL;
@@ -155,14 +155,16 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
                goto error1;
        }
 
-       /* Obtain send only MAD agent for SMI QP */
-       port_priv->agent[0] = ib_register_mad_agent(device, port_num,
-                                                   IB_QPT_SMI, NULL, 0,
-                                                   &agent_send_handler,
-                                                   NULL, NULL);
-       if (IS_ERR(port_priv->agent[0])) {
-               ret = PTR_ERR(port_priv->agent[0]);
-               goto error2;
+       if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+               /* Obtain send only MAD agent for SMI QP */
+               port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+                                                           IB_QPT_SMI, NULL, 0,
+                                                           &agent_send_handler,
+                                                           NULL, NULL);
+               if (IS_ERR(port_priv->agent[0])) {
+                       ret = PTR_ERR(port_priv->agent[0]);
+                       goto error2;
+               }
        }
 
        /* Obtain send only MAD agent for GSI QP */
@@ -182,7 +184,8 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
        return 0;
 
 error3:
-       ib_unregister_mad_agent(port_priv->agent[0]);
+       if (port_priv->agent[0])
+               ib_unregister_mad_agent(port_priv->agent[0]);
 error2:
        kfree(port_priv);
 error1:
@@ -205,7 +208,9 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
        spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
        ib_unregister_mad_agent(port_priv->agent[1]);
-       ib_unregister_mad_agent(port_priv->agent[0]);
+       if (port_priv->agent[0])
+               ib_unregister_mad_agent(port_priv->agent[0]);
+
        kfree(port_priv);
        return 0;
 }
index b930b8110a63b22f23822cc812740b0acf120988..6884da24fde1e8338ef572aec075167faf5a73f0 100644 (file)
@@ -59,6 +59,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define CMA_CM_RESPONSE_TIMEOUT 20
 #define CMA_MAX_CM_RETRIES 15
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
+#define CMA_IBOE_PACKET_LIFETIME 18
 
 static void cma_add_one(struct ib_device *device);
 static void cma_remove_one(struct ib_device *device);
@@ -157,6 +158,7 @@ struct cma_multicast {
        struct list_head        list;
        void                    *context;
        struct sockaddr_storage addr;
+       struct kref             mcref;
 };
 
 struct cma_work {
@@ -173,6 +175,12 @@ struct cma_ndev_work {
        struct rdma_cm_event    event;
 };
 
+struct iboe_mcast_work {
+       struct work_struct       work;
+       struct rdma_id_private  *id;
+       struct cma_multicast    *mc;
+};
+
 union cma_ip_addr {
        struct in6_addr ip6;
        struct {
@@ -281,6 +289,8 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
        atomic_inc(&cma_dev->refcount);
        id_priv->cma_dev = cma_dev;
        id_priv->id.device = cma_dev->device;
+       id_priv->id.route.addr.dev_addr.transport =
+               rdma_node_get_transport(cma_dev->device->node_type);
        list_add_tail(&id_priv->list, &cma_dev->id_list);
 }
 
@@ -290,6 +300,14 @@ static inline void cma_deref_dev(struct cma_device *cma_dev)
                complete(&cma_dev->comp);
 }
 
+static inline void release_mc(struct kref *kref)
+{
+       struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
+
+       kfree(mc->multicast.ib);
+       kfree(mc);
+}
+
 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
 {
        list_del(&id_priv->list);
@@ -323,22 +341,63 @@ static int cma_set_qkey(struct rdma_id_private *id_priv)
        return ret;
 }
 
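+/*
+ * Return 0 if @gid appears in the GID table of @port_num, 1 on a query
+ * failure (give up on this device), or -EAGAIN if the gid is not on
+ * this port (try the next one).
+ */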
+static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
+{
+       int i;
+       int err;
+       struct ib_port_attr props;
+       union ib_gid tmp;
+
+       err = ib_query_port(device, port_num, &props);
+       if (err)
+               return 1;
+
+       for (i = 0; i < props.gid_tbl_len; ++i) {
+               err = ib_query_gid(device, port_num, i, &tmp);
+               if (err)
+                       return 1;
+               if (!memcmp(&tmp, gid, sizeof tmp))
+                       return 0;
+       }
+
+       return -EAGAIN;
+}
+
 static int cma_acquire_dev(struct rdma_id_private *id_priv)
 {
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        struct cma_device *cma_dev;
-       union ib_gid gid;
+       union ib_gid gid, iboe_gid;
        int ret = -ENODEV;
+       u8 port;
+       enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
+               IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
-       rdma_addr_get_sgid(dev_addr, &gid);
+       iboe_addr_get_sgid(dev_addr, &iboe_gid);
+       memcpy(&gid, dev_addr->src_dev_addr +
+              rdma_addr_gid_offset(dev_addr), sizeof gid);
        list_for_each_entry(cma_dev, &dev_list, list) {
-               ret = ib_find_cached_gid(cma_dev->device, &gid,
-                                        &id_priv->id.port_num, NULL);
-               if (!ret) {
-                       cma_attach_to_dev(id_priv, cma_dev);
-                       break;
+               for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+                       if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
+                               if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+                                   rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+                                       ret = find_gid_port(cma_dev->device, &iboe_gid, port);
+                               else
+                                       ret = find_gid_port(cma_dev->device, &gid, port);
+
+                               if (!ret) {
+                                       id_priv->id.port_num = port;
+                                       goto out;
+                               } else if (ret == 1) {
+                                       break;
+                               }
+                       }
                }
        }
+
+out:
+       if (!ret)
+               cma_attach_to_dev(id_priv, cma_dev);
+
        return ret;
 }
 
@@ -556,10 +615,16 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 {
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        int ret;
+       u16 pkey;
+
+       if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
+           IB_LINK_LAYER_INFINIBAND)
+               pkey = ib_addr_get_pkey(dev_addr);
+       else
+               pkey = 0xffff;
 
        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
-                                 ib_addr_get_pkey(dev_addr),
-                                 &qp_attr->pkey_index);
+                                 pkey, &qp_attr->pkey_index);
        if (ret)
                return ret;
 
@@ -737,8 +802,8 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
 
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
-       switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-       case RDMA_TRANSPORT_IB:
+       switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
+       case IB_LINK_LAYER_INFINIBAND:
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
                break;
@@ -816,8 +881,17 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
-               ib_sa_free_multicast(mc->multicast.ib);
-               kfree(mc);
+               switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
+               case IB_LINK_LAYER_INFINIBAND:
+                       ib_sa_free_multicast(mc->multicast.ib);
+                       kfree(mc);
+                       break;
+               case IB_LINK_LAYER_ETHERNET:
+                       kref_put(&mc->mcref, release_mc);
+                       break;
+               default:
+                       break;
+               }
        }
 }
 
@@ -833,7 +907,7 @@ void rdma_destroy_id(struct rdma_cm_id *id)
        mutex_lock(&lock);
        if (id_priv->cma_dev) {
                mutex_unlock(&lock);
-               switch (rdma_node_get_transport(id->device->node_type)) {
+               switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -1708,6 +1782,81 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
        return 0;
 }
 
+static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+{
+       struct rdma_route *route = &id_priv->id.route;
+       struct rdma_addr *addr = &route->addr;
+       struct cma_work *work;
+       int ret;
+       struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
+       struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
+       struct net_device *ndev = NULL;
+       u16 vid;
+
+       if (src_addr->sin_family != dst_addr->sin_family)
+               return -EINVAL;
+
+       work = kzalloc(sizeof *work, GFP_KERNEL);
+       if (!work)
+               return -ENOMEM;
+
+       work->id = id_priv;
+       INIT_WORK(&work->work, cma_work_handler);
+
+       route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
+       if (!route->path_rec) {
+               ret = -ENOMEM;
+               goto err1;
+       }
+
+       route->num_paths = 1;
+
+       if (addr->dev_addr.bound_dev_if)
+               ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
+       if (!ndev) {
+               ret = -ENODEV;
+               goto err2;
+       }
+
+       vid = rdma_vlan_dev_vlan_id(ndev);
+
+       iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
+       iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+
+       route->path_rec->hop_limit = 1;
+       route->path_rec->reversible = 1;
+       route->path_rec->pkey = cpu_to_be16(0xffff);
+       route->path_rec->mtu_selector = IB_SA_EQ;
+       route->path_rec->sl = id_priv->tos >> 5;
+
+       route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+       route->path_rec->rate_selector = IB_SA_EQ;
+       route->path_rec->rate = iboe_get_rate(ndev);
+       dev_put(ndev);
+       route->path_rec->packet_life_time_selector = IB_SA_EQ;
+       route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+       if (!route->path_rec->mtu) {
+               ret = -EINVAL;
+               goto err2;
+       }
+
+       work->old_state = CMA_ROUTE_QUERY;
+       work->new_state = CMA_ROUTE_RESOLVED;
+       work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+       work->event.status = 0;
+
+       queue_work(cma_wq, &work->work);
+
+       return 0;
+
+err2:
+       kfree(route->path_rec);
+       route->path_rec = NULL;
+err1:
+       kfree(work);
+       return ret;
+}
+
 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 {
        struct rdma_id_private *id_priv;
@@ -1720,7 +1869,16 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
        atomic_inc(&id_priv->refcount);
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
-               ret = cma_resolve_ib_route(id_priv, timeout_ms);
+               switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+               case IB_LINK_LAYER_INFINIBAND:
+                       ret = cma_resolve_ib_route(id_priv, timeout_ms);
+                       break;
+               case IB_LINK_LAYER_ETHERNET:
+                       ret = cma_resolve_iboe_route(id_priv);
+                       break;
+               default:
+                       ret = -ENOSYS;
+               }
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = cma_resolve_iw_route(id_priv, timeout_ms);
@@ -1773,7 +1931,7 @@ port_found:
                goto out;
 
        id_priv->id.route.addr.dev_addr.dev_type =
-               (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
+               (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
                ARPHRD_INFINIBAND : ARPHRD_ETHER;
 
        rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2758,6 +2916,102 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
        return 0;
 }
 
+static void iboe_mcast_work_handler(struct work_struct *work)
+{
+       struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
+       struct cma_multicast *mc = mw->mc;
+       struct ib_sa_multicast *m = mc->multicast.ib;
+
+       mc->multicast.ib->context = mc;
+       cma_ib_mc_handler(0, m);
+       kref_put(&mc->mcref, release_mc);
+       kfree(mw);
+}
+
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
+{
+       struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+       struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+
+       if (cma_any_addr(addr)) {
+               memset(mgid, 0, sizeof *mgid);
+       } else if (addr->sa_family == AF_INET6) {
+               memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
+       } else {
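+               /*
+                * Synthesize an MGID from the IPv4 multicast address:
+                * ff0e::ffff:<addr>, i.e. the IPv4-mapped form behind a
+                * multicast prefix.
+                */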
+               mgid->raw[0] = 0xff;
+               mgid->raw[1] = 0x0e;
+               mgid->raw[2] = 0;
+               mgid->raw[3] = 0;
+               mgid->raw[4] = 0;
+               mgid->raw[5] = 0;
+               mgid->raw[6] = 0;
+               mgid->raw[7] = 0;
+               mgid->raw[8] = 0;
+               mgid->raw[9] = 0;
+               mgid->raw[10] = 0xff;
+               mgid->raw[11] = 0xff;
+               *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
+       }
+}
+
+static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+                                  struct cma_multicast *mc)
+{
+       struct iboe_mcast_work *work;
+       struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+       int err;
+       struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+       struct net_device *ndev = NULL;
+
+       if (cma_zero_addr((struct sockaddr *)&mc->addr))
+               return -EINVAL;
+
+       work = kzalloc(sizeof *work, GFP_KERNEL);
+       if (!work)
+               return -ENOMEM;
+
+       mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
+       if (!mc->multicast.ib) {
+               err = -ENOMEM;
+               goto out1;
+       }
+
+       cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
+
+       mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
+       if (id_priv->id.ps == RDMA_PS_UDP)
+               mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+
+       if (dev_addr->bound_dev_if)
+               ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+       if (!ndev) {
+               err = -ENODEV;
+               goto out2;
+       }
+       mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
+       mc->multicast.ib->rec.hop_limit = 1;
+       mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
+       dev_put(ndev);
+       if (!mc->multicast.ib->rec.mtu) {
+               err = -EINVAL;
+               goto out2;
+       }
+       iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+       work->id = id_priv;
+       work->mc = mc;
+       INIT_WORK(&work->work, iboe_mcast_work_handler);
+       kref_get(&mc->mcref);
+       queue_work(cma_wq, &work->work);
+
+       return 0;
+
+out2:
+       kfree(mc->multicast.ib);
+out1:
+       kfree(work);
+       return err;
+}
+
 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
                        void *context)
 {
@@ -2784,7 +3038,17 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
-               ret = cma_join_ib_multicast(id_priv, mc);
+               switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+               case IB_LINK_LAYER_INFINIBAND:
+                       ret = cma_join_ib_multicast(id_priv, mc);
+                       break;
+               case IB_LINK_LAYER_ETHERNET:
+                       kref_init(&mc->mcref);
+                       ret = cma_iboe_join_multicast(id_priv, mc);
+                       break;
+               default:
+                       ret = -EINVAL;
+               }
                break;
        default:
                ret = -ENOSYS;
@@ -2817,8 +3081,19 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
                                ib_detach_mcast(id->qp,
                                                &mc->multicast.ib->rec.mgid,
                                                mc->multicast.ib->rec.mlid);
-                       ib_sa_free_multicast(mc->multicast.ib);
-                       kfree(mc);
+                       if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
+                               switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+                               case IB_LINK_LAYER_INFINIBAND:
+                                       ib_sa_free_multicast(mc->multicast.ib);
+                                       kfree(mc);
+                                       break;
+                               case IB_LINK_LAYER_ETHERNET:
+                                       kref_put(&mc->mcref, release_mc);
+                                       break;
+                               default:
+                                       break;
+                               }
+                       }
                        return;
                }
        }
index bfead5bc25f6e14efcc09c4e5be51050785acb53..2a1e9ae134b4c330bbffd048d6ce037e75652d45 100644 (file)
@@ -506,6 +506,8 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
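+               /*
+                * Also clear the connect-wait state on this error path so
+                * that waiters are not left blocked.
+                */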
+               clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+               wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
@@ -565,6 +567,8 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+               clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+               wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
index ef1304f151dc6fbaf9cf847a79557b1953f3e448..822cfdcd9f785af9fffb9a2b2b9b9f037b328e33 100644 (file)
@@ -2598,6 +2598,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
        struct ib_mad_private *recv;
        struct ib_mad_list_head *mad_list;
 
+       if (!qp_info->qp)
+               return;
+
        while (!list_empty(&qp_info->recv_queue.list)) {
 
                mad_list = list_entry(qp_info->recv_queue.list.next,
@@ -2639,6 +2642,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                qp = port_priv->qp_info[i].qp;
+               if (!qp)
+                       continue;
+
                /*
                 * PKey index for QP1 is irrelevant but
                 * one is needed for the Reset to Init transition
@@ -2680,6 +2686,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
        }
 
        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
+               if (!port_priv->qp_info[i].qp)
+                       continue;
+
                ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't post receive WRs\n");
@@ -2758,6 +2767,9 @@ error:
 
 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
 {
+       if (!qp_info->qp)
+               return;
+
        ib_destroy_qp(qp_info->qp);
        kfree(qp_info->snoop_table);
 }
@@ -2773,6 +2785,7 @@ static int ib_mad_port_open(struct ib_device *device,
        struct ib_mad_port_private *port_priv;
        unsigned long flags;
        char name[sizeof "ib_mad123"];
+       int has_smi;
 
        /* Create new device info */
        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
@@ -2788,7 +2801,11 @@ static int ib_mad_port_open(struct ib_device *device,
        init_mad_qp(port_priv, &port_priv->qp_info[0]);
        init_mad_qp(port_priv, &port_priv->qp_info[1]);
 
-       cq_size = (mad_sendq_size + mad_recvq_size) * 2;
+       cq_size = mad_sendq_size + mad_recvq_size;
+       has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+       if (has_smi)
+               cq_size *= 2;
+
        port_priv->cq = ib_create_cq(port_priv->device,
                                     ib_mad_thread_completion_handler,
                                     NULL, port_priv, cq_size, 0);
@@ -2812,9 +2829,11 @@ static int ib_mad_port_open(struct ib_device *device,
                goto error5;
        }
 
-       ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
-       if (ret)
-               goto error6;
+       if (has_smi) {
+               ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
+               if (ret)
+                       goto error6;
+       }
        ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
        if (ret)
                goto error7;
index a519801dcfb758f785a885ce49259fa7b1a01f8d..68b4162fd9d2b17ee33a2d565b381bafa94c7ae2 100644 (file)
@@ -774,6 +774,10 @@ static void mcast_event_handler(struct ib_event_handler *handler,
        int index;
 
        dev = container_of(handler, struct mcast_device, event_handler);
+       if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
+           IB_LINK_LAYER_INFINIBAND)
+               return;
+
        index = event->element.port_num - dev->start_port;
 
        switch (event->event) {
@@ -796,6 +800,7 @@ static void mcast_add_one(struct ib_device *device)
        struct mcast_device *dev;
        struct mcast_port *port;
        int i;
+       int count = 0;
 
        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;
@@ -813,6 +818,9 @@ static void mcast_add_one(struct ib_device *device)
        }
 
        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
+               if (rdma_port_get_link_layer(device, dev->start_port + i) !=
+                   IB_LINK_LAYER_INFINIBAND)
+                       continue;
                port = &dev->port[i];
                port->dev = dev;
                port->port_num = dev->start_port + i;
@@ -820,6 +828,12 @@ static void mcast_add_one(struct ib_device *device)
                port->table = RB_ROOT;
                init_completion(&port->comp);
                atomic_set(&port->refcount, 1);
+               ++count;
+       }
+
+       if (!count) {
+               kfree(dev);
+               return;
        }
 
        dev->device = device;
@@ -843,9 +857,12 @@ static void mcast_remove_one(struct ib_device *device)
        flush_workqueue(mcast_wq);
 
        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
-               port = &dev->port[i];
-               deref_port(port);
-               wait_for_completion(&port->comp);
+               if (rdma_port_get_link_layer(device, dev->start_port + i) ==
+                   IB_LINK_LAYER_INFINIBAND) {
+                       port = &dev->port[i];
+                       deref_port(port);
+                       wait_for_completion(&port->comp);
+               }
        }
 
        kfree(dev);
index 7e1ffd8ccd5c07084317fecbae15cd3574938991..91a660310b7c03b3c920bb8b70f86a790720414b 100644 (file)
@@ -416,6 +416,9 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
                struct ib_sa_port *port =
                        &sa_dev->port[event->element.port_num - sa_dev->start_port];
 
+               if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+                       return;
+
                spin_lock_irqsave(&port->ah_lock, flags);
                if (port->sm_ah)
                        kref_put(&port->sm_ah->ref, free_sm_ah);
@@ -493,6 +496,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 {
        int ret;
        u16 gid_index;
+       int force_grh;
 
        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = be16_to_cpu(rec->dlid);
@@ -502,7 +506,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
        ah_attr->port_num = port_num;
        ah_attr->static_rate = rec->rate;
 
-       if (rec->hop_limit > 1) {
+       force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
+
+       if (rec->hop_limit > 1 || force_grh) {
                ah_attr->ah_flags = IB_AH_GRH;
                ah_attr->grh.dgid = rec->dgid;
 
@@ -1007,7 +1013,7 @@ static void ib_sa_add_one(struct ib_device *device)
                e = device->phys_port_cnt;
        }
 
-       sa_dev = kmalloc(sizeof *sa_dev +
+       sa_dev = kzalloc(sizeof *sa_dev +
                         (e - s + 1) * sizeof (struct ib_sa_port),
                         GFP_KERNEL);
        if (!sa_dev)
@@ -1017,9 +1023,12 @@ static void ib_sa_add_one(struct ib_device *device)
        sa_dev->end_port   = e;
 
        for (i = 0; i <= e - s; ++i) {
+               spin_lock_init(&sa_dev->port[i].ah_lock);
+               if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+                       continue;
+
                sa_dev->port[i].sm_ah    = NULL;
                sa_dev->port[i].port_num = i + s;
-               spin_lock_init(&sa_dev->port[i].ah_lock);
 
                sa_dev->port[i].agent =
                        ib_register_mad_agent(device, i + s, IB_QPT_GSI,
@@ -1045,13 +1054,15 @@ static void ib_sa_add_one(struct ib_device *device)
                goto err;
 
        for (i = 0; i <= e - s; ++i)
-               update_sm_ah(&sa_dev->port[i].update_task);
+               if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+                       update_sm_ah(&sa_dev->port[i].update_task);
 
        return;
 
 err:
        while (--i >= 0)
-               ib_unregister_mad_agent(sa_dev->port[i].agent);
+               if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+                       ib_unregister_mad_agent(sa_dev->port[i].agent);
 
        kfree(sa_dev);
 
@@ -1071,9 +1082,12 @@ static void ib_sa_remove_one(struct ib_device *device)
        flush_scheduled_work();
 
        for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
-               ib_unregister_mad_agent(sa_dev->port[i].agent);
-               if (sa_dev->port[i].sm_ah)
-                       kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+               if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+                       ib_unregister_mad_agent(sa_dev->port[i].agent);
+                       if (sa_dev->port[i].sm_ah)
+                               kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+               }
        }
 
        kfree(sa_dev);
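
One more note on the sa_query.c changes above: on an Ethernet (IBoE) port there is no LID-routed fabric, so ib_init_ah_from_path() must always build a GRH, not only when hop_limit indicates a router. Restated as a hypothetical predicate (the function name is invented for illustration):

/*
 * Sketch: a GRH is required when the path crosses a router
 * (hop_limit > 1) or when the port's link layer is Ethernet,
 * where GIDs are the only addressing available.
 */
static int path_needs_grh(struct ib_device *device, u8 port_num,
			  struct ib_sa_path_rec *rec)
{
	return rec->hop_limit > 1 ||
	       rdma_port_get_link_layer(device, port_num) ==
	       IB_LINK_LAYER_ETHERNET;
}
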
index 3627300e2a10e1efe8d1435d32313a05b3302507..9ab5df72df7bf3b3c93dfb7ef6dc21b9810592a3 100644 (file)
@@ -222,6 +222,19 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
        }
 }
 
+static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
+                              char *buf)
+{
+       switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
+       case IB_LINK_LAYER_INFINIBAND:
+               return sprintf(buf, "%s\n", "InfiniBand");
+       case IB_LINK_LAYER_ETHERNET:
+               return sprintf(buf, "%s\n", "Ethernet");
+       default:
+               return sprintf(buf, "%s\n", "Unknown");
+       }
+}
+
 static PORT_ATTR_RO(state);
 static PORT_ATTR_RO(lid);
 static PORT_ATTR_RO(lid_mask_count);
@@ -230,6 +243,7 @@ static PORT_ATTR_RO(sm_sl);
 static PORT_ATTR_RO(cap_mask);
 static PORT_ATTR_RO(rate);
 static PORT_ATTR_RO(phys_state);
+static PORT_ATTR_RO(link_layer);
 
 static struct attribute *port_default_attrs[] = {
        &port_attr_state.attr,
@@ -240,6 +254,7 @@ static struct attribute *port_default_attrs[] = {
        &port_attr_cap_mask.attr,
        &port_attr_rate.attr,
        &port_attr_phys_state.attr,
+       &port_attr_link_layer.attr,
        NULL
 };
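
The new attribute surfaces each port's link layer to userspace. A small standalone reader, as a sketch; the device name "mlx4_0" and port number 1 are assumptions for the example, not part of this patch:

/* Userspace sketch: print the link_layer attribute added above. */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/infiniband/mlx4_0/ports/1/link_layer", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("port 1 link layer: %s", buf); /* "InfiniBand", "Ethernet" or "Unknown" */
	fclose(f);
	return 0;
}
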
 
index ac7edc24165cd5902d1557df18a0e130cf7f5458..ca12acf383793615a72ca1a828d544c8e32b0324 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/in6.h>
 #include <linux/miscdevice.h>
 #include <linux/slab.h>
+#include <linux/sysctl.h>
 
 #include <rdma/rdma_user_cm.h>
 #include <rdma/ib_marshall.h>
@@ -50,8 +51,24 @@ MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
 MODULE_LICENSE("Dual BSD/GPL");
 
-enum {
-       UCMA_MAX_BACKLOG        = 128
+static unsigned int max_backlog = 1024;
+
+static struct ctl_table_header *ucma_ctl_table_hdr;
+static ctl_table ucma_ctl_table[] = {
+       {
+               .procname       = "max_backlog",
+               .data           = &max_backlog,
+               .maxlen         = sizeof max_backlog,
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       { }
+};
+
+static struct ctl_path ucma_ctl_path[] = {
+       { .procname = "net" },
+       { .procname = "rdma_ucm" },
+       { }
 };
 
 struct ucma_file {
@@ -583,6 +600,42 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
        }
 }
 
+static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
+                                struct rdma_route *route)
+{
+       struct rdma_dev_addr *dev_addr;
+       struct net_device *dev;
+       u16 vid = 0;
+
+       resp->num_paths = route->num_paths;
+       switch (route->num_paths) {
+       case 0:
+               dev_addr = &route->addr.dev_addr;
+               dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+               if (dev) {
+                       vid = rdma_vlan_dev_vlan_id(dev);
+                       dev_put(dev);
+               }
+
+               iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
+                                   dev_addr->dst_dev_addr, vid);
+               iboe_addr_get_sgid(dev_addr,
+                                  (union ib_gid *) &resp->ib_route[0].sgid);
+               resp->ib_route[0].pkey = cpu_to_be16(0xffff);
+               break;
+       case 2:
+               ib_copy_path_rec_to_user(&resp->ib_route[1],
+                                        &route->path_rec[1]);
+               /* fall through */
+       case 1:
+               ib_copy_path_rec_to_user(&resp->ib_route[0],
+                                        &route->path_rec[0]);
+               break;
+       default:
+               break;
+       }
+}
+
 static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
@@ -617,12 +670,17 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 
        resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;
-       switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
-       case RDMA_TRANSPORT_IB:
-               ucma_copy_ib_route(&resp, &ctx->cm_id->route);
-               break;
-       default:
-               break;
+       if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
+               switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+               case IB_LINK_LAYER_INFINIBAND:
+                       ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+                       break;
+               case IB_LINK_LAYER_ETHERNET:
+                       ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
+                       break;
+               default:
+                       break;
+               }
        }
 
 out:
@@ -686,8 +744,8 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
-                      cmd.backlog : UCMA_MAX_BACKLOG;
+       ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
+                      cmd.backlog : max_backlog;
        ret = rdma_listen(ctx->cm_id, ctx->backlog);
        ucma_put_ctx(ctx);
        return ret;
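
The listen path now clamps the requested backlog against the runtime-tunable limit instead of the old compile-time constant. The policy in isolation, as a sketch (the helper name is made up for illustration):

/* Sketch of the clamp above: fall back to max_backlog when the
 * request is non-positive or exceeds the sysctl-controlled limit. */
static int ucma_clamp_backlog(int requested, unsigned int limit)
{
	if (requested > 0 && (unsigned int)requested < limit)
		return requested;
	return limit;
}
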
@@ -1279,16 +1337,26 @@ static int __init ucma_init(void)
        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
        if (ret) {
                printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
-               goto err;
+               goto err1;
+       }
+
+       ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
+       if (!ucma_ctl_table_hdr) {
+               printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+               ret = -ENOMEM;
+               goto err2;
        }
        return 0;
-err:
+err2:
+       device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
+err1:
        misc_deregister(&ucma_misc);
        return ret;
 }
 
 static void __exit ucma_cleanup(void)
 {
+       unregister_sysctl_table(ucma_ctl_table_hdr);
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
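
The relabelled error path keeps the usual kernel unwind discipline: each failure jumps to the label that undoes only the steps already completed, and ucma_cleanup() tears everything down in the exact reverse order of ucma_init(). A generic sketch of the shape, with step_a/step_b/step_c and their undo_* counterparts as purely hypothetical stand-ins:

/* Hypothetical three-step init illustrating the err1/err2 unwind
 * pattern used above: undo completed steps in reverse order. */
static int init_three_steps(void)
{
	int ret;

	ret = step_a();		/* e.g. misc_register() */
	if (ret)
		return ret;	/* nothing to undo yet */
	ret = step_b();		/* e.g. device_create_file() */
	if (ret)
		goto err1;
	ret = step_c();		/* e.g. register_sysctl_paths() */
	if (ret)
		goto err2;
	return 0;
err2:
	undo_b();
err1:
	undo_a();
	return ret;
}
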
index 650b501eb142718fb322019a9357e2186c02a71c..bb7e19280821c1074c6efd23a992f243ba33d258 100644 (file)
@@ -33,6 +33,7 @@
 
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/if_ether.h>
 
 #include <rdma/ib_pack.h>
 
@@ -80,6 +81,40 @@ static const struct ib_field lrh_table[]  = {
          .size_bits    = 16 }
 };
 
+static const struct ib_field eth_table[]  = {
+       { STRUCT_FIELD(eth, dmac_h),
+         .offset_words = 0,
+         .offset_bits  = 0,
+         .size_bits    = 32 },
+       { STRUCT_FIELD(eth, dmac_l),
+         .offset_words = 1,
+         .offset_bits  = 0,
+         .size_bits    = 16 },
+       { STRUCT_FIELD(eth, smac_h),
+         .offset_words = 1,
+         .offset_bits  = 16,
+         .size_bits    = 16 },
+       { STRUCT_FIELD(eth, smac_l),
+         .offset_words = 2,
+         .offset_bits  = 0,
+         .size_bits    = 32 },
+       { STRUCT_FIELD(eth, type),
+         .offset_words = 3,
+         .offset_bits  = 0,
+         .size_bits    = 16 }
+};
+
+static const struct ib_field vlan_table[]  = {
+       { STRUCT_FIELD(vlan, tag),
+         .offset_words = 0,
+         .offset_bits  = 0,
+         .size_bits    = 16 },
+       { STRUCT_FIELD(vlan, type),
+         .offset_words = 0,
+         .offset_bits  = 16,
+         .size_bits    = 16 }
+};
+
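
Each ib_field entry gives a (word offset, bit offset, width) triple, so the tables fully determine the wire sizes: eth_table covers 14 bytes and vlan_table 4, with the 48-bit MACs split into _h/_l pieces so no field crosses a 32-bit word. Those sizes are what the pack path later advances len by; summarized as a sketch (the real constants presumably sit alongside IB_LRH_BYTES in <rdma/ib_pack.h>):

/* Sizes implied by eth_table and vlan_table above. */
#define IB_ETH_BYTES	14	/* dmac(6) + smac(6) + ethertype(2) */
#define IB_VLAN_BYTES	4	/* 802.1Q tag(2) + encapsulated type(2) */
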
 static const struct ib_field grh_table[]  = {
        { STRUCT_FIELD(grh, ip_version),
          .offset_words = 0,
@@ -180,38 +215,43 @@ static const struct ib_field deth_table[] = {
 /**
  * ib_ud_header_init - Initialize UD header structure
  * @payload_bytes:Length of packet payload
+ * @lrh_present: specify if LRH is present
+ * @eth_present: specify if Eth header is present
+ * @vlan_present: specify if VLAN tag is present
  * @grh_present:GRH flag (if non-zero, GRH will be included)
- * @immediate_present: specify if immediate data should be used
+ * @immediate_present: specify if immediate data is present
  * @header:Structure to initialize
- *
- * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
- * lrh.packet_length, grh.ip_version, grh.payload_length,
- * grh.next_header, bth.opcode, bth.pad_count and
- * bth.transport_header_version fields of a &struct ib_ud_header given
- * the payload length and whether a GRH will be included.
  */
 void ib_ud_header_init(int                         payload_bytes,
+                      int                  lrh_present,
+                      int                  eth_present,
+                      int                  vlan_present,
                       int                  grh_present,
                       int                  immediate_present,
                       struct ib_ud_header *header)
 {
-       u16 packet_length;
-
        memset(header, 0, sizeof *header);
 
-       header->lrh.link_version     = 0;
-       header->lrh.link_next_header =
-               grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
-       packet_length                = (IB_LRH_BYTES     +
-                                       IB_BTH_BYTES     +
-                                       IB_DETH_BYTES    +
-                                       payload_bytes    +
-                                       4                + /* ICRC     */
-                                       3) / 4;            /* round up */
-
-       header->grh_present          = grh_present;
+       if (lrh_present) {
+               u16 packet_length;
+
+               header->lrh.link_version     = 0;
+               header->lrh.link_next_header =
+                       grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
+               packet_length = (IB_LRH_BYTES   +
+                                IB_BTH_BYTES   +
+                                IB_DETH_BYTES  +
+                                (grh_present ? IB_GRH_BYTES : 0) +
+                                payload_bytes  +
+                                4              + /* ICRC     */
+                                3) / 4;          /* round up */
+               header->lrh.packet_length = cpu_to_be16(packet_length);
+       }
+
+       if (vlan_present)
+               header->eth.type = cpu_to_be16(ETH_P_8021Q);
+
        if (grh_present) {
-               packet_length              += IB_GRH_BYTES / 4;
                header->grh.ip_version      = 6;
                header->grh.payload_length  =
                        cpu_to_be16((IB_BTH_BYTES     +
@@ -222,18 +262,51 @@ void ib_ud_header_init(int                    payload_bytes,
                header->grh.next_header     = 0x1b;
        }
 
-       header->lrh.packet_length = cpu_to_be16(packet_length);
-
-       header->immediate_present            = immediate_present;
        if (immediate_present)
                header->bth.opcode           = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
        else
                header->bth.opcode           = IB_OPCODE_UD_SEND_ONLY;
        header->bth.pad_count                = (4 - payload_bytes) & 3;
        header->bth.transport_header_version = 0;
+
+       header->lrh_present = lrh_present;
+       header->eth_present = eth_present;
+       header->vlan_present = vlan_present;
+       header->grh_present = grh_present;
+       header->immediate_present = immediate_present;
 }
 EXPORT_SYMBOL(ib_ud_header_init);
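
With the new flags, a caller describes exactly which L2 headers precede BTH. A hedged sketch of how a RoCE-style UD path might call it; the wrapper is invented for illustration, and the flag choices follow the Ethernet rules above:

/* Sketch: UD header for an Ethernet (IBoE) port -- no LRH, Ethernet
 * L2 header present, GRH mandatory, no immediate data. */
static void build_iboe_ud_header(struct ib_ud_header *hdr, int payload_bytes)
{
	ib_ud_header_init(payload_bytes,
			  0,	/* lrh_present */
			  1,	/* eth_present */
			  0,	/* vlan_present */
			  1,	/* grh_present */
			  0,	/* immediate_present */
			  hdr);
}
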
 
+/**
+ * ib_lrh_header_pack - Pack LRH header struct into wire format
+ * @lrh:unpacked LRH header struct
+ * @buf:Buffer to pack into
+ *
+ * ib_lrh_header_pack() packs the LRH header structure @lrh into
+ * wire format in the buffer @buf.
+ */
+int ib_lrh_header_pack(struct ib_unpacked_lrh *lrh, void *buf)
+{
+       ib_pack(lrh_table, ARRAY_SIZE(lrh_table), lrh, buf);
+       return 0;
+}
+EXPORT_SYMBOL(ib_lrh_header_pack);
+
+/**
+ * ib_lrh_header_unpack - Unpack LRH structure from wire format
+ * @buf:Buffer to unpack from
+ * @lrh:LRH header struct to fill in
+ *
+ * ib_lrh_header_unpack() unpacks the LRH header structure from
+ * wire format in @buf into @lrh.
+ */
+int ib_lrh_header_unpack(void *buf, struct ib_unpacked_lrh *lrh)
+{
+       ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, lrh);
+       return 0;
+}
+EXPORT_SYMBOL(ib_lrh_header_unpack);
+
 /**
  * ib_ud_header_pack - Pack UD header struct into wire format
  * @header:UD header struct
@@ -247,10 +320,21 @@ int ib_ud_header_pack(struct ib_ud_header *header,
 {
        int len = 0;
 
-       ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
-               &header->lrh, buf);
-       len += IB_LRH_BYTES;
-
+       if (header->lrh_present) {
+               ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
+                       &header->lrh, buf + len);
+               len += IB_LRH_BYTES;
+       }
+       if (header->eth_present) {
+               ib_pack(eth_table, ARRAY_SIZE(eth_table),
+                       &header->eth, buf + len);
+               len += IB_ETH_BYTES;
+       }
+       if (header->vlan_present) {
+               ib_pack(vlan_table, ARRAY_SIZE(vlan_table),
+                       &header->vlan, buf + len);
+               len += IB_VLAN_BYTES;
+       }
        if (header->grh_present) {
                ib_pack(grh_table, ARRAY_SIZE(grh_table),
                        &header->grh, buf + len);
index 5fa856909511248c01bb925166bb592e8e94a0e4..cd1996d0ad089950960ba8bb3f4eaa0bff24f7fe 100644 (file)
@@ -1022,7 +1022,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
 
        port->ib_dev   = device;
        port->port_num = port_num;
-       init_MUTEX(&port->sm_sem);
+       sema_init(&port->sm_sem, 1);
        mutex_init(&port->file_mutex);
        INIT_LIST_HEAD(&port->file_list);
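
init_MUTEX() was on its way out of the tree; sema_init(&sem, 1) is the direct replacement, keeping sm_sem a binary semaphore. Its classic usage, sketched as a fragment with error handling elided:

/* Sketch: sm_sem after the conversion -- a count of 1 gives
 * mutex-like exclusion via the semaphore API. */
sema_init(&port->sm_sem, 1);

if (down_interruptible(&port->sm_sem))
	return -ERESTARTSYS;	/* interrupted while waiting */
/* ... exclusive access to the SM device ... */
up(&port->sm_sem);
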
 
index 6fcfbeb24a234a419ec9313c956c7c4ba581bcf6..b342248aec059060e19f64ffd69ec28af256682a 100644 (file)
@@ -460,6 +460,8 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
        resp.active_width    = attr.active_width;
        resp.active_speed    = attr.active_speed;
        resp.phys_state      = attr.phys_state;
+       resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
+                                                       cmd.port_num);
 
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
index e0fa222387157b3f639bec876297f43ee55ad20a..af7a8b08b2e95abe26704c747ba68bea63e97937 100644 (file)
@@ -94,6 +94,22 @@ rdma_node_get_transport(enum rdma_node_type node_type)
 }
 EXPORT_SYMBOL(rdma_node_get_transport);
 
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
+{
+       if (device->get_link_layer)
+               return device->get_link_layer(device, port_num);
+
+       switch (rdma_node_get_transport(device->node_type)) {
+       case RDMA_TRANSPORT_IB:
+               return IB_LINK_LAYER_INFINIBAND;
+       case RDMA_TRANSPORT_IWARP:
+               return IB_LINK_LAYER_ETHERNET;
+       default:
+               return IB_LINK_LAYER_UNSPECIFIED;
+       }
+}
+EXPORT_SYMBOL(rdma_port_get_link_layer);
+
 /* Protection domains */
 
 struct ib_pd *ib_alloc_pd(struct ib_device *device)
index 06964c4af84961864398bb7258c7fae13ffb54f2..950dfabcd89d25f3bd2b33a50f88dc839fce9141 100644 (file)
@@ -1,6 +1,4 @@
-ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
 
 obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
 
index 3b5095470cb3d37955cb24c101f8d07ac609432b..0ebe4e806b86c7a712142aa53fcb117a3d4d8158 100644 (file)
@@ -62,8 +62,8 @@ void c2_rnic_interrupt(struct c2_dev *c2dev)
 static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
 {
        if (c2dev->qptr_array[mq_index] == NULL) {
-               pr_debug(KERN_INFO "handle_mq: stray activity for mq_index=%d\n",
-                       mq_index);
+               pr_debug("handle_mq: stray activity for mq_index=%d\n",
+                        mq_index);
                return;
        }
 
index 7e7b5a66f042ad096b9960cdb6e6ebf66e759d6a..621619c794e5aaed29e56f685e7007e5c57b2e04 100644 (file)
@@ -1,10 +1,8 @@
-EXTRA_CFLAGS += -Idrivers/net/cxgb3
+ccflags-y := -Idrivers/net/cxgb3
 
 obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
 
 iw_cxgb3-y :=  iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
               iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
 
-ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_INFINIBAND_CXGB3_DEBUG) += -DDEBUG
index 005b7b52bc1e5751f8e4eb3f359bd4debcf54202..09dda0b8740eac9ee1a6d50ed536dec3a709a37d 100644 (file)
@@ -160,6 +160,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
        struct rdma_cq_setup setup;
        int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
 
+       size += 1; /* one extra page for storing cq-in-err state */
        cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
        if (!cq->cqid)
                return -ENOMEM;
index e5ddb63e7d234d66ac16aeb6cfb3b33b1cd9dcb2..4bb997aa39d0c69154074d47f40243ff3c314995 100644 (file)
@@ -728,6 +728,22 @@ struct t3_cq {
 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
                                         CQE_GENBIT(*cqe))
 
+struct t3_cq_status_page {
+       u32 cq_err;
+};
+
+static inline int cxio_cq_in_error(struct t3_cq *cq)
+{
+       return ((struct t3_cq_status_page *)
+               &cq->queue[1 << cq->size_log2])->cq_err;
+}
+
+static inline void cxio_set_cq_in_error(struct t3_cq *cq)
+{
+       ((struct t3_cq_status_page *)
+        &cq->queue[1 << cq->size_log2])->cq_err = 1;
+}
+
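
Both helpers above reach one slot past the CQE ring: the status word lives at queue[1 << size_log2], in the extra space the create path now allocates, so kernel and user mappings observe the same flag. Factored out as a hypothetical accessor:

/* Sketch: the in-error flag is overlaid on the memory immediately
 * following the last CQE of the ring. */
static inline struct t3_cq_status_page *cq_status_page(struct t3_cq *cq)
{
	return (struct t3_cq_status_page *)&cq->queue[1 << cq->size_log2];
}
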
 static inline void cxio_set_wq_in_error(struct t3_wq *wq)
 {
        wq->queue->wq_in_err.err |= 1;
index 13c88871dc3b90f564a52b4651aa371a4cd15633..d02dcc6e5963bf99f35020525d0c4c2c306c5464 100644 (file)
@@ -1093,8 +1093,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 
        if (credits == 0) {
-               PDBG(KERN_ERR "%s 0 credit ack  ep %p state %u\n",
-                       __func__, ep, state_read(&ep->com));
+               PDBG("%s 0 credit ack  ep %p state %u\n",
+                    __func__, ep, state_read(&ep->com));
                return CPL_RET_BUF_DONE;
        }
 
index 6afc89e7572c8d880db196cc34402d3a168e39d8..71e0d845da3d976588542aebd2803e9d4a6ef2e9 100644 (file)
@@ -76,6 +76,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
        atomic_inc(&qhp->refcnt);
        spin_unlock(&rnicp->lock);
 
+       if (qhp->attr.state == IWCH_QP_STATE_RTS) {
+               attrs.next_state = IWCH_QP_STATE_TERMINATE;
+               iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
+                              &attrs, 1);
+               if (send_term)
+                       iwch_post_terminate(qhp, rsp_msg);
+       }
+
        event.event = ib_event;
        event.device = chp->ibcq.device;
        if (ib_event == IB_EVENT_CQ_ERR)
@@ -86,13 +94,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
        if (qhp->ibqp.event_handler)
                (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
-       if (qhp->attr.state == IWCH_QP_STATE_RTS) {
-               attrs.next_state = IWCH_QP_STATE_TERMINATE;
-               iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
-                              &attrs, 1);
-               if (send_term)
-                       iwch_post_terminate(qhp, rsp_msg);
-       }
+       (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 
        if (atomic_dec_and_test(&qhp->refcnt))
                wake_up(&qhp->wait);
@@ -179,7 +181,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
        case TPT_ERR_BOUND:
        case TPT_ERR_INVALIDATE_SHARED_MR:
        case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
-               (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
                break;
 
index fca0b4b747e4e5aedfb7668a74bb40fafdad9ff2..2e2741307af4be4114df4545a2fb5676f360c8ea 100644 (file)
@@ -154,6 +154,8 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
        struct iwch_create_cq_resp uresp;
        struct iwch_create_cq_req ureq;
        struct iwch_ucontext *ucontext = NULL;
+       static int warned;
+       size_t resplen;
 
        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
        rhp = to_iwch_dev(ibdev);
@@ -217,15 +219,26 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
-               if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
+               mm->key = uresp.key;
+               mm->addr = virt_to_phys(chp->cq.queue);
+               if (udata->outlen < sizeof uresp) {
+                       if (!warned++)
+                               printk(KERN_WARNING MOD "Warning - "
+                                      "downlevel libcxgb3 (non-fatal).\n");
+                       mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
+                                            sizeof(struct t3_cqe));
+                       resplen = sizeof(struct iwch_create_cq_resp_v0);
+               } else {
+                       mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
+                                            sizeof(struct t3_cqe));
+                       uresp.memsize = mm->len;
+                       resplen = sizeof uresp;
+               }
+               if (ib_copy_to_udata(udata, &uresp, resplen)) {
                        kfree(mm);
                        iwch_destroy_cq(&chp->ibcq);
                        return ERR_PTR(-EFAULT);
                }
-               mm->key = uresp.key;
-               mm->addr = virt_to_phys(chp->cq.queue);
-               mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
-                                            sizeof (struct t3_cqe));
                insert_mmap(ucontext, mm);
        }
        PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
@@ -1414,6 +1427,7 @@ int iwch_register_device(struct iwch_dev *dev)
        dev->ibdev.post_send = iwch_post_send;
        dev->ibdev.post_recv = iwch_post_receive;
        dev->ibdev.get_protocol_stats = iwch_get_mib;
+       dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index c64d27bf2c15b20eba43ddf91051ece1212495fb..0993137181d7a893d6084688f003ca05358d3b42 100644 (file)
@@ -802,14 +802,12 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 /*
  * Assumes qhp lock is held.
  */
-static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
+                               struct iwch_cq *schp, unsigned long *flag)
 {
-       struct iwch_cq *rchp, *schp;
        int count;
        int flushed;
 
-       rchp = get_chp(qhp->rhp, qhp->attr.rcq);
-       schp = get_chp(qhp->rhp, qhp->attr.scq);
 
        PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
        /* take a ref on the qhp since we must release the lock */
@@ -847,10 +845,23 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 
 static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
-       if (qhp->ibqp.uobject)
+       struct iwch_cq *rchp, *schp;
+
+       rchp = get_chp(qhp->rhp, qhp->attr.rcq);
+       schp = get_chp(qhp->rhp, qhp->attr.scq);
+
+       if (qhp->ibqp.uobject) {
                cxio_set_wq_in_error(&qhp->wq);
-       else
-               __flush_qp(qhp, flag);
+               cxio_set_cq_in_error(&rchp->cq);
+               (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+               if (schp != rchp) {
+                       cxio_set_cq_in_error(&schp->cq);
+                       (*schp->ibcq.comp_handler)(&schp->ibcq,
+                                                  schp->ibcq.cq_context);
+               }
+               return;
+       }
+       __flush_qp(qhp, rchp, schp, flag);
 }
 
 
index cb7086f558c1e769ad66eee48fee14d1bbe97081..a277c31fcaf7bed35f727c329dabd63b7e10960f 100644 (file)
@@ -45,10 +45,18 @@ struct iwch_create_cq_req {
        __u64 user_rptr_addr;
 };
 
+struct iwch_create_cq_resp_v0 {
+       __u64 key;
+       __u32 cqid;
+       __u32 size_log2;
+};
+
 struct iwch_create_cq_resp {
        __u64 key;
        __u32 cqid;
        __u32 size_log2;
+       __u32 memsize;
+       __u32 reserved;
 };
 
 struct iwch_create_qp_resp {
index e31a499f0172373b274e7c6b4f4321506f7c32dd..cd20b1342aec18e7b017a98678607a9fa26c661d 100644 (file)
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -Idrivers/net/cxgb4
+ccflags-y := -Idrivers/net/cxgb4
 
 obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
 
index 32d352a88d50fba3e4526608b6ca345af630ab16..0dc62b1438bee2b52483381db9708d5a1f5c837e 100644 (file)
@@ -117,9 +117,9 @@ static int rcv_win = 256 * 1024;
 module_param(rcv_win, int, 0644);
 MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
 
-static int snd_win = 32 * 1024;
+static int snd_win = 128 * 1024;
 module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
+MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
 
 static struct workqueue_struct *workq;
 
@@ -172,7 +172,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
        error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
        if (error < 0)
                kfree_skb(skb);
-       return error;
+       return error < 0 ? error : 0;
 }
 
 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
@@ -187,7 +187,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
        error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
        if (error < 0)
                kfree_skb(skb);
-       return error;
+       return error < 0 ? error : 0;
 }
 
 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
@@ -219,12 +219,11 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
 
 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
 {
-       unsigned long flags;
        enum c4iw_ep_state state;
 
-       spin_lock_irqsave(&epc->lock, flags);
+       mutex_lock(&epc->mutex);
        state = epc->state;
-       spin_unlock_irqrestore(&epc->lock, flags);
+       mutex_unlock(&epc->mutex);
        return state;
 }
 
@@ -235,12 +234,10 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 
 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&epc->lock, flags);
+       mutex_lock(&epc->mutex);
        PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
        __state_set(epc, new);
-       spin_unlock_irqrestore(&epc->lock, flags);
+       mutex_unlock(&epc->mutex);
        return;
 }
 
@@ -251,8 +248,8 @@ static void *alloc_ep(int size, gfp_t gfp)
        epc = kzalloc(size, gfp);
        if (epc) {
                kref_init(&epc->kref);
-               spin_lock_init(&epc->lock);
-               init_waitqueue_head(&epc->waitq);
+               mutex_init(&epc->mutex);
+               c4iw_init_wr_wait(&epc->wr_wait);
        }
        PDBG("%s alloc ep %p\n", __func__, epc);
        return epc;
@@ -1131,7 +1128,6 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *ep;
        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
-       unsigned long flags;
        int release = 0;
        unsigned int tid = GET_TID(rpl);
        struct tid_info *t = dev->rdev.lldi.tids;
@@ -1139,7 +1135,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        BUG_ON(!ep);
-       spin_lock_irqsave(&ep->com.lock, flags);
+       mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case ABORTING:
                __state_set(&ep->com, DEAD);
@@ -1150,7 +1146,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                     __func__, ep, ep->com.state);
                break;
        }
-       spin_unlock_irqrestore(&ep->com.lock, flags);
+       mutex_unlock(&ep->com.mutex);
 
        if (release)
                release_ep_resources(ep);
@@ -1213,9 +1209,9 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        }
        PDBG("%s ep %p status %d error %d\n", __func__, ep,
             rpl->status, status2errno(rpl->status));
-       ep->com.rpl_err = status2errno(rpl->status);
-       ep->com.rpl_done = 1;
-       wake_up(&ep->com.waitq);
+       ep->com.wr_wait.ret = status2errno(rpl->status);
+       ep->com.wr_wait.done = 1;
+       wake_up(&ep->com.wr_wait.wait);
 
        return 0;
 }
@@ -1249,9 +1245,9 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        struct c4iw_listen_ep *ep = lookup_stid(t, stid);
 
        PDBG("%s ep %p\n", __func__, ep);
-       ep->com.rpl_err = status2errno(rpl->status);
-       ep->com.rpl_done = 1;
-       wake_up(&ep->com.waitq);
+       ep->com.wr_wait.ret = status2errno(rpl->status);
+       ep->com.wr_wait.done = 1;
+       wake_up(&ep->com.wr_wait.wait);
        return 0;
 }
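
Every reply handler now completes the shared c4iw_wr_wait the same way: record the status, set done, wake the waiter. The producer half, sketched below with an invented helper name (the submit side blocks in c4iw_wait_for_reply(), shown near the end of this file's hunks):

/* Sketch: the wake-up protocol used by pass_open_rpl(),
 * close_listsrv_rpl(), peer_close() and peer_abort() above. */
static void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
	wr_waitp->done = 1;
	wake_up(&wr_waitp->wait);
}
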
 
@@ -1478,7 +1474,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
        struct cpl_peer_close *hdr = cplhdr(skb);
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;
-       unsigned long flags;
        int disconnect = 1;
        int release = 0;
        int closing = 0;
@@ -1489,7 +1484,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        dst_confirm(ep->dst);
 
-       spin_lock_irqsave(&ep->com.lock, flags);
+       mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
                __state_set(&ep->com, CLOSING);
@@ -1507,17 +1502,17 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
                 * in rdma connection migration (see c4iw_accept_cr()).
                 */
                __state_set(&ep->com, CLOSING);
-               ep->com.rpl_done = 1;
-               ep->com.rpl_err = -ECONNRESET;
+               ep->com.wr_wait.done = 1;
+               ep->com.wr_wait.ret = -ECONNRESET;
                PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-               wake_up(&ep->com.waitq);
+               wake_up(&ep->com.wr_wait.wait);
                break;
        case MPA_REP_SENT:
                __state_set(&ep->com, CLOSING);
-               ep->com.rpl_done = 1;
-               ep->com.rpl_err = -ECONNRESET;
+               ep->com.wr_wait.done = 1;
+               ep->com.wr_wait.ret = -ECONNRESET;
                PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-               wake_up(&ep->com.waitq);
+               wake_up(&ep->com.wr_wait.wait);
                break;
        case FPDU_MODE:
                start_ep_timer(ep);
@@ -1550,7 +1545,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
        default:
                BUG_ON(1);
        }
-       spin_unlock_irqrestore(&ep->com.lock, flags);
+       mutex_unlock(&ep->com.mutex);
        if (closing) {
                attrs.next_state = C4IW_QP_STATE_CLOSING;
                c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1581,7 +1576,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
        struct c4iw_qp_attributes attrs;
        int ret;
        int release = 0;
-       unsigned long flags;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);
 
@@ -1591,9 +1585,17 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
                     ep->hwtid);
                return 0;
        }
-       spin_lock_irqsave(&ep->com.lock, flags);
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
             ep->com.state);
+
+       /*
+        * Wake up any threads in rdma_init() or rdma_fini().
+        */
+       ep->com.wr_wait.done = 1;
+       ep->com.wr_wait.ret = -ECONNRESET;
+       wake_up(&ep->com.wr_wait.wait);
+
+       mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case CONNECTING:
                break;
@@ -1605,23 +1607,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
                connect_reply_upcall(ep, -ECONNRESET);
                break;
        case MPA_REP_SENT:
-               ep->com.rpl_done = 1;
-               ep->com.rpl_err = -ECONNRESET;
-               PDBG("waking up ep %p\n", ep);
-               wake_up(&ep->com.waitq);
                break;
        case MPA_REQ_RCVD:
-
-               /*
-                * We're gonna mark this puppy DEAD, but keep
-                * the reference on it until the ULP accepts or
-                * rejects the CR. Also wake up anyone waiting
-                * in rdma connection migration (see c4iw_accept_cr()).
-                */
-               ep->com.rpl_done = 1;
-               ep->com.rpl_err = -ECONNRESET;
-               PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-               wake_up(&ep->com.waitq);
                break;
        case MORIBUND:
        case CLOSING:
@@ -1644,7 +1631,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
                break;
        case DEAD:
                PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
-               spin_unlock_irqrestore(&ep->com.lock, flags);
+               mutex_unlock(&ep->com.mutex);
                return 0;
        default:
                BUG_ON(1);
@@ -1655,7 +1642,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
                __state_set(&ep->com, DEAD);
                release = 1;
        }
-       spin_unlock_irqrestore(&ep->com.lock, flags);
+       mutex_unlock(&ep->com.mutex);
 
        rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
        if (!rpl_skb) {
@@ -1681,7 +1668,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;
        struct cpl_close_con_rpl *rpl = cplhdr(skb);
-       unsigned long flags;
        int release = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(rpl);
@@ -1692,7 +1678,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        BUG_ON(!ep);
 
        /* The cm_id may be null if we failed to connect */
-       spin_lock_irqsave(&ep->com.lock, flags);
+       mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
@@ -1717,7 +1703,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                BUG_ON(1);
                break;
        }
-       spin_unlock_irqrestore(&ep->com.lock, flags);
+       mutex_unlock(&ep->com.mutex);
        if (release)
                release_ep_resources(ep);
        return 0;
@@ -1725,23 +1711,24 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 
 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 {
-       struct c4iw_ep *ep;
-       struct cpl_rdma_terminate *term = cplhdr(skb);
+       struct cpl_rdma_terminate *rpl = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
-       unsigned int tid = GET_TID(term);
+       unsigned int tid = GET_TID(rpl);
+       struct c4iw_ep *ep;
+       struct c4iw_qp_attributes attrs;
 
        ep = lookup_tid(t, tid);
+       BUG_ON(!ep);
 
-       if (state_read(&ep->com) != FPDU_MODE)
-               return 0;
+       if (ep->com.qp) {
+               printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
+                      ep->com.qp->wq.sq.qid);
+               attrs.next_state = C4IW_QP_STATE_TERMINATE;
+               c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+       } else
+               printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
 
-       PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-       skb_pull(skb, sizeof *term);
-       PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
-       skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
-                                 skb->len);
-       ep->com.qp->attr.terminate_msg_len = skb->len;
-       ep->com.qp->attr.is_terminate_local = 0;
        return 0;
 }
 
@@ -1762,8 +1749,8 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
        if (credits == 0) {
-               PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
-                       __func__, ep, ep->hwtid, state_read(&ep->com));
+               PDBG("%s 0 credit ack ep %p tid %u state %u\n",
+                    __func__, ep, ep->hwtid, state_read(&ep->com));
                return 0;
        }
 
@@ -2042,6 +2029,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        }
 
        state_set(&ep->com, LISTEN);
+       c4iw_init_wr_wait(&ep->com.wr_wait);
        err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
                                  ep->com.local_addr.sin_addr.s_addr,
                                  ep->com.local_addr.sin_port,
@@ -2050,15 +2038,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
                goto fail3;
 
        /* wait for pass_open_rpl */
-       wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
-       if (ep->com.rpl_done)
-               err = ep->com.rpl_err;
-       else {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(ep->com.dev->rdev.lldi.pdev));
-               ep->com.dev->rdev.flags = T4_FATAL_ERROR;
-               err = -EIO;
-       }
+       err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+                                 __func__);
        if (!err) {
                cm_id->provider_data = ep;
                goto out;
@@ -2082,20 +2063,12 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 
        might_sleep();
        state_set(&ep->com, DEAD);
-       ep->com.rpl_done = 0;
-       ep->com.rpl_err = 0;
+       c4iw_init_wr_wait(&ep->com.wr_wait);
        err = listen_stop(ep);
        if (err)
                goto done;
-       wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
-       if (ep->com.rpl_done)
-               err = ep->com.rpl_err;
-       else {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(ep->com.dev->rdev.lldi.pdev));
-               ep->com.dev->rdev.flags = T4_FATAL_ERROR;
-               err = -EIO;
-       }
+       err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
+                                 __func__);
        cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 done:
        cm_id->rem_ref(cm_id);
@@ -2106,12 +2079,11 @@ done:
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 {
        int ret = 0;
-       unsigned long flags;
        int close = 0;
        int fatal = 0;
        struct c4iw_rdev *rdev;
 
-       spin_lock_irqsave(&ep->com.lock, flags);
+       mutex_lock(&ep->com.mutex);
 
        PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
             states[ep->com.state], abrupt);
@@ -2158,7 +2130,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
                break;
        }
 
-       spin_unlock_irqrestore(&ep->com.lock, flags);
+       mutex_unlock(&ep->com.mutex);
        if (close) {
                if (abrupt)
                        ret = abort_connection(ep, NULL, gfp);
@@ -2172,6 +2144,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
        return ret;
 }
 
+static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+       struct cpl_fw6_msg *rpl = cplhdr(skb);
+       c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+       return 0;
+}
+
 /*
  * These are the real handlers that are called from a
  * work queue.
@@ -2190,7 +2169,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
        [CPL_ABORT_REQ_RSS] = peer_abort,
        [CPL_CLOSE_CON_RPL] = close_con_rpl,
        [CPL_RDMA_TERMINATE] = terminate,
-       [CPL_FW4_ACK] = fw4_ack
+       [CPL_FW4_ACK] = fw4_ack,
+       [CPL_FW6_MSG] = async_event
 };
 
 static void process_timeout(struct c4iw_ep *ep)
@@ -2198,7 +2178,7 @@ static void process_timeout(struct c4iw_ep *ep)
        struct c4iw_qp_attributes attrs;
        int abort = 1;
 
-       spin_lock_irq(&ep->com.lock);
+       mutex_lock(&ep->com.mutex);
        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
             ep->com.state);
        switch (ep->com.state) {
@@ -2225,7 +2205,7 @@ static void process_timeout(struct c4iw_ep *ep)
                WARN_ON(1);
                abort = 0;
        }
-       spin_unlock_irq(&ep->com.lock);
+       mutex_unlock(&ep->com.mutex);
        if (abort)
                abort_connection(ep, NULL, GFP_KERNEL);
        c4iw_put_ep(&ep->com);
@@ -2309,6 +2289,7 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
                       "for tid %u\n", rpl->status, GET_TID(rpl));
        }
+       kfree_skb(skb);
        return 0;
 }
 
@@ -2323,20 +2304,25 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
        switch (rpl->type) {
        case 1:
                ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
-               wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+               wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
                PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
                if (wr_waitp) {
-                       wr_waitp->ret = ret;
+                       if (ret)
+                               wr_waitp->ret = -ret;
+                       else
+                               wr_waitp->ret = 0;
                        wr_waitp->done = 1;
                        wake_up(&wr_waitp->wait);
                }
+               kfree_skb(skb);
                break;
        case 2:
-               c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+               sched(dev, skb);
                break;
        default:
                printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
                       rpl->type);
+               kfree_skb(skb);
                break;
        }
        return 0;
index b3daf39eed4a0ac52e4d10e18f45cd233d76e969..8d8f8add6fcd93e71fd5b8931616554099bd7b00 100644 (file)
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-       res_wr->cookie = (u64)&wr_wait;
+       res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -64,14 +64,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret) {
-               wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
-               if (!wr_wait.done) {
-                       printk(KERN_ERR MOD "Device %s not responding!\n",
-                              pci_name(rdev->lldi.pdev));
-                       rdev->flags = T4_FATAL_ERROR;
-                       ret = -EIO;
-               } else
-                       ret = wr_wait.ret;
+               ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        }
 
        kfree(cq->sw_queue);
@@ -132,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-       res_wr->cookie = (u64)&wr_wait;
+       res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -157,14 +150,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
        if (ret)
                goto err4;
        PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
-       wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
-       if (!wr_wait.done) {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(rdev->lldi.pdev));
-               rdev->flags = T4_FATAL_ERROR;
-               ret = -EIO;
-       } else
-               ret = wr_wait.ret;
+       ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;
 
@@ -476,6 +462,11 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                goto proc_cqe;
        }
 
+       if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
+               ret = -EAGAIN;
+               goto skip_cqe;
+       }
+
        /*
         * RECV completion.
         */
@@ -696,6 +687,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
+               case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
index 9bbf491d5d9ee087e2081ef024e7ebdd0d2c23aa..54fbc1118abe0eb2e2e4592ab0724e1e72ca0f5a 100644 (file)
@@ -49,29 +49,33 @@ static DEFINE_MUTEX(dev_mutex);
 
 static struct dentry *c4iw_debugfs_root;
 
-struct debugfs_qp_data {
+struct c4iw_debugfs_data {
        struct c4iw_dev *devp;
        char *buf;
        int bufsize;
        int pos;
 };
 
-static int count_qps(int id, void *p, void *data)
+static int count_idrs(int id, void *p, void *data)
 {
-       struct c4iw_qp *qp = p;
        int *countp = data;
 
-       if (id != qp->wq.sq.qid)
-               return 0;
-
        *countp = *countp + 1;
        return 0;
 }
 
-static int dump_qps(int id, void *p, void *data)
+static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
+                           loff_t *ppos)
+{
+       struct c4iw_debugfs_data *d = file->private_data;
+
+       return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
+}
+
+static int dump_qp(int id, void *p, void *data)
 {
        struct c4iw_qp *qp = p;
-       struct debugfs_qp_data *qpd = data;
+       struct c4iw_debugfs_data *qpd = data;
        int space;
        int cc;
 
@@ -101,7 +105,7 @@ static int dump_qps(int id, void *p, void *data)
 
 static int qp_release(struct inode *inode, struct file *file)
 {
-       struct debugfs_qp_data *qpd = file->private_data;
+       struct c4iw_debugfs_data *qpd = file->private_data;
        if (!qpd) {
                printk(KERN_INFO "%s null qpd?\n", __func__);
                return 0;
@@ -113,7 +117,7 @@ static int qp_release(struct inode *inode, struct file *file)
 
 static int qp_open(struct inode *inode, struct file *file)
 {
-       struct debugfs_qp_data *qpd;
+       struct c4iw_debugfs_data *qpd;
        int ret = 0;
        int count = 1;
 
@@ -126,7 +130,7 @@ static int qp_open(struct inode *inode, struct file *file)
        qpd->pos = 0;
 
        spin_lock_irq(&qpd->devp->lock);
-       idr_for_each(&qpd->devp->qpidr, count_qps, &count);
+       idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
        spin_unlock_irq(&qpd->devp->lock);
 
        qpd->bufsize = count * 128;
@@ -137,7 +141,7 @@ static int qp_open(struct inode *inode, struct file *file)
        }
 
        spin_lock_irq(&qpd->devp->lock);
-       idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
+       idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
        spin_unlock_irq(&qpd->devp->lock);
 
        qpd->buf[qpd->pos++] = 0;
@@ -149,43 +153,86 @@ out:
        return ret;
 }
 
-static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
-                       loff_t *ppos)
+static const struct file_operations qp_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = qp_open,
+       .release = qp_release,
+       .read    = debugfs_read,
+       .llseek  = default_llseek,
+};
+
+static int dump_stag(int id, void *p, void *data)
 {
-       struct debugfs_qp_data *qpd = file->private_data;
-       loff_t pos = *ppos;
-       loff_t avail = qpd->pos;
+       struct c4iw_debugfs_data *stagd = data;
+       int space;
+       int cc;
 
-       if (pos < 0)
-               return -EINVAL;
-       if (pos >= avail)
+       space = stagd->bufsize - stagd->pos - 1;
+       if (space == 0)
+               return 1;
+
+       cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+       if (cc < space)
+               stagd->pos += cc;
+       return 0;
+}
+
+static int stag_release(struct inode *inode, struct file *file)
+{
+       struct c4iw_debugfs_data *stagd = file->private_data;
+       if (!stagd) {
+               printk(KERN_INFO "%s null stagd?\n", __func__);
                return 0;
-       if (count > avail - pos)
-               count = avail - pos;
+       }
+       kfree(stagd->buf);
+       kfree(stagd);
+       return 0;
+}
 
-       while (count) {
-               size_t len = 0;
+static int stag_open(struct inode *inode, struct file *file)
+{
+       struct c4iw_debugfs_data *stagd;
+       int ret = 0;
+       int count = 1;
 
-               len = min((int)count, (int)qpd->pos - (int)pos);
-               if (copy_to_user(buf, qpd->buf + pos, len))
-                       return -EFAULT;
-               if (len == 0)
-                       return -EINVAL;
+       stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
+       if (!stagd) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       stagd->devp = inode->i_private;
+       stagd->pos = 0;
+
+       spin_lock_irq(&stagd->devp->lock);
+       idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
+       spin_unlock_irq(&stagd->devp->lock);
 
-               buf += len;
-               pos += len;
-               count -= len;
+       stagd->bufsize = count * sizeof("0x12345678\n");
+       stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+       if (!stagd->buf) {
+               ret = -ENOMEM;
+               goto err1;
        }
-       count = pos - *ppos;
-       *ppos = pos;
-       return count;
+
+       spin_lock_irq(&stagd->devp->lock);
+       idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
+       spin_unlock_irq(&stagd->devp->lock);
+
+       stagd->buf[stagd->pos++] = 0;
+       file->private_data = stagd;
+       goto out;
+err1:
+       kfree(stagd);
+out:
+       return ret;
 }
 
-static const struct file_operations qp_debugfs_fops = {
+static const struct file_operations stag_debugfs_fops = {
        .owner   = THIS_MODULE,
-       .open    = qp_open,
-       .release = qp_release,
-       .read    = qp_read,
+       .open    = stag_open,
+       .release = stag_release,
+       .read    = debugfs_read,
+       .llseek  = default_llseek,
 };
 
 static int setup_debugfs(struct c4iw_dev *devp)
@@ -199,6 +246,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
                                 (void *)devp, &qp_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;
+
+       de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
+                                (void *)devp, &stag_debugfs_fops);
+       if (de && de->d_inode)
+               de->d_inode->i_size = 4096;
        return 0;
 }
 
@@ -290,7 +342,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
                goto err3;
        }
+       err = c4iw_ocqp_pool_create(rdev);
+       if (err) {
+               printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
+               goto err4;
+       }
        return 0;
+err4:
+       c4iw_rqtpool_destroy(rdev);
 err3:
        c4iw_pblpool_destroy(rdev);
 err2:
@@ -317,6 +376,7 @@ static void c4iw_remove(struct c4iw_dev *dev)
        idr_destroy(&dev->cqidr);
        idr_destroy(&dev->qpidr);
        idr_destroy(&dev->mmidr);
+       iounmap(dev->rdev.oc_mw_kva);
        ib_dealloc_device(&dev->ibdev);
 }
 
@@ -332,6 +392,17 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
        }
        devp->rdev.lldi = *infop;
 
+       devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
+               (pci_resource_len(devp->rdev.lldi.pdev, 2) -
+                roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
+       devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
+                                              devp->rdev.lldi.vr->ocq.size);
+
+       printk(KERN_INFO MOD "ocq memory: "
+              "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
+              devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
+              devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
+
        mutex_lock(&dev_mutex);
 
        ret = c4iw_rdev_open(&devp->rdev);
@@ -383,46 +454,6 @@ out:
        return dev;
 }
 
-static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
-                                      unsigned int skb_len,
-                                      unsigned int pull_len)
-{
-       struct sk_buff *skb;
-       struct skb_shared_info *ssi;
-
-       if (gl->tot_len <= 512) {
-               skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
-               if (unlikely(!skb))
-                       goto out;
-               __skb_put(skb, gl->tot_len);
-               skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
-       } else {
-               skb = alloc_skb(skb_len, GFP_ATOMIC);
-               if (unlikely(!skb))
-                       goto out;
-               __skb_put(skb, pull_len);
-               skb_copy_to_linear_data(skb, gl->va, pull_len);
-
-               ssi = skb_shinfo(skb);
-               ssi->frags[0].page = gl->frags[0].page;
-               ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
-               ssi->frags[0].size = gl->frags[0].size - pull_len;
-               if (gl->nfrags > 1)
-                       memcpy(&ssi->frags[1], &gl->frags[1],
-                              (gl->nfrags - 1) * sizeof(skb_frag_t));
-               ssi->nr_frags = gl->nfrags;
-
-               skb->len = gl->tot_len;
-               skb->data_len = skb->len - pull_len;
-               skb->truesize += skb->data_len;
-
-               /* Get a reference for the last page, we don't own it */
-               get_page(gl->frags[gl->nfrags - 1].page);
-       }
-out:
-       return skb;
-}
-
 static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
                        const struct pkt_gl *gl)
 {
@@ -447,7 +478,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
                c4iw_ev_handler(dev, qid);
                return 0;
        } else {
-               skb = t4_pktgl_to_skb(gl, 128, 128);
+               skb = cxgb4_pktgl_to_skb(gl, 128, 128);
                if (unlikely(!skb))
                        goto nomem;
        }
index 491e76a0327ff7fe52597683aa45c2ea88a164d6..c13041a0aeba90b033b7b338e26ecc80efc42753 100644 (file)
@@ -60,7 +60,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
        if (qhp->attr.state == C4IW_QP_STATE_RTS) {
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
-                              &attrs, 1);
+                              &attrs, 0);
        }
 
        event.event = ib_event;
index ed459b8f800fb65c0a9ab90b6dfeea6959d28e0c..16032cdb433779656f438b519a582bb710638c98 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <linux/kfifo.h>
+#include <linux/mutex.h>
 
 #include <asm/byteorder.h>
 
@@ -79,21 +80,6 @@ static inline void *cplhdr(struct sk_buff *skb)
        return skb->data;
 }
 
-#define C4IW_WR_TO (10*HZ)
-
-struct c4iw_wr_wait {
-       wait_queue_head_t wait;
-       int done;
-       int ret;
-};
-
-static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
-{
-       wr_waitp->ret = 0;
-       wr_waitp->done = 0;
-       init_waitqueue_head(&wr_waitp->wait);
-}
-
 struct c4iw_resource {
        struct kfifo tpt_fifo;
        spinlock_t tpt_fifo_lock;
@@ -127,8 +113,11 @@ struct c4iw_rdev {
        struct c4iw_dev_ucontext uctx;
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
+       struct gen_pool *ocqp_pool;
        u32 flags;
        struct cxgb4_lld_info lldi;
+       unsigned long oc_mw_pa;
+       void __iomem *oc_mw_kva;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -141,6 +130,44 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
        return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
 }
 
+#define C4IW_WR_TO (10*HZ)
+
+struct c4iw_wr_wait {
+       wait_queue_head_t wait;
+       int done;
+       int ret;
+};
+
+static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+       wr_waitp->ret = 0;
+       wr_waitp->done = 0;
+       init_waitqueue_head(&wr_waitp->wait);
+}
+
+static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
+                                struct c4iw_wr_wait *wr_waitp,
+                                u32 hwtid, u32 qpid,
+                                const char *func)
+{
+       unsigned to = C4IW_WR_TO;
+       do {
+               wait_event_timeout(wr_waitp->wait, wr_waitp->done, to);
+               if (!wr_waitp->done) {
+                       printk(KERN_ERR MOD "%s - Device %s not responding - "
+                              "tid %u qpid %u\n", func,
+                              pci_name(rdev->lldi.pdev), hwtid, qpid);
+                       to = to << 2;
+               }
+       } while (!wr_waitp->done);
+       if (wr_waitp->ret)
+               printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n",
+                      pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+       return wr_waitp->ret;
+}
+
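A note on c4iw_wait_for_reply(): it centralizes the open-coded wait-for-FW-reply blocks that later hunks delete from write_adapter_mem(), create_qp(), rdma_fini() and rdma_init(). Unlike those blocks it never marks the device fatal on timeout; it logs the stall and retries with a quadrupling timeout (to <<= 2). A minimal caller sketch, assuming the cookie/reply plumbing shown in the surrounding hunks:

	struct c4iw_wr_wait wr_wait;
	int ret;

	c4iw_init_wr_wait(&wr_wait);
	/* build a FW work request whose 64-bit cookie carries &wr_wait */
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, qid, __func__);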
 struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
@@ -327,6 +354,7 @@ struct c4iw_qp {
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
+       struct mutex mutex;
        atomic_t refcnt;
        wait_queue_head_t wait;
        struct timer_list timer;
@@ -579,12 +607,10 @@ struct c4iw_ep_common {
        struct c4iw_dev *dev;
        enum c4iw_ep_state state;
        struct kref kref;
-       spinlock_t lock;
+       struct mutex mutex;
        struct sockaddr_in local_addr;
        struct sockaddr_in remote_addr;
-       wait_queue_head_t waitq;
-       int rpl_done;
-       int rpl_err;
+       struct c4iw_wr_wait wr_wait;
        unsigned long flags;
 };
 
@@ -654,8 +680,10 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
 int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
 int c4iw_pblpool_create(struct c4iw_rdev *rdev);
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
+int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
 void c4iw_destroy_resource(struct c4iw_resource *rscp);
 int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
 int c4iw_register_device(struct c4iw_dev *dev);
@@ -721,6 +749,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
 void c4iw_flush_hw_cq(struct t4_cq *cq);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
index 269373a62f228edb347bae6750e2680050e8f30e..273ffe49525a5d3d1642a292730af15a388ad9da 100644 (file)
@@ -71,7 +71,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
                if (i == (num_wqe-1)) {
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
                                                    FW_WR_COMPL(1));
-                       req->wr.wr_lo = (__force __be64)&wr_wait;
+                       req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
                } else
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
                req->wr.wr_mid = cpu_to_be32(
@@ -103,14 +103,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
                len -= C4IW_MAX_INLINE_SIZE;
        }
 
-       wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
-       if (!wr_wait.done) {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(rdev->lldi.pdev));
-               rdev->flags = T4_FATAL_ERROR;
-               ret = -EIO;
-       } else
-               ret = wr_wait.ret;
+       ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        return ret;
 }
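The (__force __be64)(unsigned long) double cast above matters on 32-bit builds, where converting a pointer directly to a 64-bit integer type draws a pointer-to-integer-of-different-size warning. The cookie round trip, sketched with hypothetical reply-side names:

	/* post side: smuggle the kernel pointer through the FW cookie */
	req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;

	/* reply side (sketch): recover the waiter and complete it */
	wr_waitp = (struct c4iw_wr_wait *)(unsigned long)cookie;
	wr_waitp->ret = ret;
	wr_waitp->done = 1;
	wake_up(&wr_waitp->wait);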
 
index 8f645c83a125c1c9a1e3ad3ff400c3d90a0fec3a..f66dd8bf5128a0fd4f1a0acf686b4e4815813384 100644 (file)
@@ -54,9 +54,9 @@
 
 #include "iw_cxgb4.h"
 
-static int fastreg_support;
+static int fastreg_support = 1;
 module_param(fastreg_support, int, 0644);
-MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
+MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
 
 static int c4iw_modify_port(struct ib_device *ibdev,
                            u8 port, int port_modify_mask,
@@ -149,19 +149,28 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
        addr = mm->addr;
        kfree(mm);
 
-       if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
-           (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
-                      pci_resource_len(rdev->lldi.pdev, 2)))) {
+       if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
+           (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
+                   pci_resource_len(rdev->lldi.pdev, 0)))) {
 
                /*
-                * Map T4 DB register.
+                * MA_SYNC register...
                 */
-               if (vma->vm_flags & VM_READ)
-                       return -EPERM;
-
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-               vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
-               vma->vm_flags &= ~VM_MAYREAD;
+               ret = io_remap_pfn_range(vma, vma->vm_start,
+                                        addr >> PAGE_SHIFT,
+                                        len, vma->vm_page_prot);
+       } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
+                  (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+                   pci_resource_len(rdev->lldi.pdev, 2)))) {
+
+               /*
+                * Map user DB or OCQP memory...
+                */
+               if (addr >= rdev->oc_mw_pa)
+                       vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
+               else
+                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
@@ -382,7 +391,17 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
 static int c4iw_get_mib(struct ib_device *ibdev,
                        union rdma_protocol_stats *stats)
 {
-       return -ENOSYS;
+       struct tp_tcp_stats v4, v6;
+       struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
+
+       cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
+       memset(stats, 0, sizeof *stats);
+       stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
+       stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
+       stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
+       stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutRsts;
+
+       return 0;
 }
 
 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -472,6 +491,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
        dev->ibdev.post_send = c4iw_post_send;
        dev->ibdev.post_recv = c4iw_post_receive;
        dev->ibdev.get_protocol_stats = c4iw_get_mib;
+       dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index 93f6e5bf0ec57ccc53d3ad68a2cde7103eda5931..057cb2505ea12ce00431ba3dd2f86fae59da12ea 100644 (file)
  */
 #include "iw_cxgb4.h"
 
+static int ocqp_support;
+module_param(ocqp_support, int, 0644);
+MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=0)");
+
+static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
+{
+       unsigned long flag;
+       spin_lock_irqsave(&qhp->lock, flag);
+       qhp->attr.state = state;
+       spin_unlock_irqrestore(&qhp->lock, flag);
+}
+
+static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
+}
+
+static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
+                         pci_unmap_addr(sq, mapping));
+}
+
+static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       if (t4_sq_onchip(sq))
+               dealloc_oc_sq(rdev, sq);
+       else
+               dealloc_host_sq(rdev, sq);
+}
+
+static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       if (!ocqp_support || !t4_ocqp_supported())
+               return -ENOSYS;
+       sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
+       if (!sq->dma_addr)
+               return -ENOMEM;
+       sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
+                       rdev->lldi.vr->ocq.start;
+       sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
+                                           rdev->lldi.vr->ocq.start);
+       sq->flags |= T4_SQ_ONCHIP;
+       return 0;
+}
+
+static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+       sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
+                                      &(sq->dma_addr), GFP_KERNEL);
+       if (!sq->queue)
+               return -ENOMEM;
+       sq->phys_addr = virt_to_phys(sq->queue);
+       pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+       return 0;
+}
+
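The on-chip path stores an adapter offset in sq->dma_addr and derives both addresses from it. A worked example with hypothetical values: if ocq.start = 0x10000, c4iw_ocqp_pool_alloc() hands back dma_addr = 0x12000, and the BAR2 window for on-chip queue memory begins at oc_mw_pa = 0xd0000000, then phys_addr = 0xd0000000 + 0x12000 - 0x10000 = 0xd0002000, and sq->queue points at the same 0x2000 offset into the ioremap_wc() mapping oc_mw_kva.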
 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                      struct c4iw_dev_ucontext *uctx)
 {
@@ -41,9 +98,7 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
                          dma_unmap_addr(&wq->rq, mapping));
-       dma_free_coherent(&(rdev->lldi.pdev->dev),
-                         wq->sq.memsize, wq->sq.queue,
-                         dma_unmap_addr(&wq->sq, mapping));
+       dealloc_sq(rdev, &wq->sq);
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
        kfree(wq->rq.sw_rq);
        kfree(wq->sq.sw_sq);
@@ -93,11 +148,12 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        if (!wq->rq.rqt_hwaddr)
                goto err4;
 
-       wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
-                                         wq->sq.memsize, &(wq->sq.dma_addr),
-                                         GFP_KERNEL);
-       if (!wq->sq.queue)
-               goto err5;
+       if (user) {
+               if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
+                       goto err5;
+       } else
+               if (alloc_host_sq(rdev, &wq->sq))
+                       goto err5;
        memset(wq->sq.queue, 0, wq->sq.memsize);
        dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
@@ -144,7 +200,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                        V_FW_RI_RES_WR_NRES(2) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-       res_wr->cookie = (u64)&wr_wait;
+       res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
        res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -158,6 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
                V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
                V_FW_RI_RES_WR_PCIECHN(0) |     /* set by uP at ri_init time */
+               (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
                V_FW_RI_RES_WR_IQID(scq->cqid));
        res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
                V_FW_RI_RES_WR_DCAEN(0) |
@@ -198,14 +255,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err7;
-       wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
-       if (!wr_wait.done) {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(rdev->lldi.pdev));
-               rdev->flags = T4_FATAL_ERROR;
-               ret = -EIO;
-       } else
-               ret = wr_wait.ret;
+       ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
        if (ret)
                goto err7;
 
@@ -219,9 +269,7 @@ err7:
                          wq->rq.memsize, wq->rq.queue,
                          dma_unmap_addr(&wq->rq, mapping));
 err6:
-       dma_free_coherent(&(rdev->lldi.pdev->dev),
-                         wq->sq.memsize, wq->sq.queue,
-                         dma_unmap_addr(&wq->sq, mapping));
+       dealloc_sq(rdev, &wq->sq);
 err5:
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
 err4:
@@ -263,6 +311,9 @@ static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
                        rem -= len;
                }
        }
+       len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
+       if (len)
+               memset(dstp, 0, len);
        immdp->op = FW_RI_DATA_IMMD;
        immdp->r1 = 0;
        immdp->r2 = 0;
@@ -292,6 +343,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
                if (++flitp == queue_end)
                        flitp = queue_start;
        }
+       *flitp = (__force __be64)0;
        isglp->op = FW_RI_DATA_ISGL;
        isglp->r1 = 0;
        isglp->nsge = cpu_to_be16(num_sge);
@@ -453,13 +505,15 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
        return 0;
 }
 
-static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
+                        struct ib_send_wr *wr, u8 *len16)
 {
 
        struct fw_ri_immd *imdp;
        __be64 *p;
        int i;
        int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+       int rem;
 
        if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
                return -EINVAL;
@@ -474,32 +528,28 @@ static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
        wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
        wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
                                        0xffffffff);
-       if (pbllen > T4_MAX_FR_IMMD) {
-               struct c4iw_fr_page_list *c4pl =
-                               to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
-               struct fw_ri_dsgl *sglp;
-
-               sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
-               sglp->op = FW_RI_DATA_DSGL;
-               sglp->r1 = 0;
-               sglp->nsge = cpu_to_be16(1);
-               sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
-               sglp->len0 = cpu_to_be32(pbllen);
-
-               *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
-       } else {
-               imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
-               imdp->op = FW_RI_DATA_IMMD;
-               imdp->r1 = 0;
-               imdp->r2 = 0;
-               imdp->immdlen = cpu_to_be32(pbllen);
-               p = (__be64 *)(imdp + 1);
-               for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
-                       *p = cpu_to_be64(
-                               (u64)wr->wr.fast_reg.page_list->page_list[i]);
-               *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
-                                     16);
+       WARN_ON(pbllen > T4_MAX_FR_IMMD);
+       imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+       imdp->op = FW_RI_DATA_IMMD;
+       imdp->r1 = 0;
+       imdp->r2 = 0;
+       imdp->immdlen = cpu_to_be32(pbllen);
+       p = (__be64 *)(imdp + 1);
+       rem = pbllen;
+       for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+               *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
+               rem -= sizeof *p;
+               if (++p == (__be64 *)&sq->queue[sq->size])
+                       p = (__be64 *)sq->queue;
        }
+       BUG_ON(rem < 0);
+       while (rem) {
+               *p = 0;
+               rem -= sizeof *p;
+               if (++p == (__be64 *)&sq->queue[sq->size])
+                       p = (__be64 *)sq->queue;
+       }
+       *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
        return 0;
 }
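build_fastreg() now writes the page list as immediate data straight into the SQ ring, so both the copy loop and the zero-padding loop must cope with the write pointer wrapping past the end of the queue. The wrap idiom used twice above, pulled out as a sketch:

	/* sketch of the circular-advance idiom in build_fastreg() */
	static inline __be64 *next_sq_flit(struct t4_sq *sq, __be64 *p)
	{
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
		return p;
	}

The padding loop zeroes out to pbllen, which was rounded up to 32 bytes, so no stale flits from an earlier WR leak into the new work request.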
 
@@ -587,7 +637,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        fw_opcode = FW_RI_RDMA_READ_WR;
                        swsqe->opcode = FW_RI_READ_REQ;
                        if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
-                               fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+                               fw_flags = FW_RI_RDMA_READ_INVALIDATE;
                        else
                                fw_flags = 0;
                        err = build_rdma_read(wqe, wr, &len16);
@@ -600,7 +650,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                case IB_WR_FAST_REG_MR:
                        fw_opcode = FW_RI_FR_NSMR_WR;
                        swsqe->opcode = FW_RI_FAST_REGISTER;
-                       err = build_fastreg(wqe, wr, &len16);
+                       err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
                        break;
                case IB_WR_LOCAL_INV:
                        if (wr->send_flags & IB_SEND_FENCE)
@@ -905,46 +955,38 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
-                      struct c4iw_cq *schp, unsigned long *flag)
+                      struct c4iw_cq *schp)
 {
        int count;
        int flushed;
+       unsigned long flag;
 
        PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
-       /* take a ref on the qhp since we must release the lock */
-       atomic_inc(&qhp->refcnt);
-       spin_unlock_irqrestore(&qhp->lock, *flag);
 
        /* locking hierarchy: cq lock first, then qp lock. */
-       spin_lock_irqsave(&rchp->lock, *flag);
+       spin_lock_irqsave(&rchp->lock, flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&rchp->cq);
        c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
        flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&rchp->lock, *flag);
+       spin_unlock_irqrestore(&rchp->lock, flag);
        if (flushed)
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
 
        /* locking hierarchy: cq lock first, then qp lock. */
-       spin_lock_irqsave(&schp->lock, *flag);
+       spin_lock_irqsave(&schp->lock, flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&schp->cq);
        c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
        flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&schp->lock, *flag);
+       spin_unlock_irqrestore(&schp->lock, flag);
        if (flushed)
                (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-
-       /* deref */
-       if (atomic_dec_and_test(&qhp->refcnt))
-               wake_up(&qhp->wait);
-
-       spin_lock_irqsave(&qhp->lock, *flag);
 }
 
-static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
+static void flush_qp(struct c4iw_qp *qhp)
 {
        struct c4iw_cq *rchp, *schp;
 
@@ -958,7 +1000,7 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
                        t4_set_cq_in_error(&schp->cq);
                return;
        }
-       __flush_qp(qhp, rchp, schp, flag);
+       __flush_qp(qhp, rchp, schp);
 }
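With qhp->mutex now serializing state transitions (see the c4iw_modify_qp() hunks below), __flush_qp() no longer needs the old take-a-ref-and-drop-the-lock dance and can use its own local IRQ flags. The resulting hierarchy, sketched as a comment (inferred from the code above):

	/*
	 * qhp->mutex             - held across c4iw_modify_qp(); may sleep
	 *   chp->lock (irqsave)  - CQ lock first ...
	 *     qhp->lock          - ... then QP lock
	 * comp_handler callbacks are invoked with no locks held.
	 */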
 
 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
@@ -966,7 +1008,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 {
        struct fw_ri_wr *wqe;
        int ret;
-       struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
 
        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
@@ -985,28 +1026,16 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
        wqe->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
-       wqe->cookie = (u64)&wr_wait;
+       wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
        wqe->u.fini.type = FW_RI_TYPE_FINI;
-       c4iw_init_wr_wait(&wr_wait);
+       c4iw_init_wr_wait(&ep->com.wr_wait);
        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
                goto out;
 
-       wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
-       if (!wr_wait.done) {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(rhp->rdev.lldi.pdev));
-               rhp->rdev.flags = T4_FATAL_ERROR;
-               ret = -EIO;
-       } else {
-               ret = wr_wait.ret;
-               if (ret)
-                       printk(KERN_WARNING MOD
-                              "%s: Abnormal close qpid %d ret %u\n",
-                              pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
-                              ret);
-       }
+       ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
+                            qhp->wq.sq.qid, __func__);
 out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
@@ -1040,7 +1069,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 {
        struct fw_ri_wr *wqe;
        int ret;
-       struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
 
        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
@@ -1060,7 +1088,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
                FW_WR_FLOWID(qhp->ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
 
-       wqe->cookie = (u64)&wr_wait;
+       wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
 
        wqe->u.init.type = FW_RI_TYPE_INIT;
        wqe->u.init.mpareqbit_p2ptype =
@@ -1097,19 +1125,13 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
        if (qhp->attr.mpa_attr.initiator)
                build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-       c4iw_init_wr_wait(&wr_wait);
+       c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
                goto out;
 
-       wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
-       if (!wr_wait.done) {
-               printk(KERN_ERR MOD "Device %s not responding!\n",
-                      pci_name(rhp->rdev.lldi.pdev));
-               rhp->rdev.flags = T4_FATAL_ERROR;
-               ret = -EIO;
-       } else
-               ret = wr_wait.ret;
+       ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
+                                 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
 out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
@@ -1122,7 +1144,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 {
        int ret = 0;
        struct c4iw_qp_attributes newattr = qhp->attr;
-       unsigned long flag;
        int disconnect = 0;
        int terminate = 0;
        int abort = 0;
@@ -1133,7 +1154,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
             qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
             (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
 
-       spin_lock_irqsave(&qhp->lock, flag);
+       mutex_lock(&qhp->mutex);
 
        /* Process attr changes if in IDLE */
        if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
@@ -1184,7 +1205,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        qhp->attr.mpa_attr = attrs->mpa_attr;
                        qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
                        qhp->ep = qhp->attr.llp_stream_handle;
-                       qhp->attr.state = C4IW_QP_STATE_RTS;
+                       set_state(qhp, C4IW_QP_STATE_RTS);
 
                        /*
                         * Ref the endpoint here and deref when we
@@ -1193,15 +1214,13 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                         * transition.
                         */
                        c4iw_get_ep(&qhp->ep->com);
-                       spin_unlock_irqrestore(&qhp->lock, flag);
                        ret = rdma_init(rhp, qhp);
-                       spin_lock_irqsave(&qhp->lock, flag);
                        if (ret)
                                goto err;
                        break;
                case C4IW_QP_STATE_ERROR:
-                       qhp->attr.state = C4IW_QP_STATE_ERROR;
-                       flush_qp(qhp, &flag);
+                       set_state(qhp, C4IW_QP_STATE_ERROR);
+                       flush_qp(qhp);
                        break;
                default:
                        ret = -EINVAL;
@@ -1212,38 +1231,38 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                switch (attrs->next_state) {
                case C4IW_QP_STATE_CLOSING:
                        BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
-                       qhp->attr.state = C4IW_QP_STATE_CLOSING;
+                       set_state(qhp, C4IW_QP_STATE_CLOSING);
                        ep = qhp->ep;
                        if (!internal) {
                                abort = 0;
                                disconnect = 1;
-                               c4iw_get_ep(&ep->com);
+                               c4iw_get_ep(&qhp->ep->com);
                        }
-                       spin_unlock_irqrestore(&qhp->lock, flag);
                        ret = rdma_fini(rhp, qhp, ep);
-                       spin_lock_irqsave(&qhp->lock, flag);
                        if (ret) {
-                               c4iw_get_ep(&ep->com);
+                               if (internal)
+                                       c4iw_get_ep(&qhp->ep->com);
                                disconnect = abort = 1;
                                goto err;
                        }
                        break;
                case C4IW_QP_STATE_TERMINATE:
-                       qhp->attr.state = C4IW_QP_STATE_TERMINATE;
+                       set_state(qhp, C4IW_QP_STATE_TERMINATE);
                        if (qhp->ibqp.uobject)
                                t4_set_wq_in_error(&qhp->wq);
                        ep = qhp->ep;
-                       c4iw_get_ep(&ep->com);
-                       terminate = 1;
+                       if (!internal)
+                               terminate = 1;
                        disconnect = 1;
+                       c4iw_get_ep(&qhp->ep->com);
                        break;
                case C4IW_QP_STATE_ERROR:
-                       qhp->attr.state = C4IW_QP_STATE_ERROR;
+                       set_state(qhp, C4IW_QP_STATE_ERROR);
                        if (!internal) {
                                abort = 1;
                                disconnect = 1;
                                ep = qhp->ep;
-                               c4iw_get_ep(&ep->com);
+                               c4iw_get_ep(&qhp->ep->com);
                        }
                        goto err;
                        break;
@@ -1259,8 +1278,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                }
                switch (attrs->next_state) {
                case C4IW_QP_STATE_IDLE:
-                       flush_qp(qhp, &flag);
-                       qhp->attr.state = C4IW_QP_STATE_IDLE;
+                       flush_qp(qhp);
+                       set_state(qhp, C4IW_QP_STATE_IDLE);
                        qhp->attr.llp_stream_handle = NULL;
                        c4iw_put_ep(&qhp->ep->com);
                        qhp->ep = NULL;
@@ -1282,7 +1301,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        ret = -EINVAL;
                        goto out;
                }
-               qhp->attr.state = C4IW_QP_STATE_IDLE;
+               set_state(qhp, C4IW_QP_STATE_IDLE);
                break;
        case C4IW_QP_STATE_TERMINATE:
                if (!internal) {
@@ -1305,15 +1324,16 @@ err:
 
        /* disassociate the LLP connection */
        qhp->attr.llp_stream_handle = NULL;
-       ep = qhp->ep;
+       if (!ep)
+               ep = qhp->ep;
        qhp->ep = NULL;
-       qhp->attr.state = C4IW_QP_STATE_ERROR;
+       set_state(qhp, C4IW_QP_STATE_ERROR);
        free = 1;
        wake_up(&qhp->wait);
        BUG_ON(!ep);
-       flush_qp(qhp, &flag);
+       flush_qp(qhp);
 out:
-       spin_unlock_irqrestore(&qhp->lock, flag);
+       mutex_unlock(&qhp->mutex);
 
        if (terminate)
                post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
@@ -1335,7 +1355,6 @@ out:
         */
        if (free)
                c4iw_put_ep(&ep->com);
-
        PDBG("%s exit state %d\n", __func__, qhp->attr.state);
        return ret;
 }
@@ -1380,7 +1399,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        int sqsize, rqsize;
        struct c4iw_ucontext *ucontext;
        int ret;
-       struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
+       struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
 
        PDBG("%s ib_pd %p\n", __func__, pd);
 
@@ -1450,6 +1469,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->attr.max_ord = 1;
        qhp->attr.max_ird = 1;
        spin_lock_init(&qhp->lock);
+       mutex_init(&qhp->mutex);
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);
 
@@ -1478,7 +1498,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                        ret = -ENOMEM;
                        goto err6;
                }
-
+               if (t4_sq_onchip(&qhp->wq.sq)) {
+                       mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
+                       if (!mm5) {
+                               ret = -ENOMEM;
+                               goto err7;
+                       }
+                       uresp.flags = C4IW_QPF_ONCHIP;
+               } else
+                       uresp.flags = 0;
                uresp.qid_mask = rhp->rdev.qpmask;
                uresp.sqid = qhp->wq.sq.qid;
                uresp.sq_size = qhp->wq.sq.size;
@@ -1487,6 +1515,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                uresp.rq_size = qhp->wq.rq.size;
                uresp.rq_memsize = qhp->wq.rq.memsize;
                spin_lock(&ucontext->mmap_lock);
+               if (mm5) {
+                       uresp.ma_sync_key = ucontext->key;
+                       ucontext->key += PAGE_SIZE;
+               }
                uresp.sq_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.rq_key = ucontext->key;
@@ -1498,9 +1530,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (ret)
-                       goto err7;
+                       goto err8;
                mm1->key = uresp.sq_key;
-               mm1->addr = virt_to_phys(qhp->wq.sq.queue);
+               mm1->addr = qhp->wq.sq.phys_addr;
                mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
                insert_mmap(ucontext, mm1);
                mm2->key = uresp.rq_key;
@@ -1515,6 +1547,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                mm4->addr = qhp->wq.rq.udb;
                mm4->len = PAGE_SIZE;
                insert_mmap(ucontext, mm4);
+               if (mm5) {
+                       mm5->key = uresp.ma_sync_key;
+                       mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
+                                   + A_PCIE_MA_SYNC) & PAGE_MASK;
+                       mm5->len = PAGE_SIZE;
+                       insert_mmap(ucontext, mm5);
+               }
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
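Each *_key returned in c4iw_create_qp_resp is a PAGE_SIZE-stride pseudo-offset: insert_mmap() records key -> (addr, len), and c4iw_mmap() (earlier hunk) looks the key up and picks the page protection. A hypothetical userspace sketch of how a provider library such as libcxgb4 might consume the new ma_sync_key:

	/* hypothetical userspace; resp comes from c4iw_create_qp_resp */
	if (resp.flags & C4IW_QPF_ONCHIP)
		ma_sync = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
			       cmd_fd, resp.ma_sync_key);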
@@ -1522,6 +1561,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
             __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
             qhp->wq.sq.qid);
        return &qhp->ibqp;
+err8:
+       kfree(mm5);
 err7:
        kfree(mm4);
 err6:
index 83b23dfa250dea9171ef44a9e37fa3fc7efe3698..4fb50d58b4934a18fb298e3ef542a7c2f424dab5 100644 (file)
@@ -311,6 +311,9 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 {
        unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
        PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+       if (!addr && printk_ratelimit())
+               printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
+                      pci_name(rdev->lldi.pdev));
        return (u32)addr;
 }
 
@@ -370,6 +373,9 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 {
        unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
        PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+       if (!addr && printk_ratelimit())
+               printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
+                      pci_name(rdev->lldi.pdev));
        return (u32)addr;
 }
 
@@ -416,3 +422,59 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 {
        gen_pool_destroy(rdev->rqt_pool);
 }
+
+/*
+ * On-Chip QP Memory.
+ */
+#define MIN_OCQP_SHIFT 12      /* 4KB == min ocqp size */
+
+u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
+{
+       unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
+       PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+       return (u32)addr;
+}
+
+void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+       PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+       gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
+}
+
+int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
+{
+       unsigned start, chunk, top;
+
+       rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
+       if (!rdev->ocqp_pool)
+               return -ENOMEM;
+
+       start = rdev->lldi.vr->ocq.start;
+       chunk = rdev->lldi.vr->ocq.size;
+       top = start + chunk;
+
+       while (start < top) {
+               chunk = min(top - start + 1, chunk);
+               if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
+                       PDBG("%s failed to add OCQP chunk (%x/%x)\n",
+                            __func__, start, chunk);
+                       if (chunk <= 1024 << MIN_OCQP_SHIFT) {
+                               printk(KERN_WARNING MOD
+                                      "Failed to add all OCQP chunks (%x/%x)\n",
+                                      start, top - start);
+                               return 0;
+                       }
+                       chunk >>= 1;
+               } else {
+                       PDBG("%s added OCQP chunk (%x/%x)\n",
+                            __func__, start, chunk);
+                       start += chunk;
+               }
+       }
+       return 0;
+}
+
+void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
+{
+       gen_pool_destroy(rdev->ocqp_pool);
+}
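Two points worth noting about the pool above. c4iw_ocqp_pool_alloc() returns an offset within the adapter's on-chip queue region rather than a bus address, which is why alloc_oc_sq() in the qp.c hunks adds oc_mw_pa and subtracts ocq.start. And the create loop degrades gracefully: a failed gen_pool_add() halves the chunk while it is still larger than 1024 minimum-sized (4 KB) chunks; below that threshold it warns and returns 0, letting the driver carry on with whatever on-chip memory was registered.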
index 24f369046ef3cc772a84ca54082b8b8d945916f8..70004425d695b0d765a3305406dc3ac5c0970d44 100644 (file)
@@ -52,6 +52,7 @@
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
 #define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+#define A_PCIE_MA_SYNC 0x30b4
 
 struct t4_status_page {
        __be32 rsvd1;   /* flit 0 - hw owns */
@@ -65,7 +66,7 @@ struct t4_status_page {
 
 #define T4_EQ_ENTRY_SIZE 64
 
-#define T4_SQ_NUM_SLOTS 4
+#define T4_SQ_NUM_SLOTS 5
 #define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
 #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
@@ -78,7 +79,7 @@ struct t4_status_page {
                        sizeof(struct fw_ri_rdma_write_wr) - \
                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
-                       sizeof(struct fw_ri_immd)))
+                       sizeof(struct fw_ri_immd)) & ~31UL)
 #define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
 
 #define T4_RQ_NUM_SLOTS 2
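Worked numbers for the two changes above: raising T4_SQ_NUM_SLOTS to 5 gives T4_SQ_NUM_BYTES = 64 * 5 = 320 bytes, and the new & ~31UL mask keeps T4_MAX_FR_IMMD a multiple of 32, so a pbllen that build_fastreg() rounds up to 32 bytes can never overrun the immediate space. For instance, if the fw_ri_fr_nsmr_wr and fw_ri_immd headers together took 48 bytes (hypothetical sizes), 320 - 48 = 272 would mask down to 256, giving T4_MAX_FR_DEPTH = 256 / 8 = 32 pages.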
@@ -266,10 +267,36 @@ struct t4_swsqe {
        u16                     idx;
 };
 
+static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
+{
+#if defined(__i386__) || defined(__x86_64__)
+       return pgprot_writecombine(prot);
+#elif defined(CONFIG_PPC64)
+       return __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE) &
+                       ~(pgprot_t)_PAGE_GUARDED);
+#else
+       return pgprot_noncached(prot);
+#endif
+}
+
+static inline int t4_ocqp_supported(void)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
+       return 1;
+#else
+       return 0;
+#endif
+}
+
+enum {
+       T4_SQ_ONCHIP = (1<<0),
+};
+
 struct t4_sq {
        union t4_wr *queue;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping);
+       unsigned long phys_addr;
        struct t4_swsqe *sw_sq;
        struct t4_swsqe *oldest_read;
        u64 udb;
@@ -280,6 +307,7 @@ struct t4_sq {
        u16 cidx;
        u16 pidx;
        u16 wq_pidx;
+       u16 flags;
 };
 
 struct t4_swrqe {
@@ -350,6 +378,11 @@ static inline void t4_rq_consume(struct t4_wq *wq)
                wq->rq.cidx = 0;
 }
 
+static inline int t4_sq_onchip(struct t4_sq *sq)
+{
+       return sq->flags & T4_SQ_ONCHIP;
+}
+
 static inline int t4_sq_empty(struct t4_wq *wq)
 {
        return wq->sq.in_use == 0;
@@ -396,30 +429,27 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
 
 static inline int t4_wq_in_error(struct t4_wq *wq)
 {
-       return wq->sq.queue[wq->sq.size].status.qp_err;
+       return wq->rq.queue[wq->rq.size].status.qp_err;
 }
 
 static inline void t4_set_wq_in_error(struct t4_wq *wq)
 {
-       wq->sq.queue[wq->sq.size].status.qp_err = 1;
        wq->rq.queue[wq->rq.size].status.qp_err = 1;
 }
 
 static inline void t4_disable_wq_db(struct t4_wq *wq)
 {
-       wq->sq.queue[wq->sq.size].status.db_off = 1;
        wq->rq.queue[wq->rq.size].status.db_off = 1;
 }
 
 static inline void t4_enable_wq_db(struct t4_wq *wq)
 {
-       wq->sq.queue[wq->sq.size].status.db_off = 0;
        wq->rq.queue[wq->rq.size].status.db_off = 0;
 }
 
 static inline int t4_wq_db_enabled(struct t4_wq *wq)
 {
-       return !wq->sq.queue[wq->sq.size].status.db_off;
+       return !wq->rq.queue[wq->rq.size].status.db_off;
 }
 
 struct t4_cq {
index ed6414abde024c322585530bbb71b0b45d44c433..e6669d54770e8ce034541f1c5e7d314d8274070f 100644 (file)
@@ -50,7 +50,13 @@ struct c4iw_create_cq_resp {
        __u32 qid_mask;
 };
 
+enum {
+       C4IW_QPF_ONCHIP = (1<<0)
+};
+
 struct c4iw_create_qp_resp {
+       __u64 ma_sync_key;
        __u64 sq_key;
        __u64 rq_key;
        __u64 sq_db_gts_key;
@@ -62,5 +68,6 @@ struct c4iw_create_qp_resp {
        __u32 sq_size;
        __u32 rq_size;
        __u32 qid_mask;
+       __u32 flags;
 };
 #endif
index 53f4cd4fc19a33d26da6e58d8fb0d4d5a41930e8..43cae84005f0a5bb7d80b440eb0e34e1e6a2500e 100644 (file)
@@ -171,7 +171,7 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
                }
 
                ret = ehca_reg_maxmr(shca, e_maxmr,
-                                    (void *)ehca_map_vaddr((void *)KERNELBASE),
+                                    (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
                                     mr_access_flags, e_pd,
                                     &e_maxmr->ib.ib_mr.lkey,
                                     &e_maxmr->ib.ib_mr.rkey);
@@ -1636,7 +1636,7 @@ int ehca_reg_internal_maxmr(
 
        /* register internal max-MR on HCA */
        size_maxmr = ehca_mr_len;
-       iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
+       iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
        ib_pbuf.addr = 0;
        ib_pbuf.size = size_maxmr;
        num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
@@ -2209,7 +2209,7 @@ int ehca_mr_is_maxmr(u64 size,
 {
        /* a MR is treated as max-MR only if it fits following: */
        if ((size == ehca_mr_len) &&
-           (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
+           (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
                ehca_gen_dbg("this is a max-MR");
                return 1;
        } else
index fa3df82681dfb9b695232bca88434d17948b4027..4496f2820c92d98197e3fcd69b4b938165215163 100644 (file)
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \
+ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \
        -DIPATH_KERN_TYPE=0
 
 obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
index d13e72685dcfa17388c274a62df80f88f68b558f..12d5bf76302cc0eff403b5538607350f4e554e42 100644 (file)
@@ -57,6 +57,7 @@ static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
                goto bail;
        }
 
+       inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_private = data;
index 11a236f8d884a4902b6243139eddd6da6c98c76a..4b8f9c49397e2c23cd9471ad854936ce1af8ed46 100644 (file)
  * SOFTWARE.
  */
 
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
 #include <linux/slab.h>
+#include <linux/inet.h>
+#include <linux/string.h>
 
 #include "mlx4_ib.h"
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
+                       u8 *mac, int *is_mcast, u8 port)
 {
-       struct mlx4_dev *dev = to_mdev(pd->device)->dev;
-       struct mlx4_ib_ah *ah;
+       struct in6_addr in6;
 
-       ah = kmalloc(sizeof *ah, GFP_ATOMIC);
-       if (!ah)
-               return ERR_PTR(-ENOMEM);
+       *is_mcast = 0;
 
-       memset(&ah->av, 0, sizeof ah->av);
+       memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
+       if (rdma_link_local_addr(&in6))
+               rdma_get_ll_mac(&in6, mac);
+       else if (rdma_is_multicast_addr(&in6)) {
+               rdma_get_mcast_mac(&in6, mac);
+               *is_mcast = 1;
+       } else
+               return -EINVAL;
 
-       ah->av.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
-       ah->av.g_slid  = ah_attr->src_path_bits;
-       ah->av.dlid    = cpu_to_be16(ah_attr->dlid);
-       if (ah_attr->static_rate) {
-               ah->av.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
-               while (ah->av.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
-                      !(1 << ah->av.stat_rate & dev->caps.stat_rate_support))
-                       --ah->av.stat_rate;
-       }
-       ah->av.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+       return 0;
+}
+
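mlx4_ib_resolve_grh() only handles GIDs it can translate to a MAC without a neighbour lookup. Worked example: the link-local GID fe80::0211:22ff:fe33:4455 decodes per EUI-64 (universal/local bit flipped, the ff:fe infix dropped) to MAC 00:11:22:33:44:55 via rdma_get_ll_mac(); a multicast GID maps onto the derived multicast MAC via rdma_get_mcast_mac(); anything else returns -EINVAL. That restriction is what the TBD comment in mlx4_ib_create_ah() below leans on to avoid sleeping when called in atomic context.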
+static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+                                 struct mlx4_ib_ah *ah)
+{
+       struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+
+       ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+       ah->av.ib.g_slid  = ah_attr->src_path_bits;
        if (ah_attr->ah_flags & IB_AH_GRH) {
-               ah->av.g_slid   |= 0x80;
-               ah->av.gid_index = ah_attr->grh.sgid_index;
-               ah->av.hop_limit = ah_attr->grh.hop_limit;
-               ah->av.sl_tclass_flowlabel |=
+               ah->av.ib.g_slid   |= 0x80;
+               ah->av.ib.gid_index = ah_attr->grh.sgid_index;
+               ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
+               ah->av.ib.sl_tclass_flowlabel |=
                        cpu_to_be32((ah_attr->grh.traffic_class << 20) |
                                    ah_attr->grh.flow_label);
-               memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16);
+               memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
+       }
+
+       ah->av.ib.dlid    = cpu_to_be16(ah_attr->dlid);
+       if (ah_attr->static_rate) {
+               ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+               while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+                      !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
+                       --ah->av.ib.stat_rate;
        }
+       ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 
        return &ah->ibah;
 }
 
+static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+                                   struct mlx4_ib_ah *ah)
+{
+       struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
+       struct mlx4_dev *dev = ibdev->dev;
+       union ib_gid sgid;
+       u8 mac[6];
+       int err;
+       int is_mcast;
+       u16 vlan_tag;
+
+       err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
+       if (err)
+               return ERR_PTR(err);
+
+       memcpy(ah->av.eth.mac, mac, 6);
+       err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
+       if (err)
+               return ERR_PTR(err);
+       vlan_tag = rdma_get_vlan_id(&sgid);
+       if (vlan_tag < 0x1000)
+               vlan_tag |= (ah_attr->sl & 7) << 13;
+       ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+       ah->av.eth.gid_index = ah_attr->grh.sgid_index;
+       ah->av.eth.vlan = cpu_to_be16(vlan_tag);
+       if (ah_attr->static_rate) {
+               ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+               while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+                      !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
+                       --ah->av.eth.stat_rate;
+       }
+
+       /*
+        * HW requires multicast LID so we just choose one.
+        */
+       if (is_mcast)
+               ah->av.ib.dlid = cpu_to_be16(0xc000);
+
+       memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
+       ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+
+       return &ah->ibah;
+}
+
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+       struct mlx4_ib_ah *ah;
+       struct ib_ah *ret;
+
+       ah = kzalloc(sizeof *ah, GFP_ATOMIC);
+       if (!ah)
+               return ERR_PTR(-ENOMEM);
+
+       if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
+               if (!(ah_attr->ah_flags & IB_AH_GRH)) {
+                       ret = ERR_PTR(-EINVAL);
+               } else {
+                       /*
+                        * TBD: need to handle the case when we get
+                        * called in an atomic context and there we
+                        * might sleep.  We don't expect this
+                        * currently since we're working with link
+                        * local addresses which we can translate
+                        * without going to sleep.
+                        */
+                       ret = create_iboe_ah(pd, ah_attr, ah);
+               }
+
+               if (IS_ERR(ret))
+                       kfree(ah);
+
+               return ret;
+       } else
+               return create_ib_ah(pd, ah_attr, ah); /* never fails */
+}
+
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
 {
        struct mlx4_ib_ah *ah = to_mah(ibah);
+       enum rdma_link_layer ll;
 
        memset(ah_attr, 0, sizeof *ah_attr);
-       ah_attr->dlid          = be16_to_cpu(ah->av.dlid);
-       ah_attr->sl            = be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
-       ah_attr->port_num      = be32_to_cpu(ah->av.port_pd) >> 24;
-       if (ah->av.stat_rate)
-               ah_attr->static_rate = ah->av.stat_rate - MLX4_STAT_RATE_OFFSET;
-       ah_attr->src_path_bits = ah->av.g_slid & 0x7F;
+       ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+       ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
+       ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+       ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
+       if (ah->av.ib.stat_rate)
+               ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
+       ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;
 
        if (mlx4_ib_ah_grh_present(ah)) {
                ah_attr->ah_flags = IB_AH_GRH;
 
                ah_attr->grh.traffic_class =
-                       be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20;
+                       be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
                ah_attr->grh.flow_label =
-                       be32_to_cpu(ah->av.sl_tclass_flowlabel) & 0xfffff;
-               ah_attr->grh.hop_limit  = ah->av.hop_limit;
-               ah_attr->grh.sgid_index = ah->av.gid_index;
-               memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, 16);
+                       be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
+               ah_attr->grh.hop_limit  = ah->av.ib.hop_limit;
+               ah_attr->grh.sgid_index = ah->av.ib.gid_index;
+               memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
        }
 
        return 0;
index f38d5b1189273d8fb320374d06c3c3f044d7cec2..c9a8dd63b9e24b757eb5813f18d8fda08a89d988 100644 (file)
@@ -311,19 +311,25 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
        struct ib_mad_agent *agent;
        int p, q;
        int ret;
+       enum rdma_link_layer ll;
 
-       for (p = 0; p < dev->num_ports; ++p)
+       for (p = 0; p < dev->num_ports; ++p) {
+               ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
                for (q = 0; q <= 1; ++q) {
-                       agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
-                                                     q ? IB_QPT_GSI : IB_QPT_SMI,
-                                                     NULL, 0, send_handler,
-                                                     NULL, NULL);
-                       if (IS_ERR(agent)) {
-                               ret = PTR_ERR(agent);
-                               goto err;
-                       }
-                       dev->send_agent[p][q] = agent;
+                       if (ll == IB_LINK_LAYER_INFINIBAND) {
+                               agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
+                                                             q ? IB_QPT_GSI : IB_QPT_SMI,
+                                                             NULL, 0, send_handler,
+                                                             NULL, NULL);
+                               if (IS_ERR(agent)) {
+                                       ret = PTR_ERR(agent);
+                                       goto err;
+                               }
+                               dev->send_agent[p][q] = agent;
+                       } else
+                               dev->send_agent[p][q] = NULL;
                }
+       }
 
        return 0;
 
@@ -344,8 +350,10 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
        for (p = 0; p < dev->num_ports; ++p) {
                for (q = 0; q <= 1; ++q) {
                        agent = dev->send_agent[p][q];
-                       dev->send_agent[p][q] = NULL;
-                       ib_unregister_mad_agent(agent);
+                       if (agent) {
+                               dev->send_agent[p][q] = NULL;
+                               ib_unregister_mad_agent(agent);
+                       }
                }
 
                if (dev->sm_ah[p])
index 4e94e360e43b42ed1f31685bdcb8b7387bc1b46e..bf3e20cd029859f2beb23e962fa2640e30faf44c 100644 (file)
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
 
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
@@ -58,6 +63,15 @@ static const char mlx4_ib_version[] =
        DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";
 
+struct update_gid_work {
+       struct work_struct      work;
+       union ib_gid            gids[128];
+       struct mlx4_ib_dev     *dev;
+       int                     port;
+};
+
+static struct workqueue_struct *wq;
+
 static void init_query_mad(struct ib_smp *mad)
 {
        mad->base_version  = 1;
@@ -66,6 +80,8 @@ static void init_query_mad(struct ib_smp *mad)
        mad->method        = IB_MGMT_METHOD_GET;
 }
 
+static union ib_gid zgid;
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
 {
@@ -135,7 +151,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->max_srq             = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
        props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
        props->max_srq_sge         = dev->dev->caps.max_srq_sge;
-       props->max_fast_reg_page_list_len = PAGE_SIZE / sizeof (u64);
+       props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
        props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
        props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
                IB_ATOMIC_HCA : IB_ATOMIC_NONE;
@@ -154,28 +170,19 @@ out:
        return err;
 }
 
-static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
-                             struct ib_port_attr *props)
+static enum rdma_link_layer
+mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 {
-       struct ib_smp *in_mad  = NULL;
-       struct ib_smp *out_mad = NULL;
-       int err = -ENOMEM;
-
-       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
-       out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
-       if (!in_mad || !out_mad)
-               goto out;
-
-       memset(props, 0, sizeof *props);
-
-       init_query_mad(in_mad);
-       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
-       in_mad->attr_mod = cpu_to_be32(port);
+       struct mlx4_dev *dev = to_mdev(device)->dev;
 
-       err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-       if (err)
-               goto out;
+       return dev->caps.port_mask & (1 << (port_num - 1)) ?
+               IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+}
 
+static int ib_link_query_port(struct ib_device *ibdev, u8 port,
+                             struct ib_port_attr *props,
+                             struct ib_smp *out_mad)
+{
        props->lid              = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc              = out_mad->data[34] & 0x7;
        props->sm_lid           = be16_to_cpup((__be16 *) (out_mad->data + 18));
@@ -196,6 +203,80 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
        props->max_vl_num       = out_mad->data[37] >> 4;
        props->init_type_reply  = out_mad->data[41] >> 4;
 
+       return 0;
+}
+
+static u8 state_to_phys_state(enum ib_port_state state)
+{
+       return state == IB_PORT_ACTIVE ? 5 : 3;
+}
+
+static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+                              struct ib_port_attr *props,
+                              struct ib_smp *out_mad)
+{
+       struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
+       struct net_device *ndev;
+       enum ib_mtu tmp;
+
+       props->active_width     = IB_WIDTH_4X;
+       props->active_speed     = 4;
+       props->port_cap_flags   = IB_PORT_CM_SUP;
+       props->gid_tbl_len      = to_mdev(ibdev)->dev->caps.gid_table_len[port];
+       props->max_msg_sz       = to_mdev(ibdev)->dev->caps.max_msg_sz;
+       props->pkey_tbl_len     = 1;
+       props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
+       props->qkey_viol_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 48));
+       props->max_mtu          = IB_MTU_2048;
+       props->subnet_timeout   = 0;
+       props->max_vl_num       = out_mad->data[37] >> 4;
+       props->init_type_reply  = 0;
+       props->state            = IB_PORT_DOWN;
+       props->phys_state       = state_to_phys_state(props->state);
+       props->active_mtu       = IB_MTU_256;
+       spin_lock(&iboe->lock);
+       ndev = iboe->netdevs[port - 1];
+       if (!ndev)
+               goto out;
+
+       tmp = iboe_get_mtu(ndev->mtu);
+       props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
+
+       props->state            = netif_running(ndev) && netif_oper_up(ndev) ?
+                                       IB_PORT_ACTIVE : IB_PORT_DOWN;
+       props->phys_state       = state_to_phys_state(props->state);
+
+out:
+       spin_unlock(&iboe->lock);
+       return 0;
+}
+
+static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+                             struct ib_port_attr *props)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+       out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       memset(props, 0, sizeof *props);
+
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+       in_mad->attr_mod = cpu_to_be32(port);
+
+       err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+       if (err)
+               goto out;
+
+       err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
+               ib_link_query_port(ibdev, port, props, out_mad) :
+               eth_link_query_port(ibdev, port, props, out_mad);
+
 out:
        kfree(in_mad);
        kfree(out_mad);
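
mlx4_ib_port_link_layer() above reads the link layer straight out of a per-port bit in caps.port_mask, and mlx4_ib_query_port() then dispatches to ib_link_query_port() or eth_link_query_port() on that answer. A self-contained sketch of the bit test, assuming the same 1-based port numbering as the diff:

    #include <assert.h>
    #include <stdint.h>

    enum rdma_link_layer { IB_LINK_LAYER_INFINIBAND, IB_LINK_LAYER_ETHERNET };

    /* Bit (port_num - 1) set in port_mask => InfiniBand, clear => Ethernet. */
    static enum rdma_link_layer port_link_layer(uint32_t port_mask, int port_num)
    {
        return port_mask & (1u << (port_num - 1)) ?
            IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
    }

    int main(void)
    {
        uint32_t mask = 0x1;    /* port 1 = IB, port 2 = Ethernet */

        assert(port_link_layer(mask, 1) == IB_LINK_LAYER_INFINIBAND);
        assert(port_link_layer(mask, 2) == IB_LINK_LAYER_ETHERNET);
        return 0;
    }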
@@ -203,8 +284,8 @@ out:
        return err;
 }
 
-static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
-                            union ib_gid *gid)
+static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+                              union ib_gid *gid)
 {
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
@@ -241,6 +322,25 @@ out:
        return err;
 }
 
+static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
+                         union ib_gid *gid)
+{
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
+
+       *gid = dev->iboe.gid_table[port - 1][index];
+
+       return 0;
+}
+
+static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+                            union ib_gid *gid)
+{
+       if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+               return __mlx4_ib_query_gid(ibdev, port, index, gid);
+       else
+               return iboe_query_gid(ibdev, port, index, gid);
+}
+
 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                              u16 *pkey)
 {
@@ -272,14 +372,32 @@ out:
 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
 {
+       struct mlx4_cmd_mailbox *mailbox;
+
        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;
 
-       if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
-               spin_lock(&to_mdev(ibdev)->sm_lock);
-               memcpy(ibdev->node_desc, props->node_desc, 64);
-               spin_unlock(&to_mdev(ibdev)->sm_lock);
-       }
+       if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
+               return 0;
+
+       spin_lock(&to_mdev(ibdev)->sm_lock);
+       memcpy(ibdev->node_desc, props->node_desc, 64);
+       spin_unlock(&to_mdev(ibdev)->sm_lock);
+
+       /*
+        * If possible, pass the node desc to FW so that it can generate
+        * a "node description changed" trap (trap 144).  If the command
+        * fails, just ignore it.
+        */
+       mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
+       if (IS_ERR(mailbox))
+               return 0;
+
+       memset(mailbox->buf, 0, 256);
+       memcpy(mailbox->buf, props->node_desc, 64);
+       mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
+                MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+
+       mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
 
        return 0;
 }
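
The rewritten modify_device above first updates the cached node description under sm_lock and then tries, purely best-effort, to push it to firmware through a command mailbox: allocation or command failures are deliberately swallowed because the cached copy is already correct. The same shape in miniature (fw_set_node_desc() is a hypothetical stand-in):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t sm_lock = PTHREAD_MUTEX_INITIALIZER;
    static char node_desc[64];

    /* Hypothetical firmware call; nonzero return means failure. */
    static int fw_set_node_desc(const char *desc) { (void)desc; return -1; }

    static int modify_node_desc(const char *desc)
    {
        pthread_mutex_lock(&sm_lock);
        strncpy(node_desc, desc, sizeof(node_desc) - 1);
        pthread_mutex_unlock(&sm_lock);

        /* Best effort: the cached copy is already updated, so a
         * firmware failure is reported but deliberately not returned. */
        if (fw_set_node_desc(desc))
            fprintf(stderr, "fw update failed, ignoring\n");
        return 0;
    }

    int main(void)
    {
        return modify_node_desc("example node");
    }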
@@ -289,6 +407,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
 {
        struct mlx4_cmd_mailbox *mailbox;
        int err;
+       u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(mailbox))
@@ -304,7 +423,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
        }
 
-       err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+       err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
                       MLX4_CMD_TIME_CLASS_B);
 
        mlx4_free_cmd_mailbox(dev->dev, mailbox);
@@ -447,18 +566,132 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
        return 0;
 }
 
+static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
+{
+       struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+       struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+       struct mlx4_ib_gid_entry *ge;
+
+       ge = kzalloc(sizeof *ge, GFP_KERNEL);
+       if (!ge)
+               return -ENOMEM;
+
+       ge->gid = *gid;
+       if (mlx4_ib_add_mc(mdev, mqp, gid)) {
+               ge->port = mqp->port;
+               ge->added = 1;
+       }
+
+       mutex_lock(&mqp->mutex);
+       list_add_tail(&ge->list, &mqp->gid_list);
+       mutex_unlock(&mqp->mutex);
+
+       return 0;
+}
+
+int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+                  union ib_gid *gid)
+{
+       u8 mac[6];
+       struct net_device *ndev;
+       int ret = 0;
+
+       if (!mqp->port)
+               return 0;
+
+       spin_lock(&mdev->iboe.lock);
+       ndev = mdev->iboe.netdevs[mqp->port - 1];
+       if (ndev)
+               dev_hold(ndev);
+       spin_unlock(&mdev->iboe.lock);
+
+       if (ndev) {
+               rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+               rtnl_lock();
+               dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
+               ret = 1;
+               rtnl_unlock();
+               dev_put(ndev);
+       }
+
+       return ret;
+}
+
 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-       return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
-                                    &to_mqp(ibqp)->mqp, gid->raw,
-                                    !!(to_mqp(ibqp)->flags &
-                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+       int err;
+       struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+       struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+
+       err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
+                                   MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+       if (err)
+               return err;
+
+       err = add_gid_entry(ibqp, gid);
+       if (err)
+               goto err_add;
+
+       return 0;
+
+err_add:
+       mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+       return err;
+}
+
+static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
+{
+       struct mlx4_ib_gid_entry *ge;
+       struct mlx4_ib_gid_entry *tmp;
+       struct mlx4_ib_gid_entry *ret = NULL;
+
+       list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+               if (!memcmp(raw, ge->gid.raw, 16)) {
+                       ret = ge;
+                       break;
+               }
+       }
+
+       return ret;
 }
 
 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-       return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
-                                    &to_mqp(ibqp)->mqp, gid->raw);
+       int err;
+       struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+       struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+       u8 mac[6];
+       struct net_device *ndev;
+       struct mlx4_ib_gid_entry *ge;
+
+       err = mlx4_multicast_detach(mdev->dev,
+                                   &mqp->mqp, gid->raw);
+       if (err)
+               return err;
+
+       mutex_lock(&mqp->mutex);
+       ge = find_gid_entry(mqp, gid->raw);
+       if (ge) {
+               spin_lock(&mdev->iboe.lock);
+               ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
+               if (ndev)
+                       dev_hold(ndev);
+               spin_unlock(&mdev->iboe.lock);
+               rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+               if (ndev) {
+                       rtnl_lock();
+                       dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
+                       rtnl_unlock();
+                       dev_put(ndev);
+               }
+               list_del(&ge->list);
+               kfree(ge);
+       } else
+               printk(KERN_WARNING "could not find mgid entry\n");
+
+       mutex_unlock(&mqp->mutex);
+
+       return 0;
 }
 
 static int init_node_data(struct mlx4_ib_dev *dev)
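
mcg_attach/detach now keep a per-QP gid_list so the multicast MAC derived from the GID can be mirrored into the underlying net_device on IBoE ports, and attach rolls the hardware attach back if the bookkeeping allocation fails. A sketch of that attach-then-record-or-roll-back ordering (all functions hypothetical):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-ins for mlx4_multicast_attach/detach and add_gid_entry(). */
    static int hw_attach(void)        { return 0; }
    static void hw_detach(void)       { puts("rolled back hw attach"); }
    static int record_gid_entry(void) { return -ENOMEM; /* simulate failure */ }

    static int mcg_attach(void)
    {
        int err = hw_attach();
        if (err)
            return err;

        err = record_gid_entry();
        if (err) {
            /* Keep hardware and bookkeeping consistent. */
            hw_detach();
            return err;
        }
        return 0;
    }

    int main(void)
    {
        printf("mcg_attach: %d\n", mcg_attach());
        return 0;
    }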
@@ -543,15 +776,215 @@ static struct device_attribute *mlx4_class_attributes[] = {
        &dev_attr_board_id
 };
 
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
+{
+       memcpy(eui, dev->dev_addr, 3);
+       memcpy(eui + 5, dev->dev_addr + 3, 3);
+       if (vlan_id < 0x1000) {
+               eui[3] = vlan_id >> 8;
+               eui[4] = vlan_id & 0xff;
+       } else {
+               eui[3] = 0xff;
+               eui[4] = 0xfe;
+       }
+       eui[0] ^= 2;
+}
+
+static void update_gids_task(struct work_struct *work)
+{
+       struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
+       struct mlx4_cmd_mailbox *mailbox;
+       union ib_gid *gids;
+       int err;
+       struct mlx4_dev *dev = gw->dev->dev;
+       struct ib_event event;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
+               return;
+       }
+
+       gids = mailbox->buf;
+       memcpy(gids, gw->gids, sizeof gw->gids);
+
+       err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+                      1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+       if (err)
+               printk(KERN_WARNING "set port command failed\n");
+       else {
+               memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
+               event.device = &gw->dev->ib_dev;
+               event.element.port_num = gw->port;
+               event.event    = IB_EVENT_LID_CHANGE;
+               ib_dispatch_event(&event);
+       }
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       kfree(gw);
+}
+
+static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
+{
+       struct net_device *ndev = dev->iboe.netdevs[port - 1];
+       struct update_gid_work *work;
+       struct net_device *tmp;
+       int i;
+       u8 *hits;
+       int ret;
+       union ib_gid gid;
+       int free;
+       int found;
+       int need_update = 0;
+       u16 vid;
+
+       work = kzalloc(sizeof *work, GFP_ATOMIC);
+       if (!work)
+               return -ENOMEM;
+
+       hits = kzalloc(128, GFP_ATOMIC);
+       if (!hits) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       read_lock(&dev_base_lock);
+       for_each_netdev(&init_net, tmp) {
+               if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
+                       gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+                       vid = rdma_vlan_dev_vlan_id(tmp);
+                       mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
+                       found = 0;
+                       free = -1;
+                       for (i = 0; i < 128; ++i) {
+                               if (free < 0 &&
+                                   !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+                                       free = i;
+                               if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
+                                       hits[i] = 1;
+                                       found = 1;
+                                       break;
+                               }
+                       }
+
+                       if (!found) {
+                               if (tmp == ndev &&
+                                   (memcmp(&dev->iboe.gid_table[port - 1][0],
+                                           &gid, sizeof gid) ||
+                                    !memcmp(&dev->iboe.gid_table[port - 1][0],
+                                            &zgid, sizeof gid))) {
+                                       dev->iboe.gid_table[port - 1][0] = gid;
+                                       ++need_update;
+                                       hits[0] = 1;
+                               } else if (free >= 0) {
+                                       dev->iboe.gid_table[port - 1][free] = gid;
+                                       hits[free] = 1;
+                                       ++need_update;
+                               }
+                       }
+               }
+       }
+       read_unlock(&dev_base_lock);
+
+       for (i = 0; i < 128; ++i)
+               if (!hits[i]) {
+                       if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+                               ++need_update;
+                       dev->iboe.gid_table[port - 1][i] = zgid;
+               }
+
+       if (need_update) {
+               memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
+               INIT_WORK(&work->work, update_gids_task);
+               work->port = port;
+               work->dev = dev;
+               queue_work(wq, &work->work);
+       } else
+               kfree(work);
+
+       kfree(hits);
+       return 0;
+
+out:
+       kfree(work);
+       return ret;
+}
+
+static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
+{
+       switch (event) {
+       case NETDEV_UP:
+       case NETDEV_CHANGEADDR:
+               update_ipv6_gids(dev, port, 0);
+               break;
+
+       case NETDEV_DOWN:
+               update_ipv6_gids(dev, port, 1);
+               dev->iboe.netdevs[port - 1] = NULL;
+       }
+}
+
+static void netdev_added(struct mlx4_ib_dev *dev, int port)
+{
+       update_ipv6_gids(dev, port, 0);
+}
+
+static void netdev_removed(struct mlx4_ib_dev *dev, int port)
+{
+       update_ipv6_gids(dev, port, 1);
+}
+
+static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
+                               void *ptr)
+{
+       struct net_device *dev = ptr;
+       struct mlx4_ib_dev *ibdev;
+       struct net_device *oldnd;
+       struct mlx4_ib_iboe *iboe;
+       int port;
+
+       if (!net_eq(dev_net(dev), &init_net))
+               return NOTIFY_DONE;
+
+       ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
+       iboe = &ibdev->iboe;
+
+       spin_lock(&iboe->lock);
+       mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+               oldnd = iboe->netdevs[port - 1];
+               iboe->netdevs[port - 1] =
+                       mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
+               if (oldnd != iboe->netdevs[port - 1]) {
+                       if (iboe->netdevs[port - 1])
+                               netdev_added(ibdev, port);
+                       else
+                               netdev_removed(ibdev, port);
+               }
+       }
+
+       if (dev == iboe->netdevs[0] ||
+           (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
+               handle_en_event(ibdev, 1, event);
+       else if (dev == iboe->netdevs[1]
+                || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
+               handle_en_event(ibdev, 2, event);
+
+       spin_unlock(&iboe->lock);
+
+       return NOTIFY_DONE;
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
        struct mlx4_ib_dev *ibdev;
        int num_ports = 0;
        int i;
+       int err;
+       struct mlx4_ib_iboe *iboe;
 
        printk_once(KERN_INFO "%s", mlx4_ib_version);
 
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+       mlx4_foreach_ib_transport_port(i, dev)
                num_ports++;
 
        /* No point in registering a device with no ports... */
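
mlx4_addrconf_ifid_eui48() above builds the low 8 bytes of a link-local GID from the port MAC: OUI first, then either the VLAN id or the 0xff/0xfe filler, then the NIC-specific bytes, with the universal/local bit flipped, i.e. the RFC 4291 modified EUI-64 mapping except that a valid VLAN id replaces the filler. A standalone copy that can be unit-tested:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Build the 8-byte interface id of a link-local GID from a MAC,
     * mirroring the diff above: OUI, VLAN id (or ff:fe filler), NIC
     * bytes, universal/local bit flipped. */
    static void ifid_from_eui48(uint8_t eui[8], uint16_t vlan_id,
                                const uint8_t mac[6])
    {
        memcpy(eui, mac, 3);
        memcpy(eui + 5, mac + 3, 3);
        if (vlan_id < 0x1000) {
            eui[3] = vlan_id >> 8;
            eui[4] = vlan_id & 0xff;
        } else {
            eui[3] = 0xff;
            eui[4] = 0xfe;
        }
        eui[0] ^= 2;
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
        uint8_t eui[8];

        ifid_from_eui48(eui, 0xffff, mac);  /* 0xffff: no VLAN */
        assert(eui[0] == 0x02 && eui[3] == 0xff && eui[4] == 0xfe);
        assert(eui[5] == 0x12 && eui[7] == 0x56);
        return 0;
    }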
@@ -564,6 +997,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                return NULL;
        }
 
+       iboe = &ibdev->iboe;
+
        if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
                goto err_dealloc;
 
@@ -612,6 +1047,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
        ibdev->ib_dev.query_device      = mlx4_ib_query_device;
        ibdev->ib_dev.query_port        = mlx4_ib_query_port;
+       ibdev->ib_dev.get_link_layer    = mlx4_ib_port_link_layer;
        ibdev->ib_dev.query_gid         = mlx4_ib_query_gid;
        ibdev->ib_dev.query_pkey        = mlx4_ib_query_pkey;
        ibdev->ib_dev.modify_device     = mlx4_ib_modify_device;
@@ -656,6 +1092,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        ibdev->ib_dev.unmap_fmr         = mlx4_ib_unmap_fmr;
        ibdev->ib_dev.dealloc_fmr       = mlx4_ib_fmr_dealloc;
 
+       spin_lock_init(&iboe->lock);
+
        if (init_node_data(ibdev))
                goto err_map;
 
@@ -668,16 +1106,28 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        if (mlx4_ib_mad_init(ibdev))
                goto err_reg;
 
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
+               iboe->nb.notifier_call = mlx4_ib_netdev_event;
+               err = register_netdevice_notifier(&iboe->nb);
+               if (err)
+                       goto err_reg;
+       }
+
        for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
                if (device_create_file(&ibdev->ib_dev.dev,
                                       mlx4_class_attributes[i]))
-                       goto err_reg;
+                       goto err_notif;
        }
 
        ibdev->ib_active = true;
 
        return ibdev;
 
+err_notif:
+       if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+               printk(KERN_WARNING "failure unregistering notifier\n");
+       flush_workqueue(wq);
+
 err_reg:
        ib_unregister_device(&ibdev->ib_dev);
 
@@ -703,11 +1153,16 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 
        mlx4_ib_mad_cleanup(ibdev);
        ib_unregister_device(&ibdev->ib_dev);
+       if (ibdev->iboe.nb.notifier_call) {
+               if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+                       printk(KERN_WARNING "failure unregistering notifier\n");
+               ibdev->iboe.nb.notifier_call = NULL;
+       }
+       iounmap(ibdev->uar_map);
 
-       for (p = 1; p <= ibdev->num_ports; ++p)
+       mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
                mlx4_CLOSE_PORT(dev, p);
 
-       iounmap(ibdev->uar_map);
        mlx4_uar_free(dev, &ibdev->priv_uar);
        mlx4_pd_free(dev, ibdev->priv_pdn);
        ib_dealloc_device(&ibdev->ib_dev);
@@ -747,19 +1202,33 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 }
 
 static struct mlx4_interface mlx4_ib_interface = {
-       .add    = mlx4_ib_add,
-       .remove = mlx4_ib_remove,
-       .event  = mlx4_ib_event
+       .add            = mlx4_ib_add,
+       .remove         = mlx4_ib_remove,
+       .event          = mlx4_ib_event,
+       .protocol       = MLX4_PROTOCOL_IB
 };
 
 static int __init mlx4_ib_init(void)
 {
-       return mlx4_register_interface(&mlx4_ib_interface);
+       int err;
+
+       wq = create_singlethread_workqueue("mlx4_ib");
+       if (!wq)
+               return -ENOMEM;
+
+       err = mlx4_register_interface(&mlx4_ib_interface);
+       if (err) {
+               destroy_workqueue(wq);
+               return err;
+       }
+
+       return 0;
 }
 
 static void __exit mlx4_ib_cleanup(void)
 {
        mlx4_unregister_interface(&mlx4_ib_interface);
+       destroy_workqueue(wq);
 }
 
 module_init(mlx4_ib_init);
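
mlx4_ib_init() now creates the driver workqueue before registering the interface and destroys it if registration fails, while cleanup unregisters first and destroys the queue last, so no GID-update work can be queued against a dead queue. The same create/register versus unregister/destroy symmetry in miniature (malloc/free stand in for the workqueue calls):

    #include <stdio.h>
    #include <stdlib.h>

    static void *wq;
    static int register_interface(void)    { return 0; }
    static void unregister_interface(void) { puts("unregistered"); }

    static int module_init_(void)
    {
        wq = malloc(1);                 /* create the workqueue first */
        if (!wq)
            return -1;

        if (register_interface()) {     /* undo on failure, in reverse */
            free(wq);
            return -1;
        }
        return 0;
    }

    static void module_exit_(void)
    {
        unregister_interface();         /* stop producers of work... */
        free(wq);                       /* ...then tear the queue down */
    }

    int main(void)
    {
        if (!module_init_())
            module_exit_();
        return 0;
    }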
index 3486d7675e56dfece310b090abf0eec93d140ec6..2a322f21049fad5366a4c9464e998dba4ebe8167 100644 (file)
@@ -112,6 +112,13 @@ enum mlx4_ib_qp_flags {
        MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK     = 1 << 1,
 };
 
+struct mlx4_ib_gid_entry {
+       struct list_head        list;
+       union ib_gid            gid;
+       int                     added;
+       u8                      port;
+};
+
 struct mlx4_ib_qp {
        struct ib_qp            ibqp;
        struct mlx4_qp          mqp;
@@ -138,6 +145,8 @@ struct mlx4_ib_qp {
        u8                      resp_depth;
        u8                      sq_no_prefetch;
        u8                      state;
+       int                     mlx_type;
+       struct list_head        gid_list;
 };
 
 struct mlx4_ib_srq {
@@ -157,7 +166,14 @@ struct mlx4_ib_srq {
 
 struct mlx4_ib_ah {
        struct ib_ah            ibah;
-       struct mlx4_av          av;
+       union mlx4_ext_av       av;
+};
+
+struct mlx4_ib_iboe {
+       spinlock_t              lock;
+       struct net_device      *netdevs[MLX4_MAX_PORTS];
+       struct notifier_block   nb;
+       union ib_gid            gid_table[MLX4_MAX_PORTS][128];
 };
 
 struct mlx4_ib_dev {
@@ -176,6 +192,7 @@ struct mlx4_ib_dev {
 
        struct mutex            cap_mask_mutex;
        bool                    ib_active;
+       struct mlx4_ib_iboe     iboe;
 };
 
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -314,9 +331,20 @@ int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
 int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
 int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
 
+int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
+                       u8 *mac, int *is_mcast, u8 port);
+
 static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 {
-       return !!(ah->av.g_slid & 0x80);
+       u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
+
+       if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
+               return 1;
+
+       return !!(ah->av.ib.g_slid & 0x80);
 }
 
+int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+                  union ib_gid *gid);
+
 #endif /* MLX4_IB_H */
index 1d27b9a8e2d6b6944e002296479116952456d73b..dca55b19a6f198ee9d9fee7b6f003306a55279a9 100644 (file)
@@ -226,7 +226,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
        struct mlx4_ib_fast_reg_page_list *mfrpl;
        int size = page_list_len * sizeof (u64);
 
-       if (size > PAGE_SIZE)
+       if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);
 
        mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
index 6a60827b2301b1d19581f1de95266f433e9d586d..9a7794ac34c1ead2f08a9da3078db777ee1ef1ec 100644 (file)
 
 #include <linux/log2.h>
 #include <linux/slab.h>
+#include <linux/netdevice.h>
 
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
+#include <rdma/ib_addr.h>
 
 #include <linux/mlx4/qp.h>
 
@@ -48,17 +50,26 @@ enum {
 
 enum {
        MLX4_IB_DEFAULT_SCHED_QUEUE     = 0x83,
-       MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f
+       MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
+       MLX4_IB_LINK_TYPE_IB            = 0,
+       MLX4_IB_LINK_TYPE_ETH           = 1
 };
 
 enum {
        /*
-        * Largest possible UD header: send with GRH and immediate data.
+        * Largest possible UD header: send with GRH and immediate
+        * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
+        * tag.  (LRH would only use 8 bytes, so Ethernet is the
+        * biggest case.)
         */
-       MLX4_IB_UD_HEADER_SIZE          = 72,
+       MLX4_IB_UD_HEADER_SIZE          = 82,
        MLX4_IB_LSO_HEADER_SPARE        = 128,
 };
 
+enum {
+       MLX4_IB_IBOE_ETHERTYPE          = 0x8915
+};
+
 struct mlx4_ib_sqp {
        struct mlx4_ib_qp       qp;
        int                     pkey_index;
@@ -462,6 +473,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
+       INIT_LIST_HEAD(&qp->gid_list);
 
        qp->state        = IB_QPS_RESET;
        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
@@ -649,6 +661,16 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re
        }
 }
 
+static void del_gid_entries(struct mlx4_ib_qp *qp)
+{
+       struct mlx4_ib_gid_entry *ge, *tmp;
+
+       list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+               list_del(&ge->list);
+               kfree(ge);
+       }
+}
+
 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                              int is_user)
 {
@@ -695,6 +717,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                if (!qp->ibqp.srq)
                        mlx4_db_free(dev->dev, &qp->db);
        }
+
+       del_gid_entries(qp);
 }
 
 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
@@ -852,6 +876,14 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
                         struct mlx4_qp_path *path, u8 port)
 {
+       int err;
+       int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
+               IB_LINK_LAYER_ETHERNET;
+       u8 mac[6];
+       int is_mcast;
+       u16 vlan_tag;
+       int vidx;
+
        path->grh_mylmc     = ah->src_path_bits & 0x7f;
        path->rlid          = cpu_to_be16(ah->dlid);
        if (ah->static_rate) {
@@ -879,12 +911,49 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        }
 
-       path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
-               ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+       if (is_eth) {
+               path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+                       ((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);
+
+               if (!(ah->ah_flags & IB_AH_GRH))
+                       return -1;
+
+               err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
+               if (err)
+                       return err;
+
+               memcpy(path->dmac, mac, 6);
+               path->ackto = MLX4_IB_LINK_TYPE_ETH;
+               /* use index 0 into MAC table for IBoE */
+               path->grh_mylmc &= 0x80;
+
+               vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+               if (vlan_tag < 0x1000) {
+                       if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
+                               return -ENOENT;
+
+                       path->vlan_index = vidx;
+                       path->fl = 1 << 6;
+               }
+       } else
+               path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+                       ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
 
        return 0;
 }
 
+static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+       struct mlx4_ib_gid_entry *ge, *tmp;
+
+       list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+               if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
+                       ge->added = 1;
+                       ge->port = qp->port;
+               }
+       }
+}
+
 static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                               const struct ib_qp_attr *attr, int attr_mask,
                               enum ib_qp_state cur_state, enum ib_qp_state new_state)
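
mlx4_set_path() above, and build_mlx_header()/mlx4_ib_post_send() below, all rely on the same convention: a VLAN id is real only when it is below 0x1000 (the VID field is 12 bits wide), and 0xffff serves as the "untagged" sentinel. The convention in two lines:

    #include <assert.h>
    #include <stdint.h>

    #define NO_VLAN 0xffffu     /* sentinel: no 802.1Q tag present */

    /* A VLAN id is 12 bits, so anything >= 0x1000 means "untagged". */
    static int vlan_id_valid(uint16_t vid)
    {
        return vid < 0x1000;
    }

    int main(void)
    {
        assert(vlan_id_valid(0x005));
        assert(!vlan_id_valid(NO_VLAN));
        return 0;
    }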
@@ -980,7 +1049,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        }
 
        if (attr_mask & IB_QP_TIMEOUT) {
-               context->pri_path.ackto = attr->timeout << 3;
+               context->pri_path.ackto |= attr->timeout << 3;
                optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
        }
 
@@ -1118,8 +1187,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                qp->atomic_rd_en = attr->qp_access_flags;
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->resp_depth = attr->max_dest_rd_atomic;
-       if (attr_mask & IB_QP_PORT)
+       if (attr_mask & IB_QP_PORT) {
                qp->port = attr->port_num;
+               update_mcg_macs(dev, qp);
+       }
        if (attr_mask & IB_QP_ALT_PATH)
                qp->alt_port = attr->alt_port_num;
 
@@ -1221,40 +1292,59 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
        struct mlx4_wqe_mlx_seg *mlx = wqe;
        struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
        struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+       union ib_gid sgid;
        u16 pkey;
        int send_size;
        int header_size;
        int spc;
        int i;
+       int is_eth;
+       int is_vlan = 0;
+       int is_grh;
+       u16 vlan;
 
        send_size = 0;
        for (i = 0; i < wr->num_sge; ++i)
                send_size += wr->sg_list[i].length;
 
-       ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header);
+       is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
+       is_grh = mlx4_ib_ah_grh_present(ah);
+       if (is_eth) {
+               ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                                 ah->av.ib.gid_index, &sgid);
+               vlan = rdma_get_vlan_id(&sgid);
+               is_vlan = vlan < 0x1000;
+       }
+       ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
+
+       if (!is_eth) {
+               sqp->ud_header.lrh.service_level =
+                       be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+               sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
+               sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
+       }
 
-       sqp->ud_header.lrh.service_level   =
-               be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
-       sqp->ud_header.lrh.destination_lid = ah->av.dlid;
-       sqp->ud_header.lrh.source_lid      = cpu_to_be16(ah->av.g_slid & 0x7f);
-       if (mlx4_ib_ah_grh_present(ah)) {
+       if (is_grh) {
                sqp->ud_header.grh.traffic_class =
-                       (be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff;
+                       (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
                sqp->ud_header.grh.flow_label    =
-                       ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
-               sqp->ud_header.grh.hop_limit     = ah->av.hop_limit;
-               ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24,
-                                 ah->av.gid_index, &sqp->ud_header.grh.source_gid);
+                       ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+               sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
+               ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                                 ah->av.ib.gid_index, &sqp->ud_header.grh.source_gid);
                memcpy(sqp->ud_header.grh.destination_gid.raw,
-                      ah->av.dgid, 16);
+                      ah->av.ib.dgid, 16);
        }
 
        mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
-       mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
-                                 (sqp->ud_header.lrh.destination_lid ==
-                                  IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
-                                 (sqp->ud_header.lrh.service_level << 8));
-       mlx->rlid   = sqp->ud_header.lrh.destination_lid;
+
+       if (!is_eth) {
+               mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
+                                         (sqp->ud_header.lrh.destination_lid ==
+                                          IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
+                                         (sqp->ud_header.lrh.service_level << 8));
+               mlx->rlid = sqp->ud_header.lrh.destination_lid;
+       }
 
        switch (wr->opcode) {
        case IB_WR_SEND:
@@ -1270,9 +1360,29 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                return -EINVAL;
        }
 
-       sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
-       if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
-               sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
+       if (is_eth) {
+               u8 *smac;
+
+               memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
+               /* FIXME: cache smac value? */
+               smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+               memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+               if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
+                       mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+               if (!is_vlan) {
+                       sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+               } else {
+                       u16 pcp;
+
+                       sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+                       pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
+                       sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
+               }
+       } else {
+               sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
+               if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
+                       sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
+       }
        sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
        if (!sqp->qp.ibqp.qp_num)
                ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
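
When build_mlx_header() emits a tagged IBoE packet it folds priority bits taken from sl_tclass_flowlabel into the upper bits of the VLAN tag, with the 12-bit VID below them. Generically, an 802.1Q TCI packs PCP in bits 15:13, DEI in bit 12 and VID in bits 11:0; a worked sketch of that assembly (the exact PCP source bits in the diff are hardware-specific):

    #include <assert.h>
    #include <stdint.h>

    /* 802.1Q TCI layout: PCP in bits 15:13, DEI in bit 12, VID in 11:0. */
    static uint16_t build_tci(uint8_t pcp, uint8_t dei, uint16_t vid)
    {
        return (uint16_t)((pcp & 7) << 13 | (dei & 1) << 12 | (vid & 0xfff));
    }

    int main(void)
    {
        assert(build_tci(5, 0, 100) == 0xa064);
        return 0;
    }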
@@ -1429,11 +1539,14 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
 }
 
 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
-                            struct ib_send_wr *wr)
+                            struct ib_send_wr *wr, __be16 *vlan)
 {
        memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
        dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
        dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+       dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
+       memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
+       *vlan = dseg->vlan;
 }
 
 static void set_mlx_icrc_seg(void *dseg)
@@ -1536,6 +1649,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        __be32 uninitialized_var(lso_hdr_sz);
        __be32 blh;
        int i;
+       __be16 vlan = cpu_to_be16(0xffff);
 
        spin_lock_irqsave(&qp->sq.lock, flags);
 
@@ -1639,7 +1753,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        break;
 
                case IB_QPT_UD:
-                       set_datagram_seg(wqe, wr);
+                       set_datagram_seg(wqe, wr, &vlan);
                        wqe  += sizeof (struct mlx4_wqe_datagram_seg);
                        size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 
@@ -1717,6 +1831,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
                        (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
 
+               if (be16_to_cpu(vlan) < 0x1000) {
+                       ctrl->ins_vlan = 1 << 6;
+                       ctrl->vlan_tag = vlan;
+               }
+
                stamp = ind + qp->sq_spare_wqes;
                ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
 
@@ -1866,17 +1985,27 @@ static int to_ib_qp_access_flags(int mlx4_flags)
        return ib_flags;
 }
 
-static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
+static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
                                struct mlx4_qp_path *path)
 {
+       struct mlx4_dev *dev = ibdev->dev;
+       int is_eth;
+
        memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
        ib_ah_attr->port_num      = path->sched_queue & 0x40 ? 2 : 1;
 
        if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
                return;
 
+       is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
+               IB_LINK_LAYER_ETHERNET;
+       if (is_eth)
+               ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
+               ((path->sched_queue & 4) << 1);
+       else
+               ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
+
        ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
-       ib_ah_attr->sl            = (path->sched_queue >> 2) & 0xf;
        ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
        ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
        ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
@@ -1929,8 +2058,8 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
                to_ib_qp_access_flags(be32_to_cpu(context.params2));
 
        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
-               to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
-               to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
+               to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
+               to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
                qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
                qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
        }
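
For Ethernet ports the 4-bit SL is packed into sched_queue with its low three bits at bits 5:3 and its top bit at bit 2 (mlx4_set_path() above), and to_ib_ah_attr() applies the exact inverse when reporting the address handle back. A round-trip check of the two expressions, isolating just the SL bits (the real field also carries the default-queue and port bits):

    #include <assert.h>
    #include <stdint.h>

    /* Encode, per mlx4_set_path(): sl[2:0] -> bits 5:3, sl[3] -> bit 2. */
    static uint8_t eth_sched_queue(uint8_t sl)
    {
        return (uint8_t)(((sl & 7) << 3) | ((sl & 8) >> 1));
    }

    /* Decode, per to_ib_ah_attr(): the exact inverse of the above. */
    static uint8_t eth_sl(uint8_t sched_queue)
    {
        return (uint8_t)(((sched_queue >> 3) & 7) | ((sched_queue & 4) << 1));
    }

    int main(void)
    {
        for (uint8_t sl = 0; sl < 16; ++sl)
            assert(eth_sl(eth_sched_queue(sl)) == sl);
        return 0;
    }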
index d2d172e6289c2171d02c07c36515d90dc2d8c7ce..a34c9d38e82250ba50c31aa367afed1fb7f9d0b0 100644 (file)
@@ -1493,7 +1493,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
        int err;
        u16 pkey;
 
-       ib_ud_header_init(256, /* assume a MAD */
+       ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
                          mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
                          &sqp->ud_header);
 
index 6220d9d75b582f468789c9680d1f01399d043792..25ad0f9944c0198782e308e9e606cee1c6aba446 100644 (file)
@@ -1424,7 +1424,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 {
 
        int     reset = 0;      /* whether to send reset in case of err.. */
-       int     passive_state;
        atomic_inc(&cm_resets_recvd);
        nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
                        " refcnt=%d\n", cm_node, cm_node->state,
@@ -1439,7 +1438,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
                active_open_err(cm_node, skb, reset);
                break;
        case NES_CM_STATE_MPAREQ_RCVD:
-               passive_state = atomic_add_return(1, &cm_node->passive_state);
+               atomic_inc(&cm_node->passive_state);
                dev_kfree_skb_any(skb);
                break;
        case NES_CM_STATE_ESTABLISHED:
index 10560c796fd6c0ffc591601c610579d3e1b6e8ca..3892e2c0e95a57b38f2bd0258355d43540003e14 100644 (file)
@@ -271,6 +271,7 @@ static int nes_netdev_stop(struct net_device *netdev)
 
        if (netif_msg_ifdown(nesvnic))
                printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
+       netif_carrier_off(netdev);
 
        /* Disable network packets */
        napi_disable(&nesvnic->napi);
index 546fc22405fe315636cdf36a2207e5f14302497b..99933e4e48ff853da4a79f14ce899a1dea5afc67 100644 (file)
@@ -476,9 +476,9 @@ static struct ib_fast_reg_page_list *nes_alloc_fast_reg_page_list(
        }
        nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, "
                  "ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, "
-                 "pbl.paddr= %p\n", pnesfrpl, &pnesfrpl->ibfrpl,
+                 "pbl.paddr = %llx\n", pnesfrpl, &pnesfrpl->ibfrpl,
                  pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva,
-                 (void *)pnesfrpl->nes_wqe_pbl.paddr);
+                 (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr);
 
        return pifrpl;
 }
@@ -584,7 +584,9 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
-       if (nesvnic->linkup)
+       if (netif_queue_stopped(netdev))
+               props->state = IB_PORT_DOWN;
+       else if (nesvnic->linkup)
                props->state = IB_PORT_ACTIVE;
        else
                props->state = IB_PORT_DOWN;
@@ -3483,13 +3485,13 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                        for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)
                                dst_page_list[i] = cpu_to_le64(src_page_list[i]);
 
-                       nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %p, "
-                                 "length: %d, rkey: %0x, pgl_paddr: %p, "
+                       nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, "
+                                 "length: %d, rkey: %0x, pgl_paddr: %llx, "
                                  "page_list_len: %u, wqe_misc: %x\n",
-                                 (void *)ib_wr->wr.fast_reg.iova_start,
+                                 (unsigned long long) ib_wr->wr.fast_reg.iova_start,
                                  ib_wr->wr.fast_reg.length,
                                  ib_wr->wr.fast_reg.rkey,
-                                 (void *)pnesfrpl->nes_wqe_pbl.paddr,
+                                 (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr,
                                  ib_wr->wr.fast_reg.page_list_len,
                                  wqe_misc);
                        break;
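
The nes_debug() fixes above replace '%p' plus a bogus (void *) cast with '%llx' plus an explicit (unsigned long long) cast, which is the portable way to print a dma_addr_t or u64 whose width depends on the configuration. The same idiom in userspace:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;    /* may be 32-bit on some configs */

    int main(void)
    {
        dma_addr_t paddr = 0xdeadbeefcafeULL;

        /* Cast to unsigned long long so the format is correct no
         * matter how wide dma_addr_t happens to be on this build. */
        printf("pbl.paddr = %llx\n", (unsigned long long)paddr);
        return 0;
    }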
index 61de0654820ed8dd6c401816a6c26ec22dae4673..64c9e7d02d4a3e667bebf099fd6ed93160329f22 100644 (file)
@@ -1406,7 +1406,7 @@ extern struct mutex qib_mutex;
  */
 #define qib_early_err(dev, fmt, ...) \
        do { \
-               dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
+               dev_err(dev, fmt, ##__VA_ARGS__); \
        } while (0)
 
 #define qib_dev_err(dd, fmt, ...) \
index a0e6613e8be6151d3fdc00efc3005ad8c6f22ebd..7e433d75c775a963fdedb38612ef734a6211ce79 100644 (file)
@@ -58,6 +58,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
                goto bail;
        }
 
+       inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = 0;
        inode->i_gid = 0;
index f1d16d3a01f66c72ab004b62dbb6575dde11ba02..f3b50393604307a92fd1ee0b4e1f2e9bd706607c 100644 (file)
@@ -1243,6 +1243,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
                qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
                      "work if CONFIG_PCI_MSI is not enabled\n",
                      ent->device);
+               dd = ERR_PTR(-ENODEV);
 #endif
                break;
 
index 7fa6e5592630247b7208f22205e980c574c3af83..48b6674cbc49f97b9fd4e8d225325b6304833b5d 100644 (file)
@@ -103,16 +103,20 @@ int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        } else
                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (ret)
+       if (ret) {
                qib_early_err(&pdev->dev,
                              "Unable to set DMA consistent mask: %d\n", ret);
+               goto bail;
+       }
 
        pci_set_master(pdev);
        ret = pci_enable_pcie_error_reporting(pdev);
-       if (ret)
+       if (ret) {
                qib_early_err(&pdev->dev,
                              "Unable to enable pcie error reporting: %d\n",
                              ret);
+               ret = 0;
+       }
        goto done;
 
 bail:
index a0931119bd78c50a7324b6da4350832e66312bac..955fb7157793f4a65582e21998e74a3f7474496d 100644 (file)
@@ -2068,7 +2068,10 @@ send_last:
                        goto nack_op_err;
                if (!ret)
                        goto rnr_nak;
-               goto send_last_imm;
+               wc.ex.imm_data = ohdr->u.rc.imm_data;
+               hdrsize += 4;
+               wc.wc_flags = IB_WC_WITH_IMM;
+               goto send_last;
 
        case OP(RDMA_READ_REQUEST): {
                struct qib_ack_entry *e;
index b9c8b6346c1b3e475a5d945a4ce611b36a5a6e3c..32ccf3c824ca5545bdf5972123016dca5fb110d4 100644 (file)
@@ -457,8 +457,10 @@ rdma_first:
                }
                if (opcode == OP(RDMA_WRITE_ONLY))
                        goto rdma_last;
-               else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+               else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
+                       wc.ex.imm_data = ohdr->u.rc.imm_data;
                        goto rdma_last_imm;
+               }
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                /* Check for invalid length PMTU or posted rwqe len. */
@@ -471,8 +473,8 @@ rdma_first:
                break;
 
        case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
-rdma_last_imm:
                wc.ex.imm_data = ohdr->u.imm_data;
+rdma_last_imm:
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
 
index ec6b4fbe25e4416fb45ef9e76f5703093c26e86a..dfa71903d6e467c59071e3cc01b04fea98c256b3 100644 (file)
@@ -223,6 +223,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct sk_buff *skb;
        u64 mapping[IPOIB_UD_RX_SG];
+       union ib_gid *dgid;
 
        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);
@@ -271,6 +272,16 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        ipoib_ud_dma_unmap_rx(priv, mapping);
        ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
 
+       /* First byte of dgid signals multicast when 0xff */
+       dgid = &((struct ib_grh *)skb->data)->dgid;
+
+       if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
+               skb->pkt_type = PACKET_HOST;
+       else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
+               skb->pkt_type = PACKET_BROADCAST;
+       else
+               skb->pkt_type = PACKET_MULTICAST;
+
        skb_pull(skb, IB_GRH_BYTES);
 
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -281,9 +292,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        dev->stats.rx_bytes += skb->len;
 
        skb->dev = dev;
-       /* XXX get correct PACKET_ type here */
-       skb->pkt_type = PACKET_HOST;
-
        if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
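
The ipoib_ib.c hunk above replaces the hard-coded PACKET_HOST with a real classification: no GRH, or a dgid whose first byte is not 0xff, means unicast; a dgid equal to the broadcast group means broadcast; any other multicast GID means multicast. The decision tree in isolation (types reduced to what the test needs, GID values made up):

    #include <assert.h>
    #include <string.h>

    enum pkt_type { PACKET_HOST, PACKET_BROADCAST, PACKET_MULTICAST };

    /* Classify by the 16-byte destination GID, as in the diff: the
     * first byte is 0xff for every multicast GID, and the broadcast
     * group is just one particular multicast GID. */
    static enum pkt_type classify(int has_grh, const unsigned char dgid[16],
                                  const unsigned char bcast_gid[16])
    {
        if (!has_grh || dgid[0] != 0xff)
            return PACKET_HOST;
        if (!memcmp(dgid, bcast_gid, 16))
            return PACKET_BROADCAST;
        return PACKET_MULTICAST;
    }

    int main(void)
    {
        unsigned char bcast[16] = { 0xff, 0x12, 0x40, 0x1b };
        unsigned char mcast[16] = { 0xff, 0x12, 0x40, 0x1c };
        unsigned char ucast[16] = { 0xfe, 0x80 };

        assert(classify(1, ucast, bcast) == PACKET_HOST);
        assert(classify(1, bcast, bcast) == PACKET_BROADCAST);
        assert(classify(1, mcast, bcast) == PACKET_MULTICAST);
        return 0;
    }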
 
index b4b22576f12a0a0aba0c99f421fcdf7cd2699016..9ff7bc73ed95e8689bede687e30086acc99fe4fe 100644 (file)
@@ -1240,6 +1240,7 @@ static struct net_device *ipoib_add_port(const char *format,
                goto alloc_mem_failed;
 
        SET_NETDEV_DEV(priv->dev, hca->dma_device);
+       priv->dev->dev_id = port - 1;
 
        if (!ib_query_port(hca, port, &attr))
                priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
@@ -1362,6 +1363,8 @@ static void ipoib_add_one(struct ib_device *device)
        }
 
        for (p = s; p <= e; ++p) {
+               if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
+                       continue;
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
@@ -1409,8 +1412,7 @@ static int __init ipoib_init_module(void)
 
        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
-       ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
-                                                    IPOIB_MIN_QUEUE_SIZE));
+       ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
 #endif
index 7f8f16bad75339388160db5b0095d8e0d131dd96..cfc1d65c4577bf4ed2ce07bc2ea1924f406bd445 100644 (file)
@@ -291,7 +291,7 @@ static void srp_free_target_ib(struct srp_target_port *target)
 
        for (i = 0; i < SRP_RQ_SIZE; ++i)
                srp_free_iu(target->srp_host, target->rx_ring[i]);
-       for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
+       for (i = 0; i < SRP_SQ_SIZE; ++i)
                srp_free_iu(target->srp_host, target->tx_ring[i]);
 }
 
@@ -811,6 +811,75 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
        return len;
 }
 
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.  Lock cannot be dropped between call here and
+ * call to __srp_post_send().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ *   more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ *   one unanswered SRP request to an initiator.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+                                     enum srp_iu_type iu_type)
+{
+       s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+       struct srp_iu *iu;
+
+       srp_send_completion(target->send_cq, target);
+
+       if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+               return NULL;
+
+       /* Initiator responses to target requests do not consume credits */
+       if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
+               ++target->zero_req_lim;
+               return NULL;
+       }
+
+       iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
+       iu->type = iu_type;
+       return iu;
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static int __srp_post_send(struct srp_target_port *target,
+                          struct srp_iu *iu, int len)
+{
+       struct ib_sge list;
+       struct ib_send_wr wr, *bad_wr;
+       int ret = 0;
+
+       list.addr   = iu->dma;
+       list.length = len;
+       list.lkey   = target->srp_host->srp_dev->mr->lkey;
+
+       wr.next       = NULL;
+       wr.wr_id      = target->tx_head & SRP_SQ_MASK;
+       wr.sg_list    = &list;
+       wr.num_sge    = 1;
+       wr.opcode     = IB_WR_SEND;
+       wr.send_flags = IB_SEND_SIGNALED;
+
+       ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+       if (!ret) {
+               ++target->tx_head;
+               if (iu->type != SRP_IU_RSP)
+                       --target->req_lim;
+       }
+
+       return ret;
+}
+
 static int srp_post_recv(struct srp_target_port *target)
 {
        unsigned long flags;
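
The relocated __srp_get_tx_iu() above implements credit-based flow control: a normal command must leave SRP_TSK_MGMT_SQ_SIZE credits in reserve so a task-management IU can always go out, while initiator responses (SRP_IU_RSP) bypass the credit check entirely. A compact model of the reservation test, assuming a reserve of 1 (the actual SRP_TSK_MGMT_SQ_SIZE value is not shown in this hunk):

    #include <assert.h>

    enum iu_type { IU_CMD, IU_TSK_MGMT, IU_RSP };

    #define TSK_MGMT_RESERVE 1  /* credits held back for task management */

    /* Return 1 if an IU of this type may be sent with 'req_lim' credits. */
    static int may_send(enum iu_type type, int req_lim)
    {
        int rsv = (type == IU_TSK_MGMT) ? 0 : TSK_MGMT_RESERVE;

        /* Responses to target requests do not consume credits at all. */
        if (type == IU_RSP)
            return 1;
        return req_lim > rsv;
    }

    int main(void)
    {
        assert(!may_send(IU_CMD, 1));     /* last credit is reserved... */
        assert(may_send(IU_TSK_MGMT, 1)); /* ...for task management */
        assert(may_send(IU_RSP, 0));      /* responses are credit-free */
        return 0;
    }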
@@ -822,7 +891,7 @@ static int srp_post_recv(struct srp_target_port *target)
 
        spin_lock_irqsave(target->scsi_host->host_lock, flags);
 
-       next     = target->rx_head & (SRP_RQ_SIZE - 1);
+       next     = target->rx_head & SRP_RQ_MASK;
        wr.wr_id = next;
        iu       = target->rx_ring[next];
 
@@ -896,6 +965,71 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
+static int srp_response_common(struct srp_target_port *target, s32 req_delta,
+                              void *rsp, int len)
+{
+       struct ib_device *dev;
+       unsigned long flags;
+       struct srp_iu *iu;
+       int err = 1;
+
+       dev = target->srp_host->srp_dev->dev;
+
+       spin_lock_irqsave(target->scsi_host->host_lock, flags);
+       target->req_lim += req_delta;
+
+       iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+       if (!iu) {
+               shost_printk(KERN_ERR, target->scsi_host, PFX
+                            "no IU available to send response\n");
+               goto out;
+       }
+
+       ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
+       memcpy(iu->buf, rsp, len);
+       ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
+
+       err = __srp_post_send(target, iu, len);
+       if (err)
+               shost_printk(KERN_ERR, target->scsi_host, PFX
+                            "unable to post response: %d\n", err);
+
+out:
+       spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+       return err;
+}
+
+static void srp_process_cred_req(struct srp_target_port *target,
+                                struct srp_cred_req *req)
+{
+       struct srp_cred_rsp rsp = {
+               .opcode = SRP_CRED_RSP,
+               .tag = req->tag,
+       };
+       s32 delta = be32_to_cpu(req->req_lim_delta);
+
+       if (srp_response_common(target, delta, &rsp, sizeof rsp))
+               shost_printk(KERN_ERR, target->scsi_host, PFX
+                            "problems processing SRP_CRED_REQ\n");
+}
+
+static void srp_process_aer_req(struct srp_target_port *target,
+                               struct srp_aer_req *req)
+{
+       struct srp_aer_rsp rsp = {
+               .opcode = SRP_AER_RSP,
+               .tag = req->tag,
+       };
+       s32 delta = be32_to_cpu(req->req_lim_delta);
+
+       shost_printk(KERN_ERR, target->scsi_host, PFX
+                    "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
+
+       if (srp_response_common(target, delta, &rsp, sizeof rsp))
+               shost_printk(KERN_ERR, target->scsi_host, PFX
+                            "problems processing SRP_AER_REQ\n");
+}
+
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
        struct ib_device *dev;
@@ -923,6 +1057,14 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
                srp_process_rsp(target, iu->buf);
                break;
 
+       case SRP_CRED_REQ:
+               srp_process_cred_req(target, iu->buf);
+               break;
+
+       case SRP_AER_REQ:
+               srp_process_aer_req(target, iu->buf);
+               break;
+
        case SRP_T_LOGOUT:
                /* XXX Handle target logout */
                shost_printk(KERN_WARNING, target->scsi_host,
@@ -981,61 +1123,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
        }
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
- */
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-                                       enum srp_request_type req_type)
-{
-       s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
-
-       srp_send_completion(target->send_cq, target);
-
-       if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
-               return NULL;
-
-       if (target->req_lim < min) {
-               ++target->zero_req_lim;
-               return NULL;
-       }
-
-       return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
-}
-
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-                          struct srp_iu *iu, int len)
-{
-       struct ib_sge list;
-       struct ib_send_wr wr, *bad_wr;
-       int ret = 0;
-
-       list.addr   = iu->dma;
-       list.length = len;
-       list.lkey   = target->srp_host->srp_dev->mr->lkey;
-
-       wr.next       = NULL;
-       wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
-       wr.sg_list    = &list;
-       wr.num_sge    = 1;
-       wr.opcode     = IB_WR_SEND;
-       wr.send_flags = IB_SEND_SIGNALED;
-
-       ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-       if (!ret) {
-               ++target->tx_head;
-               --target->req_lim;
-       }
-
-       return ret;
-}
-
 static int srp_queuecommand(struct scsi_cmnd *scmnd,
                            void (*done)(struct scsi_cmnd *))
 {
@@ -1056,7 +1143,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
                return 0;
        }
 
-       iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
+       iu = __srp_get_tx_iu(target, SRP_IU_CMD);
        if (!iu)
                goto err;
 
@@ -1064,7 +1151,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
        ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
                                   DMA_TO_DEVICE);
 
-       req = list_entry(target->free_reqs.next, struct srp_request, list);
+       req = list_first_entry(&target->free_reqs, struct srp_request, list);
 
        scmnd->scsi_done     = done;
        scmnd->result        = 0;
@@ -1121,7 +1208,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
                        goto err;
        }
 
-       for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+       for (i = 0; i < SRP_SQ_SIZE; ++i) {
                target->tx_ring[i] = srp_alloc_iu(target->srp_host,
                                                  srp_max_iu_len,
                                                  GFP_KERNEL, DMA_TO_DEVICE);
@@ -1137,7 +1224,7 @@ err:
                target->rx_ring[i] = NULL;
        }
 
-       for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+       for (i = 0; i < SRP_SQ_SIZE; ++i) {
                srp_free_iu(target->srp_host, target->tx_ring[i]);
                target->tx_ring[i] = NULL;
        }
@@ -1252,8 +1339,13 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
                        target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
                        target->req_lim       = be32_to_cpu(rsp->req_lim_delta);
 
-                       target->scsi_host->can_queue = min(target->req_lim,
-                                                          target->scsi_host->can_queue);
+                       /*
+                        * Reserve credits for task management so we don't
+                        * bounce requests back to the SCSI mid-layer.
+                        */
+                       target->scsi_host->can_queue
+                               = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+                                     target->scsi_host->can_queue);
                } else {
                        shost_printk(KERN_WARNING, target->scsi_host,
                                    PFX "Unhandled RSP opcode %#x\n", opcode);
@@ -1350,6 +1442,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
                             struct srp_request *req, u8 func)
 {
+       struct ib_device *dev = target->srp_host->srp_dev->dev;
        struct srp_iu *iu;
        struct srp_tsk_mgmt *tsk_mgmt;
 
@@ -1363,10 +1456,12 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 
        init_completion(&req->done);
 
-       iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
+       iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
        if (!iu)
                goto out;
 
+       ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
+                                  DMA_TO_DEVICE);
        tsk_mgmt = iu->buf;
        memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
@@ -1376,6 +1471,8 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
        tsk_mgmt->tsk_mgmt_func = func;
        tsk_mgmt->task_tag      = req->index;
 
+       ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
+                                     DMA_TO_DEVICE);
        if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
                goto out;
 
@@ -1626,9 +1723,9 @@ static struct scsi_host_template srp_template = {
        .eh_abort_handler               = srp_abort,
        .eh_device_reset_handler        = srp_reset_device,
        .eh_host_reset_handler          = srp_reset_host,
-       .can_queue                      = SRP_SQ_SIZE,
+       .can_queue                      = SRP_CMD_SQ_SIZE,
        .this_id                        = -1,
-       .cmd_per_lun                    = SRP_SQ_SIZE,
+       .cmd_per_lun                    = SRP_CMD_SQ_SIZE,
        .use_clustering                 = ENABLE_CLUSTERING,
        .shost_attrs                    = srp_host_attrs
 };
@@ -1813,7 +1910,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
                                printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
                                goto out;
                        }
-                       target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
+                       target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
                        break;
 
                case SRP_OPT_IO_CLASS:
@@ -1891,7 +1988,7 @@ static ssize_t srp_create_target(struct device *dev,
 
        INIT_LIST_HEAD(&target->free_reqs);
        INIT_LIST_HEAD(&target->req_queue);
-       for (i = 0; i < SRP_SQ_SIZE; ++i) {
+       for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
                target->req_ring[i].index = i;
                list_add_tail(&target->req_ring[i].list, &target->free_reqs);
        }
@@ -2159,6 +2256,9 @@ static int __init srp_init_module(void)
 {
        int ret;
 
+       BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
+       BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
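+       /*
+        * These checks matter because the rings are indexed with
+        * tx_head & SRP_SQ_MASK and rx_head & SRP_RQ_MASK, e.g.
+        * head 65 & 63 == slot 1; that only works for power-of-two sizes.
+        */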
+
        if (srp_sg_tablesize > 255) {
                printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
                srp_sg_tablesize = 255;
index 5a80eac6fdaaca5339f2efa5a17abc57170874b7..ed0dce9e479fc9ed199d633662094e435944be0d 100644 (file)
@@ -59,7 +59,14 @@ enum {
 
        SRP_RQ_SHIFT            = 6,
        SRP_RQ_SIZE             = 1 << SRP_RQ_SHIFT,
-       SRP_SQ_SIZE             = SRP_RQ_SIZE - 1,
+       SRP_RQ_MASK             = SRP_RQ_SIZE - 1,
+
+       SRP_SQ_SIZE             = SRP_RQ_SIZE,
+       SRP_SQ_MASK             = SRP_SQ_SIZE - 1,
+       SRP_RSP_SQ_SIZE         = 1,
+       SRP_REQ_SQ_SIZE         = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
+       SRP_TSK_MGMT_SQ_SIZE    = 1,
+       SRP_CMD_SQ_SIZE         = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
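+
+       /*
+        * Worked example with SRP_RQ_SHIFT == 6: SRP_RQ_SIZE and SRP_SQ_SIZE
+        * are both 64; one send slot is reserved for credit/AER responses
+        * (SRP_RSP_SQ_SIZE) and one for task management
+        * (SRP_TSK_MGMT_SQ_SIZE), leaving SRP_CMD_SQ_SIZE == 62 slots for
+        * SCSI commands.
+        */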
 
        SRP_TAG_TSK_MGMT        = 1 << (SRP_RQ_SHIFT + 1),
 
@@ -75,9 +82,10 @@ enum srp_target_state {
        SRP_TARGET_REMOVED
 };
 
-enum srp_request_type {
-       SRP_REQ_NORMAL,
-       SRP_REQ_TASK_MGMT,
+enum srp_iu_type {
+       SRP_IU_CMD,
+       SRP_IU_TSK_MGMT,
+       SRP_IU_RSP,
 };
 
 struct srp_device {
@@ -144,11 +152,11 @@ struct srp_target_port {
 
        unsigned                tx_head;
        unsigned                tx_tail;
-       struct srp_iu          *tx_ring[SRP_SQ_SIZE + 1];
+       struct srp_iu          *tx_ring[SRP_SQ_SIZE];
 
        struct list_head        free_reqs;
        struct list_head        req_queue;
-       struct srp_request      req_ring[SRP_SQ_SIZE];
+       struct srp_request      req_ring[SRP_CMD_SQ_SIZE];
 
        struct work_struct      work;
 
@@ -164,6 +172,7 @@ struct srp_iu {
        void                   *buf;
        size_t                  size;
        enum dma_data_direction direction;
+       enum srp_iu_type        type;
 };
 
 #endif /* IB_SRP_H */
index 947d4afa25ca9abfe199ffea35f2233e944c9498..30e6195e19d4ddca5c25f6d26534e7a5e6c55785 100644 (file)
@@ -482,7 +482,7 @@ static s32 pm121_correct(s32 new_setpoint,
        new_min += correction->offset;
        new_min = (new_min >> 16) + min;
 
-       return max(new_setpoint, max(new_min, 0));
+       return max3(new_setpoint, new_min, 0);
 }
 
 static s32 pm121_connect(unsigned int control_id, s32 setpoint)
index 0b61792a278041bc6bedc9c04800457f93db1f6d..2129cdb115dc0e72caced734068deee43f10aa1e 100644 (file)
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
         * Issue the synchronous I/O from a different thread
         * to avoid generic_make_request recursion.
         */
-       INIT_WORK_ON_STACK(&req.work, do_metadata);
+       INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
        flush_workqueue(ps->metadata_wq);
 
index 1f69743b12ec036239fa6fe05540cec600c5b4b9..5a74db75f66f364daaf8b850893858731ed7e7da 100644 (file)
@@ -4,7 +4,6 @@
 
 menuconfig MISC_DEVICES
        bool "Misc devices"
-       default y
        ---help---
          Say Y here to get to see options for device drivers from various
          different categories. This option alone does not add any kernel code.
@@ -24,7 +23,8 @@ config AD525X_DPOT
          AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
          AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
          AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
-         ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173
+         ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173, AD5270,
+         AD5271, AD5272, AD5274
          digital potentiometer chips.
 
          See Documentation/misc-devices/ad525x_dpot.txt for the
@@ -284,6 +284,16 @@ config SGI_GRU_DEBUG
        This option enables additional debugging code for the SGI GRU driver.
        If you are unsure, say N.
 
+config APDS9802ALS
+       tristate "Medfield Avago APDS9802 ALS Sensor module"
+       depends on I2C
+       help
+         If you say yes here you get support for the Avago APDS9802
+         ambient light sensor.
+
+         This driver can also be built as a module.  If so, the module
+         will be called apds9802als.
+
 config ISL29003
        tristate "Intersil ISL29003 ambient light sensor"
        depends on I2C && SYSFS
@@ -294,6 +304,16 @@ config ISL29003
          This driver can also be built as a module.  If so, the module
          will be called isl29003.
 
+config ISL29020
+       tristate "Intersil ISL29020 ambient light sensor"
+       depends on I2C
+       help
+         If you say yes here you get support for the Intersil ISL29020
+         ambient light sensor.
+
+         This driver can also be built as a module.  If so, the module
+         will be called isl29020.
+
 config SENSORS_TSL2550
        tristate "Taos TSL2550 ambient light sensor"
        depends on I2C && SYSFS
@@ -314,6 +334,27 @@ config SENSORS_BH1780
          This driver can also be built as a module.  If so, the module
          will be called bh1780gli.
 
+config SENSORS_BH1770
+       tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+       depends on I2C
+       ---help---
+         Say Y here if you want to build a driver for the BH1770GLC (ROHM)
+         or SFH7770 (Osram) combined ambient light and proximity sensor chip.
+
+         To compile this driver as a module, choose M here: the
+         module will be called bh1770glc. If unsure, say N here.
+
+config SENSORS_APDS990X
+       tristate "APDS990X combined ALS and proximity sensors"
+       depends on I2C
+       default n
+       ---help---
+         Say Y here if you want to build a driver for the Avago APDS990x
+         combined ambient light and proximity sensor chip.
+
+         To compile this driver as a module, choose M here: the
+         module will be called apds990x. If unsure, say N here.
+
 config HMC6352
        tristate "Honeywell HMC6352 compass"
        depends on I2C
index 9f2986b4da2fd7031c071ec3ac0a2ce636ffe4b4..4be5c6fc5ef4a47ae8a16c213492beba700321bb 100644 (file)
@@ -16,6 +16,8 @@ obj-$(CONFIG_TIFM_CORE)               += tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)        += tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)          += phantom.o
 obj-$(CONFIG_SENSORS_BH1780)   += bh1780gli.o
+obj-$(CONFIG_SENSORS_BH1770)   += bh1770glc.o
+obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
 obj-$(CONFIG_SGI_IOC4)         += ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)       += kgdbts.o
@@ -23,7 +25,9 @@ obj-$(CONFIG_SGI_XP)          += sgi-xp/
 obj-$(CONFIG_SGI_GRU)          += sgi-gru/
 obj-$(CONFIG_CS5535_MFGPT)     += cs5535-mfgpt.o
 obj-$(CONFIG_HP_ILO)           += hpilo.o
+obj-$(CONFIG_APDS9802ALS)      += apds9802als.o
 obj-$(CONFIG_ISL29003)         += isl29003.o
+obj-$(CONFIG_ISL29020)         += isl29020.o
 obj-$(CONFIG_SENSORS_TSL2550)  += tsl2550.o
 obj-$(CONFIG_EP93XX_PWM)       += ep93xx_pwm.o
 obj-$(CONFIG_DS1682)           += ds1682.o
index 374352af79791f8fed39281789169519ce764ceb..4ff73c215746688357ca5406e332b3ed320a5985 100644 (file)
@@ -102,6 +102,8 @@ static const struct i2c_device_id ad_dpot_id[] = {
        {"ad5170", AD5170_ID},
        {"ad5172", AD5172_ID},
        {"ad5173", AD5173_ID},
+       {"ad5272", AD5272_ID},
+       {"ad5274", AD5274_ID},
        {}
 };
 MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
index b8c6df9c8437213603cd997c7138f41fe76b94c6..7f9a55afe05d77b224c88f87ac38d367788d643f 100644 (file)
@@ -38,6 +38,8 @@ static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
        {.name = "ad8402", .devid = AD8402_ID},
        {.name = "ad8403", .devid = AD8403_ID},
        {.name = "adn2850", .devid = ADN2850_ID},
+       {.name = "ad5270", .devid = AD5270_ID},
+       {.name = "ad5271", .devid = AD5271_ID},
        {}
 };
 
@@ -53,13 +55,13 @@ static int write8(void *client, u8 val)
 static int write16(void *client, u8 reg, u8 val)
 {
        u8 data[2] = {reg, val};
-       return spi_write(client, data, 1);
+       return spi_write(client, data, 2);
 }
 
 static int write24(void *client, u8 reg, u16 val)
 {
        u8 data[3] = {reg, val >> 8, val};
-       return spi_write(client, data, 1);
+       return spi_write(client, data, 3);
 }
 
 static int read8(void *client)
index 5e6fa8449e8b7bba11c2aca88fce59f34b80277e..7cb911028d094ca4416382806191a5e3e4b68e82 100644 (file)
@@ -29,9 +29,9 @@
  * AD5262              2               256             20, 50, 200
  * AD5263              4               256             20, 50, 200
  * AD5290              1               256             10, 50, 100
- * AD5291              1               256             20
- * AD5292              1               1024            20
- * AD5293              1               1024            20
+ * AD5291              1               256             20, 50, 100  (20-TP)
+ * AD5292              1               1024            20, 50, 100  (20-TP)
+ * AD5293              1               1024            20, 50, 100
  * AD7376              1               128             10, 50, 100, 1M
  * AD8400              1               256             1, 10, 50, 100
  * AD8402              2               256             1, 10, 50, 100
  * AD5170              1               256             2.5, 10, 50, 100 (OTP)
  * AD5172              2               256             2.5, 10, 50, 100 (OTP)
  * AD5173              2               256             2.5, 10, 50, 100 (OTP)
+ * AD5270              1               1024            20, 50, 100 (50-TP)
+ * AD5271              1               256             20, 50, 100 (50-TP)
+ * AD5272              1               1024            20, 50, 100 (50-TP)
+ * AD5274              1               256             20, 50, 100 (50-TP)
  *
  * See Documentation/misc-devices/ad525x_dpot.txt for more info.
  *
@@ -126,18 +130,38 @@ static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
 static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
 {
        unsigned ctrl = 0;
+       int value;
 
        if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
 
                if (dpot->feat & F_RDACS_WONLY)
                        return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
-
                if (dpot->uid == DPOT_UID(AD5291_ID) ||
                        dpot->uid == DPOT_UID(AD5292_ID) ||
-                       dpot->uid == DPOT_UID(AD5293_ID))
-                       return dpot_read_r8d8(dpot,
+                       dpot->uid == DPOT_UID(AD5293_ID)) {
+
+                       value = dpot_read_r8d8(dpot,
                                DPOT_AD5291_READ_RDAC << 2);
 
+                       if (dpot->uid == DPOT_UID(AD5291_ID))
+                               value = value >> 2;
+
+                       return value;
+               } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+                       dpot->uid == DPOT_UID(AD5271_ID)) {
+
+                       value = dpot_read_r8d8(dpot,
+                               DPOT_AD5270_1_2_4_READ_RDAC << 2);
+
+                       if (value < 0)
+                               return value;
+
+                       if (dpot->uid == DPOT_UID(AD5271_ID))
+                               value = value >> 2;
+
+                       return value;
+               }
+
                ctrl = DPOT_SPI_READ_RDAC;
        } else if (reg & DPOT_ADDR_EEPROM) {
                ctrl = DPOT_SPI_READ_EEPROM;
@@ -153,6 +177,7 @@ static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
 
 static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
 {
+       int value;
        unsigned ctrl = 0;
        switch (dpot->uid) {
        case DPOT_UID(AD5246_ID):
@@ -166,7 +191,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
        case DPOT_UID(AD5280_ID):
        case DPOT_UID(AD5282_ID):
                ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
-                       0 : DPOT_AD5291_RDAC_AB;
+                       0 : DPOT_AD5282_RDAC_AB;
                return dpot_read_r8d8(dpot, ctrl);
        case DPOT_UID(AD5170_ID):
        case DPOT_UID(AD5171_ID):
@@ -175,8 +200,27 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
        case DPOT_UID(AD5172_ID):
        case DPOT_UID(AD5173_ID):
                ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
-                       0 : DPOT_AD5272_3_A0;
+                       0 : DPOT_AD5172_3_A0;
                return dpot_read_r8d8(dpot, ctrl);
+       case DPOT_UID(AD5272_ID):
+       case DPOT_UID(AD5274_ID):
+                       dpot_write_r8d8(dpot,
+                               (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
+
+                       value = dpot_read_r8d16(dpot,
+                               DPOT_AD5270_1_2_4_RDAC << 2);
+
+                       if (value < 0)
+                               return value;
+                       /*
+                        * AD5272/AD5274 returns the high byte first, but the
+                        * underlying smbus layer expects the low byte first,
+                        * so swap (e.g. 0x12AB becomes 0xAB12).
+                        */
+                       value = swab16(value);
+
+                       /* AD5274 is the 8-bit device of this I2C pair */
+                       if (dpot->uid == DPOT_UID(AD5274_ID))
+                               value = value >> 2;
+               return value;
        default:
                if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
                        return dpot_read_r8d16(dpot, (reg & 0xF8) |
@@ -198,7 +242,7 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
 {
        unsigned val = 0;
 
-       if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+       if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
                if (dpot->feat & F_RDACS_WONLY)
                        dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
 
@@ -219,11 +263,30 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
                } else {
                        if (dpot->uid == DPOT_UID(AD5291_ID) ||
                                dpot->uid == DPOT_UID(AD5292_ID) ||
-                               dpot->uid == DPOT_UID(AD5293_ID))
+                               dpot->uid == DPOT_UID(AD5293_ID)) {
+
+                               dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2,
+                                               DPOT_AD5291_UNLOCK_CMD);
+
+                               if (dpot->uid == DPOT_UID(AD5291_ID))
+                                       value = value << 2;
+
                                return dpot_write_r8d8(dpot,
                                        (DPOT_AD5291_RDAC << 2) |
                                        (value >> 8), value & 0xFF);
+                       } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+                               dpot->uid == DPOT_UID(AD5271_ID)) {
+                               dpot_write_r8d8(dpot,
+                                               DPOT_AD5270_1_2_4_CTRLREG << 2,
+                                               DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+                               if (dpot->uid == DPOT_UID(AD5271_ID))
+                                       value = value << 2;
 
+                               return dpot_write_r8d8(dpot,
+                                       (DPOT_AD5270_1_2_4_RDAC << 2) |
+                                       (value >> 8), value & 0xFF);
+                       }
                        val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
                }
        } else if (reg & DPOT_ADDR_EEPROM) {
@@ -243,6 +306,16 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
                        val = DPOT_SPI_INC_ALL;
                        break;
                }
+       } else if (reg & DPOT_ADDR_OTP) {
+               if (dpot->uid == DPOT_UID(AD5291_ID) ||
+                       dpot->uid == DPOT_UID(AD5292_ID)) {
+                       return dpot_write_r8d8(dpot,
+                               DPOT_AD5291_STORE_XTPM << 2, 0);
+               } else if (dpot->uid == DPOT_UID(AD5270_ID) ||
+                       dpot->uid == DPOT_UID(AD5271_ID)) {
+                       return dpot_write_r8d8(dpot,
+                               DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+               }
        } else
                BUG();
 
@@ -273,7 +346,7 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
        case DPOT_UID(AD5280_ID):
        case DPOT_UID(AD5282_ID):
                ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
-                       0 : DPOT_AD5291_RDAC_AB;
+                       0 : DPOT_AD5282_RDAC_AB;
                return dpot_write_r8d8(dpot, ctrl, value);
                break;
        case DPOT_UID(AD5171_ID):
@@ -289,12 +362,12 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
        case DPOT_UID(AD5172_ID):
        case DPOT_UID(AD5173_ID):
                ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
-                       0 : DPOT_AD5272_3_A0;
+                       0 : DPOT_AD5172_3_A0;
                if (reg & DPOT_ADDR_OTP) {
                        tmp = dpot_read_r8d16(dpot, ctrl);
                        if (tmp >> 14) /* Ready to Program? */
                                return -EFAULT;
-                       ctrl |= DPOT_AD5270_2_3_FUSE;
+                       ctrl |= DPOT_AD5170_2_3_FUSE;
                }
                return dpot_write_r8d8(dpot, ctrl, value);
                break;
@@ -303,10 +376,25 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
                        tmp = dpot_read_r8d16(dpot, tmp);
                        if (tmp >> 14) /* Ready to Program? */
                                return -EFAULT;
-                       ctrl = DPOT_AD5270_2_3_FUSE;
+                       ctrl = DPOT_AD5170_2_3_FUSE;
                }
                return dpot_write_r8d8(dpot, ctrl, value);
                break;
+       case DPOT_UID(AD5272_ID):
+       case DPOT_UID(AD5274_ID):
+               dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
+                               DPOT_AD5270_1_2_4_UNLOCK_CMD);
+
+               if (reg & DPOT_ADDR_OTP)
+                       return dpot_write_r8d8(dpot,
+                                       DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
+
+               if (dpot->uid == DPOT_UID(AD5274_ID))
+                       value = value << 2;
+
+               return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
+                                      (value >> 8), value & 0xFF);
+               break;
        default:
                if (reg & DPOT_ADDR_CMD)
                        return dpot_write_d8(dpot, reg);
@@ -320,7 +408,6 @@ static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
        }
 }
 
-
 static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
 {
        if (dpot->feat & F_SPI)
index 78b89fd2e2fd48d20a22e2bf40292e947b388a24..a662f5987b6892591008291e3b5b05506918ed9e 100644 (file)
@@ -47,9 +47,9 @@ enum dpot_devid {
        AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */
        AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1),
        AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
-                       BRDAC0 | BRDAC3, 6, 2),
+                       BRDAC1 | BRDAC3, 6, 2),
        AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
-                       BRDAC0 | BRDAC3, 8, 3),
+                       BRDAC1 | BRDAC3, 8, 3),
        AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
                        BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4),
        AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
@@ -93,8 +93,10 @@ enum dpot_devid {
                        BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23),
        AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
                        BRDAC0, 8, 24),
-       AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 8, 25),
-       AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 26),
+       AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
+                       BRDAC0, 8, 25),
+       AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT | F_CMD_OTP,
+                       BRDAC0, 10, 26),
        AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
        AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
                        BRDAC0, 7, 28),
@@ -122,6 +124,12 @@ enum dpot_devid {
        AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45),
        AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46),
        AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47),
+       AD5270_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
+                       BRDAC0, 10, 48),
+       AD5271_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP | F_SPI_16BIT,
+                       BRDAC0, 8, 49),
+       AD5272_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 10, 50),
+       AD5274_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 51),
 };
 
 #define DPOT_RDAC0             0
@@ -165,15 +173,24 @@ enum dpot_devid {
 /* AD5291/2/3 use special commands */
 #define DPOT_AD5291_RDAC       0x01
 #define DPOT_AD5291_READ_RDAC  0x02
+#define DPOT_AD5291_STORE_XTPM 0x03
+#define DPOT_AD5291_CTRLREG    0x06
+#define DPOT_AD5291_UNLOCK_CMD 0x03
 
-/* AD524x use special commands */
-#define DPOT_AD5291_RDAC_AB    0x80
+/* AD5270/1/2/4 use special commands */
+#define DPOT_AD5270_1_2_4_RDAC         0x01
+#define DPOT_AD5270_1_2_4_READ_RDAC    0x02
+#define DPOT_AD5270_1_2_4_STORE_XTPM   0x03
+#define DPOT_AD5270_1_2_4_CTRLREG      0x07
+#define DPOT_AD5270_1_2_4_UNLOCK_CMD   0x03
+
+#define DPOT_AD5282_RDAC_AB    0x80
 
 #define DPOT_AD5273_FUSE       0x80
-#define DPOT_AD5270_2_3_FUSE   0x20
-#define DPOT_AD5270_2_3_OW     0x08
-#define DPOT_AD5272_3_A0       0x08
-#define DPOT_AD5270_2FUSE      0x80
+#define DPOT_AD5170_2_3_FUSE   0x20
+#define DPOT_AD5170_2_3_OW     0x08
+#define DPOT_AD5172_3_A0       0x08
+#define DPOT_AD5170_2FUSE      0x80
 
 struct dpot_data;
 
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
new file mode 100644 (file)
index 0000000..f9b91ba
--- /dev/null
@@ -0,0 +1,347 @@
+/*
+ * apds9802als.c - APDS9802 ALS driver
+ *
+ * Copyright (C) 2009 Intel Corp
+ *
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/pm_runtime.h>
+
+#define ALS_MIN_RANGE_VAL 1
+#define ALS_MAX_RANGE_VAL 2
+#define POWER_STA_ENABLE 1
+#define POWER_STA_DISABLE 0
+
+#define DRIVER_NAME "apds9802als"
+
+struct als_data {
+       struct mutex mutex;
+};
+
+static ssize_t als_sensing_range_show(struct device *dev,
+                       struct device_attribute *attr,  char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       int  val;
+
+       val = i2c_smbus_read_byte_data(client, 0x81);
+       if (val < 0)
+               return val;
+       if (val & 1)
+               return sprintf(buf, "4095\n");
+       else
+               return sprintf(buf, "65535\n");
+}
+
+static int als_wait_for_data_ready(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       int ret;
+       int retry = 10;
+
+       do {
+               msleep(30);
+               ret = i2c_smbus_read_byte_data(client, 0x86);
+       } while (!(ret & 0x80) && retry--);
+
+       /* retry only goes negative when every poll timed out */
+       if (retry < 0) {
+               dev_warn(dev, "timeout waiting for data ready\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static ssize_t als_lux0_input_data_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct als_data *data = i2c_get_clientdata(client);
+       int ret_val;
+       int temp;
+
+       /* Protect against parallel reads */
+       pm_runtime_get_sync(dev);
+       mutex_lock(&data->mutex);
+
+       /* clear EOC interrupt status */
+       i2c_smbus_write_byte(client, 0x40);
+       /* start measurement */
+       temp = i2c_smbus_read_byte_data(client, 0x81);
+       i2c_smbus_write_byte_data(client, 0x81, temp | 0x08);
+
+       ret_val = als_wait_for_data_ready(dev);
+       if (ret_val < 0)
+               goto failed;
+
+       temp = i2c_smbus_read_byte_data(client, 0x8C); /* LSB data */
+       if (temp < 0) {
+               ret_val = temp;
+               goto failed;
+       }
+       ret_val = i2c_smbus_read_byte_data(client, 0x8D); /* MSB data */
+       if (ret_val < 0)
+               goto failed;
+
+       mutex_unlock(&data->mutex);
+       pm_runtime_put_sync(dev);
+
+       temp = (ret_val << 8) | temp;
+       return sprintf(buf, "%d\n", temp);
+failed:
+       mutex_unlock(&data->mutex);
+       pm_runtime_put_sync(dev);
+       return ret_val;
+}
+
+static ssize_t als_sensing_range_store(struct device *dev,
+               struct device_attribute *attr, const  char *buf, size_t count)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct als_data *data = i2c_get_clientdata(client);
+       int ret_val;    /* signed: i2c helpers return negative errors */
+       unsigned long val;
+
+       if (strict_strtoul(buf, 10, &val))
+               return -EINVAL;
+
+       if (val < 4096)
+               val = 1;
+       else if (val < 65536)
+               val = 2;
+       else
+               return -ERANGE;
+
+       pm_runtime_get_sync(dev);
+
+       /*
+        * Make sure nobody else reads/modifies/writes 0x81 while we
+        * are active.
+        */
+       mutex_lock(&data->mutex);
+
+       ret_val = i2c_smbus_read_byte_data(client, 0x81);
+       if (ret_val < 0)
+               goto fail;
+
+       /* Reset the bits before setting them */
+       ret_val = ret_val & 0xFA;
+
+       if (val == 1) /* Setting detection range up to 4k LUX */
+               ret_val = (ret_val | 0x01);
+       else /* Setting detection range up to 64k LUX */
+               ret_val = (ret_val | 0x00);
+
+       ret_val = i2c_smbus_write_byte_data(client, 0x81, ret_val);
+
+       if (ret_val >= 0) {
+               /* All OK */
+               mutex_unlock(&data->mutex);
+               pm_runtime_put_sync(dev);
+               return count;
+       }
+fail:
+       mutex_unlock(&data->mutex);
+       pm_runtime_put_sync(dev);
+       return ret_val;
+}
+
+static int als_set_power_state(struct i2c_client *client, bool on_off)
+{
+       int ret_val;
+       struct als_data *data = i2c_get_clientdata(client);
+
+       mutex_lock(&data->mutex);
+       ret_val = i2c_smbus_read_byte_data(client, 0x80);
+       if (ret_val < 0)
+               goto fail;
+       if (on_off)
+               ret_val = ret_val | 0x01;
+       else
+               ret_val = ret_val & 0xFE;
+       ret_val = i2c_smbus_write_byte_data(client, 0x80, ret_val);
+fail:
+       mutex_unlock(&data->mutex);
+       return ret_val;
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
+       als_sensing_range_show, als_sensing_range_store);
+static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux0_input_data_show, NULL);
+
+static struct attribute *mid_att_als[] = {
+       &dev_attr_lux0_sensor_range.attr,
+       &dev_attr_lux0_input.attr,
+       NULL
+};
+
+static struct attribute_group m_als_gr = {
+       .name = "apds9802als",
+       .attrs = mid_att_als
+};
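+
+/*
+ * Sysfs usage sketch (the exact path depends on the I2C bus/address):
+ *
+ *   cat  .../apds9802als/lux0_input
+ *   echo 4095 > .../apds9802als/lux0_sensor_range
+ *
+ * A written value below 4096 selects the 4k lux range, below 65536 the
+ * 64k range; anything larger is rejected with -ERANGE.
+ */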
+
+static int als_set_default_config(struct i2c_client *client)
+{
+       int ret_val;
+       /* Write the command and then switch on */
+       ret_val = i2c_smbus_write_byte_data(client, 0x80, 0x01);
+       if (ret_val < 0) {
+               dev_err(&client->dev, "failed default switch on write\n");
+               return ret_val;
+       }
+       /* detection range: 1~64K lux, manual measurement */
+       ret_val = i2c_smbus_write_byte_data(client, 0x81, 0x08);
+       if (ret_val < 0)
+               dev_err(&client->dev, "failed default LUX on write\n");
+
+       /* We always get 0 for the 1st measurement after system power on,
+        * so make sure it is finished before the user asks for data.
+        */
+       als_wait_for_data_ready(&client->dev);
+
+       return ret_val;
+}
+
+static int apds9802als_probe(struct i2c_client *client,
+                            const struct i2c_device_id *id)
+{
+       int res;
+       struct als_data *data;
+
+       data = kzalloc(sizeof(struct als_data), GFP_KERNEL);
+       if (data == NULL) {
+               dev_err(&client->dev, "Memory allocation failed\n");
+               return -ENOMEM;
+       }
+       i2c_set_clientdata(client, data);
+       res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
+       if (res) {
+               dev_err(&client->dev, "device create file failed\n");
+               goto als_error1;
+       }
+       dev_info(&client->dev, "ALS chip found\n");
+       als_set_default_config(client);
+       mutex_init(&data->mutex);
+
+       pm_runtime_enable(&client->dev);
+       pm_runtime_get(&client->dev);
+       pm_runtime_put(&client->dev);
+
+       return res;
+als_error1:
+       i2c_set_clientdata(client, NULL);
+       kfree(data);
+       return res;
+}
+
+static int apds9802als_remove(struct i2c_client *client)
+{
+       struct als_data *data = i2c_get_clientdata(client);
+
+       als_set_power_state(client, false);
+       sysfs_remove_group(&client->dev.kobj, &m_als_gr);
+       kfree(data);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int apds9802als_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+       als_set_power_state(client, false);
+       return 0;
+}
+
+static int apds9802als_resume(struct i2c_client *client)
+{
+       als_set_default_config(client);
+
+       pm_runtime_get(&client->dev);
+       pm_runtime_put(&client->dev);
+       return 0;
+}
+
+static int apds9802als_runtime_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       als_set_power_state(client, false);
+       return 0;
+}
+
+static int apds9802als_runtime_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       als_set_power_state(client, true);
+       return 0;
+}
+
+static const struct dev_pm_ops apds9802als_pm_ops = {
+       .runtime_suspend = apds9802als_runtime_suspend,
+       .runtime_resume = apds9802als_runtime_resume,
+};
+
+#define APDS9802ALS_PM_OPS (&apds9802als_pm_ops)
+
+#else  /* CONFIG_PM */
+#define apds9802als_suspend NULL
+#define apds9802als_resume NULL
+#define APDS9802ALS_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_device_id apds9802als_id[] = {
+       { DRIVER_NAME, 0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(i2c, apds9802als_id);
+
+static struct i2c_driver apds9802als_driver = {
+       .driver = {
+               .name = DRIVER_NAME,
+               .pm = APDS9802ALS_PM_OPS,
+       },
+       .probe = apds9802als_probe,
+       .remove = apds9802als_remove,
+       .suspend = apds9802als_suspend,
+       .resume = apds9802als_resume,
+       .id_table = apds9802als_id,
+};
+
+static int __init sensor_apds9802als_init(void)
+{
+       return i2c_add_driver(&apds9802als_driver);
+}
+
+static void  __exit sensor_apds9802als_exit(void)
+{
+       i2c_del_driver(&apds9802als_driver);
+}
+module_init(sensor_apds9802als_init);
+module_exit(sensor_apds9802als_exit);
+
+MODULE_AUTHOR("Anantha Narayanan <Anantha.Narayanan@intel.com>");
+MODULE_DESCRIPTION("Avago apds9802als ALS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
new file mode 100644 (file)
index 0000000..200311f
--- /dev/null
@@ -0,0 +1,1295 @@
+/*
+ * This file is part of the APDS990x sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/i2c/apds990x.h>
+
+/* Register map */
+#define APDS990X_ENABLE         0x00 /* Enable of states and interrupts */
+#define APDS990X_ATIME  0x01 /* ALS ADC time  */
+#define APDS990X_PTIME  0x02 /* Proximity ADC time  */
+#define APDS990X_WTIME  0x03 /* Wait time  */
+#define APDS990X_AILTL  0x04 /* ALS interrupt low threshold low byte */
+#define APDS990X_AILTH  0x05 /* ALS interrupt low threshold hi byte */
+#define APDS990X_AIHTL  0x06 /* ALS interrupt hi threshold low byte */
+#define APDS990X_AIHTH  0x07 /* ALS interrupt hi threshold hi byte */
+#define APDS990X_PILTL  0x08 /* Proximity interrupt low threshold low byte */
+#define APDS990X_PILTH  0x09 /* Proximity interrupt low threshold hi byte */
+#define APDS990X_PIHTL  0x0a /* Proximity interrupt hi threshold low byte */
+#define APDS990X_PIHTH  0x0b /* Proximity interrupt hi threshold hi byte */
+#define APDS990X_PERS   0x0c /* Interrupt persistence filters */
+#define APDS990X_CONFIG         0x0d /* Configuration */
+#define APDS990X_PPCOUNT 0x0e /* Proximity pulse count */
+#define APDS990X_CONTROL 0x0f /* Gain control register */
+#define APDS990X_REV    0x11 /* Revision Number */
+#define APDS990X_ID     0x12 /* Device ID */
+#define APDS990X_STATUS         0x13 /* Device status */
+#define APDS990X_CDATAL         0x14 /* Clear ADC low data register */
+#define APDS990X_CDATAH         0x15 /* Clear ADC high data register */
+#define APDS990X_IRDATAL 0x16 /* IR ADC low data register */
+#define APDS990X_IRDATAH 0x17 /* IR ADC high data register */
+#define APDS990X_PDATAL         0x18 /* Proximity ADC low data register */
+#define APDS990X_PDATAH         0x19 /* Proximity ADC high data register */
+
+/* Control */
+#define APDS990X_MAX_AGAIN     3
+
+/* Enable register */
+#define APDS990X_EN_PIEN       (0x1 << 5)
+#define APDS990X_EN_AIEN       (0x1 << 4)
+#define APDS990X_EN_WEN                (0x1 << 3)
+#define APDS990X_EN_PEN                (0x1 << 2)
+#define APDS990X_EN_AEN                (0x1 << 1)
+#define APDS990X_EN_PON                (0x1 << 0)
+#define APDS990X_EN_DISABLE_ALL 0
+
+/* Status register */
+#define APDS990X_ST_PINT       (0x1 << 5)
+#define APDS990X_ST_AINT       (0x1 << 4)
+
+/* I2C access types */
+#define APDS990x_CMD_TYPE_MASK (0x03 << 5)
+#define APDS990x_CMD_TYPE_RB   (0x00 << 5) /* Repeated byte */
+#define APDS990x_CMD_TYPE_INC  (0x01 << 5) /* Auto increment */
+#define APDS990x_CMD_TYPE_SPE  (0x03 << 5) /* Special function */
+
+#define APDS990x_ADDR_SHIFT    0
+#define APDS990x_CMD           0x80
+
+/* Interrupt ack commands */
+#define APDS990X_INT_ACK_ALS   0x6
+#define APDS990X_INT_ACK_PS    0x5
+#define APDS990X_INT_ACK_BOTH  0x7
+
+/* ptime */
+#define APDS990X_PTIME_DEFAULT 0xff /* Recommended conversion time 2.7 ms */
+
+/* wtime */
+#define APDS990X_WTIME_DEFAULT 0xee /* ~50ms wait time */
+
+#define APDS990X_TIME_TO_ADC   1024 /* One timetick as ADC count value */
+
+/* Persistence */
+#define APDS990X_APERS_SHIFT   0
+#define APDS990X_PPERS_SHIFT   4
+
+/* Supported IDs */
+#define APDS990X_ID_0          0x0
+#define APDS990X_ID_4          0x4
+#define APDS990X_ID_29         0x29
+
+/* pgain and pdiode settings */
+#define APDS_PGAIN_1X         0x0
+#define APDS_PDIODE_IR        0x2
+
+#define APDS990X_LUX_OUTPUT_SCALE 10
+
+/* Reverse chip factors for threshold calculation */
+struct reverse_factors {
+       u32 afactor;
+       int cf1;
+       int irf1;
+       int cf2;
+       int irf2;
+};
+
+struct apds990x_chip {
+       struct apds990x_platform_data   *pdata;
+       struct i2c_client               *client;
+       struct mutex                    mutex; /* avoid parallel access */
+       struct regulator_bulk_data      regs[2];
+       wait_queue_head_t               wait;
+
+       int     prox_en;
+       bool    prox_continuous_mode;
+       bool    lux_wait_fresh_res;
+
+       /* Chip parameters */
+       struct  apds990x_chip_factors   cf;
+       struct  reverse_factors         rcf;
+       u16     atime;          /* als integration time */
+       u16     arate;          /* als reporting rate */
+       u16     a_max_result;   /* Max possible ADC value with current atime */
+       u8      again_meas;     /* Gain used in last measurement */
+       u8      again_next;     /* Next calculated gain */
+       u8      pgain;
+       u8      pdiode;
+       u8      pdrive;
+       u8      lux_persistence;
+       u8      prox_persistence;
+
+       u32     lux_raw;
+       u32     lux;
+       u16     lux_clear;
+       u16     lux_ir;
+       u16     lux_calib;
+       u32     lux_thres_hi;
+       u32     lux_thres_lo;
+
+       u32     prox_thres;
+       u16     prox_data;
+       u16     prox_calib;
+
+       char    chipname[10];
+       u8      revision;
+};
+
+#define APDS_CALIB_SCALER              8192
+#define APDS_LUX_NEUTRAL_CALIB_VALUE   (1 * APDS_CALIB_SCALER)
+#define APDS_PROX_NEUTRAL_CALIB_VALUE  (1 * APDS_CALIB_SCALER)
+
+#define APDS_PROX_DEF_THRES            600
+#define APDS_PROX_HYSTERESIS           50
+#define APDS_LUX_DEF_THRES_HI          101
+#define APDS_LUX_DEF_THRES_LO          100
+#define APDS_DEFAULT_PROX_PERS         1
+
+#define APDS_TIMEOUT                   2000
+#define APDS_STARTUP_DELAY             25000 /* us */
+#define APDS_RANGE                     65535
+#define APDS_PROX_RANGE                        1023
+#define APDS_LUX_GAIN_LO_LIMIT         100
+#define APDS_LUX_GAIN_LO_LIMIT_STRICT  25
+
+#define TIMESTEP                       87 /* 2.7ms is about 87 / 32 */
+#define TIME_STEP_SCALER               32
+
+#define APDS_LUX_AVERAGING_TIME                50 /* tolerates 50/60Hz ripple */
+#define APDS_LUX_DEFAULT_RATE          200
+
+static const u8 again[]        = {1, 8, 16, 120}; /* ALS gain steps */
+static const u8 ir_currents[]  = {100, 50, 25, 12}; /* IRled currents in mA */
+
+/* The two tables below must match, i.e. a 10Hz rate means a persistence of 1 */
+static const u16 arates_hz[] = {10, 5, 2, 1};
+static const u8 apersis[] = {1, 2, 4, 5};
+
+/* Regulators */
+static const char reg_vcc[] = "Vdd";
+static const char reg_vled[] = "Vled";
+
+static int apds990x_read_byte(struct apds990x_chip *chip, u8 reg, u8 *data)
+{
+       struct i2c_client *client = chip->client;
+       s32 ret;
+
+       reg &= ~APDS990x_CMD_TYPE_MASK;
+       reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
+
+       ret = i2c_smbus_read_byte_data(client, reg);
+       *data = ret;
+       return (int)ret;
+}
+
+static int apds990x_read_word(struct apds990x_chip *chip, u8 reg, u16 *data)
+{
+       struct i2c_client *client = chip->client;
+       s32 ret;
+
+       reg &= ~APDS990x_CMD_TYPE_MASK;
+       reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
+
+       ret = i2c_smbus_read_word_data(client, reg);
+       *data = ret;
+       return (int)ret;
+}
+
+static int apds990x_write_byte(struct apds990x_chip *chip, u8 reg, u8 data)
+{
+       struct i2c_client *client = chip->client;
+       s32 ret;
+
+       reg &= ~APDS990x_CMD_TYPE_MASK;
+       reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
+
+       ret = i2c_smbus_write_byte_data(client, reg, data);
+       return (int)ret;
+}
+
+static int apds990x_write_word(struct apds990x_chip *chip, u8 reg, u16 data)
+{
+       struct i2c_client *client = chip->client;
+       s32 ret;
+
+       reg &= ~APDS990x_CMD_TYPE_MASK;
+       reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
+
+       ret = i2c_smbus_write_word_data(client, reg, data);
+       return (int)ret;
+}
+
+static int apds990x_mode_on(struct apds990x_chip *chip)
+{
+       /* ALS is mandatory, proximity optional */
+       u8 reg = APDS990X_EN_AIEN | APDS990X_EN_PON | APDS990X_EN_AEN |
+               APDS990X_EN_WEN;
+
+       if (chip->prox_en)
+               reg |= APDS990X_EN_PIEN | APDS990X_EN_PEN;
+
+       return apds990x_write_byte(chip, APDS990X_ENABLE, reg);
+}
+
+static u16 apds990x_lux_to_threshold(struct apds990x_chip *chip, u32 lux)
+{
+       u32 thres;
+       u32 cpl;
+       u32 ir;
+
+       if (lux == 0)
+               return 0;
+       else if (lux == APDS_RANGE)
+               return APDS_RANGE;
+
+       /*
+        * Reported LUX value is a combination of the IR and CLEAR channel
+        * values. However, interrupt threshold is only for clear channel.
+        * This function approximates needed HW threshold value for a given
+        * LUX value under the current lighting conditions.
+        * The IR level compared to visible light varies heavily depending on
+        * the source of the light.
+        *
+        * Calculate threshold value for the next measurement period.
+        * Math: threshold = lux * cpl where
+        * cpl = atime * again / (glass_attenuation * device_factor)
+        * (count-per-lux)
+        *
+        * First remove calibration. Division by four is to avoid overflow
+        */
+       lux = lux * (APDS_CALIB_SCALER / 4) / (chip->lux_calib / 4);
+
+       /* Multiplication by 64 is to increase accuracy */
+       cpl = ((u32)chip->atime * (u32)again[chip->again_next] *
+               APDS_PARAM_SCALE * 64) / (chip->cf.ga * chip->cf.df);
+
+       thres = lux * cpl / 64;
+       /*
+        * Convert IR light from the latest result to match with
+        * new gain step. This helps to adapt with the current
+        * source of light.
+        */
+       ir = (u32)chip->lux_ir * (u32)again[chip->again_next] /
+               (u32)again[chip->again_meas];
+
+       /*
+        * Compensate count with IR light impact
+        * IAC1 > IAC2 (see apds990x_get_lux for formulas)
+        */
+       if (chip->lux_clear * APDS_PARAM_SCALE >=
+               chip->rcf.afactor * chip->lux_ir)
+               thres = (chip->rcf.cf1 * thres + chip->rcf.irf1 * ir) /
+                       APDS_PARAM_SCALE;
+       else
+               thres = (chip->rcf.cf2 * thres + chip->rcf.irf2 * ir) /
+                       APDS_PARAM_SCALE;
+
+       if (thres >= chip->a_max_result)
+               thres = chip->a_max_result - 1;
+       return thres;
+}
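+
+/*
+ * Ignoring calibration and the IR correction above, the mapping is
+ * roughly thres ~= lux * atime * again / (ga * df): doubling the gain or
+ * the integration time doubles the count corresponding to the same lux
+ * level, which is why the thresholds are recomputed whenever the gain
+ * changes.
+ */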
+
+static inline int apds990x_set_atime(struct apds990x_chip *chip, u32 time_ms)
+{
+       u8 reg_value;
+
+       chip->atime = time_ms;
+       /* Formula is specified in the data sheet */
+       reg_value = 256 - ((time_ms * TIME_STEP_SCALER) / TIMESTEP);
+       /* Calculate max ADC value for given integration time */
+       chip->a_max_result = (u16)(256 - reg_value) * APDS990X_TIME_TO_ADC;
+       return apds990x_write_byte(chip, APDS990X_ATIME, reg_value);
+}
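+
+/*
+ * Worked example: time_ms == 50 gives reg_value = 256 - (50 * 32) / 87
+ * = 256 - 18 = 238, i.e. 18 timeticks of ~2.7 ms, and a_max_result
+ * becomes 18 * 1024 == 18432 counts.
+ */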
+
+/* Called always with mutex locked */
+static int apds990x_refresh_pthres(struct apds990x_chip *chip, int data)
+{
+       int ret, lo, hi;
+
+       /* If the chip is not in use, don't try to access it */
+       if (pm_runtime_suspended(&chip->client->dev))
+               return 0;
+
+       if (data < chip->prox_thres) {
+               lo = 0;
+               hi = chip->prox_thres;
+       } else {
+               lo = chip->prox_thres - APDS_PROX_HYSTERESIS;
+               if (chip->prox_continuous_mode)
+                       hi = chip->prox_thres;
+               else
+                       hi = APDS_RANGE;
+       }
+
+       ret = apds990x_write_word(chip, APDS990X_PILTL, lo);
+       ret |= apds990x_write_word(chip, APDS990X_PIHTL, hi);
+       return ret;
+}
+
+/* Called always with mutex locked */
+static int apds990x_refresh_athres(struct apds990x_chip *chip)
+{
+       int ret;
+       /* If the chip is not in use, don't try to access it */
+       if (pm_runtime_suspended(&chip->client->dev))
+               return 0;
+
+       ret = apds990x_write_word(chip, APDS990X_AILTL,
+                       apds990x_lux_to_threshold(chip, chip->lux_thres_lo));
+       ret |= apds990x_write_word(chip, APDS990X_AIHTL,
+                       apds990x_lux_to_threshold(chip, chip->lux_thres_hi));
+
+       return ret;
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_a_refresh(struct apds990x_chip *chip)
+{
+       /* This will force ALS interrupt after the next measurement. */
+       apds990x_write_word(chip, APDS990X_AILTL, APDS_LUX_DEF_THRES_LO);
+       apds990x_write_word(chip, APDS990X_AIHTL, APDS_LUX_DEF_THRES_HI);
+}
+
+/* Called always with mutex locked */
+static void apds990x_force_p_refresh(struct apds990x_chip *chip)
+{
+       /* This will force proximity interrupt after the next measurement. */
+       apds990x_write_word(chip, APDS990X_PILTL, APDS_PROX_DEF_THRES - 1);
+       apds990x_write_word(chip, APDS990X_PIHTL, APDS_PROX_DEF_THRES);
+}
+
+/* Called always with mutex locked */
+static int apds990x_calc_again(struct apds990x_chip *chip)
+{
+       int curr_again = chip->again_meas;
+       int next_again = chip->again_meas;
+       int ret = 0;
+
+       /* Calculate suitable als gain */
+       if (chip->lux_clear == chip->a_max_result)
+               next_again -= 2; /* ALS saturated. Decrease gain by 2 steps */
+       else if (chip->lux_clear > chip->a_max_result / 2)
+               next_again--;
+       else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+               next_again += 2; /* Too dark. Increase gain by 2 steps */
+       else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT)
+               next_again++;
+
+       /* Limit gain to available range */
+       if (next_again < 0)
+               next_again = 0;
+       else if (next_again > APDS990X_MAX_AGAIN)
+               next_again = APDS990X_MAX_AGAIN;
+
+       /* Check whether we can trust the measured result */
+       if (chip->lux_clear == chip->a_max_result)
+               /* Result can be totally garbage due to saturation */
+               ret = -ERANGE;
+       else if (next_again != curr_again &&
+               chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
+               /*
+                * Gain is changed and measurement result is very small.
+                * Result can be totally garbage due to underflow
+                */
+               ret = -ERANGE;
+
+       chip->again_next = next_again;
+       apds990x_write_byte(chip, APDS990X_CONTROL,
+                       (chip->pdrive << 6) |
+                       (chip->pdiode << 4) |
+                       (chip->pgain << 2) |
+                       (chip->again_next << 0));
+
+       /*
+        * Error means bad result -> re-measurement is needed. The forced
+        * refresh uses fastest possible persistence setting to get result
+        * as soon as possible.
+        */
+       if (ret < 0)
+               apds990x_force_a_refresh(chip);
+       else
+               apds990x_refresh_athres(chip);
+
+       return ret;
+}
+
+/* Always called with the mutex held */
+static int apds990x_get_lux(struct apds990x_chip *chip, int clear, int ir)
+{
+       int iac, iac1, iac2; /* IR adjusted counts */
+       u32 lpc; /* Lux per count */
+
+       /* Formulas:
+        * iac1 = CF1 * CLEAR_CH - IRF1 * IR_CH
+        * iac2 = CF2 * CLEAR_CH - IRF2 * IR_CH
+        */
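+       /*
+        * Worked example (assuming the uncovered-sensor defaults set in
+        * probe, APDS_PARAM_SCALE == 4096): clear = 1000, ir = 200 gives
+        * iac1 = (4096 * 1000 - 9134 * 200) / 4096 = 554 and
+        * iac2 = (2867 * 1000 - 5816 * 200) / 4096 = 415, so iac = 554.
+        */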
+       iac1 = (chip->cf.cf1 * clear - chip->cf.irf1 * ir) / APDS_PARAM_SCALE;
+       iac2 = (chip->cf.cf2 * clear - chip->cf.irf2 * ir) / APDS_PARAM_SCALE;
+
+       iac = max(iac1, iac2);
+       iac = max(iac, 0);
+
+       lpc = APDS990X_LUX_OUTPUT_SCALE * (chip->cf.df * chip->cf.ga) /
+               (u32)(again[chip->again_meas] * (u32)chip->atime);
+
+       return (iac * lpc) / APDS_PARAM_SCALE;
+}
+
+static int apds990x_ack_int(struct apds990x_chip *chip, u8 mode)
+{
+       struct i2c_client *client = chip->client;
+       s32 ret;
+       u8 reg = APDS990x_CMD | APDS990x_CMD_TYPE_SPE;
+
+       switch (mode & (APDS990X_ST_AINT | APDS990X_ST_PINT)) {
+       case APDS990X_ST_AINT:
+               reg |= APDS990X_INT_ACK_ALS;
+               break;
+       case APDS990X_ST_PINT:
+               reg |= APDS990X_INT_ACK_PS;
+               break;
+       default:
+               reg |= APDS990X_INT_ACK_BOTH;
+               break;
+       }
+
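+       /*
+        * Note: the address byte of this read carries the special-function
+        * command, which performs the actual interrupt acknowledgement.
+        */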
+       ret = i2c_smbus_read_byte_data(client, reg);
+       return (int)ret;
+}
+
+static irqreturn_t apds990x_irq(int irq, void *data)
+{
+       struct apds990x_chip *chip = data;
+       u8 status;
+
+       apds990x_read_byte(chip, APDS990X_STATUS, &status);
+       apds990x_ack_int(chip, status);
+
+       mutex_lock(&chip->mutex);
+       if (!pm_runtime_suspended(&chip->client->dev)) {
+               if (status & APDS990X_ST_AINT) {
+                       apds990x_read_word(chip, APDS990X_CDATAL,
+                                       &chip->lux_clear);
+                       apds990x_read_word(chip, APDS990X_IRDATAL,
+                                       &chip->lux_ir);
+                       /* Store used gain for calculations */
+                       chip->again_meas = chip->again_next;
+
+                       chip->lux_raw = apds990x_get_lux(chip,
+                                                       chip->lux_clear,
+                                                       chip->lux_ir);
+
+                       if (apds990x_calc_again(chip) == 0) {
+                               /* Result is valid */
+                               chip->lux = chip->lux_raw;
+                               chip->lux_wait_fresh_res = false;
+                               wake_up(&chip->wait);
+                               sysfs_notify(&chip->client->dev.kobj,
+                                       NULL, "lux0_input");
+                       }
+               }
+
+               if ((status & APDS990X_ST_PINT) && chip->prox_en) {
+                       u16 clr_ch;
+
+                       apds990x_read_word(chip, APDS990X_CDATAL, &clr_ch);
+                       /*
+                        * If ALS channel is saturated at min gain,
+                        * proximity gives false positive values.
+                        * Just ignore them.
+                        */
+                       if (chip->again_meas == 0 &&
+                               clr_ch == chip->a_max_result)
+                               chip->prox_data = 0;
+                       else
+                               apds990x_read_word(chip,
+                                               APDS990X_PDATAL,
+                                               &chip->prox_data);
+
+                       apds990x_refresh_pthres(chip, chip->prox_data);
+                       if (chip->prox_data < chip->prox_thres)
+                               chip->prox_data = 0;
+                       else if (!chip->prox_continuous_mode)
+                               chip->prox_data = APDS_PROX_RANGE;
+                       sysfs_notify(&chip->client->dev.kobj,
+                               NULL, "prox0_raw");
+               }
+       }
+       mutex_unlock(&chip->mutex);
+       return IRQ_HANDLED;
+}
+
+static int apds990x_configure(struct apds990x_chip *chip)
+{
+       /* It is recommended to use disabled mode during these operations */
+       apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+
+       /* Conversion and wait times for the different state machine states */
+       apds990x_write_byte(chip, APDS990X_PTIME, APDS990X_PTIME_DEFAULT);
+       apds990x_write_byte(chip, APDS990X_WTIME, APDS990X_WTIME_DEFAULT);
+       apds990x_set_atime(chip, APDS_LUX_AVERAGING_TIME);
+
+       apds990x_write_byte(chip, APDS990X_CONFIG, 0);
+
+       /* Persistence levels */
+       apds990x_write_byte(chip, APDS990X_PERS,
+                       (chip->lux_persistence << APDS990X_APERS_SHIFT) |
+                       (chip->prox_persistence << APDS990X_PPERS_SHIFT));
+
+       apds990x_write_byte(chip, APDS990X_PPCOUNT, chip->pdata->ppcount);
+
+       /* Start with relatively small gain */
+       chip->again_meas = 1;
+       chip->again_next = 1;
+       apds990x_write_byte(chip, APDS990X_CONTROL,
+                       (chip->pdrive << 6) |
+                       (chip->pdiode << 4) |
+                       (chip->pgain << 2) |
+                       (chip->again_next << 0));
+       return 0;
+}
+
+static int apds990x_detect(struct apds990x_chip *chip)
+{
+       struct i2c_client *client = chip->client;
+       int ret;
+       u8 id;
+
+       ret = apds990x_read_byte(chip, APDS990X_ID, &id);
+       if (ret < 0) {
+               dev_err(&client->dev, "ID read failed\n");
+               return ret;
+       }
+
+       ret = apds990x_read_byte(chip, APDS990X_REV, &chip->revision);
+       if (ret < 0) {
+               dev_err(&client->dev, "REV read failed\n");
+               return ret;
+       }
+
+       switch (id) {
+       case APDS990X_ID_0:
+       case APDS990X_ID_4:
+       case APDS990X_ID_29:
+               snprintf(chip->chipname, sizeof(chip->chipname), "APDS-990x");
+               break;
+       default:
+               ret = -ENODEV;
+               break;
+       }
+       return ret;
+}
+
+static int apds990x_chip_on(struct apds990x_chip *chip)
+{
+       int err  = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+                                       chip->regs);
+       if (err < 0)
+               return err;
+
+       usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+       /* Refresh all configs in case the regulators were off */
+       chip->prox_data = 0;
+       apds990x_configure(chip);
+       apds990x_mode_on(chip);
+       return 0;
+}
+
+static int apds990x_chip_off(struct apds990x_chip *chip)
+{
+       apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
+       regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+       return 0;
+}
+
+static ssize_t apds990x_lux_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct apds990x_chip *chip = dev_get_drvdata(dev);
+       ssize_t ret;
+       u32 result;
+       long timeout;
+
+       if (pm_runtime_suspended(dev))
+               return -EIO;
+
+       timeout = wait_event_interruptible_timeout(chip->wait,
+                                               !chip->lux_wait_fresh_res,
+                                               msecs_to_jiffies(APDS_TIMEOUT));
+       if (!timeout)
+               return -EIO;
+
+       mutex_lock(&chip->mutex);
+       result = (chip->lux * chip->lux_calib) / APDS_CALIB_SCALER;
+       if (result > (APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE))
+               result = APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE;
+
+       ret = sprintf(buf, "%d.%d\n",
+               result / APDS990X_LUX_OUTPUT_SCALE,
+               result % APDS990X_LUX_OUTPUT_SCALE);
+       mutex_unlock(&chip->mutex);
+       return ret;
+}
+
+static DEVICE_ATTR(lux0_input, S_IRUGO, apds990x_lux_show, NULL);
+
+static ssize_t apds990x_lux_range_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", APDS_RANGE);
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, apds990x_lux_range_show, NULL);
+
+static ssize_t apds990x_lux_calib_format_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", APDS_CALIB_SCALER);
+}
+
+static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
+               apds990x_lux_calib_format_show, NULL);
+
+static ssize_t apds990x_lux_calib_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct apds990x_chip *chip = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%u\n", chip->lux_calib);
+}
+
+static ssize_t apds990x_lux_calib_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct apds990x_chip *chip = dev_get_drvdata(dev);
+       unsigned long value;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       if (value > APDS_RANGE)
+               return -EINVAL;
+
+       chip->lux_calib = value;
+
+       return len;
+}
+
+static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, apds990x_lux_calib_show,
+               apds990x_lux_calib_store);
+
+static ssize_t apds990x_rate_avail(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       int i;
+       int pos = 0;
+       for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
+               pos += sprintf(buf + pos, "%d ", arates_hz[i]);
+       sprintf(buf + pos - 1, "\n");
+       return pos;
+}
+
+static ssize_t apds990x_rate_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", chip->arate);
+}
+
+static int apds990x_set_arate(struct apds990x_chip *chip, int rate)
+{
+       int i;
+
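+       /* Pick the first table rate that does not exceed the requested rate */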
+       for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
+               if (rate >= arates_hz[i])
+                       break;
+
+       if (i == ARRAY_SIZE(arates_hz))
+               return -EINVAL;
+
+       /* Pick up corresponding persistence value */
+       chip->lux_persistence = apersis[i];
+       chip->arate = arates_hz[i];
+
+       /* If the chip is not in use, don't try to access it */
+       if (pm_runtime_suspended(&chip->client->dev))
+               return 0;
+
+       /* Persistence levels */
+       return apds990x_write_byte(chip, APDS990X_PERS,
+                       (chip->lux_persistence << APDS990X_APERS_SHIFT) |
+                       (chip->prox_persistence << APDS990X_PPERS_SHIFT));
+}
+
+static ssize_t apds990x_rate_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       unsigned long value;
+       int ret;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+       ret = apds990x_set_arate(chip, value);
+       mutex_unlock(&chip->mutex);
+
+       if (ret < 0)
+               return ret;
+       return len;
+}
+
+static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, apds990x_rate_avail, NULL);
+
+static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, apds990x_rate_show,
+                                                apds990x_rate_store);
+
+static ssize_t apds990x_prox_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       ssize_t ret;
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       if (pm_runtime_suspended(dev) || !chip->prox_en)
+               return -EIO;
+
+       mutex_lock(&chip->mutex);
+       ret = sprintf(buf, "%d\n", chip->prox_data);
+       mutex_unlock(&chip->mutex);
+       return ret;
+}
+
+static DEVICE_ATTR(prox0_raw, S_IRUGO, apds990x_prox_show, NULL);
+
+static ssize_t apds990x_prox_range_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", APDS_PROX_RANGE);
+}
+
+static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, apds990x_prox_range_show, NULL);
+
+static ssize_t apds990x_prox_enable_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", chip->prox_en);
+}
+
+static ssize_t apds990x_prox_enable_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       unsigned long value;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+
+       if (!chip->prox_en)
+               chip->prox_data = 0;
+
+       if (value)
+               chip->prox_en++;
+       else if (chip->prox_en > 0)
+               chip->prox_en--;
+
+       if (!pm_runtime_suspended(dev))
+               apds990x_mode_on(chip);
+       mutex_unlock(&chip->mutex);
+       return len;
+}
+
+static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show,
+                                                  apds990x_prox_enable_store);
+
+static const char reporting_modes[][9] = {"trigger", "periodic"};
+
+static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%s\n",
+               reporting_modes[!!chip->prox_continuous_mode]);
+}
+
+static ssize_t apds990x_prox_reporting_mode_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+
+       if (sysfs_streq(buf, reporting_modes[0]))
+               chip->prox_continuous_mode = 0;
+       else if (sysfs_streq(buf, reporting_modes[1]))
+               chip->prox_continuous_mode = 1;
+       else
+               return -EINVAL;
+       return len;
+}
+
+static DEVICE_ATTR(prox0_reporting_mode, S_IRUGO | S_IWUSR,
+               apds990x_prox_reporting_mode_show,
+               apds990x_prox_reporting_mode_store);
+
+static ssize_t apds990x_prox_reporting_avail_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%s %s\n", reporting_modes[0], reporting_modes[1]);
+}
+
+static DEVICE_ATTR(prox0_reporting_mode_avail, S_IRUGO,
+               apds990x_prox_reporting_avail_show, NULL);
+
+static ssize_t apds990x_lux_thresh_above_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", chip->lux_thres_hi);
+}
+
+static ssize_t apds990x_lux_thresh_below_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", chip->lux_thres_lo);
+}
+
+static ssize_t apds990x_set_lux_thresh(struct apds990x_chip *chip, u32 *target,
+                               const char *buf)
+{
+       int ret = 0;
+       unsigned long thresh;
+
+       if (strict_strtoul(buf, 0, &thresh))
+               return -EINVAL;
+
+       if (thresh > APDS_RANGE)
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+       *target = thresh;
+       /*
+        * Don't update values in HW if we are still waiting for
+        * first interrupt to come after device handle open call.
+        */
+       if (!chip->lux_wait_fresh_res)
+               apds990x_refresh_athres(chip);
+       mutex_unlock(&chip->mutex);
+       return ret;
+}
+
+static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
+       if (ret < 0)
+               return ret;
+       return len;
+}
+
+static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
+       if (ret < 0)
+               return ret;
+       return len;
+}
+
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+               apds990x_lux_thresh_above_show,
+               apds990x_lux_thresh_above_store);
+
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+               apds990x_lux_thresh_below_show,
+               apds990x_lux_thresh_below_store);
+
+static ssize_t apds990x_prox_threshold_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", chip->prox_thres);
+}
+
+static ssize_t apds990x_prox_threshold_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       unsigned long value;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       if ((value > APDS_RANGE) || (value == 0) ||
+               (value < APDS_PROX_HYSTERESIS))
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+       chip->prox_thres = value;
+
+       apds990x_force_p_refresh(chip);
+       mutex_unlock(&chip->mutex);
+       return len;
+}
+
+static DEVICE_ATTR(prox0_thresh_above_value, S_IRUGO | S_IWUSR,
+               apds990x_prox_threshold_show,
+               apds990x_prox_threshold_store);
+
+static ssize_t apds990x_power_state_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t apds990x_power_state_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       unsigned long value;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+       if (value) {
+               pm_runtime_get_sync(dev);
+               mutex_lock(&chip->mutex);
+               chip->lux_wait_fresh_res = true;
+               apds990x_force_a_refresh(chip);
+               apds990x_force_p_refresh(chip);
+               mutex_unlock(&chip->mutex);
+       } else {
+               if (!pm_runtime_suspended(dev))
+                       pm_runtime_put(dev);
+       }
+       return len;
+}
+
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
+               apds990x_power_state_show,
+               apds990x_power_state_store);
+
+static ssize_t apds990x_chip_id_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct apds990x_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
+}
+
+static DEVICE_ATTR(chip_id, S_IRUGO, apds990x_chip_id_show, NULL);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+       &dev_attr_lux0_calibscale.attr,
+       &dev_attr_lux0_calibscale_default.attr,
+       &dev_attr_lux0_input.attr,
+       &dev_attr_lux0_sensor_range.attr,
+       &dev_attr_lux0_rate.attr,
+       &dev_attr_lux0_rate_avail.attr,
+       &dev_attr_lux0_thresh_above_value.attr,
+       &dev_attr_lux0_thresh_below_value.attr,
+       &dev_attr_prox0_raw_en.attr,
+       &dev_attr_prox0_raw.attr,
+       &dev_attr_prox0_sensor_range.attr,
+       &dev_attr_prox0_thresh_above_value.attr,
+       &dev_attr_prox0_reporting_mode.attr,
+       &dev_attr_prox0_reporting_mode_avail.attr,
+       &dev_attr_chip_id.attr,
+       &dev_attr_power_state.attr,
+       NULL
+};
+
+static struct attribute_group apds990x_attribute_group[] = {
+       {.attrs = sysfs_attrs_ctrl },
+};
+
+static int __devinit apds990x_probe(struct i2c_client *client,
+                               const struct i2c_device_id *id)
+{
+       struct apds990x_chip *chip;
+       int err;
+
+       chip = kzalloc(sizeof *chip, GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, chip);
+       chip->client  = client;
+
+       init_waitqueue_head(&chip->wait);
+       mutex_init(&chip->mutex);
+       chip->pdata     = client->dev.platform_data;
+
+       if (chip->pdata == NULL) {
+               dev_err(&client->dev, "platform data is mandatory\n");
+               err = -EINVAL;
+               goto fail1;
+       }
+
+       if (chip->pdata->cf.ga == 0) {
+               /* set uncovered sensor default parameters */
+               chip->cf.ga = 1966; /* 0.48 * APDS_PARAM_SCALE */
+               chip->cf.cf1 = 4096; /* 1.00 * APDS_PARAM_SCALE */
+               chip->cf.irf1 = 9134; /* 2.23 * APDS_PARAM_SCALE */
+               chip->cf.cf2 = 2867; /* 0.70 * APDS_PARAM_SCALE */
+               chip->cf.irf2 = 5816; /* 1.42 * APDS_PARAM_SCALE */
+               chip->cf.df = 52;
+       } else {
+               chip->cf = chip->pdata->cf;
+       }
+
+       /* precalculate inverse chip factors for threshold control */
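+       /*
+        * E.g. assuming the default cf1 == 4096 (1.00 * APDS_PARAM_SCALE),
+        * rcf.cf1 = 4096 * 4096 / 4096 == 4096, i.e. the reciprocal in the
+        * same fixed-point format.
+        */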
+       chip->rcf.afactor =
+               (chip->cf.irf1 - chip->cf.irf2) * APDS_PARAM_SCALE /
+               (chip->cf.cf1 - chip->cf.cf2);
+       chip->rcf.cf1 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
+               chip->cf.cf1;
+       chip->rcf.irf1 = chip->cf.irf1 * APDS_PARAM_SCALE /
+               chip->cf.cf1;
+       chip->rcf.cf2 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
+               chip->cf.cf2;
+       chip->rcf.irf2 = chip->cf.irf2 * APDS_PARAM_SCALE /
+               chip->cf.cf2;
+
+       /* Set something to start with */
+       chip->lux_thres_hi = APDS_LUX_DEF_THRES_HI;
+       chip->lux_thres_lo = APDS_LUX_DEF_THRES_LO;
+       chip->lux_calib = APDS_LUX_NEUTRAL_CALIB_VALUE;
+
+       chip->prox_thres = APDS_PROX_DEF_THRES;
+       chip->pdrive = chip->pdata->pdrive;
+       chip->pdiode = APDS_PDIODE_IR;
+       chip->pgain = APDS_PGAIN_1X;
+       chip->prox_calib = APDS_PROX_NEUTRAL_CALIB_VALUE;
+       chip->prox_persistence = APDS_DEFAULT_PROX_PERS;
+       chip->prox_continuous_mode = false;
+
+       chip->regs[0].supply = reg_vcc;
+       chip->regs[1].supply = reg_vled;
+
+       err = regulator_bulk_get(&client->dev,
+                                ARRAY_SIZE(chip->regs), chip->regs);
+       if (err < 0) {
+               dev_err(&client->dev, "Cannot get regulators\n");
+               goto fail1;
+       }
+
+       err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs);
+       if (err < 0) {
+               dev_err(&client->dev, "Cannot enable regulators\n");
+               goto fail2;
+       }
+
+       usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
+
+       err = apds990x_detect(chip);
+       if (err < 0) {
+               dev_err(&client->dev, "APDS990X not found\n");
+               goto fail3;
+       }
+
+       pm_runtime_set_active(&client->dev);
+
+       apds990x_configure(chip);
+       apds990x_set_arate(chip, APDS_LUX_DEFAULT_RATE);
+       apds990x_mode_on(chip);
+
+       pm_runtime_enable(&client->dev);
+
+       if (chip->pdata->setup_resources) {
+               err = chip->pdata->setup_resources();
+               if (err) {
+                       err = -EINVAL;
+                       goto fail3;
+               }
+       }
+
+       err = sysfs_create_group(&chip->client->dev.kobj,
+                               apds990x_attribute_group);
+       if (err < 0) {
+               dev_err(&chip->client->dev, "Sysfs registration failed\n");
+               goto fail4;
+       }
+
+       err = request_threaded_irq(client->irq, NULL,
+                               apds990x_irq,
+                               IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW |
+                               IRQF_ONESHOT,
+                               "apds990x", chip);
+       if (err) {
+               dev_err(&client->dev, "could not get IRQ %d\n",
+                       client->irq);
+               goto fail5;
+       }
+       return err;
+fail5:
+       sysfs_remove_group(&chip->client->dev.kobj,
+                       apds990x_attribute_group);
+fail4:
+       if (chip->pdata && chip->pdata->release_resources)
+               chip->pdata->release_resources();
+fail3:
+       regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+fail2:
+       regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+fail1:
+       kfree(chip);
+       return err;
+}
+
+static int __devexit apds990x_remove(struct i2c_client *client)
+{
+       struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+       free_irq(client->irq, chip);
+       sysfs_remove_group(&chip->client->dev.kobj,
+                       apds990x_attribute_group);
+
+       if (chip->pdata && chip->pdata->release_resources)
+               chip->pdata->release_resources();
+
+       if (!pm_runtime_suspended(&client->dev))
+               apds990x_chip_off(chip);
+
+       pm_runtime_disable(&client->dev);
+       pm_runtime_set_suspended(&client->dev);
+
+       regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+
+       kfree(chip);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int apds990x_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+       apds990x_chip_off(chip);
+       return 0;
+}
+
+static int apds990x_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+       /*
+        * If we were enabled at suspend time, everything is expected to
+        * work nicely and smoothly. chip_on is enough.
+        */
+       apds990x_chip_on(chip);
+
+       return 0;
+}
+#else
+#define apds990x_suspend  NULL
+#define apds990x_resume   NULL
+#define apds990x_shutdown NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int apds990x_runtime_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+       apds990x_chip_off(chip);
+       return 0;
+}
+
+static int apds990x_runtime_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct apds990x_chip *chip = i2c_get_clientdata(client);
+
+       apds990x_chip_on(chip);
+       return 0;
+}
+
+#endif
+
+static const struct i2c_device_id apds990x_id[] = {
+       {"apds990x", 0 },
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, apds990x_id);
+
+static const struct dev_pm_ops apds990x_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(apds990x_suspend, apds990x_resume)
+       SET_RUNTIME_PM_OPS(apds990x_runtime_suspend,
+                       apds990x_runtime_resume,
+                       NULL)
+};
+
+static struct i2c_driver apds990x_driver = {
+       .driver  = {
+               .name   = "apds990x",
+               .owner  = THIS_MODULE,
+               .pm     = &apds990x_pm_ops,
+       },
+       .probe    = apds990x_probe,
+       .remove   = __devexit_p(apds990x_remove),
+       .id_table = apds990x_id,
+};
+
+static int __init apds990x_init(void)
+{
+       return i2c_add_driver(&apds990x_driver);
+}
+
+static void __exit apds990x_exit(void)
+{
+       i2c_del_driver(&apds990x_driver);
+}
+
+MODULE_DESCRIPTION("APDS990X combined ALS and proximity sensor");
+MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
+MODULE_LICENSE("GPL v2");
+
+module_init(apds990x_init);
+module_exit(apds990x_exit);
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
new file mode 100644 (file)
index 0000000..cee632e
--- /dev/null
@@ -0,0 +1,1413 @@
+/*
+ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/i2c/bh1770glc.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#define BH1770_ALS_CONTROL     0x80 /* ALS operation mode control */
+#define BH1770_PS_CONTROL      0x81 /* PS operation mode control */
+#define BH1770_I_LED           0x82 /* active LED and LED1, LED2 current */
+#define BH1770_I_LED3          0x83 /* LED3 current setting */
+#define BH1770_ALS_PS_MEAS     0x84 /* Forced mode trigger */
+#define BH1770_PS_MEAS_RATE    0x85 /* PS meas. rate at stand alone mode */
+#define BH1770_ALS_MEAS_RATE   0x86 /* ALS meas. rate at stand alone mode */
+#define BH1770_PART_ID         0x8a /* Part number and revision ID */
+#define BH1770_MANUFACT_ID     0x8b /* Manufacturer ID */
+#define BH1770_ALS_DATA_0      0x8c /* ALS DATA low byte */
+#define BH1770_ALS_DATA_1      0x8d /* ALS DATA high byte */
+#define BH1770_ALS_PS_STATUS   0x8e /* Measurement data and int status */
+#define BH1770_PS_DATA_LED1    0x8f /* PS data from LED1 */
+#define BH1770_PS_DATA_LED2    0x90 /* PS data from LED2 */
+#define BH1770_PS_DATA_LED3    0x91 /* PS data from LED3 */
+#define BH1770_INTERRUPT       0x92 /* Interrupt setting */
+#define BH1770_PS_TH_LED1      0x93 /* PS interrupt threshold for LED1 */
+#define BH1770_PS_TH_LED2      0x94 /* PS interrupt threshold for LED2 */
+#define BH1770_PS_TH_LED3      0x95 /* PS interrupt threshold for LED3 */
+#define BH1770_ALS_TH_UP_0     0x96 /* ALS upper threshold low byte */
+#define BH1770_ALS_TH_UP_1     0x97 /* ALS upper threshold high byte */
+#define BH1770_ALS_TH_LOW_0    0x98 /* ALS lower threshold low byte */
+#define BH1770_ALS_TH_LOW_1    0x99 /* ALS lower threshold high byte */
+
+/* MANUFACT_ID */
+#define BH1770_MANUFACT_ROHM   0x01
+#define BH1770_MANUFACT_OSRAM  0x03
+
+/* PART_ID */
+#define BH1770_PART            0x90
+#define BH1770_PART_MASK       0xf0
+#define BH1770_REV_MASK                0x0f
+#define BH1770_REV_SHIFT       0
+#define BH1770_REV_0           0x00
+#define BH1770_REV_1           0x01
+
+/* Operating modes for both */
+#define BH1770_STANDBY         0x00
+#define BH1770_FORCED          0x02
+#define BH1770_STANDALONE      0x03
+#define BH1770_SWRESET         (0x01 << 2)
+
+#define BH1770_PS_TRIG_MEAS    (1 << 0)
+#define BH1770_ALS_TRIG_MEAS   (1 << 1)
+
+/* Interrupt control */
+#define BH1770_INT_OUTPUT_MODE (1 << 3) /* 0 = latched */
+#define BH1770_INT_POLARITY    (1 << 2) /* 1 = active high */
+#define BH1770_INT_ALS_ENA     (1 << 1)
+#define BH1770_INT_PS_ENA      (1 << 0)
+
+/* Interrupt status */
+#define BH1770_INT_LED1_DATA   (1 << 0)
+#define BH1770_INT_LED1_INT    (1 << 1)
+#define BH1770_INT_LED2_DATA   (1 << 2)
+#define BH1770_INT_LED2_INT    (1 << 3)
+#define BH1770_INT_LED3_DATA   (1 << 4)
+#define BH1770_INT_LED3_INT    (1 << 5)
+#define BH1770_INT_LEDS_INT    ((1 << 1) | (1 << 3) | (1 << 5))
+#define BH1770_INT_ALS_DATA    (1 << 6)
+#define BH1770_INT_ALS_INT     (1 << 7)
+
+/* Led channels */
+#define BH1770_LED1            0x00
+
+#define BH1770_DISABLE         0
+#define BH1770_ENABLE          1
+#define BH1770_PROX_CHANNELS   1
+
+#define BH1770_LUX_DEFAULT_RATE        1 /* Index to lux rate table */
+#define BH1770_PROX_DEFAULT_RATE 1 /* Direct HW value =~ 50Hz */
+#define BH1770_PROX_DEF_RATE_THRESH 6 /* Direct HW value =~ 5 Hz */
+#define BH1770_STARTUP_DELAY   50
+#define BH1770_RESET_TIME      10
+#define BH1770_TIMEOUT         2100 /* Timeout in 2.1 seconds */
+
+#define BH1770_LUX_RANGE       65535
+#define BH1770_PROX_RANGE      255
+#define BH1770_COEF_SCALER     1024
+#define BH1770_CALIB_SCALER    8192
+#define BH1770_LUX_NEUTRAL_CALIB_VALUE (1 * BH1770_CALIB_SCALER)
+#define BH1770_LUX_DEF_THRES   1000
+#define BH1770_PROX_DEF_THRES  70
+#define BH1770_PROX_DEF_ABS_THRES   100
+#define BH1770_DEFAULT_PERSISTENCE  10
+#define BH1770_PROX_MAX_PERSISTENCE 50
+#define BH1770_LUX_GA_SCALE    16384
+#define BH1770_LUX_CF_SCALE    2048 /* CF ChipFactor */
+#define BH1770_NEUTRAL_CF      BH1770_LUX_CF_SCALE
+#define BH1770_LUX_CORR_SCALE  4096
+
+#define PROX_ABOVE_THRESHOLD   1
+#define PROX_BELOW_THRESHOLD   0
+
+#define PROX_IGNORE_LUX_LIMIT  500
+
+struct bh1770_chip {
+       struct bh1770_platform_data     *pdata;
+       char                            chipname[10];
+       u8                              revision;
+       struct i2c_client               *client;
+       struct regulator_bulk_data      regs[2];
+       struct mutex                    mutex; /* avoid parallel access */
+       wait_queue_head_t               wait;
+
+       bool                    int_mode_prox;
+       bool                    int_mode_lux;
+       struct delayed_work     prox_work;
+       u32     lux_cf; /* Chip specific factor */
+       u32     lux_ga;
+       u32     lux_calib;
+       int     lux_rate_index;
+       u32     lux_corr;
+       u16     lux_data_raw;
+       u16     lux_threshold_hi;
+       u16     lux_threshold_lo;
+       u16     lux_thres_hi_onchip;
+       u16     lux_thres_lo_onchip;
+       bool    lux_wait_result;
+
+       int     prox_enable_count;
+       u16     prox_coef;
+       u16     prox_const;
+       int     prox_rate;
+       int     prox_rate_threshold;
+       u8      prox_persistence;
+       u8      prox_persistence_counter;
+       u8      prox_data;
+       u8      prox_threshold;
+       u8      prox_threshold_hw;
+       bool    prox_force_update;
+       u8      prox_abs_thres;
+       u8      prox_led;
+};
+
+static const char reg_vcc[] = "Vcc";
+static const char reg_vleds[] = "Vleds";
+
+/*
+ * Supported stand alone rates in ms from chip data sheet
+ * {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000};
+ */
+static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
+static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
+
+/* Supported IR-led currents in mA */
+static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
+
+/*
+ * Supported stand alone rates in ms from chip data sheet
+ * {100, 200, 500, 1000, 2000};
+ */
+static const s16 lux_rates_hz[] = {10, 5, 2, 1, 0};
+
+/*
+ * Interrupt control functions are called with chip->mutex held,
+ * except during module probe / remove.
+ */
+static inline int bh1770_lux_interrupt_control(struct bh1770_chip *chip,
+                                       int lux)
+{
+       chip->int_mode_lux = lux;
+       /* Set interrupt modes, interrupt active low, latched */
+       return i2c_smbus_write_byte_data(chip->client,
+                                       BH1770_INTERRUPT,
+                                       (lux << 1) | chip->int_mode_prox);
+}
+
+static inline int bh1770_prox_interrupt_control(struct bh1770_chip *chip,
+                                       int ps)
+{
+       chip->int_mode_prox = ps;
+       return i2c_smbus_write_byte_data(chip->client,
+                                       BH1770_INTERRUPT,
+                                       (chip->int_mode_lux << 1) | (ps << 0));
+}
+
+/* chip->mutex is always held here */
+static int bh1770_lux_rate(struct bh1770_chip *chip, int rate_index)
+{
+       /* sysfs may call this when the chip is powered off */
+       if (pm_runtime_suspended(&chip->client->dev))
+               return 0;
+
+       /* Proper proximity response needs fastest lux rate (100ms) */
+       if (chip->prox_enable_count)
+               rate_index = 0;
+
+       return i2c_smbus_write_byte_data(chip->client,
+                                       BH1770_ALS_MEAS_RATE,
+                                       rate_index);
+}
+
+static int bh1770_prox_rate(struct bh1770_chip *chip, int mode)
+{
+       int rate;
+
+       rate = (mode == PROX_ABOVE_THRESHOLD) ?
+               chip->prox_rate_threshold : chip->prox_rate;
+
+       return i2c_smbus_write_byte_data(chip->client,
+                                       BH1770_PS_MEAS_RATE,
+                                       rate);
+}
+
+/* The infrared LED is controlled by the chip during proximity scanning */
+static inline int bh1770_led_cfg(struct bh1770_chip *chip)
+{
+       /* LED cfg, current for leds 1 and 2 */
+       return i2c_smbus_write_byte_data(chip->client,
+                                       BH1770_I_LED,
+                                       (BH1770_LED1 << 6) |
+                                       (BH1770_LED_5mA << 3) |
+                                       chip->prox_led);
+}
+
+/*
+ * The following two functions convert raw PS values from the HW to
+ * normalized values. The purpose is to compensate for differences
+ * between sensor versions and variants so that results mean about
+ * the same across versions.
+ */
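+/*
+ * E.g. for the OSRAM variant detected below (prox_coef = 819,
+ * prox_const = 40): psraw = 88 maps to ((88 + 40) * 819) / 1024 = 102.
+ */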
+static inline u8 bh1770_psraw_to_adjusted(struct bh1770_chip *chip, u8 psraw)
+{
+       u16 adjusted;
+       adjusted = (u16)(((u32)(psraw + chip->prox_const) * chip->prox_coef) /
+               BH1770_COEF_SCALER);
+       if (adjusted > BH1770_PROX_RANGE)
+               adjusted = BH1770_PROX_RANGE;
+       return adjusted;
+}
+
+static inline u8 bh1770_psadjusted_to_raw(struct bh1770_chip *chip, u8 ps)
+{
+       u16 raw;
+
+       raw = (((u32)ps * BH1770_COEF_SCALER) / chip->prox_coef);
+       if (raw > chip->prox_const)
+               raw = raw - chip->prox_const;
+       else
+               raw = 0;
+       return raw;
+}
+
+/*
+ * The two lux conversion helpers below likewise convert raw lux values
+ * from the HW to normalized values and back, again compensating for
+ * differences between sensor versions and variants. chip->mutex is
+ * held when these are called.
+ */
+static int bh1770_prox_set_threshold(struct bh1770_chip *chip)
+{
+       u8 tmp = 0;
+
+       /* sysfs may call this when the chip is powered off */
+       if (pm_runtime_suspended(&chip->client->dev))
+               return 0;
+
+       tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold);
+       chip->prox_threshold_hw = tmp;
+
+       return  i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1,
+                                       tmp);
+}
+
+static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw)
+{
+       u32 lux;
+       lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE;
+       return min(lux, (u32)BH1770_LUX_RANGE);
+}
+
+static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip,
+                                       u16 adjusted)
+{
+       return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr;
+}
+
+/* chip->mutex is held when this is called */
+static int bh1770_lux_update_thresholds(struct bh1770_chip *chip,
+                                       u16 threshold_hi, u16 threshold_lo)
+{
+       u8 data[4];
+       int ret;
+
+       /* sysfs may call this when the chip is powered off */
+       if (pm_runtime_suspended(&chip->client->dev))
+               return 0;
+
+       /*
+        * Compensate threshold values with the correction factors if not
+        * set to minimum or maximum.
+        * Min & max values disables interrupts.
+        */
+       if (threshold_hi != BH1770_LUX_RANGE && threshold_hi != 0)
+               threshold_hi = bh1770_lux_adjusted_to_raw(chip, threshold_hi);
+
+       if (threshold_lo != BH1770_LUX_RANGE && threshold_lo != 0)
+               threshold_lo = bh1770_lux_adjusted_to_raw(chip, threshold_lo);
+
+       if (chip->lux_thres_hi_onchip == threshold_hi &&
+           chip->lux_thres_lo_onchip == threshold_lo)
+               return 0;
+
+       chip->lux_thres_hi_onchip = threshold_hi;
+       chip->lux_thres_lo_onchip = threshold_lo;
+
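+       /* Pack both thresholds LSB first: one block write starting at TH_UP_0 */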
+       data[0] = threshold_hi;
+       data[1] = threshold_hi >> 8;
+       data[2] = threshold_lo;
+       data[3] = threshold_lo >> 8;
+
+       ret = i2c_smbus_write_i2c_block_data(chip->client,
+                                       BH1770_ALS_TH_UP_0,
+                                       ARRAY_SIZE(data),
+                                       data);
+       return ret;
+}
+
+static int bh1770_lux_get_result(struct bh1770_chip *chip)
+{
+       u16 data;
+       int ret;
+
+       ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_0);
+       if (ret < 0)
+               return ret;
+
+       data = ret & 0xff;
+       ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_1);
+       if (ret < 0)
+               return ret;
+
+       chip->lux_data_raw = data | ((ret & 0xff) << 8);
+
+       return 0;
+}
+
+/* Calculate correction value which contains chip and device specific parts */
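+/*
+ * With all-neutral factors (lux_ga == BH1770_LUX_GA_SCALE, lux_cf ==
+ * BH1770_NEUTRAL_CF, lux_calib == BH1770_LUX_NEUTRAL_CALIB_VALUE) this
+ * returns BH1770_LUX_CORR_SCALE, i.e. raw lux passes through unchanged.
+ */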
+static u32 bh1770_get_corr_value(struct bh1770_chip *chip)
+{
+       u32 tmp;
+       /* Impact of glass attenuation correction */
+       tmp = (BH1770_LUX_CORR_SCALE * chip->lux_ga) / BH1770_LUX_GA_SCALE;
+       /* Impact of chip factor correction */
+       tmp = (tmp * chip->lux_cf) / BH1770_LUX_CF_SCALE;
+       /* Impact of Device specific calibration correction */
+       tmp = (tmp * chip->lux_calib) / BH1770_CALIB_SCALER;
+       return tmp;
+}
+
+static int bh1770_lux_read_result(struct bh1770_chip *chip)
+{
+       bh1770_lux_get_result(chip);
+       return bh1770_lux_raw_to_adjusted(chip, chip->lux_data_raw);
+}
+
+/*
+ * Chip on / off functions are called while keeping mutex except probe
+ * or remove phase
+ */
+static int bh1770_chip_on(struct bh1770_chip *chip)
+{
+       int ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+                                       chip->regs);
+       if (ret < 0)
+               return ret;
+
+       usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
+
+       /* Reset the chip */
+       i2c_smbus_write_byte_data(chip->client, BH1770_ALS_CONTROL,
+                               BH1770_SWRESET);
+       usleep_range(BH1770_RESET_TIME, BH1770_RESET_TIME * 2);
+
+       /*
+        * ALS is always started since proximity needs ALS results
+        * for reliability estimation.
+        * Let's assume dark until the first ALS measurement is ready.
+        */
+       chip->lux_data_raw = 0;
+       chip->prox_data = 0;
+       ret = i2c_smbus_write_byte_data(chip->client,
+                                       BH1770_ALS_CONTROL, BH1770_STANDALONE);
+
+       /* Assume reset defaults */
+       chip->lux_thres_hi_onchip = BH1770_LUX_RANGE;
+       chip->lux_thres_lo_onchip = 0;
+
+       return ret;
+}
+
+static void bh1770_chip_off(struct bh1770_chip *chip)
+{
+       i2c_smbus_write_byte_data(chip->client,
+                                       BH1770_INTERRUPT, BH1770_DISABLE);
+       i2c_smbus_write_byte_data(chip->client,
+                               BH1770_ALS_CONTROL, BH1770_STANDBY);
+       i2c_smbus_write_byte_data(chip->client,
+                               BH1770_PS_CONTROL, BH1770_STANDBY);
+       regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+}
+
+/* chip->mutex is held when this is called */
+static int bh1770_prox_mode_control(struct bh1770_chip *chip)
+{
+       if (chip->prox_enable_count) {
+               chip->prox_force_update = true; /* Force immediate update */
+
+               bh1770_lux_rate(chip, chip->lux_rate_index);
+               bh1770_prox_set_threshold(chip);
+               bh1770_led_cfg(chip);
+               bh1770_prox_rate(chip, PROX_BELOW_THRESHOLD);
+               bh1770_prox_interrupt_control(chip, BH1770_ENABLE);
+               i2c_smbus_write_byte_data(chip->client,
+                                       BH1770_PS_CONTROL, BH1770_STANDALONE);
+       } else {
+               chip->prox_data = 0;
+               bh1770_lux_rate(chip, chip->lux_rate_index);
+               bh1770_prox_interrupt_control(chip, BH1770_DISABLE);
+               i2c_smbus_write_byte_data(chip->client,
+                                       BH1770_PS_CONTROL, BH1770_STANDBY);
+       }
+       return 0;
+}
+
+/* chip->mutex is held when this is called */
+static int bh1770_prox_read_result(struct bh1770_chip *chip)
+{
+       int ret;
+       bool above;
+       u8 mode;
+
+       ret = i2c_smbus_read_byte_data(chip->client, BH1770_PS_DATA_LED1);
+       if (ret < 0)
+               goto out;
+
+       if (ret > chip->prox_threshold_hw)
+               above = true;
+       else
+               above = false;
+
+       /*
+        * When the ALS level goes above the limit, the proximity result
+        * may be a false positive, so ignore it. With real proximity
+        * there is a shadow causing low ALS levels.
+        */
+       if (chip->lux_data_raw > PROX_IGNORE_LUX_LIMIT)
+               ret = 0;
+
+       chip->prox_data = bh1770_psraw_to_adjusted(chip, ret);
+
+       /* Strong proximity level or force mode requires immediate response */
+       if (chip->prox_data >= chip->prox_abs_thres ||
+           chip->prox_force_update)
+               chip->prox_persistence_counter = chip->prox_persistence;
+
+       chip->prox_force_update = false;
+
+       /* Persistence filtering to reduce false proximity events */
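+       /*
+        * E.g. with prox_persistence == 10, ten consecutive above-threshold
+        * samples are swallowed (-ENODATA) and the eleventh is reported;
+        * prox_abs_thres or a forced update short-circuits the counting.
+        */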
+       if (likely(above)) {
+               if (chip->prox_persistence_counter < chip->prox_persistence) {
+                       chip->prox_persistence_counter++;
+                       ret = -ENODATA;
+               } else {
+                       mode = PROX_ABOVE_THRESHOLD;
+                       ret = 0;
+               }
+       } else {
+               chip->prox_persistence_counter = 0;
+               mode = PROX_BELOW_THRESHOLD;
+               chip->prox_data = 0;
+               ret = 0;
+       }
+
+       /* Set proximity detection rate based on above or below value */
+       if (ret == 0) {
+               bh1770_prox_rate(chip, mode);
+               sysfs_notify(&chip->client->dev.kobj, NULL, "prox0_raw");
+       }
+out:
+       return ret;
+}
+
+static int bh1770_detect(struct bh1770_chip *chip)
+{
+       struct i2c_client *client = chip->client;
+       s32 ret;
+       u8 manu, part;
+
+       ret = i2c_smbus_read_byte_data(client, BH1770_MANUFACT_ID);
+       if (ret < 0)
+               goto error;
+       manu = (u8)ret;
+
+       ret = i2c_smbus_read_byte_data(client, BH1770_PART_ID);
+       if (ret < 0)
+               goto error;
+       part = (u8)ret;
+
+       chip->revision = (part & BH1770_REV_MASK) >> BH1770_REV_SHIFT;
+       chip->prox_coef = BH1770_COEF_SCALER;
+       chip->prox_const = 0;
+       chip->lux_cf = BH1770_NEUTRAL_CF;
+
+       if ((manu == BH1770_MANUFACT_ROHM) &&
+           ((part & BH1770_PART_MASK) == BH1770_PART)) {
+               snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC");
+               return 0;
+       }
+
+       if ((manu == BH1770_MANUFACT_OSRAM) &&
+           ((part & BH1770_PART_MASK) == BH1770_PART)) {
+               snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770");
+               /* Values selected by comparing different versions */
+               chip->prox_coef = 819; /* 0.8 * BH1770_COEF_SCALER */
+               chip->prox_const = 40;
+               return 0;
+       }
+
+       ret = -ENODEV;
+error:
+       dev_dbg(&client->dev, "BH1770 or SFH7770 not found\n");
+
+       return ret;
+}
+
+/*
+ * This work is re-scheduled at every proximity interrupt.
+ * If this work runs, it means that there hasn't been any proximity
+ * interrupt in time; the situation is handled as no-proximity.
+ * It would be nice to have a low-threshold interrupt, or an interrupt
+ * when the measurement and hi-threshold are both 0, but neither exists.
+ * This is a workaround for the missing HW feature.
+ */
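+/*
+ * E.g. assuming the default above-threshold rate of ~5 Hz (200 ms,
+ * BH1770_PROX_DEF_RATE_THRESH), the timeout work fires 250 ms after the
+ * last proximity interrupt (rate + 50 ms, see bh1770_irq).
+ */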
+static void bh1770_prox_work(struct work_struct *work)
+{
+       struct bh1770_chip *chip =
+               container_of(work, struct bh1770_chip, prox_work.work);
+
+       mutex_lock(&chip->mutex);
+       bh1770_prox_read_result(chip);
+       mutex_unlock(&chip->mutex);
+}
+
+/* This is the threaded IRQ handler */
+static irqreturn_t bh1770_irq(int irq, void *data)
+{
+       struct bh1770_chip *chip = data;
+       int status;
+       int rate = 0;
+
+       mutex_lock(&chip->mutex);
+       status = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_PS_STATUS);
+
+       /* Acknowledge interrupt by reading this register */
+       i2c_smbus_read_byte_data(chip->client, BH1770_INTERRUPT);
+
+       /*
+        * Check if there is fresh data available for als.
+        * If this is the very first data, update thresholds after that.
+        */
+       if (status & BH1770_INT_ALS_DATA) {
+               bh1770_lux_get_result(chip);
+               if (unlikely(chip->lux_wait_result)) {
+                       chip->lux_wait_result = false;
+                       wake_up(&chip->wait);
+                       bh1770_lux_update_thresholds(chip,
+                                               chip->lux_threshold_hi,
+                                               chip->lux_threshold_lo);
+               }
+       }
+
+       /* Disable interrupt logic to guarantee acknowledgement */
+       i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
+                                 (0 << 1) | (0 << 0));
+
+       if ((status & BH1770_INT_ALS_INT))
+               sysfs_notify(&chip->client->dev.kobj, NULL, "lux0_input");
+
+       if (chip->int_mode_prox && (status & BH1770_INT_LEDS_INT)) {
+               rate = prox_rates_ms[chip->prox_rate_threshold];
+               bh1770_prox_read_result(chip);
+       }
+
+       /* Re-enable interrupt logic */
+       i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
+                                 (chip->int_mode_lux << 1) |
+                                 (chip->int_mode_prox << 0));
+       mutex_unlock(&chip->mutex);
+
+       /*
+        * Can't cancel the work while holding the mutex since the work
+        * uses the same mutex.
+        */
+       if (rate) {
+               /*
+                * Simulate the missing no-proximity interrupt 50 ms after
+                * the next expected interrupt time.
+                */
+               cancel_delayed_work_sync(&chip->prox_work);
+               schedule_delayed_work(&chip->prox_work,
+                               msecs_to_jiffies(rate + 50));
+       }
+       return IRQ_HANDLED;
+}
+
+static ssize_t bh1770_power_state_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       unsigned long value;
+       ssize_t ret;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+       if (value) {
+               pm_runtime_get_sync(dev);
+
+               ret = bh1770_lux_rate(chip, chip->lux_rate_index);
+               ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+
+               if (ret < 0) {
+                       pm_runtime_put(dev);
+                       goto leave;
+               }
+
+               /* This causes interrupt after the next measurement cycle */
+               bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
+                                       BH1770_LUX_DEF_THRES);
+               /* Inform that we are waiting for a result from ALS */
+               chip->lux_wait_result = true;
+               bh1770_prox_mode_control(chip);
+       } else if (!pm_runtime_suspended(dev)) {
+               pm_runtime_put(dev);
+       }
+       ret = count;
+leave:
+       mutex_unlock(&chip->mutex);
+       return ret;
+}
+
+static ssize_t bh1770_power_state_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
+}
+
+static ssize_t bh1770_lux_result_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       ssize_t ret;
+       long timeout;
+
+       if (pm_runtime_suspended(dev))
+               return -EIO; /* Chip is not enabled at all */
+
+       timeout = wait_event_interruptible_timeout(chip->wait,
+                                       !chip->lux_wait_result,
+                                       msecs_to_jiffies(BH1770_TIMEOUT));
+       if (!timeout)
+               return -EIO;
+
+       mutex_lock(&chip->mutex);
+       ret = sprintf(buf, "%d\n", bh1770_lux_read_result(chip));
+       mutex_unlock(&chip->mutex);
+
+       return ret;
+}
+
+static ssize_t bh1770_lux_range_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", BH1770_LUX_RANGE);
+}
+
+static ssize_t bh1770_prox_enable_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       unsigned long value;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+       /* Assume no proximity. Sensor will tell real state soon */
+       if (!chip->prox_enable_count)
+               chip->prox_data = 0;
+
+       if (value)
+               chip->prox_enable_count++;
+       else if (chip->prox_enable_count > 0)
+               chip->prox_enable_count--;
+       else
+               goto leave;
+
+       /* Run control only when chip is powered on */
+       if (!pm_runtime_suspended(dev))
+               bh1770_prox_mode_control(chip);
+leave:
+       mutex_unlock(&chip->mutex);
+       return count;
+}
+
+static ssize_t bh1770_prox_enable_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       ssize_t len;
+
+       mutex_lock(&chip->mutex);
+       len = sprintf(buf, "%d\n", chip->prox_enable_count);
+       mutex_unlock(&chip->mutex);
+       return len;
+}
+
+static ssize_t bh1770_prox_result_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       ssize_t ret;
+
+       mutex_lock(&chip->mutex);
+       if (chip->prox_enable_count && !pm_runtime_suspended(dev))
+               ret = sprintf(buf, "%d\n", chip->prox_data);
+       else
+               ret = -EIO;
+       mutex_unlock(&chip->mutex);
+       return ret;
+}
+
+static ssize_t bh1770_prox_range_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", BH1770_PROX_RANGE);
+}
+
+static ssize_t bh1770_get_prox_rate_avail(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       int i;
+       int pos = 0;
+       for (i = 0; i < ARRAY_SIZE(prox_rates_hz); i++)
+               pos += sprintf(buf + pos, "%d ", prox_rates_hz[i]);
+       sprintf(buf + pos - 1, "\n");
+       return pos;
+}
+
+static ssize_t bh1770_get_prox_rate_above(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate_threshold]);
+}
+
+static ssize_t bh1770_get_prox_rate_below(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate]);
+}
+
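+/*
+ * Map a requested rate in Hz to an index into prox_rates_hz[]; assuming
+ * the table is sorted in descending order, the first entry not above
+ * the request is chosen and the slowest rate is the fallback.
+ */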
+static int bh1770_prox_rate_validate(int rate)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(prox_rates_hz) - 1; i++)
+               if (rate >= prox_rates_hz[i])
+                       break;
+       return i;
+}
+
+static ssize_t bh1770_set_prox_rate_above(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       unsigned long value;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+       chip->prox_rate_threshold = bh1770_prox_rate_validate(value);
+       mutex_unlock(&chip->mutex);
+       return count;
+}
+
+static ssize_t bh1770_set_prox_rate_below(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       unsigned long value;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+       chip->prox_rate = bh1770_prox_rate_validate(value);
+       mutex_unlock(&chip->mutex);
+       return count;
+}
+
+static ssize_t bh1770_get_prox_thres(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", chip->prox_threshold);
+}
+
+static ssize_t bh1770_set_prox_thres(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       unsigned long value;
+       int ret;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+       if (value > BH1770_PROX_RANGE)
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+       chip->prox_threshold = value;
+       ret = bh1770_prox_set_threshold(chip);
+       mutex_unlock(&chip->mutex);
+       if (ret < 0)
+               return ret;
+       return count;
+}
+
+static ssize_t bh1770_prox_persistence_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%u\n", chip->prox_persistence);
+}
+
+static ssize_t bh1770_prox_persistence_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t len)
+{
+       struct bh1770_chip *chip = dev_get_drvdata(dev);
+       unsigned long value;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       if (value > BH1770_PROX_MAX_PERSISTENCE)
+               return -EINVAL;
+
+       chip->prox_persistence = value;
+
+       return len;
+}
+
+static ssize_t bh1770_prox_abs_thres_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip = dev_get_drvdata(dev);
+       return sprintf(buf, "%u\n", chip->prox_abs_thres);
+}
+
+static ssize_t bh1770_prox_abs_thres_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t len)
+{
+       struct bh1770_chip *chip = dev_get_drvdata(dev);
+       unsigned long value;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       if (value > BH1770_PROX_RANGE)
+               return -EINVAL;
+
+       chip->prox_abs_thres = value;
+
+       return len;
+}
+
+static ssize_t bh1770_chip_id_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision);
+}
+
+static ssize_t bh1770_lux_calib_default_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", BH1770_CALIB_SCALER);
+}
+
+static ssize_t bh1770_lux_calib_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip = dev_get_drvdata(dev);
+       ssize_t len;
+
+       mutex_lock(&chip->mutex);
+       len = sprintf(buf, "%u\n", chip->lux_calib);
+       mutex_unlock(&chip->mutex);
+       return len;
+}
+
+static ssize_t bh1770_lux_calib_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct bh1770_chip *chip = dev_get_drvdata(dev);
+       unsigned long value;
+       u32 old_calib;
+       u32 new_corr;
+
+       if (strict_strtoul(buf, 0, &value))
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
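+       /* Try the new calibration value; roll back if it would yield an
+        * invalid (zero) correction factor. */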
+       old_calib = chip->lux_calib;
+       chip->lux_calib = value;
+       new_corr = bh1770_get_corr_value(chip);
+       if (new_corr == 0) {
+               chip->lux_calib = old_calib;
+               mutex_unlock(&chip->mutex);
+               return -EINVAL;
+       }
+       chip->lux_corr = new_corr;
+       /* Refresh thresholds on HW after changing correction value */
+       bh1770_lux_update_thresholds(chip, chip->lux_threshold_hi,
+                               chip->lux_threshold_lo);
+
+       mutex_unlock(&chip->mutex);
+
+       return len;
+}
+
+static ssize_t bh1770_get_lux_rate_avail(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       int i;
+       int pos = 0;
+       for (i = 0; i < ARRAY_SIZE(lux_rates_hz); i++)
+               pos += sprintf(buf + pos, "%d ", lux_rates_hz[i]);
+       sprintf(buf + pos - 1, "\n");
+       return pos;
+}
+
+static ssize_t bh1770_get_lux_rate(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", lux_rates_hz[chip->lux_rate_index]);
+}
+
+static ssize_t bh1770_set_lux_rate(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       unsigned long rate_hz;
+       int ret, i;
+
+       if (strict_strtoul(buf, 0, &rate_hz))
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(lux_rates_hz) - 1; i++)
+               if (rate_hz >= lux_rates_hz[i])
+                       break;
+
+       mutex_lock(&chip->mutex);
+       chip->lux_rate_index = i;
+       ret = bh1770_lux_rate(chip, i);
+       mutex_unlock(&chip->mutex);
+
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static ssize_t bh1770_get_lux_thresh_above(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", chip->lux_threshold_hi);
+}
+
+static ssize_t bh1770_get_lux_thresh_below(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", chip->lux_threshold_lo);
+}
+
+static ssize_t bh1770_set_lux_thresh(struct bh1770_chip *chip, u16 *target,
+                               const char *buf)
+{
+       int ret = 0;
+       unsigned long thresh;
+
+       if (strict_strtoul(buf, 0, &thresh))
+               return -EINVAL;
+
+       if (thresh > BH1770_LUX_RANGE)
+               return -EINVAL;
+
+       mutex_lock(&chip->mutex);
+       *target = thresh;
+       /*
+        * Don't update the values in HW while we are still waiting
+        * for the first interrupt to arrive after the device handle
+        * was opened.
+        */
+       if (!chip->lux_wait_result)
+               ret = bh1770_lux_update_thresholds(chip,
+                                               chip->lux_threshold_hi,
+                                               chip->lux_threshold_lo);
+       mutex_unlock(&chip->mutex);
+       return ret;
+}
+
+static ssize_t bh1770_set_lux_thresh_above(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_hi, buf);
+       if (ret < 0)
+               return ret;
+       return len;
+}
+
+static ssize_t bh1770_set_lux_thresh_below(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len)
+{
+       struct bh1770_chip *chip =  dev_get_drvdata(dev);
+       int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_lo, buf);
+       if (ret < 0)
+               return ret;
+       return len;
+}
+
+static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, bh1770_prox_enable_show,
+                                               bh1770_prox_enable_store);
+static DEVICE_ATTR(prox0_thresh_above1_value, S_IRUGO | S_IWUSR,
+                                               bh1770_prox_abs_thres_show,
+                                               bh1770_prox_abs_thres_store);
+static DEVICE_ATTR(prox0_thresh_above0_value, S_IRUGO | S_IWUSR,
+                                               bh1770_get_prox_thres,
+                                               bh1770_set_prox_thres);
+static DEVICE_ATTR(prox0_raw, S_IRUGO, bh1770_prox_result_show, NULL);
+static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, bh1770_prox_range_show, NULL);
+static DEVICE_ATTR(prox0_thresh_above_count, S_IRUGO | S_IWUSR,
+                                               bh1770_prox_persistence_show,
+                                               bh1770_prox_persistence_store);
+static DEVICE_ATTR(prox0_rate_above, S_IRUGO | S_IWUSR,
+                                               bh1770_get_prox_rate_above,
+                                               bh1770_set_prox_rate_above);
+static DEVICE_ATTR(prox0_rate_below, S_IRUGO | S_IWUSR,
+                                               bh1770_get_prox_rate_below,
+                                               bh1770_set_prox_rate_below);
+static DEVICE_ATTR(prox0_rate_avail, S_IRUGO, bh1770_get_prox_rate_avail, NULL);
+
+static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, bh1770_lux_calib_show,
+                                               bh1770_lux_calib_store);
+static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
+                                               bh1770_lux_calib_default_show,
+                                               NULL);
+static DEVICE_ATTR(lux0_input, S_IRUGO, bh1770_lux_result_show, NULL);
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, bh1770_lux_range_show, NULL);
+static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, bh1770_get_lux_rate,
+                                               bh1770_set_lux_rate);
+static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, bh1770_get_lux_rate_avail, NULL);
+static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
+                                               bh1770_get_lux_thresh_above,
+                                               bh1770_set_lux_thresh_above);
+static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
+                                               bh1770_get_lux_thresh_below,
+                                               bh1770_set_lux_thresh_below);
+static DEVICE_ATTR(chip_id, S_IRUGO, bh1770_chip_id_show, NULL);
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, bh1770_power_state_show,
+                                                bh1770_power_state_store);
+
+
+static struct attribute *sysfs_attrs[] = {
+       &dev_attr_lux0_calibscale.attr,
+       &dev_attr_lux0_calibscale_default.attr,
+       &dev_attr_lux0_input.attr,
+       &dev_attr_lux0_sensor_range.attr,
+       &dev_attr_lux0_rate.attr,
+       &dev_attr_lux0_rate_avail.attr,
+       &dev_attr_lux0_thresh_above_value.attr,
+       &dev_attr_lux0_thresh_below_value.attr,
+       &dev_attr_prox0_raw.attr,
+       &dev_attr_prox0_sensor_range.attr,
+       &dev_attr_prox0_raw_en.attr,
+       &dev_attr_prox0_thresh_above_count.attr,
+       &dev_attr_prox0_rate_above.attr,
+       &dev_attr_prox0_rate_below.attr,
+       &dev_attr_prox0_rate_avail.attr,
+       &dev_attr_prox0_thresh_above0_value.attr,
+       &dev_attr_prox0_thresh_above1_value.attr,
+       &dev_attr_chip_id.attr,
+       &dev_attr_power_state.attr,
+       NULL
+};
+
+static struct attribute_group bh1770_attribute_group = {
+       .attrs = sysfs_attrs
+};
+
+static int __devinit bh1770_probe(struct i2c_client *client,
+                               const struct i2c_device_id *id)
+{
+       struct bh1770_chip *chip;
+       int err;
+
+       chip = kzalloc(sizeof *chip, GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, chip);
+       chip->client  = client;
+
+       mutex_init(&chip->mutex);
+       init_waitqueue_head(&chip->wait);
+       INIT_DELAYED_WORK(&chip->prox_work, bh1770_prox_work);
+
+       if (client->dev.platform_data == NULL) {
+               dev_err(&client->dev, "platform data is mandatory\n");
+               err = -EINVAL;
+               goto fail1;
+       }
+
+       chip->pdata             = client->dev.platform_data;
+       chip->lux_calib         = BH1770_LUX_NEUTRAL_CALIB_VALUE;
+       chip->lux_rate_index    = BH1770_LUX_DEFAULT_RATE;
+       chip->lux_threshold_lo  = BH1770_LUX_DEF_THRES;
+       chip->lux_threshold_hi  = BH1770_LUX_DEF_THRES;
+
+       if (chip->pdata->glass_attenuation == 0)
+               chip->lux_ga = BH1770_NEUTRAL_GA;
+       else
+               chip->lux_ga = chip->pdata->glass_attenuation;
+
+       chip->prox_threshold    = BH1770_PROX_DEF_THRES;
+       chip->prox_led          = chip->pdata->led_def_curr;
+       chip->prox_abs_thres    = BH1770_PROX_DEF_ABS_THRES;
+       chip->prox_persistence  = BH1770_DEFAULT_PERSISTENCE;
+       chip->prox_rate_threshold = BH1770_PROX_DEF_RATE_THRESH;
+       chip->prox_rate         = BH1770_PROX_DEFAULT_RATE;
+       chip->prox_data         = 0;
+
+       chip->regs[0].supply = reg_vcc;
+       chip->regs[1].supply = reg_vleds;
+
+       err = regulator_bulk_get(&client->dev,
+                                ARRAY_SIZE(chip->regs), chip->regs);
+       if (err < 0) {
+               dev_err(&client->dev, "Cannot get regulators\n");
+               goto fail1;
+       }
+
+       err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
+                               chip->regs);
+       if (err < 0) {
+               dev_err(&client->dev, "Cannot enable regulators\n");
+               goto fail2;
+       }
+
+       usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
+       err = bh1770_detect(chip);
+       if (err < 0)
+               goto fail3;
+
+       /* Start chip */
+       bh1770_chip_on(chip);
+       pm_runtime_set_active(&client->dev);
+       pm_runtime_enable(&client->dev);
+
+       chip->lux_corr = bh1770_get_corr_value(chip);
+       if (chip->lux_corr == 0) {
+               dev_err(&client->dev, "Improper correction values\n");
+               err = -EINVAL;
+               goto fail3;
+       }
+
+       if (chip->pdata->setup_resources) {
+               err = chip->pdata->setup_resources();
+               if (err) {
+                       err = -EINVAL;
+                       goto fail3;
+               }
+       }
+
+       err = sysfs_create_group(&chip->client->dev.kobj,
+                               &bh1770_attribute_group);
+       if (err < 0) {
+               dev_err(&chip->client->dev, "Sysfs registration failed\n");
+               goto fail4;
+       }
+
+       /*
+        * The chip needs a level-triggered interrupt to work. However,
+        * level triggering doesn't always work correctly with power
+        * management, so request both edge and level triggers.
+        */
+       err = request_threaded_irq(client->irq, NULL,
+                               bh1770_irq,
+                               IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+                               IRQF_TRIGGER_LOW,
+                               "bh1770", chip);
+       if (err) {
+               dev_err(&client->dev, "could not get IRQ %d\n",
+                       client->irq);
+               goto fail5;
+       }
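+       /*
+        * The supplies can be switched off now; bh1770_chip_on(), invoked
+        * via runtime PM when the sensor is activated, is expected to
+        * re-enable them.
+        */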
+       regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+       return err;
+fail5:
+       sysfs_remove_group(&chip->client->dev.kobj,
+                       &bh1770_attribute_group);
+fail4:
+       if (chip->pdata->release_resources)
+               chip->pdata->release_resources();
+fail3:
+       regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
+fail2:
+       regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+fail1:
+       kfree(chip);
+       return err;
+}
+
+static int __devexit bh1770_remove(struct i2c_client *client)
+{
+       struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+       free_irq(client->irq, chip);
+
+       sysfs_remove_group(&chip->client->dev.kobj,
+                       &bh1770_attribute_group);
+
+       if (chip->pdata->release_resources)
+               chip->pdata->release_resources();
+
+       cancel_delayed_work_sync(&chip->prox_work);
+
+       if (!pm_runtime_suspended(&client->dev))
+               bh1770_chip_off(chip);
+
+       pm_runtime_disable(&client->dev);
+       pm_runtime_set_suspended(&client->dev);
+
+       regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
+       kfree(chip);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int bh1770_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+       bh1770_chip_off(chip);
+
+       return 0;
+}
+
+static int bh1770_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct bh1770_chip *chip = i2c_get_clientdata(client);
+       int ret = 0;
+
+       bh1770_chip_on(chip);
+
+       if (!pm_runtime_suspended(dev)) {
+               /*
+                * If we were enabled at suspend time, everything is
+                * expected to work nicely and smoothly on resume.
+                */
+               ret = bh1770_lux_rate(chip, chip->lux_rate_index);
+               ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+
+               /* This causes an interrupt after the next measurement cycle */
+               bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
+                                       BH1770_LUX_DEF_THRES);
+               /* Inform that we are waiting for a result from ALS */
+               chip->lux_wait_result = true;
+               bh1770_prox_mode_control(chip);
+       }
+       return ret;
+}
+
+#else
+#define bh1770_suspend NULL
+#define bh1770_resume  NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int bh1770_runtime_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+       bh1770_chip_off(chip);
+
+       return 0;
+}
+
+static int bh1770_runtime_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct bh1770_chip *chip = i2c_get_clientdata(client);
+
+       bh1770_chip_on(chip);
+
+       return 0;
+}
+#endif
+
+static const struct i2c_device_id bh1770_id[] = {
+       {"bh1770glc", 0 },
+       {"sfh7770", 0 },
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bh1770_id);
+
+static const struct dev_pm_ops bh1770_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(bh1770_suspend, bh1770_resume)
+       SET_RUNTIME_PM_OPS(bh1770_runtime_suspend, bh1770_runtime_resume, NULL)
+};
+
+static struct i2c_driver bh1770_driver = {
+       .driver  = {
+               .name   = "bh1770glc",
+               .owner  = THIS_MODULE,
+               .pm     = &bh1770_pm_ops,
+       },
+       .probe    = bh1770_probe,
+       .remove   = __devexit_p(bh1770_remove),
+       .id_table = bh1770_id,
+};
+
+static int __init bh1770_init(void)
+{
+       return i2c_add_driver(&bh1770_driver);
+}
+
+static void __exit bh1770_exit(void)
+{
+       i2c_del_driver(&bh1770_driver);
+}
+
+MODULE_DESCRIPTION("BH1770GLC / SFH7770 combined ALS and proximity sensor");
+MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
+MODULE_LICENSE("GPL v2");
+
+module_init(bh1770_init);
+module_exit(bh1770_exit);
index af2497ae5fe32be990aabdb6732c44eaaf0591fb..0a53500636c9d3f4c4d1f886d386f1b1b675f95d 100644 (file)
@@ -146,6 +146,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
        struct inode *ret = new_inode(sb);
 
        if (ret) {
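+               /* i_ino is no longer assigned by new_inode(); take one from
+                * the shared get_next_ino() allocator. */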
+               ret->i_ino = get_next_ino();
                ret->i_mode = mode;
                ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
        }
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
new file mode 100644 (file)
index 0000000..34fe835
--- /dev/null
@@ -0,0 +1,248 @@
+/*
+ * isl29020.c - Intersil ALS Driver
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Data sheet at: http://www.intersil.com/data/fn/fn6505.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/pm_runtime.h>
+
+static DEFINE_MUTEX(mutex);
+
+static ssize_t als_sensing_range_show(struct device *dev,
+                       struct device_attribute *attr,  char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       int  val;
+
+       val = i2c_smbus_read_byte_data(client, 0x00);
+
+       if (val < 0)
+               return val;
+       return sprintf(buf, "%d000\n", 1 << (2 * (val & 3)));
+}
+
+static ssize_t als_lux_input_data_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       int ret_val, val;
+       unsigned long int lux;
+       int temp;
+
+       pm_runtime_get_sync(dev);
+       msleep(100);
+
+       mutex_lock(&mutex);
+       temp = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */
+       if (temp < 0) {
+               pm_runtime_put_sync(dev);
+               mutex_unlock(&mutex);
+               return temp;
+       }
+
+       ret_val = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */
+       mutex_unlock(&mutex);
+
+       if (ret_val < 0) {
+               pm_runtime_put_sync(dev);
+               return ret_val;
+       }
+
+       ret_val |= temp << 8;
+       val = i2c_smbus_read_byte_data(client, 0x00);
+       pm_runtime_put_sync(dev);
+       if (val < 0)
+               return val;
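+       /* Full scale is 1000 * 4^(range bits), i.e. 1000/4000/16000/64000
+        * lux; scale the 16-bit raw count accordingly. */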
+       lux = ((1 << (2 * (val & 3))) * 1000 * ret_val) / 65536;
+       return sprintf(buf, "%ld\n", lux);
+}
+
+static ssize_t als_sensing_range_store(struct device *dev,
+               struct device_attribute *attr, const  char *buf, size_t count)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       unsigned int ret_val;
+       unsigned long val;
+
+       if (strict_strtoul(buf, 10, &val))
+               return -EINVAL;
+       if (val < 1 || val > 64000)
+               return -EINVAL;
+
+       /* Pick the smallest sensor range that will meet our requirements */
+       if (val <= 1000)
+               val = 1;
+       else if (val <= 4000)
+               val = 2;
+       else if (val <= 16000)
+               val = 3;
+       else
+               val = 4;
+
+       ret_val = i2c_smbus_read_byte_data(client, 0x00);
+
+       ret_val &= 0xFC; /* reset the range bits before setting them */
+       ret_val |= val - 1;
+       ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val);
+
+       if (ret_val < 0)
+               return ret_val;
+       return count;
+}
+
+static void als_set_power_state(struct i2c_client *client, int enable)
+{
+       int ret_val;
+
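+       /* Bit 7 of the command register (0x00) is the chip enable bit. */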
+       ret_val = i2c_smbus_read_byte_data(client, 0x00);
+       if (ret_val < 0)
+               return;
+
+       if (enable)
+               ret_val |= 0x80;
+       else
+               ret_val &= 0x7F;
+
+       i2c_smbus_write_byte_data(client, 0x00, ret_val);
+}
+
+static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
+       als_sensing_range_show, als_sensing_range_store);
+static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux_input_data_show, NULL);
+
+static struct attribute *mid_att_als[] = {
+       &dev_attr_lux0_sensor_range.attr,
+       &dev_attr_lux0_input.attr,
+       NULL
+};
+
+static struct attribute_group m_als_gr = {
+       .name = "isl29020",
+       .attrs = mid_att_als
+};
+
+static int als_set_default_config(struct i2c_client *client)
+{
+       int retval;
+
+       retval = i2c_smbus_write_byte_data(client, 0x00, 0xc0);
+       if (retval < 0) {
+               dev_err(&client->dev, "default write failed\n");
+               return retval;
+       }
+       return 0;
+}
+
+static int  isl29020_probe(struct i2c_client *client,
+                                       const struct i2c_device_id *id)
+{
+       int res;
+
+       res = als_set_default_config(client);
+       if (res <  0)
+               return res;
+
+       res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
+       if (res) {
+               dev_err(&client->dev, "isl29020: device create file failed\n");
+               return res;
+       }
+       dev_info(&client->dev, "%s isl29020: ALS chip found\n", client->name);
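+       /* Leave the sensor powered down; runtime PM turns it on around
+        * lux reads (see als_lux_input_data_show()). */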
+       als_set_power_state(client, 0);
+       pm_runtime_enable(&client->dev);
+       return res;
+}
+
+static int isl29020_remove(struct i2c_client *client)
+{
+       sysfs_remove_group(&client->dev.kobj, &m_als_gr);
+       return 0;
+}
+
+static struct i2c_device_id isl29020_id[] = {
+       { "isl29020", 0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(i2c, isl29020_id);
+
+#ifdef CONFIG_PM
+
+static int isl29020_runtime_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       als_set_power_state(client, 0);
+       return 0;
+}
+
+static int isl29020_runtime_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       als_set_power_state(client, 1);
+       return 0;
+}
+
+static const struct dev_pm_ops isl29020_pm_ops = {
+       .runtime_suspend = isl29020_runtime_suspend,
+       .runtime_resume = isl29020_runtime_resume,
+};
+
+#define ISL29020_PM_OPS (&isl29020_pm_ops)
+#else  /* CONFIG_PM */
+#define ISL29020_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_driver isl29020_driver = {
+       .driver = {
+               .name = "isl29020",
+               .pm = ISL29020_PM_OPS,
+       },
+       .probe = isl29020_probe,
+       .remove = isl29020_remove,
+       .id_table = isl29020_id,
+};
+
+static int __init sensor_isl29020_init(void)
+{
+       return i2c_add_driver(&isl29020_driver);
+}
+
+static void  __exit sensor_isl29020_exit(void)
+{
+       i2c_del_driver(&isl29020_driver);
+}
+
+module_init(sensor_isl29020_init);
+module_exit(sensor_isl29020_exit);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
+MODULE_DESCRIPTION("Intersil isl29020 ALS Driver");
+MODULE_LICENSE("GPL v2");
index 343b5d8ea6971fc07ddbb443b91ddfee6439e31c..81d7fa4ec0db0a284891a31ab280720a19bd32d9 100644 (file)
 #define REC_NUM_DEFAULT 10
 
 enum cname {
-       INVALID,
-       INT_HARDWARE_ENTRY,
-       INT_HW_IRQ_EN,
-       INT_TASKLET_ENTRY,
-       FS_DEVRW,
-       MEM_SWAPOUT,
-       TIMERADD,
-       SCSI_DISPATCH_CMD,
-       IDE_CORE_CP,
-       DIRECT,
+       CN_INVALID,
+       CN_INT_HARDWARE_ENTRY,
+       CN_INT_HW_IRQ_EN,
+       CN_INT_TASKLET_ENTRY,
+       CN_FS_DEVRW,
+       CN_MEM_SWAPOUT,
+       CN_TIMERADD,
+       CN_SCSI_DISPATCH_CMD,
+       CN_IDE_CORE_CP,
+       CN_DIRECT,
 };
 
 enum ctype {
-       NONE,
-       PANIC,
-       BUG,
-       EXCEPTION,
-       LOOP,
-       OVERFLOW,
-       CORRUPT_STACK,
-       UNALIGNED_LOAD_STORE_WRITE,
-       OVERWRITE_ALLOCATION,
-       WRITE_AFTER_FREE,
-       SOFTLOCKUP,
-       HARDLOCKUP,
-       HUNG_TASK,
+       CT_NONE,
+       CT_PANIC,
+       CT_BUG,
+       CT_EXCEPTION,
+       CT_LOOP,
+       CT_OVERFLOW,
+       CT_CORRUPT_STACK,
+       CT_UNALIGNED_LOAD_STORE_WRITE,
+       CT_OVERWRITE_ALLOCATION,
+       CT_WRITE_AFTER_FREE,
+       CT_SOFTLOCKUP,
+       CT_HARDLOCKUP,
+       CT_HUNG_TASK,
 };
 
 static char* cp_name[] = {
@@ -117,8 +117,8 @@ static char* cpoint_type;
 static int cpoint_count = DEFAULT_COUNT;
 static int recur_count = REC_NUM_DEFAULT;
 
-static enum cname cpoint = INVALID;
-static enum ctype cptype = NONE;
+static enum cname cpoint = CN_INVALID;
+static enum ctype cptype = CT_NONE;
 static int count = DEFAULT_COUNT;
 
 module_param(recur_count, int, 0644);
@@ -207,12 +207,12 @@ static enum ctype parse_cp_type(const char *what, size_t count)
                        return i + 1;
        }
 
-       return NONE;
+       return CT_NONE;
 }
 
 static const char *cp_type_to_str(enum ctype type)
 {
-       if (type == NONE || type < 0 || type > ARRAY_SIZE(cp_type))
+       if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
                return "None";
 
        return cp_type[type - 1];
@@ -220,7 +220,7 @@ static const char *cp_type_to_str(enum ctype type)
 
 static const char *cp_name_to_str(enum cname name)
 {
-       if (name == INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
+       if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
                return "INVALID";
 
        return cp_name[name - 1];
@@ -245,7 +245,7 @@ static int lkdtm_parse_commandline(void)
                return -EINVAL;
 
        cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
-       if (cptype == NONE)
+       if (cptype == CT_NONE)
                return -EINVAL;
 
        for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
@@ -274,30 +274,30 @@ static int recursive_loop(int a)
 static void lkdtm_do_action(enum ctype which)
 {
        switch (which) {
-       case PANIC:
+       case CT_PANIC:
                panic("dumptest");
                break;
-       case BUG:
+       case CT_BUG:
                BUG();
                break;
-       case EXCEPTION:
+       case CT_EXCEPTION:
                *((int *) 0) = 0;
                break;
-       case LOOP:
+       case CT_LOOP:
                for (;;)
                        ;
                break;
-       case OVERFLOW:
+       case CT_OVERFLOW:
                (void) recursive_loop(0);
                break;
-       case CORRUPT_STACK: {
+       case CT_CORRUPT_STACK: {
                volatile u32 data[8];
                volatile u32 *p = data;
 
                p[12] = 0x12345678;
                break;
        }
-       case UNALIGNED_LOAD_STORE_WRITE: {
+       case CT_UNALIGNED_LOAD_STORE_WRITE: {
                static u8 data[5] __attribute__((aligned(4))) = {1, 2,
                                3, 4, 5};
                u32 *p;
@@ -309,7 +309,7 @@ static void lkdtm_do_action(enum ctype which)
                *p = val;
                 break;
        }
-       case OVERWRITE_ALLOCATION: {
+       case CT_OVERWRITE_ALLOCATION: {
                size_t len = 1020;
                u32 *data = kmalloc(len, GFP_KERNEL);
 
@@ -317,7 +317,7 @@ static void lkdtm_do_action(enum ctype which)
                kfree(data);
                break;
        }
-       case WRITE_AFTER_FREE: {
+       case CT_WRITE_AFTER_FREE: {
                size_t len = 1024;
                u32 *data = kmalloc(len, GFP_KERNEL);
 
@@ -326,21 +326,21 @@ static void lkdtm_do_action(enum ctype which)
                memset(data, 0x78, len);
                break;
        }
-       case SOFTLOCKUP:
+       case CT_SOFTLOCKUP:
                preempt_disable();
                for (;;)
                        cpu_relax();
                break;
-       case HARDLOCKUP:
+       case CT_HARDLOCKUP:
                local_irq_disable();
                for (;;)
                        cpu_relax();
                break;
-       case HUNG_TASK:
+       case CT_HUNG_TASK:
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
                break;
-       case NONE:
+       case CT_NONE:
        default:
                break;
        }
@@ -363,43 +363,43 @@ static int lkdtm_register_cpoint(enum cname which)
 {
        int ret;
 
-       cpoint = INVALID;
+       cpoint = CN_INVALID;
        if (lkdtm.entry != NULL)
                unregister_jprobe(&lkdtm);
 
        switch (which) {
-       case DIRECT:
+       case CN_DIRECT:
                lkdtm_do_action(cptype);
                return 0;
-       case INT_HARDWARE_ENTRY:
+       case CN_INT_HARDWARE_ENTRY:
                lkdtm.kp.symbol_name = "do_IRQ";
                lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
                break;
-       case INT_HW_IRQ_EN:
+       case CN_INT_HW_IRQ_EN:
                lkdtm.kp.symbol_name = "handle_IRQ_event";
                lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
                break;
-       case INT_TASKLET_ENTRY:
+       case CN_INT_TASKLET_ENTRY:
                lkdtm.kp.symbol_name = "tasklet_action";
                lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
                break;
-       case FS_DEVRW:
+       case CN_FS_DEVRW:
                lkdtm.kp.symbol_name = "ll_rw_block";
                lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
                break;
-       case MEM_SWAPOUT:
+       case CN_MEM_SWAPOUT:
                lkdtm.kp.symbol_name = "shrink_inactive_list";
                lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
                break;
-       case TIMERADD:
+       case CN_TIMERADD:
                lkdtm.kp.symbol_name = "hrtimer_start";
                lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
                break;
-       case SCSI_DISPATCH_CMD:
+       case CN_SCSI_DISPATCH_CMD:
                lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
                lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
                break;
-       case IDE_CORE_CP:
+       case CN_IDE_CORE_CP:
 #ifdef CONFIG_IDE
                lkdtm.kp.symbol_name = "generic_ide_ioctl";
                lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
@@ -416,7 +416,7 @@ static int lkdtm_register_cpoint(enum cname which)
        cpoint = which;
        if ((ret = register_jprobe(&lkdtm)) < 0) {
                printk(KERN_INFO "lkdtm: Couldn't register jprobe\n");
-               cpoint = INVALID;
+               cpoint = CN_INVALID;
        }
 
        return ret;
@@ -445,7 +445,7 @@ static ssize_t do_register_entry(enum cname which, struct file *f,
        cptype = parse_cp_type(buf, count);
        free_page((unsigned long) buf);
 
-       if (cptype == NONE)
+       if (cptype == CT_NONE)
                return -EINVAL;
 
        err = lkdtm_register_cpoint(which);
@@ -487,49 +487,49 @@ static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
 static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
 {
-       return do_register_entry(INT_HARDWARE_ENTRY, f, buf, count, off);
+       return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
 }
 
 static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
 {
-       return do_register_entry(INT_HW_IRQ_EN, f, buf, count, off);
+       return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
 }
 
 static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
 {
-       return do_register_entry(INT_TASKLET_ENTRY, f, buf, count, off);
+       return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
 }
 
 static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
 {
-       return do_register_entry(FS_DEVRW, f, buf, count, off);
+       return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
 }
 
 static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
 {
-       return do_register_entry(MEM_SWAPOUT, f, buf, count, off);
+       return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
 }
 
 static ssize_t timeradd_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
 {
-       return do_register_entry(TIMERADD, f, buf, count, off);
+       return do_register_entry(CN_TIMERADD, f, buf, count, off);
 }
 
 static ssize_t scsi_dispatch_cmd_entry(struct file *f,
                const char __user *buf, size_t count, loff_t *off)
 {
-       return do_register_entry(SCSI_DISPATCH_CMD, f, buf, count, off);
+       return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
 }
 
 static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
                size_t count, loff_t *off)
 {
-       return do_register_entry(IDE_CORE_CP, f, buf, count, off);
+       return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
 }
 
 /* Special entry to just crash directly. Available without KPROBEs */
@@ -557,7 +557,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
 
        type = parse_cp_type(buf, count);
        free_page((unsigned long) buf);
-       if (type == NONE)
+       if (type == CT_NONE)
                return -EINVAL;
 
        printk(KERN_INFO "lkdtm: Performing direct entry %s\n",
@@ -649,7 +649,7 @@ static int __init lkdtm_module_init(void)
                goto out_err;
        }
 
-       if (cpoint != INVALID && cptype != NONE) {
+       if (cpoint != CN_INVALID && cptype != CT_NONE) {
                ret = lkdtm_register_cpoint(cpoint);
                if (ret < 0) {
                        printk(KERN_INFO "lkdtm: Invalid crash point %d\n",
index 4197a3cb26ba42d674ef7d0935207adee0d70be1..b05db55c8c8e757c312184c797321942d814ec55 100644 (file)
@@ -343,8 +343,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev,
        int retval;
 
        retval = pci_enable_device(pdev);
-       if (retval)
+       if (retval) {
+               dev_err(&pdev->dev, "pci_enable_device failed!\n");
                goto err;
+       }
 
        minor = phantom_get_free();
        if (minor == PHANTOM_MAX_MINORS) {
@@ -356,8 +358,10 @@ static int __devinit phantom_probe(struct pci_dev *pdev,
        phantom_devices[minor] = 1;
 
        retval = pci_request_regions(pdev, "phantom");
-       if (retval)
+       if (retval) {
+               dev_err(&pdev->dev, "pci_request_regions failed!\n");
                goto err_null;
+       }
 
        retval = -ENOMEM;
        pht = kzalloc(sizeof(*pht), GFP_KERNEL);
index 1f59ee2226ca4220e470842326ec64fba62377a2..17bbacb1b4b131b119b0076b339f44012fcc51f7 100644 (file)
@@ -417,6 +417,7 @@ xpc_process_activate_IRQ_rcvd_uv(void)
 static void
 xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
                              struct xpc_activate_mq_msghdr_uv *msg_hdr,
+                             int part_setup,
                              int *wakeup_hb_checker)
 {
        unsigned long irq_flags;
@@ -481,6 +482,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
                struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
 
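+               /* Channel control messages cannot be handled until the
+                * partition's channel structures exist; @part_setup carries
+                * the xpc_part_ref() result from the caller. */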
+               if (!part_setup)
+                       break;
+
                msg = container_of(msg_hdr, struct
                                   xpc_activate_mq_msg_chctl_closerequest_uv,
                                   hdr);
@@ -497,6 +501,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
                struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
 
+               if (!part_setup)
+                       break;
+
                msg = container_of(msg_hdr, struct
                                   xpc_activate_mq_msg_chctl_closereply_uv,
                                   hdr);
@@ -511,6 +518,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
                struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
 
+               if (!part_setup)
+                       break;
+
                msg = container_of(msg_hdr, struct
                                   xpc_activate_mq_msg_chctl_openrequest_uv,
                                   hdr);
@@ -528,6 +538,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
                struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
 
+               if (!part_setup)
+                       break;
+
                msg = container_of(msg_hdr, struct
                                   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
                args = &part->remote_openclose_args[msg->ch_number];
@@ -545,6 +558,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
        case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
                struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
 
+               if (!part_setup)
+                       break;
+
                msg = container_of(msg_hdr, struct
                                xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
                spin_lock_irqsave(&part->chctl_lock, irq_flags);
@@ -621,6 +637,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
 
                        part_referenced = xpc_part_ref(part);
                        xpc_handle_activate_mq_msg_uv(part, msg_hdr,
+                                                     part_referenced,
                                                      &wakeup_hb_checker);
                        if (part_referenced)
                                xpc_part_deref(part);
index 14390641704881ffb71ded6fd2d19118be07a4d6..f6e0d40cd876407ddf998ff846cf095cfaeddd4f 100644 (file)
@@ -124,6 +124,13 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
        return 0;
 }
 
+static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
+{
+       struct mlx4_en_dev *endev = ctx;
+
+       return endev->pndev[port];
+}
+
 static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
                          enum mlx4_dev_event event, int port)
 {
@@ -282,9 +289,11 @@ err_free_res:
 }
 
 static struct mlx4_interface mlx4_en_interface = {
-       .add    = mlx4_en_add,
-       .remove = mlx4_en_remove,
-       .event  = mlx4_en_event,
+       .add            = mlx4_en_add,
+       .remove         = mlx4_en_remove,
+       .event          = mlx4_en_event,
+       .get_dev        = mlx4_en_get_netdev,
+       .protocol       = MLX4_PROTOCOL_EN,
 };
 
 static int __init mlx4_en_init(void)
index 79478bd4211a958b369f9c8882351e3bcbb89424..6d6806b361e3145e31e654db0acef6152fec7b4b 100644 (file)
@@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
+       int idx;
 
        if (!priv->vlgrp)
                return;
@@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
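+       /* Also reserve an entry in the port VLAN table so the VLAN can
+        * later be found via mlx4_find_cached_vlan(). */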
+       if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
+               en_err(priv, "failed adding vlan %d\n", vid);
        mutex_unlock(&mdev->state_lock);
+
 }
 
 static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
+       int idx;
 
        if (!priv->vlgrp)
                return;
@@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
        /* Remove VID from port VLAN filter */
        mutex_lock(&mdev->state_lock);
+       if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
+               mlx4_unregister_vlan(mdev->dev, priv->port, idx);
+       else
+               en_err(priv, "could not find vid %d in cache\n", vid);
+
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
                if (err)
index aa3ef2aee5bf8435575a6f75a8e65502ebc09456..7f5a3221e0c1dc8c8c9071808e18e9f552c11eca 100644 (file)
@@ -127,8 +127,8 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
        memset(context, 0, sizeof *context);
 
        context->base_qpn = cpu_to_be32(base_qpn);
-       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
-       context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
+       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
+       context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn);
        context->intra_no_vlan = 0;
        context->no_vlan = MLX4_NO_VLAN_IDX;
        context->intra_vlan_miss = 0;
index f6511aa2b7dfba0a8fca919f606189be9b3a2a5b..092e814b19813bbff8fbef391b69b8acaf0cc291 100644 (file)
@@ -36,7 +36,8 @@
 
 
 #define SET_PORT_GEN_ALL_VALID 0x7
-#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_PROMISC_EN_SHIFT      31
+#define SET_PORT_PROMISC_MODE_SHIFT    30
 
 enum {
        MLX4_CMD_SET_VLAN_FLTR  = 0x47,
index b716e1a1b2982bae0f25b1ed57793c3ccc31aaa7..b68eee2414c208765849d0136dfb7cb0065f52ba 100644 (file)
@@ -98,7 +98,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
                [20] = "Address vector port checking support",
                [21] = "UD multicast support",
                [24] = "Demand paging support",
-               [25] = "Router support"
+               [25] = "Router support",
+               [30] = "IBoE support"
        };
        int i;
 
index 5550678027513fa6f7b443bffe63e72198f4ec1c..73c94fcdfddf0096318e4db7649a4c5a858e9f20 100644 (file)
@@ -161,3 +161,24 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
 
        mutex_unlock(&intf_mutex);
 }
+
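+/*
+ * Return the per-port handle that the driver registered for @proto on
+ * this device; for MLX4_PROTOCOL_EN this is the net_device.
+ */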
+void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_device_context *dev_ctx;
+       unsigned long flags;
+       void *result = NULL;
+
+       spin_lock_irqsave(&priv->ctx_lock, flags);
+
+       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+               if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
+                       result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
+                       break;
+               }
+
+       spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
index 569fa3df381f8131bb6b91454a577bde8314500b..782f11d8fa71d52e2ae8549c41970c52cdb2cbb8 100644 (file)
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
 
 static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
-MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
 
 int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
@@ -1310,7 +1310,7 @@ static int __init mlx4_verify_params(void)
                return -1;
        }
 
-       if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+       if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
                pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
                return -1;
        }
index 1fc16ab7ad2f0ebc0d6b7b6d604e593ceb223e69..dfed6a07c2d77d34e9a6a8775afacfac76f07c6c 100644 (file)
@@ -475,6 +475,7 @@ struct mlx4_en_priv {
        char *mc_addrs;
        int mc_addrs_cnt;
        struct mlx4_en_stat_out_mbox hw_stats;
+       int vids[128];
 };
 
 
index 606aa58afdead9458de5f3de2f19594074936798..56371ef328efebec61d170de0d9cc569dcbd5913 100644 (file)
@@ -182,6 +182,25 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
        return err;
 }
 
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
+{
+       struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+       int i;
+
+       for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
+               if (table->refs[i] &&
+                   (vid == (MLX4_VLAN_MASK &
+                             be32_to_cpu(table->entries[i])))) {
+                       /* VLAN already registered, increase reference count */
+                       *idx = i;
+                       return 0;
+               }
+       }
+
+       return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
+
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
 {
        struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
index 95f711b251adf33a80dd69eab2a6b792a11313ac..449de59bf35bb8a255d73b1a77eff0d040f953be 100644 (file)
@@ -28,6 +28,7 @@ static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
        struct inode *inode = new_inode(sb);
 
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        }
index 0bab84ebb15da8961e20be1ebfd120355ea3cdea..19bc73695475078a2b3a419e9ccfb9c4a4979882 100644 (file)
@@ -12,11 +12,12 @@ void pnp_unregister_protocol(struct pnp_protocol *protocol);
 
 #define PNP_EISA_ID_MASK 0x7fffffff
 void pnp_eisa_id_to_string(u32 id, char *str);
-struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *, int id, char *pnpid);
+struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *, int id,
+                             const char *pnpid);
 struct pnp_card *pnp_alloc_card(struct pnp_protocol *, int id, char *pnpid);
 
 int pnp_add_device(struct pnp_dev *dev);
-struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id);
+struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id);
 
 int pnp_add_card(struct pnp_card *card);
 void pnp_remove_card(struct pnp_card *card);
index 88b3cde525968a2f6ee5e3b9be37e3177125a52f..0f34d962fd3c53bf4de3002524eab809899e4ec5 100644 (file)
@@ -124,7 +124,8 @@ static void pnp_release_device(struct device *dmdev)
        kfree(dev);
 }
 
-struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid)
+struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
+                             const char *pnpid)
 {
        struct pnp_dev *dev;
        struct pnp_id *dev_id;
@@ -194,8 +195,9 @@ int pnp_add_device(struct pnp_dev *dev)
        for (id = dev->id; id; id = id->next)
                len += scnprintf(buf + len, sizeof(buf) - len, " %s", id->id);
 
-       pnp_dbg(&dev->dev, "%s device, IDs%s (%s)\n",
-               dev->protocol->name, buf, dev->active ? "active" : "disabled");
+       dev_printk(KERN_DEBUG, &dev->dev, "%s device, IDs%s (%s)\n",
+                  dev->protocol->name, buf,
+                  dev->active ? "active" : "disabled");
        return 0;
 }
 
index cd11b113494fabf7ca2367a2567bbbbc8af71ac7..d1dbb9df53faa7bc7e9b6fe8de8158595188c949 100644 (file)
@@ -236,7 +236,7 @@ void pnp_unregister_driver(struct pnp_driver *drv)
  * @dev: pointer to the desired device
  * @id: pointer to an EISA id string
  */
-struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id)
+struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id)
 {
        struct pnp_id *dev_id, *ptr;
 
index dc4e32e031e9d61f77a3d706d8582ac48285b039..2d73dfcecdbb4e2098e3d881a6a091a9e8ca5e62 100644 (file)
@@ -28,7 +28,7 @@
 #include "../base.h"
 #include "pnpacpi.h"
 
-static int num = 0;
+static int num;
 
 /* We need only to blacklist devices that have already an acpi driver that
  * can't use pnp layer. We don't need to blacklist device that are directly
@@ -59,7 +59,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev)
 #define TEST_ALPHA(c) \
        if (!('@' <= (c) || (c) <= 'Z')) \
                return 0
-static int __init ispnpidacpi(char *id)
+static int __init ispnpidacpi(const char *id)
 {
        TEST_ALPHA(id[0]);
        TEST_ALPHA(id[1]);
@@ -180,11 +180,24 @@ struct pnp_protocol pnpacpi_protocol = {
 };
 EXPORT_SYMBOL(pnpacpi_protocol);
 
+static char *pnpacpi_get_id(struct acpi_device *device)
+{
+       struct acpi_hardware_id *id;
+
+       list_for_each_entry(id, &device->pnp.ids, list) {
+               if (ispnpidacpi(id->id))
+                       return id->id;
+       }
+
+       return NULL;
+}
+
 static int __init pnpacpi_add_device(struct acpi_device *device)
 {
        acpi_handle temp = NULL;
        acpi_status status;
        struct pnp_dev *dev;
+       char *pnpid;
        struct acpi_hardware_id *id;
 
        /*
@@ -192,11 +205,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
         * driver should not be loaded.
         */
        status = acpi_get_handle(device->handle, "_CRS", &temp);
-       if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) ||
-           is_exclusive_device(device) || (!device->status.present))
+       if (ACPI_FAILURE(status))
+               return 0;
+
+       pnpid = pnpacpi_get_id(device);
+       if (!pnpid)
+               return 0;
+
+       if (is_exclusive_device(device) || !device->status.present)
                return 0;
 
-       dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
+       dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid);
        if (!dev)
                return -ENOMEM;
 
@@ -227,7 +246,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
                pnpacpi_parse_resource_option_data(dev);
 
        list_for_each_entry(id, &device->pnp.ids, list) {
-               if (!strcmp(id->id, acpi_device_hid(device)))
+               if (!strcmp(id->id, pnpid))
                        continue;
                if (!ispnpidacpi(id->id))
                        continue;
index e3446ab8b563743d462756fb7237225a8a492f79..a925e6b63d72cff9953672c285210de80c3c488e 100644 (file)
@@ -523,7 +523,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
        res->start = irq;
        res->end = irq;
 
-       pnp_dbg(&dev->dev, "  add %pr\n", res);
+       dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
        return pnp_res;
 }
 
@@ -544,7 +544,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
        res->start = dma;
        res->end = dma;
 
-       pnp_dbg(&dev->dev, "  add %pr\n", res);
+       dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
        return pnp_res;
 }
 
@@ -568,7 +568,7 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
        res->start = start;
        res->end = end;
 
-       pnp_dbg(&dev->dev, "  add %pr\n", res);
+       dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
        return pnp_res;
 }
 
@@ -592,7 +592,7 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
        res->start = start;
        res->end = end;
 
-       pnp_dbg(&dev->dev, "  add %pr\n", res);
+       dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
        return pnp_res;
 }
 
@@ -616,7 +616,7 @@ struct pnp_resource *pnp_add_bus_resource(struct pnp_dev *dev,
        res->start = start;
        res->end = end;
 
-       pnp_dbg(&dev->dev, "  add %pr\n", res);
+       dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", res);
        return pnp_res;
 }
 
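
For context: %pR is printk's resource-printing extension, which decodes a
struct resource's type and range, where the old "%pr" calls printed a rawer
form. A minimal sketch of what these call sites now emit (values are made up
and the exact output format varies by kernel version):

/* Sketch only: assumes a struct pnp_dev *dev in scope, as above. */
struct resource res = {
        .start = 0x60,
        .end   = 0x6f,
        .flags = IORESOURCE_IO,
};
dev_printk(KERN_DEBUG, &dev->dev, "%pR\n", &res);
/* emits something like: "00:01: [io  0x0060-0x006f]" */
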
index 97dae297ca3c1483006c05b52f25b92386971330..c62d30017c07257bb7ac073a465f3e1adfa148ce 100644 (file)
@@ -882,12 +882,8 @@ static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
 static int pohmelfs_fsync(struct file *file, int datasync)
 {
        struct inode *inode = file->f_mapping->host;
-       struct writeback_control wbc = {
-               .sync_mode = WB_SYNC_ALL,
-               .nr_to_write = 0,       /* sys_fsync did this */
-       };
 
-       return sync_inode(inode, &wbc);
+       return sync_inode_metadata(inode, 1);
 }
 
 ssize_t pohmelfs_write(struct file *file, const char __user *buf,
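
The helper used above comes from earlier in this series; roughly, it wraps
sync_inode() with a metadata-only writeback_control so callers no longer
build one by hand. A sketch, assuming the fs-writeback implementation:

int sync_inode_metadata(struct inode *inode, int wait)
{
        struct writeback_control wbc = {
                .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
                .nr_to_write = 0, /* metadata-only */
        };

        return sync_inode(inode, &wbc);
}
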
index 095fa53666909e02e2325badb15bfe5f63dc139b..e2f63c0ea09df16286ea8c4ad04a743ff3d0a9b5 100644 (file)
@@ -276,6 +276,7 @@ static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t de
        struct inode *inode = new_inode(sb);
 
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = current_fsgid();
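
new_inode() no longer assigns a default i_ino in this series, so pseudo
filesystems like usbfs pick one up explicitly. A sketch of the allocator
(per-CPU batching is the point; details may differ from fs/inode.c):

#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;

#ifdef CONFIG_SMP
        /* refill this CPU's range from a shared counter once per batch */
        if (unlikely((res & (LAST_INO_BATCH - 1)) == 0)) {
                static atomic_t shared_last_ino;
                int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

                res = next - LAST_INO_BATCH;
        }
#endif

        *p = ++res;
        put_cpu_var(last_ino);
        return res;
}
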
index e4f59505520889e86ecd7d131e9ba283cdbf3dc9..f276e9594f00eab91ff7f789b58e80e7a95a8bcc 100644 (file)
@@ -980,6 +980,7 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
        if (likely(inode)) {
                struct timespec current_time = CURRENT_TIME;
 
+               inode->i_ino     = get_next_ino();
                inode->i_mode    = perms->mode;
                inode->i_uid     = perms->uid;
                inode->i_gid     = perms->gid;
index d1d72d946b04d465014b593e7cd6c39604a57f4e..ba145e7fbe03ef27fcd37598dc46a9f1fb31eb27 100644 (file)
@@ -1991,6 +1991,7 @@ gadgetfs_make_inode (struct super_block *sb,
        struct inode *inode = new_inode (sb);
 
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_uid = default_uid;
                inode->i_gid = default_gid;
index 596ef6b922bfa4965682179b29fce8c0cd3bfca1..27c1fb4b1e0d71f91dcdd7d528dafa3b3457cb32 100644 (file)
@@ -17,6 +17,8 @@ source "drivers/gpu/vga/Kconfig"
 
 source "drivers/gpu/drm/Kconfig"
 
+source "drivers/gpu/stub/Kconfig"
+
 config VGASTATE
        tristate
        default n
index e77e8e4280fb4005854f68c5450031d4764b040b..4ea187d937681ae154eeaef876675dbf9fd5622c 100644 (file)
@@ -1079,7 +1079,7 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
         * clock can only be obtained by dividing this value by an even integer.
         * Fall back to a slower pixel clock if necessary. */
        pixclock = max((u32)(PICOS2KHZ(var->pixclock) * 1000), fbi->monspecs.dclkmin);
-       pixclock = min(pixclock, min(fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2));
+       pixclock = min3(pixclock, fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2);
 
        if (AU1200_LCD_MAX_CLK % pixclock) {
                int diff = AU1200_LCD_MAX_CLK % pixclock;
index 7d24b0d94ed4756615ef66b3304b270f66707ba4..347f17edad77bab15f1e5ca286a833a3a8392906 100644 (file)
@@ -261,7 +261,7 @@ static void init_evtchn_cpu_bindings(void)
        }
 #endif
 
-       memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
+       memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
 }
 
 static inline void clear_evtchn(int port)
@@ -377,7 +377,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                irq = find_unbound_irq();
 
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                             handle_edge_irq, "event");
+                                             handle_fasteoi_irq, "event");
 
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
@@ -435,6 +435,11 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
        irq = per_cpu(virq_to_irq, cpu)[virq];
 
        if (irq == -1) {
+               irq = find_unbound_irq();
+
+               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+                                             handle_percpu_irq, "virq");
+
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
@@ -442,11 +447,6 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
                        BUG();
                evtchn = bind_virq.port;
 
-               irq = find_unbound_irq();
-
-               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
-                                             handle_percpu_irq, "virq");
-
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);
 
@@ -578,41 +578,75 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 {
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
+       unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);
+       struct vcpu_info *v;
 
        spin_lock_irqsave(&debug_lock, flags);
 
-       printk("vcpu %d\n  ", cpu);
+       printk("\nvcpu %d\n  ", cpu);
 
        for_each_online_cpu(i) {
-               struct vcpu_info *v = per_cpu(xen_vcpu, i);
-               printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
-                       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
-                       v->evtchn_upcall_pending,
-                       v->evtchn_pending_sel);
+               int pending;
+               v = per_cpu(xen_vcpu, i);
+               pending = (get_irq_regs() && i == cpu)
+                       ? xen_irqs_disabled(get_irq_regs())
+                       : v->evtchn_upcall_mask;
+               printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
+                      pending, v->evtchn_upcall_pending,
+                      (int)(sizeof(v->evtchn_pending_sel)*2),
+                      v->evtchn_pending_sel);
+       }
+       v = per_cpu(xen_vcpu, cpu);
+
+       printk("\npending:\n   ");
+       for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+               printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
+                      sh->evtchn_pending[i],
+                      i % 8 == 0 ? "\n   " : " ");
+       printk("\nglobal mask:\n   ");
+       for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+               printk("%0*lx%s",
+                      (int)(sizeof(sh->evtchn_mask[0])*2),
+                      sh->evtchn_mask[i],
+                      i % 8 == 0 ? "\n   " : " ");
+
+       printk("\nglobally unmasked:\n   ");
+       for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+               printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+                      sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+                      i % 8 == 0 ? "\n   " : " ");
+
+       printk("\nlocal cpu%d mask:\n   ", cpu);
+       for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
+               printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
+                      cpu_evtchn[i],
+                      i % 8 == 0 ? "\n   " : " ");
+
+       printk("\nlocally unmasked:\n   ");
+       for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
+               unsigned long pending = sh->evtchn_pending[i]
+                       & ~sh->evtchn_mask[i]
+                       & cpu_evtchn[i];
+               printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+                      pending, i % 8 == 0 ? "\n   " : " ");
        }
-       printk("pending:\n   ");
-       for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
-               printk("%08lx%s", sh->evtchn_pending[i],
-                       i % 8 == 0 ? "\n   " : " ");
-       printk("\nmasks:\n   ");
-       for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-               printk("%08lx%s", sh->evtchn_mask[i],
-                       i % 8 == 0 ? "\n   " : " ");
-
-       printk("\nunmasked:\n   ");
-       for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-               printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
-                       i % 8 == 0 ? "\n   " : " ");
 
        printk("\npending list:\n");
-       for(i = 0; i < NR_EVENT_CHANNELS; i++) {
+       for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
-                       printk("  %d: event %d -> irq %d\n",
+                       int word_idx = i / BITS_PER_LONG;
+                       printk("  %d: event %d -> irq %d%s%s%s\n",
                               cpu_from_evtchn(i), i,
-                              evtchn_to_irq[i]);
+                              evtchn_to_irq[i],
+                              sync_test_bit(word_idx, &v->evtchn_pending_sel)
+                                            ? "" : " l2-clear",
+                              !sync_test_bit(i, sh->evtchn_mask)
+                                            ? "" : " globally-masked",
+                              sync_test_bit(i, cpu_evtchn)
+                                            ? "" : " locally-masked");
                }
        }
 
@@ -663,6 +697,9 @@ static void __xen_evtchn_do_upcall(void)
                                int irq = evtchn_to_irq[port];
                                struct irq_desc *desc;
 
+                               mask_evtchn(port);
+                               clear_evtchn(port);
+
                                if (irq != -1) {
                                        desc = irq_to_desc(irq);
                                        if (desc)
@@ -800,10 +837,10 @@ static void ack_dynirq(unsigned int irq)
 {
        int evtchn = evtchn_from_irq(irq);
 
-       move_native_irq(irq);
+       move_masked_irq(irq);
 
        if (VALID_EVTCHN(evtchn))
-               clear_evtchn(evtchn);
+               unmask_evtchn(evtchn);
 }
 
 static int retrigger_dynirq(unsigned int irq)
@@ -959,7 +996,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,
 
-       .ack            = ack_dynirq,
+       .eoi            = ack_dynirq,
        .set_affinity   = set_affinity_irq,
        .retrigger      = retrigger_dynirq,
 };
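
Taken together, the three hunks above move event channels from an edge-style
ack to a mask/eoi discipline; the ordering, sketched with the names used in
this file:

/*
 * __xen_evtchn_do_upcall():
 *      mask_evtchn(port);        - keep the channel quiet while handling
 *      clear_evtchn(port);       - ack before the handler runs
 *      generic_handle_irq(irq);  - runs the fasteoi flow handler
 *
 * ack_dynirq(), now wired up as .eoi rather than .ack:
 *      move_masked_irq(irq);     - safe: the channel is still masked
 *      unmask_evtchn(evtchn);    - re-enable delivery last
 */
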
index d409495876f11b24fabaebc01d57f02224fe459f..132939f3602014abb1c9c62e3c97236fd7d45e02 100644 (file)
 
 
 int xen_store_evtchn;
-EXPORT_SYMBOL(xen_store_evtchn);
+EXPORT_SYMBOL_GPL(xen_store_evtchn);
 
 struct xenstore_domain_interface *xen_store_interface;
+EXPORT_SYMBOL_GPL(xen_store_interface);
+
 static unsigned long xen_store_mfn;
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
index 25275c3bbdffe0b8889eed5def224f1b9a5ce0d2..4fde9440fe1f453aad9a4d6e4a98dceecbf6dbe4 100644 (file)
@@ -1,3 +1,4 @@
 obj-$(CONFIG_XENFS) += xenfs.o
 
-xenfs-objs = super.o xenbus.o
\ No newline at end of file
+xenfs-y                          = super.o xenbus.o privcmd.o
+xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
new file mode 100644 (file)
index 0000000..f80be7f
--- /dev/null
@@ -0,0 +1,404 @@
+/******************************************************************************
+ * privcmd.c
+ *
+ * Interface to privileged domain-0 commands.
+ *
+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/xen.h>
+#include <xen/privcmd.h>
+#include <xen/interface/xen.h>
+#include <xen/features.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
+
+#ifndef HAVE_ARCH_PRIVCMD_MMAP
+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
+#endif
+
+static long privcmd_ioctl_hypercall(void __user *udata)
+{
+       struct privcmd_hypercall hypercall;
+       long ret;
+
+       if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
+               return -EFAULT;
+
+       ret = privcmd_call(hypercall.op,
+                          hypercall.arg[0], hypercall.arg[1],
+                          hypercall.arg[2], hypercall.arg[3],
+                          hypercall.arg[4]);
+
+       return ret;
+}
+
+static void free_page_list(struct list_head *pages)
+{
+       struct page *p, *n;
+
+       list_for_each_entry_safe(p, n, pages, lru)
+               __free_page(p);
+
+       INIT_LIST_HEAD(pages);
+}
+
+/*
+ * Given an array of items in userspace, return a list of pages
+ * containing the data.  If copying fails, either because of memory
+ * allocation failure or a problem reading user memory, return an
+ * error code; it's up to the caller to dispose of any partial list.
+ */
+static int gather_array(struct list_head *pagelist,
+                       unsigned nelem, size_t size,
+                       void __user *data)
+{
+       unsigned pageidx;
+       void *pagedata;
+       int ret;
+
+       if (size > PAGE_SIZE)
+               return 0;
+
+       pageidx = PAGE_SIZE;
+       pagedata = NULL;        /* quiet, gcc */
+       while (nelem--) {
+               if (pageidx > PAGE_SIZE-size) {
+                       struct page *page = alloc_page(GFP_KERNEL);
+
+                       ret = -ENOMEM;
+                       if (page == NULL)
+                               goto fail;
+
+                       pagedata = page_address(page);
+
+                       list_add_tail(&page->lru, pagelist);
+                       pageidx = 0;
+               }
+
+               ret = -EFAULT;
+               if (copy_from_user(pagedata + pageidx, data, size))
+                       goto fail;
+
+               data += size;
+               pageidx += size;
+       }
+
+       ret = 0;
+
+fail:
+       return ret;
+}
+
+/*
+ * Call function "fn" on each element of the array fragmented
+ * over a list of pages.
+ */
+static int traverse_pages(unsigned nelem, size_t size,
+                         struct list_head *pos,
+                         int (*fn)(void *data, void *state),
+                         void *state)
+{
+       void *pagedata;
+       unsigned pageidx;
+       int ret = 0;
+
+       BUG_ON(size > PAGE_SIZE);
+
+       pageidx = PAGE_SIZE;
+       pagedata = NULL;        /* hush, gcc */
+
+       while (nelem--) {
+               if (pageidx > PAGE_SIZE-size) {
+                       struct page *page;
+                       pos = pos->next;
+                       page = list_entry(pos, struct page, lru);
+                       pagedata = page_address(page);
+                       pageidx = 0;
+               }
+
+               ret = (*fn)(pagedata + pageidx, state);
+               if (ret)
+                       break;
+               pageidx += size;
+       }
+
+       return ret;
+}
+
+struct mmap_mfn_state {
+       unsigned long va;
+       struct vm_area_struct *vma;
+       domid_t domain;
+};
+
+static int mmap_mfn_range(void *data, void *state)
+{
+       struct privcmd_mmap_entry *msg = data;
+       struct mmap_mfn_state *st = state;
+       struct vm_area_struct *vma = st->vma;
+       int rc;
+
+       /* Do not allow range to wrap the address space. */
+       if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
+           ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
+               return -EINVAL;
+
+       /* Range chunks must be contiguous in va space. */
+       if ((msg->va != st->va) ||
+           ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
+               return -EINVAL;
+
+       rc = xen_remap_domain_mfn_range(vma,
+                                       msg->va & PAGE_MASK,
+                                       msg->mfn, msg->npages,
+                                       vma->vm_page_prot,
+                                       st->domain);
+       if (rc < 0)
+               return rc;
+
+       st->va += msg->npages << PAGE_SHIFT;
+
+       return 0;
+}
+
+static long privcmd_ioctl_mmap(void __user *udata)
+{
+       struct privcmd_mmap mmapcmd;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int rc;
+       LIST_HEAD(pagelist);
+       struct mmap_mfn_state state;
+
+       if (!xen_initial_domain())
+               return -EPERM;
+
+       if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
+               return -EFAULT;
+
+       rc = gather_array(&pagelist,
+                         mmapcmd.num, sizeof(struct privcmd_mmap_entry),
+                         mmapcmd.entry);
+
+       if (rc || list_empty(&pagelist))
+               goto out;
+
+       down_write(&mm->mmap_sem);
+
+       {
+               struct page *page = list_first_entry(&pagelist,
+                                                    struct page, lru);
+               struct privcmd_mmap_entry *msg = page_address(page);
+
+               vma = find_vma(mm, msg->va);
+               rc = -EINVAL;
+
+               if (!vma || (msg->va != vma->vm_start) ||
+                   !privcmd_enforce_singleshot_mapping(vma))
+                       goto out_up;
+       }
+
+       state.va = vma->vm_start;
+       state.vma = vma;
+       state.domain = mmapcmd.dom;
+
+       rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
+                           &pagelist,
+                           mmap_mfn_range, &state);
+
+
+out_up:
+       up_write(&mm->mmap_sem);
+
+out:
+       free_page_list(&pagelist);
+
+       return rc;
+}
+
+struct mmap_batch_state {
+       domid_t domain;
+       unsigned long va;
+       struct vm_area_struct *vma;
+       int err;
+
+       xen_pfn_t __user *user;
+};
+
+static int mmap_batch_fn(void *data, void *state)
+{
+       xen_pfn_t *mfnp = data;
+       struct mmap_batch_state *st = state;
+
+       if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
+                                      st->vma->vm_page_prot, st->domain) < 0) {
+               *mfnp |= 0xf0000000U;
+               st->err++;
+       }
+       st->va += PAGE_SIZE;
+
+       return 0;
+}
+
+static int mmap_return_errors(void *data, void *state)
+{
+       xen_pfn_t *mfnp = data;
+       struct mmap_batch_state *st = state;
+
+       put_user(*mfnp, st->user++);
+
+       return 0;
+}
+
+static struct vm_operations_struct privcmd_vm_ops;
+
+static long privcmd_ioctl_mmap_batch(void __user *udata)
+{
+       int ret;
+       struct privcmd_mmapbatch m;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long nr_pages;
+       LIST_HEAD(pagelist);
+       struct mmap_batch_state state;
+
+       if (!xen_initial_domain())
+               return -EPERM;
+
+       if (copy_from_user(&m, udata, sizeof(m)))
+               return -EFAULT;
+
+       nr_pages = m.num;
+       if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
+               return -EINVAL;
+
+       ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
+                          m.arr);
+
+       if (ret || list_empty(&pagelist))
+               goto out;
+
+       down_write(&mm->mmap_sem);
+
+       vma = find_vma(mm, m.addr);
+       ret = -EINVAL;
+       if (!vma ||
+           vma->vm_ops != &privcmd_vm_ops ||
+           (m.addr != vma->vm_start) ||
+           ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
+           !privcmd_enforce_singleshot_mapping(vma)) {
+               up_write(&mm->mmap_sem);
+               goto out;
+       }
+
+       state.domain = m.dom;
+       state.vma = vma;
+       state.va = m.addr;
+       state.err = 0;
+
+       ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+                            &pagelist, mmap_batch_fn, &state);
+
+       up_write(&mm->mmap_sem);
+
+       if (state.err > 0) {
+               ret = 0;
+
+               state.user = m.arr;
+               traverse_pages(m.num, sizeof(xen_pfn_t),
+                              &pagelist,
+                              mmap_return_errors, &state);
+       }
+
+out:
+       free_page_list(&pagelist);
+
+       return ret;
+}
+
+static long privcmd_ioctl(struct file *file,
+                         unsigned int cmd, unsigned long data)
+{
+       int ret = -ENOSYS;
+       void __user *udata = (void __user *) data;
+
+       switch (cmd) {
+       case IOCTL_PRIVCMD_HYPERCALL:
+               ret = privcmd_ioctl_hypercall(udata);
+               break;
+
+       case IOCTL_PRIVCMD_MMAP:
+               ret = privcmd_ioctl_mmap(udata);
+               break;
+
+       case IOCTL_PRIVCMD_MMAPBATCH:
+               ret = privcmd_ioctl_mmap_batch(udata);
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+#ifndef HAVE_ARCH_PRIVCMD_MMAP
+static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+              vma, vma->vm_start, vma->vm_end,
+              vmf->pgoff, vmf->virtual_address);
+
+       return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct privcmd_vm_ops = {
+       .fault = privcmd_fault
+};
+
+static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       /* Unsupported for auto-translate guests. */
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return -ENOSYS;
+
+       /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+       vma->vm_ops = &privcmd_vm_ops;
+       vma->vm_private_data = NULL;
+
+       return 0;
+}
+
+static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
+{
+       return (xchg(&vma->vm_private_data, (void *)1) == NULL);
+}
+#endif
+
+const struct file_operations privcmd_file_ops = {
+       .unlocked_ioctl = privcmd_ioctl,
+       .mmap = privcmd_mmap,
+};
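
A hypothetical userspace caller of the new node, assuming xenfs is mounted at
/proc/xen as is conventional, and that the privcmd_hypercall layout and ioctl
number are visible via the kernel's xen/privcmd.h:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>        /* IOCTL_PRIVCMD_HYPERCALL */

int main(void)
{
        struct privcmd_hypercall call = {
                .op  = 17,      /* __HYPERVISOR_xen_version */
                .arg = { 0 },   /* XENVER_version */
        };
        long ver;
        int fd = open("/proc/xen/privcmd", O_RDWR);

        if (fd < 0)
                return 1;
        /* the ioctl return value is the hypercall's return value */
        ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
        if (ver >= 0)
                printf("Xen %ld.%ld\n", ver >> 16, ver & 0xffff);
        close(fd);
        return ver < 0;
}
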
index bd96340063c1d6175f633fa507e8c7e2bf34ba93..d6662b789b6bf05a3cc172623216ea662e681781 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/magic.h>
+#include <linux/mm.h>
+#include <linux/backing-dev.h>
 
 #include <xen/xen.h>
 
 MODULE_DESCRIPTION("Xen filesystem");
 MODULE_LICENSE("GPL");
 
+static int xenfs_set_page_dirty(struct page *page)
+{
+       return !TestSetPageDirty(page);
+}
+
+static const struct address_space_operations xenfs_aops = {
+       .set_page_dirty = xenfs_set_page_dirty,
+};
+
+static struct backing_dev_info xenfs_backing_dev_info = {
+       .ra_pages       = 0,    /* No readahead */
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
+};
+
+static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
+{
+       struct inode *ret = new_inode(sb);
+
+       if (ret) {
+               ret->i_mode = mode;
+               ret->i_mapping->a_ops = &xenfs_aops;
+               ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info;
+               ret->i_uid = ret->i_gid = 0;
+               ret->i_blocks = 0;
+               ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+       }
+       return ret;
+}
+
+static struct dentry *xenfs_create_file(struct super_block *sb,
+                                       struct dentry *parent,
+                                       const char *name,
+                                       const struct file_operations *fops,
+                                       void *data,
+                                       int mode)
+{
+       struct dentry *dentry;
+       struct inode *inode;
+
+       dentry = d_alloc_name(parent, name);
+       if (!dentry)
+               return NULL;
+
+       inode = xenfs_make_inode(sb, S_IFREG | mode);
+       if (!inode) {
+               dput(dentry);
+               return NULL;
+       }
+
+       inode->i_fop = fops;
+       inode->i_private = data;
+
+       d_add(dentry, inode);
+       return dentry;
+}
+
 static ssize_t capabilities_read(struct file *file, char __user *buf,
                                 size_t size, loff_t *off)
 {
@@ -44,10 +102,23 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
                [1] = {},
                { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
                { "capabilities", &capabilities_file_ops, S_IRUGO },
+               { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR },
                {""},
        };
+       int rc;
 
-       return simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
+       rc = simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
+       if (rc < 0)
+               return rc;
+
+       if (xen_initial_domain()) {
+               xenfs_create_file(sb, sb->s_root, "xsd_kva",
+                                 &xsd_kva_file_ops, NULL, S_IRUSR|S_IWUSR);
+               xenfs_create_file(sb, sb->s_root, "xsd_port",
+                                 &xsd_port_file_ops, NULL, S_IRUSR|S_IWUSR);
+       }
+
+       return rc;
 }
 
 static int xenfs_get_sb(struct file_system_type *fs_type,
@@ -66,11 +137,25 @@ static struct file_system_type xenfs_type = {
 
 static int __init xenfs_init(void)
 {
-       if (xen_domain())
-               return register_filesystem(&xenfs_type);
+       int err;
+       if (!xen_domain()) {
+               printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n");
+               return 0;
+       }
+
+       err = register_filesystem(&xenfs_type);
+       if (err) {
+               printk(KERN_ERR "xenfs: Unable to register filesystem!\n");
+               goto out;
+       }
+
+       err = bdi_init(&xenfs_backing_dev_info);
+       if (err)
+               unregister_filesystem(&xenfs_type);
+
+ out:
 
-       printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
-       return 0;
+       return err;
 }
 
 static void __exit xenfs_exit(void)
index 51f08b2d0bf15a44732bd35a31ec18c36e5ed24c..b68aa6200003575549e7a5844ae7b43b6bec135f 100644 (file)
@@ -2,5 +2,8 @@
 #define _XENFS_XENBUS_H
 
 extern const struct file_operations xenbus_file_ops;
+extern const struct file_operations privcmd_file_ops;
+extern const struct file_operations xsd_kva_file_ops;
+extern const struct file_operations xsd_port_file_ops;
 
 #endif /* _XENFS_XENBUS_H */
diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
new file mode 100644 (file)
index 0000000..fef20db
--- /dev/null
@@ -0,0 +1,68 @@
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+
+#include <xen/page.h>
+
+#include "xenfs.h"
+#include "../xenbus/xenbus_comms.h"
+
+static ssize_t xsd_read(struct file *file, char __user *buf,
+                           size_t size, loff_t *off)
+{
+       const char *str = (const char *)file->private_data;
+       return simple_read_from_buffer(buf, size, off, str, strlen(str));
+}
+
+static int xsd_release(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+       return 0;
+}
+
+static int xsd_kva_open(struct inode *inode, struct file *file)
+{
+       file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
+                                              xen_store_interface);
+       if (!file->private_data)
+               return -ENOMEM;
+       return 0;
+}
+
+static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       size_t size = vma->vm_end - vma->vm_start;
+
+       if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
+               return -EINVAL;
+
+       if (remap_pfn_range(vma, vma->vm_start,
+                           virt_to_pfn(xen_store_interface),
+                           size, vma->vm_page_prot))
+               return -EAGAIN;
+
+       return 0;
+}
+
+const struct file_operations xsd_kva_file_ops = {
+       .open = xsd_kva_open,
+       .mmap = xsd_kva_mmap,
+       .read = xsd_read,
+       .release = xsd_release,
+};
+
+static int xsd_port_open(struct inode *inode, struct file *file)
+{
+       file->private_data = (void *)kasprintf(GFP_KERNEL, "%d",
+                                              xen_store_evtchn);
+       if (!file->private_data)
+               return -ENOMEM;
+       return 0;
+}
+
+const struct file_operations xsd_port_file_ops = {
+       .open = xsd_port_open,
+       .read = xsd_read,
+       .release = xsd_release,
+};
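
Correspondingly, a hypothetical userspace xenstored could pick up its event
channel like this (the path again assumes the conventional /proc/xen mount):

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int read_xsd_port(void)
{
        char buf[16] = "";
        int fd = open("/proc/xen/xsd_port", O_RDONLY);

        if (fd < 0)
                return -1;
        if (read(fd, buf, sizeof(buf) - 1) < 0)
                buf[0] = '\0';
        close(fd);
        return atoi(buf);       /* xsd_port_open() formatted it with "%d" */
}
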
index 5a03ba8c8364e9cc4dde9655871d4a13b6ff2463..ba0cf0b601bb925fc3ce514d1f1dd55569feac91 100644 (file)
@@ -55,6 +55,7 @@ static int output_records(int outfd);
 
 static int sort_records = 0;
 static int wide_records = 0;
+static int include_jump = 0;
 
 static int usage(void)
 {
@@ -63,6 +64,7 @@ static int usage(void)
        fprintf(stderr, "usage: ihex2fw [<options>] <src.HEX> <dst.fw>\n");
        fprintf(stderr, "       -w: wide records (16-bit length)\n");
        fprintf(stderr, "       -s: sort records by address\n");
+       fprintf(stderr, "       -j: include records for CS:IP/EIP address\n");
        return 1;
 }
 
@@ -73,7 +75,7 @@ int main(int argc, char **argv)
        uint8_t *data;
        int opt;
 
-       while ((opt = getopt(argc, argv, "ws")) != -1) {
+       while ((opt = getopt(argc, argv, "wsj")) != -1) {
                switch (opt) {
                case 'w':
                        wide_records = 1;
@@ -81,7 +83,10 @@
                case 's':
                        sort_records = 1;
                        break;
+               case 'j':
+                       include_jump = 1;
+                       break;
                default:
                        return usage();
                }
        }
@@ -128,6 +132,7 @@ static int process_ihex(uint8_t *data, ssize_t size)
 {
        struct ihex_binrec *record;
        uint32_t offset = 0;
+       uint32_t data32;
        uint8_t type, crc = 0, crcbyte = 0;
        int i, j;
        int line = 1;
@@ -223,8 +228,14 @@ next_record:
                        return -EINVAL;
                }
 
+               memcpy(&data32, &record->data[0], sizeof(data32));
+               data32 = htonl(data32);
+               memcpy(&record->data[0], &data32, sizeof(data32));
+
                /* These records contain the CS/IP or EIP where execution
-                * starts. Don't really know what to do with them. */
+                * starts. If requested, output this as a record. */
+               if (include_jump)
+                       file_record(record);
                goto next_record;
 
        default:
index 9e670d527646fc4abe2be6f0dfc992f4a3178042..ef5905f7c8a39c9395362d5e368a846d43fcedbd 100644 (file)
@@ -1789,9 +1789,10 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
                kfree(st);
        } else {
                /* Caching disabled. No need to get up-to-date stat info.
-                * This dentry will be released immediately. So, just i_count++
+                * This dentry will be released immediately. So, just hold the
+                * inode
                 */
-               atomic_inc(&old_dentry->d_inode->i_count);
+               ihold(old_dentry->d_inode);
        }
 
        dentry->d_op = old_dentry->d_op;
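
The many atomic_inc(&inode->i_count) conversions in this merge all lean on
the ihold() helper added earlier in the series; it is essentially the same
increment plus a sanity check. A sketch:

/* The caller must already hold a reference, so the count can never
 * legitimately go 0 -> 1 here. */
void ihold(struct inode *inode)
{
        WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
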
index 3d185308ec883bd0c06be0d151385c65e15b3353..97673c955484463e4181d4e3f689742bbe558a06 100644 (file)
@@ -47,6 +47,9 @@ source "fs/nilfs2/Kconfig"
 
 endif # BLOCK
 
+config EXPORTFS
+       tristate
+
 config FILE_LOCKING
        bool "Enable POSIX file locking API" if EMBEDDED
        default y
@@ -221,9 +224,6 @@ config LOCKD_V4
        depends on FILE_LOCKING
        default y
 
-config EXPORTFS
-       tristate
-
 config NFS_ACL_SUPPORT
        tristate
        select FS_POSIX_ACL
index c4a9875bd1a60520c682bed74f7a27bc3d2e1da0..0a90dcd46de28d33f2768fde829ac7800abdc9e4 100644 (file)
@@ -894,9 +894,9 @@ affs_truncate(struct inode *inode)
                if (AFFS_SB(sb)->s_flags & SF_OFS) {
                        struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
                        u32 tmp;
-                       if (IS_ERR(ext_bh)) {
+                       if (IS_ERR(bh)) {
                                affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
-                                            ext, PTR_ERR(ext_bh));
+                                            ext, PTR_ERR(bh));
                                return;
                        }
                        tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
index 3a0fdec175ba6d5d84ea8ae27f117d0e2cc46644..5d828903ac69ced919eaa3d6eb2429a49922d0e2 100644 (file)
@@ -388,7 +388,7 @@ affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s3
                affs_adjust_checksum(inode_bh, block - be32_to_cpu(chain));
                mark_buffer_dirty_inode(inode_bh, inode);
                inode->i_nlink = 2;
-               atomic_inc(&inode->i_count);
+               ihold(inode);
        }
        affs_fix_checksum(sb, bh);
        mark_buffer_dirty_inode(bh, inode);
index 0d38c09bd55e10f4eaf5f0e25ef11fa3d9d82919..5439e1bc9a86a5702094eaea35eb4c6ebd8e67fe 100644 (file)
@@ -1045,7 +1045,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
        if (ret < 0)
                goto link_error;
 
-       atomic_inc(&vnode->vfs_inode.i_count);
+       ihold(&vnode->vfs_inode);
        d_instantiate(dentry, &vnode->vfs_inode);
        key_put(key);
        _leave(" = 0");
index 722743b152d817e94d5ea44131c96026f45e6898..15690bb1d3b531e65ae9cce255eaf532081667db 100644 (file)
@@ -438,7 +438,6 @@ no_more:
  */
 int afs_writepage(struct page *page, struct writeback_control *wbc)
 {
-       struct backing_dev_info *bdi = page->mapping->backing_dev_info;
        struct afs_writeback *wb;
        int ret;
 
@@ -455,8 +454,6 @@ int afs_writepage(struct page *page, struct writeback_control *wbc)
        }
 
        wbc->nr_to_write -= ret;
-       if (wbc->nonblocking && bdi_write_congested(bdi))
-               wbc->encountered_congestion = 1;
 
        _leave(" = 0");
        return 0;
@@ -469,7 +466,6 @@ static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 pgoff_t index, pgoff_t end, pgoff_t *_next)
 {
-       struct backing_dev_info *bdi = mapping->backing_dev_info;
        struct afs_writeback *wb;
        struct page *page;
        int ret, n;
@@ -529,11 +525,6 @@ static int afs_writepages_region(struct address_space *mapping,
 
                wbc->nr_to_write -= ret;
 
-               if (wbc->nonblocking && bdi_write_congested(bdi)) {
-                       wbc->encountered_congestion = 1;
-                       break;
-               }
-
                cond_resched();
        } while (index < end && wbc->nr_to_write > 0);
 
@@ -548,24 +539,16 @@ static int afs_writepages_region(struct address_space *mapping,
 int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
 {
-       struct backing_dev_info *bdi = mapping->backing_dev_info;
        pgoff_t start, end, next;
        int ret;
 
        _enter("");
 
-       if (wbc->nonblocking && bdi_write_congested(bdi)) {
-               wbc->encountered_congestion = 1;
-               _leave(" = 0 [congest]");
-               return 0;
-       }
-
        if (wbc->range_cyclic) {
                start = mapping->writeback_index;
                end = -1;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
-               if (start > 0 && wbc->nr_to_write > 0 && ret == 0 &&
-                   !(wbc->nonblocking && wbc->encountered_congestion))
+               if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
                        ret = afs_writepages_region(mapping, wbc, 0, start,
                                                    &next);
                mapping->writeback_index = next;
index 250b0a73c8a8ca92b78c3a0282d2425ce7649dcf..8c8f6c5b6d7930657f7a3aadf44fe4b4dc96b738 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1543,7 +1543,19 @@ static void aio_batch_add(struct address_space *mapping,
        }
 
        abe = mempool_alloc(abe_pool, GFP_KERNEL);
-       BUG_ON(!igrab(mapping->host));
+
+       /*
+        * we should be using igrab here, but
+        * we don't want to hammer on the global
+        * inode spinlock just to take an extra
+        * reference on a file that we must already
+        * have a reference to.
+        *
+        * When we're called, we always have a reference
+        * on the file, so we must always have a reference
+        * on the inode, so ihold() is safe here.
+        */
+       ihold(mapping->host);
        abe->mapping = mapping;
        hlist_add_head(&abe->list, &batch_hash[bucket]);
        return;
index e4b75d6eda839d6fddae166904d1002f85a4d09f..5365527ca43fec37cdc021afbc21acf76c321889 100644 (file)
@@ -111,10 +111,9 @@ struct file *anon_inode_getfile(const char *name,
        path.mnt = mntget(anon_inode_mnt);
        /*
         * We know the anon_inode inode count is always greater than zero,
-        * so we can avoid doing an igrab() and we can use an open-coded
-        * atomic_inc().
+        * so ihold() is safe.
         */
-       atomic_inc(&anon_inode_inode->i_count);
+       ihold(anon_inode_inode);
 
        path.dentry->d_op = &anon_inodefs_dentry_operations;
        d_instantiate(path.dentry, anon_inode_inode);
@@ -194,6 +193,7 @@ static struct inode *anon_inode_mkinode(void)
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
+       inode->i_ino = get_next_ino();
        inode->i_fop = &anon_inode_fops;
 
        inode->i_mapping->a_ops = &anon_aops;
index 821b2b955dac2c7b847a91d7abc91d4e0162c838..ac87e49fa70604e4b8238cd95fbae7a3cc1dcb07 100644 (file)
@@ -398,6 +398,7 @@ struct inode *autofs4_get_inode(struct super_block *sb,
                inode->i_gid = sb->s_root->d_inode->i_gid;
        }
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       inode->i_ino = get_next_ino();
 
        if (S_ISDIR(inf->mode)) {
                inode->i_nlink = 2;
index d967e052b779d80490e08e097f5849ddc88ae48d..685ecff3ab31728d2362a9be39f98dfbc76947c4 100644 (file)
@@ -176,7 +176,7 @@ static int bfs_link(struct dentry *old, struct inode *dir,
        inc_nlink(inode);
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        d_instantiate(new, inode);
        mutex_unlock(&info->bfs_lock);
        return 0;
index 139fc8083f53cbd9351b3c4ebeeedc79f30102b4..29990f0eee0c90adb18771c2c327fc99ee32c765 100644 (file)
@@ -495,6 +495,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
        struct inode * inode = new_inode(sb);
 
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                        current_fs_time(inode->i_sb);
index b737451e2e9dde95ae0d098b0443f44af1d21899..dea3b628a6ce9a9058c24d2b13c364b7c6adb3c3 100644 (file)
@@ -48,6 +48,21 @@ inline struct block_device *I_BDEV(struct inode *inode)
 
 EXPORT_SYMBOL(I_BDEV);
 
+/*
+ * Move the inode from its current bdi to a new bdi. If the inode is dirty,
+ * we need to move it onto the dirty list of @dst so that the inode is always
+ * on the right list.
+ */
+static void bdev_inode_switch_bdi(struct inode *inode,
+                       struct backing_dev_info *dst)
+{
+       spin_lock(&inode_lock);
+       inode->i_data.backing_dev_info = dst;
+       if (inode->i_state & I_DIRTY)
+               list_move(&inode->i_wb_list, &dst->wb.b_dirty);
+       spin_unlock(&inode_lock);
+}
+
 static sector_t max_block(struct block_device *bdev)
 {
        sector_t retval = ~((sector_t)0);
@@ -550,7 +565,7 @@ EXPORT_SYMBOL(bdget);
  */
 struct block_device *bdgrab(struct block_device *bdev)
 {
-       atomic_inc(&bdev->bd_inode->i_count);
+       ihold(bdev->bd_inode);
        return bdev;
 }
 
@@ -580,7 +595,7 @@ static struct block_device *bd_acquire(struct inode *inode)
        spin_lock(&bdev_lock);
        bdev = inode->i_bdev;
        if (bdev) {
-               atomic_inc(&bdev->bd_inode->i_count);
+               ihold(bdev->bd_inode);
                spin_unlock(&bdev_lock);
                return bdev;
        }
@@ -591,12 +606,12 @@ static struct block_device *bd_acquire(struct inode *inode)
                spin_lock(&bdev_lock);
                if (!inode->i_bdev) {
                        /*
-                        * We take an additional bd_inode->i_count for inode,
+                        * We take an additional reference to bd_inode,
                         * and it's released in clear_inode() of inode.
                         * So, we can access it via ->i_mapping always
                         * without igrab().
                         */
-                       atomic_inc(&bdev->bd_inode->i_count);
+                       ihold(bdev->bd_inode);
                        inode->i_bdev = bdev;
                        inode->i_mapping = bdev->bd_inode->i_mapping;
                        list_add(&inode->i_devices, &bdev->bd_inodes);
@@ -1390,7 +1405,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                bdi = blk_get_backing_dev_info(bdev);
                                if (bdi == NULL)
                                        bdi = &default_backing_dev_info;
-                               bdev->bd_inode->i_data.backing_dev_info = bdi;
+                               bdev_inode_switch_bdi(bdev->bd_inode, bdi);
                        }
                        if (bdev->bd_invalidated)
                                rescan_partitions(disk, bdev);
@@ -1405,8 +1420,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                        if (ret)
                                goto out_clear;
                        bdev->bd_contains = whole;
-                       bdev->bd_inode->i_data.backing_dev_info =
-                          whole->bd_inode->i_data.backing_dev_info;
+                       bdev_inode_switch_bdi(bdev->bd_inode,
+                               whole->bd_inode->i_data.backing_dev_info);
                        bdev->bd_part = disk_get_part(disk, partno);
                        if (!(disk->flags & GENHD_FL_UP) ||
                            !bdev->bd_part || !bdev->bd_part->nr_sects) {
@@ -1439,7 +1454,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
        disk_put_part(bdev->bd_part);
        bdev->bd_disk = NULL;
        bdev->bd_part = NULL;
-       bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
+       bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info);
        if (bdev != bdev->bd_contains)
                __blkdev_put(bdev->bd_contains, mode, 1);
        bdev->bd_contains = NULL;
@@ -1533,7 +1548,8 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                disk_put_part(bdev->bd_part);
                bdev->bd_part = NULL;
                bdev->bd_disk = NULL;
-               bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
+               bdev_inode_switch_bdi(bdev->bd_inode,
+                                       &default_backing_dev_info);
                if (bdev != bdev->bd_contains)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
index c03864406af3ed49b66599fc1cdace0b2bbd2e22..64f99cf69ce0c6b2da2cb62c4884fa44c150bdda 100644 (file)
@@ -3849,7 +3849,7 @@ again:
        p = &root->inode_tree.rb_node;
        parent = NULL;
 
-       if (hlist_unhashed(&inode->i_hash))
+       if (inode_unhashed(inode))
                return;
 
        spin_lock(&root->inode_lock);
@@ -4758,7 +4758,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        }
 
        btrfs_set_trans_block_group(trans, dir);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
 
        err = btrfs_add_nondir(trans, dentry, inode, 1, index);
 
index 7f0b9b083f778676ed3024fbecf6c8a77fd399bc..5930e382959bc504c58bbb428588a372742d4aa4 100644 (file)
@@ -905,7 +905,6 @@ try_again:
 
                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
-               bh->b_private = NULL;
                bh->b_size = size;
 
                /* Link the buffer to its page */
@@ -1706,7 +1705,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                 * and kswapd activity, but those code paths have their own
                 * higher-level throttling.
                 */
-               if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+               if (wbc->sync_mode != WB_SYNC_NONE) {
                        lock_buffer(bh);
                } else if (!trylock_buffer(bh)) {
                        redirty_page_for_writepage(wbc, page);
@@ -1834,9 +1833,11 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
 }
 EXPORT_SYMBOL(page_zero_new_buffers);
 
-int block_prepare_write(struct page *page, unsigned from, unsigned to,
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                get_block_t *get_block)
 {
+       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+       unsigned to = from + len;
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end;
        sector_t block;
@@ -1916,7 +1917,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
        }
        return err;
 }
-EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(__block_write_begin);
 
 static int __block_commit_write(struct inode *inode, struct page *page,
                unsigned from, unsigned to)
@@ -1953,15 +1954,6 @@ static int __block_commit_write(struct inode *inode, struct page *page,
        return 0;
 }
 
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
-               get_block_t *get_block)
-{
-       unsigned start = pos & (PAGE_CACHE_SIZE - 1);
-
-       return block_prepare_write(page, start, start + len, get_block);
-}
-EXPORT_SYMBOL(__block_write_begin);
-
 /*
  * block_write_begin takes care of the basic task of block allocation and
  * bringing partial write blocks uptodate first.
@@ -2379,7 +2371,7 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
        else
                end = PAGE_CACHE_SIZE;
 
-       ret = block_prepare_write(page, 0, end, get_block);
+       ret = __block_write_begin(page, 0, end, get_block);
        if (!ret)
                ret = block_commit_write(page, 0, end);
 
@@ -2466,11 +2458,10 @@ int nobh_write_begin(struct address_space *mapping,
        *fsdata = NULL;
 
        if (page_has_buffers(page)) {
-               unlock_page(page);
-               page_cache_release(page);
-               *pagep = NULL;
-               return block_write_begin(mapping, pos, len, flags, pagep,
-                                        get_block);
+               ret = __block_write_begin(page, pos, len, get_block);
+               if (unlikely(ret))
+                       goto out_release;
+               return ret;
        }
 
        if (PageMappedToDisk(page))
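
Call sites of the old block_prepare_write() passed byte offsets within the
page; __block_write_begin() takes a file position and length and derives the
offsets itself. The conversion pattern, sketched as comments:

/*
 * old:  err = block_prepare_write(page, from, to, get_block);
 * new:  err = __block_write_begin(page, pos, len, get_block);
 *       where internally: from = pos & (PAGE_CACHE_SIZE - 1);
 *                         to   = from + len;
 */
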
index 51bcc5ce323024a995d300b4a6035ecf8e72e94b..e9c874abc9e1f4a7ec00b68595fdd674a4f1da2e 100644 (file)
@@ -591,7 +591,6 @@ static int ceph_writepages_start(struct address_space *mapping,
                                 struct writeback_control *wbc)
 {
        struct inode *inode = mapping->host;
-       struct backing_dev_info *bdi = mapping->backing_dev_info;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc;
        pgoff_t index, start, end;
@@ -633,13 +632,6 @@ static int ceph_writepages_start(struct address_space *mapping,
 
        pagevec_init(&pvec, 0);
 
-       /* ?? */
-       if (wbc->nonblocking && bdi_write_congested(bdi)) {
-               dout(" writepages congested\n");
-               wbc->encountered_congestion = 1;
-               goto out_final;
-       }
-
        /* where to start/end? */
        if (wbc->range_cyclic) {
                start = mapping->writeback_index; /* Start from prev offset */
@@ -885,7 +877,6 @@ out:
                rc = 0;  /* vfs expects us to return 0 */
        ceph_put_snap_context(snapc);
        dout("writepages done, rc = %d\n", rc);
-out_final:
        return rc;
 }
 
index 8c81e7b14d53d5d347cf0701d5d65ef8cafb328e..45af003865d2607da7eb0f9ffdc43eed18f82b4d 100644 (file)
@@ -1303,7 +1303,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
 static int cifs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
 {
-       struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned int bytes_to_write;
        unsigned int bytes_written;
        struct cifs_sb_info *cifs_sb;
@@ -1326,15 +1325,6 @@ static int cifs_writepages(struct address_space *mapping,
        int scanned = 0;
        int xid, long_op;
 
-       /*
-        * BB: Is this meaningful for a non-block-device file system?
-        * If it is, we should test it again after we do I/O
-        */
-       if (wbc->nonblocking && bdi_write_congested(bdi)) {
-               wbc->encountered_congestion = 1;
-               return 0;
-       }
-
        cifs_sb = CIFS_SB(mapping->host->i_sb);
 
        /*
index 96fbeab77f2f42476683cd41ead808f519a14a1e..5d8b35539601b819389057b9114dd49a75828597 100644 (file)
@@ -276,7 +276,7 @@ static int coda_link(struct dentry *source_de, struct inode *dir_inode,
        }
 
        coda_dir_update_mtime(dir_inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        d_instantiate(de, inode);
        inc_nlink(inode);
        return 0;
index cf78d44a8d6a8bcff1a546e3606058de97e90722..253476d78ed86f4d07ec04e3ef63578fccdfa223 100644 (file)
@@ -135,6 +135,7 @@ struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd)
 {
        struct inode * inode = new_inode(configfs_sb);
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode->i_mapping->a_ops = &configfs_aops;
                inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
                inode->i_op = &configfs_inode_operations;
index 83293be4814965373d4c81e5b7d91bc63f3a55c6..23702a9d4e6d130b6ef226d1157f1a484f93a282 100644 (file)
@@ -67,33 +67,43 @@ struct dentry_stat_t dentry_stat = {
        .age_limit = 45,
 };
 
-static void __d_free(struct dentry *dentry)
+static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
+static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
+
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
+                  size_t *lenp, loff_t *ppos)
+{
+       dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
+       dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+       return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+#endif
+
+static void __d_free(struct rcu_head *head)
 {
+       struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
+
        WARN_ON(!list_empty(&dentry->d_alias));
        if (dname_external(dentry))
                kfree(dentry->d_name.name);
        kmem_cache_free(dentry_cache, dentry); 
 }
 
-static void d_callback(struct rcu_head *head)
-{
-       struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
-       __d_free(dentry);
-}
-
 /*
- * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
- * inside dcache_lock.
+ * no dcache_lock, please.
  */
 static void d_free(struct dentry *dentry)
 {
+       percpu_counter_dec(&nr_dentry);
        if (dentry->d_op && dentry->d_op->d_release)
                dentry->d_op->d_release(dentry);
+
        /* if dentry was never inserted into hash, immediate free is OK */
        if (hlist_unhashed(&dentry->d_hash))
-               __d_free(dentry);
+               __d_free(&dentry->d_u.d_rcu);
        else
-               call_rcu(&dentry->d_u.d_rcu, d_callback);
+               call_rcu(&dentry->d_u.d_rcu, __d_free);
 }
 
 /*
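
The dentry bookkeeping above now sits behind percpu_counter, which keeps
hot-path updates CPU-local and only folds across CPUs when the value is read.
A minimal, generic usage sketch (not dcache-specific):

#include <linux/percpu_counter.h>

static struct percpu_counter nr_things;

static int __init things_init(void)
{
        return percpu_counter_init(&nr_things, 0);
}

static void thing_created(void)
{
        percpu_counter_inc(&nr_things);         /* cheap per-CPU delta */
}

static s64 things_in_use(void)
{
        /* expensive: sums every CPU's delta; clamped at zero */
        return percpu_counter_sum_positive(&nr_things);
}
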
@@ -123,37 +133,34 @@ static void dentry_iput(struct dentry * dentry)
 }
 
 /*
- * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
+ * dentry_lru_(add|del|move_tail) must be called with dcache_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
-       list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-       dentry->d_sb->s_nr_dentry_unused++;
-       dentry_stat.nr_unused++;
-}
-
-static void dentry_lru_add_tail(struct dentry *dentry)
-{
-       list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-       dentry->d_sb->s_nr_dentry_unused++;
-       dentry_stat.nr_unused++;
+       if (list_empty(&dentry->d_lru)) {
+               list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+               dentry->d_sb->s_nr_dentry_unused++;
+               percpu_counter_inc(&nr_dentry_unused);
+       }
 }
 
 static void dentry_lru_del(struct dentry *dentry)
 {
        if (!list_empty(&dentry->d_lru)) {
-               list_del(&dentry->d_lru);
+               list_del_init(&dentry->d_lru);
                dentry->d_sb->s_nr_dentry_unused--;
-               dentry_stat.nr_unused--;
+               percpu_counter_dec(&nr_dentry_unused);
        }
 }
 
-static void dentry_lru_del_init(struct dentry *dentry)
+static void dentry_lru_move_tail(struct dentry *dentry)
 {
-       if (likely(!list_empty(&dentry->d_lru))) {
-               list_del_init(&dentry->d_lru);
-               dentry->d_sb->s_nr_dentry_unused--;
-               dentry_stat.nr_unused--;
+       if (list_empty(&dentry->d_lru)) {
+               list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+               dentry->d_sb->s_nr_dentry_unused++;
+               percpu_counter_inc(&nr_dentry_unused);
+       } else {
+               list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
        }
 }
 
@@ -172,7 +179,6 @@ static struct dentry *d_kill(struct dentry *dentry)
        struct dentry *parent;
 
        list_del(&dentry->d_u.d_child);
-       dentry_stat.nr_dentry--;        /* For d_free, below */
        /*drops the locks, at that point nobody can reach this dentry */
        dentry_iput(dentry);
        if (IS_ROOT(dentry))
@@ -237,13 +243,15 @@ repeat:
                if (dentry->d_op->d_delete(dentry))
                        goto unhash_it;
        }
+
        /* Unreachable? Get rid of it */
        if (d_unhashed(dentry))
                goto kill_it;
-       if (list_empty(&dentry->d_lru)) {
-               dentry->d_flags |= DCACHE_REFERENCED;
-               dentry_lru_add(dentry);
-       }
+
+       /* Otherwise leave it cached and ensure it's on the LRU */
+       dentry->d_flags |= DCACHE_REFERENCED;
+       dentry_lru_add(dentry);
+
        spin_unlock(&dentry->d_lock);
        spin_unlock(&dcache_lock);
        return;
@@ -318,11 +326,10 @@ int d_invalidate(struct dentry * dentry)
 EXPORT_SYMBOL(d_invalidate);
 
 /* This should be called _only_ with dcache_lock held */
-
 static inline struct dentry * __dget_locked(struct dentry *dentry)
 {
        atomic_inc(&dentry->d_count);
-       dentry_lru_del_init(dentry);
+       dentry_lru_del(dentry);
        return dentry;
 }
 
@@ -441,73 +448,27 @@ static void prune_one_dentry(struct dentry * dentry)
 
                if (dentry->d_op && dentry->d_op->d_delete)
                        dentry->d_op->d_delete(dentry);
-               dentry_lru_del_init(dentry);
+               dentry_lru_del(dentry);
                __d_drop(dentry);
                dentry = d_kill(dentry);
                spin_lock(&dcache_lock);
        }
 }
 
-/*
- * Shrink the dentry LRU on a given superblock.
- * @sb   : superblock to shrink dentry LRU.
- * @count: If count is NULL, we prune all dentries on superblock.
- * @flags: If flags is non-zero, we need to do special processing based on
- * which flags are set. This means we don't need to maintain multiple
- * similar copies of this loop.
- */
-static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+static void shrink_dentry_list(struct list_head *list)
 {
-       LIST_HEAD(referenced);
-       LIST_HEAD(tmp);
        struct dentry *dentry;
-       int cnt = 0;
 
-       BUG_ON(!sb);
-       BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
-       spin_lock(&dcache_lock);
-       if (count != NULL)
-               /* called from prune_dcache() and shrink_dcache_parent() */
-               cnt = *count;
-restart:
-       if (count == NULL)
-               list_splice_init(&sb->s_dentry_lru, &tmp);
-       else {
-               while (!list_empty(&sb->s_dentry_lru)) {
-                       dentry = list_entry(sb->s_dentry_lru.prev,
-                                       struct dentry, d_lru);
-                       BUG_ON(dentry->d_sb != sb);
+       while (!list_empty(list)) {
+               dentry = list_entry(list->prev, struct dentry, d_lru);
+               dentry_lru_del(dentry);
 
-                       spin_lock(&dentry->d_lock);
-                       /*
-                        * If we are honouring the DCACHE_REFERENCED flag and
-                        * the dentry has this flag set, don't free it. Clear
-                        * the flag and put it back on the LRU.
-                        */
-                       if ((flags & DCACHE_REFERENCED)
-                               && (dentry->d_flags & DCACHE_REFERENCED)) {
-                               dentry->d_flags &= ~DCACHE_REFERENCED;
-                               list_move(&dentry->d_lru, &referenced);
-                               spin_unlock(&dentry->d_lock);
-                       } else {
-                               list_move_tail(&dentry->d_lru, &tmp);
-                               spin_unlock(&dentry->d_lock);
-                               cnt--;
-                               if (!cnt)
-                                       break;
-                       }
-                       cond_resched_lock(&dcache_lock);
-               }
-       }
-       while (!list_empty(&tmp)) {
-               dentry = list_entry(tmp.prev, struct dentry, d_lru);
-               dentry_lru_del_init(dentry);
-               spin_lock(&dentry->d_lock);
                /*
                 * We found an inuse dentry which was not removed from
                 * the LRU because of laziness during lookup.  Do not free
                 * it - just keep it off the LRU list.
                 */
+               spin_lock(&dentry->d_lock);
                if (atomic_read(&dentry->d_count)) {
                        spin_unlock(&dentry->d_lock);
                        continue;
@@ -516,13 +477,59 @@ restart:
                /* dentry->d_lock was dropped in prune_one_dentry() */
                cond_resched_lock(&dcache_lock);
        }
-       if (count == NULL && !list_empty(&sb->s_dentry_lru))
-               goto restart;
-       if (count != NULL)
-               *count = cnt;
+}
+
+/**
+ * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
+ * @sb:                superblock to shrink dentry LRU.
+ * @count:     number of entries to prune
+ * @flags:     flags to control the dentry processing
+ *
+ * If @flags contains DCACHE_REFERENCED, referenced dentries will not be pruned.
+ */
+static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+{
+       /* called from prune_dcache() and shrink_dcache_parent() */
+       struct dentry *dentry;
+       LIST_HEAD(referenced);
+       LIST_HEAD(tmp);
+       int cnt = *count;
+
+       spin_lock(&dcache_lock);
+       while (!list_empty(&sb->s_dentry_lru)) {
+               dentry = list_entry(sb->s_dentry_lru.prev,
+                               struct dentry, d_lru);
+               BUG_ON(dentry->d_sb != sb);
+
+               /*
+                * If we are honouring the DCACHE_REFERENCED flag and the
+                * dentry has this flag set, don't free it.  Clear the flag
+                * and put it back on the LRU.
+                */
+               if (flags & DCACHE_REFERENCED) {
+                       spin_lock(&dentry->d_lock);
+                       if (dentry->d_flags & DCACHE_REFERENCED) {
+                               dentry->d_flags &= ~DCACHE_REFERENCED;
+                               list_move(&dentry->d_lru, &referenced);
+                               spin_unlock(&dentry->d_lock);
+                               cond_resched_lock(&dcache_lock);
+                               continue;
+                       }
+                       spin_unlock(&dentry->d_lock);
+               }
+
+               list_move_tail(&dentry->d_lru, &tmp);
+               if (!--cnt)
+                       break;
+               cond_resched_lock(&dcache_lock);
+       }
+
+       *count = cnt;
+       shrink_dentry_list(&tmp);
+
        if (!list_empty(&referenced))
                list_splice(&referenced, &sb->s_dentry_lru);
        spin_unlock(&dcache_lock);
 }
 
 /**
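
The rework splits the old monolithic loop into a scan phase and a free phase: victims are isolated onto a private list under the lock, then handed to shrink_dentry_list(), which shrink_dcache_sb() below reuses on its own. A compressed userspace sketch of that shape (hypothetical names; a singly linked list and one mutex stand in for s_dentry_lru and dcache_lock, and the locking is simplified, since in the kernel shrink_dentry_list() itself still runs under dcache_lock):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_lru;         /* stands in for sb->s_dentry_lru */

/* Scan phase: detach up to 'count' victims while holding the lock. */
static struct node *isolate(int count)
{
        struct node *victims = NULL;

        pthread_mutex_lock(&lock);
        while (shared_lru && count--) {
                struct node *n = shared_lru;

                shared_lru = n->next;
                n->next = victims;      /* move onto the private list */
                victims = n;
        }
        pthread_mutex_unlock(&lock);
        return victims;
}

/* Free phase: walks only the private list, like shrink_dentry_list(). */
static void dispose(struct node *victims)
{
        int freed = 0;

        while (victims) {
                victims = victims->next;
                freed++;                /* the real code frees dentries here */
        }
        printf("freed %d\n", freed);
}

int main(void)
{
        struct node a = { NULL }, b = { &a }, c = { &b };

        shared_lru = &c;
        dispose(isolate(2));            /* frees 2, leaves 1 cached */
        return 0;
}
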
@@ -538,7 +546,7 @@ static void prune_dcache(int count)
 {
        struct super_block *sb, *p = NULL;
        int w_count;
-       int unused = dentry_stat.nr_unused;
+       int unused = percpu_counter_sum_positive(&nr_dentry_unused);
        int prune_ratio;
        int pruned;
 
@@ -608,13 +616,19 @@ static void prune_dcache(int count)
  * shrink_dcache_sb - shrink dcache for a superblock
  * @sb: superblock
  *
- * Shrink the dcache for the specified super block. This
- * is used to free the dcache before unmounting a file
- * system
+ * Shrink the dcache for the specified super block. This is used to free
+ * the dcache before unmounting a file system.
  */
-void shrink_dcache_sb(struct super_block * sb)
+void shrink_dcache_sb(struct super_block *sb)
 {
-       __shrink_dcache_sb(sb, NULL, 0);
+       LIST_HEAD(tmp);
+
+       spin_lock(&dcache_lock);
+       while (!list_empty(&sb->s_dentry_lru)) {
+               list_splice_init(&sb->s_dentry_lru, &tmp);
+               shrink_dentry_list(&tmp);
+       }
+       spin_unlock(&dcache_lock);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
@@ -632,7 +646,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 
        /* detach this root from the system */
        spin_lock(&dcache_lock);
-       dentry_lru_del_init(dentry);
+       dentry_lru_del(dentry);
        __d_drop(dentry);
        spin_unlock(&dcache_lock);
 
@@ -646,7 +660,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                        spin_lock(&dcache_lock);
                        list_for_each_entry(loop, &dentry->d_subdirs,
                                            d_u.d_child) {
-                               dentry_lru_del_init(loop);
+                               dentry_lru_del(loop);
                                __d_drop(loop);
                                cond_resched_lock(&dcache_lock);
                        }
@@ -703,20 +717,13 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                         * otherwise we ascend to the parent and move to the
                         * next sibling if there is one */
                        if (!parent)
-                               goto out;
-
+                               return;
                        dentry = parent;
-
                } while (list_empty(&dentry->d_subdirs));
 
                dentry = list_entry(dentry->d_subdirs.next,
                                    struct dentry, d_u.d_child);
        }
-out:
-       /* several dentries were freed, need to correct nr_dentry */
-       spin_lock(&dcache_lock);
-       dentry_stat.nr_dentry -= detached;
-       spin_unlock(&dcache_lock);
 }
 
 /*
@@ -830,14 +837,15 @@ resume:
                struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
                next = tmp->next;
 
-               dentry_lru_del_init(dentry);
                /* 
                 * move only zero ref count dentries to the end 
                 * of the unused list for prune_dcache
                 */
                if (!atomic_read(&dentry->d_count)) {
-                       dentry_lru_add_tail(dentry);
+                       dentry_lru_move_tail(dentry);
                        found++;
+               } else {
+                       dentry_lru_del(dentry);
                }
 
                /*
@@ -900,12 +908,16 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  */
 static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
+       int nr_unused;
+
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
                prune_dcache(nr);
        }
-       return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+
+       nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+       return (nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker dcache_shrinker = {
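
percpu_counter_inc()/percpu_counter_dec() touch only a per-CPU slot, which keeps the hot paths above cheap; percpu_counter_sum_positive() is the slow, accurate read used here and in prune_dcache(). A userspace model of that trade-off (hypothetical names, threading elided):

#include <stdio.h>

#define NCPU 4

struct pc { long slot[NCPU]; };

/* Fast updates: each CPU writes only its own slot. */
static void pc_inc(struct pc *c, int cpu) { c->slot[cpu]++; }
static void pc_dec(struct pc *c, int cpu) { c->slot[cpu]--; }

/* Accurate read: O(NCPU), walks every slot, clamped at zero. */
static long pc_sum_positive(const struct pc *c)
{
        long sum = 0;

        for (int i = 0; i < NCPU; i++)
                sum += c->slot[i];
        return sum > 0 ? sum : 0;
}

int main(void)
{
        struct pc nr_unused = { { 0 } };

        pc_inc(&nr_unused, 0);
        pc_inc(&nr_unused, 3);
        pc_dec(&nr_unused, 1);
        printf("%ld\n", pc_sum_positive(&nr_unused));   /* 1 */
        return 0;
}
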
@@ -972,9 +984,10 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
        spin_lock(&dcache_lock);
        if (parent)
                list_add(&dentry->d_u.d_child, &parent->d_subdirs);
-       dentry_stat.nr_dentry++;
        spin_unlock(&dcache_lock);
 
+       percpu_counter_inc(&nr_dentry);
+
        return dentry;
 }
 EXPORT_SYMBOL(d_alloc);
@@ -1478,33 +1491,27 @@ out:
  * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
  */
-int d_validate(struct dentry *dentry, struct dentry *dparent)
+int d_validate(struct dentry *dentry, struct dentry *parent)
 {
-       struct hlist_head *base;
-       struct hlist_node *lhp;
+       struct hlist_head *head = d_hash(parent, dentry->d_name.hash);
+       struct hlist_node *node;
+       struct dentry *d;
 
        /* Check whether the ptr might be valid at all.. */
        if (!kmem_ptr_validate(dentry_cache, dentry))
-               goto out;
-
-       if (dentry->d_parent != dparent)
-               goto out;
+               return 0;
+       if (dentry->d_parent != parent)
+               return 0;
 
-       spin_lock(&dcache_lock);
-       base = d_hash(dparent, dentry->d_name.hash);
-       hlist_for_each(lhp,base) { 
-               /* hlist_for_each_entry_rcu() not required for d_hash list
-                * as it is parsed under dcache_lock
-                */
-               if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
-                       __dget_locked(dentry);
-                       spin_unlock(&dcache_lock);
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(d, node, head, d_hash) {
+               if (d == dentry) {
+                       dget(dentry);
+                       rcu_read_unlock();
                        return 1;
                }
        }
-       spin_unlock(&dcache_lock);
-out:
+       rcu_read_unlock();
        return 0;
 }
 EXPORT_SYMBOL(d_validate);
@@ -1994,7 +2000,7 @@ global_root:
  * Returns a pointer into the buffer or an error code if the
  * path was too long.
  *
- * "buflen" should be positive. Caller holds the dcache_lock.
+ * "buflen" should be positive.
  *
  * If path is not reachable from the supplied root, then the value of
  * root is changed (without modifying refcounts).
@@ -2006,10 +2012,12 @@ char *__d_path(const struct path *path, struct path *root,
        int error;
 
        prepend(&res, &buflen, "\0", 1);
+       spin_lock(&dcache_lock);
        error = prepend_path(path, root, &res, &buflen);
+       spin_unlock(&dcache_lock);
+
        if (error)
                return ERR_PTR(error);
-
        return res;
 }
 
@@ -2419,6 +2427,9 @@ static void __init dcache_init(void)
 {
        int loop;
 
+       percpu_counter_init(&nr_dentry, 0);
+       percpu_counter_init(&nr_dentry_unused, 0);
+
        /* 
         * A constructor could be added for stable state like the lists,
         * but it is probably not worth it because of the cache nature
index 30a87b3dbcac1286de7fa400f6855b2937796d5b..a4ed8380e98a630197edf4c556b8f4067a1c872f 100644 (file)
@@ -40,6 +40,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
        struct inode *inode = new_inode(sb);
 
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                switch (mode & S_IFMT) {
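
This hunk, like the freevxfs, fuse, hugetlbfs and ext4 ones later in the patch, is the same conversion: new_inode() no longer hands out a default i_ino, so pseudo-filesystems that still want a unique number fetch one explicitly. A userspace model of such an allocator (hypothetical name; the real get_next_ino() hands each CPU a batch of numbers rather than hitting one shared counter):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint last_ino;

/* One shared counter, relaxed ordering: uniqueness is all we need. */
static unsigned int next_ino(void)
{
        return atomic_fetch_add_explicit(&last_ino, 1,
                                         memory_order_relaxed) + 1;
}

int main(void)
{
        printf("%u %u\n", next_ino(), next_ino());      /* 1 2 */
        return 0;
}
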
index 48d74c7391d13f4f07393c45d19825e937ecbcd1..85882f6ba5f73c66b614a79ad3594a3dab5dc2b0 100644 (file)
@@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio)
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is_async)
 {
        ssize_t transferred = 0;
 
index 6d2b6f93685813ba2b2119dc71c14a941061cf46..3aa75b8888a14589f268f54a8a7b57c490cbcf9c 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -54,6 +54,7 @@
 #include <linux/fsnotify.h>
 #include <linux/fs_struct.h>
 #include <linux/pipe_fs_i.h>
+#include <linux/oom.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -759,6 +760,10 @@ static int exec_mmap(struct mm_struct *mm)
        tsk->mm = mm;
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
+       if (old_mm && tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+               atomic_dec(&old_mm->oom_disable_count);
+               atomic_inc(&tsk->mm->oom_disable_count);
+       }
        task_unlock(tsk);
        arch_pick_mmap_layout(mm);
        if (old_mm) {
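
A userspace model of the hunk above (hypothetical names): a task whose OOM score is pinned to "never kill" carries one vote in its mm's oom_disable_count, and on exec that vote has to follow the task from the old address space to the new one, or both counts go stale.

#include <assert.h>
#include <stdatomic.h>

struct mm_model { atomic_int oom_disable_count; };

/* Move the task's OOM-disable vote between address spaces. */
static void move_oom_vote(struct mm_model *old_space,
                          struct mm_model *new_space)
{
        atomic_fetch_sub(&old_space->oom_disable_count, 1);
        atomic_fetch_add(&new_space->oom_disable_count, 1);
}

int main(void)
{
        struct mm_model before = { 1 }, after = { 0 };

        move_oom_vote(&before, &after);
        assert(atomic_load(&before.oom_disable_count) == 0);
        assert(atomic_load(&after.oom_disable_count) == 1);
        return 0;
}
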
index 68cb23e3bb9828e88c2647a7f028db6d2f94bd78..b905c79b4f0afc88b7a1997009960f09f50f595f 100644 (file)
@@ -46,10 +46,6 @@ static int exofs_file_fsync(struct file *filp, int datasync)
 {
        int ret;
        struct inode *inode = filp->f_mapping->host;
-       struct writeback_control wbc = {
-               .sync_mode = WB_SYNC_ALL,
-               .nr_to_write = 0, /* metadata-only; caller takes care of data */
-       };
        struct super_block *sb;
 
        if (!(inode->i_state & I_DIRTY))
@@ -57,7 +53,7 @@ static int exofs_file_fsync(struct file *filp, int datasync)
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return 0;
 
-       ret = sync_inode(inode, &wbc);
+       ret = sync_inode_metadata(inode, 1);
 
        /* This is a good place to write the sb */
        /* TODO: Schedule an sb-sync on create */
index b7dd0c23686376822ffd40c185bc9b1683796d31..264e95d02830f28d992eefb717460ccd0be09d4d 100644 (file)
@@ -153,7 +153,7 @@ static int exofs_link(struct dentry *old_dentry, struct inode *dir,
 
        inode->i_ctime = CURRENT_TIME;
        inode_inc_link_count(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
 
        return exofs_add_nondir(dentry, inode);
 }
index e9e175949a637822a8689e757362673284af0d29..51b304056f10fcd47c18593a9a12bf56e8633063 100644 (file)
@@ -74,21 +74,20 @@ static struct dentry *
 find_disconnected_root(struct dentry *dentry)
 {
        dget(dentry);
-       spin_lock(&dentry->d_lock);
-       while (!IS_ROOT(dentry) &&
-              (dentry->d_parent->d_flags & DCACHE_DISCONNECTED)) {
-               struct dentry *parent = dentry->d_parent;
-               dget(parent);
-               spin_unlock(&dentry->d_lock);
+       while (!IS_ROOT(dentry)) {
+               struct dentry *parent = dget_parent(dentry);
+
+               if (!(parent->d_flags & DCACHE_DISCONNECTED)) {
+                       dput(parent);
+                       break;
+               }
+
                dput(dentry);
                dentry = parent;
-               spin_lock(&dentry->d_lock);
        }
-       spin_unlock(&dentry->d_lock);
        return dentry;
 }
 
-
 /*
  * Make sure target_dir is fully connected to the dentry tree.
  *
index 764109886ec00f8f8bfafed187e88d7ebed63202..2709b34206abdbc442377c2d02ed0d17e5959ec9 100644 (file)
@@ -98,7 +98,7 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
        if (IS_DIRSYNC(dir)) {
                err = write_one_page(page, 1);
                if (!err)
-                       err = ext2_sync_inode(dir);
+                       err = sync_inode_metadata(dir, 1);
        } else {
                unlock_page(page);
        }
index 416daa62242c5410e99ea455dca1854ddfbc0353..6346a2acf326075b2e23739a2ba9b55ce6d56c33 100644 (file)
@@ -120,7 +120,6 @@ extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
 extern struct inode *ext2_iget (struct super_block *, unsigned long);
 extern int ext2_write_inode (struct inode *, struct writeback_control *);
 extern void ext2_evict_inode(struct inode *);
-extern int ext2_sync_inode (struct inode *);
 extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
 extern int ext2_setattr (struct dentry *, struct iattr *);
 extern void ext2_set_inode_flags(struct inode *inode);
index 533699c1604060417e2fb3cdc42d61f5c9b52bdc..40ad210a5049a6eed9b184b3c30750ce6951fa46 100644 (file)
@@ -1203,7 +1203,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
        inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
        if (inode_needs_sync(inode)) {
                sync_mapping_buffers(inode->i_mapping);
-               ext2_sync_inode (inode);
+               sync_inode_metadata(inode, 1);
        } else {
                mark_inode_dirty(inode);
        }
@@ -1523,15 +1523,6 @@ int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
        return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 }
 
-int ext2_sync_inode(struct inode *inode)
-{
-       struct writeback_control wbc = {
-               .sync_mode = WB_SYNC_ALL,
-               .nr_to_write = 0,       /* sys_fsync did this */
-       };
-       return sync_inode(inode, &wbc);
-}
-
 int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
 {
        struct inode *inode = dentry->d_inode;
index 71efb0e9a3f2ababa1ae74033e5caa73332fba61..f8aecd2e32976c40b1af182a13a312ae0b8e23fe 100644 (file)
@@ -206,7 +206,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
 
        inode->i_ctime = CURRENT_TIME_SEC;
        inode_inc_link_count(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
 
        err = ext2_add_link(dentry, inode);
        if (!err) {
index 85df87d0f7b70a01dde7b39826f6d9abbeb08799..0901320671da20631092a1cc57113e9a0ef46d8d 100644 (file)
@@ -1221,9 +1221,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
        }
 
        es = sbi->s_es;
-       if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
-           (old_mount_opt & EXT2_MOUNT_XIP)) &&
-           invalidate_inodes(sb)) {
+       if ((sbi->s_mount_opt ^ old_mount_opt) & EXT2_MOUNT_XIP) {
                ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
                         "xip flag with busy inodes while remounting");
                sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
index 8c29ae15129ed06bacb04727d4e2d6da17700960..f84700be3274292808c9cfa2b9517274d9b17fc1 100644 (file)
@@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
        EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
        inode->i_ctime = CURRENT_TIME_SEC;
        if (IS_SYNC(inode)) {
-               error = ext2_sync_inode (inode);
+               error = sync_inode_metadata(inode, 1);
                /* In case sync failed due to ENOSPC the inode was actually
                 * written (only some dirty data were not) so we just proceed
                 * as if nothing happened and cleanup the unused block */
index 5e0faf4cda797800713a335f3c6d3d5d64fcd2c2..ad05353040a12338c1a0c84626abcfc20f96408f 100644 (file)
@@ -1696,8 +1696,8 @@ static int ext3_journalled_writepage(struct page *page,
                 * doesn't seem much point in redirtying the page here.
                 */
                ClearPageChecked(page);
-               ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
-                                       ext3_get_block);
+               ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
+                                         ext3_get_block);
                if (ret != 0) {
                        ext3_journal_stop(handle);
                        goto out_unlock;
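
The same mechanical conversion recurs in the gfs2 hunks below: block_prepare_write() took an end offset while __block_write_begin() takes a length, so each call site passes to - from. A trivial check of the rule:

#include <assert.h>

/* block_prepare_write(page, from, to, ...)  -> end offset
 * __block_write_begin(page, from, len, ...) -> length      */
static unsigned int end_to_len(unsigned int from, unsigned int to)
{
        assert(to >= from);
        return to - from;
}

int main(void)
{
        assert(end_to_len(512, 4096) == 3584);
        return 0;
}
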
index 2b35ddb70d65535a53f98268310ab377ea94e1f2..bce9dce639b874989fc687173531728842557c24 100644 (file)
@@ -2260,7 +2260,7 @@ retry:
 
        inode->i_ctime = CURRENT_TIME_SEC;
        inc_nlink(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
 
        err = ext3_add_entry(handle, dentry, inode);
        if (!err) {
index 4b8debeb39652fa0e10bcf36d397d6125c750d1a..49635ef236f84d40b0090a78a1a96860fae1fb6b 100644 (file)
@@ -1538,10 +1538,10 @@ static int do_journal_get_write_access(handle_t *handle,
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        /*
-        * __block_prepare_write() could have dirtied some buffers. Clean
+        * __block_write_begin() could have dirtied some buffers. Clean
         * the dirty bit as jbd2_journal_get_write_access() could complain
         * otherwise about fs integrity issues. Setting of the dirty bit
-        * by __block_prepare_write() isn't a real problem here as we clear
+        * by __block_write_begin() isn't a real problem here as we clear
         * the bit before releasing a page lock and thus writeback cannot
         * ever write the buffer.
         */
@@ -2550,8 +2550,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                if (buffer_delay(bh))
                        return 0; /* Not sure this could or should happen */
                /*
-                * XXX: __block_prepare_write() unmaps passed block,
-                * is it OK?
+                * XXX: __block_write_begin() unmaps passed block, is it OK?
                 */
                ret = ext4_da_reserve_space(inode, iblock);
                if (ret)
@@ -2583,7 +2582,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 /*
 * This function is used as a standard get_block_t callback function
  * when there is no desire to allocate any blocks.  It is used as a
- * callback function for block_prepare_write() and block_write_full_page().
+ * callback function for block_write_begin() and block_write_full_page().
  * These functions should only try to map a single block at a time.
  *
  * Since this function doesn't do block allocations even if the caller
@@ -2743,7 +2742,7 @@ static int ext4_writepage(struct page *page,
                 * all are mapped and non delay. We don't want to
                 * do block allocation here.
                 */
-               ret = block_prepare_write(page, 0, len,
+               ret = __block_write_begin(page, 0, len,
                                          noalloc_get_block_write);
                if (!ret) {
                        page_bufs = page_buffers(page);
index 19aa0d44d82283680f462f1cc2b76fe2fe220618..42f77b1dc72d810f48deb583baeb0f984c6fbc61 100644 (file)
@@ -2373,6 +2373,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
                printk(KERN_ERR "EXT4-fs: can't get new inode\n");
                goto err_freesgi;
        }
+       sbi->s_buddy_cache->i_ino = get_next_ino();
        EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
        for (i = 0; i < ngroups; i++) {
                desc = ext4_get_group_desc(sb, i, NULL);
index 314c0d3b3fa9aece3015a113b6d348b7bc2be4aa..bd39885b599854329922fa443a016e405aeac1f9 100644 (file)
@@ -2312,7 +2312,7 @@ retry:
 
        inode->i_ctime = ext4_current_time(inode);
        ext4_inc_count(handle, inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
 
        err = ext4_add_entry(handle, dentry, inode);
        if (!err) {
index a04bdd81c11ca3799d4429d8c5eb3579621e0628..c3dee381f1b4ea588f2188a33b3237f8efc251df 100644 (file)
@@ -60,7 +60,7 @@ static inline void file_free(struct file *f)
 /*
  * Return the total number of open files in the system
  */
-static int get_nr_files(void)
+static long get_nr_files(void)
 {
        return percpu_counter_read_positive(&nr_files);
 }
@@ -68,7 +68,7 @@ static int get_nr_files(void)
 /*
  * Return the maximum number of open files in the system
  */
-int get_max_files(void)
+unsigned long get_max_files(void)
 {
        return files_stat.max_files;
 }
@@ -82,7 +82,7 @@ int proc_nr_files(ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        files_stat.nr_files = get_nr_files();
-       return proc_dointvec(table, write, buffer, lenp, ppos);
+       return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
 #else
 int proc_nr_files(ctl_table *table, int write,
@@ -105,7 +105,7 @@ int proc_nr_files(ctl_table *table, int write,
 struct file *get_empty_filp(void)
 {
        const struct cred *cred = current_cred();
-       static int old_max;
+       static long old_max;
        struct file * f;
 
        /*
@@ -140,8 +140,7 @@ struct file *get_empty_filp(void)
 over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
-               printk(KERN_INFO "VFS: file-max limit %d reached\n",
-                                       get_max_files());
+               pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        goto fail;
@@ -487,7 +486,7 @@ retry:
 
 void __init files_init(unsigned long mempages)
 { 
-       int n; 
+       unsigned long n;
 
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
@@ -498,9 +497,7 @@ void __init files_init(unsigned long mempages)
         */ 
 
        n = (mempages * (PAGE_SIZE / 1024)) / 10;
-       files_stat.max_files = n; 
-       if (files_stat.max_files < NR_FILE)
-               files_stat.max_files = NR_FILE;
+       files_stat.max_files = max_t(unsigned long, n, NR_FILE);
        files_defer_init();
        lg_lock_init(files_lglock);
        percpu_counter_init(&nr_files, 0);
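
A userspace replay of files_init()'s sizing rule with assumed constants, showing why these fields went from int to unsigned long: on very large machines the computed default exceeds INT_MAX.

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define NR_FILE   8192UL        /* kernel default floor */

int main(void)
{
        unsigned long mempages = 1UL << 34;     /* 2^34 4 KiB pages = 64 TiB */
        unsigned long n = (mempages * (PAGE_SIZE / 1024)) / 10;
        unsigned long max_files = n > NR_FILE ? n : NR_FILE;

        /* ~6.87e9, which does not fit in a 32-bit int */
        printf("file-max = %lu (INT_MAX = %d)\n", max_files, INT_MAX);
        return 0;
}
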
index 79d1b4ea13e79bc6531761ed817f0011653492bd..8c04eac5079d34d5ecdec820e23990294cc1d2ba 100644 (file)
@@ -260,6 +260,7 @@ vxfs_get_fake_inode(struct super_block *sbp, struct vxfs_inode_info *vip)
        struct inode                    *ip = NULL;
 
        if ((ip = new_inode(sbp))) {
+               ip->i_ino = get_next_ino();
                vxfs_iinit(ip, vip);
                ip->i_mapping->a_ops = &vxfs_aops;
        }
index ab38fef1c9a1a52eab7128fa6a7dad217d4ad744..aed881a76b229602ece291fb8c55fbd5ec45ee90 100644 (file)
@@ -79,6 +79,11 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
        return sb->s_bdi;
 }
 
+static inline struct inode *wb_inode(struct list_head *head)
+{
+       return list_entry(head, struct inode, i_wb_list);
+}
+
 static void bdi_queue_work(struct backing_dev_info *bdi,
                struct wb_writeback_work *work)
 {
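
wb_inode() is just list_entry(), i.e. container_of(), specialized for the renamed i_wb_list member (the old shared i_list is split into i_wb_list for writeback and i_lru for the inode LRU), so the many open-coded list_entry() calls below collapse into one helper. A self-contained userspace model of the idiom:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

struct inode_model {
        long i_ino;
        struct list_head i_wb_list;     /* embedded linkage */
};

/* container_of: recover the enclosing struct from a pointer to one of
 * its members; the kernel's list_entry() is exactly this. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct inode_model *wb_inode(struct list_head *head)
{
        return container_of(head, struct inode_model, i_wb_list);
}

int main(void)
{
        struct inode_model in = { .i_ino = 42 };

        printf("%ld\n", wb_inode(&in.i_wb_list)->i_ino);        /* 42 */
        return 0;
}
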
@@ -172,11 +177,11 @@ static void redirty_tail(struct inode *inode)
        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;
 
-               tail = list_entry(wb->b_dirty.next, struct inode, i_list);
+               tail = wb_inode(wb->b_dirty.next);
                if (time_before(inode->dirtied_when, tail->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
-       list_move(&inode->i_list, &wb->b_dirty);
+       list_move(&inode->i_wb_list, &wb->b_dirty);
 }
 
 /*
@@ -186,7 +191,7 @@ static void requeue_io(struct inode *inode)
 {
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
 
-       list_move(&inode->i_list, &wb->b_more_io);
+       list_move(&inode->i_wb_list, &wb->b_more_io);
 }
 
 static void inode_sync_complete(struct inode *inode)
@@ -227,14 +232,14 @@ static void move_expired_inodes(struct list_head *delaying_queue,
        int do_sb_sort = 0;
 
        while (!list_empty(delaying_queue)) {
-               inode = list_entry(delaying_queue->prev, struct inode, i_list);
+               inode = wb_inode(delaying_queue->prev);
                if (older_than_this &&
                    inode_dirtied_after(inode, *older_than_this))
                        break;
                if (sb && sb != inode->i_sb)
                        do_sb_sort = 1;
                sb = inode->i_sb;
-               list_move(&inode->i_list, &tmp);
+               list_move(&inode->i_wb_list, &tmp);
        }
 
        /* just one sb in list, splice to dispatch_queue and we're done */
@@ -245,12 +250,11 @@ static void move_expired_inodes(struct list_head *delaying_queue,
 
        /* Move inodes from one superblock together */
        while (!list_empty(&tmp)) {
-               inode = list_entry(tmp.prev, struct inode, i_list);
-               sb = inode->i_sb;
+               sb = wb_inode(tmp.prev)->i_sb;
                list_for_each_prev_safe(pos, node, &tmp) {
-                       inode = list_entry(pos, struct inode, i_list);
+                       inode = wb_inode(pos);
                        if (inode->i_sb == sb)
-                               list_move(&inode->i_list, dispatch_queue);
+                               list_move(&inode->i_wb_list, dispatch_queue);
                }
        }
 }
@@ -408,16 +412,13 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
                         * completion.
                         */
                        redirty_tail(inode);
-               } else if (atomic_read(&inode->i_count)) {
-                       /*
-                        * The inode is clean, inuse
-                        */
-                       list_move(&inode->i_list, &inode_in_use);
                } else {
                        /*
-                        * The inode is clean, unused
+                        * The inode is clean.  At this point we either have
+                        * a reference to the inode or it's on its way out.
+                        * No need to add it back to the LRU.
                         */
-                       list_move(&inode->i_list, &inode_unused);
+                       list_del_init(&inode->i_wb_list);
                }
        }
        inode_sync_complete(inode);
@@ -465,8 +466,7 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
 {
        while (!list_empty(&wb->b_io)) {
                long pages_skipped;
-               struct inode *inode = list_entry(wb->b_io.prev,
-                                                struct inode, i_list);
+               struct inode *inode = wb_inode(wb->b_io.prev);
 
                if (inode->i_sb != sb) {
                        if (only_this_sb) {
@@ -487,10 +487,16 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
                        return 0;
                }
 
-               if (inode->i_state & (I_NEW | I_WILL_FREE)) {
+               /*
+                * Don't bother with new inodes or inodes being freed: the
+                * first kind does not need periodic writeout yet, and for the
+                * latter kind writeout is handled by the freer.
+                */
+               if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        requeue_io(inode);
                        continue;
                }
+
                /*
                 * Was this inode dirtied after sync_sb_inodes was called?
                 * This keeps sync from extra jobs and livelock.
@@ -498,7 +504,6 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
                if (inode_dirtied_after(inode, wbc->wb_start))
                        return 1;
 
-               BUG_ON(inode->i_state & I_FREEING);
                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                writeback_single_inode(inode, wbc);
@@ -536,8 +541,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
                queue_io(wb, wbc->older_than_this);
 
        while (!list_empty(&wb->b_io)) {
-               struct inode *inode = list_entry(wb->b_io.prev,
-                                                struct inode, i_list);
+               struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;
 
                if (!pin_sb_for_writeback(sb)) {
@@ -582,7 +586,7 @@ static inline bool over_bground_thresh(void)
        global_dirty_limits(&background_thresh, &dirty_thresh);
 
        return (global_page_state(NR_FILE_DIRTY) +
-               global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
+               global_page_state(NR_UNSTABLE_NFS) > background_thresh);
 }
 
 /*
@@ -675,8 +679,7 @@ static long wb_writeback(struct bdi_writeback *wb,
                 */
                spin_lock(&inode_lock);
                if (!list_empty(&wb->b_more_io))  {
-                       inode = list_entry(wb->b_more_io.prev,
-                                               struct inode, i_list);
+                       inode = wb_inode(wb->b_more_io.prev);
                        trace_wbc_writeback_wait(&wbc, wb->bdi);
                        inode_wait_for_writeback(inode);
                }
@@ -721,9 +724,13 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
                return 0;
 
        wb->last_old_flush = jiffies;
+       /*
+        * Add in the number of potentially dirty inodes, because each inode
+        * write can dirty pagecache in the underlying blockdev.
+        */
        nr_pages = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
-                       (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+                       get_nr_dirty_inodes();
 
        if (nr_pages) {
                struct wb_writeback_work work = {
@@ -790,7 +797,7 @@ int bdi_writeback_thread(void *data)
        struct backing_dev_info *bdi = wb->bdi;
        long pages_written;
 
-       current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+       current->flags |= PF_SWAPWRITE;
        set_freezable();
        wb->last_active = jiffies;
 
@@ -962,7 +969,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                 * dirty list.  Add blockdev inodes as well.
                 */
                if (!S_ISBLK(inode->i_mode)) {
-                       if (hlist_unhashed(&inode->i_hash))
+                       if (inode_unhashed(inode))
                                goto out;
                }
                if (inode->i_state & I_FREEING)
@@ -990,7 +997,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                        }
 
                        inode->dirtied_when = jiffies;
-                       list_move(&inode->i_list, &bdi->wb.b_dirty);
+                       list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
                }
        }
 out:
@@ -1090,8 +1097,7 @@ void writeback_inodes_sb(struct super_block *sb)
 
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-       work.nr_pages = nr_dirty + nr_unstable +
-                       (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+       work.nr_pages = nr_dirty + nr_unstable + get_nr_dirty_inodes();
 
        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);
@@ -1198,3 +1204,23 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
        return ret;
 }
 EXPORT_SYMBOL(sync_inode);
+
+/**
+ * sync_inode_metadata - write an inode to disk
+ * @inode: the inode to sync
+ * @wait: wait for I/O to complete.
+ *
+ * Write an inode to disk and adjust its dirty state after completion.
+ *
+ * Note: only writes the actual inode, no associated data or other metadata.
+ */
+int sync_inode_metadata(struct inode *inode, int wait)
+{
+       struct writeback_control wbc = {
+               .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+               .nr_to_write = 0, /* metadata-only */
+       };
+
+       return sync_inode(inode, &wbc);
+}
+EXPORT_SYMBOL(sync_inode_metadata);
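
For callers, the helper replaces the open-coded writeback_control seen in the exofs and ext2 hunks earlier. A sketch of a converted fsync path, modeled on that exofs conversion (example_fsync is hypothetical; error handling and the sb-sync are trimmed):

static int example_fsync(struct file *filp, int datasync)
{
        struct inode *inode = filp->f_mapping->host;

        if (!(inode->i_state & I_DIRTY))
                return 0;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return 0;

        /* wait == 1 selects WB_SYNC_ALL; only the inode itself is written */
        return sync_inode_metadata(inode, 1);
}
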
index 7367e177186f4b0efb96d1281134860058050e6d..4eba07661e5c562b50462d8feee6982e39fe2c4b 100644 (file)
@@ -222,6 +222,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
        if (!inode)
                return NULL;
 
+       inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = fc->user_id;
        inode->i_gid = fc->group_id;
index cde755cca5642d41fb9cbbe05ac2d01f70f53c69..b98664275f02460c01c5fb2b01d1ddf933671b08 100644 (file)
@@ -809,11 +809,9 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
        int err;
        struct page *page = *pagep;
 
-       if (page && zeroing && count < PAGE_SIZE) {
-               void *mapaddr = kmap_atomic(page, KM_USER1);
-               memset(mapaddr, 0, PAGE_SIZE);
-               kunmap_atomic(mapaddr, KM_USER1);
-       }
+       if (page && zeroing && count < PAGE_SIZE)
+               clear_highpage(page);
+
        while (count) {
                if (cs->write && cs->pipebufs && page) {
                        return fuse_ref_page(cs, page, offset, count);
@@ -830,10 +828,10 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                        }
                }
                if (page) {
-                       void *mapaddr = kmap_atomic(page, KM_USER1);
+                       void *mapaddr = kmap_atomic(page, KM_USER0);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
-                       kunmap_atomic(mapaddr, KM_USER1);
+                       kunmap_atomic(mapaddr, KM_USER0);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
index 6b24afb96aaedade304b48bb427e664eae8e6e53..4f36f8832b9b1b167321e119260da82d89b4cdf0 100644 (file)
@@ -618,7 +618,6 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
        struct gfs2_alloc *al = NULL;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
-       unsigned to = from + len;
        struct page *page;
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
@@ -691,7 +690,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
        }
 
 prepare_write:
-       error = block_prepare_write(page, from, to, gfs2_block_map);
+       error = __block_write_begin(page, from, len, gfs2_block_map);
 out:
        if (error == 0)
                return 0;
index f3b071f921aa6ea723e35b483675d4c5ee4151c8..939739c7b3f9deb30cf2a2df4ce3c4ba8290b697 100644 (file)
@@ -55,7 +55,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
                 * activity, but those code paths have their own higher-level
                 * throttling.
                 */
-               if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+               if (wbc->sync_mode != WB_SYNC_NONE) {
                        lock_buffer(bh);
                } else if (!trylock_buffer(bh)) {
                        redirty_page_for_writepage(wbc, page);
index aeafc233dc897fdb102df29bf052fc040b61faef..cade1acbcea9420dc7c69622888d820af24eea99 100644 (file)
@@ -1219,7 +1219,6 @@ fail_sb:
 fail_locking:
        init_locking(sdp, &mount_gh, UNDO);
 fail_lm:
-       invalidate_inodes(sb);
        gfs2_gl_hash_clear(sdp);
        gfs2_lm_unmount(sdp);
 fail_sys:
index 0534510200d5961887d3ee957d799e3b4669e76b..12cbea7502c26040fb90db5750e764bdd831079a 100644 (file)
@@ -255,7 +255,7 @@ out_parent:
        gfs2_holder_uninit(ghs);
        gfs2_holder_uninit(ghs + 1);
        if (!error) {
-               atomic_inc(&inode->i_count);
+               ihold(inode);
                d_instantiate(dentry, inode);
                mark_inode_dirty(inode);
        }
@@ -1294,7 +1294,7 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
        int error;
 
        if (!page_has_buffers(page)) {
-               error = block_prepare_write(page, from, to, gfs2_block_map);
+               error = __block_write_begin(page, from, to - from, gfs2_block_map);
                if (unlikely(error))
                        return error;
 
@@ -1313,7 +1313,7 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
                next += bh->b_size;
                if (buffer_mapped(bh)) {
                        if (end) {
-                               error = block_prepare_write(page, start, end,
+                               error = __block_write_begin(page, start, end - start,
                                                            gfs2_block_map);
                                if (unlikely(error))
                                        return error;
@@ -1328,7 +1328,7 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
        } while (next < to);
 
        if (end) {
-               error = block_prepare_write(page, start, end, gfs2_block_map);
+               error = __block_write_begin(page, start, end - start, gfs2_block_map);
                if (unlikely(error))
                        return error;
                empty_write_end(page, start, end);
index 047d1176096c79d6f0ce50227b8c098755fb0150..2b2c4997430b708113af14a08d4c58449b97ccbe 100644 (file)
@@ -857,7 +857,6 @@ restart:
        gfs2_clear_rgrpd(sdp);
        gfs2_jindex_free(sdp);
        /*  Take apart glock structures and buffer lists  */
-       invalidate_inodes(sdp->sd_vfs);
        gfs2_gl_hash_clear(sdp);
        /*  Unmount the locking protocol  */
        gfs2_lm_unmount(sdp);
index 4f55651aaa51e888e6c6dbcb19b78384e20caa61..c8cffb81e849e68e8e85d6e4c19666e6263e3f25 100644 (file)
@@ -147,8 +147,6 @@ struct hfs_sb_info {
        u16 blockoffset;
 
        int fs_div;
-
-       struct hlist_head rsrc_inodes;
 };
 
 #define HFS_FLG_BITMAP_DIRTY   0
@@ -254,17 +252,6 @@ static inline void hfs_bitmap_dirty(struct super_block *sb)
        sb->s_dirt = 1;
 }
 
-static inline void hfs_buffer_sync(struct buffer_head *bh)
-{
-       while (buffer_locked(bh)) {
-               wait_on_buffer(bh);
-       }
-       if (buffer_dirty(bh)) {
-               ll_rw_block(WRITE, 1, &bh);
-               wait_on_buffer(bh);
-       }
-}
-
 #define sb_bread512(sb, sec, data) ({                  \
        struct buffer_head *__bh;                       \
        sector_t __block;                               \
index 397b7adc7ce668dd2880fa73b4f5ee6cf1b050a2..dffb4e996643557f04ae1c83292bd729ef5adada 100644 (file)
@@ -524,7 +524,7 @@ static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
        HFS_I(inode)->rsrc_inode = dir;
        HFS_I(dir)->rsrc_inode = inode;
        igrab(dir);
-       hlist_add_head(&inode->i_hash, &HFS_SB(dir->i_sb)->rsrc_inodes);
+       hlist_add_fake(&inode->i_hash);
        mark_inode_dirty(inode);
 out:
        d_add(dentry, inode);
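
hlist_add_fake() makes a node look hashed by pointing pprev at its own next field, so a later hlist_del() is safe without the node ever being on a real list; that is what lets HFS drop the rsrc_inodes bookkeeping list entirely. A runnable userspace model of the trick (minimal re-implementations, not the kernel headers):

#include <assert.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };

static void init_node(struct hlist_node *h) { h->next = NULL; h->pprev = NULL; }
static int  hlist_unhashed(const struct hlist_node *h) { return !h->pprev; }

/* The trick: pprev points at the node's own next field, so the node
 * appears hashed without being on any list. */
static void hlist_add_fake(struct hlist_node *n) { n->pprev = &n->next; }

static void hlist_del(struct hlist_node *n)
{
        *n->pprev = n->next;            /* self-write for a fake-hashed node */
        if (n->next)
                n->next->pprev = n->pprev;
}

int main(void)
{
        struct hlist_node n;

        init_node(&n);
        hlist_add_fake(&n);
        assert(!hlist_unhashed(&n));
        hlist_del(&n);                  /* safe: no real list was touched */
        return 0;
}
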
index 86428f5ac991c59dd46a088b25cffe12b5a13dee..1563d5ce57643e23ac08e3a7db02e0b336603c60 100644 (file)
@@ -220,7 +220,7 @@ int hfs_mdb_get(struct super_block *sb)
                mdb->drLsMod = hfs_mtime();
 
                mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
-               hfs_buffer_sync(HFS_SB(sb)->mdb_bh);
+               sync_dirty_buffer(HFS_SB(sb)->mdb_bh);
        }
 
        return 0;
@@ -287,7 +287,7 @@ void hfs_mdb_commit(struct super_block *sb)
                HFS_SB(sb)->alt_mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT);
                HFS_SB(sb)->alt_mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT);
                mark_buffer_dirty(HFS_SB(sb)->alt_mdb_bh);
-               hfs_buffer_sync(HFS_SB(sb)->alt_mdb_bh);
+               sync_dirty_buffer(HFS_SB(sb)->alt_mdb_bh);
        }
 
        if (test_and_clear_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags)) {
index 33254160f650e22da90ff1b0b153df229c81f0df..6ee1586f2334f0c05be1dd8a5305dc96be12674a 100644 (file)
@@ -382,7 +382,6 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
                return -ENOMEM;
 
        sb->s_fs_info = sbi;
-       INIT_HLIST_HEAD(&sbi->rsrc_inodes);
 
        res = -EINVAL;
        if (!parse_options((char *)data, sbi)) {
index d236d85ec9d73f703384ecaa9d6522fe7433c775..e318bbc0daf6eb9ac4cd4dac18213cc7e7e89ea8 100644 (file)
@@ -286,7 +286,7 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
 
        inc_nlink(inode);
        hfsplus_instantiate(dst_dentry, inode, cnid);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
        sbi->file_count++;
index 78449280dae08471958a328afca7f225fe9d744f..8afd7e84f98d5d0417b8c256d60f0ff0329521db 100644 (file)
@@ -211,7 +211,7 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
         * appear hashed, but do not put on any lists.  hlist_del()
         * will work fine and require no locking.
         */
-       inode->i_hash.pprev = &inode->i_hash.next;
+       hlist_add_fake(&inode->i_hash);
 
        mark_inode_dirty(inode);
 out:
index 7c232c1487ee761f3557a705a49a2fc7e9da5fb1..bf15a43016b91b5e342bb16a442a19f4e9dadd33 100644 (file)
@@ -91,7 +91,6 @@ extern int rename_file(char *from, char *to);
 extern int do_statfs(char *root, long *bsize_out, long long *blocks_out,
                     long long *bfree_out, long long *bavail_out,
                     long long *files_out, long long *ffree_out,
-                    void *fsid_out, int fsid_size, long *namelen_out,
-                    long *spare_out);
+                    void *fsid_out, int fsid_size, long *namelen_out);
 
 #endif
index f7dc9b5f9ef8c80560cb545d937aa9818a3b646f..cd7c93917cc7c29636ee7c87a8846177e4098d23 100644 (file)
@@ -217,7 +217,7 @@ int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
        err = do_statfs(dentry->d_sb->s_fs_info,
                        &sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
                        &f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
-                       &sf->f_namelen, sf->f_spare);
+                       &sf->f_namelen);
        if (err)
                return err;
        sf->f_blocks = f_blocks;
index 6777aa06ce2cc894de1885f65c4d145e9bc8f0a8..d51a98384bc03b0410f7ba3d5c021804511d10bf 100644 (file)
@@ -94,8 +94,7 @@ void *open_dir(char *path, int *err_out)
 
        dir = opendir(path);
        *err_out = errno;
-       if (dir == NULL)
-               return NULL;
+
        return dir;
 }
 
@@ -205,7 +204,7 @@ int set_attr(const char *file, struct hostfs_iattr *attrs, int fd)
        if (attrs->ia_valid & HOSTFS_ATTR_MODE) {
                if (fd >= 0) {
                        if (fchmod(fd, attrs->ia_mode) != 0)
-                               return (-errno);
+                               return -errno;
                } else if (chmod(file, attrs->ia_mode) != 0) {
                        return -errno;
                }
@@ -364,8 +363,7 @@ int rename_file(char *from, char *to)
 int do_statfs(char *root, long *bsize_out, long long *blocks_out,
              long long *bfree_out, long long *bavail_out,
              long long *files_out, long long *ffree_out,
-             void *fsid_out, int fsid_size, long *namelen_out,
-             long *spare_out)
+             void *fsid_out, int fsid_size, long *namelen_out)
 {
        struct statfs64 buf;
        int err;
@@ -384,10 +382,6 @@ int do_statfs(char *root, long *bsize_out, long long *blocks_out,
               sizeof(buf.f_fsid) > fsid_size ? fsid_size :
               sizeof(buf.f_fsid));
        *namelen_out = buf.f_namelen;
-       spare_out[0] = buf.f_spare[0];
-       spare_out[1] = buf.f_spare[1];
-       spare_out[2] = buf.f_spare[2];
-       spare_out[3] = buf.f_spare[3];
-       spare_out[4] = buf.f_spare[4];
+
        return 0;
 }
index a14328d270e855a4d16d3772b9ad464bab296bdc..b14be3f781c714129d4fafbd17a7ff9874193b3f 100644 (file)
@@ -456,6 +456,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_uid = uid;
                inode->i_gid = gid;
index 56d909d69bc88cda72bb70a528fbf0c70e877d0d..ae2727ab0c3ab7695b14f256da0bccdfd3668d81 100644 (file)
@@ -29,7 +29,6 @@
 /*
  * This is needed for the following functions:
  *  - inode_has_buffers
- *  - invalidate_inode_buffers
  *  - invalidate_bdev
  *
  * FIXME: remove all knowledge of the buffer layer from this file
@@ -73,8 +72,7 @@ static unsigned int i_hash_shift __read_mostly;
  * allowing for low-overhead inode sync() operations.
  */
 
-LIST_HEAD(inode_in_use);
-LIST_HEAD(inode_unused);
+static LIST_HEAD(inode_lru);
 static struct hlist_head *inode_hashtable __read_mostly;
 
 /*
@@ -104,8 +102,40 @@ static DECLARE_RWSEM(iprune_sem);
  */
 struct inodes_stat_t inodes_stat;
 
+static struct percpu_counter nr_inodes __cacheline_aligned_in_smp;
+static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp;
+
 static struct kmem_cache *inode_cachep __read_mostly;
 
+static inline int get_nr_inodes(void)
+{
+       return percpu_counter_sum_positive(&nr_inodes);
+}
+
+static inline int get_nr_inodes_unused(void)
+{
+       return percpu_counter_sum_positive(&nr_inodes_unused);
+}
+
+int get_nr_dirty_inodes(void)
+{
+       int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
+       return nr_dirty > 0 ? nr_dirty : 0;
+}
+
+/*
+ * Handle the nr_inodes and nr_unused sysctls
+ */
+#ifdef CONFIG_SYSCTL
+int proc_nr_inodes(ctl_table *table, int write,
+                  void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       inodes_stat.nr_inodes = get_nr_inodes();
+       inodes_stat.nr_unused = get_nr_inodes_unused();
+       return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+#endif
+
 static void wake_up_inode(struct inode *inode)
 {
        /*
@@ -193,6 +224,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        inode->i_fsnotify_mask = 0;
 #endif
 
+       percpu_counter_inc(&nr_inodes);
+
        return 0;
 out:
        return -ENOMEM;
@@ -233,11 +266,13 @@ void __destroy_inode(struct inode *inode)
        if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
                posix_acl_release(inode->i_default_acl);
 #endif
+       percpu_counter_dec(&nr_inodes);
 }
 EXPORT_SYMBOL(__destroy_inode);
 
-void destroy_inode(struct inode *inode)
+static void destroy_inode(struct inode *inode)
 {
+       BUG_ON(!list_empty(&inode->i_lru));
        __destroy_inode(inode);
        if (inode->i_sb->s_op->destroy_inode)
                inode->i_sb->s_op->destroy_inode(inode);
@@ -256,6 +291,8 @@ void inode_init_once(struct inode *inode)
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_dentry);
        INIT_LIST_HEAD(&inode->i_devices);
+       INIT_LIST_HEAD(&inode->i_wb_list);
+       INIT_LIST_HEAD(&inode->i_lru);
        INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
        spin_lock_init(&inode->i_data.tree_lock);
        spin_lock_init(&inode->i_data.i_mmap_lock);
@@ -282,14 +319,109 @@ static void init_once(void *foo)
  */
 void __iget(struct inode *inode)
 {
-       if (atomic_inc_return(&inode->i_count) != 1)
-               return;
+       atomic_inc(&inode->i_count);
+}
+
+/*
+ * get additional reference to inode; caller must already hold one.
+ */
+void ihold(struct inode *inode)
+{
+       WARN_ON(atomic_inc_return(&inode->i_count) < 2);
+}
+EXPORT_SYMBOL(ihold);
+
+static void inode_lru_list_add(struct inode *inode)
+{
+       if (list_empty(&inode->i_lru)) {
+               list_add(&inode->i_lru, &inode_lru);
+               percpu_counter_inc(&nr_inodes_unused);
+       }
+}
 
-       if (!(inode->i_state & (I_DIRTY|I_SYNC)))
-               list_move(&inode->i_list, &inode_in_use);
-       inodes_stat.nr_unused--;
+static void inode_lru_list_del(struct inode *inode)
+{
+       if (!list_empty(&inode->i_lru)) {
+               list_del_init(&inode->i_lru);
+               percpu_counter_dec(&nr_inodes_unused);
+       }
+}
+
+static inline void __inode_sb_list_add(struct inode *inode)
+{
+       list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
 }
 
+/**
+ * inode_sb_list_add - add inode to the superblock list of inodes
+ * @inode: inode to add
+ */
+void inode_sb_list_add(struct inode *inode)
+{
+       spin_lock(&inode_lock);
+       __inode_sb_list_add(inode);
+       spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL_GPL(inode_sb_list_add);
+
+static inline void __inode_sb_list_del(struct inode *inode)
+{
+       list_del_init(&inode->i_sb_list);
+}
+
+static unsigned long hash(struct super_block *sb, unsigned long hashval)
+{
+       unsigned long tmp;
+
+       tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
+                       L1_CACHE_BYTES;
+       tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
+       return tmp & I_HASHMASK;
+}
+
+/**
+ *     __insert_inode_hash - hash an inode
+ *     @inode: unhashed inode
+ *     @hashval: unsigned long value used to locate this object in the
+ *             inode_hashtable.
+ *
+ *     Add an inode to the inode hash for this superblock.
+ */
+void __insert_inode_hash(struct inode *inode, unsigned long hashval)
+{
+       struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
+
+       spin_lock(&inode_lock);
+       hlist_add_head(&inode->i_hash, b);
+       spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL(__insert_inode_hash);
+
+/**
+ *     __remove_inode_hash - remove an inode from the hash
+ *     @inode: inode to unhash
+ *
+ *     Remove an inode from the superblock.
+ */
+static void __remove_inode_hash(struct inode *inode)
+{
+       hlist_del_init(&inode->i_hash);
+}
+
+/**
+ *     remove_inode_hash - remove an inode from the hash
+ *     @inode: inode to unhash
+ *
+ *     Remove an inode from the inode hash table.
+ */
+void remove_inode_hash(struct inode *inode)
+{
+       spin_lock(&inode_lock);
+       hlist_del_init(&inode->i_hash);
+       spin_unlock(&inode_lock);
+}
+EXPORT_SYMBOL(remove_inode_hash);
+
 void end_writeback(struct inode *inode)
 {
        might_sleep();
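
The hash routine above mixes the superblock pointer into the per-name hash value. A userspace replay with assumed constants (GOLDEN_RATIO_PRIME here is the 32-bit kernel value, and I_HASHBITS/I_HASHMASK are fixed at 14 bits instead of being computed at boot; the superblock "pointer" is just a cookie):

#include <stdio.h>

#define GOLDEN_RATIO_PRIME      0x9e370001UL    /* 32-bit kernel value */
#define L1_CACHE_BYTES          64              /* assumption */

static const unsigned int  i_hash_bits = 14;            /* assumed */
static const unsigned long i_hash_mask = (1UL << 14) - 1;

static unsigned long hash(unsigned long sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * sb) ^ (GOLDEN_RATIO_PRIME + hashval) / L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_bits);
        return tmp & i_hash_mask;
}

int main(void)
{
        /* same superblock cookie, adjacent hash values spread apart */
        printf("%lu %lu\n", hash(0x12345678UL, 1), hash(0x12345678UL, 2));
        return 0;
}
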
@@ -328,101 +460,113 @@ static void evict(struct inode *inode)
  */
 static void dispose_list(struct list_head *head)
 {
-       int nr_disposed = 0;
-
        while (!list_empty(head)) {
                struct inode *inode;
 
-               inode = list_first_entry(head, struct inode, i_list);
-               list_del(&inode->i_list);
+               inode = list_first_entry(head, struct inode, i_lru);
+               list_del_init(&inode->i_lru);
 
                evict(inode);
 
                spin_lock(&inode_lock);
-               hlist_del_init(&inode->i_hash);
-               list_del_init(&inode->i_sb_list);
+               __remove_inode_hash(inode);
+               __inode_sb_list_del(inode);
                spin_unlock(&inode_lock);
 
                wake_up_inode(inode);
                destroy_inode(inode);
-               nr_disposed++;
        }
-       spin_lock(&inode_lock);
-       inodes_stat.nr_inodes -= nr_disposed;
-       spin_unlock(&inode_lock);
 }
 
-/*
- * Invalidate all inodes for a device.
+/**
+ * evict_inodes        - evict all evictable inodes for a superblock
+ * @sb:                superblock to operate on
+ *
+ * Make sure that no inodes with zero refcount are retained.  This is
+ * called during superblock shutdown after the MS_ACTIVE flag has been
+ * removed, so any inode reaching a zero refcount during or after that
+ * call will be immediately evicted.
  */
-static int invalidate_list(struct list_head *head, struct list_head *dispose)
+void evict_inodes(struct super_block *sb)
 {
-       struct list_head *next;
-       int busy = 0, count = 0;
-
-       next = head->next;
-       for (;;) {
-               struct list_head *tmp = next;
-               struct inode *inode;
+       struct inode *inode, *next;
+       LIST_HEAD(dispose);
 
-               /*
-                * We can reschedule here without worrying about the list's
-                * consistency because the per-sb list of inodes must not
-                * change during umount anymore, and because iprune_sem keeps
-                * shrink_icache_memory() away.
-                */
-               cond_resched_lock(&inode_lock);
+       down_write(&iprune_sem);
 
-               next = next->next;
-               if (tmp == head)
-                       break;
-               inode = list_entry(tmp, struct inode, i_sb_list);
-               if (inode->i_state & I_NEW)
+       spin_lock(&inode_lock);
+       list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
+               if (atomic_read(&inode->i_count))
                        continue;
-               invalidate_inode_buffers(inode);
-               if (!atomic_read(&inode->i_count)) {
-                       list_move(&inode->i_list, dispose);
-                       WARN_ON(inode->i_state & I_NEW);
-                       inode->i_state |= I_FREEING;
-                       count++;
+
+               if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+                       WARN_ON(1);
                        continue;
                }
-               busy = 1;
+
+               inode->i_state |= I_FREEING;
+
+               /*
+                * Move the inode off the IO lists and LRU once I_FREEING is
+                * set so that it won't get moved back on there if it is dirty.
+                */
+               list_move(&inode->i_lru, &dispose);
+               list_del_init(&inode->i_wb_list);
+               if (!(inode->i_state & (I_DIRTY | I_SYNC)))
+                       percpu_counter_dec(&nr_inodes_unused);
        }
-       /* only unused inodes may be cached with i_count zero */
-       inodes_stat.nr_unused -= count;
-       return busy;
+       spin_unlock(&inode_lock);
+
+       dispose_list(&dispose);
+       up_write(&iprune_sem);
 }
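
dispose_list() and evict_inodes() above (and invalidate_inodes() below) share a
collect-then-dispose pattern: unlink candidates onto a private list under the
lock, then do the expensive teardown with the lock dropped. A minimal userspace
sketch of that pattern, not part of this patch, with illustrative names and a
pthread mutex standing in for inode_lock:

#include <pthread.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
        int refcount;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *all_entries;       /* protected by list_lock */

static void evict_unreferenced(void)
{
        struct entry *dispose = NULL;   /* private list, needs no locking */
        struct entry **pp, *e;

        pthread_mutex_lock(&list_lock);
        pp = &all_entries;
        while ((e = *pp) != NULL) {
                if (e->refcount) {      /* busy: keep it on the shared list */
                        pp = &e->next;
                        continue;
                }
                *pp = e->next;          /* unlink under the lock */
                e->next = dispose;
                dispose = e;
        }
        pthread_mutex_unlock(&list_lock);

        while ((e = dispose) != NULL) { /* teardown outside the lock */
                dispose = e->next;
                free(e);
        }
}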
 
 /**
- *     invalidate_inodes       - discard the inodes on a device
- *     @sb: superblock
+ * invalidate_inodes   - attempt to free all inodes on a superblock
+ * @sb:                superblock to operate on
  *
- *     Discard all of the inodes for a given superblock. If the discard
- *     fails because there are busy inodes then a non zero value is returned.
- *     If the discard is successful all the inodes have been discarded.
+ * Attempts to free all inodes for a given superblock.  Returns a non-zero
+ * value if any busy inodes remain, else zero.
  */
 int invalidate_inodes(struct super_block *sb)
 {
-       int busy;
-       LIST_HEAD(throw_away);
+       int busy = 0;
+       struct inode *inode, *next;
+       LIST_HEAD(dispose);
 
        down_write(&iprune_sem);
+
        spin_lock(&inode_lock);
-       fsnotify_unmount_inodes(&sb->s_inodes);
-       busy = invalidate_list(&sb->s_inodes, &throw_away);
+       list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
+               if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
+                       continue;
+               if (atomic_read(&inode->i_count)) {
+                       busy = 1;
+                       continue;
+               }
+
+               inode->i_state |= I_FREEING;
+
+               /*
+                * Move the inode off the IO lists and LRU once I_FREEING is
+                * set so that it won't get moved back on there if it is dirty.
+                */
+               list_move(&inode->i_lru, &dispose);
+               list_del_init(&inode->i_wb_list);
+               if (!(inode->i_state & (I_DIRTY | I_SYNC)))
+                       percpu_counter_dec(&nr_inodes_unused);
+       }
        spin_unlock(&inode_lock);
 
-       dispose_list(&throw_away);
+       dispose_list(&dispose);
        up_write(&iprune_sem);
 
        return busy;
 }
-EXPORT_SYMBOL(invalidate_inodes);
 
 static int can_unuse(struct inode *inode)
 {
-       if (inode->i_state)
+       if (inode->i_state & ~I_REFERENCED)
                return 0;
        if (inode_has_buffers(inode))
                return 0;
@@ -434,22 +578,24 @@ static int can_unuse(struct inode *inode)
 }
 
 /*
- * Scan `goal' inodes on the unused list for freeable ones. They are moved to
- * temporary list and then are freed outside inode_lock by dispose_list().
+ * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
+ * temporary list and then freed outside inode_lock by dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
- * pagecache removed.  We expect the final iput() on that inode to add it to
- * the front of the inode_unused list.  So look for it there and if the
- * inode is still freeable, proceed.  The right inode is found 99.9% of the
- * time in testing on a 4-way.
+ * pagecache removed.  If the inode has metadata buffers attached to
+ * mapping->private_list then try to remove them.
  *
- * If the inode has metadata buffers attached to mapping->private_list then
- * try to remove them.
+ * If the inode has the I_REFERENCED flag set, it has been used recently -
+ * the flag is set in iput_final(). When we encounter such an inode, clear
+ * the flag and move it to the back of the LRU so it gets another pass
+ * through the LRU before it is reclaimed. This is necessary because we do
+ * lazy LRU updates to minimise lock contention, so the LRU does not have
+ * strict ordering; we do not want to reclaim inodes with this flag set
+ * because they are exactly the ones that are out of order.
  */
 static void prune_icache(int nr_to_scan)
 {
        LIST_HEAD(freeable);
-       int nr_pruned = 0;
        int nr_scanned;
        unsigned long reap = 0;
 
@@ -458,13 +604,26 @@ static void prune_icache(int nr_to_scan)
        for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
                struct inode *inode;
 
-               if (list_empty(&inode_unused))
+               if (list_empty(&inode_lru))
                        break;
 
-               inode = list_entry(inode_unused.prev, struct inode, i_list);
+               inode = list_entry(inode_lru.prev, struct inode, i_lru);
 
-               if (inode->i_state || atomic_read(&inode->i_count)) {
-                       list_move(&inode->i_list, &inode_unused);
+               /*
+                * Referenced or dirty inodes are still in use. Give them
+                * another pass through the LRU as we cannot reclaim them now.
+                */
+               if (atomic_read(&inode->i_count) ||
+                   (inode->i_state & ~I_REFERENCED)) {
+                       list_del_init(&inode->i_lru);
+                       percpu_counter_dec(&nr_inodes_unused);
+                       continue;
+               }
+
+               /* recently referenced inodes get one more pass */
+               if (inode->i_state & I_REFERENCED) {
+                       list_move(&inode->i_lru, &inode_lru);
+                       inode->i_state &= ~I_REFERENCED;
                        continue;
                }
                if (inode_has_buffers(inode) || inode->i_data.nrpages) {
@@ -476,18 +635,23 @@ static void prune_icache(int nr_to_scan)
                        iput(inode);
                        spin_lock(&inode_lock);
 
-                       if (inode != list_entry(inode_unused.next,
-                                               struct inode, i_list))
+                       if (inode != list_entry(inode_lru.next,
+                                               struct inode, i_lru))
                                continue;       /* wrong inode or list_empty */
                        if (!can_unuse(inode))
                                continue;
                }
-               list_move(&inode->i_list, &freeable);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state |= I_FREEING;
-               nr_pruned++;
+
+               /*
+                * Move the inode off the IO lists and LRU once I_FREEING is
+                * set so that it won't get moved back on there if it is dirty.
+                */
+               list_move(&inode->i_lru, &freeable);
+               list_del_init(&inode->i_wb_list);
+               percpu_counter_dec(&nr_inodes_unused);
        }
-       inodes_stat.nr_unused -= nr_pruned;
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_INODESTEAL, reap);
        else
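
The I_REFERENCED handling above amounts to a second-chance policy over an
approximately ordered LRU. Below is a self-contained sketch of just that
policy, not part of this patch; the types and names are illustrative, and the
kernel's locking, statistics counters and buffer handling are omitted:

#include <stdbool.h>
#include <stddef.h>

struct node {
        struct node *prev, *next;       /* LRU linkage; head.prev is oldest */
        bool referenced;                /* lazy "used recently" hint */
        int refcount;
};

static struct node lru = { &lru, &lru, false, 0 };

static void lru_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void lru_add_front(struct node *n)
{
        n->next = lru.next;
        n->prev = &lru;
        lru.next->prev = n;
        lru.next = n;
}

/* Return the next reclaimable node, or NULL if the list drains. */
static struct node *prune_one(void)
{
        while (lru.prev != &lru) {
                struct node *n = lru.prev;      /* oldest candidate */

                if (n->refcount) {              /* busy: drop off the LRU */
                        lru_del(n);
                        continue;
                }
                if (n->referenced) {            /* second chance */
                        n->referenced = false;
                        lru_del(n);
                        lru_add_front(n);       /* survives one more scan */
                        continue;
                }
                lru_del(n);                     /* cold and idle: reclaim */
                return n;
        }
        return NULL;
}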
@@ -519,7 +683,7 @@ static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
                        return -1;
                prune_icache(nr);
        }
-       return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+       return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker icache_shrinker = {
@@ -530,9 +694,6 @@ static struct shrinker icache_shrinker = {
 static void __wait_on_freeing_inode(struct inode *inode);
 /*
  * Called with the inode lock held.
- * NOTE: we are not increasing the inode-refcount, you must call __iget()
- * by hand after calling find_inode now! This simplifies iunique and won't
- * add any additional branch in the common code.
  */
 static struct inode *find_inode(struct super_block *sb,
                                struct hlist_head *head,
@@ -552,9 +713,10 @@ repeat:
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
-               break;
+               __iget(inode);
+               return inode;
        }
-       return node ? inode : NULL;
+       return NULL;
 }
 
 /*
@@ -577,53 +739,49 @@ repeat:
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
-               break;
+               __iget(inode);
+               return inode;
        }
-       return node ? inode : NULL;
-}
-
-static unsigned long hash(struct super_block *sb, unsigned long hashval)
-{
-       unsigned long tmp;
-
-       tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
-                       L1_CACHE_BYTES;
-       tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
-       return tmp & I_HASHMASK;
-}
-
-static inline void
-__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
-                       struct inode *inode)
-{
-       inodes_stat.nr_inodes++;
-       list_add(&inode->i_list, &inode_in_use);
-       list_add(&inode->i_sb_list, &sb->s_inodes);
-       if (head)
-               hlist_add_head(&inode->i_hash, head);
+       return NULL;
 }
 
-/**
- * inode_add_to_lists - add a new inode to relevant lists
- * @sb: superblock inode belongs to
- * @inode: inode to mark in use
+/*
+ * Each cpu owns a range of LAST_INO_BATCH numbers.
+ * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
+ * to renew the exhausted range.
  *
- * When an inode is allocated it needs to be accounted for, added to the in use
- * list, the owning superblock and the inode hash. This needs to be done under
- * the inode_lock, so export a function to do this rather than the inode lock
- * itself. We calculate the hash list to add to here so it is all internal
- * which requires the caller to have already set up the inode number in the
- * inode to add.
+ * This does not significantly increase the overflow rate because every CPU
+ * can consume at most LAST_INO_BATCH-1 unused inode numbers, for a worst
+ * case of NR_CPUS*(LAST_INO_BATCH-1) wastage. With NR_CPUS=4096 and
+ * LAST_INO_BATCH=1024 that is ~0.1% of the 2^32 range. Even 50% wastage
+ * would only double the overflow rate, which does not seem too significant.
+ *
+ * On a 32-bit, non-LFS stat() call, glibc will generate an EOVERFLOW
+ * error if st_ino won't fit in the target struct field. Use a 32-bit
+ * counter here to attempt to avoid that.
  */
-void inode_add_to_lists(struct super_block *sb, struct inode *inode)
+#define LAST_INO_BATCH 1024
+static DEFINE_PER_CPU(unsigned int, last_ino);
+
+unsigned int get_next_ino(void)
 {
-       struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
+       unsigned int *p = &get_cpu_var(last_ino);
+       unsigned int res = *p;
 
-       spin_lock(&inode_lock);
-       __inode_add_to_lists(sb, head, inode);
-       spin_unlock(&inode_lock);
+#ifdef CONFIG_SMP
+       if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+               static atomic_t shared_last_ino;
+               int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
+
+               res = next - LAST_INO_BATCH;
+       }
+#endif
+
+       *p = ++res;
+       put_cpu_var(last_ino);
+       return res;
 }
-EXPORT_SYMBOL_GPL(inode_add_to_lists);
+EXPORT_SYMBOL(get_next_ino);
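
A userspace analogue of the batching above, using C11 atomics and a
thread-local cursor in place of the per-cpu variable; demo_next_ino() and
BATCH are illustrative names, not part of this patch:

#include <stdatomic.h>

#define BATCH 1024

static atomic_uint shared_next_ino;             /* global range allocator */
static _Thread_local unsigned int thread_ino;   /* per-thread cursor */

static unsigned int demo_next_ino(void)
{
        unsigned int res = thread_ino;

        /* Range exhausted (or first call): reserve a fresh batch.  The
         * shared counter is touched only once per BATCH allocations. */
        if ((res & (BATCH - 1)) == 0)
                res = atomic_fetch_add(&shared_next_ino, BATCH);

        thread_ino = ++res;
        return res;
}

Each thread hands out BATCH consecutive numbers before touching the shared
counter again, so contention drops by a factor of BATCH at the cost of up to
BATCH-1 skipped values per thread.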
 
 /**
  *     new_inode       - obtain an inode
@@ -639,12 +797,6 @@ EXPORT_SYMBOL_GPL(inode_add_to_lists);
  */
 struct inode *new_inode(struct super_block *sb)
 {
-       /*
-        * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
-        * error if st_ino won't fit in target struct field. Use 32bit counter
-        * here to attempt to avoid that.
-        */
-       static unsigned int last_ino;
        struct inode *inode;
 
        spin_lock_prefetch(&inode_lock);
@@ -652,8 +804,7 @@ struct inode *new_inode(struct super_block *sb)
        inode = alloc_inode(sb);
        if (inode) {
                spin_lock(&inode_lock);
-               __inode_add_to_lists(sb, NULL, inode);
-               inode->i_ino = ++last_ino;
+               __inode_sb_list_add(inode);
                inode->i_state = 0;
                spin_unlock(&inode_lock);
        }
@@ -664,7 +815,7 @@ EXPORT_SYMBOL(new_inode);
 void unlock_new_inode(struct inode *inode)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-       if (inode->i_mode & S_IFDIR) {
+       if (S_ISDIR(inode->i_mode)) {
                struct file_system_type *type = inode->i_sb->s_type;
 
                /* Set new key only if filesystem hasn't already changed it */
@@ -721,7 +872,8 @@ static struct inode *get_new_inode(struct super_block *sb,
                        if (set(inode, data))
                                goto set_failed;
 
-                       __inode_add_to_lists(sb, head, inode);
+                       hlist_add_head(&inode->i_hash, head);
+                       __inode_sb_list_add(inode);
                        inode->i_state = I_NEW;
                        spin_unlock(&inode_lock);
 
@@ -736,7 +888,6 @@ static struct inode *get_new_inode(struct super_block *sb,
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
-               __iget(old);
                spin_unlock(&inode_lock);
                destroy_inode(inode);
                inode = old;
@@ -768,7 +919,8 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
-                       __inode_add_to_lists(sb, head, inode);
+                       hlist_add_head(&inode->i_hash, head);
+                       __inode_sb_list_add(inode);
                        inode->i_state = I_NEW;
                        spin_unlock(&inode_lock);
 
@@ -783,7 +935,6 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
-               __iget(old);
                spin_unlock(&inode_lock);
                destroy_inode(inode);
                inode = old;
@@ -792,6 +943,27 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
        return inode;
 }
 
+/*
+ * Search the inode cache for a matching inode number.
+ * If we find one, then the inode number we are trying to
+ * allocate is not unique and so we should not use it.
+ *
+ * Returns 1 if the inode number is unique, 0 if it is not.
+ */
+static int test_inode_iunique(struct super_block *sb, unsigned long ino)
+{
+       struct hlist_head *b = inode_hashtable + hash(sb, ino);
+       struct hlist_node *node;
+       struct inode *inode;
+
+       hlist_for_each_entry(inode, node, b, i_hash) {
+               if (inode->i_ino == ino && inode->i_sb == sb)
+                       return 0;
+       }
+
+       return 1;
+}
+
 /**
  *     iunique - get a unique inode number
  *     @sb: superblock
@@ -813,19 +985,18 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
         * error if st_ino won't fit in target struct field. Use 32bit counter
         * here to attempt to avoid that.
         */
+       static DEFINE_SPINLOCK(iunique_lock);
        static unsigned int counter;
-       struct inode *inode;
-       struct hlist_head *head;
        ino_t res;
 
        spin_lock(&inode_lock);
+       spin_lock(&iunique_lock);
        do {
                if (counter <= max_reserved)
                        counter = max_reserved + 1;
                res = counter++;
-               head = inode_hashtable + hash(sb, res);
-               inode = find_inode_fast(sb, head, res);
-       } while (inode != NULL);
+       } while (!test_inode_iunique(sb, res));
+       spin_unlock(&iunique_lock);
        spin_unlock(&inode_lock);
 
        return res;
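
iunique()'s probing loop, reduced to a sketch with the inode hash replaced by
a caller-supplied in_use() predicate; this is illustrative only, and the real
code holds inode_lock and iunique_lock around the loop:

#include <stdbool.h>

static unsigned int demo_iunique(bool (*in_use)(unsigned int),
                                 unsigned int max_reserved)
{
        static unsigned int counter;
        unsigned int res;

        do {
                /* Skip the reserved range; this also resets the counter
                 * after it wraps around zero. */
                if (counter <= max_reserved)
                        counter = max_reserved + 1;
                res = counter++;
        } while (in_use(res));

        return res;
}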
@@ -877,7 +1048,6 @@ static struct inode *ifind(struct super_block *sb,
        spin_lock(&inode_lock);
        inode = find_inode(sb, head, test, data);
        if (inode) {
-               __iget(inode);
                spin_unlock(&inode_lock);
                if (likely(wait))
                        wait_on_inode(inode);
@@ -910,7 +1080,6 @@ static struct inode *ifind_fast(struct super_block *sb,
        spin_lock(&inode_lock);
        inode = find_inode_fast(sb, head, ino);
        if (inode) {
-               __iget(inode);
                spin_unlock(&inode_lock);
                wait_on_inode(inode);
                return inode;
@@ -1096,7 +1265,7 @@ int insert_inode_locked(struct inode *inode)
                __iget(old);
                spin_unlock(&inode_lock);
                wait_on_inode(old);
-               if (unlikely(!hlist_unhashed(&old->i_hash))) {
+               if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
@@ -1135,7 +1304,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                __iget(old);
                spin_unlock(&inode_lock);
                wait_on_inode(old);
-               if (unlikely(!hlist_unhashed(&old->i_hash))) {
+               if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
@@ -1144,36 +1313,6 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 }
 EXPORT_SYMBOL(insert_inode_locked4);
 
-/**
- *     __insert_inode_hash - hash an inode
- *     @inode: unhashed inode
- *     @hashval: unsigned long value used to locate this object in the
- *             inode_hashtable.
- *
- *     Add an inode to the inode hash for this superblock.
- */
-void __insert_inode_hash(struct inode *inode, unsigned long hashval)
-{
-       struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
-       spin_lock(&inode_lock);
-       hlist_add_head(&inode->i_hash, head);
-       spin_unlock(&inode_lock);
-}
-EXPORT_SYMBOL(__insert_inode_hash);
-
-/**
- *     remove_inode_hash - remove an inode from the hash
- *     @inode: inode to unhash
- *
- *     Remove an inode from the superblock.
- */
-void remove_inode_hash(struct inode *inode)
-{
-       spin_lock(&inode_lock);
-       hlist_del_init(&inode->i_hash);
-       spin_unlock(&inode_lock);
-}
-EXPORT_SYMBOL(remove_inode_hash);
 
 int generic_delete_inode(struct inode *inode)
 {
@@ -1188,7 +1327,7 @@ EXPORT_SYMBOL(generic_delete_inode);
  */
 int generic_drop_inode(struct inode *inode)
 {
-       return !inode->i_nlink || hlist_unhashed(&inode->i_hash);
+       return !inode->i_nlink || inode_unhashed(inode);
 }
 EXPORT_SYMBOL_GPL(generic_drop_inode);
 
@@ -1214,10 +1353,11 @@ static void iput_final(struct inode *inode)
                drop = generic_drop_inode(inode);
 
        if (!drop) {
-               if (!(inode->i_state & (I_DIRTY|I_SYNC)))
-                       list_move(&inode->i_list, &inode_unused);
-               inodes_stat.nr_unused++;
                if (sb->s_flags & MS_ACTIVE) {
+                       inode->i_state |= I_REFERENCED;
+                       if (!(inode->i_state & (I_DIRTY|I_SYNC))) {
+                               inode_lru_list_add(inode);
+                       }
                        spin_unlock(&inode_lock);
                        return;
                }
@@ -1228,19 +1368,23 @@ static void iput_final(struct inode *inode)
                spin_lock(&inode_lock);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
-               inodes_stat.nr_unused--;
-               hlist_del_init(&inode->i_hash);
+               __remove_inode_hash(inode);
        }
-       list_del_init(&inode->i_list);
-       list_del_init(&inode->i_sb_list);
+
        WARN_ON(inode->i_state & I_NEW);
        inode->i_state |= I_FREEING;
-       inodes_stat.nr_inodes--;
+
+       /*
+        * Move the inode off the IO lists and LRU once I_FREEING is
+        * set so that it won't get moved back on there if it is dirty.
+        */
+       inode_lru_list_del(inode);
+       list_del_init(&inode->i_wb_list);
+
+       __inode_sb_list_del(inode);
        spin_unlock(&inode_lock);
        evict(inode);
-       spin_lock(&inode_lock);
-       hlist_del_init(&inode->i_hash);
-       spin_unlock(&inode_lock);
+       remove_inode_hash(inode);
        wake_up_inode(inode);
        BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
        destroy_inode(inode);
@@ -1504,6 +1648,8 @@ void __init inode_init(void)
                                         SLAB_MEM_SPREAD),
                                         init_once);
        register_shrinker(&icache_shrinker);
+       percpu_counter_init(&nr_inodes, 0);
+       percpu_counter_init(&nr_inodes_unused, 0);
 
        /* Hash may have been set up in inode_init_early */
        if (!hashdist)
index a6910e91cee8799196e991c7cab98ad11206cf01..ebad3b90752dae0b37838734293ee36864af2c02 100644 (file)
@@ -101,3 +101,10 @@ extern void put_super(struct super_block *sb);
 struct nameidata;
 extern struct file *nameidata_to_filp(struct nameidata *);
 extern void release_open_intent(struct nameidata *);
+
+/*
+ * inode.c
+ */
+extern int get_nr_dirty_inodes(void);
+extern void evict_inodes(struct super_block *);
+extern int invalidate_inodes(struct super_block *);
index 09ff41a752a092431068b991b9e323bbe5276fbd..60c2b944d76267bbd40358f7384dfe3fc9240bd3 100644 (file)
@@ -962,25 +962,23 @@ static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
  * or getblk() if they are not.  Returns the number of blocks inserted
  * (-ve == error.)
  */
-int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
+int isofs_get_blocks(struct inode *inode, sector_t iblock,
                     struct buffer_head **bh, unsigned long nblocks)
 {
-       unsigned long b_off;
+       unsigned long b_off = iblock;
        unsigned offset, sect_size;
        unsigned int firstext;
        unsigned long nextblk, nextoff;
-       long iblock = (long)iblock_s;
        int section, rv, error;
        struct iso_inode_info *ei = ISOFS_I(inode);
 
        error = -EIO;
        rv = 0;
-       if (iblock < 0 || iblock != iblock_s) {
+       if (iblock != b_off) {
                printk(KERN_DEBUG "%s: block number too large\n", __func__);
                goto abort;
        }
 
-       b_off = iblock;
 
        offset = 0;
        firstext = ei->i_first_extent;
@@ -998,8 +996,9 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
                 * I/O errors.
                 */
                if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
-                       printk(KERN_DEBUG "%s: block >= EOF (%ld, %ld)\n",
-                               __func__, iblock, (unsigned long) inode->i_size);
+                       printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
+                               __func__, b_off,
+                               (unsigned long long)inode->i_size);
                        goto abort;
                }
 
@@ -1025,9 +1024,9 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
                        if (++section > 100) {
                                printk(KERN_DEBUG "%s: More than 100 file sections ?!?"
                                        " aborting...\n", __func__);
-                               printk(KERN_DEBUG "%s: block=%ld firstext=%u sect_size=%u "
+                               printk(KERN_DEBUG "%s: block=%lu firstext=%u sect_size=%u "
                                        "nextblk=%lu nextoff=%lu\n", __func__,
-                                       iblock, firstext, (unsigned) sect_size,
+                                       b_off, firstext, (unsigned) sect_size,
                                        nextblk, nextoff);
                                goto abort;
                        }
index ed78a3cf3cb047046d8ea385af9554e63214aac8..79121aa5858b8a4aa02861ba57cf17d9a8dfe13b 100644 (file)
@@ -289,7 +289,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
                mutex_unlock(&f->sem);
                d_instantiate(dentry, old_dentry->d_inode);
                dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
-               atomic_inc(&old_dentry->d_inode->i_count);
+               ihold(old_dentry->d_inode);
        }
        return ret;
 }
@@ -864,7 +864,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
                /* Might as well let the VFS know */
                d_instantiate(new_dentry, old_dentry->d_inode);
-               atomic_inc(&old_dentry->d_inode->i_count);
+               ihold(old_dentry->d_inode);
                new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
                return ret;
        }
index f8332dc8eeb2111430d2a16ae716b6d1925fde99..3a09423b6c22c6bf0d1635ddd572ef4620c146b2 100644 (file)
@@ -497,7 +497,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
         * appear hashed, but do not put on any lists.  hlist_del()
         * will work fine and require no locking.
         */
-       ip->i_hash.pprev = &ip->i_hash.next;
+       hlist_add_fake(&ip->i_hash);
 
        return (ip);
 }
index d945ea76b445249de68fece3be7dea631cf2b6b1..9466957ec84173f56707bd591e97b007c7bba57c 100644 (file)
@@ -1279,7 +1279,7 @@ int txCommit(tid_t tid,           /* transaction identifier */
         * lazy commit thread finishes processing
         */
        if (tblk->xflag & COMMIT_DELETE) {
-               atomic_inc(&tblk->u.ip->i_count);
+               ihold(tblk->u.ip);
                /*
                 * Avoid a rare deadlock
                 *
index a9cf8e8675be8713dc5f9311faa20fefcac85156..231ca4af9bce4f27d2c12d71e9c0a237616e512d 100644 (file)
@@ -839,7 +839,7 @@ static int jfs_link(struct dentry *old_dentry,
        ip->i_ctime = CURRENT_TIME;
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        mark_inode_dirty(dir);
-       atomic_inc(&ip->i_count);
+       ihold(ip);
 
        iplist[0] = ip;
        iplist[1] = dir;
index 62baa0387d6e03869e726b89ffaddccce4fc090c..304a5132ca270d018799d3003170bd813c23dd9b 100644 (file)
@@ -255,7 +255,7 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
 
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        inc_nlink(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        dget(dentry);
        d_instantiate(dentry, inode);
        return 0;
@@ -892,10 +892,6 @@ EXPORT_SYMBOL_GPL(generic_fh_to_parent);
  */
 int generic_file_fsync(struct file *file, int datasync)
 {
-       struct writeback_control wbc = {
-               .sync_mode = WB_SYNC_ALL,
-               .nr_to_write = 0, /* metadata-only; caller takes care of data */
-       };
        struct inode *inode = file->f_mapping->host;
        int err;
        int ret;
@@ -906,7 +902,7 @@ int generic_file_fsync(struct file *file, int datasync)
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return ret;
 
-       err = sync_inode(inode, &wbc);
+       err = sync_inode_metadata(inode, 1);
        if (ret == 0)
                ret = err;
        return ret;
index 74c3df99c0e144702d259991ed42c84d93593adf..50ec15927aab3164a456875e6c11b1f4fa2f8bef 100644 (file)
@@ -2114,7 +2114,7 @@ EXPORT_SYMBOL_GPL(vfs_cancel_lock);
 #include <linux/seq_file.h>
 
 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
-                                                       int id, char *pfx)
+                           loff_t id, char *pfx)
 {
        struct inode *inode = NULL;
        unsigned int fl_pid;
@@ -2127,7 +2127,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
        if (fl->fl_file != NULL)
                inode = fl->fl_file->f_path.dentry->d_inode;
 
-       seq_printf(f, "%d:%s ", id, pfx);
+       seq_printf(f, "%lld:%s ", id, pfx);
        if (IS_POSIX(fl)) {
                seq_printf(f, "%6s %s ",
                             (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
@@ -2190,24 +2190,27 @@ static int locks_show(struct seq_file *f, void *v)
 
        fl = list_entry(v, struct file_lock, fl_link);
 
-       lock_get_status(f, fl, (long)f->private, "");
+       lock_get_status(f, fl, *((loff_t *)f->private), "");
 
        list_for_each_entry(bfl, &fl->fl_block, fl_block)
-               lock_get_status(f, bfl, (long)f->private, " ->");
+               lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
 
-       f->private++;
        return 0;
 }
 
 static void *locks_start(struct seq_file *f, loff_t *pos)
 {
+       loff_t *p = f->private;
+
        lock_flocks();
-       f->private = (void *)1;
+       *p = (*pos + 1);
        return seq_list_start(&file_lock_list, *pos);
 }
 
 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
 {
+       loff_t *p = f->private;
+       ++*p;
        return seq_list_next(v, &file_lock_list, pos);
 }
 
@@ -2225,14 +2228,14 @@ static const struct seq_operations locks_seq_operations = {
 
 static int locks_open(struct inode *inode, struct file *filp)
 {
-       return seq_open(filp, &locks_seq_operations);
+       return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
 }
 
 static const struct file_operations proc_locks_operations = {
        .open           = locks_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = seq_release,
+       .release        = seq_release_private,
 };
 
 static int __init proc_locks_init(void)
index 1eb4e89e045b082d9f02d6d005edd3f61e5c48c8..409dfd65e9a1764029edf55071069e61f447d29d 100644 (file)
@@ -569,7 +569,7 @@ static int logfs_link(struct dentry *old_dentry, struct inode *dir,
                return -EMLINK;
 
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        inode->i_nlink++;
        mark_inode_dirty_sync(inode);
 
index f3f3578393a417085812ba1ad7e0b7c1e4e5c981..c0d35a3accef98dd5ed7619214146e0f0f5d3260 100644 (file)
@@ -101,7 +101,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
 
        inode->i_ctime = CURRENT_TIME_SEC;
        inode_inc_link_count(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        return add_nondir(dentry, inode);
 }
 
index 24896e8335658c9c0ce2c81c117cb664b964da70..f7dbc06857abfe6e1959654f7fd66b393dd161d0 100644 (file)
@@ -1121,11 +1121,13 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
 static struct dentry *__lookup_hash(struct qstr *name,
                struct dentry *base, struct nameidata *nd)
 {
+       struct inode *inode = base->d_inode;
        struct dentry *dentry;
-       struct inode *inode;
        int err;
 
-       inode = base->d_inode;
+       err = exec_permission(inode);
+       if (err)
+               return ERR_PTR(err);
 
        /*
         * See if the low-level filesystem might want
@@ -1161,11 +1163,6 @@ out:
  */
 static struct dentry *lookup_hash(struct nameidata *nd)
 {
-       int err;
-
-       err = exec_permission(nd->path.dentry->d_inode);
-       if (err)
-               return ERR_PTR(err);
        return __lookup_hash(&nd->last, nd->path.dentry, nd);
 }
 
@@ -1213,9 +1210,6 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
        if (err)
                return ERR_PTR(err);
 
-       err = exec_permission(base->d_inode);
-       if (err)
-               return ERR_PTR(err);
        return __lookup_hash(&this, base, NULL);
 }
 
@@ -2291,7 +2285,7 @@ static long do_unlinkat(int dfd, const char __user *pathname)
                        goto slashes;
                inode = dentry->d_inode;
                if (inode)
-                       atomic_inc(&inode->i_count);
+                       ihold(inode);
                error = mnt_want_write(nd.path.mnt);
                if (error)
                        goto exit2;
index 7ca5182c0bedd547e21531e777175d2058ea2982..8a415c9c5e552efd945cdb85af6f74af4b2b6de6 100644 (file)
@@ -595,7 +595,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
                                goto out_free;
                }
 
-               mnt->mnt_flags = old->mnt_flags;
+               mnt->mnt_flags = old->mnt_flags & ~MNT_WRITE_HOLD;
                atomic_inc(&sb->s_active);
                mnt->mnt_sb = sb;
                mnt->mnt_root = dget(root);
index 257e4052492e9eb174d2db47b4cdc94bd8f81ef6..07ac3847e562b54c26efd30c9a9eabab468c309d 100644 (file)
@@ -1801,7 +1801,7 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
        d_drop(dentry);
        error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
        if (error == 0) {
-               atomic_inc(&inode->i_count);
+               ihold(inode);
                d_add(dentry, inode);
        }
        return error;
index a70e446e1605fa43fb1b23b96418d8296773109f..ac7b814ce1620f3e5cb0afa3334d31d1d540a52b 100644 (file)
@@ -54,8 +54,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
                        iput(inode);
                        return -ENOMEM;
                }
-               /* Circumvent igrab(): we know the inode is not being freed */
-               atomic_inc(&inode->i_count);
+               ihold(inode);
                /*
                 * Ensure that this dentry is invisible to d_find_alias().
                 * Otherwise, it may be spliced into the tree by
index 460df3652889af49cfb5d5ed2ef36382720c610a..903908a20023bf83b08d7fc6c808cbd4f20e0aa2 100644 (file)
@@ -101,6 +101,7 @@ static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = "";
 /* server:export path string passed to super.c */
 static char nfs_root_device[NFS_MAXPATHLEN + 1] __initdata = "";
 
+#ifdef RPC_DEBUG
 /*
  * When the "nfsrootdebug" kernel command line option is specified,
  * enable debugging messages for NFSROOT.
@@ -112,6 +113,7 @@ static int __init nfs_root_debug(char *__unused)
 }
 
 __setup("nfsrootdebug", nfs_root_debug);
+#endif
 
 /*
  *  Parse NFS server and directory information passed on the kernel
index 605e292501f493cbf921c8ee0a4b22caf863572c..4c14c17a5276a410bb383e3650f106ab56cf47d6 100644 (file)
@@ -290,9 +290,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
        nfs_pageio_cond_complete(pgio, page->index);
-       ret = nfs_page_async_flush(pgio, page,
-                       wbc->sync_mode == WB_SYNC_NONE ||
-                       wbc->nonblocking != 0);
+       ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
        if (ret == -EAGAIN) {
                redirty_page_for_writepage(wbc, page);
                ret = 0;
index 661a6cf8e8265eaeca988e2aabc0ba1ede767717..184938fcff04d5dff712b8bee01f5d7e41bb20ad 100644 (file)
@@ -281,23 +281,13 @@ commit_metadata(struct svc_fh *fhp)
 {
        struct inode *inode = fhp->fh_dentry->d_inode;
        const struct export_operations *export_ops = inode->i_sb->s_export_op;
-       int error = 0;
 
        if (!EX_ISSYNC(fhp->fh_export))
                return 0;
 
-       if (export_ops->commit_metadata) {
-               error = export_ops->commit_metadata(inode);
-       } else {
-               struct writeback_control wbc = {
-                       .sync_mode = WB_SYNC_ALL,
-                       .nr_to_write = 0, /* metadata only */
-               };
-
-               error = sync_inode(inode, &wbc);
-       }
-
-       return error;
+       if (export_ops->commit_metadata)
+               return export_ops->commit_metadata(inode);
+       return sync_inode_metadata(inode, 1);
 }
 
 /*
index 185d1607cb00c2f6c0b1e224ea148af0fb17e224..6e9557ecf161fd664a0715d9a3b8e0f3e34501dd 100644 (file)
@@ -207,7 +207,7 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
 
        inode->i_ctime = CURRENT_TIME;
        inode_inc_link_count(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
 
        err = nilfs_add_nondir(dentry, inode);
        if (!err)
index d926af626177d9ca3fd275cf1be57d8fe105a0d2..687d090cea341fea2f0cf88ac9b4b41e36dcf957 100644 (file)
@@ -1609,7 +1609,7 @@ nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
        kunmap_atomic(kaddr, KM_USER0);
 
        if (!TestSetPageWriteback(clone_page))
-               inc_zone_page_state(clone_page, NR_WRITEBACK);
+               account_page_writeback(clone_page);
        unlock_page(clone_page);
 
        return 0;
index 36802420d69a94024eefde05f1eab9f6a9932241..4498a208df940b92117323dcad6cb355969fd164 100644 (file)
@@ -88,8 +88,6 @@ void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
 {
        struct dentry *parent;
        struct inode *p_inode;
-       bool send = false;
-       bool should_update_children = false;
 
        if (!dentry)
                dentry = path->dentry;
@@ -97,29 +95,12 @@ void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
        if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
                return;
 
-       spin_lock(&dentry->d_lock);
-       parent = dentry->d_parent;
+       parent = dget_parent(dentry);
        p_inode = parent->d_inode;
 
-       if (fsnotify_inode_watches_children(p_inode)) {
-               if (p_inode->i_fsnotify_mask & mask) {
-                       dget(parent);
-                       send = true;
-               }
-       } else {
-               /*
-                * The parent doesn't care about events on it's children but
-                * at least one child thought it did.  We need to run all the
-                * children and update their d_flags to let them know p_inode
-                * doesn't care about them any more.
-                */
-               dget(parent);
-               should_update_children = true;
-       }
-
-       spin_unlock(&dentry->d_lock);
-
-       if (send) {
+       if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+               __fsnotify_update_child_dentry_flags(p_inode);
+       else if (p_inode->i_fsnotify_mask & mask) {
                /* we are notifying a parent so come up with the new mask which
                 * specifies these are events which came from a child. */
                mask |= FS_EVENT_ON_CHILD;
@@ -130,13 +111,9 @@ void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
                else
                        fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
                                 dentry->d_name.name, 0);
-               dput(parent);
        }
 
-       if (unlikely(should_update_children)) {
-               __fsnotify_update_child_dentry_flags(p_inode);
-               dput(parent);
-       }
+       dput(parent);
 }
 EXPORT_SYMBOL_GPL(__fsnotify_parent);
 
index 33297c00506050afb6bbe6227f8d47e4eb9cdd11..21ed10660b80770c34eb136a311254330568fac2 100644 (file)
@@ -240,6 +240,7 @@ void fsnotify_unmount_inodes(struct list_head *list)
 {
        struct inode *inode, *next_i, *need_iput = NULL;
 
+       spin_lock(&inode_lock);
        list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
                struct inode *need_iput_tmp;
 
@@ -297,4 +298,5 @@ void fsnotify_unmount_inodes(struct list_head *list)
 
                spin_lock(&inode_lock);
        }
+       spin_unlock(&inode_lock);
 }
index 19c5180f8a28c063b9a8cf5fefc58a6c61512455..d3fbe5730bfc1bb1c1264e9e3923c9425321fea0 100644 (file)
@@ -2911,8 +2911,8 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
                goto unl_upcase_iput_tmp_ino_err_out_now;
        }
        if ((sb->s_root = d_alloc_root(vol->root_ino))) {
-               /* We increment i_count simulating an ntfs_iget(). */
-               atomic_inc(&vol->root_ino->i_count);
+               /* We grab a reference, simulating an ntfs_iget(). */
+               ihold(vol->root_ino);
                ntfs_debug("Exiting, status successful.");
                /* Release the default upcase if it has no users. */
                mutex_lock(&ntfs_lock);
@@ -3021,21 +3021,6 @@ iput_tmp_ino_err_out_now:
        if (vol->mft_ino && vol->mft_ino != tmp_ino)
                iput(vol->mft_ino);
        vol->mft_ino = NULL;
-       /*
-        * This is needed to get ntfs_clear_extent_inode() called for each
-        * inode we have ever called ntfs_iget()/iput() on, otherwise we A)
-        * leak resources and B) a subsequent mount fails automatically due to
-        * ntfs_iget() never calling down into our ntfs_read_locked_inode()
-        * method again... FIXME: Do we need to do this twice now because of
-        * attribute inodes? I think not, so leave as is for now... (AIA)
-        */
-       if (invalidate_inodes(sb)) {
-               ntfs_error(sb, "Busy inodes left. This is most likely a NTFS "
-                               "driver bug.");
-               /* Copied from fs/super.c. I just love this message. (-; */
-               printk("NTFS: Busy inodes after umount. Self-destruct in 5 "
-                               "seconds.  Have a nice day...\n");
-       }
        /* Errors at this stage are irrelevant. */
 err_out_now:
        sb->s_fs_info = NULL;
index 5cfeee11815881b04a3250d42a72838e40289200..f1e962cb3b73084699a182933b7760926c36880e 100644 (file)
@@ -165,7 +165,7 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
         * ocfs2 never allocates in this function - the only time we
         * need to use BH_New is when we're extending i_size on a file
         * system which doesn't support holes, in which case BH_New
-        * allows block_prepare_write() to zero.
+        * allows __block_write_begin() to zero.
         *
         * If we see this on a sparse file system, then a truncate has
         * raced us and removed the cluster. In this case, we clear
@@ -407,21 +407,6 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
        return ret;
 }
 
-/*
- * This is called from ocfs2_write_zero_page() which has handled it's
- * own cluster locking and has ensured allocation exists for those
- * blocks to be written.
- */
-int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
-                              unsigned from, unsigned to)
-{
-       int ret;
-
-       ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
-       return ret;
-}
-
 /* Taken from ext3. We don't necessarily need the full blown
  * functionality yet, but IMHO it's better to cut and paste the whole
  * thing so we can avoid introducing our own bugs (and easily pick up
@@ -732,7 +717,7 @@ static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
 }
 
 /*
- * Some of this taken from block_prepare_write(). We already have our
+ * Some of this taken from __block_write_begin(). We already have our
  * mapping by now though, and the entire write will be allocating or
  * it won't, so not much need to use BH_New.
  *
index 7606f663da6d1e1181dac172736ed9dfbbb3fd84..76bfdfda691a03b1790ac9670a8bb79d042e7584 100644 (file)
@@ -22,9 +22,6 @@
 #ifndef OCFS2_AOPS_H
 #define OCFS2_AOPS_H
 
-int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
-                              unsigned from, unsigned to);
-
 handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
                                                         struct page *page,
                                                         unsigned from,
index a7ebd9d42dc8853e89dd381af65afeab3e5df8fe..75e115f1bd730d2b12d4e1d924d203a5864d5150 100644 (file)
@@ -400,6 +400,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
        if (inode) {
                ip = DLMFS_I(inode);
 
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = current_fsgid();
@@ -425,6 +426,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
        if (!inode)
                return NULL;
 
+       inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
index 1ca6867935bbc62e4f733f703951d9f39c7bab93..77b4c04a2809831e7209d1be3f8e860a0ba7edc8 100644 (file)
@@ -796,13 +796,12 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
                block_end = block_start + (1 << inode->i_blkbits);
 
                /*
-                * block_start is block-aligned.  Bump it by one to
-                * force ocfs2_{prepare,commit}_write() to zero the
+                * block_start is block-aligned.  Bump it by one to force
+                * __block_write_begin and block_commit_write to zero the
                 * whole block.
                 */
-               ret = ocfs2_prepare_write_nolock(inode, page,
-                                                block_start + 1,
-                                                block_start + 1);
+               ret = __block_write_begin(page, block_start + 1, 0,
+                                         ocfs2_get_block);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out_unlock;
index e7bde21149aee4f4e1dfed501069358927a064d7..ff5744e1e36fcb9200158eb419abc3ebdf33ec5b 100644 (file)
@@ -742,7 +742,7 @@ static int ocfs2_link(struct dentry *old_dentry,
                goto out_commit;
        }
 
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        dentry->d_op = &ocfs2_dentry_ops;
        d_instantiate(dentry, inode);
 
index 37eb1ebeaa906ea068f940e6298f8cd632feacb4..d2d7566ce68e5cb4fc9713e531670d8f05d52d02 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -954,6 +954,8 @@ static struct inode * get_pipe_inode(void)
        if (!inode)
                goto fail_inode;
 
+       inode->i_ino = get_next_ino();
+
        pipe = alloc_pipe_info(inode);
        if (!pipe)
                goto fail_iput;
index 50f8f0600f0657d47de7081883f7dfe0fa460b69..6a0068841d96c00ef3b1bbc38182bf1db9b54f17 100644 (file)
@@ -33,8 +33,8 @@ config PROC_KCORE
        depends on PROC_FS && MMU
 
 config PROC_VMCORE
-        bool "/proc/vmcore support (EXPERIMENTAL)"
-        depends on PROC_FS && CRASH_DUMP
+       bool "/proc/vmcore support"
+       depends on PROC_FS && CRASH_DUMP
        default y
         help
         Exports the dump image of crashed kernel in ELF format.
index dc5d5f51f3fe4dc564a19e06cb330d09abc9b6a4..9b094c1c846542160ff6c265171467d5ef0050ff 100644 (file)
@@ -771,6 +771,8 @@ static const struct file_operations proc_single_file_operations = {
 static int mem_open(struct inode* inode, struct file* file)
 {
        file->private_data = (void*)((long)current->self_exec_id);
+       /* OK to pass negative loff_t, we can catch out-of-range */
+       file->f_mode |= FMODE_UNSIGNED_OFFSET;
        return 0;
 }
 
@@ -1023,28 +1025,47 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
-       if (copy_from_user(buffer, buf, count))
-               return -EFAULT;
+       if (copy_from_user(buffer, buf, count)) {
+               err = -EFAULT;
+               goto out;
+       }
 
        err = strict_strtol(strstrip(buffer), 0, &oom_adjust);
        if (err)
-               return -EINVAL;
+               goto out;
        if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
-            oom_adjust != OOM_DISABLE)
-               return -EINVAL;
+            oom_adjust != OOM_DISABLE) {
+               err = -EINVAL;
+               goto out;
+       }
 
        task = get_proc_task(file->f_path.dentry->d_inode);
-       if (!task)
-               return -ESRCH;
+       if (!task) {
+               err = -ESRCH;
+               goto out;
+       }
+
+       task_lock(task);
+       if (!task->mm) {
+               err = -EINVAL;
+               goto err_task_lock;
+       }
+
        if (!lock_task_sighand(task, &flags)) {
-               put_task_struct(task);
-               return -ESRCH;
+               err = -ESRCH;
+               goto err_task_lock;
        }
 
        if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
-               unlock_task_sighand(task, &flags);
-               put_task_struct(task);
-               return -EACCES;
+               err = -EACCES;
+               goto err_sighand;
+       }
+
+       if (oom_adjust != task->signal->oom_adj) {
+               if (oom_adjust == OOM_DISABLE)
+                       atomic_inc(&task->mm->oom_disable_count);
+               if (task->signal->oom_adj == OOM_DISABLE)
+                       atomic_dec(&task->mm->oom_disable_count);
        }
 
        /*
@@ -1065,10 +1086,13 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
        else
                task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) /
                                                                -OOM_DISABLE;
+err_sighand:
        unlock_task_sighand(task, &flags);
+err_task_lock:
+       task_unlock(task);
        put_task_struct(task);
-
-       return count;
+out:
+       return err < 0 ? err : count;
 }
 
 static const struct file_operations proc_oom_adjust_operations = {
@@ -1109,30 +1133,49 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
-       if (copy_from_user(buffer, buf, count))
-               return -EFAULT;
+       if (copy_from_user(buffer, buf, count)) {
+               err = -EFAULT;
+               goto out;
+       }
 
        err = strict_strtol(strstrip(buffer), 0, &oom_score_adj);
        if (err)
-               return -EINVAL;
+               goto out;
        if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
-                       oom_score_adj > OOM_SCORE_ADJ_MAX)
-               return -EINVAL;
+                       oom_score_adj > OOM_SCORE_ADJ_MAX) {
+               err = -EINVAL;
+               goto out;
+       }
 
        task = get_proc_task(file->f_path.dentry->d_inode);
-       if (!task)
-               return -ESRCH;
+       if (!task) {
+               err = -ESRCH;
+               goto out;
+       }
+
+       task_lock(task);
+       if (!task->mm) {
+               err = -EINVAL;
+               goto err_task_lock;
+       }
+
        if (!lock_task_sighand(task, &flags)) {
-               put_task_struct(task);
-               return -ESRCH;
+               err = -ESRCH;
+               goto err_task_lock;
        }
+
        if (oom_score_adj < task->signal->oom_score_adj &&
                        !capable(CAP_SYS_RESOURCE)) {
-               unlock_task_sighand(task, &flags);
-               put_task_struct(task);
-               return -EACCES;
+               err = -EACCES;
+               goto err_sighand;
        }
 
+       if (oom_score_adj != task->signal->oom_score_adj) {
+               if (oom_score_adj == OOM_SCORE_ADJ_MIN)
+                       atomic_inc(&task->mm->oom_disable_count);
+               if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+                       atomic_dec(&task->mm->oom_disable_count);
+       }
        task->signal->oom_score_adj = oom_score_adj;
        /*
         * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
@@ -1143,9 +1186,13 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
        else
                task->signal->oom_adj = (oom_score_adj * OOM_ADJUST_MAX) /
                                                        OOM_SCORE_ADJ_MAX;
+err_sighand:
        unlock_task_sighand(task, &flags);
+err_task_lock:
+       task_unlock(task);
        put_task_struct(task);
-       return count;
+out:
+       return err < 0 ? err : count;
 }
 
 static const struct file_operations proc_oom_score_adj_operations = {
@@ -1601,6 +1648,7 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
 
        /* Common stuff */
        ei = PROC_I(inode);
+       inode->i_ino = get_next_ino();
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
        inode->i_op = &proc_def_inode_operations;
 
@@ -2547,6 +2595,7 @@ static struct dentry *proc_base_instantiate(struct inode *dir,
 
        /* Initialize the inode */
        ei = PROC_I(inode);
+       inode->i_ino = get_next_ino();
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 
        /*
index 2fc52552271d91cf904213ee98576e20237ec1da..b652cb00906b02927c7f4afa048d6bd83615936d 100644 (file)
@@ -23,6 +23,8 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
        if (!inode)
                goto out;
 
+       inode->i_ino = get_next_ino();
+
        sysctl_head_get(head);
        ei = PROC_I(inode);
        ei->sysctl = head;
index a5ebae70dc6d4c90213770630f185d56ec6d6009..67fadb1ad2c11a6428480aad801ee616d90eebac 100644 (file)
@@ -58,6 +58,7 @@ struct inode *ramfs_get_inode(struct super_block *sb,
        struct inode * inode = new_inode(sb);
 
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                inode->i_mapping->a_ops = &ramfs_aops;
                inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
index e757ef26e4cecb6d47fd628b32f204e90c4b8497..9cd9d148105d287b55c9f60af4034f300cc6d1af 100644 (file)
@@ -31,6 +31,20 @@ const struct file_operations generic_ro_fops = {
 
 EXPORT_SYMBOL(generic_ro_fops);
 
+static int
+__negative_fpos_check(struct file *file, loff_t pos, size_t count)
+{
+       /*
+        * pos or pos+count is negative here; check for overflow.
+        * An overly large "count" will be caught in rw_verify_area().
+        */
+       if ((pos < 0) && (pos + count < pos))
+               return -EOVERFLOW;
+       if (file->f_mode & FMODE_UNSIGNED_OFFSET)
+               return 0;
+       return -EINVAL;
+}
+
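
A standalone rendering of the policy __negative_fpos_check() implements:
negative offsets are an error unless the file opted into unsigned offsets,
and a pos+count that wraps past the top of the unsigned space is reported as
EOVERFLOW. The demo_* names are illustrative, not part of this patch:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

typedef long long demo_loff_t;

struct demo_file {
        bool unsigned_offset;           /* like FMODE_UNSIGNED_OFFSET */
};

static int demo_fpos_check(const struct demo_file *f,
                           demo_loff_t pos, size_t count)
{
        /* Caller established that pos or pos + count is negative.  Treat
         * the offset as unsigned and flag a wrap past the top. */
        if (pos < 0 &&
            (unsigned long long)pos + count < (unsigned long long)pos)
                return -EOVERFLOW;
        if (f->unsigned_offset)
                return 0;               /* offsets treated as unsigned */
        return -EINVAL;                 /* regular files reject negatives */
}

/*
 * Example: with unsigned_offset set (e.g. /proc/<pid>/mem), pos = -4096
 * is accepted; on a regular file the same pos yields -EINVAL.
 */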
 /**
  * generic_file_llseek_unlocked - lockless generic llseek implementation
  * @file:      file structure to seek on
@@ -62,7 +76,9 @@ generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin)
                break;
        }
 
-       if (offset < 0 || offset > inode->i_sb->s_maxbytes)
+       if (offset < 0 && __negative_fpos_check(file, offset, 0))
+               return -EINVAL;
+       if (offset > inode->i_sb->s_maxbytes)
                return -EINVAL;
 
        /* Special lock needed here? */
@@ -137,7 +153,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
                        offset += file->f_pos;
        }
        retval = -EINVAL;
-       if (offset >= 0) {
+       if (offset >= 0 || !__negative_fpos_check(file, offset, 0)) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
@@ -221,6 +237,7 @@ bad:
 }
 #endif
 
+
 /*
  * rw_verify_area doesn't like huge counts. We limit
  * them to something that fits in "int" so that others
@@ -238,8 +255,11 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count
        if (unlikely((ssize_t) count < 0))
                return retval;
        pos = *ppos;
-       if (unlikely((pos < 0) || (loff_t) (pos + count) < 0))
-               return retval;
+       if (unlikely((pos < 0) || (loff_t) (pos + count) < 0)) {
+               retval = __negative_fpos_check(file, pos, count);
+               if (retval)
+                       return retval;
+       }
 
        if (unlikely(inode->i_flock && mandatory_lock(inode))) {
                retval = locks_mandatory_area(
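
The new FMODE_UNSIGNED_OFFSET bit (defined in the fs.h hunk further down) lets files like /dev/kmem use the full unsigned range of loff_t. __negative_fpos_check() therefore rejects a negative position outright only on files without that bit, while still catching arithmetic wrap-around on files that have it. Restating the helper's decision table as a sketch (not new API):

    /* pos is negative and pos + count wraps back around: always an error */
    if ((pos < 0) && (pos + count < pos))
            return -EOVERFLOW;
    /* otherwise a negative offset is legal only with FMODE_UNSIGNED_OFFSET */
    return (file->f_mode & FMODE_UNSIGNED_OFFSET) ? 0 : -EINVAL;
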
index caa758377d66b8ba5530d029635d8cdfa98048ba..41656d40dc5c87fc8bcfd9ec5aea4bc635ebc092 100644 (file)
@@ -22,8 +22,6 @@
 
 int reiserfs_commit_write(struct file *f, struct page *page,
                          unsigned from, unsigned to);
-int reiserfs_prepare_write(struct file *f, struct page *page,
-                          unsigned from, unsigned to);
 
 void reiserfs_evict_inode(struct inode *inode)
 {
@@ -165,7 +163,7 @@ inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
 ** but tail is still sitting in a direct item, and we can't write to
 ** it.  So, look through this page, and check all the mapped buffers
 ** to make sure they have valid block numbers.  Any that don't need
-** to be unmapped, so that block_prepare_write will correctly call
+** to be unmapped, so that __block_write_begin will correctly call
 ** reiserfs_get_block to convert the tail into an unformatted node
 */
 static inline void fix_tail_page_for_writing(struct page *page)
@@ -439,13 +437,13 @@ static int reiserfs_bmap(struct inode *inode, sector_t block,
 }
 
 /* special version of get_block that is only used by grab_tail_page right
-** now.  It is sent to block_prepare_write, and when you try to get a
+** now.  It is sent to __block_write_begin, and when you try to get a
 ** block past the end of the file (or a block from a hole) it returns
-** -ENOENT instead of a valid buffer.  block_prepare_write expects to
+** -ENOENT instead of a valid buffer.  __block_write_begin expects to
 ** be able to do i/o on the buffers returned, unless an error value
 ** is also returned.
 **
-** So, this allows block_prepare_write to be used for reading a single block
+** So, this allows __block_write_begin to be used for reading a single block
 ** in a page, where it does not produce a valid page for holes or past the
 ** end of the file.  This turns out to be exactly what we need for reading
 ** tails for conversion.
@@ -558,11 +556,12 @@ static int convert_tail_for_hole(struct inode *inode,
         **
         ** We must fix the tail page for writing because it might have buffers
         ** that are mapped, but have a block number of 0.  This indicates tail
-        ** data that has been read directly into the page, and block_prepare_write
-        ** won't trigger a get_block in this case.
+        ** data that has been read directly into the page, and
+        ** __block_write_begin won't trigger a get_block in this case.
         */
        fix_tail_page_for_writing(tail_page);
-       retval = reiserfs_prepare_write(NULL, tail_page, tail_start, tail_end);
+       retval = __reiserfs_write_begin(tail_page, tail_start,
+                                     tail_end - tail_start);
        if (retval)
                goto unlock;
 
@@ -2033,7 +2032,7 @@ static int grab_tail_page(struct inode *inode,
        /* start within the page of the last block in the file */
        start = (offset / blocksize) * blocksize;
 
-       error = block_prepare_write(page, start, offset,
+       error = __block_write_begin(page, start, offset - start,
                                    reiserfs_get_block_create_0);
        if (error)
                goto unlock;
@@ -2438,7 +2437,7 @@ static int reiserfs_write_full_page(struct page *page,
                /* from this point on, we know the buffer is mapped to a
                 * real block and not a direct item
                 */
-               if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+               if (wbc->sync_mode != WB_SYNC_NONE) {
                        lock_buffer(bh);
                } else {
                        if (!trylock_buffer(bh)) {
@@ -2628,8 +2627,7 @@ static int reiserfs_write_begin(struct file *file,
        return ret;
 }
 
-int reiserfs_prepare_write(struct file *f, struct page *page,
-                          unsigned from, unsigned to)
+int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
 {
        struct inode *inode = page->mapping->host;
        int ret;
@@ -2650,7 +2648,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
                th->t_refcount++;
        }
 
-       ret = block_prepare_write(page, from, to, reiserfs_get_block);
+       ret = __block_write_begin(page, from, len, reiserfs_get_block);
        if (ret && reiserfs_transaction_running(inode->i_sb)) {
                struct reiserfs_transaction_handle *th = current->journal_info;
                /* this gets a little ugly.  If reiserfs_get_block returned an
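
Note the argument-convention change running through these reiserfs hunks: the old prepare_write interface took a (from, to) byte range within the page, while __block_write_begin and the new __reiserfs_write_begin take (from, len). Each converted call site therefore passes a length, e.g. tail_end - tail_start or offset - start above. The mechanical conversion, as a sketch:

    /* old: the range end is an offset within the page */
    reiserfs_prepare_write(NULL, page, from, to);

    /* new: pass the length instead (len == to - from) */
    __reiserfs_write_begin(page, from, to - from);
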
index 5cbb81e134aca031b21e3c88661400ff1c1b174f..adf22b485ceac01e0f5ed7b0a8d1593956a7b81f 100644 (file)
@@ -160,8 +160,6 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
 
 int reiserfs_commit_write(struct file *f, struct page *page,
                          unsigned from, unsigned to);
-int reiserfs_prepare_write(struct file *f, struct page *page,
-                          unsigned from, unsigned to);
 /*
 ** reiserfs_unpack
 ** Function try to convert tail from direct item into indirect.
@@ -200,7 +198,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
        }
 
        /* we unpack by finding the page with the tail, and calling
-        ** reiserfs_prepare_write on that page.  This will force a
+        ** __reiserfs_write_begin on that page.  This will force a
         ** reiserfs_get_block to unpack the tail for us.
         */
        index = inode->i_size >> PAGE_CACHE_SHIFT;
@@ -210,7 +208,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
        if (!page) {
                goto out;
        }
-       retval = reiserfs_prepare_write(NULL, page, write_from, write_from);
+       retval = __reiserfs_write_begin(page, write_from, 0);
        if (retval)
                goto out_unlock;
 
index ee78d4a0086a983609749a72ec0fb840615255fd..ba5f51ec345829499982f17196fc61ac444429a0 100644 (file)
@@ -1156,7 +1156,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
        inode->i_ctime = CURRENT_TIME_SEC;
        reiserfs_update_sd(&th, inode);
 
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        d_instantiate(dentry, inode);
        retval = journal_end(&th, dir->i_sb, jbegin_count);
        reiserfs_write_unlock(dir->i_sb);
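
atomic_inc(&inode->i_count) becomes ihold() throughout this series (reiserfs here; sysv, ubifs, udf, ufs and XFS below), so taking an extra inode reference goes through one audited helper instead of poking i_count directly. In this kernel the helper is essentially:

    /* fs/inode.c: take an additional reference on an already-referenced
     * inode; the WARN_ON catches callers that held no reference at all. */
    void ihold(struct inode *inode)
    {
            WARN_ON(atomic_inc_return(&inode->i_count) < 2);
    }
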
index 8c4cf273c672185fe7cb4c5d0a6e30ddc85e5bf3..5d04a7828e7a4ee05d27446cbc8ee7fdfb433ad9 100644 (file)
@@ -418,13 +418,11 @@ static inline __u32 xattr_hash(const char *msg, int len)
 
 int reiserfs_commit_write(struct file *f, struct page *page,
                          unsigned from, unsigned to);
-int reiserfs_prepare_write(struct file *f, struct page *page,
-                          unsigned from, unsigned to);
 
 static void update_ctime(struct inode *inode)
 {
        struct timespec now = current_fs_time(inode->i_sb);
-       if (hlist_unhashed(&inode->i_hash) || !inode->i_nlink ||
+       if (inode_unhashed(inode) || !inode->i_nlink ||
            timespec_equal(&inode->i_ctime, &now))
                return;
 
@@ -532,8 +530,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
                        rxh->h_hash = cpu_to_le32(xahash);
                }
 
-               err = reiserfs_prepare_write(NULL, page, page_offset,
-                                           page_offset + chunk + skip);
+               err = __reiserfs_write_begin(page, page_offset, chunk + skip);
                if (!err) {
                        if (buffer)
                                memcpy(data + skip, buffer + buffer_pos, chunk);
index 0e7cb1395a94cfb2c98aad791672576315b88526..05d6b0e78c959a341137c97fbb2ea2fa89b25197 100644 (file)
@@ -462,9 +462,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
        if (size) {
                char *p;
 
-               spin_lock(&dcache_lock);
                p = __d_path(path, root, buf, size);
-               spin_unlock(&dcache_lock);
                res = PTR_ERR(p);
                if (!IS_ERR(p)) {
                        char *end = mangle_path(buf, p, esc);
index 00a70cab1f36ea7fc7bd96317d4bccdd8486a736..f678d421e54166917f7c8ca7cf3a51632cbd1d52 100644 (file)
@@ -406,21 +406,15 @@ void
 smb_renew_times(struct dentry * dentry)
 {
        dget(dentry);
-       spin_lock(&dentry->d_lock);
-       for (;;) {
-               struct dentry *parent;
+       dentry->d_time = jiffies;
 
-               dentry->d_time = jiffies;
-               if (IS_ROOT(dentry))
-                       break;
-               parent = dentry->d_parent;
-               dget(parent);
-               spin_unlock(&dentry->d_lock);
+       while (!IS_ROOT(dentry)) {
+               struct dentry *parent = dget_parent(dentry);
                dput(dentry);
                dentry = parent;
-               spin_lock(&dentry->d_lock);
+
+               dentry->d_time = jiffies;
        }
-       spin_unlock(&dentry->d_lock);
        dput(dentry);
 }
 
index 8fc5e50e142fad71ad05aa38f726ae0d6d4ca566..f6e9ee59757ec29c11be43e6fcab2713bab2562e 100644 (file)
@@ -229,7 +229,6 @@ smb_invalidate_inodes(struct smb_sb_info *server)
 {
        VERBOSE("\n");
        shrink_dcache_sb(SB_of(server));
-       invalidate_inodes(SB_of(server));
 }
 
 /*
index 71c29b6670b402719f8475ed34b39a306bda807d..3dcf638d4d3a0ff836fd9aba946361edd894b630 100644 (file)
@@ -332,16 +332,15 @@ static int smb_build_path(struct smb_sb_info *server, unsigned char *buf,
         * and store it in reversed order [see reverse_string()]
         */
        dget(entry);
-       spin_lock(&entry->d_lock);
        while (!IS_ROOT(entry)) {
                struct dentry *parent;
 
                if (maxlen < (3<<unicode)) {
-                       spin_unlock(&entry->d_lock);
                        dput(entry);
                        return -ENAMETOOLONG;
                }
 
+               spin_lock(&entry->d_lock);
                len = server->ops->convert(path, maxlen-2, 
                                      entry->d_name.name, entry->d_name.len,
                                      server->local_nls, server->remote_nls);
@@ -359,15 +358,12 @@ static int smb_build_path(struct smb_sb_info *server, unsigned char *buf,
                }
                *path++ = '\\';
                maxlen -= len+1;
-
-               parent = entry->d_parent;
-               dget(parent);
                spin_unlock(&entry->d_lock);
+
+               parent = dget_parent(entry);
                dput(entry);
                entry = parent;
-               spin_lock(&entry->d_lock);
        }
-       spin_unlock(&entry->d_lock);
        dput(entry);
        reverse_string(buf, path-buf);
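
Both smbfs loops used to climb toward the root by peeking at d_parent under d_lock and hand-rotating the lock up the tree; they now use dget_parent(), which returns a properly referenced parent, so d_lock is only needed where the dentry's name is actually read. The resulting walk, as a sketch:

    /* climb from a dentry to the root with proper reference counting */
    dget(dentry);
    while (!IS_ROOT(dentry)) {
            struct dentry *parent = dget_parent(dentry); /* takes a reference */

            /* ... use dentry; take d_lock only around d_name accesses ... */
            dput(dentry);
            dentry = parent;
    }
    dput(dentry);
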
 
index 8819e3a7ff203fb521b537672d16d5e2d4cea80d..b9c9869165db359d018134e99e0a39e2738e6dba 100644 (file)
@@ -273,14 +273,14 @@ void generic_shutdown_super(struct super_block *sb)
                get_fs_excl();
                sb->s_flags &= ~MS_ACTIVE;
 
-               /* bad name - it should be evict_inodes() */
-               invalidate_inodes(sb);
+               fsnotify_unmount_inodes(&sb->s_inodes);
+
+               evict_inodes(sb);
 
                if (sop->put_super)
                        sop->put_super(sb);
 
-               /* Forget any remaining inodes */
-               if (invalidate_inodes(sb)) {
+               if (!list_empty(&sb->s_inodes)) {
                        printk("VFS: Busy inodes after unmount of %s. "
                           "Self-destruct in 5 seconds.  Have a nice day...\n",
                           sb->s_id);
index 33e047b59b8d659975d6c325835f0d11a2df27d4..11e7f7d11cd06fe85bf694397d3608b805db1fe3 100644 (file)
@@ -126,7 +126,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir,
 
        inode->i_ctime = CURRENT_TIME_SEC;
        inode_inc_link_count(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
 
        return add_nondir(dentry, inode);
 }
index 87ebcce72213e2889f66127125908190a4ef213d..14f64b689d7f58edef4be03d58a8712c442f62ef 100644 (file)
@@ -550,7 +550,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
 
        lock_2_inodes(dir, inode);
        inc_nlink(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        inode->i_ctime = ubifs_current_time(inode);
        dir->i_size += sz_change;
        dir_ui->ui_size = dir->i_size;
index bf5fc674193c8bba9e5e31589ba883d49f106270..6d8dc02baebb57e05e5839464b24e8f443263546 100644 (file)
@@ -1101,7 +1101,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
        inc_nlink(inode);
        inode->i_ctime = current_fs_time(inode->i_sb);
        mark_inode_dirty(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        d_instantiate(dentry, inode);
        unlock_kernel();
 
index b056f02b1fb306c810f4b429ae99417a8f6238e5..12f39b9e4437db73784cd77b8e7f898457d157db 100644 (file)
@@ -180,7 +180,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
 
        inode->i_ctime = CURRENT_TIME_SEC;
        inode_inc_link_count(inode);
-       atomic_inc(&inode->i_count);
+       ihold(inode);
 
        error = ufs_add_nondir(dentry, inode);
        unlock_kernel();
index b552f816de15942095f82ad0302b8656f7db09ae..c9af48fffcd77fc559f73d5d97317e22ec4d3413 100644 (file)
@@ -1139,8 +1139,7 @@ xfs_vm_writepage(
                                type = IO_DELAY;
                                flags = BMAPI_ALLOCATE;
 
-                               if (wbc->sync_mode == WB_SYNC_NONE &&
-                                   wbc->nonblocking)
+                               if (wbc->sync_mode == WB_SYNC_NONE)
                                        flags |= BMAPI_TRYLOCK;
                        }
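
The wbc->nonblocking tests dropped here and in reiserfs_write_full_page above are part of removing that flag from struct writeback_control; whether writeback may block is now inferred from sync_mode alone. In a writepage implementation the buffer-locking decision reduces to a sketch like:

    /* sync writeback may sleep on the buffer lock; async (WB_SYNC_NONE)
     * writeback just skips buffers it cannot lock immediately */
    if (wbc->sync_mode != WB_SYNC_NONE) {
            lock_buffer(bh);
    } else if (!trylock_buffer(bh)) {
            redirty_page_for_writepage(wbc, page);
            continue;
    }
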
 
index ba5312802aa99dfe266148b90d9dfa8e3f54e19d..63fd2c07cb57d861837bf463d9ad7b3aa5392088 100644 (file)
@@ -1580,6 +1580,7 @@ xfs_mapping_buftarg(
                        XFS_BUFTARG_NAME(btp));
                return ENOMEM;
        }
+       inode->i_ino = get_next_ino();
        inode->i_mode = S_IFBLK;
        inode->i_bdev = bdev;
        inode->i_rdev = bdev->bd_dev;
index ec858e09d546bf3bf34ca1dcbe0f7b74cb52ffa9..96107efc0c61908c3f1a94a646604836fc2131e1 100644 (file)
@@ -317,7 +317,7 @@ xfs_vn_link(
        if (unlikely(error))
                return -error;
 
-       atomic_inc(&inode->i_count);
+       ihold(inode);
        d_instantiate(dentry, inode);
        return 0;
 }
@@ -760,7 +760,9 @@ xfs_setup_inode(
 
        inode->i_ino = ip->i_ino;
        inode->i_state = I_NEW;
-       inode_add_to_lists(ip->i_mount->m_super, inode);
+
+       inode_sb_list_add(inode);
+       insert_inode_hash(inode);
 
        inode->i_mode   = ip->i_d.di_mode;
        inode->i_nlink  = ip->i_d.di_nlink;
index ab31ce5aeaf9cde4c5c008e1492c7a42f5399863..cf808782c0650d86e967e149a8fac2baf80dd093 100644 (file)
@@ -576,7 +576,7 @@ xfs_max_file_offset(
 
        /* Figure out maximum filesize, on Linux this can depend on
         * the filesystem blocksize (on 32 bit platforms).
-        * __block_prepare_write does this in an [unsigned] long...
+        * __block_write_begin does this in an [unsigned] long...
         *      page->index << (PAGE_CACHE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
index fac52290de90c050b6ab0c9ea5d208541cd4c839..fb2ca2e4cdc935c112d1504a85aca3130f511179 100644 (file)
@@ -500,7 +500,7 @@ void                xfs_mark_inode_dirty_sync(xfs_inode_t *);
 #define IHOLD(ip) \
 do { \
        ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
-       atomic_inc(&(VFS_I(ip)->i_count)); \
+       ihold(VFS_I(ip)); \
        trace_xfs_ihold(ip, _THIS_IP_); \
 } while (0)
 
index 4de84ce3a927123f118861df950202a8dba1a536..359ef11725a65da5bf6d07d8fc0cccee33be803f 100644 (file)
@@ -184,7 +184,7 @@ struct acpi_device_pnp {
 
 #define acpi_device_bid(d)     ((d)->pnp.bus_id)
 #define acpi_device_adr(d)     ((d)->pnp.bus_address)
-char *acpi_device_hid(struct acpi_device *device);
+const char *acpi_device_hid(struct acpi_device *device);
 #define acpi_device_name(d)    ((d)->pnp.device_name)
 #define acpi_device_class(d)   ((d)->pnp.device_class)
 
@@ -389,21 +389,25 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
 int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
 int acpi_disable_wakeup_device_power(struct acpi_device *dev);
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_OPS
 int acpi_pm_device_sleep_state(struct device *, int *);
-int acpi_pm_device_sleep_wake(struct device *, bool);
-#else /* !CONFIG_PM_SLEEP */
+#else
 static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
 {
        if (p)
                *p = ACPI_STATE_D0;
        return ACPI_STATE_D3;
 }
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+int acpi_pm_device_sleep_wake(struct device *, bool);
+#else
 static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
 {
        return -ENODEV;
 }
-#endif /* !CONFIG_PM_SLEEP */
+#endif
 
 #endif                         /* CONFIG_ACPI */
 
index 23d78b4d088b70eeb95f5ab9737fded3044ec910..3090471b2a5ef6069927b82c9bbea9bec8b2590e 100644 (file)
@@ -115,8 +115,6 @@ void pci_acpi_crs_quirks(void);
 #define ACPI_PROCESSOR_LIMIT_INCREMENT 0x01
 #define ACPI_PROCESSOR_LIMIT_DECREMENT 0x02
 
-int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
-
 /*--------------------------------------------------------------------------
                                   Dock Station
   -------------------------------------------------------------------------- */
index 29bf945143e8358e4fd46ed8d98bf2f007c0e227..65b3f5888f42749510f4d51f7d9a0bed43e770bf 100644 (file)
@@ -98,8 +98,6 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
 /*
  * Spinlock primitives
  */
-acpi_status acpi_os_create_lock(acpi_spinlock * out_handle);
-
 void acpi_os_delete_lock(acpi_spinlock handle);
 
 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
@@ -223,25 +221,15 @@ acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width);
  */
 acpi_status
 acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id,
-                              u32 reg, u32 *value, u32 width);
+                              u32 reg, u64 *value, u32 width);
 
 acpi_status
 acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
                                u32 reg, u64 value, u32 width);
 
-/*
- * Interim function needed for PCI IRQ routing
- */
-void
-acpi_os_derive_pci_id(acpi_handle device,
-                     acpi_handle region, struct acpi_pci_id **pci_id);
-
 /*
  * Miscellaneous
  */
-acpi_status acpi_os_validate_interface(char *interface);
-acpi_status acpi_osi_invalidate(char* interface);
-
 acpi_status
 acpi_os_validate_address(u8 space_id, acpi_physical_address address,
                         acpi_size length, char *name);
index 984cdc62e30bc52da4cef907f3dd5094aa5ffd71..53b7cfd924a300ec924ca0cea5ed9dde18ba8e40 100644 (file)
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20100702
+#define ACPI_CA_VERSION                 0x20101013
 
 #include "actypes.h"
 #include "actbl.h"
@@ -72,6 +72,7 @@ extern u8 acpi_gbl_truncate_io_addresses;
 
 extern u32 acpi_current_gpe_count;
 extern struct acpi_table_fadt acpi_gbl_FADT;
+extern u8 acpi_gbl_system_awake_and_running;
 
 extern u32 acpi_rsdt_forced;
 /*
@@ -105,6 +106,10 @@ const char *acpi_format_exception(acpi_status exception);
 
 acpi_status acpi_purge_cached_objects(void);
 
+acpi_status acpi_install_interface(acpi_string interface_name);
+
+acpi_status acpi_remove_interface(acpi_string interface_name);
+
 /*
  * ACPI Memory management
  */
@@ -263,6 +268,8 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
 acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
 #endif
 
+acpi_status acpi_install_interface_handler(acpi_interface_handler handler);
+
 /*
  * Event interfaces
  */
@@ -308,6 +315,8 @@ acpi_install_gpe_block(acpi_handle gpe_device,
 
 acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
 
+acpi_status acpi_update_gpes(void);
+
 /*
  * Resource interfaces
  */
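
acpi_install_interface(), acpi_remove_interface() and acpi_install_interface_handler() replace the acpi_os_validate_interface()/acpi_osi_invalidate() pair removed from acpiosxf.h above: _OSI strings are now kept on a public ACPICA interface list rather than validated through an OS callback. A hypothetical use from platform setup code (the interface string is made up):

    /* hypothetical example: advertise an extra _OSI string to firmware */
    acpi_status status = acpi_install_interface("ExampleVendorExtension");

    if (ACPI_FAILURE(status))
            pr_warn("could not install _OSI interface\n");
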
index 5db8f472fec992205455a4924f7bc0e484743293..2b134b691e349dbbab0f87966e96e3d70185e4f5 100644 (file)
  *
  * ACPI_SIZE        16/32/64-bit unsigned value
  * ACPI_NATIVE_INT  16/32/64-bit signed value
- *
  */
 
 /*******************************************************************************
@@ -132,6 +131,16 @@ typedef COMPILER_DEPENDENT_INT64 INT64;
 
 /*! [End] no source code translation !*/
 
+/*
+ * Value returned by acpi_os_get_thread_id. There is no standard "thread_id"
+ * across operating systems or even the various UNIX systems. Since ACPICA
+ * only needs the thread ID as a unique thread identifier, we use a u64
+ * as the only common data type - it will accommodate any type of pointer or
+ * any type of integer. It is up to the host-dependent OSL to cast the
+ * native thread ID type to a u64 (in acpi_os_get_thread_id).
+ */
+#define acpi_thread_id                  u64
+
 /*******************************************************************************
  *
  * Types specific to 64-bit targets
@@ -211,12 +220,6 @@ typedef u32 acpi_physical_address;
  *
  ******************************************************************************/
 
-/* Value returned by acpi_os_get_thread_id */
-
-#ifndef acpi_thread_id
-#define acpi_thread_id                 acpi_size
-#endif
-
 /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */
 
 #ifndef acpi_cpu_flags
@@ -375,16 +378,6 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */
 typedef u8 acpi_owner_id;
 #define ACPI_OWNER_ID_MAX               0xFF
 
-struct uint64_struct {
-       u32 lo;
-       u32 hi;
-};
-
-union uint64_overlay {
-       u64 full;
-       struct uint64_struct part;
-};
-
 #define ACPI_INTEGER_BIT_SIZE           64
 #define ACPI_MAX_DECIMAL_DIGITS         20     /* 2^64 = 18,446,744,073,709,551,616 */
 
@@ -950,6 +943,9 @@ acpi_status(*acpi_walk_callback) (acpi_handle object,
                                  u32 nesting_level,
                                  void *context, void **return_value);
 
+typedef
+u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
+
 /* Interrupt handler return values */
 
 #define ACPI_INTERRUPT_NOT_HANDLED      0x00
index c05aeba9e8f0e42a3441a66c75b02b0a973a046d..a3e334ab11191aeb596b93cfc28a7c4dcef85d4d 100644 (file)
 #define ACPI_MUTEX_TYPE             ACPI_BINARY_SEMAPHORE
 #endif
 
+/* "inline" keywords - configurable since inline is not standardized */
+
+#ifndef ACPI_INLINE
+#define ACPI_INLINE
+#endif
+
 /*
  * Debugger threading model
  * Use single threaded if the entire subsystem is contained in an application
index 0cd53e3cd1a37789bb54edf99ec843df82c3b025..5dcb9537343c7a91d82317dd4e50aa7d385149f5 100644 (file)
@@ -44,6 +44,8 @@
 #ifndef __ACGCC_H__
 #define __ACGCC_H__
 
+#define ACPI_INLINE             __inline__
+
 /* Function name is used for debug output. Non-ANSI, compiler-dependent */
 
 #define ACPI_GET_FUNCTION_NAME          __func__
index 103f08aca764e23ffe81d9ee6da4fd6206e3de7b..572189e37133d697db734ff2d8e10c1707917463 100644 (file)
@@ -75,7 +75,6 @@
 #define acpi_cache_t                        struct kmem_cache
 #define acpi_spinlock                       spinlock_t *
 #define acpi_cpu_flags                      unsigned long
-#define acpi_thread_id                      struct task_struct *
 
 #else /* !__KERNEL__ */
 
@@ -88,7 +87,7 @@
 /* Host-dependent types and defines for user-space ACPICA */
 
 #define ACPI_FLUSH_CPU_CACHE()
-#define acpi_thread_id                      pthread_t
+#define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread))
 
 #if defined(__ia64__) || defined(__x86_64__)
 #define ACPI_MACHINE_WIDTH          64
 
 
 #ifdef __KERNEL__
+#include <acpi/actypes.h>
 /*
  * Overrides for in-kernel ACPICA
  */
 static inline acpi_thread_id acpi_os_get_thread_id(void)
 {
-       return current;
+       return (acpi_thread_id)(unsigned long)current;
 }
 
 /*
@@ -127,7 +127,6 @@ static inline acpi_thread_id acpi_os_get_thread_id(void)
  * However, boot has  (system_state != SYSTEM_RUNNING)
  * to quiet __might_sleep() in kmalloc() and resume does not.
  */
-#include <acpi/actypes.h>
 static inline void *acpi_os_allocate(acpi_size size)
 {
        return kmalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
index f4229fb315e1d7d81caddb7557cbcb6c65072fed..2c0fc10956baf2123f5f24ed203572874369e206 100644 (file)
 #define DATA_DATA                                                      \
        *(.data)                                                        \
        *(.ref.data)                                                    \
+       *(.data..shared_aligned) /* percpu related */                   \
        DEV_KEEP(init.data)                                             \
        DEV_KEEP(exit.data)                                             \
        CPU_KEEP(init.data)                                             \
 
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS                                                    \
-       . = ALIGN(PAGE_SIZE);                                           \
+       . = ALIGN(4);                                                   \
        VMLINUX_SYMBOL(__initramfs_start) = .;                          \
        *(.init.ramfs)                                                  \
        VMLINUX_SYMBOL(__initramfs_end) = .;
index 4c9461a4f9e67b4b3e67c5bb73192aed44695079..274eaaa15c36a86ee48a6b743f6c9d7cfb36b010 100644 (file)
@@ -699,13 +699,8 @@ struct drm_driver {
        int (*suspend) (struct drm_device *, pm_message_t state);
        int (*resume) (struct drm_device *);
        int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
-       void (*dma_ready) (struct drm_device *);
        int (*dma_quiescent) (struct drm_device *);
-       int (*context_ctor) (struct drm_device *dev, int context);
        int (*context_dtor) (struct drm_device *dev, int context);
-       int (*kernel_context_switch) (struct drm_device *dev, int old,
-                                     int new);
-       void (*kernel_context_switch_unlock) (struct drm_device *dev);
 
        /**
         * get_vblank_counter - get raw hardware vblank counter
@@ -777,8 +772,6 @@ struct drm_driver {
                                        struct drm_file *file_priv);
        void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
                                            struct drm_file *file_priv);
-       resource_size_t (*get_map_ofs) (struct drm_local_map * map);
-       resource_size_t (*get_reg_ofs) (struct drm_device *dev);
        void (*set_version) (struct drm_device *dev,
                             struct drm_set_version *sv);
 
@@ -795,8 +788,6 @@ struct drm_driver {
        void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
                            bool from_release);
 
-       int (*proc_init)(struct drm_minor *minor);
-       void (*proc_cleanup)(struct drm_minor *minor);
        int (*debugfs_init)(struct drm_minor *minor);
        void (*debugfs_cleanup)(struct drm_minor *minor);
 
@@ -972,7 +963,6 @@ struct drm_device {
        __volatile__ long context_flag; /**< Context swapping flag */
        __volatile__ long interrupt_flag; /**< Interruption handler flag */
        __volatile__ long dma_flag;     /**< DMA dispatch flag */
-       struct timer_list timer;        /**< Timer for delaying ctx switch */
        wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
        int last_checked;               /**< Last context checked for DMA */
        int last_context;               /**< Last current context */
@@ -1045,25 +1035,12 @@ struct drm_device {
        struct drm_minor *control;              /**< Control node for card */
        struct drm_minor *primary;              /**< render type primary screen head */
 
-       /** \name Drawable information */
-       /*@{ */
-       spinlock_t drw_lock;
-       struct idr drw_idr;
-       /*@} */
-
         struct drm_mode_config mode_config;    /**< Current mode config */
 
        /** \name GEM information */
        /*@{ */
        spinlock_t object_name_lock;
        struct idr object_name_idr;
-       atomic_t object_count;
-       atomic_t object_memory;
-       atomic_t pin_count;
-       atomic_t pin_memory;
-       atomic_t gtt_count;
-       atomic_t gtt_memory;
-       uint32_t gtt_total;
        uint32_t invalidate_domains;    /* domains pending invalidation */
        uint32_t flush_domains;         /* domains pending flush */
        /*@} */
@@ -1175,8 +1152,6 @@ extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
 extern void drm_vm_open_locked(struct vm_area_struct *vma);
 extern void drm_vm_close_locked(struct vm_area_struct *vma);
-extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
-extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
 
                                /* Memory management support (drm_memory.h) */
@@ -1186,8 +1161,7 @@ extern int drm_mem_info(char *buf, char **start, off_t offset,
                        int request, int *eof, void *data);
 extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
 
-extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
-extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
+extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
 extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
                                       struct page **pages,
@@ -1239,17 +1213,6 @@ extern int drm_setsareactx(struct drm_device *dev, void *data,
 extern int drm_getsareactx(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
 
-                               /* Drawable IOCTL support (drm_drawable.h) */
-extern int drm_adddraw(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv);
-extern int drm_rmdraw(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv);
-extern int drm_update_drawable_info(struct drm_device *dev, void *data,
-                                   struct drm_file *file_priv);
-extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
-                                                 drm_drawable_t id);
-extern void drm_drawable_free_all(struct drm_device *dev);
-
                                /* Authentication IOCTL support (drm_auth.h) */
 extern int drm_getmagic(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
@@ -1264,7 +1227,6 @@ extern int drm_lock(struct drm_device *dev, void *data,
                    struct drm_file *file_priv);
 extern int drm_unlock(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
-extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
 extern void drm_idlelock_take(struct drm_lock_data *lock_data);
 extern void drm_idlelock_release(struct drm_lock_data *lock_data);
@@ -1359,10 +1321,6 @@ extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
 extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
 extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
-extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
-extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
-extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
-extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
 extern void drm_agp_chipset_flush(struct drm_device *dev);
 
                                /* Stub support (drm_stub.h) */
@@ -1414,7 +1372,6 @@ extern int drm_bufs_info(struct seq_file *m, void *data);
 extern int drm_vblank_info(struct seq_file *m, void *data);
 extern int drm_clients_info(struct seq_file *m, void* data);
 extern int drm_gem_name_info(struct seq_file *m, void *data);
-extern int drm_gem_object_info(struct seq_file *m, void* data);
 
 #if DRM_DEBUG_CODE
 extern int drm_vma_info(struct seq_file *m, void *data);
index 3e5a51af757c76ba07f514d00265a5e7300b6627..029aa688e787c4673c12f38919999982a926e5f6 100644 (file)
@@ -221,7 +221,8 @@ struct drm_framebuffer_funcs {
         * the semantics and arguments have a one to one mapping
         * on this function.
         */
-       int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
+       int (*dirty)(struct drm_framebuffer *framebuffer,
+                    struct drm_file *file_priv, unsigned flags,
                     unsigned color, struct drm_clip_rect *clips,
                     unsigned num_clips);
 };
@@ -762,6 +763,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
                                    void *data, struct drm_file *file_priv);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern bool drm_detect_monitor_audio(struct edid *edid);
 extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
                                    void *data, struct drm_file *file_priv);
 extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
index 59b7073b13fe334815fc3dd290b49e73c4e162d4..73b071203dcc8bf61d65a75bac74524ba579fbcb 100644 (file)
 
 #include <linux/fb.h>
 
+enum mode_set_atomic {
+       LEAVE_ATOMIC_MODE_SET,
+       ENTER_ATOMIC_MODE_SET,
+};
+
 struct drm_crtc_helper_funcs {
        /*
         * Control power levels on the CRTC.  If the mode passed in is
@@ -61,7 +66,8 @@ struct drm_crtc_helper_funcs {
        int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
                             struct drm_framebuffer *old_fb);
        int (*mode_set_base_atomic)(struct drm_crtc *crtc,
-                                   struct drm_framebuffer *fb, int x, int y);
+                                   struct drm_framebuffer *fb, int x, int y,
+                                   enum mode_set_atomic);
 
        /* reload the current crtc LUT */
        void (*load_lut)(struct drm_crtc *crtc);
index a49e791db0b0308f284a17408a7051559102ca1d..83a389e44543f19911efe7726ad48d4dd602f967 100644 (file)
@@ -23,6 +23,9 @@
 #ifndef _DRM_DP_HELPER_H_
 #define _DRM_DP_HELPER_H_
 
+#include <linux/types.h>
+#include <linux/i2c.h>
+
 /* From the VESA DisplayPort spec */
 
 #define AUX_NATIVE_WRITE       0x8
index e41c74facb6a3e41e84e3c66df6395d1a94add76..8c641bed9bbd36526870b6741fdd16f780e3b0ec 100644 (file)
@@ -286,6 +286,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_PAGEFLIPPING     8
 #define I915_PARAM_HAS_EXECBUF2          9
 #define I915_PARAM_HAS_BSD              10
+#define I915_PARAM_HAS_BLT              11
 
 typedef struct drm_i915_getparam {
        int param;
@@ -627,8 +628,11 @@ struct drm_i915_gem_execbuffer2 {
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
+#define I915_EXEC_RING_MASK              (7<<0)
+#define I915_EXEC_DEFAULT                (0<<0)
 #define I915_EXEC_RENDER                 (1<<0)
-#define I915_EXEC_BSD                    (1<<1)
+#define I915_EXEC_BSD                    (2<<0)
+#define I915_EXEC_BLT                    (3<<0)
        __u64 flags;
        __u64 rsvd1;
        __u64 rsvd2;
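
The execbuffer2 flag change turns the old one-flag-per-ring scheme into a small enumeration in the low three bits: with a third (BLT) ring arriving, a masked value scales better than individual bits, and I915_EXEC_RENDER keeps its old value 1 for compatibility. Ring selection then looks roughly like this (userspace and kernel sides sketched together, buffer setup elided):

    struct drm_i915_gem_execbuffer2 execbuf = { 0 };

    execbuf.flags = I915_EXEC_BLT;          /* value 3: blitter ring */

    /* the kernel decodes the ring with the mask, not per-ring bits */
    switch (execbuf.flags & I915_EXEC_RING_MASK) {
    case I915_EXEC_DEFAULT:
    case I915_EXEC_RENDER:  /* render ring */
            break;
    case I915_EXEC_BSD:     /* video (BSD) ring */
            break;
    case I915_EXEC_BLT:     /* blit ring */
            break;
    }
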
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
new file mode 100644 (file)
index 0000000..d3c8194
--- /dev/null
@@ -0,0 +1,18 @@
+/* Common header for intel-gtt.ko and i915.ko */
+
+#ifndef _DRM_INTEL_GTT_H
+#define        _DRM_INTEL_GTT_H
+struct intel_gtt {
+       /* Number of stolen gtt entries at the beginning. */
+       unsigned int gtt_stolen_entries;
+       /* Total number of gtt entries. */
+       unsigned int gtt_total_entries;
+       /* Part of the gtt that is mappable by the cpu, for those chips where
+        * this is not the full gtt. */
+       unsigned int gtt_mappable_entries;
+};
+
+struct intel_gtt *intel_gtt_get(void);
+
+#endif
+
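
The new shared header lets i915.ko read GTT geometry probed by intel-gtt.ko without reaching into AGP internals. Usage is presumably a single query at initialization, along these lines (a sketch; variable names are illustrative):

    struct intel_gtt *gtt = intel_gtt_get();
    unsigned long mappable, total;

    /* entry counts are in pages; convert to bytes */
    mappable = (unsigned long)gtt->gtt_mappable_entries << PAGE_SHIFT;
    total    = (unsigned long)gtt->gtt_total_entries << PAGE_SHIFT;
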
index 2040e6c4f1729a7de3001d5077277b8521427fb9..5afa5b52063e6d385926de88ec49faf1324a185a 100644 (file)
@@ -102,7 +102,8 @@ struct ttm_bus_placement {
  */
 
 struct ttm_mem_reg {
-       struct drm_mm_node *mm_node;
+       void *mm_node;
+       unsigned long start;
        unsigned long size;
        unsigned long num_pages;
        uint32_t page_alignment;
index b87504235f183f45c01d527ae585cbbafbd197ab..d01b4ddbdc56acb88fd1064b0e4580f82aa03b76 100644 (file)
@@ -203,7 +203,22 @@ struct ttm_tt {
  * It's set up by the ttm_bo_driver::init_mem_type method.
  */
 
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+       int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+       int  (*takedown)(struct ttm_mem_type_manager *man);
+       int  (*get_node)(struct ttm_mem_type_manager *man,
+                        struct ttm_buffer_object *bo,
+                        struct ttm_placement *placement,
+                        struct ttm_mem_reg *mem);
+       void (*put_node)(struct ttm_mem_type_manager *man,
+                        struct ttm_mem_reg *mem);
+       void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
 struct ttm_mem_type_manager {
+       struct ttm_bo_device *bdev;
 
        /*
         * No protection. Constant from start.
@@ -222,8 +237,8 @@ struct ttm_mem_type_manager {
         * TODO: Consider one lru_lock per ttm_mem_type_manager.
         * Plays ill with list removal, though.
         */
-
-       struct drm_mm manager;
+       const struct ttm_mem_type_manager_func *func;
+       void *priv;
        struct list_head lru;
 };
 
@@ -649,6 +664,12 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                                struct ttm_mem_reg *mem,
                                bool interruptible,
                                bool no_wait_reserve, bool no_wait_gpu);
+
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+                          struct ttm_mem_reg *mem);
+extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
+                                 struct ttm_mem_reg *mem);
+
 /**
  * ttm_bo_wait_for_cpu
  *
@@ -891,6 +912,8 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
  */
 extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
 
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
 #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define TTM_HAS_AGP
 #include <linux/agp_backend.h>
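
struct ttm_mem_type_manager loses its embedded drm_mm and instead delegates range allocation to a driver-pluggable ttm_mem_type_manager_func table, with ttm_bo_manager_func as the stock drm_mm-backed implementation. A driver's init_mem_type hook now wires the table up, roughly (a sketch; the function name is illustrative):

    static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                     struct ttm_mem_type_manager *man)
    {
            switch (type) {
            case TTM_PL_VRAM:
                    man->func = &ttm_bo_manager_func; /* stock drm_mm manager */
                    break;
            /* ... other memory types ... */
            }
            return 0;
    }
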
index 4d0842391edc666f8985329fed245b989e3a0c2d..650e6bf6f69f101999d0294497458c3d5100e55f 100644 (file)
@@ -72,6 +72,7 @@
 #define DRM_VMW_PARAM_FIFO_OFFSET      3
 #define DRM_VMW_PARAM_HW_CAPS          4
 #define DRM_VMW_PARAM_FIFO_CAPS        5
+#define DRM_VMW_PARAM_MAX_FB_SIZE      6
 
 /**
  * struct drm_vmw_getparam_arg
index c227757feb06b4c22eb15ca3faf82c1dfa9f107e..050a7bccb8365ef117f365ebe3d3a8d337efc390 100644 (file)
@@ -245,8 +245,6 @@ int acpi_check_resource_conflict(const struct resource *res);
 
 int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name);
-int acpi_check_mem_region(resource_size_t start, resource_size_t n,
-                     const char *name);
 
 int acpi_resources_are_enforced(void);
 
@@ -308,6 +306,9 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
                                             u32 *mask, u32 req);
 extern void acpi_early_init(void);
 
+int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+
 #else  /* !CONFIG_ACPI */
 
 #define acpi_disabled 1
@@ -344,12 +345,6 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
        return 0;
 }
 
-static inline int acpi_check_mem_region(resource_size_t start,
-                                       resource_size_t n, const char *name)
-{
-       return 0;
-}
-
 struct acpi_table_header;
 static inline int acpi_table_parse(char *id,
                                int (*handler)(struct acpi_table_header *))
index 35b00746c712fc11776e78345edf786b98f9063b..4ce34fa937d4910cf8d7546e54de1e888fb0abb2 100644 (file)
@@ -111,6 +111,7 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
+extern struct list_head bdi_pending_list;
 
 static inline int wb_has_dirty_io(struct bdi_writeback *wb)
 {
@@ -285,7 +286,7 @@ enum {
 void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
 void set_bdi_congested(struct backing_dev_info *bdi, int sync);
 long congestion_wait(int sync, long timeout);
-
+long wait_iff_congested(struct zone *zone, int sync, long timeout);
 
 static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
 {
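
wait_iff_congested() is a refinement of congestion_wait() for the reclaim path: it sleeps only if the zone's backing devices are actually congested instead of stalling unconditionally. Its intended call pattern, as a sketch:

    /* back off during reclaim only if this zone is really congested */
    wait_iff_congested(zone, BLK_RW_ASYNC, HZ / 10);
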
index dd1b25b2641c6474925537ced10bd45831912754..68d1fe7b877c82e2d302fb31f3702836bc6d6843 100644 (file)
@@ -212,7 +212,6 @@ int generic_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
-int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
 int cont_write_begin(struct file *, struct address_space *, loff_t,
                        unsigned, unsigned, struct page **, void **,
                        get_block_t *, loff_t *);
index 51e3145196f6f77e852cca9b5f141155bbf10685..36d57f74cd01c6c126ee2f7c2ea2c98f66868b2f 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <linux/wait.h>
 
-/**
+/*
  * struct completion - structure used to maintain state for a "completion"
  *
  * This is the opaque structure used to maintain the state for a "completion".
@@ -34,7 +34,7 @@ struct completion {
        ({ init_completion(&work); work; })
 
 /**
- * DECLARE_COMPLETION: - declare and initialize a completion structure
+ * DECLARE_COMPLETION - declare and initialize a completion structure
  * @work:  identifier for the completion structure
  *
  * This macro declares and initializes a completion structure. Generally used
@@ -50,7 +50,7 @@ struct completion {
  * are on the kernel stack:
  */
 /**
- * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure
+ * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure
  * @work:  identifier for the completion structure
  *
  * This macro declares and initializes a completion structure on the kernel
@@ -64,7 +64,7 @@ struct completion {
 #endif
 
 /**
- * init_completion: - Initialize a dynamically allocated completion
+ * init_completion - Initialize a dynamically allocated completion
  * @x:  completion structure that is to be initialized
  *
  * This inline function will initialize a dynamically created completion
@@ -92,7 +92,7 @@ extern void complete(struct completion *);
 extern void complete_all(struct completion *);
 
 /**
- * INIT_COMPLETION: - reinitialize a completion structure
+ * INIT_COMPLETION - reinitialize a completion structure
  * @x:  completion structure to be reinitialized
  *
  * This macro should be used to reinitialize a completion structure so it can
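
The completion.h hunks are documentation-only: a '/**' opener on a non-kernel-doc comment is demoted to '/*' so the tooling does not try to parse it, and the stray colon after each macro name is dropped, since kernel-doc expects 'name - description'. For reference, the expected shape is:

    /**
     * init_completion - Initialize a dynamically allocated completion
     * @x:  completion structure that is to be initialized
     *
     * (kernel-doc wants "name - summary"; colons belong only after
     * parameter names, never after the function or macro name)
     */
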
index 56285e5e1de4b6d80c8efd61ddf5495071c6a38a..b2a6009cba10f6e018093dd50fec8d913e7d5817 100644 (file)
@@ -34,9 +34,9 @@
 
 /* And dynamically-tunable limits and defaults: */
 struct files_stat_struct {
-       int nr_files;           /* read only */
-       int nr_free_files;      /* read only */
-       int max_files;          /* tunable */
+       unsigned long nr_files;         /* read only */
+       unsigned long nr_free_files;    /* read only */
+       unsigned long max_files;                /* tunable */
 };
 
 struct inodes_stat_t {
@@ -92,6 +92,9 @@ struct inodes_stat_t {
 /* Expect random access pattern */
 #define FMODE_RANDOM           ((__force fmode_t)0x1000)
 
+/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
+#define FMODE_UNSIGNED_OFFSET  ((__force fmode_t)0x2000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY         ((__force fmode_t)0x1000000)
 
@@ -402,7 +405,7 @@ extern void __init inode_init_early(void);
 extern void __init files_init(unsigned long);
 
 extern struct files_stat_struct files_stat;
-extern int get_max_files(void);
+extern unsigned long get_max_files(void);
 extern int sysctl_nr_open;
 extern struct inodes_stat_t inodes_stat;
 extern int leases_enable, lease_break_time;
@@ -722,7 +725,8 @@ struct posix_acl;
 
 struct inode {
        struct hlist_node       i_hash;
-       struct list_head        i_list;         /* backing dev IO list */
+       struct list_head        i_wb_list;      /* backing dev IO list */
+       struct list_head        i_lru;          /* inode LRU list */
        struct list_head        i_sb_list;
        struct list_head        i_dentry;
        unsigned long           i_ino;
@@ -789,6 +793,11 @@ struct inode {
        void                    *i_private; /* fs or device private pointer */
 };
 
+static inline int inode_unhashed(struct inode *inode)
+{
+       return hlist_unhashed(&inode->i_hash);
+}
+
 /*
  * inode->i_mutex nesting subclasses for the lock validator:
  *
@@ -1645,16 +1654,17 @@ struct super_operations {
  *
  * Q: What is the difference between I_WILL_FREE and I_FREEING?
  */
-#define I_DIRTY_SYNC           1
-#define I_DIRTY_DATASYNC       2
-#define I_DIRTY_PAGES          4
+#define I_DIRTY_SYNC           (1 << 0)
+#define I_DIRTY_DATASYNC       (1 << 1)
+#define I_DIRTY_PAGES          (1 << 2)
 #define __I_NEW                        3
 #define I_NEW                  (1 << __I_NEW)
-#define I_WILL_FREE            16
-#define I_FREEING              32
-#define I_CLEAR                        64
+#define I_WILL_FREE            (1 << 4)
+#define I_FREEING              (1 << 5)
+#define I_CLEAR                        (1 << 6)
 #define __I_SYNC               7
 #define I_SYNC                 (1 << __I_SYNC)
+#define I_REFERENCED           (1 << 8)
 
 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
 
@@ -1746,6 +1756,7 @@ static inline void file_accessed(struct file *file)
 }
 
 int sync_inode(struct inode *inode, struct writeback_control *wbc);
+int sync_inode_metadata(struct inode *inode, int wait);
 
 struct file_system_type {
        const char *name;
@@ -2090,7 +2101,6 @@ extern int check_disk_change(struct block_device *);
 extern int __invalidate_device(struct block_device *);
 extern int invalidate_partition(struct gendisk *, int);
 #endif
-extern int invalidate_inodes(struct super_block *);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                        pgoff_t start, pgoff_t end);
 
@@ -2174,7 +2184,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
 
 extern int inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
-extern void inode_add_to_lists(struct super_block *, struct inode *);
+extern void ihold(struct inode * inode);
 extern void iput(struct inode *);
 extern struct inode * igrab(struct inode *);
 extern ino_t iunique(struct super_block *, ino_t);
@@ -2194,11 +2204,11 @@ extern struct inode * iget_locked(struct super_block *, unsigned long);
 extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
 extern int insert_inode_locked(struct inode *);
 extern void unlock_new_inode(struct inode *);
+extern unsigned int get_next_ino(void);
 
 extern void __iget(struct inode * inode);
 extern void iget_failed(struct inode *);
 extern void end_writeback(struct inode *);
-extern void destroy_inode(struct inode *);
 extern void __destroy_inode(struct inode *);
 extern struct inode *new_inode(struct super_block *);
 extern int should_remove_suid(struct dentry *);
@@ -2206,9 +2216,11 @@ extern int file_remove_suid(struct file *);
 
 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
 extern void remove_inode_hash(struct inode *);
-static inline void insert_inode_hash(struct inode *inode) {
+static inline void insert_inode_hash(struct inode *inode)
+{
        __insert_inode_hash(inode, inode->i_ino);
 }
+extern void inode_sb_list_add(struct inode *inode);
 
 #ifdef CONFIG_BLOCK
 extern void submit_bio(int, struct bio *);
@@ -2491,7 +2503,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
 struct ctl_table;
 int proc_nr_files(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos);
-
+int proc_nr_dentry(struct ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos);
+int proc_nr_inodes(struct ctl_table *table, int write,
+                  void __user *buffer, size_t *lenp, loff_t *ppos);
 int __init get_filesystem_list(char *buf);
 
 #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
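
Within the fs.h changes, the I_* state constants are rewritten as explicit shifts so the new I_REFERENCED bit slots in cleanly; that bit gives a recently touched inode a second trip around the new inode LRU (i_lru, split out of the old i_list). Roughly how the LRU consumes it, as a sketch (see fs/inode.c for the real pruning logic):

    if (inode->i_state & I_REFERENCED) {
            inode->i_state &= ~I_REFERENCED;        /* second chance used up */
            list_move(&inode->i_lru, &inode_lru);   /* rotate to the far end */
            continue;
    }
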
index 975609cb8548504f20fd09e91476197a39dfbef1..e8713d55360ab90d85af0daca3b36bdfbcd430d1 100644 (file)
@@ -9,6 +9,32 @@
 
 struct vm_area_struct;
 
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA             0x01u
+#define ___GFP_HIGHMEM         0x02u
+#define ___GFP_DMA32           0x04u
+#define ___GFP_MOVABLE         0x08u
+#define ___GFP_WAIT            0x10u
+#define ___GFP_HIGH            0x20u
+#define ___GFP_IO              0x40u
+#define ___GFP_FS              0x80u
+#define ___GFP_COLD            0x100u
+#define ___GFP_NOWARN          0x200u
+#define ___GFP_REPEAT          0x400u
+#define ___GFP_NOFAIL          0x800u
+#define ___GFP_NORETRY         0x1000u
+#define ___GFP_COMP            0x4000u
+#define ___GFP_ZERO            0x8000u
+#define ___GFP_NOMEMALLOC      0x10000u
+#define ___GFP_HARDWALL                0x20000u
+#define ___GFP_THISNODE                0x40000u
+#define ___GFP_RECLAIMABLE     0x80000u
+#ifdef CONFIG_KMEMCHECK
+#define ___GFP_NOTRACK         0x200000u
+#else
+#define ___GFP_NOTRACK         0
+#endif
+
 /*
  * GFP bitmasks..
  *
@@ -18,10 +44,10 @@ struct vm_area_struct;
  * without the underscores and use them consistently. The definitions here may
  * be used in bit comparisons.
  */
-#define __GFP_DMA      ((__force gfp_t)0x01u)
-#define __GFP_HIGHMEM  ((__force gfp_t)0x02u)
-#define __GFP_DMA32    ((__force gfp_t)0x04u)
-#define __GFP_MOVABLE  ((__force gfp_t)0x08u)  /* Page is movable */
+#define __GFP_DMA      ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM  ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32    ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE  ((__force gfp_t)___GFP_MOVABLE)  /* Page is movable */
 #define GFP_ZONEMASK   (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 /*
  * Action modifiers - doesn't change the zoning
@@ -38,27 +64,22 @@ struct vm_area_struct;
  * __GFP_MOVABLE: Flag that this page will be movable by the page migration
  * mechanism or reclaimed
  */
-#define __GFP_WAIT     ((__force gfp_t)0x10u)  /* Can wait and reschedule? */
-#define __GFP_HIGH     ((__force gfp_t)0x20u)  /* Should access emergency pools? */
-#define __GFP_IO       ((__force gfp_t)0x40u)  /* Can start physical IO? */
-#define __GFP_FS       ((__force gfp_t)0x80u)  /* Can call down to low-level FS? */
-#define __GFP_COLD     ((__force gfp_t)0x100u) /* Cache-cold page required */
-#define __GFP_NOWARN   ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */
-#define __GFP_REPEAT   ((__force gfp_t)0x400u) /* See above */
-#define __GFP_NOFAIL   ((__force gfp_t)0x800u) /* See above */
-#define __GFP_NORETRY  ((__force gfp_t)0x1000u)/* See above */
-#define __GFP_COMP     ((__force gfp_t)0x4000u)/* Add compound page metadata */
-#define __GFP_ZERO     ((__force gfp_t)0x8000u)/* Return zeroed page on success */
-#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
-#define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
-#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
-#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
-
-#ifdef CONFIG_KMEMCHECK
-#define __GFP_NOTRACK  ((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
-#else
-#define __GFP_NOTRACK  ((__force gfp_t)0)
-#endif
+#define __GFP_WAIT     ((__force gfp_t)___GFP_WAIT)    /* Can wait and reschedule? */
+#define __GFP_HIGH     ((__force gfp_t)___GFP_HIGH)    /* Should access emergency pools? */
+#define __GFP_IO       ((__force gfp_t)___GFP_IO)      /* Can start physical IO? */
+#define __GFP_FS       ((__force gfp_t)___GFP_FS)      /* Can call down to low-level FS? */
+#define __GFP_COLD     ((__force gfp_t)___GFP_COLD)    /* Cache-cold page required */
+#define __GFP_NOWARN   ((__force gfp_t)___GFP_NOWARN)  /* Suppress page allocation failure warning */
+#define __GFP_REPEAT   ((__force gfp_t)___GFP_REPEAT)  /* See above */
+#define __GFP_NOFAIL   ((__force gfp_t)___GFP_NOFAIL)  /* See above */
+#define __GFP_NORETRY  ((__force gfp_t)___GFP_NORETRY) /* See above */
+#define __GFP_COMP     ((__force gfp_t)___GFP_COMP)    /* Add compound page metadata */
+#define __GFP_ZERO     ((__force gfp_t)___GFP_ZERO)    /* Return zeroed page on success */
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
+#define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOTRACK  ((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
@@ -186,14 +207,14 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 #endif
 
 #define GFP_ZONE_TABLE ( \
-       (ZONE_NORMAL << 0 * ZONES_SHIFT)                                \
-       | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)                     \
-       | (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)             \
-       | (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)                 \
-       | (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)                  \
-       | (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)   \
-       | (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
-       | (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
+       (ZONE_NORMAL << 0 * ZONES_SHIFT)                                      \
+       | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT)                          \
+       | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT)                  \
+       | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT)                      \
+       | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT)                       \
+       | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT)       \
+       | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT)   \
+       | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT)   \
 )
 
 /*
@@ -203,20 +224,20 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
  * allowed.
  */
 #define GFP_ZONE_BAD ( \
-       1 << (__GFP_DMA | __GFP_HIGHMEM)                                \
-       | 1 << (__GFP_DMA | __GFP_DMA32)                                \
-       | 1 << (__GFP_DMA32 | __GFP_HIGHMEM)                            \
-       | 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)                \
-       | 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)              \
-       | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)                \
-       | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)            \
-       | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
+       1 << (___GFP_DMA | ___GFP_HIGHMEM)                                    \
+       | 1 << (___GFP_DMA | ___GFP_DMA32)                                    \
+       | 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)                                \
+       | 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)                   \
+       | 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)                 \
+       | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)                   \
+       | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)               \
+       | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
 )
 
 static inline enum zone_type gfp_zone(gfp_t flags)
 {
        enum zone_type z;
-       int bit = flags & GFP_ZONEMASK;
+       int bit = (__force int) (flags & GFP_ZONEMASK);
 
        z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
                                         ((1 << ZONES_SHIFT) - 1);
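
To make the table lookup concrete, a minimal sketch (assuming a typical configuration where ZONES_SHIFT == 2, so each flag combination indexes a 2-bit slot; not kernel code):

	/* Hypothetical walk-through of gfp_zone() for a highmem request. */
	int bit = 0x02;					/* ___GFP_HIGHMEM */
	int z = (GFP_ZONE_TABLE >> (bit * 2)) & ((1 << 2) - 1);
	/* z == OPT_ZONE_HIGHMEM: slot 0x02 of the table was filled with
	 * OPT_ZONE_HIGHMEM by the initializer above. */

Switching the initializers from __GFP_* to the new plain-integer ___GFP_* constants means the shifts and multiplications happen on ordinary ints rather than __bitwise gfp_t values, which keeps sparse quiet about the arithmetic.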
index e3060ef85b6dead79e7922faaaffd1351f209e95..8a85ec109a3a57f1dbc8bd5a01a90fb6ad093bb3 100644 (file)
@@ -28,18 +28,6 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 
 #include <asm/kmap_types.h>
 
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type);
-
-#else
-
-static inline void debug_kmap_atomic(enum km_type type)
-{
-}
-
-#endif
-
 #ifdef CONFIG_HIGHMEM
 #include <asm/highmem.h>
 
@@ -49,6 +37,27 @@ extern unsigned long totalhigh_pages;
 
 void kmap_flush_unused(void);
 
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+       int idx = __get_cpu_var(__kmap_atomic_idx)++;
+#ifdef CONFIG_DEBUG_HIGHMEM
+       WARN_ON_ONCE(in_irq() && !irqs_disabled());
+       BUG_ON(idx > KM_TYPE_NR);
+#endif
+       return idx;
+}
+
+static inline int kmap_atomic_idx_pop(void)
+{
+       int idx = --__get_cpu_var(__kmap_atomic_idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(idx < 0);
+#endif
+       return idx;
+}
+
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
@@ -66,19 +75,19 @@ static inline void kunmap(struct page *page)
 {
 }
 
-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
 {
        pagefault_disable();
        return page_address(page);
 }
-#define kmap_atomic_prot(page, idx, prot)      kmap_atomic(page, idx)
+#define kmap_atomic_prot(page, prot)   __kmap_atomic(page)
 
-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
 {
        pagefault_enable();
 }
 
-#define kmap_atomic_pfn(pfn, idx)      kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_pfn(pfn)   kmap_atomic(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)       virt_to_page(ptr)
 
 #define kmap_flush_unused()    do {} while(0)
@@ -86,12 +95,20 @@ static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
 
 #endif /* CONFIG_HIGHMEM */
 
-/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
-/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
-#define kunmap_atomic(addr, idx) do { \
-               BUILD_BUG_ON(__same_type((addr), struct page *)); \
-               kunmap_atomic_notypecheck((addr), (idx)); \
-       } while (0)
+/*
+ * Make both kmap_atomic(page, idx) and kmap_atomic(page) work.
+ */
+#define kmap_atomic(page, args...) __kmap_atomic(page)
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr, args...)                           \
+do {                                                           \
+       BUILD_BUG_ON(__same_type((addr), struct page *));       \
+       __kunmap_atomic(addr);                                  \
+} while (0)
 
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
@@ -201,8 +218,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_user_page(vto, vfrom, vaddr, to);
-       kunmap_atomic(vfrom, KM_USER0);
        kunmap_atomic(vto, KM_USER1);
+       kunmap_atomic(vfrom, KM_USER0);
 }
 
 #endif
@@ -214,8 +231,8 @@ static inline void copy_highpage(struct page *to, struct page *from)
        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_page(vto, vfrom);
-       kunmap_atomic(vfrom, KM_USER0);
        kunmap_atomic(vto, KM_USER1);
+       kunmap_atomic(vfrom, KM_USER0);
 }
 
 #endif /* _LINUX_HIGHMEM_H */
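
The kunmap_atomic() reorderings above are not cosmetic: with the per-CPU kmap_atomic_idx_push()/kmap_atomic_idx_pop() scheme, atomic kmaps now form a stack and must be released in last-in, first-out order. A usage sketch:

	/* Sketch only: nested atomic kmaps under the stack discipline. */
	void *vfrom = kmap_atomic(from);	/* index pushed: slot 0 */
	void *vto = kmap_atomic(to);		/* index pushed: slot 1 */
	copy_page(vto, vfrom);
	kunmap_atomic(vto);			/* pop slot 1 first ... */
	kunmap_atomic(vfrom);			/* ... then slot 0 */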
diff --git a/include/linux/i2c/apds990x.h b/include/linux/i2c/apds990x.h
new file mode 100644 (file)
index 0000000..d186fcc
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * This file is part of the APDS990x sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __APDS990X_H__
+#define __APDS990X_H__
+
+
+#define APDS_IRLED_CURR_12mA   0x3
+#define APDS_IRLED_CURR_25mA   0x2
+#define APDS_IRLED_CURR_50mA   0x1
+#define APDS_IRLED_CURR_100mA  0x0
+
+/**
+ * struct apds990x_chip_factors - defines effect of the cover window
+ * @ga: Total glass attenuation
+ * @cf1: clear channel factor 1 for raw to lux conversion
+ * @irf1: IR channel factor 1 for raw to lux conversion
+ * @cf2: clear channel factor 2 for raw to lux conversion
+ * @irf2: IR channel factor 2 for raw to lux conversion
+ * @df: device factor for conversion formulas
+ *
+ * Structure for tuning the ALS calculation to match the environment.
+ * Values depend on the material above the sensor and on the sensor
+ * itself. If ga is zero, the driver uses the uncovered-sensor defaults.
+ * Format: decimal value * APDS_PARAM_SCALE, except df, which is a plain integer.
+ */
+#define APDS_PARAM_SCALE 4096
+struct apds990x_chip_factors {
+       int ga;
+       int cf1;
+       int irf1;
+       int cf2;
+       int irf2;
+       int df;
+};
+
+/**
+ * struct apds990x_platform_data - platform data for apds990x.c driver
+ * @cf: chip factor data
+ * @pdrive: IR-led driving current
+ * @ppcount: number of IR pulses used for proximity estimation
+ * @setup_resources: interrupt line setup call back function
+ * @release_resources: interrupt line release call back function
+ *
+ * The proximity detection result depends heavily on correct ppcount, pdrive
+ * and cover window values.
+ *
+ */
+
+struct apds990x_platform_data {
+       struct apds990x_chip_factors cf;
+       u8     pdrive;
+       u8     ppcount;
+       int    (*setup_resources)(void);
+       int    (*release_resources)(void);
+};
+
+#endif
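
A board file would pass these tuning parameters through platform data; a sketch with purely illustrative values (the variable name and all numbers here are made up, not reference calibration data):

	static struct apds990x_platform_data board_apds990x_data = {
		.cf = {
			.ga	= 1 * APDS_PARAM_SCALE,	/* GA 1.0: no extra attenuation */
			.cf1	= 4096,
			.irf1	= 2441,
			.cf2	= 2916,
			.irf2	= 1575,
			.df	= 52,
		},
		.pdrive	 = APDS_IRLED_CURR_25mA,
		.ppcount = 4,
	};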
diff --git a/include/linux/i2c/bh1770glc.h b/include/linux/i2c/bh1770glc.h
new file mode 100644 (file)
index 0000000..8b5e2df
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __BH1770_H__
+#define __BH1770_H__
+
+/**
+ * struct bh1770_platform_data - platform data for bh1770glc driver
+ * @led_def_curr: IR led driving current.
+ * @glass_attenuation: Attenuation factor for the cover window.
+ * @setup_resources: Callback for interrupt line setup
+ * @release_resources: Callback for interrupt line release
+ *
+ * Example of glass attenuation: 16384 * 385 / 100 means an attenuation factor
+ * of 3.85, i.e. light_above_sensor = light_above_cover_window / 3.85
+ */
+
+struct bh1770_platform_data {
+#define BH1770_LED_5mA 0
+#define BH1770_LED_10mA        1
+#define BH1770_LED_20mA        2
+#define BH1770_LED_50mA        3
+#define BH1770_LED_100mA 4
+#define BH1770_LED_150mA 5
+#define BH1770_LED_200mA 6
+       __u8 led_def_curr;
+#define BH1770_NEUTRAL_GA 16384 /* 16384 / 16384 = 1 */
+       __u32 glass_attenuation;
+       int (*setup_resources)(void);
+       int (*release_resources)(void);
+};
+#endif
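
Following the attenuation example in the kerneldoc, a hypothetical board entry for a cover window that passes 1/3.85 of the light (names and values are illustrative only):

	static struct bh1770_platform_data board_bh1770_data = {
		.led_def_curr	   = BH1770_LED_10mA,
		/* 3.85 * BH1770_NEUTRAL_GA: 16384 * 385 / 100 == 63078 */
		.glass_attenuation = 16384 * 385 / 100,
	};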
index 928ae712709faa041160a1a4a2f45ad5dcf1baf3..13a801f3d028225533f2ce701349b2b0657ca734 100644 (file)
@@ -81,6 +81,7 @@ struct idr {
 #define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
 
 /**
+ * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
  *
  * idr_find() is able to be called locklessly, using RCU. The caller must
index 7fb59279373823339f6fdda86158952e3e6fac45..8cdcc2a199ad1641c3ca652263922fbdcddf6f6a 100644 (file)
@@ -81,8 +81,7 @@ io_mapping_free(struct io_mapping *mapping)
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-                        unsigned long offset,
-                        int slot)
+                        unsigned long offset)
 {
        resource_size_t phys_addr;
        unsigned long pfn;
@@ -90,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
        BUG_ON(offset >= mapping->size);
        phys_addr = mapping->base + offset;
        pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
-       return iomap_atomic_prot_pfn(pfn, slot, mapping->prot);
+       return iomap_atomic_prot_pfn(pfn, mapping->prot);
 }
 
 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
-       iounmap_atomic(vaddr, slot);
+       iounmap_atomic(vaddr);
 }
 
 static inline void __iomem *
@@ -137,14 +136,13 @@ io_mapping_free(struct io_mapping *mapping)
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-                        unsigned long offset,
-                        int slot)
+                        unsigned long offset)
 {
        return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
 }
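
Dropping the slot argument mirrors the kmap_atomic() change: the slot is now tracked per-CPU instead of being chosen by the caller. A sketch of the new calling convention (mapping, offset and value are placeholders):

	void __iomem *p = io_mapping_map_atomic_wc(mapping, offset);
	writel(value, p);
	io_mapping_unmap_atomic(p);	/* old code also passed a KM_* slot */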
 
index edef168a04064b61161cff70e9e8d803aa2ed1a4..450092c1e35f459f6db7dba29dbb94ec1a2c683c 100644 (file)
@@ -173,6 +173,11 @@ extern int _cond_resched(void);
                (__x < 0) ? -__x : __x;         \
        })
 
+#define abs64(x) ({                            \
+               s64 __x = (x);                  \
+               (__x < 0) ? -__x : __x;         \
+       })
+
 #ifdef CONFIG_PROVE_LOCKING
 void might_fault(void);
 #else
@@ -203,10 +208,10 @@ extern unsigned long simple_strtoul(const char *,char **,unsigned int);
 extern long simple_strtol(const char *,char **,unsigned int);
 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
 extern long long simple_strtoll(const char *,char **,unsigned int);
-extern int strict_strtoul(const char *, unsigned int, unsigned long *);
-extern int strict_strtol(const char *, unsigned int, long *);
-extern int strict_strtoull(const char *, unsigned int, unsigned long long *);
-extern int strict_strtoll(const char *, unsigned int, long long *);
+extern int __must_check strict_strtoul(const char *, unsigned int, unsigned long *);
+extern int __must_check strict_strtol(const char *, unsigned int, long *);
+extern int __must_check strict_strtoull(const char *, unsigned int, unsigned long long *);
+extern int __must_check strict_strtoll(const char *, unsigned int, long long *);
 extern int sprintf(char * buf, const char * fmt, ...)
        __attribute__ ((format (printf, 2, 3)));
 extern int vsprintf(char *buf, const char *, va_list)
@@ -277,6 +282,11 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 asmlinkage int printk(const char * fmt, ...)
        __attribute__ ((format (printf, 1, 2))) __cold;
 
+/*
+ * Please don't use printk_ratelimit(), because it shares ratelimiting state
+ * with all other unrelated printk_ratelimit() callsites.  Instead use
+ * printk_ratelimited() or plain old __ratelimit().
+ */
 extern int __printk_ratelimit(const char *func);
 #define printk_ratelimit() __printk_ratelimit(__func__)
 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
@@ -651,6 +661,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
        (void) (&_max1 == &_max2);              \
        _max1 > _max2 ? _max1 : _max2; })
 
+#define min3(x, y, z) ({                       \
+       typeof(x) _min1 = (x);                  \
+       typeof(y) _min2 = (y);                  \
+       typeof(z) _min3 = (z);                  \
+       (void) (&_min1 == &_min2);              \
+       (void) (&_min1 == &_min3);              \
+       _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
+               (_min2 < _min3 ? _min2 : _min3); })
+
+#define max3(x, y, z) ({                       \
+       typeof(x) _max1 = (x);                  \
+       typeof(y) _max2 = (y);                  \
+       typeof(z) _max3 = (z);                  \
+       (void) (&_max1 == &_max2);              \
+       (void) (&_max1 == &_max3);              \
+       _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
+               (_max2 > _max3 ? _max2 : _max3); })
+
 /**
  * min_not_zero - return the minimum that is _not_ zero, unless both are zero
  * @x: value1
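
min3()/max3() evaluate each argument exactly once and reuse the pointer-comparison trick from min()/max(): the (void)(&_min1 == &_min2) lines make the compiler warn when the argument types differ. A usage sketch:

	size_t a = 4, b = 9, c = 2;
	size_t lo = min3(a, b, c);	/* 2 */
	size_t hi = max3(a, b, c);	/* 9 */
	/* min3(a, b, 2.0) would warn: comparison of distinct pointer types */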
index 62dbee554f608c91fe7b2b3bf01f390260821c52..c238ad2f82eae4f149e70fc7138fdd5beaa40b13 100644 (file)
@@ -171,11 +171,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
        }
 
 
-static inline unsigned int __must_check
-__kfifo_must_check_helper(unsigned int val)
-{
-       return val;
-}
+/* __kfifo_must_check_helper() is temporarily disabled because it was faulty */
+#define __kfifo_must_check_helper(x) (x)
 
 /**
  * kfifo_initialized - Check if the fifo is initialized
index 88a000617d775fb74333a5f54aceac8460cb5008..9a5f8a71810c55f231565422f8912bea5bbe2d7b 100644 (file)
@@ -636,6 +636,12 @@ static inline void hlist_add_after(struct hlist_node *n,
                next->next->pprev  = &next->next;
 }
 
+/* after that we'll appear to be on some hlist and hlist_del will work */
+static inline void hlist_add_fake(struct hlist_node *n)
+{
+       n->pprev = &n->next;
+}
+
 /*
  * Move a list from one list head to another. Fixup the pprev
  * reference of the first entry if it exists.
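
hlist_add_fake() makes a node self-referential, so a later hlist_del() unlinks against the node's own fields instead of a real list. A sketch, assuming the node is initialized first so n.next is NULL:

	struct hlist_node n;

	INIT_HLIST_NODE(&n);	/* n.next = NULL, n.pprev = NULL */
	hlist_add_fake(&n);	/* n.pprev = &n.next: looks list-resident */
	hlist_del(&n);		/* safe: only n's own fields are written */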
index c87f1528703a68c2dd024d612a203ae4517b5cea..23fcdfcba81b33ef0d5e2957c591f8baeb1f1836 100644 (file)
@@ -35,6 +35,14 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
        return dividend / divisor;
 }
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ */
+static inline s64 div64_s64(s64 dividend, s64 divisor)
+{
+       return dividend / divisor;
+}
+
 #elif BITS_PER_LONG == 32
 
 #ifndef div_u64_rem
@@ -53,6 +61,10 @@ extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 extern u64 div64_u64(u64 dividend, u64 divisor);
 #endif
 
+#ifndef div64_s64
+extern s64 div64_s64(s64 dividend, s64 divisor);
+#endif
+
 #endif /* BITS_PER_LONG */
 
 /**
index 864035fb8f8a83e547efbdd161093117bac94bc2..4307231bd22fb23ad9732295f472bc4b8564e200 100644 (file)
@@ -70,6 +70,10 @@ extern void online_page(struct page *page);
 extern int online_pages(unsigned long, unsigned long);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern bool is_pageblock_removable_nolock(struct page *page);
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
 /* reasonably generic interface to expand the physical pages in a zone  */
 extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
        unsigned long nr_pages);
index 78a1b96717523ba47b84867fc875aacef4f51e20..9a18667c13cc02a0709e648d339b0c72278887d0 100644 (file)
@@ -58,6 +58,7 @@ enum {
        MLX4_CMD_SENSE_PORT      = 0x4d,
        MLX4_CMD_HW_HEALTH_CHECK = 0x50,
        MLX4_CMD_SET_PORT        = 0xc,
+       MLX4_CMD_SET_NODE        = 0x5a,
        MLX4_CMD_ACCESS_DDR      = 0x2e,
        MLX4_CMD_MAP_ICM         = 0xffa,
        MLX4_CMD_UNMAP_ICM       = 0xff9,
@@ -141,6 +142,7 @@ enum {
        MLX4_SET_PORT_MAC_TABLE = 0x2,
        MLX4_SET_PORT_VLAN_TABLE = 0x3,
        MLX4_SET_PORT_PRIO_MAP  = 0x4,
+       MLX4_SET_PORT_GID_TABLE = 0x5,
 };
 
 struct mlx4_dev;
index 7338654c02b4de5402cbba6c80a3f96106e3b717..a7b15bc7648e27744fea2fee5de8cefb83f99340 100644 (file)
@@ -67,7 +67,8 @@ enum {
        MLX4_DEV_CAP_FLAG_ATOMIC        = 1 << 18,
        MLX4_DEV_CAP_FLAG_RAW_MCAST     = 1 << 19,
        MLX4_DEV_CAP_FLAG_UD_AV_PORT    = 1 << 20,
-       MLX4_DEV_CAP_FLAG_UD_MCAST      = 1 << 21
+       MLX4_DEV_CAP_FLAG_UD_MCAST      = 1 << 21,
+       MLX4_DEV_CAP_FLAG_IBOE          = 1 << 30
 };
 
 enum {
@@ -171,6 +172,10 @@ enum {
        MLX4_NUM_FEXCH          = 64 * 1024,
 };
 
+enum {
+       MLX4_MAX_FAST_REG_PAGES = 511,
+};
+
 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 {
        return (major << 32) | (minor << 16) | subminor;
@@ -379,6 +384,27 @@ struct mlx4_av {
        u8                      dgid[16];
 };
 
+struct mlx4_eth_av {
+       __be32          port_pd;
+       u8              reserved1;
+       u8              smac_idx;
+       u16             reserved2;
+       u8              reserved3;
+       u8              gid_index;
+       u8              stat_rate;
+       u8              hop_limit;
+       __be32          sl_tclass_flowlabel;
+       u8              dgid[16];
+       u32             reserved4[2];
+       __be16          vlan;
+       u8              mac[6];
+};
+
+union mlx4_ext_av {
+       struct mlx4_av          ib;
+       struct mlx4_eth_av      eth;
+};
+
 struct mlx4_dev {
        struct pci_dev         *pdev;
        unsigned long           flags;
@@ -407,6 +433,12 @@ struct mlx4_init_port_param {
                if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
                     ~(dev)->caps.port_mask) & 1 << ((port) - 1))
 
+#define mlx4_foreach_ib_transport_port(port, dev)                      \
+       for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)     \
+               if (((dev)->caps.port_mask & 1 << ((port) - 1)) ||      \
+                   ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+
+
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf);
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -474,6 +506,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
 
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
 
index 53c5fdb6eac48cae01caa030700d3817538de7f0..f407cd4bfb341d7a2ac0a6f624130fa8430f6d51 100644 (file)
@@ -44,15 +44,24 @@ enum mlx4_dev_event {
        MLX4_DEV_EVENT_PORT_REINIT,
 };
 
+enum mlx4_protocol {
+       MLX4_PROTOCOL_IB,
+       MLX4_PROTOCOL_EN,
+};
+
 struct mlx4_interface {
        void *                  (*add)   (struct mlx4_dev *dev);
        void                    (*remove)(struct mlx4_dev *dev, void *context);
        void                    (*event) (struct mlx4_dev *dev, void *context,
                                          enum mlx4_dev_event event, int port);
+       void *                  (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
        struct list_head        list;
+       enum mlx4_protocol      protocol;
 };
 
 int mlx4_register_interface(struct mlx4_interface *intf);
 void mlx4_unregister_interface(struct mlx4_interface *intf);
 
+void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+
 #endif /* MLX4_DRIVER_H */
index 7abe64326f7233bef36c31d3379ff9e8919d5dcb..0eeb2a1a867c97159f5a57c378dad3ad92c226d7 100644 (file)
@@ -109,10 +109,11 @@ struct mlx4_qp_path {
        __be32                  tclass_flowlabel;
        u8                      rgid[16];
        u8                      sched_queue;
-       u8                      snooper_flags;
+       u8                      vlan_index;
        u8                      reserved3[2];
        u8                      counter_index;
-       u8                      reserved4[7];
+       u8                      reserved4;
+       u8                      dmac[6];
 };
 
 struct mlx4_qp_context {
@@ -166,6 +167,7 @@ enum {
        MLX4_WQE_CTRL_TCP_UDP_CSUM      = 1 << 5,
        MLX4_WQE_CTRL_INS_VLAN          = 1 << 6,
        MLX4_WQE_CTRL_STRONG_ORDER      = 1 << 7,
+       MLX4_WQE_CTRL_FORCE_LOOPBACK    = 1 << 0,
 };
 
 struct mlx4_wqe_ctrl_seg {
@@ -219,7 +221,8 @@ struct mlx4_wqe_datagram_seg {
        __be32                  av[8];
        __be32                  dqpn;
        __be32                  qkey;
-       __be32                  reservd[2];
+       __be16                  vlan;
+       u8                      mac[6];
 };
 
 struct mlx4_wqe_lso_seg {
index a4c66846fb8f9547f71820083ac03a3fa468880c..721f451c3029bb756324458f93eccf107778d746 100644 (file)
@@ -144,6 +144,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_WRITE       0x01    /* Fault was a write access */
 #define FAULT_FLAG_NONLINEAR   0x02    /* Fault was via a nonlinear mapping */
 #define FAULT_FLAG_MKWRITE     0x04    /* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY 0x08    /* Retry fault if blocking */
 
 /*
  * This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -497,8 +498,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 #define NODES_PGSHIFT          (NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT          (ZONES_PGOFF * (ZONES_WIDTH != 0))
 
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */
-#ifdef NODE_NOT_IN_PAGEFLAGS
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
 #define ZONEID_SHIFT           (SECTIONS_SHIFT + ZONES_SHIFT)
 #define ZONEID_PGOFF           ((SECTIONS_PGOFF < ZONES_PGOFF)? \
                                                SECTIONS_PGOFF : ZONES_PGOFF)
@@ -723,6 +724,7 @@ static inline int page_mapped(struct page *page)
 
 #define VM_FAULT_NOPAGE        0x0100  /* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED        0x0200  /* ->fault locked the returned page */
+#define VM_FAULT_RETRY 0x0400  /* ->fault blocked, must retry */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
@@ -868,6 +870,7 @@ int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
                                struct page *page);
 void account_page_dirtied(struct page *page, struct address_space *mapping);
+void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
@@ -1031,7 +1034,15 @@ extern void unregister_shrinker(struct shrinker *);
 
 int vma_wants_writenotify(struct vm_area_struct *vma);
 
-extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
+extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+                              spinlock_t **ptl);
+static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+                                   spinlock_t **ptl)
+{
+       pte_t *ptep;
+       __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
+       return ptep;
+}
 
 #ifdef __PAGETABLE_PUD_FOLDED
 static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
index cb57d657ce4d2643c58e21eb23b2b1434f0f7a2f..bb7288a782fde2ff1d19978a7b9cb387476575ba 100644 (file)
@@ -310,6 +310,8 @@ struct mm_struct {
 #ifdef CONFIG_MMU_NOTIFIER
        struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
+       /* How many tasks sharing this mm are OOM_DISABLE */
+       atomic_t oom_disable_count;
 };
 
 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
index 3984c4eb41fdc9c85759dbffe308df5a806391f4..39c24ebe9cfd4e75b8841deec06aa6d78c91c2ad 100644 (file)
@@ -104,6 +104,8 @@ enum zone_stat_item {
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        NR_SHMEM,               /* shmem pages (included tmpfs/GEM pages) */
+       NR_DIRTIED,             /* page dirtyings since bootup */
+       NR_WRITTEN,             /* page writings since bootup */
 #ifdef CONFIG_NUMA
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non intended node */
@@ -421,6 +423,9 @@ struct zone {
 typedef enum {
        ZONE_RECLAIM_LOCKED,            /* prevents concurrent reclaim */
        ZONE_OOM_LOCKED,                /* zone is in OOM killer zonelist */
+       ZONE_CONGESTED,                 /* zone has many dirty pages backed by
+                                        * a congested BDI
+                                        */
 } zone_flags_t;
 
 static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
@@ -438,6 +443,11 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
        clear_bit(flag, &zone->flags);
 }
 
+static inline int zone_is_reclaim_congested(const struct zone *zone)
+{
+       return test_bit(ZONE_CONGESTED, &zone->flags);
+}
+
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
        return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
index 9d2f1837b3d8615e64395157c06f993dff16468e..112adf8bd47dd2c1d13800577f340d6cfa9c0b46 100644 (file)
@@ -21,8 +21,8 @@
 #define __module_cat(a,b) ___module_cat(a,b)
 #define __MODULE_INFO(tag, name, info)                                   \
 static const char __module_cat(name,__LINE__)[]                                  \
-  __used                                                                 \
-  __attribute__((section(".modinfo"),unused)) = __stringify(tag) "=" info
+  __used __attribute__((section(".modinfo"), unused, aligned(1)))        \
+  = __stringify(tag) "=" info
 #else  /* !MODULE */
 #define __MODULE_INFO(tag, name, info)
 #endif
index e8c06122be36058f88cb6e291360eb2c0ebe09a0..19ef95d293aecbc9f23e48b0235ef47625e70536 100644 (file)
@@ -67,7 +67,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 
 #define get_pageblock_flags(page) \
                        get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
-#define set_pageblock_flags(page) \
-                       set_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
+#define set_pageblock_flags(page, flags) \
+                       set_pageblock_flags_group(page, flags,  \
+                                                 0, NR_PAGEBLOCK_BITS-1)
 
 #endif /* PAGEBLOCK_FLAGS_H */
index e12cdc6d79ee79e80ddbcf6709e97a403a4746f7..2d1ffe3cf1ee6ae4d8f443ed5db16044244c413e 100644 (file)
@@ -299,6 +299,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
+extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+                               unsigned int flags);
 extern void unlock_page(struct page *page);
 
 static inline void __set_page_locked(struct page *page)
@@ -350,6 +352,17 @@ static inline void lock_page_nosync(struct page *page)
                __lock_page_nosync(page);
 }
        
+/*
+ * lock_page_or_retry - Lock the page, unless this would block and the
+ * caller indicated that it can handle a retry.
+ */
+static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
+                                    unsigned int flags)
+{
+       might_sleep();
+       return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+}
+
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
  * Never use this directly!
index 8f69d09a41a50265977c4e89c2ad13672d98a816..03ff67b0cdf5403deab6f69924c3f9a419e644db 100644 (file)
@@ -36,6 +36,8 @@ static inline void ratelimit_state_init(struct ratelimit_state *rs,
        rs->begin = 0;
 }
 
+extern struct ratelimit_state printk_ratelimit_state;
+
 extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
 #define __ratelimit(state) ___ratelimit(state, __func__)
 
index 91a4177e60ce3779e21c7a3fd8bfbc08858d43f3..5ca47e59b7278ffbb0dd4fc624d9e4cf1697d0ae 100644 (file)
@@ -2072,6 +2072,8 @@ void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
 void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
 
+int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len);
+
 /* namei.c */
 void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
 int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
index 31b2fd75dcbae59d3ed633253c1f5cd3850c201a..bb83c0da207197122c9ef3988cf0b3d843404cd3 100644 (file)
@@ -25,8 +25,8 @@
  * pointing to this anon_vma once its vma list is empty.
  */
 struct anon_vma {
-       spinlock_t lock;        /* Serialize access to vma list */
        struct anon_vma *root;  /* Root of this anon_vma tree */
+       spinlock_t lock;        /* Serialize access to vma list */
 #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
 
        /*
@@ -205,9 +205,20 @@ int try_to_unmap_one(struct page *, struct vm_area_struct *,
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
  */
-pte_t *page_check_address(struct page *, struct mm_struct *,
+pte_t *__page_check_address(struct page *, struct mm_struct *,
                                unsigned long, spinlock_t **, int);
 
+static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+                                       unsigned long address,
+                                       spinlock_t **ptlp, int sync)
+{
+       pte_t *ptep;
+
+       __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
+                                                      ptlp, sync));
+       return ptep;
+}
+
 /*
  * Used by swapoff to help locate where page is expected in vma.
  */
@@ -230,7 +241,20 @@ int try_to_munlock(struct page *);
 /*
  * Called by memory-failure.c to kill processes.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page);
+struct anon_vma *__page_lock_anon_vma(struct page *page);
+
+static inline struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+       struct anon_vma *anon_vma;
+
+       __cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));
+
+       /* (void) is needed to make gcc happy */
+       (void) __cond_lock(&anon_vma->root->lock, anon_vma);
+
+       return anon_vma;
+}
+
 void page_unlock_anon_vma(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
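
Both wrappers here follow the usual sparse pattern for functions that conditionally return with a lock held: the out-of-line __-prefixed function does the work, and the inline wrapper's __cond_lock() records the acquisition for sparse (on non-checker builds __cond_lock(x, c) expands to just (c)). A caller sketch:

	spinlock_t *ptl;
	pte_t *pte;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (pte) {
		/* sparse knows *ptl is held on this path */
		pte_unmap_unlock(pte, ptl);
	}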
 
index 56154bbb8da9cf012f990d31448f741538201bcb..393ce94e54b77155227f5df48cf7e81e6b9ec09f 100644 (file)
@@ -1706,7 +1706,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_DUMPCORE    0x00000200      /* dumped core */
 #define PF_SIGNALED    0x00000400      /* killed by a signal */
 #define PF_MEMALLOC    0x00000800      /* Allocating memory */
-#define PF_FLUSHER     0x00001000      /* responsible for disk writeback */
 #define PF_USED_MATH   0x00002000      /* if unset the fpu must be initialized before use */
 #define PF_FREEZING    0x00004000      /* freeze in progress. do not account to load */
 #define PF_NOFREEZE    0x00008000      /* this thread should not be frozen */
index 0299b4ce63dbd4b96faf4265043a75ccfacc6183..7f770c638e99d670840ed99856dce0b07a660f8b 100644 (file)
@@ -70,9 +70,6 @@
 #define SFI_SIG_APIC           "APIC"
 #define SFI_SIG_XSDT           "XSDT"
 #define SFI_SIG_WAKE           "WAKE"
-#define SFI_SIG_SPIB           "SPIB"
-#define SFI_SIG_I2CB           "I2CB"
-#define SFI_SIG_GPEM           "GPEM"
 #define SFI_SIG_DEVS           "DEVS"
 #define SFI_SIG_GPIO           "GPIO"
 
@@ -168,27 +165,6 @@ struct sfi_gpio_table_entry {
        char    pin_name[16];
 } __packed;
 
-struct sfi_spi_table_entry {
-       u16     host_num;       /* attached to host 0, 1...*/
-       u16     cs;             /* chip select */
-       u16     irq_info;
-       char    name[16];
-       u8      dev_info[10];
-} __packed;
-
-struct sfi_i2c_table_entry {
-       u16     host_num;
-       u16     addr;           /* slave addr */
-       u16     irq_info;
-       char    name[16];
-       u8      dev_info[10];
-} __packed;
-
-struct sfi_gpe_table_entry {
-       u16     logical_id;     /* logical id */
-       u16     phys_id;        /* physical GPE id */
-} __packed;
-
 typedef int (*sfi_table_handler) (struct sfi_table_header *table);
 
 #ifdef CONFIG_SFI
index 7cdd63366f883a164a7f5d5b74ff8882c62b4f8c..eba53e71d2ccfa5f678cc8063dd55c0d56ef1dec 100644 (file)
@@ -271,8 +271,18 @@ extern void scan_mapping_unevictable_pages(struct address_space *);
 extern unsigned long scan_unevictable_pages;
 extern int scan_unevictable_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
+#ifdef CONFIG_NUMA
 extern int scan_unevictable_register_node(struct node *node);
 extern void scan_unevictable_unregister_node(struct node *node);
+#else
+static inline int scan_unevictable_register_node(struct node *node)
+{
+       return 0;
+}
+static inline void scan_unevictable_unregister_node(struct node *node)
+{
+}
+#endif
 
 extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
index 357dbc19606f8920f0b060ad04fb1d4fb2dfca00..c2a9eb44f2fa974cc7ac15a35ae9c184c1e6773f 100644 (file)
@@ -121,15 +121,7 @@ typedef            __u64           u_int64_t;
 typedef                __s64           int64_t;
 #endif
 
-/*
- * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
- * common 32/64-bit compat problems.
- * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
- * architectures) and to 8-byte boundaries on 64-bit architetures.  The new
- * aligned_64 type enforces 8-byte alignment so that structs containing
- * aligned_64 values have the same alignment on 32-bit and 64-bit architectures.
- * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
- */
+/* this is a special 64bit data type that is 8-byte aligned */
 #define aligned_u64 __u64 __attribute__((aligned(8)))
 #define aligned_be64 __be64 __attribute__((aligned(8)))
 #define aligned_le64 __le64 __attribute__((aligned(8)))
@@ -186,7 +178,15 @@ typedef __u64 __bitwise __be64;
 typedef __u16 __bitwise __sum16;
 typedef __u32 __bitwise __wsum;
 
-/* this is a special 64bit data type that is 8-byte aligned */
+/*
+ * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
+ * common 32/64-bit compat problems.
+ * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
+ * architectures) and to 8-byte boundaries on 64-bit architectures.  The new
+ * aligned_u64 type enforces 8-byte alignment so that structs containing
+ * aligned_u64 values have the same alignment on 32-bit and 64-bit architectures.
+ * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
+ */
 #define __aligned_u64 __u64 __attribute__((aligned(8)))
 #define __aligned_be64 __be64 __attribute__((aligned(8)))
 #define __aligned_le64 __le64 __attribute__((aligned(8)))
index 63a4fe6d51bd68277634943cb062dc4ea62fced1..a03dcf62ca9d591d00290ced4394f7873b81329e 100644 (file)
@@ -53,8 +53,10 @@ static inline void vmalloc_init(void)
 #endif
 
 extern void *vmalloc(unsigned long size);
+extern void *vzalloc(unsigned long size);
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
+extern void *vzalloc_node(unsigned long size, int node);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
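
vzalloc() and vzalloc_node() are the vmalloc counterparts of kzalloc(); a sketch of the open-coded behavior they replace:

	void *p = vmalloc(size);
	if (p)
		memset(p, 0, size);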
index 070bb7a8893646418b8b4ddcfbe9a3436e514711..0c0771f06bfa745e8e4e5add4ec4823cf52eb813 100644 (file)
@@ -190,7 +190,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
                __INIT_WORK((_work), (_func), 0);               \
        } while (0)
 
-#define INIT_WORK_ON_STACK(_work, _func)                       \
+#define INIT_WORK_ONSTACK(_work, _func)                                \
        do {                                                    \
                __INIT_WORK((_work), (_func), 1);               \
        } while (0)
@@ -201,9 +201,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
                init_timer(&(_work)->timer);                    \
        } while (0)
 
-#define INIT_DELAYED_WORK_ON_STACK(_work, _func)               \
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)                        \
        do {                                                    \
-               INIT_WORK_ON_STACK(&(_work)->work, (_func));    \
+               INIT_WORK_ONSTACK(&(_work)->work, (_func));     \
                init_timer_on_stack(&(_work)->timer);           \
        } while (0)
 
index 72a5d647a5f26d9eb3c798894443d4eb7849c0dd..d5c7aaadda59a926032794a4d3a27a46f01bab3c 100644 (file)
@@ -10,8 +10,6 @@
 struct backing_dev_info;
 
 extern spinlock_t inode_lock;
-extern struct list_head inode_in_use;
-extern struct list_head inode_unused;
 
 /*
  * fs/fs-writeback.c
@@ -149,6 +147,8 @@ int write_cache_pages(struct address_space *mapping,
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
 void set_page_dirty_balance(struct page *page, int page_mkwrite);
 void writeback_set_ratelimit(void);
+void tag_pages_for_writeback(struct address_space *mapping,
+                            pgoff_t start, pgoff_t end);
 
 /* pdflush.c */
 extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
index fa0d52b8e622cd55c0cc1f8f58e0e9e53231c969..b5fc9f39122b65ab7a7fbd4911dd9e4ab6c98d8e 100644 (file)
@@ -39,7 +39,9 @@
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
 #include <linux/socket.h>
+#include <linux/if_vlan.h>
 #include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
 
 struct rdma_addr_client {
        atomic_t refcount;
@@ -63,6 +65,7 @@ struct rdma_dev_addr {
        unsigned char broadcast[MAX_ADDR_LEN];
        unsigned short dev_type;
        int bound_dev_if;
+       enum rdma_transport_type transport;
 };
 
 /**
@@ -127,9 +130,51 @@ static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr)
        return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0;
 }
 
+static inline void iboe_mac_vlan_to_ll(union ib_gid *gid, u8 *mac, u16 vid)
+{
+       memset(gid->raw, 0, 16);
+       *((__be32 *) gid->raw) = cpu_to_be32(0xfe800000);
+       if (vid < 0x1000) {
+               gid->raw[12] = vid & 0xff;
+               gid->raw[11] = vid >> 8;
+       } else {
+               gid->raw[12] = 0xfe;
+               gid->raw[11] = 0xff;
+       }
+       memcpy(gid->raw + 13, mac + 3, 3);
+       memcpy(gid->raw + 8, mac, 3);
+       gid->raw[8] ^= 2;
+}
+
+static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev)
+{
+       return dev->priv_flags & IFF_802_1Q_VLAN ?
+               vlan_dev_vlan_id(dev) : 0xffff;
+}
+
+static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
+                                     union ib_gid *gid)
+{
+       struct net_device *dev;
+       u16 vid = 0xffff;
+
+       dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+       if (dev) {
+               vid = rdma_vlan_dev_vlan_id(dev);
+               dev_put(dev);
+       }
+
+       iboe_mac_vlan_to_ll(gid, dev_addr->src_dev_addr, vid);
+}
+
 static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
 {
-       memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid);
+       if (dev_addr->transport == RDMA_TRANSPORT_IB &&
+           dev_addr->dev_type != ARPHRD_INFINIBAND)
+               iboe_addr_get_sgid(dev_addr, gid);
+       else
+               memcpy(gid, dev_addr->src_dev_addr +
+                      rdma_addr_gid_offset(dev_addr), sizeof *gid);
 }
 
 static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
@@ -147,4 +192,91 @@ static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_g
        memcpy(dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid);
 }
 
+static inline enum ib_mtu iboe_get_mtu(int mtu)
+{
+       /*
+        * Subtract IB headers from the effective IBoE MTU; 28 is the
+        * atomic header, the largest possible header after the BTH.
+        */
+       mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28;
+
+       if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
+               return IB_MTU_4096;
+       else if (mtu >= ib_mtu_enum_to_int(IB_MTU_2048))
+               return IB_MTU_2048;
+       else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024))
+               return IB_MTU_1024;
+       else if (mtu >= ib_mtu_enum_to_int(IB_MTU_512))
+               return IB_MTU_512;
+       else if (mtu >= ib_mtu_enum_to_int(IB_MTU_256))
+               return IB_MTU_256;
+       else
+               return 0;
+}
+
+static inline int iboe_get_rate(struct net_device *dev)
+{
+       struct ethtool_cmd cmd;
+
+       if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings ||
+           dev->ethtool_ops->get_settings(dev, &cmd))
+               return IB_RATE_PORT_CURRENT;
+
+       if (cmd.speed >= 40000)
+               return IB_RATE_40_GBPS;
+       else if (cmd.speed >= 30000)
+               return IB_RATE_30_GBPS;
+       else if (cmd.speed >= 20000)
+               return IB_RATE_20_GBPS;
+       else if (cmd.speed >= 10000)
+               return IB_RATE_10_GBPS;
+       else
+               return IB_RATE_PORT_CURRENT;
+}
+
+static inline int rdma_link_local_addr(struct in6_addr *addr)
+{
+       if (addr->s6_addr32[0] == htonl(0xfe800000) &&
+           addr->s6_addr32[1] == 0)
+               return 1;
+
+       return 0;
+}
+
+static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac)
+{
+       memcpy(mac, &addr->s6_addr[8], 3);
+       memcpy(mac + 3, &addr->s6_addr[13], 3);
+       mac[0] ^= 2;
+}
+
+static inline int rdma_is_multicast_addr(struct in6_addr *addr)
+{
+       return addr->s6_addr[0] == 0xff;
+}
+
+static inline void rdma_get_mcast_mac(struct in6_addr *addr, u8 *mac)
+{
+       int i;
+
+       mac[0] = 0x33;
+       mac[1] = 0x33;
+       for (i = 2; i < 6; ++i)
+               mac[i] = addr->s6_addr[i + 10];
+}
+
+static inline u16 rdma_get_vlan_id(union ib_gid *dgid)
+{
+       u16 vid;
+
+       vid = dgid->raw[11] << 8 | dgid->raw[12];
+       return vid < 0x1000 ? vid : 0xffff;
+}
+
+static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev)
+{
+       return dev->priv_flags & IFF_802_1Q_VLAN ?
+               vlan_dev_real_dev(dev) : 0;
+}
+
 #endif /* IB_ADDR_H */
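
iboe_mac_vlan_to_ll() above builds a modified-EUI-64 style link-local GID from a MAC address and VLAN id. A worked example with made-up addresses:

	union ib_gid gid;
	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	iboe_mac_vlan_to_ll(&gid, mac, 5);
	/*
	 * gid.raw = fe 80 00 00 00 00 00 00 02 11 22 00 05 33 44 55,
	 * i.e. fe80::211:2200:533:4455; raw[8] ^= 2 flips the EUI-64
	 * universal/local bit and bytes 11..12 carry the VLAN id.
	 */

In the same vein, iboe_get_mtu(1500) returns IB_MTU_1024 for a standard Ethernet MTU: 1500 - 40 (GRH) - 12 (BTH) - 28 (atomic header) = 1420, which clears 1024 but not 2048.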
index cbb50f4da3dd92df64f7af53ebca0e0a79f6e174..b37fe3b10a9da4f397d272a54b6c036045f019c7 100644 (file)
@@ -37,6 +37,8 @@
 
 enum {
        IB_LRH_BYTES  = 8,
+       IB_ETH_BYTES  = 14,
+       IB_VLAN_BYTES = 4,
        IB_GRH_BYTES  = 40,
        IB_BTH_BYTES  = 12,
        IB_DETH_BYTES = 8
@@ -210,14 +212,32 @@ struct ib_unpacked_deth {
        __be32       source_qpn;
 };
 
+struct ib_unpacked_eth {
+       u8      dmac_h[4];
+       u8      dmac_l[2];
+       u8      smac_h[2];
+       u8      smac_l[4];
+       __be16  type;
+};
+
+struct ib_unpacked_vlan {
+       __be16  tag;
+       __be16  type;
+};
+
 struct ib_ud_header {
+       int                     lrh_present;
        struct ib_unpacked_lrh  lrh;
-       int                     grh_present;
-       struct ib_unpacked_grh  grh;
-       struct ib_unpacked_bth  bth;
+       int                     eth_present;
+       struct ib_unpacked_eth  eth;
+       int                     vlan_present;
+       struct ib_unpacked_vlan vlan;
+       int                     grh_present;
+       struct ib_unpacked_grh  grh;
+       struct ib_unpacked_bth  bth;
        struct ib_unpacked_deth deth;
-       int                     immediate_present;
-       __be32                  immediate_data;
+       int                     immediate_present;
+       __be32                  immediate_data;
 };
 
 void ib_pack(const struct ib_field        *desc,
@@ -230,9 +250,12 @@ void ib_unpack(const struct ib_field        *desc,
               void                         *buf,
               void                         *structure);
 
-void ib_ud_header_init(int                        payload_bytes,
-                      int                 grh_present,
-                      int                 immediate_present,
+void ib_ud_header_init(int                 payload_bytes,
+                      int                  lrh_present,
+                      int                  eth_present,
+                      int                  vlan_present,
+                      int                  grh_present,
+                      int                  immediate_present,
                       struct ib_ud_header *header);
 
 int ib_ud_header_pack(struct ib_ud_header *header,
index a17f77106149bb8ee63f02a8bdddf79f72a2c928..fe5b05177a2cb4fe9195cb2b95e4f1aca0242759 100644 (file)
@@ -205,7 +205,8 @@ struct ib_uverbs_query_port_resp {
        __u8  active_width;
        __u8  active_speed;
        __u8  phys_state;
-       __u8  reserved[3];
+       __u8  link_layer;
+       __u8  reserved[2];
 };
 
 struct ib_uverbs_alloc_pd {
index 857b3b9cf120a4bcac7a19607f8b26ce14f23cae..e04c4888d1fdca554edf5dcb6d37f16b15e47d02 100644 (file)
@@ -75,6 +75,12 @@ enum rdma_transport_type {
 enum rdma_transport_type
 rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
 
+enum rdma_link_layer {
+       IB_LINK_LAYER_UNSPECIFIED,
+       IB_LINK_LAYER_INFINIBAND,
+       IB_LINK_LAYER_ETHERNET,
+};
+
 enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR         = 1,
        IB_DEVICE_BAD_PKEY_CNTR         = (1<<1),
@@ -1010,6 +1016,8 @@ struct ib_device {
        int                        (*query_port)(struct ib_device *device,
                                                 u8 port_num,
                                                 struct ib_port_attr *port_attr);
+       enum rdma_link_layer       (*get_link_layer)(struct ib_device *device,
+                                                    u8 port_num);
        int                        (*query_gid)(struct ib_device *device,
                                                u8 port_num, int index,
                                                union ib_gid *gid);
@@ -1222,6 +1230,9 @@ int ib_query_device(struct ib_device *device,
 int ib_query_port(struct ib_device *device,
                  u8 port_num, struct ib_port_attr *port_attr);
 
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
+                                              u8 port_num);
+
 int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);
 
index ad178fa78f665a69fba3b4d26e66c9aee9443d75..1ae84db4c9fb6525b4937276e42f220dbe41d5e7 100644 (file)
@@ -239,4 +239,42 @@ struct srp_rsp {
        u8      data[0];
 } __attribute__((packed));
 
+struct srp_cred_req {
+       u8      opcode;
+       u8      sol_not;
+       u8      reserved[2];
+       __be32  req_lim_delta;
+       u64     tag;
+};
+
+struct srp_cred_rsp {
+       u8      opcode;
+       u8      reserved[7];
+       u64     tag;
+};
+
+/*
+ * The SRP spec defines the fixed portion of the AER_REQ structure to be
+ * 36 bytes, so it needs to be packed to avoid having it padded to 40 bytes
+ * on 64-bit architectures.
+ */
+struct srp_aer_req {
+       u8      opcode;
+       u8      sol_not;
+       u8      reserved[2];
+       __be32  req_lim_delta;
+       u64     tag;
+       u32     reserved2;
+       __be64  lun;
+       __be32  sense_data_len;
+       u32     reserved3;
+       u8      sense_data[0];
+} __attribute__((packed));
+
+struct srp_aer_rsp {
+       u8      opcode;
+       u8      reserved[7];
+       u64     tag;
+};
+
 #endif /* SCSI_SRP_H */
index 01e9e0076a92c68682fa513e1f4a862e291fb9ae..6bcb00645de47ffb9262c1c366fa14e82291e887 100644 (file)
@@ -242,18 +242,20 @@ TRACE_EVENT(ext4_da_writepages,
                __entry->pages_skipped  = wbc->pages_skipped;
                __entry->range_start    = wbc->range_start;
                __entry->range_end      = wbc->range_end;
-               __entry->nonblocking    = wbc->nonblocking;
                __entry->for_kupdate    = wbc->for_kupdate;
                __entry->for_reclaim    = wbc->for_reclaim;
                __entry->range_cyclic   = wbc->range_cyclic;
                __entry->writeback_index = inode->i_mapping->writeback_index;
        ),
 
-       TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu",
+       TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld "
+                 "range_start %llu range_end %llu "
+                 "for_kupdate %d for_reclaim %d "
+                 "range_cyclic %d writeback_index %lu",
                  jbd2_dev_to_name(__entry->dev),
                  (unsigned long) __entry->ino, __entry->nr_to_write,
                  __entry->pages_skipped, __entry->range_start,
-                 __entry->range_end, __entry->nonblocking,
+                 __entry->range_end,
                  __entry->for_kupdate, __entry->for_reclaim,
                  __entry->range_cyclic,
                  (unsigned long) __entry->writeback_index)
index 370aa5a87322fa686f3f6b59474e67bf9cf94567..c255fcc587bfe7d3acca8e6bf8d7642bc5f7ce24 100644 (file)
@@ -10,6 +10,7 @@
 
 #define RECLAIM_WB_ANON                0x0001u
 #define RECLAIM_WB_FILE                0x0002u
+#define RECLAIM_WB_MIXED       0x0010u
 #define RECLAIM_WB_SYNC                0x0004u
 #define RECLAIM_WB_ASYNC       0x0008u
 
        (flags) ? __print_flags(flags, "|",                     \
                {RECLAIM_WB_ANON,       "RECLAIM_WB_ANON"},     \
                {RECLAIM_WB_FILE,       "RECLAIM_WB_FILE"},     \
+               {RECLAIM_WB_MIXED,      "RECLAIM_WB_MIXED"},    \
                {RECLAIM_WB_SYNC,       "RECLAIM_WB_SYNC"},     \
                {RECLAIM_WB_ASYNC,      "RECLAIM_WB_ASYNC"}     \
                ) : "RECLAIM_WB_NONE"
 
 #define trace_reclaim_flags(page, sync) ( \
        (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
-       (sync == PAGEOUT_IO_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC)   \
+       (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC)   \
+       )
+
+#define trace_shrink_flags(file, sync) ( \
+       (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
+                       (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) |  \
+       (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
        )
 
 TRACE_EVENT(mm_vmscan_kswapd_sleep,
@@ -269,6 +277,40 @@ TRACE_EVENT(mm_vmscan_writepage,
                show_reclaim_flags(__entry->reclaim_flags))
 );
 
+TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
+
+       TP_PROTO(int nid, int zid,
+                       unsigned long nr_scanned, unsigned long nr_reclaimed,
+                       int priority, int reclaim_flags),
+
+       TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags),
+
+       TP_STRUCT__entry(
+               __field(int, nid)
+               __field(int, zid)
+               __field(unsigned long, nr_scanned)
+               __field(unsigned long, nr_reclaimed)
+               __field(int, priority)
+               __field(int, reclaim_flags)
+       ),
+
+       TP_fast_assign(
+               __entry->nid = nid;
+               __entry->zid = zid;
+               __entry->nr_scanned = nr_scanned;
+               __entry->nr_reclaimed = nr_reclaimed;
+               __entry->priority = priority;
+               __entry->reclaim_flags = reclaim_flags;
+       ),
+
+       TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s",
+               __entry->nid, __entry->zid,
+               __entry->nr_scanned, __entry->nr_reclaimed,
+               __entry->priority,
+               show_reclaim_flags(__entry->reclaim_flags))
+);
+
+
 #endif /* _TRACE_VMSCAN_H */
 
 /* This part must be outside protection */
index f345f66ae9d100d755c4e4a1afb3f4b2310148da..89a2b2db43751686129d90f8d58fd3c08787726b 100644 (file)
@@ -96,8 +96,6 @@ DECLARE_EVENT_CLASS(wbc_class,
                __field(long, nr_to_write)
                __field(long, pages_skipped)
                __field(int, sync_mode)
-               __field(int, nonblocking)
-               __field(int, encountered_congestion)
                __field(int, for_kupdate)
                __field(int, for_background)
                __field(int, for_reclaim)
@@ -153,6 +151,41 @@ DEFINE_WBC_EVENT(wbc_balance_dirty_written);
 DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
 DEFINE_WBC_EVENT(wbc_writepage);
 
+DECLARE_EVENT_CLASS(writeback_congest_waited_template,
+
+       TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+       TP_ARGS(usec_timeout, usec_delayed),
+
+       TP_STRUCT__entry(
+               __field(        unsigned int,   usec_timeout    )
+               __field(        unsigned int,   usec_delayed    )
+       ),
+
+       TP_fast_assign(
+               __entry->usec_timeout   = usec_timeout;
+               __entry->usec_delayed   = usec_delayed;
+       ),
+
+       TP_printk("usec_timeout=%u usec_delayed=%u",
+                       __entry->usec_timeout,
+                       __entry->usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
+
+       TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+       TP_ARGS(usec_timeout, usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
+
+       TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+       TP_ARGS(usec_timeout, usec_delayed)
+);
+
 #endif /* _TRACE_WRITEBACK_H */
 
 /* This part must be outside protection */
index 4e65c16a445b06f573afc055e92c7d4b9280d907..84ad8f02fee5e5904c31c21915db049de7c3b2b3 100644 (file)
@@ -1 +1,2 @@
 header-y += evtchn.h
+header-y += privcmd.h
index d3938d3e71f822c01956cdb6fbe93bc90cfb1363..d7a6c13bde69cf505bef40027bcd555a4463d57f 100644 (file)
@@ -186,6 +186,35 @@ struct xen_translate_gpfn_list {
 };
 DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
 
+/*
+ * Returns the pseudo-physical memory map as it was when the domain
+ * was started (specified by XENMEM_set_memory_map).
+ * arg == addr of struct xen_memory_map.
+ */
+#define XENMEM_memory_map           9
+struct xen_memory_map {
+    /*
+     * On call the number of entries which can be stored in buffer. On
+     * return the number of entries which have been stored in
+     * buffer.
+     */
+    unsigned int nr_entries;
+
+    /*
+     * Entries in the buffer are in the same format as returned by the
+     * BIOS INT 0x15 EAX=0xE820 call.
+     */
+    GUEST_HANDLE(void) buffer;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
+
+/*
+ * Returns the real physical memory map. Passes the same structure as
+ * XENMEM_memory_map.
+ * arg == addr of struct xen_memory_map.
+ */
+#define XENMEM_machine_memory_map   10
+
 
 /*
  * Prevent the balloon driver from changing the memory reservation
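
For orientation, a hedged sketch of how a guest issues XENMEM_memory_map; HYPERVISOR_memory_op() and set_xen_guest_handle() are the usual Xen wrappers, and the E820-sized buffer is an assumption for illustration (compare xen_memory_setup() in arch/x86/xen/setup.c):

    /* Sketch: ask the hypervisor for the pseudo-physical memory map. */
    static int example_fetch_memory_map(struct e820entry *map, unsigned int max)
    {
            struct xen_memory_map memmap;
            int rc;

            memmap.nr_entries = max;                /* capacity on call */
            set_xen_guest_handle(memmap.buffer, map);

            rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
            if (rc < 0)
                    return rc;                      /* e.g. -ENOSYS */
            return memmap.nr_entries;               /* entries written */
    }
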
diff --git a/include/xen/privcmd.h b/include/xen/privcmd.h
new file mode 100644 (file)
index 0000000..b42cdfd
--- /dev/null
@@ -0,0 +1,80 @@
+/******************************************************************************
+ * privcmd.h
+ *
+ * Interface to /proc/xen/privcmd.
+ *
+ * Copyright (c) 2003-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_PUBLIC_PRIVCMD_H__
+#define __LINUX_PUBLIC_PRIVCMD_H__
+
+#include <linux/types.h>
+
+typedef unsigned long xen_pfn_t;
+
+#ifndef __user
+#define __user
+#endif
+
+struct privcmd_hypercall {
+       __u64 op;
+       __u64 arg[5];
+};
+
+struct privcmd_mmap_entry {
+       __u64 va;
+       __u64 mfn;
+       __u64 npages;
+};
+
+struct privcmd_mmap {
+       int num;
+       domid_t dom; /* target domain */
+       struct privcmd_mmap_entry __user *entry;
+};
+
+struct privcmd_mmapbatch {
+       int num;     /* number of pages to populate */
+       domid_t dom; /* target domain */
+       __u64 addr;  /* virtual address */
+       xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
+};
+
+/*
+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
+ * @arg: &privcmd_hypercall_t
+ * Return: Value returned from execution of the specified hypercall.
+ */
+#define IOCTL_PRIVCMD_HYPERCALL                                        \
+       _IOC(_IOC_NONE, 'P', 0, sizeof(struct privcmd_hypercall))
+#define IOCTL_PRIVCMD_MMAP                                     \
+       _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap))
+#define IOCTL_PRIVCMD_MMAPBATCH                                        \
+       _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch))
+
+#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
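
This header is consumed from userspace: the Xen toolstack opens the privcmd node and drives it with these ioctls. A hedged userspace sketch, assuming the node lives at /proc/xen/privcmd as the file comment says:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <xen/privcmd.h>

    /* Sketch: forward one hypercall through the privcmd device. */
    static int example_do_hypercall(__u64 op, __u64 arg0)
    {
            struct privcmd_hypercall call = { .op = op, .arg = { arg0 } };
            int fd, ret;

            fd = open("/proc/xen/privcmd", O_RDWR);
            if (fd < 0)
                    return -1;
            ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
            close(fd);
            return ret;     /* hypercall return value, or -1 with errno set */
    }
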
index 351f4051f6d856d455d5f3d60b75bf3570209d9a..98b92154a2645407808c74ba9326e735f84b75af 100644 (file)
@@ -23,4 +23,9 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
 
 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
 
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+                              unsigned long addr,
+                              unsigned long mfn, int nr,
+                              pgprot_t prot, unsigned domid);
+
 #endif /* INCLUDE_XEN_OPS_H */
index 62a47eafa8e93684fce79f880a8cf030908040fb..830aaec9c7d5e0cb8df39af760791b5335772768 100644 (file)
@@ -291,7 +291,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
        if (err)
                return err;
 
-       sys_chdir("/root");
+       sys_chdir((const char __user __force *)"/root");
        ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
        printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
               current->fs->pwd.mnt->mnt_sb->s_type->name,
@@ -488,5 +488,5 @@ void __init prepare_namespace(void)
 out:
        devtmpfs_mount("dev");
        sys_mount(".", "/", NULL, MS_MOVE, NULL);
-       sys_chroot(".");
+       sys_chroot((const char __user __force *)".");
 }
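
The casts in this and the following init/ hunks are sparse annotations, not behaviour changes: the string literals live in the kernel address space while the sys_* prototypes take __user pointers, and __force marks the conversion as deliberate. For reference, roughly how the annotations expand when sparse (__CHECKER__) is active:

    /* From include/linux/compiler.h under __CHECKER__ (abridged): */
    #define __user  __attribute__((noderef, address_space(1)))
    #define __force __attribute__((force))
    /* Without __force, handing a kernel pointer to a __user parameter
     * draws an address-space mismatch warning from sparse. */
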
index 69aebbf8fd2dbf4ebeb301cf1fa590d3c864b9c9..32c4799b8c91bb483f418cd6a39e1e11eda0cd7a 100644 (file)
@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
 
        wait_for_device_probe();
 
-       fd = sys_open("/dev/md0", 0, 0);
+       fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
        if (fd >= 0) {
                sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
                sys_close(fd);
index bf3ef667bf3669d4332fbaa5fdd65784823c0f54..6e1ee6987c78dbca4cb3f01aaa031e4b3a42708f 100644 (file)
@@ -168,7 +168,7 @@ int __init rd_load_image(char *from)
        char rotator[4] = { '|' , '/' , '-' , '\\' };
 #endif
 
-       out_fd = sys_open("/dev/ram", O_RDWR, 0);
+       out_fd = sys_open((const char __user __force *) "/dev/ram", O_RDWR, 0);
        if (out_fd < 0)
                goto out;
 
@@ -267,7 +267,7 @@ noclose_input:
        sys_close(out_fd);
 out:
        kfree(buf);
-       sys_unlink("/dev/ram");
+       sys_unlink((const char __user __force *) "/dev/ram");
        return res;
 }
 
index 4b9c20205092e1f7de716633c0d51bd8746e74af..d9c6e782ff5333012ba0b527a084b1d14a35e624 100644 (file)
@@ -528,7 +528,7 @@ static void __init clean_rootfs(void)
        struct linux_dirent64 *dirp;
        int num;
 
-       fd = sys_open("/", O_RDONLY, 0);
+       fd = sys_open((const char __user __force *) "/", O_RDONLY, 0);
        WARN_ON(fd < 0);
        if (fd < 0)
                return;
@@ -590,7 +590,8 @@ static int __init populate_rootfs(void)
                }
                printk(KERN_INFO "rootfs image is not initramfs (%s)"
                                "; looks like an initrd\n", err);
-               fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
+               fd = sys_open((const char __user __force *) "/initrd.image",
+                             O_WRONLY|O_CREAT, 0700);
                if (fd >= 0) {
                        sys_write(fd, (char *)initrd_start,
                                        initrd_end - initrd_start);
index f4c1a3a1b8c52bd06dbdcadf6833b5328693b0e3..267739d851791b0a3e4526a3be5fb4944d8b507e 100644 (file)
@@ -29,17 +29,17 @@ static int __init default_rootfs(void)
 {
        int err;
 
-       err = sys_mkdir("/dev", 0755);
+       err = sys_mkdir((const char __user __force *) "/dev", 0755);
        if (err < 0)
                goto out;
 
-       err = sys_mknod((const char __user *) "/dev/console",
+       err = sys_mknod((const char __user __force *) "/dev/console",
                        S_IFCHR | S_IRUSR | S_IWUSR,
                        new_encode_dev(MKDEV(5, 1)));
        if (err < 0)
                goto out;
 
-       err = sys_mkdir("/root", 0700);
+       err = sys_mkdir((const char __user __force *) "/root", 0700);
        if (err < 0)
                goto out;
 
index e1e7b9635f5da7c986dfd18426bce94c177d6630..3a61ffefe88472af269e0bc218d6f8eb2959a1c9 100644 (file)
@@ -116,6 +116,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
 
        inode = new_inode(sb);
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = current_fsgid();
@@ -769,7 +770,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
 
        inode = dentry->d_inode;
        if (inode)
-               atomic_inc(&inode->i_count);
+               ihold(inode);
        err = mnt_want_write(ipc_ns->mq_mnt);
        if (err)
                goto out_err;
index 7b69b8d0313d63a92bd4efaf4d07676456fe71fc..9270d532ec3c7c6a5be2cf3d1eca21788036b81f 100644 (file)
@@ -777,6 +777,7 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
        struct inode *inode = new_inode(sb);
 
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = current_fsgid();
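
This and the preceding ipc/mqueue.c hunk apply the same new rule: new_inode() no longer assigns an inode number, so pseudo filesystems set i_ino themselves from the shared get_next_ino() counter. A hedged sketch of the idiom:

    /* Sketch: the pattern in-kernel pseudo filesystems now follow. */
    static struct inode *example_new_pseudo_inode(struct super_block *sb,
                                                  mode_t mode)
    {
            struct inode *inode = new_inode(sb);

            if (inode) {
                    inode->i_ino = get_next_ino();  /* per-cpu allocator */
                    inode->i_mode = mode;
            }
            return inode;
    }
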
index e2bdf37f9fdea71a15acb3523ec20b63a6aa42d3..894179a32ec163fe3347f8614626ddf7a9946cb7 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/perf_event.h>
 #include <trace/events/sched.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/oom.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -687,6 +688,8 @@ static void exit_mm(struct task_struct * tsk)
        enter_lazy_tlb(mm, current);
        /* We don't want this task to be frozen prematurely */
        clear_freeze_flag(tsk);
+       if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+               atomic_dec(&mm->oom_disable_count);
        task_unlock(tsk);
        mm_update_next_owner(mm);
        mmput(mm);
index c445f8cc408d777dd7a94aec3fa78c07e1d98b98..e87aaaaf5131de2bb924c963d618a52f5de01127 100644 (file)
@@ -65,6 +65,7 @@
 #include <linux/perf_event.h>
 #include <linux/posix-timers.h>
 #include <linux/user-return-notifier.h>
+#include <linux/oom.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -488,6 +489,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
        mm->cached_hole_size = ~0UL;
        mm_init_aio(mm);
        mm_init_owner(mm, p);
+       atomic_set(&mm->oom_disable_count, 0);
 
        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
@@ -741,6 +743,8 @@ good_mm:
        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;
+       if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+               atomic_inc(&mm->oom_disable_count);
 
        tsk->mm = mm;
        tsk->active_mm = mm;
@@ -1299,8 +1303,13 @@ bad_fork_cleanup_io:
 bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
 bad_fork_cleanup_mm:
-       if (p->mm)
+       if (p->mm) {
+               task_lock(p);
+               if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+                       atomic_dec(&p->mm->oom_disable_count);
+               task_unlock(p);
                mmput(p->mm);
+       }
 bad_fork_cleanup_signal:
        if (!(clone_flags & CLONE_THREAD))
                free_signal_struct(p->signal);
@@ -1693,6 +1702,10 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
                        active_mm = current->active_mm;
                        current->mm = new_mm;
                        current->active_mm = new_mm;
+                       if (current->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+                               atomic_dec(&mm->oom_disable_count);
+                               atomic_inc(&new_mm->oom_disable_count);
+                       }
                        activate_mm(active_mm, new_mm);
                        new_mm = mm;
                }
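
The invariant kept by the kernel/exit.c and kernel/fork.c hunks: mm->oom_disable_count tracks how many tasks sharing the mm run with oom_score_adj == OOM_SCORE_ADJ_MIN, so every path that attaches or detaches such a task adjusts it under task_lock(). A hedged sketch of the reader side; the matching mm/oom_kill.c change is outside this section:

    /* Illustrative check, assuming the OOM killer consults the counter: */
    static bool example_mm_oom_disabled(struct mm_struct *mm)
    {
            /* Nonzero: at least one sharer is OOM-immune, so killing any
             * single task would not release the address space. */
            return atomic_read(&mm->oom_disable_count) != 0;
    }
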
index a118bf160e0b05a4b24404beda19d07adead5161..6c683b37f2ce25291a5a0c8f490a16c08c84d674 100644 (file)
@@ -169,7 +169,7 @@ static void get_futex_key_refs(union futex_key *key)
 
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
-               atomic_inc(&key->shared.inode->i_count);
+               ihold(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                atomic_inc(&key->private.mm->mm_count);
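
ihold() replaces open-coded i_count manipulation here and in the ipc/mqueue.c hunk above; it takes an extra reference on an inode the caller already knows to be live. Its definition, added elsewhere in this merge (fs/inode.c), is roughly:

    void ihold(struct inode *inode)
    {
            /* Warn if the inode was not already referenced. */
            WARN_ON(atomic_inc_return(&inode->i_count) < 2);
    }
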
index c0613f7d673064b7bbe1afcf7477dd569310989c..b55045bc7563186a84ee04267c41011bc0f59cd2 100644 (file)
@@ -816,7 +816,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 
                ptr = kmap(page);
                /* Start with a clear page */
-               memset(ptr, 0, PAGE_SIZE);
+               clear_page(ptr);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
index ac7eb109f19635d11930a9cef03ed4cd352cc235..0dac75ea4456f040b14ae18e3dbbe5ce8332a108 100644 (file)
@@ -984,8 +984,8 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
                src = kmap_atomic(s_page, KM_USER0);
                dst = kmap_atomic(d_page, KM_USER1);
                do_copy_page(dst, src);
-               kunmap_atomic(src, KM_USER0);
                kunmap_atomic(dst, KM_USER1);
+               kunmap_atomic(src, KM_USER0);
        } else {
                if (PageHighMem(d_page)) {
                        /* Page pointed to by src may contain some kernel
@@ -993,7 +993,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
                         */
                        safe_copy_page(buffer, s_page);
                        dst = kmap_atomic(d_page, KM_USER0);
-                       memcpy(dst, buffer, PAGE_SIZE);
+                       copy_page(dst, buffer);
                        kunmap_atomic(dst, KM_USER0);
                } else {
                        safe_copy_page(page_address(d_page), s_page);
@@ -1687,7 +1687,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
                memory_bm_position_reset(&orig_bm);
                memory_bm_position_reset(&copy_bm);
        } else if (handle->cur <= nr_meta_pages) {
-               memset(buffer, 0, PAGE_SIZE);
+               clear_page(buffer);
                pack_pfns(buffer, &orig_bm);
        } else {
                struct page *page;
@@ -1701,7 +1701,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
                        void *kaddr;
 
                        kaddr = kmap_atomic(page, KM_USER0);
-                       memcpy(buffer, kaddr, PAGE_SIZE);
+                       copy_page(buffer, kaddr);
                        kunmap_atomic(kaddr, KM_USER0);
                        handle->buffer = buffer;
                } else {
@@ -1984,7 +1984,7 @@ static void copy_last_highmem_page(void)
                void *dst;
 
                dst = kmap_atomic(last_highmem_page, KM_USER0);
-               memcpy(dst, buffer, PAGE_SIZE);
+               copy_page(dst, buffer);
                kunmap_atomic(dst, KM_USER0);
                last_highmem_page = NULL;
        }
@@ -2270,11 +2270,11 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 
        kaddr1 = kmap_atomic(p1, KM_USER0);
        kaddr2 = kmap_atomic(p2, KM_USER1);
-       memcpy(buf, kaddr1, PAGE_SIZE);
-       memcpy(kaddr1, kaddr2, PAGE_SIZE);
-       memcpy(kaddr2, buf, PAGE_SIZE);
-       kunmap_atomic(kaddr1, KM_USER0);
+       copy_page(buf, kaddr1);
+       copy_page(kaddr1, kaddr2);
+       copy_page(kaddr2, buf);
        kunmap_atomic(kaddr2, KM_USER1);
+       kunmap_atomic(kaddr1, KM_USER0);
 }
 
 /**
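
The reordered kunmap_atomic() calls in this file are not cosmetic: with the stack-based atomic kmaps introduced by this merge (see the __kmap_atomic_idx hunk in mm/highmem.c below), mappings must be released in strict LIFO order. A hedged restatement of the rule:

    /* Sketch: last mapped must be first unmapped. */
    static void example_copy_highpages(struct page *d_page, struct page *s_page)
    {
            void *src = kmap_atomic(s_page, KM_USER0);
            void *dst = kmap_atomic(d_page, KM_USER1);

            copy_page(dst, src);
            kunmap_atomic(dst, KM_USER1);   /* pop dst first... */
            kunmap_atomic(src, KM_USER0);   /* ...then src */
    }
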
index 916eaa79039968ff8412128858e9b8aeda951ac6..a0e4a86ccf94887a6f64ae985db131608def8952 100644 (file)
@@ -251,7 +251,7 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
        if (bio_chain) {
                src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
                if (src) {
-                       memcpy(src, buf, PAGE_SIZE);
+                       copy_page(src, buf);
                } else {
                        WARN_ON_ONCE(1);
                        bio_chain = NULL;       /* Go synchronous */
@@ -325,7 +325,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
                error = write_page(handle->cur, handle->cur_swap, NULL);
                if (error)
                        goto out;
-               memset(handle->cur, 0, PAGE_SIZE);
+               clear_page(handle->cur);
                handle->cur_swap = offset;
                handle->k = 0;
        }
@@ -910,7 +910,7 @@ int swsusp_check(void)
        hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
        if (!IS_ERR(hib_resume_bdev)) {
                set_blocksize(hib_resume_bdev, PAGE_SIZE);
-               memset(swsusp_header, 0, PAGE_SIZE);
+               clear_page(swsusp_header);
                error = hib_bio_read_page(swsusp_resume_block,
                                        swsusp_header, NULL);
                if (error)
index 2531017795f63e7d9c60d1db976e2b227c98ffda..b2ebaee8c377d2aa92c073e772748242b5ce5c90 100644 (file)
@@ -210,7 +210,7 @@ __setup("log_buf_len=", log_buf_len_setup);
 
 #ifdef CONFIG_BOOT_PRINTK_DELAY
 
-static unsigned int boot_delay; /* msecs delay after each printk during bootup */
+static int boot_delay; /* msecs delay after each printk during bootup */
 static unsigned long long loops_per_msec;      /* based on boot_delay */
 
 static int __init boot_delay_setup(char *str)
@@ -647,6 +647,7 @@ static inline int can_use_console(unsigned int cpu)
  * released but interrupts still disabled.
  */
 static int acquire_console_semaphore_for_printk(unsigned int cpu)
+       __releases(&logbuf_lock)
 {
        int retval = 0;
 
@@ -1511,7 +1512,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper)
 }
 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
 
-static const char const *kmsg_reasons[] = {
+static const char * const kmsg_reasons[] = {
        [KMSG_DUMP_OOPS]        = "oops",
        [KMSG_DUMP_PANIC]       = "panic",
        [KMSG_DUMP_KEXEC]       = "kexec",
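
The kmsg_reasons[] one-liner deserves a gloss: `const char const *` repeats one qualifier (both apply to the pointed-to char; the compiler merely warns about the duplication), while the intent was a read-only table of read-only strings. A small illustration of the placements:

    const char *p = "x";    /* pointer to const char: *p is read-only  */
    char buf[] = "x";
    char * const q = buf;   /* const pointer: q cannot be reseated     */
    static const char * const reasons[] = { "oops", "panic", "kexec" };
                            /* const pointers to const strings, as the
                             * fixed kmsg_reasons[] declaration reads  */
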
index 090c28812ce101fbd055821fe12806265bd1ed9b..2df820b03beb17499afccd6c3acc59e08bfd0194 100644 (file)
@@ -262,7 +262,7 @@ repeat:
                cpu_stop_fn_t fn = work->fn;
                void *arg = work->arg;
                struct cpu_stop_done *done = work->done;
-               char ksym_buf[KSYM_NAME_LEN];
+               char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
                __set_current_state(TASK_RUNNING);
 
@@ -304,7 +304,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
                p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
                                   cpu);
                if (IS_ERR(p))
-                       return NOTIFY_BAD;
+                       return notifier_from_errno(PTR_ERR(p));
                get_task_struct(p);
                kthread_bind(p, cpu);
                sched_set_stop_task(cpu, p);
@@ -372,7 +372,7 @@ static int __init cpu_stop_init(void)
        /* start one for the boot cpu */
        err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
                                    bcpu);
-       BUG_ON(err == NOTIFY_BAD);
+       BUG_ON(err != NOTIFY_OK);
        cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
        register_cpu_notifier(&cpu_stop_cpu_notifier);
 
index 3a45c224770fb82fa4bd76f9c7d4f2f989ee5aa9..c33a1edb799fda6db2e16fdf9d1d0401a9f78278 100644 (file)
@@ -161,8 +161,6 @@ extern int no_unaligned_warning;
 extern int unaligned_dump_stack;
 #endif
 
-extern struct ratelimit_state printk_ratelimit_state;
-
 #ifdef CONFIG_PROC_SYSCTL
 static int proc_do_cad_pid(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -1340,28 +1338,28 @@ static struct ctl_table fs_table[] = {
                .data           = &inodes_stat,
                .maxlen         = 2*sizeof(int),
                .mode           = 0444,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_nr_inodes,
        },
        {
                .procname       = "inode-state",
                .data           = &inodes_stat,
                .maxlen         = 7*sizeof(int),
                .mode           = 0444,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_nr_inodes,
        },
        {
                .procname       = "file-nr",
                .data           = &files_stat,
-               .maxlen         = 3*sizeof(int),
+               .maxlen         = sizeof(files_stat),
                .mode           = 0444,
                .proc_handler   = proc_nr_files,
        },
        {
                .procname       = "file-max",
                .data           = &files_stat.max_files,
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(files_stat.max_files),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_doulongvec_minmax,
        },
        {
                .procname       = "nr_open",
@@ -1377,7 +1375,7 @@ static struct ctl_table fs_table[] = {
                .data           = &dentry_stat,
                .maxlen         = 6*sizeof(int),
                .mode           = 0444,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_nr_dentry,
        },
        {
                .procname       = "overflowuid",
index 7e72614b736d82dd108d99ab79daefa55e6cdfe3..2c7d8d5914b188be65c36a686a81fba7eed07d8c 100644 (file)
@@ -91,6 +91,7 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
  * upon function exit.
  */
 static void free_user(struct user_struct *up, unsigned long flags)
+       __releases(&uidhash_lock)
 {
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
index c4bd3d825f35c8c8dd7ed4831cd5fee5fe6a6b9a..b0310eb6cc1e85433a502e7c93c62651e216f47a 100644 (file)
@@ -92,7 +92,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
-/*
+/**
  * finish_wait - clean up after waiting in a queue
  * @q: waitqueue waited on
  * @wait: wait descriptor
@@ -127,11 +127,11 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(finish_wait);
 
-/*
+/**
  * abort_exclusive_wait - abort exclusive waiting in a queue
  * @q: waitqueue waited on
  * @wait: wait descriptor
- * @state: runstate of the waiter to be woken
+ * @mode: runstate of the waiter to be woken
  * @key: key to identify a wait bit queue or %NULL
  *
  * Sets current thread back to running state and removes
index e5ff2cbaadc21d8e138b24c63dda236c4acb0350..90db1bd1a97852e4c547c43d840b2ca624c9a773 100644 (file)
@@ -2064,7 +2064,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
         * checks and call back into the fixup functions where we
         * might deadlock.
         */
-       INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
+       INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
        init_completion(&barr->done);
 
index 69a32664c289692576677a48d5daacca69212752..995840664a5f388b1248bb5b5b363daa1e89948e 100644 (file)
@@ -317,6 +317,14 @@ config DEBUG_OBJECTS_RCU_HEAD
        help
          Enable this to turn on debugging of RCU list heads (call_rcu() usage).
 
+config DEBUG_OBJECTS_PERCPU_COUNTER
+       bool "Debug percpu counter objects"
+       depends on DEBUG_OBJECTS
+       help
+         If you say Y here, additional code will be inserted into the
+         percpu counter routines to track the lifetime of percpu counter
+         objects and validate the percpu counter operations.
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
        int "debug_objects bootup default value (0-1)"
         range 0 1
@@ -366,7 +374,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
        bool "Kernel memory leak detector"
        depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-               (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
+               (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
 
        select DEBUG_FS if SYSFS
        select STACKTRACE if STACKTRACE_SUPPORT
@@ -740,6 +748,15 @@ config DEBUG_LIST
 
          If unsure, say N.
 
+config TEST_LIST_SORT
+       bool "Linked list sorting test"
+       depends on DEBUG_KERNEL
+       help
+         Enable this to turn on the 'list_sort()' function test. The test
+         runs only once during system boot, so it affects only boot time.
+
+         If unsure, say N.
+
 config DEBUG_SG
        bool "Debug SG table operations"
        depends on DEBUG_KERNEL
index ffb78c916ccdf85ffb0d8ec7d0f3f1e4f41ab3ef..741fae905ae3ba8d81c0a4cbe9564cdd4e0d4ce7 100644 (file)
@@ -359,7 +359,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
 
 #define CHUNKSZ                                32
 #define nbits_to_hold_value(val)       fls(val)
-#define unhex(c)                       (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
 #define BASEDEC 10             /* fancier cpuset lists input in decimal */
 
 /**
@@ -466,7 +465,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
                        if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
                                return -EOVERFLOW;
 
-                       chunk = (chunk << 4) | unhex(c);
+                       chunk = (chunk << 4) | hex_to_bin(c);
                        ndigits++; totaldigits++;
                }
                if (ndigits == 0)
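
hex_to_bin() from lib/hexdump.c replaces the local unhex() macro; the surrounding parser has already validated the character, so its -1 error return cannot trigger here. For reference, its behaviour is roughly:

    /* Roughly the lib/hexdump.c implementation: */
    int hex_to_bin(char ch)
    {
            if ((ch >= '0') && (ch <= '9'))
                    return ch - '0';
            ch = tolower(ch);
            if ((ch >= 'a') && (ch <= 'f'))
                    return ch - 'a' + 10;
            return -1;      /* not a hex digit */
    }
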
index a111eb8de9cfeefb2bc82011ef580f970091bfcd..5b4919191778bb4f2224e798037f23e9dd5ea2ad 100644 (file)
@@ -77,26 +77,58 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 EXPORT_SYMBOL(div_s64_rem);
 #endif
 
-/* 64bit divisor, dividend and result. dynamic precision */
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend:  64bit dividend
+ * @divisor:   64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'.  The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ */
 #ifndef div64_u64
 u64 div64_u64(u64 dividend, u64 divisor)
 {
-       u32 high, d;
+       u32 high = divisor >> 32;
+       u64 quot;
 
-       high = divisor >> 32;
-       if (high) {
-               unsigned int shift = fls(high);
+       if (high == 0) {
+               quot = div_u64(dividend, divisor);
+       } else {
+               int n = 1 + fls(high);
+               quot = div_u64(dividend >> n, divisor >> n);
 
-               d = divisor >> shift;
-               dividend >>= shift;
-       } else
-               d = divisor;
+               if (quot != 0)
+                       quot--;
+               if ((dividend - quot * divisor) >= divisor)
+                       quot++;
+       }
 
-       return div_u64(dividend, d);
+       return quot;
 }
 EXPORT_SYMBOL(div64_u64);
 #endif
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend:  64bit dividend
+ * @divisor:   64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+       s64 quot, t;
+
+       quot = div64_u64(abs64(dividend), abs64(divisor));
+       t = (dividend ^ divisor) >> 63;
+
+       return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
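
The shift-and-correct estimate in div64_u64() deserves one worked pass. Informally: with n chosen so the shifted divisor fits in 32 bits, the 64/32 division yields a quotient that exceeds the true one by at most 1; after the unconditional decrement it is exact or one short, and the single correction step lands it exactly. Illustrative numbers (chosen here, not from the source):

    /*
     * dividend = 1 << 40          = 1099511627776
     * divisor  = (1 << 33) + 1    = 8589934593   -> high = 2, n = 3
     * quot     = (dividend >> 3) / (divisor >> 3)
     *          = 137438953472 / 1073741824       = 128
     * quot--                                     -> 127
     * dividend - 127 * divisor    = 8589934465 < divisor, no correction
     * result   = 127  (exact quotient of 2^40 / (2^33 + 1))
     */
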
 #endif /* BITS_PER_LONG == 32 */
 
 /*
index 5e0966be0f7ceead8beee37f0ed5da7b0c63c775..e15502e8b21e5bbb47addef5c8fea61b57118508 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,16 +106,17 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 }
 
 /**
- * idr_pre_get - reserver resources for idr allocation
+ * idr_pre_get - reserve resources for idr allocation
  * @idp:       idr handle
  * @gfp_mask:  memory allocation flags
  *
- * This function should be called prior to locking and calling the
- * idr_get_new* functions. It preallocates enough memory to satisfy
- * the worst possible allocation.
+ * This function should be called prior to calling the idr_get_new* functions.
+ * It preallocates enough memory to satisfy the worst possible allocation. The
+ * caller should pass in GFP_KERNEL if possible.  This of course requires that
+ * no spinlocks be held.
  *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
  */
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
@@ -290,11 +291,13 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
  * This is the allocate id function.  It should be called with any
  * required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call.  If the idr is full, it will
- * return -ENOSPC.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
  *
- * @id returns a value in the range @starting_id ... 0x7fffffff
+ * If the idr is full, idr_get_new_above() will return %-ENOSPC.
+ *
+ * @id returns a value in the range @starting_id ... %0x7fffffff
  */
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
@@ -318,14 +321,13 @@ EXPORT_SYMBOL(idr_get_new_above);
  * @ptr: pointer you want associated with the id
  * @id: pointer to the allocated handle
  *
- * This is the allocate id function.  It should be called with any
- * required locks.
+ * If allocation from IDR's private freelist fails, idr_get_new() will
+ * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new() call.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call.  If the idr is full, it will
- * return -ENOSPC.
+ * If the idr is full, idr_get_new() will return %-ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff
+ * @id returns a value in the range %0 ... %0x7fffffff
  */
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
@@ -388,7 +390,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 }
 
 /**
- * idr_remove - remove the given id and free it's slot
+ * idr_remove - remove the given id and free its slot
  * @idp: idr handle
  * @id: unique key
  */
@@ -437,7 +439,7 @@ EXPORT_SYMBOL(idr_remove);
  * function will remove all id mappings and leave all idp_layers
  * unused.
  *
- * A typical clean-up sequence for objects stored in an idr tree, will
+ * A typical clean-up sequence for objects stored in an idr tree will
  * use idr_for_each() to free all objects, if necessary, then
  * idr_remove_all() to remove all ids, and idr_destroy() to free
  * up the cached idr_layers.
@@ -542,7 +544,7 @@ EXPORT_SYMBOL(idr_find);
  * not allowed.
  *
  * We check the return of @fn each time. If it returns anything other
- * than 0, we break out and return that value.
+ * than %0, we break out and return that value.
  *
  * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
  */
@@ -637,8 +639,8 @@ EXPORT_SYMBOL(idr_get_next);
  * @id: lookup key
  *
  * Replace the pointer registered with an id and return the old value.
- * A -ENOENT return indicates that @id was not found.
- * A -EINVAL return indicates that @id was not within valid constraints.
+ * A %-ENOENT return indicates that @id was not found.
+ * A %-EINVAL return indicates that @id was not within valid constraints.
  *
  * The caller must serialize with writers.
  */
@@ -696,10 +698,11 @@ void idr_init(struct idr *idp)
 EXPORT_SYMBOL(idr_init);
 
 
-/*
+/**
+ * DOC: IDA description
  * IDA - IDR based ID allocator
  *
- * this is id allocator without id -> pointer translation.  Memory
+ * This is an id allocator without id -> pointer translation.  Memory
+ * usage is much lower than a full-blown idr because each id only
+ * occupies a bit.  ida uses a custom leaf node which contains
  * IDA_BITMAP_BITS slots.
@@ -732,8 +735,8 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
  * following function.  It preallocates enough memory to satisfy the
  * worst possible allocation.
  *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
  */
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
@@ -765,11 +768,11 @@ EXPORT_SYMBOL(ida_pre_get);
  * Allocate new ID above or equal to @ida.  It should be called with
  * any required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN; you should unlock
  * and go back to the ida_pre_get() call.  If the ida is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
  *
- * @p_id returns a value in the range @starting_id ... 0x7fffffff.
+ * @p_id returns a value in the range @starting_id ... %0x7fffffff.
  */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
@@ -851,11 +854,11 @@ EXPORT_SYMBOL(ida_get_new_above);
  *
  * Allocate new ID.  It should be called with any required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN; you should unlock
  * and go back to the ida_pre_get() call.  If the ida is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff.
+ * @p_id returns a value in the range %0 ... %0x7fffffff.
  */
 int ida_get_new(struct ida *ida, int *p_id)
 {
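
The retry contract documented above, as a hedged usage sketch; my_lock is a placeholder, and this era's API returns the new id through an out parameter:

    /* Sketch of the idr_pre_get()/idr_get_new() loop the comments describe. */
    static int example_alloc_id(struct idr *idr, void *ptr, spinlock_t *my_lock)
    {
            int id, err;

    again:
            if (!idr_pre_get(idr, GFP_KERNEL))
                    return -ENOMEM;         /* really out of memory */

            spin_lock(my_lock);
            err = idr_get_new(idr, ptr, &id);
            spin_unlock(my_lock);

            if (err == -EAGAIN)
                    goto again;             /* refill freelist, retry */
            return err ? err : id;
    }
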
index a7616fa3162e844f5b3c7090c1911543c826147e..d7325c6b103f0be078ff3672c35c468ed35738f1 100644 (file)
@@ -141,77 +141,151 @@ void list_sort(void *priv, struct list_head *head,
 }
 EXPORT_SYMBOL(list_sort);
 
-#ifdef DEBUG_LIST_SORT
+#ifdef CONFIG_TEST_LIST_SORT
+
+#include <linux/random.h>
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define TEST_LIST_LEN (512+128+2) /* not including head */
+
+#define TEST_POISON1 0xDEADBEEF
+#define TEST_POISON2 0xA324354C
+
 struct debug_el {
-       struct list_head l_h;
+       unsigned int poison1;
+       struct list_head list;
+       unsigned int poison2;
        int value;
        unsigned serial;
 };
 
-static int cmp(void *priv, struct list_head *a, struct list_head *b)
+/* Array containing pointers to all elements in the test list */
+static struct debug_el **elts __initdata;
+
+static int __init check(struct debug_el *ela, struct debug_el *elb)
 {
-       return container_of(a, struct debug_el, l_h)->value
-            - container_of(b, struct debug_el, l_h)->value;
+       if (ela->serial >= TEST_LIST_LEN) {
+               printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+                               ela->serial);
+               return -EINVAL;
+       }
+       if (elb->serial >= TEST_LIST_LEN) {
+               printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+                               elb->serial);
+               return -EINVAL;
+       }
+       if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
+               printk(KERN_ERR "list_sort_test: error: phantom element\n");
+               return -EINVAL;
+       }
+       if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
+               printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+                               ela->poison1, ela->poison2);
+               return -EINVAL;
+       }
+       if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
+               printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+                               elb->poison1, elb->poison2);
+               return -EINVAL;
+       }
+       return 0;
 }
 
-/*
- * The pattern of set bits in the list length determines which cases
- * are hit in list_sort().
- */
-#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */
+static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+       struct debug_el *ela, *elb;
+
+       ela = container_of(a, struct debug_el, list);
+       elb = container_of(b, struct debug_el, list);
+
+       check(ela, elb);
+       return ela->value - elb->value;
+}
 
 static int __init list_sort_test(void)
 {
-       int i, r = 1, count;
-       struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
-       struct list_head *cur;
+       int i, count = 1, err = -EINVAL;
+       struct debug_el *el;
+       struct list_head *cur, *tmp;
+       LIST_HEAD(head);
+
+       printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");
 
-       printk(KERN_WARNING "testing list_sort()\n");
+       elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
+       if (!elts) {
+               printk(KERN_ERR "list_sort_test: error: cannot allocate "
+                               "memory\n");
+               goto exit;
+       }
 
-       cur = head;
-       for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
-               struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
-               BUG_ON(!el);
+       for (i = 0; i < TEST_LIST_LEN; i++) {
+               el = kmalloc(sizeof(*el), GFP_KERNEL);
+               if (!el) {
+                       printk(KERN_ERR "list_sort_test: error: cannot "
+                                       "allocate memory\n");
+                       goto exit;
+               }
                 /* force some equivalencies */
-               el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
+               el->value = random32() % (TEST_LIST_LEN/3);
                el->serial = i;
-
-               el->l_h.prev = cur;
-               cur->next = &el->l_h;
-               cur = cur->next;
+               el->poison1 = TEST_POISON1;
+               el->poison2 = TEST_POISON2;
+               elts[i] = el;
+               list_add_tail(&el->list, &head);
        }
-       head->prev = cur;
 
-       list_sort(NULL, head, cmp);
+       list_sort(NULL, &head, cmp);
+
+       for (cur = head.next; cur->next != &head; cur = cur->next) {
+               struct debug_el *el1;
+               int cmp_result;
 
-       count = 1;
-       for (cur = head->next; cur->next != head; cur = cur->next) {
-               struct debug_el *el = container_of(cur, struct debug_el, l_h);
-               int cmp_result = cmp(NULL, cur, cur->next);
                if (cur->next->prev != cur) {
-                       printk(KERN_EMERG "list_sort() returned "
-                                               "a corrupted list!\n");
-                       return 1;
-               } else if (cmp_result > 0) {
-                       printk(KERN_EMERG "list_sort() failed to sort!\n");
-                       return 1;
-               } else if (cmp_result == 0 &&
-                               el->serial >= container_of(cur->next,
-                                       struct debug_el, l_h)->serial) {
-                       printk(KERN_EMERG "list_sort() failed to preserve order"
-                                                " of equivalent elements!\n");
-                       return 1;
+                       printk(KERN_ERR "list_sort_test: error: list is "
+                                       "corrupted\n");
+                       goto exit;
+               }
+
+               cmp_result = cmp(NULL, cur, cur->next);
+               if (cmp_result > 0) {
+                       printk(KERN_ERR "list_sort_test: error: list is not "
+                                       "sorted\n");
+                       goto exit;
+               }
+
+               el = container_of(cur, struct debug_el, list);
+               el1 = container_of(cur->next, struct debug_el, list);
+               if (cmp_result == 0 && el->serial >= el1->serial) {
+                       printk(KERN_ERR "list_sort_test: error: order of "
+                                       "equivalent elements not preserved\n");
+                       goto exit;
+               }
+
+               if (check(el, el1)) {
+                       printk(KERN_ERR "list_sort_test: error: element check "
+                                       "failed\n");
+                       goto exit;
                }
-               kfree(cur->prev);
                count++;
        }
-       kfree(cur);
-       if (count != LIST_SORT_TEST_LENGTH) {
-               printk(KERN_EMERG "list_sort() returned list of"
-                                               "different length!\n");
-               return 1;
+
+       if (count != TEST_LIST_LEN) {
+               printk(KERN_ERR "list_sort_test: error: bad list length %d",
+                               count);
+               goto exit;
        }
-       return 0;
+
+       err = 0;
+exit:
+       kfree(elts);
+       list_for_each_safe(cur, tmp, &head) {
+               list_del(cur);
+               kfree(container_of(cur, struct debug_el, list));
+       }
+       return err;
 }
 module_init(list_sort_test);
-#endif
+#endif /* CONFIG_TEST_LIST_SORT */
index fb34977246bb052923d5fad6341c2ba80315fdfe..6e89eca5cca00b6fb059bae5135b0ee381a5d5d5 100644 (file)
@@ -128,12 +128,13 @@ static int match_number(substring_t *s, int *result, int base)
        char *endp;
        char *buf;
        int ret;
+       size_t len = s->to - s->from;
 
-       buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+       buf = kmalloc(len + 1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
-       memcpy(buf, s->from, s->to - s->from);
-       buf[s->to - s->from] = '\0';
+       memcpy(buf, s->from, len);
+       buf[len] = '\0';
        *result = simple_strtol(buf, &endp, base);
        ret = 0;
        if (endp == buf)
index ec9048e74f448ce3a79522aef9ac5f4c4b350737..604678d7d06d9b101feafb31da72654d5f618500 100644 (file)
@@ -8,10 +8,53 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/debugobjects.h>
 
 static LIST_HEAD(percpu_counters);
 static DEFINE_MUTEX(percpu_counters_lock);
 
+#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
+
+static struct debug_obj_descr percpu_counter_debug_descr;
+
+static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
+{
+       struct percpu_counter *fbc = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               percpu_counter_destroy(fbc);
+               debug_object_free(fbc, &percpu_counter_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static struct debug_obj_descr percpu_counter_debug_descr = {
+       .name           = "percpu_counter",
+       .fixup_free     = percpu_counter_fixup_free,
+};
+
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{
+       debug_object_init(fbc, &percpu_counter_debug_descr);
+       debug_object_activate(fbc, &percpu_counter_debug_descr);
+}
+
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{
+       debug_object_deactivate(fbc, &percpu_counter_debug_descr);
+       debug_object_free(fbc, &percpu_counter_debug_descr);
+}
+
+#else  /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{ }
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{ }
+#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
        int cpu;
@@ -30,9 +73,9 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
        s64 count;
        s32 *pcount;
-       int cpu = get_cpu();
 
-       pcount = per_cpu_ptr(fbc->counters, cpu);
+       preempt_disable();
+       pcount = this_cpu_ptr(fbc->counters);
        count = *pcount + amount;
        if (count >= batch || count <= -batch) {
                spin_lock(&fbc->lock);
@@ -42,7 +85,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
        } else {
                *pcount = count;
        }
-       put_cpu();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__percpu_counter_add);
 
@@ -75,7 +118,11 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;
+
+       debug_percpu_counter_activate(fbc);
+
 #ifdef CONFIG_HOTPLUG_CPU
+       INIT_LIST_HEAD(&fbc->list);
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
@@ -89,6 +136,8 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
        if (!fbc->counters)
                return;
 
+       debug_percpu_counter_deactivate(fbc);
+
 #ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
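
For context, a hedged sketch of the percpu_counter lifecycle whose fast path the preempt_disable()/this_cpu_ptr() change above touches; the new debug-objects hooks fire inside init and destroy:

    static struct percpu_counter example_nr_things;

    static int __init example_setup(void)
    {
            return percpu_counter_init(&example_nr_things, 0);
    }

    static void example_use(void)
    {
            percpu_counter_inc(&example_nr_things);         /* cheap path */
            if (percpu_counter_read_positive(&example_nr_things) > 1000)
                    pr_info("approx total: %lld\n",
                            percpu_counter_sum(&example_nr_things));
    }

    static void example_teardown(void)
    {
            percpu_counter_destroy(&example_nr_things);     /* debug check */
    }
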
index 7af9d841c43beea318540340739ac3432b87ed12..c150d3dafff4a4152f085ff1ae33ae0f122f25f6 100644 (file)
@@ -988,8 +988,15 @@ static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
              struct printf_spec spec)
 {
-       if (!ptr)
+       if (!ptr) {
+               /*
+                * Print (null) with the same width as a pointer so it makes
+                * tabular output look nice.
+                */
+               if (spec.field_width == -1)
+                       spec.field_width = 2 * sizeof(void *);
                return string(buf, end, "(null)", spec);
+       }
 
        switch (*fmt) {
        case 'F':
@@ -1031,7 +1038,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
        }
        spec.flags |= SMALL;
        if (spec.field_width == -1) {
-               spec.field_width = 2*sizeof(void *);
+               spec.field_width = 2 * sizeof(void *);
                spec.flags |= ZEROPAD;
        }
        spec.base = 16;
@@ -1497,7 +1504,7 @@ EXPORT_SYMBOL(snprintf);
  * @...: Arguments for the format string
  *
  * The return value is the number of characters written into @buf not including
- * the trailing '\0'. If @size is <= 0 the function returns 0.
+ * the trailing '\0'. If @size == 0 the function returns 0.
  */
 
 int scnprintf(char *buf, size_t size, const char *fmt, ...)
@@ -1509,7 +1516,11 @@ int scnprintf(char *buf, size_t size, const char *fmt, ...)
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
 
-       return (i >= size) ? (size - 1) : i;
+       if (likely(i < size))
+               return i;
+       if (size != 0)
+               return size - 1;
+       return 0;
 }
 EXPORT_SYMBOL(scnprintf);
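
The behavioural difference the fixed kernel-doc pins down, as a short sketch; note the old `(i >= size) ? (size - 1) : i` underflowed to -1 for size == 0, which the new code avoids:

    static void example_scnprintf_vs_snprintf(void)
    {
            char buf[8];
            int a = snprintf(buf, sizeof(buf), "%s", "0123456789");
            int b = scnprintf(buf, sizeof(buf), "%s", "0123456789");
            /* a == 10: length that would have been written;
             * b == 7:  what actually fit, excluding the trailing NUL.
             * With size == 0, scnprintf() now returns 0, not (int)-1. */
    }
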
 
index 65d420499a615bf68a3be8313c3b0d8b1b330178..027100d30227fead0a4010d1feb0e2791a98fcaa 100644 (file)
@@ -74,11 +74,11 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 
        nr_wb = nr_dirty = nr_io = nr_more_io = 0;
        spin_lock(&inode_lock);
-       list_for_each_entry(inode, &wb->b_dirty, i_list)
+       list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                nr_dirty++;
-       list_for_each_entry(inode, &wb->b_io, i_list)
+       list_for_each_entry(inode, &wb->b_io, i_wb_list)
                nr_io++;
-       list_for_each_entry(inode, &wb->b_more_io, i_list)
+       list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                nr_more_io++;
        spin_unlock(&inode_lock);
 
@@ -362,7 +362,7 @@ static int bdi_forker_thread(void *ptr)
 {
        struct bdi_writeback *me = ptr;
 
-       current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+       current->flags |= PF_SWAPWRITE;
        set_freezable();
 
        /*
@@ -729,6 +729,7 @@ static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
+static atomic_t nr_bdi_congested[2];
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
@@ -736,7 +737,8 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
        wait_queue_head_t *wqh = &congestion_wqh[sync];
 
        bit = sync ? BDI_sync_congested : BDI_async_congested;
-       clear_bit(bit, &bdi->state);
+       if (test_and_clear_bit(bit, &bdi->state))
+               atomic_dec(&nr_bdi_congested[sync]);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
@@ -748,7 +750,8 @@ void set_bdi_congested(struct backing_dev_info *bdi, int sync)
        enum bdi_state bit;
 
        bit = sync ? BDI_sync_congested : BDI_async_congested;
-       set_bit(bit, &bdi->state);
+       if (!test_and_set_bit(bit, &bdi->state))
+               atomic_inc(&nr_bdi_congested[sync]);
 }
 EXPORT_SYMBOL(set_bdi_congested);
 
@@ -764,13 +767,72 @@ EXPORT_SYMBOL(set_bdi_congested);
 long congestion_wait(int sync, long timeout)
 {
        long ret;
+       unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];
 
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);
+
+       trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
+                                       jiffies_to_usecs(jiffies - start));
+
        return ret;
 }
 EXPORT_SYMBOL(congestion_wait);
 
+/**
+ * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
+ * @zone: A zone to check if it is heavily congested
+ * @sync: SYNC or ASYNC IO
+ * @timeout: timeout in jiffies
+ *
+ * If any backing_dev is congested and the given @zone has experienced
+ * recent congestion, this waits for up to @timeout jiffies for either
+ * a BDI to exit congestion of the given @sync queue or a write to
+ * complete.
+ *
+ * In the absence of zone congestion, this call yields the processor
+ * via cond_resched() if necessary but otherwise does not sleep.
+ *
+ * The return value is 0 if the sleep is for the full timeout. Otherwise,
+ * it is the number of jiffies that were still remaining when the function
+ * returned. return_value == timeout implies the function did not sleep.
+ */
+long wait_iff_congested(struct zone *zone, int sync, long timeout)
+{
+       long ret;
+       unsigned long start = jiffies;
+       DEFINE_WAIT(wait);
+       wait_queue_head_t *wqh = &congestion_wqh[sync];
+
+       /*
+        * If there is no congestion, or heavy congestion is not being
+        * encountered in the current zone, yield if necessary instead
+        * of sleeping on the congestion queue
+        */
+       if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
+                       !zone_is_reclaim_congested(zone)) {
+               cond_resched();
+
+               /* In case we scheduled, work out time remaining */
+               ret = timeout - (jiffies - start);
+               if (ret < 0)
+                       ret = 0;
+
+               goto out;
+       }
+
+       /* Sleep until uncongested or a write happens */
+       prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+       ret = io_schedule_timeout(timeout);
+       finish_wait(wqh, &wait);
+
+out:
+       trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
+                                       jiffies_to_usecs(jiffies - start));
+
+       return ret;
+}
+EXPORT_SYMBOL(wait_iff_congested);
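
A hedged sketch of the intended caller; the matching mm/vmscan.c hunk is not part of this section, so the wrapper and the HZ/10 timeout below are illustrative:

    /* Sketch: reclaim backs off only when this zone really saw congestion. */
    static void example_reclaim_backoff(struct zone *zone)
    {
            /* Sleeps up to HZ/10 only if some BDI is congested *and* the
             * zone was recently marked congested; otherwise just yields. */
            wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
    }
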
index 3df063706f53e6d579ec0acabace3503a471ba29..4df2de77e06956cf68551192d4aa14578bd6ca44 100644 (file)
@@ -311,6 +311,8 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
        size_t offset;
        void *retval;
 
+       might_sleep_if(mem_flags & __GFP_WAIT);
+
        spin_lock_irqsave(&pool->lock, flags);
  restart:
        list_for_each_entry(page, &pool->page_list, page_list) {
index 3d4df44e4221d25bb041995813823e00903161e5..75572b5f23746a4b47a42920980a22e6a5251fe7 100644 (file)
@@ -612,6 +612,19 @@ void __lock_page_nosync(struct page *page)
                                                        TASK_UNINTERRUPTIBLE);
 }
 
+int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+                        unsigned int flags)
+{
+       if (!(flags & FAULT_FLAG_ALLOW_RETRY)) {
+               __lock_page(page);
+               return 1;
+       } else {
+               up_read(&mm->mmap_sem);
+               wait_on_page_locked(page);
+               return 0;
+       }
+}
+
 /**
  * find_get_page - find and get a page reference
  * @mapping: the address_space to search
@@ -1539,25 +1552,28 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 * waiting for the lock.
                 */
                do_async_mmap_readahead(vma, ra, file, page, offset);
-               lock_page(page);
-
-               /* Did it get truncated? */
-               if (unlikely(page->mapping != mapping)) {
-                       unlock_page(page);
-                       put_page(page);
-                       goto no_cached_page;
-               }
        } else {
                /* No page in the page cache at all */
                do_sync_mmap_readahead(vma, ra, file, offset);
                count_vm_event(PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
 retry_find:
-               page = find_lock_page(mapping, offset);
+               page = find_get_page(mapping, offset);
                if (!page)
                        goto no_cached_page;
        }
 
+       if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+               return ret | VM_FAULT_RETRY;
+
+       /* Did it get truncated? */
+       if (unlikely(page->mapping != mapping)) {
+               unlock_page(page);
+               put_page(page);
+               goto retry_find;
+       }
+       VM_BUG_ON(page->index != offset);
+
        /*
         * We have a locked page in the page cache, now we need to check
         * that it's up-to-date. If not, it is going to be due to an error.
@@ -2177,12 +2193,12 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        }
 
        if (written > 0) {
-               loff_t end = pos + written;
-               if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
-                       i_size_write(inode,  end);
+               pos += written;
+               if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
+                       i_size_write(inode, pos);
                        mark_inode_dirty(inode);
                }
-               *ppos = end;
+               *ppos = pos;
        }
 out:
        return written;
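
The filemap_fault() change above hands VM_FAULT_RETRY back with mmap_sem already dropped; the arch fault handler is then expected to retake the lock, redo the VMA lookup, and retry once without FAULT_FLAG_ALLOW_RETRY. A hedged sketch of that contract (compare arch/x86/mm/fault.c):

    /* Illustrative arch-side loop; access and vma checks elided. */
    static int example_handle_fault(struct mm_struct *mm, unsigned long address)
    {
            unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
            struct vm_area_struct *vma;
            int fault;

    retry:
            down_read(&mm->mmap_sem);
            vma = find_vma(mm, address);
            fault = handle_mm_fault(mm, vma, address, flags);
            if (fault & VM_FAULT_RETRY) {
                    /* mmap_sem was dropped while waiting on the page */
                    flags &= ~FAULT_FLAG_ALLOW_RETRY;       /* retry once */
                    goto retry;
            }
            up_read(&mm->mmap_sem);
            return fault;
    }
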
index 7a0aa1be4993521f7d55da731b3f69f8a59ea7c7..781e754a75ac973b15921a7c38c9e62f7a8bad66 100644 (file)
 unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);
 
+
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+
 unsigned int nr_free_highpages (void)
 {
        pg_data_t *pgdat;
@@ -422,61 +426,3 @@ void __init page_address_init(void)
 }
 
 #endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type)
-{
-       static int warn_count = 10;
-
-       if (unlikely(warn_count < 0))
-               return;
-
-       if (unlikely(in_interrupt())) {
-               if (in_nmi()) {
-                       if (type != KM_NMI && type != KM_NMI_PTE) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               } else if (in_irq()) {
-                       if (type != KM_IRQ0 && type != KM_IRQ1 &&
-                           type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-                           type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               } else if (!irqs_disabled()) {  /* softirq */
-                       if (type != KM_IRQ0 && type != KM_IRQ1 &&
-                           type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-                           type != KM_SKB_SUNRPC_DATA &&
-                           type != KM_SKB_DATA_SOFTIRQ &&
-                           type != KM_BOUNCE_READ) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               }
-       }
-
-       if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-                       type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
-                       type == KM_IRQ_PTE || type == KM_NMI ||
-                       type == KM_NMI_PTE ) {
-               if (!irqs_disabled()) {
-                       WARN_ON(1);
-                       warn_count--;
-               }
-       } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-               if (irq_count() == 0 && !irqs_disabled()) {
-                       WARN_ON(1);
-                       warn_count--;
-               }
-       }
-#ifdef CONFIG_KGDB_KDB
-       if (unlikely(type == KM_KDB && atomic_read(&kgdb_active) == -1)) {
-               WARN_ON(1);
-               warn_count--;
-       }
-#endif /* CONFIG_KGDB_KDB */
-}
-
-#endif
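
The new __kmap_atomic_idx per-CPU counter is what replaces the fixed KM_* slots (and hence the debug_kmap_atomic() type checking deleted above): each kmap_atomic() pushes onto a small per-CPU stack of fixmap slots instead of naming one explicitly. A sketch of the helpers the companion patch adds to <linux/highmem.h> (treat the exact bodies as indicative):

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __get_cpu_var(__kmap_atomic_idx)++;
#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);	/* per-CPU slot stack overflow */
#endif
	return idx;
}

static inline int kmap_atomic_idx_pop(void)
{
	int idx = --__get_cpu_var(__kmap_atomic_idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(idx < 0);		/* unbalanced kunmap_atomic() */
#endif
	return idx;
}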
index 96991ded82fe90f46d564e97da0544889a575bf7..c4a3558589ab15de3ac9abb135ee499d17544142 100644 (file)
@@ -2448,8 +2448,11 @@ retry_avoidcopy:
         * When the original hugepage is shared one, it does not have
         * anon_vma prepared.
         */
-       if (unlikely(anon_vma_prepare(vma)))
+       if (unlikely(anon_vma_prepare(vma))) {
+               /* Caller expects lock to be held */
+               spin_lock(&mm->page_table_lock);
                return VM_FAULT_OOM;
+       }
 
        copy_user_huge_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
index 6a697bb97fc589fcdd71ad56a61d43f88862786b..dedb0aff673fcf9f0bbeb71dfc3f82a7ecf068ce 100644 (file)
@@ -62,7 +62,7 @@ extern bool is_free_buddy_page(struct page *page);
  */
 static inline unsigned long page_order(struct page *page)
 {
-       VM_BUG_ON(!PageBuddy(page));
+       /* PageBuddy() must be checked by the caller */
        return page_private(page);
 }
 
index 44a8cefeae6eb9627bf8f81330ea4edaa2452d81..124324134ff67b3c4a0bc06661b4f70f2406840f 100644 (file)
@@ -1292,6 +1292,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
        list_add(&hpage->lru, &pagelist);
        ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
        if (ret) {
+               putback_lru_pages(&pagelist);
                pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
                         pfn, ret, page->flags);
                if (ret > 0)
index af82741caaa496886efa24b6c57817c4ccf80c4c..02e48aa0ed136ff8e4d808d954a20d0b46e6d23d 100644 (file)
@@ -736,7 +736,7 @@ again:
        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte)
                return -ENOMEM;
-       src_pte = pte_offset_map_nested(src_pmd, addr);
+       src_pte = pte_offset_map(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
        orig_src_pte = src_pte;
@@ -767,7 +767,7 @@ again:
 
        arch_leave_lazy_mmu_mode();
        spin_unlock(src_ptl);
-       pte_unmap_nested(orig_src_pte);
+       pte_unmap(orig_src_pte);
        add_mm_rss_vec(dst_mm, rss);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();
@@ -1591,7 +1591,7 @@ struct page *get_dump_page(unsigned long addr)
 }
 #endif /* CONFIG_ELF_CORE */
 
-pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                        spinlock_t **ptl)
 {
        pgd_t * pgd = pgd_offset(mm, addr);
@@ -2080,7 +2080,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                 * zeroes.
                 */
                if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
-                       memset(kaddr, 0, PAGE_SIZE);
+                       clear_page(kaddr);
                kunmap_atomic(kaddr, KM_USER0);
                flush_dcache_page(dst);
        } else
@@ -2108,6 +2108,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
                spinlock_t *ptl, pte_t orig_pte)
+       __releases(ptl)
 {
        struct page *old_page, *new_page;
        pte_t entry;
@@ -2627,6 +2628,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page, *swapcache = NULL;
        swp_entry_t entry;
        pte_t pte;
+       int locked;
        struct mem_cgroup *ptr = NULL;
        int exclusive = 0;
        int ret = 0;
@@ -2677,8 +2679,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_release;
        }
 
-       lock_page(page);
+       locked = lock_page_or_retry(page, mm, flags);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+       if (!locked) {
+               ret |= VM_FAULT_RETRY;
+               goto out_release;
+       }
 
        /*
         * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
@@ -2927,7 +2933,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        vmf.page = NULL;
 
        ret = vma->vm_ops->fault(vma, &vmf);
-       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
+                           VM_FAULT_RETRY)))
                return ret;
 
        if (unlikely(PageHWPoison(vmf.page))) {
@@ -3344,7 +3351,7 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
-static int follow_pte(struct mm_struct *mm, unsigned long address,
+static int __follow_pte(struct mm_struct *mm, unsigned long address,
                pte_t **ptepp, spinlock_t **ptlp)
 {
        pgd_t *pgd;
@@ -3381,6 +3388,17 @@ out:
        return -EINVAL;
 }
 
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, spinlock_t **ptlp)
+{
+       int res;
+
+       /* (void) is needed to make gcc happy */
+       (void) __cond_lock(*ptlp,
+                          !(res = __follow_pte(mm, address, ptepp, ptlp)));
+       return res;
+}
+
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping
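
The (void) __cond_lock() dance above exists purely for sparse: it tells the checker that *ptlp is acquired exactly when __follow_pte() returns 0, so callers that pair follow_pte() with pte_unmap_unlock() check out cleanly. For reference, <linux/compiler.h> defines the annotation along these lines:

#ifdef __CHECKER__
# define __cond_lock(x, c)	((c) ? ({ __acquire(x); 1; }) : 0)
#else
# define __cond_lock(x, c)	(c)
#endif

With the checker enabled the wrapper "takes" the lock on success; without it, the macro compiles down to a plain call.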
index d4e940a26945cde1f8251b39b37fde3144639d5b..9260314a221e0720d5cc3ecf5f00325c7ec3fba6 100644 (file)
@@ -602,27 +602,14 @@ static struct page *next_active_pageblock(struct page *page)
 /* Checks if this range of memory is likely to be hot-removable. */
 int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-       int type;
        struct page *page = pfn_to_page(start_pfn);
        struct page *end_page = page + nr_pages;
 
        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
-               type = get_pageblock_migratetype(page);
-
-               /*
-                * A pageblock containing MOVABLE or free pages is considered
-                * removable
-                */
-               if (type != MIGRATE_MOVABLE && !pageblock_free(page))
-                       return 0;
-
-               /*
-                * A pageblock starting with a PageReserved page is not
-                * considered removable.
-                */
-               if (PageReserved(page))
+               if (!is_pageblock_removable_nolock(page))
                        return 0;
+               cond_resched();
        }
 
        /* All pageblocks in the memory block are likely to be hot-removable */
@@ -659,7 +646,7 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
  * Scanning pfn is much easier than scanning lru list.
  * Scan pfn from start to end and Find LRU page.
  */
-int scan_lru_pages(unsigned long start, unsigned long end)
+static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
 {
        unsigned long pfn;
        struct page *page;
@@ -709,29 +696,30 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                                            page_is_file_cache(page));
 
                } else {
-                       /* Becasue we don't have big zone->lock. we should
-                          check this again here. */
-                       if (page_count(page))
-                               not_managed++;
 #ifdef CONFIG_DEBUG_VM
                        printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
                               pfn);
                        dump_page(page);
 #endif
+                       /* Because we don't have big zone->lock, we should
+                          check this again here. */
+                       if (page_count(page)) {
+                               not_managed++;
+                               ret = -EBUSY;
+                               break;
+                       }
                }
        }
-       ret = -EBUSY;
-       if (not_managed) {
-               if (!list_empty(&source))
+       if (!list_empty(&source)) {
+               if (not_managed) {
+                       putback_lru_pages(&source);
+                       goto out;
+               }
+               /* this function returns # of failed pages */
+               ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
+               if (ret)
                        putback_lru_pages(&source);
-               goto out;
        }
-       ret = 0;
-       if (list_empty(&source))
-               goto out;
-       /* this function returns # of failed pages */
-       ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
-
 out:
        return ret;
 }
index f969da5dd8a2136b92aab7e7c1a6ecc9083d6cb9..81a127643aea889675e9570ebee5653833e55e95 100644 (file)
@@ -924,15 +924,21 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;
+       struct vm_area_struct *vma;
 
        nodes_clear(nmask);
        node_set(source, nmask);
 
-       check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+       vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+       if (IS_ERR(vma))
+               return PTR_ERR(vma);
 
-       if (!list_empty(&pagelist))
+       if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_node_page, dest, 0);
+               if (err)
+                       putback_lru_pages(&pagelist);
+       }
 
        return err;
 }
@@ -1147,9 +1153,12 @@ static long do_mbind(unsigned long start, unsigned long len,
 
                err = mbind_range(mm, start, end, new);
 
-               if (!list_empty(&pagelist))
+               if (!list_empty(&pagelist)) {
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                (unsigned long)vma, 0);
+                       if (nr_failed)
+                               putback_lru_pages(&pagelist);
+               }
 
                if (!err && nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
index f8c9bccf252039fd631b5020fe66e8798d8193f7..fe5a3c6a54260f2ae9999fbaabe03082a818982f 100644 (file)
@@ -497,7 +497,6 @@ static int writeout(struct address_space *mapping, struct page *page)
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
-               .nonblocking = 1,
                .for_reclaim = 1
        };
        int rc;
@@ -884,8 +883,9 @@ out:
  *
  * The function returns after 10 attempts or if no pages
  * are movable anymore because to has become empty
- * or no retryable pages exist anymore. All pages will be
- * returned to the LRU or freed.
+ * or no retryable pages exist anymore.
+ * The caller should call putback_lru_pages() to return pages to the LRU
+ * or the free list.
  *
  * Return: Number of pages not migrated or error code.
  */
@@ -932,8 +932,6 @@ out:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;
 
-       putback_lru_pages(from);
-
        if (rc)
                return rc;
 
@@ -1039,7 +1037,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 
                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
-               if (!vma || !vma_migratable(vma))
+               if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
                        goto set_status;
 
                page = follow_page(vma, pp->addr, FOLL_GET);
@@ -1088,9 +1086,12 @@ set_status:
        }
 
        err = 0;
-       if (!list_empty(&pagelist))
+       if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_page_node,
                                (unsigned long)pm, 0);
+               if (err)
+                       putback_lru_pages(&pagelist);
+       }
 
        up_read(&mm->mmap_sem);
        return err;
@@ -1203,7 +1204,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
                int err = -EFAULT;
 
                vma = find_vma(mm, addr);
-               if (!vma)
+               if (!vma || addr < vma->vm_start)
                        goto set_status;
 
                page = follow_page(vma, addr, 0);
index cde56ee51ef7a6b10e18f860bd7be08df07f2407..563fbdd6293ae3f0139450d6b7d848f862c1f8dc 100644 (file)
@@ -101,7 +101,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
-       new_pte = pte_offset_map_nested(new_pmd, new_addr);
+       new_pte = pte_offset_map(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -119,7 +119,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
-       pte_unmap_nested(new_pte - 1);
+       pte_unmap(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
index 88ff091eb07a23dcaee1e8e6d0cc70927183b89b..30b5c20eec15e6761bad5dd4815e69ebae1d7dc2 100644 (file)
@@ -293,11 +293,58 @@ void *vmalloc(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc);
 
+/*
+ *     vzalloc - allocate virtually contiguous memory with zero fill
+ *
+ *     @size:          allocation size
+ *
+ *     Allocate enough pages to cover @size from the page level
+ *     allocator and map them into contiguous kernel virtual space.
+ *     The memory allocated is set to zero.
+ *
+ *     For tight control over page level allocator and protection flags
+ *     use __vmalloc() instead.
+ */
+void *vzalloc(unsigned long size)
+{
+       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+                       PAGE_KERNEL);
+}
+EXPORT_SYMBOL(vzalloc);
+
+/**
+ * vmalloc_node - allocate memory on a specific node
+ * @size:      allocation size
+ * @node:      numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
 void *vmalloc_node(unsigned long size, int node)
 {
        return vmalloc(size);
 }
 EXPORT_SYMBOL(vmalloc_node);
+
+/**
+ * vzalloc_node - allocate memory on a specific node with zero fill
+ * @size:      allocation size
+ * @node:      numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+void *vzalloc_node(unsigned long size, int node)
+{
+       return vzalloc(size);
+}
+EXPORT_SYMBOL(vzalloc_node);
 
 #ifndef PAGE_KERNEL_EXEC
 # define PAGE_KERNEL_EXEC PAGE_KERNEL
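
vzalloc()/vzalloc_node() simply fold the common __GFP_ZERO open-coding into helpers so callers stop pairing vmalloc() with memset(). A typical conversion at an illustrative call site:

	/* before: */
	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, len);

	/* after: */
	buf = vzalloc(len);
	if (!buf)
		return -ENOMEM;
	/* ... use buf, then vfree(buf) ... */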
index 4029583a10241aaa84e3937ee216740e0a88a363..7dcca55ede7ca1493df615af994f6ba23c1bb129 100644 (file)
@@ -162,10 +162,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
                return 0;
 
        /*
-        * Shortcut check for OOM_SCORE_ADJ_MIN so the entire heuristic doesn't
-        * need to be executed for something that cannot be killed.
+        * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
+        * so the entire heuristic doesn't need to be executed for something
+        * that cannot be killed.
         */
-       if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+       if (atomic_read(&p->mm->oom_disable_count)) {
                task_unlock(p);
                return 0;
        }
@@ -403,16 +404,40 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 #define K(x) ((x) << (PAGE_SHIFT-10))
 static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 {
+       struct task_struct *q;
+       struct mm_struct *mm;
+
        p = find_lock_task_mm(p);
        if (!p)
                return 1;
 
+       /* mm cannot be safely dereferenced after task_unlock(p) */
+       mm = p->mm;
+
        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
                task_pid_nr(p), p->comm, K(p->mm->total_vm),
                K(get_mm_counter(p->mm, MM_ANONPAGES)),
                K(get_mm_counter(p->mm, MM_FILEPAGES)));
        task_unlock(p);
 
+       /*
+        * Kill all processes sharing p->mm in other thread groups, if any.
+        * They don't get access to memory reserves or a higher scheduler
+        * priority, though, to avoid depletion of all memory or task
+        * starvation.  This prevents mm->mmap_sem livelock when an oom killed
+        * task cannot exit because it requires the semaphore and its contended
+        * task cannot exit because it requires the semaphore and it's contended
+        * now get access to memory reserves since it has a pending fatal
+        * signal.
+        */
+       for_each_process(q)
+               if (q->mm == mm && !same_thread_group(q, p)) {
+                       task_lock(q);   /* Protect ->comm from prctl() */
+                       pr_err("Kill process %d (%s) sharing same memory\n",
+                               task_pid_nr(q), q->comm);
+                       task_unlock(q);
+                       force_sig(SIGKILL, q);
+               }
 
        set_tsk_thread_flag(p, TIF_MEMDIE);
        force_sig(SIGKILL, p);
@@ -680,7 +705,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
        read_lock(&tasklist_lock);
        if (sysctl_oom_kill_allocating_task &&
            !oom_unkillable_task(current, NULL, nodemask) &&
-           (current->signal->oom_adj != OOM_DISABLE)) {
+           current->mm && !atomic_read(&current->mm->oom_disable_count)) {
                /*
                 * oom_kill_process() needs tasklist_lock held.  If it returns
                 * non-zero, current could not be killed so we must fallback to
index e3bccac1f0255bb78373e6268aab787a370ee520..b840afa89761ce0d83690ff963385399246fa6d8 100644 (file)
@@ -415,14 +415,8 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 
        if (vm_dirty_bytes)
                dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
-       else {
-               int dirty_ratio;
-
-               dirty_ratio = vm_dirty_ratio;
-               if (dirty_ratio < 5)
-                       dirty_ratio = 5;
-               dirty = (dirty_ratio * available_memory) / 100;
-       }
+       else
+               dirty = (vm_dirty_ratio * available_memory) / 100;
 
        if (dirty_background_bytes)
                background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
@@ -510,7 +504,7 @@ static void balance_dirty_pages(struct address_space *mapping,
                 * catch-up. This avoids (excessively) small writeouts
                 * when the bdi limits are ramping up.
                 */
-               if (nr_reclaimable + nr_writeback <
+               if (nr_reclaimable + nr_writeback <=
                                (background_thresh + dirty_thresh) / 2)
                        break;
 
@@ -542,8 +536,8 @@ static void balance_dirty_pages(struct address_space *mapping,
                 * the last resort safeguard.
                 */
                dirty_exceeded =
-                       (bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh)
-                       || (nr_reclaimable + nr_writeback >= dirty_thresh);
+                       (bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
+                       || (nr_reclaimable + nr_writeback > dirty_thresh);
 
                if (!dirty_exceeded)
                        break;
@@ -1121,6 +1115,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 {
        if (mapping_cap_account_dirty(mapping)) {
                __inc_zone_page_state(page, NR_FILE_DIRTY);
+               __inc_zone_page_state(page, NR_DIRTIED);
                __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
                task_dirty_inc(current);
                task_io_account_write(PAGE_CACHE_SIZE);
@@ -1128,6 +1123,18 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 }
 EXPORT_SYMBOL(account_page_dirtied);
 
+/*
+ * Helper function for set_page_writeback family.
+ * NOTE: Unlike account_page_dirtied this does not rely on being atomic
+ * wrt interrupts.
+ */
+void account_page_writeback(struct page *page)
+{
+       inc_zone_page_state(page, NR_WRITEBACK);
+       inc_zone_page_state(page, NR_WRITTEN);
+}
+EXPORT_SYMBOL(account_page_writeback);
+
 /*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
  * its radix tree.
@@ -1366,7 +1373,7 @@ int test_set_page_writeback(struct page *page)
                ret = TestSetPageWriteback(page);
        }
        if (!ret)
-               inc_zone_page_state(page, NR_WRITEBACK);
+               account_page_writeback(page);
        return ret;
 
 }
index 2a362c52fdf482144eac24111059f56b5147bd42..07a654486f75cfdfe13035f7bf501e2d2dbf0a4a 100644 (file)
@@ -531,7 +531,7 @@ static inline void __free_one_page(struct page *page,
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page
         */
-       if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
+       if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
                struct page *higher_page, *higher_buddy;
                combined_idx = __find_combined_index(page_idx, order);
                higher_page = page + combined_idx - page_idx;
@@ -1907,7 +1907,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
                        preferred_zone, migratetype);
 
                if (!page && gfp_mask & __GFP_NOFAIL)
-                       congestion_wait(BLK_RW_ASYNC, HZ/50);
+                       wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
        } while (!page && (gfp_mask & __GFP_NOFAIL));
 
        return page;
@@ -1932,7 +1932,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
        const gfp_t wait = gfp_mask & __GFP_WAIT;
 
        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
-       BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
+       BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
 
        /*
         * The caller may dip into page reserves a bit more if the caller
@@ -1940,7 +1940,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
         */
-       alloc_flags |= (gfp_mask & __GFP_HIGH);
+       alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
        if (!wait) {
                alloc_flags |= ALLOC_HARDER;
@@ -2095,7 +2095,7 @@ rebalance:
        pages_reclaimed += did_some_progress;
        if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
                /* Wait for some write requests to complete then retry */
-               congestion_wait(BLK_RW_ASYNC, HZ/50);
+               wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
                goto rebalance;
        }
 
@@ -5297,12 +5297,65 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
  * page allocater never alloc memory from ISOLATE block.
  */
 
+static int
+__count_immobile_pages(struct zone *zone, struct page *page, int count)
+{
+       unsigned long pfn, iter, found;
+       /*
+        * To avoid noisy data, lru_add_drain_all() should be called first.
+        * If the zone is ZONE_MOVABLE, it never contains immobile pages.
+        */
+       if (zone_idx(zone) == ZONE_MOVABLE)
+               return true;
+
+       if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+               return true;
+
+       pfn = page_to_pfn(page);
+       for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
+               unsigned long check = pfn + iter;
+
+               if (!pfn_valid_within(check))
+                       continue;
+               page = pfn_to_page(check);
+               if (!page_count(page)) {
+                       if (PageBuddy(page))
+                               iter += (1 << page_order(page)) - 1;
+                       continue;
+               }
+               if (!PageLRU(page))
+                       found++;
+               /*
+                * If there are RECLAIMABLE pages, we need to check them too.
+                * But for now, memory offline itself doesn't call
+                * shrink_slab() and that still needs to be fixed.
+                */
+               /*
+                * If the page is not RAM, page_count() should be 0.
+                * We don't need further checks: this is a _used_,
+                * non-movable page.
+                *
+                * The problematic thing here is PG_reserved pages. PG_reserved
+                * is set to both of a memory hole page and a _used_ kernel
+                * page at boot.
+                */
+               if (found > count)
+                       return false;
+       }
+       return true;
+}
+
+bool is_pageblock_removable_nolock(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+       return __count_immobile_pages(zone, page, 0);
+}
+
 int set_migratetype_isolate(struct page *page)
 {
        struct zone *zone;
-       struct page *curr_page;
-       unsigned long flags, pfn, iter;
-       unsigned long immobile = 0;
+       unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;
@@ -5312,11 +5365,6 @@ int set_migratetype_isolate(struct page *page)
        zone_idx = zone_idx(zone);
 
        spin_lock_irqsave(&zone->lock, flags);
-       if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
-           zone_idx == ZONE_MOVABLE) {
-               ret = 0;
-               goto out;
-       }
 
        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
@@ -5336,23 +5384,20 @@ int set_migratetype_isolate(struct page *page)
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
-       if (notifier_ret || !arg.pages_found)
+       if (notifier_ret)
                goto out;
-
-       for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
-               if (!pfn_valid_within(pfn))
-                       continue;
-
-               curr_page = pfn_to_page(iter);
-               if (!page_count(curr_page) || PageLRU(curr_page))
-                       continue;
-
-               immobile++;
-       }
-
-       if (arg.pages_found == immobile)
+       /*
+        * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
+        * We just check MOVABLE pages.
+        */
+       if (__count_immobile_pages(zone, page, arg.pages_found))
                ret = 0;
 
+       /*
+        * immobile means "not-on-LRU" pages. If immobile is larger than the
+        * removable-by-driver pages reported by the notifier, we'll fail.
+        */
+
 out:
        if (!ret) {
                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
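
A quick worked example of the buddy skip in __count_immobile_pages(): a PageBuddy() page of order k heads a free chunk of 1 << k contiguous pages, all trivially movable, so the scan can jump over the whole chunk:

	/*
	 * e.g. at iter == 8, PageBuddy(page) with page_order(page) == 3
	 * covers pfns [pfn + 8, pfn + 15]:
	 *	iter += (1 << 3) - 1;	 -> iter == 15
	 * and the loop's own iter++ resumes the scan at pfn + 16.
	 */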
index 5e0ffd967452083c8209dfa0a1334416ded99f68..4ae42bb4089241d146d8a62429cebc600fd7d14e 100644 (file)
@@ -86,7 +86,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 0 if all pages in the range is isolated.
+ * Returns 1 if all pages in the range are isolated.
  */
 static int
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
@@ -119,7 +119,6 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
        struct zone *zone;
        int ret;
 
-       pfn = start_pfn;
        /*
         * Note: pageblock_nr_page != MAX_ORDER. Then, chunks of free page
         * is not aligned to pageblock_nr_pages.
index f5ad996a4a8f76615d1f383fde5e4a783375a7b7..1a8bf76bfd038a7fe84fdd6b9443c4673c8ddd9f 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -80,7 +80,7 @@ static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
        return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
 }
 
-void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
+static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
 {
        kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
 }
@@ -314,7 +314,7 @@ void __init anon_vma_init(void)
  * Getting a lock on a stable anon_vma from a page off the LRU is
  * tricky: page_lock_anon_vma rely on RCU to guard against the races.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *__page_lock_anon_vma(struct page *page)
 {
        struct anon_vma *anon_vma, *root_anon_vma;
        unsigned long anon_mapping;
@@ -348,6 +348,8 @@ out:
 }
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
+       __releases(&anon_vma->root->lock)
+       __releases(RCU)
 {
        anon_vma_unlock(anon_vma);
        rcu_read_unlock();
@@ -407,7 +409,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
  *
  * On success returns with pte mapped and locked.
  */
-pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
                          unsigned long address, spinlock_t **ptlp, int sync)
 {
        pgd_t *pgd;
index 080b09a57a8fab1a4565757d13bb7a4fdb74b74a..f6d350e8adc56da67313a782569e81c34af27286 100644 (file)
@@ -1586,6 +1586,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 
        inode = new_inode(sb);
        if (inode) {
+               inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                inode->i_blocks = 0;
                inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
@@ -1903,7 +1904,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
        dir->i_size += BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        inc_nlink(inode);
-       atomic_inc(&inode->i_count);    /* New dentry reference */
+       ihold(inode);   /* New dentry reference */
        dget(dentry);           /* Extra pinning count for the created dentry */
        d_instantiate(dentry, inode);
 out:
@@ -2146,7 +2147,7 @@ static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
        if (*len < 3)
                return 255;
 
-       if (hlist_unhashed(&inode->i_hash)) {
+       if (inode_unhashed(inode)) {
                /* Unfortunately insert_inode_hash is not idempotent,
                 * so as we hash inodes here rather than at creation
                 * time, we need a lock to ensure we only try
@@ -2154,7 +2155,7 @@ static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
                 */
                static DEFINE_SPINLOCK(lock);
                spin_lock(&lock);
-               if (hlist_unhashed(&inode->i_hash))
+               if (inode_unhashed(inode))
                        __insert_inode_hash(inode,
                                            inode->i_ino + inode->i_generation);
                spin_unlock(&lock);
index fcae9815d3b35ce951669e8a404fb71b7a40d34f..b1e40dafbab3cc6326a6913acf17155cbcd8e7f0 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -901,7 +901,7 @@ static int transfer_objects(struct array_cache *to,
                struct array_cache *from, unsigned int max)
 {
        /* Figure out how many entries to transfer */
-       int nr = min(min(from->avail, max), to->limit - to->avail);
+       int nr = min3(from->avail, max, to->limit - to->avail);
 
        if (!nr)
                return 0;
index 9fc7bac7db0c3b3478b49cbcf33a7f906c170b4c..67ddaaf98c744d9e7febe38b0cbdd9cabb55c5b1 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/capability.h>
 #include <linux/syscalls.h>
 #include <linux/memcontrol.h>
+#include <linux/poll.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -58,6 +59,10 @@ static struct swap_info_struct *swap_info[MAX_SWAPFILES];
 
 static DEFINE_MUTEX(swapon_mutex);
 
+static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
+/* Activity counter to indicate that a swapon or swapoff has occurred */
+static atomic_t proc_poll_event = ATOMIC_INIT(0);
+
 static inline unsigned char swap_count(unsigned char ent)
 {
        return ent & ~SWAP_HAS_CACHE;   /* may include SWAP_HAS_CONT flag */
@@ -1680,6 +1685,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        }
        filp_close(swap_file, NULL);
        err = 0;
+       atomic_inc(&proc_poll_event);
+       wake_up_interruptible(&proc_poll_wait);
 
 out_dput:
        filp_close(victim, NULL);
@@ -1688,6 +1695,25 @@ out:
 }
 
 #ifdef CONFIG_PROC_FS
+struct proc_swaps {
+       struct seq_file seq;
+       int event;
+};
+
+static unsigned swaps_poll(struct file *file, poll_table *wait)
+{
+       struct proc_swaps *s = file->private_data;
+
+       poll_wait(file, &proc_poll_wait, wait);
+
+       if (s->event != atomic_read(&proc_poll_event)) {
+               s->event = atomic_read(&proc_poll_event);
+               return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+       }
+
+       return POLLIN | POLLRDNORM;
+}
+
 /* iterator */
 static void *swap_start(struct seq_file *swap, loff_t *pos)
 {
@@ -1771,7 +1797,24 @@ static const struct seq_operations swaps_op = {
 
 static int swaps_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &swaps_op);
+       struct proc_swaps *s;
+       int ret;
+
+       s = kmalloc(sizeof(struct proc_swaps), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       file->private_data = s;
+
+       ret = seq_open(file, &swaps_op);
+       if (ret) {
+               kfree(s);
+               return ret;
+       }
+
+       s->seq.private = s;
+       s->event = atomic_read(&proc_poll_event);
+       return ret;
 }
 
 static const struct file_operations proc_swaps_operations = {
@@ -1779,6 +1822,7 @@ static const struct file_operations proc_swaps_operations = {
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
+       .poll           = swaps_poll,
 };
 
 static int __init procswaps_init(void)
@@ -2084,6 +2128,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                swap_info[prev]->next = type;
        spin_unlock(&swap_lock);
        mutex_unlock(&swapon_mutex);
+       atomic_inc(&proc_poll_event);
+       wake_up_interruptible(&proc_poll_wait);
+
        error = 0;
        goto out;
 bad_swap:
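
With the proc_poll_event counter and wait queue above, /proc/swaps becomes pollable: swaps_poll() reports POLLERR | POLLPRI once per swapon/swapoff since the last poll. A minimal userspace monitor (hypothetical example program; error handling trimmed):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/swaps", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	for (;;) {
		/* blocks until a swapon/swapoff bumps proc_poll_event */
		poll(&pfd, 1, -1);
		if (pfd.revents & (POLLERR | POLLPRI))
			printf("swap configuration changed\n");
	}
	return 0;
}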
index 9f909622a25eb9f5ac3f5489465d18c02d961c24..a3d66b3dc5cb0c9136b13764958d6aedf5b10526 100644 (file)
@@ -293,13 +293,13 @@ static void __insert_vmap_area(struct vmap_area *va)
        struct rb_node *tmp;
 
        while (*p) {
-               struct vmap_area *tmp;
+               struct vmap_area *tmp_va;
 
                parent = *p;
-               tmp = rb_entry(parent, struct vmap_area, rb_node);
-               if (va->va_start < tmp->va_end)
+               tmp_va = rb_entry(parent, struct vmap_area, rb_node);
+               if (va->va_start < tmp_va->va_end)
                        p = &(*p)->rb_left;
-               else if (va->va_end > tmp->va_start)
+               else if (va->va_end > tmp_va->va_start)
                        p = &(*p)->rb_right;
                else
                        BUG();
@@ -1596,6 +1596,13 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 }
 EXPORT_SYMBOL(__vmalloc);
 
+static inline void *__vmalloc_node_flags(unsigned long size,
+                                       int node, gfp_t flags)
+{
+       return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
+                                       node, __builtin_return_address(0));
+}
+
 /**
  *     vmalloc  -  allocate virtually contiguous memory
  *     @size:          allocation size
@@ -1607,11 +1614,27 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-       return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
-                                       -1, __builtin_return_address(0));
+       return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
 }
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ *     vzalloc - allocate virtually contiguous memory with zero fill
+ *     @size:  allocation size
+ *     Allocate enough pages to cover @size from the page level
+ *     allocator and map them into contiguous kernel virtual space.
+ *     The memory allocated is set to zero.
+ *
+ *     For tight control over page level allocator and protection flags
+ *     use __vmalloc() instead.
+ */
+void *vzalloc(unsigned long size)
+{
+       return __vmalloc_node_flags(size, -1,
+                               GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+}
+EXPORT_SYMBOL(vzalloc);
+
 /**
  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
  * @size: allocation size
@@ -1653,6 +1676,25 @@ void *vmalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vmalloc_node);
 
+/**
+ * vzalloc_node - allocate memory on a specific node with zero fill
+ * @size:      allocation size
+ * @node:      numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc_node() instead.
+ */
+void *vzalloc_node(unsigned long size, int node)
+{
+       return __vmalloc_node_flags(size, node,
+                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+}
+EXPORT_SYMBOL(vzalloc_node);
+
 #ifndef PAGE_KERNEL_EXEC
 # define PAGE_KERNEL_EXEC PAGE_KERNEL
 #endif
@@ -2350,6 +2392,7 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 
 #ifdef CONFIG_PROC_FS
 static void *s_start(struct seq_file *m, loff_t *pos)
+       __acquires(&vmlist_lock)
 {
        loff_t n = *pos;
        struct vm_struct *v;
@@ -2376,6 +2419,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 }
 
 static void s_stop(struct seq_file *m, void *p)
+       __releases(&vmlist_lock)
 {
        read_unlock(&vmlist_lock);
 }
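
The __acquires()/__releases() markers on s_start()/s_stop() above are sparse context annotations with no effect on generated code; <linux/compiler.h> defines them roughly as:

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

They let sparse verify that a function entered without vmlist_lock leaves with it held (s_start), or vice versa (s_stop).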
index b94c9464f2620b4e06e4219899114c2274749735..b8a6fdc2131258db15321021375117cae27cec0d 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
+enum lumpy_mode {
+       LUMPY_MODE_NONE,
+       LUMPY_MODE_ASYNC,
+       LUMPY_MODE_SYNC,
+};
+
 struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
@@ -82,7 +88,7 @@ struct scan_control {
         * Intend to reclaim enough continuous memory rather than reclaim
         * enough amount of memory. i.e, mode for high order allocation.
         */
-       bool lumpy_reclaim_mode;
+       enum lumpy_mode lumpy_reclaim_mode;
 
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
@@ -265,6 +271,36 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
        return ret;
 }
 
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
+                                  bool sync)
+{
+       enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
+
+       /*
+        * Some reclaim attempts have already failed. It's not worth trying
+        * synchronous lumpy reclaim.
+        */
+       if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+               return;
+
+       /*
+        * If we need a large contiguous chunk of memory, or have
+        * trouble getting a small set of contiguous pages, we
+        * will reclaim both active and inactive pages.
+        */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               sc->lumpy_reclaim_mode = mode;
+       else if (sc->order && priority < DEF_PRIORITY - 2)
+               sc->lumpy_reclaim_mode = mode;
+       else
+               sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+}
+
+static void disable_lumpy_reclaim_mode(struct scan_control *sc)
+{
+       sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+}
+
 static inline int is_page_cache_freeable(struct page *page)
 {
        /*
@@ -275,7 +311,8 @@ static inline int is_page_cache_freeable(struct page *page)
        return page_count(page) - page_has_private(page) == 2;
 }
 
-static int may_write_to_queue(struct backing_dev_info *bdi)
+static int may_write_to_queue(struct backing_dev_info *bdi,
+                             struct scan_control *sc)
 {
        if (current->flags & PF_SWAPWRITE)
                return 1;
@@ -283,6 +320,10 @@ static int may_write_to_queue(struct backing_dev_info *bdi)
                return 1;
        if (bdi == current->backing_dev_info)
                return 1;
+
+       /* lumpy reclaim for hugepage often needs a lot of writes */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               return 1;
        return 0;
 }
 
@@ -307,12 +348,6 @@ static void handle_write_error(struct address_space *mapping,
        unlock_page(page);
 }
 
-/* Request for sync pageout. */
-enum pageout_io {
-       PAGEOUT_IO_ASYNC,
-       PAGEOUT_IO_SYNC,
-};
-
 /* possible outcome of pageout() */
 typedef enum {
        /* failed to write page out, page is locked */
@@ -330,7 +365,7 @@ typedef enum {
  * Calls ->writepage().
  */
 static pageout_t pageout(struct page *page, struct address_space *mapping,
-                                               enum pageout_io sync_writeback)
+                        struct scan_control *sc)
 {
        /*
         * If the page is dirty, only perform writeback if that write
@@ -366,7 +401,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
        }
        if (mapping->a_ops->writepage == NULL)
                return PAGE_ACTIVATE;
-       if (!may_write_to_queue(mapping->backing_dev_info))
+       if (!may_write_to_queue(mapping->backing_dev_info, sc))
                return PAGE_KEEP;
 
        if (clear_page_dirty_for_io(page)) {
@@ -376,7 +411,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                        .nr_to_write = SWAP_CLUSTER_MAX,
                        .range_start = 0,
                        .range_end = LLONG_MAX,
-                       .nonblocking = 1,
                        .for_reclaim = 1,
                };
 
@@ -394,7 +428,8 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                 * direct reclaiming a large contiguous area and the
                 * first attempt to free a range of pages fails.
                 */
-               if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
+               if (PageWriteback(page) &&
+                   sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
                        wait_on_page_writeback(page);
 
                if (!PageWriteback(page)) {
@@ -402,7 +437,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                        ClearPageReclaim(page);
                }
                trace_mm_vmscan_writepage(page,
-                       trace_reclaim_flags(page, sync_writeback));
+                       trace_reclaim_flags(page, sc->lumpy_reclaim_mode));
                inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
@@ -580,7 +615,7 @@ static enum page_references page_check_references(struct page *page,
        referenced_page = TestClearPageReferenced(page);
 
        /* Lumpy reclaim - ignore references */
-       if (sc->lumpy_reclaim_mode)
+       if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
                return PAGEREF_RECLAIM;
 
        /*
@@ -616,7 +651,7 @@ static enum page_references page_check_references(struct page *page,
        }
 
        /* Reclaim if clean, defer dirty pages to writeback */
-       if (referenced_page)
+       if (referenced_page && !PageSwapBacked(page))
                return PAGEREF_RECLAIM_CLEAN;
 
        return PAGEREF_RECLAIM;
@@ -644,12 +679,14 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-                                       struct scan_control *sc,
-                                       enum pageout_io sync_writeback)
+                                     struct zone *zone,
+                                     struct scan_control *sc)
 {
        LIST_HEAD(ret_pages);
        LIST_HEAD(free_pages);
        int pgactivate = 0;
+       unsigned long nr_dirty = 0;
+       unsigned long nr_congested = 0;
        unsigned long nr_reclaimed = 0;
 
        cond_resched();
@@ -669,6 +706,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        goto keep;
 
                VM_BUG_ON(PageActive(page));
+               VM_BUG_ON(page_zone(page) != zone);
 
                sc->nr_scanned++;
 
@@ -694,10 +732,13 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                         * for any page for which writeback has already
                         * started.
                         */
-                       if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
+                       if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
+                           may_enter_fs)
                                wait_on_page_writeback(page);
-                       else
-                               goto keep_locked;
+                       else {
+                               unlock_page(page);
+                               goto keep_lumpy;
+                       }
                }
 
                references = page_check_references(page, sc);
@@ -743,6 +784,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                }
 
                if (PageDirty(page)) {
+                       nr_dirty++;
+
                        if (references == PAGEREF_RECLAIM_CLEAN)
                                goto keep_locked;
                        if (!may_enter_fs)
@@ -751,14 +794,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
 
                        /* Page is dirty, try to write it out here */
-                       switch (pageout(page, mapping, sync_writeback)) {
+                       switch (pageout(page, mapping, sc)) {
                        case PAGE_KEEP:
+                               nr_congested++;
                                goto keep_locked;
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
-                               if (PageWriteback(page) || PageDirty(page))
+                               if (PageWriteback(page))
+                                       goto keep_lumpy;
+                               if (PageDirty(page))
                                        goto keep;
+
                                /*
                                 * A synchronous write - probably a ramdisk.  Go
                                 * ahead and try to reclaim the page.
@@ -841,6 +888,7 @@ cull_mlocked:
                        try_to_free_swap(page);
                unlock_page(page);
                putback_lru_page(page);
+               disable_lumpy_reclaim_mode(sc);
                continue;
 
 activate_locked:
@@ -853,10 +901,21 @@ activate_locked:
 keep_locked:
                unlock_page(page);
 keep:
+               disable_lumpy_reclaim_mode(sc);
+keep_lumpy:
                list_add(&page->lru, &ret_pages);
                VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
        }
 
+       /*
+        * Tag a zone as congested if all the dirty pages encountered were
+        * backed by a congested BDI. In this case, reclaimers should just
+        * back off and wait for congestion to clear because further reclaim
+        * will encounter the same problem.
+        */
+       if (nr_dirty == nr_congested)
+               zone_set_flag(zone, ZONE_CONGESTED);
+
        free_page_list(&free_pages);
 
        list_splice(&ret_pages, page_list);
@@ -1006,7 +1065,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                        /* Check that we have not crossed a zone boundary. */
                        if (unlikely(page_zone_id(cursor_page) != zone_id))
-                               continue;
+                               break;
 
                        /*
                         * If we don't have enough swap space, reclaiming of
@@ -1014,8 +1073,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                         * pointless.
                         */
                        if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
-                                       !PageSwapCache(cursor_page))
-                               continue;
+                           !PageSwapCache(cursor_page))
+                               break;
 
                        if (__isolate_lru_page(cursor_page, mode, file) == 0) {
                                list_move(&cursor_page->lru, dst);
@@ -1026,11 +1085,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                                        nr_lumpy_dirty++;
                                scan++;
                        } else {
-                               if (mode == ISOLATE_BOTH &&
-                                               page_count(cursor_page))
-                                       nr_lumpy_failed++;
+                               /* the page is freed already. */
+                               if (!page_count(cursor_page))
+                                       continue;
+                               break;
                        }
                }
+
+               /* If we break out of the loop above, lumpy reclaim failed */
+               if (pfn < end_pfn)
+                       nr_lumpy_failed++;
        }
 
        *scanned = scan;
@@ -1253,7 +1317,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
                return false;
 
        /* Only stall on lumpy reclaim */
-       if (!sc->lumpy_reclaim_mode)
+       if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
                return false;
 
        /* If we have reclaimed everything on the isolated list, no stall */
@@ -1286,7 +1350,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        unsigned long nr_scanned;
        unsigned long nr_reclaimed = 0;
        unsigned long nr_taken;
-       unsigned long nr_active;
        unsigned long nr_anon;
        unsigned long nr_file;
 
@@ -1298,15 +1361,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                        return SWAP_CLUSTER_MAX;
        }
 
-
+       set_lumpy_reclaim_mode(priority, sc, false);
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
 
        if (scanning_global_lru(sc)) {
                nr_taken = isolate_pages_global(nr_to_scan,
                        &page_list, &nr_scanned, sc->order,
-                       sc->lumpy_reclaim_mode ?
-                               ISOLATE_BOTH : ISOLATE_INACTIVE,
+                       sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+                                       ISOLATE_INACTIVE : ISOLATE_BOTH,
                        zone, 0, file);
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
@@ -1318,8 +1381,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        } else {
                nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
                        &page_list, &nr_scanned, sc->order,
-                       sc->lumpy_reclaim_mode ?
-                               ISOLATE_BOTH : ISOLATE_INACTIVE,
+                       sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
+                                       ISOLATE_INACTIVE : ISOLATE_BOTH,
                        zone, sc->mem_cgroup,
                        0, file);
                /*
@@ -1337,20 +1400,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
        spin_unlock_irq(&zone->lru_lock);
 
-       nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+       nr_reclaimed = shrink_page_list(&page_list, zone, sc);
 
        /* Check if we should synchronously wait for writeback */
        if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
-               congestion_wait(BLK_RW_ASYNC, HZ/10);
-
-               /*
-                * The attempt at page out may have made some
-                * of the pages active, mark them inactive again.
-                */
-               nr_active = clear_active_flags(&page_list, NULL);
-               count_vm_events(PGDEACTIVATE, nr_active);
-
-               nr_reclaimed += shrink_page_list(&page_list, sc, PAGEOUT_IO_SYNC);
+               set_lumpy_reclaim_mode(priority, sc, true);
+               nr_reclaimed += shrink_page_list(&page_list, zone, sc);
        }
 
        local_irq_disable();
@@ -1359,6 +1414,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
 
        putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
+
+       trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
+               zone_idx(zone),
+               nr_scanned, nr_reclaimed,
+               priority,
+               trace_shrink_flags(file, sc->lumpy_reclaim_mode));
        return nr_reclaimed;
 }
 
@@ -1506,6 +1567,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        spin_unlock_irq(&zone->lru_lock);
 }
 
+#ifdef CONFIG_SWAP
 static int inactive_anon_is_low_global(struct zone *zone)
 {
        unsigned long active, inactive;
@@ -1531,12 +1593,26 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
 {
        int low;
 
+       /*
+        * If we don't have swap space, anonymous page deactivation
+        * is pointless.
+        */
+       if (!total_swap_pages)
+               return 0;
+
        if (scanning_global_lru(sc))
                low = inactive_anon_is_low_global(zone);
        else
                low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
        return low;
 }
+#else
+static inline int inactive_anon_is_low(struct zone *zone,
+                                       struct scan_control *sc)
+{
+       return 0;
+}
+#endif
 
 static int inactive_file_is_low_global(struct zone *zone)
 {
@@ -1721,21 +1797,6 @@ out:
        }
 }
 
-static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
-{
-       /*
-        * If we need a large contiguous chunk of memory, or have
-        * trouble getting a small set of contiguous pages, we
-        * will reclaim both active and inactive pages.
-        */
-       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-               sc->lumpy_reclaim_mode = 1;
-       else if (sc->order && priority < DEF_PRIORITY - 2)
-               sc->lumpy_reclaim_mode = 1;
-       else
-               sc->lumpy_reclaim_mode = 0;
-}
-
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -1750,8 +1811,6 @@ static void shrink_zone(int priority, struct zone *zone,
 
        get_scan_count(zone, sc, nr, priority);
 
-       set_lumpy_reclaim_mode(priority, sc);
-
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
                for_each_evictable_lru(l) {
@@ -1782,7 +1841,7 @@ static void shrink_zone(int priority, struct zone *zone,
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
+       if (inactive_anon_is_low(zone, sc))
                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
        throttle_vm_writeout(sc->gfp_mask);
@@ -1937,21 +1996,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
                /* Take a nap, wait for some writeback to complete */
                if (!sc->hibernation_mode && sc->nr_scanned &&
-                   priority < DEF_PRIORITY - 2)
-                       congestion_wait(BLK_RW_ASYNC, HZ/10);
+                   priority < DEF_PRIORITY - 2) {
+                       struct zone *preferred_zone;
+
+                       first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
+                                                       NULL, &preferred_zone);
+                       wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
+               }
        }
 
 out:
-       /*
-        * Now that we've scanned all the zones at this priority level, note
-        * that level within the zone so that the next thread which performs
-        * scanning of this zone will immediately start out at this priority
-        * level.  This affects only the decision whether or not to bring
-        * mapped pages onto the inactive list.
-        */
-       if (priority < 0)
-               priority = 0;
-
        delayacct_freepages_end();
        put_mems_allowed();
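Where the old code slept unconditionally in congestion_wait(), wait_iff_congested() only sleeps when the preferred zone is actually flagged congested; otherwise it just yields the CPU. A simplified sketch of the intended semantics (nr_bdi_congested and zone_is_reclaim_congested() are the existing backing-dev/zone helpers; the real function waits on the congestion queue directly rather than delegating):

	long wait_iff_congested(struct zone *zone, int sync, long timeout)
	{
		unsigned long start = jiffies;

		/* No congested BDIs, or this zone is not marked congested:
		 * do not sleep, just give up the CPU briefly. */
		if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
		    !zone_is_reclaim_congested(zone)) {
			cond_resched();
			return max_t(long, 0, timeout - (jiffies - start));
		}

		/* Otherwise behave like the old congestion_wait() */
		return congestion_wait(sync, timeout);
	}
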
 
@@ -2247,6 +2301,15 @@ loop_again:
                                if (!zone_watermark_ok(zone, order,
                                            min_wmark_pages(zone), end_zone, 0))
                                        has_under_min_watermark_zone = 1;
+                       } else {
+                               /*
+                                * If a zone reaches its high watermark,
+                                * consider it to be no longer congested. It's
+                                * possible there are dirty pages backed by
+                                * congested BDIs but as pressure is relieved,
+                                * speculatively avoid congestion waits
+                                */
+                               zone_clear_flag(zone, ZONE_CONGESTED);
                        }
 
                }
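ZONE_CONGESTED is an ordinary bit in zone->flags; clearing it here lets wait_iff_congested() skip its sleep once the zone is balanced again. The flag helpers follow the existing mmzone.h pattern, roughly:

	static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
	{
		set_bit(flag, &zone->flags);
	}

	static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
	{
		clear_bit(flag, &zone->flags);
	}
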
@@ -2987,6 +3050,7 @@ int scan_unevictable_handler(struct ctl_table *table, int write,
        return 0;
 }
 
+#ifdef CONFIG_NUMA
 /*
  * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
  * a specified node's per zone unevictable lists for evictable pages.
@@ -3033,4 +3097,4 @@ void scan_unevictable_unregister_node(struct node *node)
 {
        sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
-
+#endif
index 355a9e669aaa800d62fa31d2b83110bf76cce9d7..cd2e42be7b68f73dc60f40631f2b9f87708d3b47 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/vmstat.h>
 #include <linux/sched.h>
 #include <linux/math64.h>
+#include <linux/writeback.h>
+#include <linux/compaction.h>
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -394,6 +396,7 @@ void zone_statistics(struct zone *preferred_zone, struct zone *z)
 #endif
 
 #ifdef CONFIG_COMPACTION
+
 struct contig_page_info {
        unsigned long free_pages;
        unsigned long free_blocks_total;
@@ -745,6 +748,11 @@ static const char * const vmstat_text[] = {
        "nr_isolated_anon",
        "nr_isolated_file",
        "nr_shmem",
+       "nr_dirtied",
+       "nr_written",
+       "nr_dirty_threshold",
+       "nr_dirty_background_threshold",
+
 #ifdef CONFIG_NUMA
        "numa_hit",
        "numa_miss",
@@ -904,36 +912,44 @@ static const struct file_operations proc_zoneinfo_file_operations = {
        .release        = seq_release,
 };
 
+enum writeback_stat_item {
+       NR_DIRTY_THRESHOLD,
+       NR_DIRTY_BG_THRESHOLD,
+       NR_VM_WRITEBACK_STAT_ITEMS,
+};
+
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 {
        unsigned long *v;
-#ifdef CONFIG_VM_EVENT_COUNTERS
-       unsigned long *e;
-#endif
-       int i;
+       int i, stat_items_size;
 
        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
+       stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
+                         NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
-       v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
-                       + sizeof(struct vm_event_state), GFP_KERNEL);
-#else
-       v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
-                       GFP_KERNEL);
+       stat_items_size += sizeof(struct vm_event_state);
 #endif
+
+       v = kmalloc(stat_items_size, GFP_KERNEL);
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
+       v += NR_VM_ZONE_STAT_ITEMS;
+
+       global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
+                           v + NR_DIRTY_THRESHOLD);
+       v += NR_VM_WRITEBACK_STAT_ITEMS;
+
 #ifdef CONFIG_VM_EVENT_COUNTERS
-       e = v + NR_VM_ZONE_STAT_ITEMS;
-       all_vm_events(e);
-       e[PGPGIN] /= 2;         /* sectors -> kbytes */
-       e[PGPGOUT] /= 2;
+       all_vm_events(v);
+       v[PGPGIN] /= 2;         /* sectors -> kbytes */
+       v[PGPGOUT] /= 2;
 #endif
-       return v + *pos;
+       return m->private + *pos;
 }
 
 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
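After this change the /proc/vmstat snapshot buffer is laid out as three consecutive sections, [zone counters][dirty thresholds][vm events], and v is advanced past each section as it is filled; that is why the function must return m->private + *pos rather than v + *pos (v no longer points at the start of the buffer). The thresholds come from the existing writeback helper; usage sketch:

	unsigned long background_thresh, dirty_thresh;

	/* Fills in the background and foreground dirty limits, in pages */
	global_dirty_limits(&background_thresh, &dirty_thresh);
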
index 7f67c072d4969670a0355c7aaad033dbdc651a5f..ee3cd280c76e9cfb58024fa22f3017352f0467bc 100644 (file)
@@ -377,7 +377,7 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
                  &socket_file_ops);
        if (unlikely(!file)) {
                /* drop dentry, keep inode */
-               atomic_inc(&path.dentry->d_inode->i_count);
+               ihold(path.dentry->d_inode);
                path_put(&path);
                put_unused_fd(fd);
                return -ENFILE;
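The open-coded atomic_inc(&inode->i_count) becomes ihold(), so inode refcounting stays behind one auditable interface. Its definition is not part of this excerpt; it is presumably the trivial fs/inode.c helper along these lines:

	void ihold(struct inode *inode)
	{
		/* The caller must already hold a reference */
		WARN_ON(atomic_inc_return(&inode->i_count) < 2);
	}
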
@@ -480,6 +480,7 @@ static struct socket *sock_alloc(void)
        sock = SOCKET_I(inode);
 
        kmemcheck_annotate_bitfield(sock, type);
+       inode->i_ino = get_next_ino();
        inode->i_mode = S_IFSOCK | S_IRWXUGO;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
index 52f252432144ab439c9083acf20e8d487fbbd12d..7df92d237cb8103171a892f49a98aaca1cd05e49 100644 (file)
@@ -445,6 +445,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
        struct inode *inode = new_inode(sb);
        if (!inode)
                return NULL;
+       inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        switch(mode & S_IFMT) {
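Both this hunk and the sock_alloc() one above switch a pseudo filesystem from the old global inode-number counter in new_inode() to get_next_ino(), which hands out numbers from a per-CPU batch to avoid cacheline contention. A sketch of the scheme, assuming the fs/inode.c implementation introduced alongside these callers:

	#define LAST_INO_BATCH 1024
	static DEFINE_PER_CPU(unsigned int, last_ino);

	unsigned int get_next_ino(void)
	{
		unsigned int *p = &get_cpu_var(last_ino);
		unsigned int res = *p;

	#ifdef CONFIG_SMP
		/* Refill this CPU's range from a shared counter once per batch */
		if (unlikely((res & (LAST_INO_BATCH - 1)) == 0)) {
			static atomic_t shared_last_ino;
			int next = atomic_add_return(LAST_INO_BATCH,
						     &shared_last_ino);

			res = next - LAST_INO_BATCH;
		}
	#endif

		*p = ++res;
		put_cpu_var(last_ino);
		return res;
	}
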
index 0ebc777a66601e05d48cc8805e57cb957d9512d4..3c95304a08174f550f64f36346ce031419f6a4c8 100644 (file)
 
 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 static DEFINE_SPINLOCK(unix_table_lock);
-static atomic_t unix_nr_socks = ATOMIC_INIT(0);
+static atomic_long_t unix_nr_socks;
 
 #define unix_sockets_unbound   (&unix_socket_table[UNIX_HASH_SIZE])
 
@@ -360,13 +360,13 @@ static void unix_sock_destructor(struct sock *sk)
        if (u->addr)
                unix_release_addr(u->addr);
 
-       atomic_dec(&unix_nr_socks);
+       atomic_long_dec(&unix_nr_socks);
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
 #ifdef UNIX_REFCNT_DEBUG
-       printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
-               atomic_read(&unix_nr_socks));
+       printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+               atomic_long_read(&unix_nr_socks));
 #endif
 }
 
@@ -606,8 +606,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
        struct sock *sk = NULL;
        struct unix_sock *u;
 
-       atomic_inc(&unix_nr_socks);
-       if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
+       atomic_long_inc(&unix_nr_socks);
+       if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
                goto out;
 
        sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
@@ -632,7 +632,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
        unix_insert_socket(unix_sockets_unbound, sk);
 out:
        if (sk == NULL)
-               atomic_dec(&unix_nr_socks);
+               atomic_long_dec(&unix_nr_socks);
        else {
                local_bh_disable();
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
index 2039acdf51222b3961b06291d6a02f4a8105a240..90b54d4697fd2de9ae1dcecc80fc25cdcab63d21 100755 (executable)
@@ -2,7 +2,7 @@
 # (c) 2001, Dave Jones. (the file handling bit)
 # (c) 2005, Joel Schopp <jschopp@austin.ibm.com> (the ugly bit)
 # (c) 2007,2008, Andy Whitcroft <apw@uk.ibm.com> (new conditions, test suite)
-# (c) 2008,2009, Andy Whitcroft <apw@canonical.com>
+# (c) 2008-2010 Andy Whitcroft <apw@canonical.com>
 # Licensed under the terms of the GNU GPL License version 2
 
 use strict;
@@ -10,7 +10,7 @@ use strict;
 my $P = $0;
 $P =~ s@.*/@@g;
 
-my $V = '0.30';
+my $V = '0.31';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -103,6 +103,8 @@ for my $key (keys %debug) {
        die "$@" if ($@);
 }
 
+my $rpt_cleaners = 0;
+
 if ($terse) {
        $emacs = 1;
        $quiet++;
@@ -150,6 +152,20 @@ our $Sparse        = qr{
 # We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
 our $Attribute = qr{
                        const|
+                       __percpu|
+                       __nocast|
+                       __safe|
+                       __bitwise__|
+                       __packed__|
+                       __packed2__|
+                       __naked|
+                       __maybe_unused|
+                       __always_unused|
+                       __noreturn|
+                       __used|
+                       __cold|
+                       __noclone|
+                       __deprecated|
                        __read_mostly|
                        __kprobes|
                        __(?:mem|cpu|dev|)(?:initdata|initconst|init\b)|
@@ -675,15 +691,15 @@ sub ctx_block_get {
                $blk .= $rawlines[$line];
 
                # Handle nested #if/#else.
-               if ($rawlines[$line] =~ /^.\s*#\s*(?:ifndef|ifdef|if)\s/) {
+               if ($lines[$line] =~ /^.\s*#\s*(?:ifndef|ifdef|if)\s/) {
                        push(@stack, $level);
-               } elsif ($rawlines[$line] =~ /^.\s*#\s*(?:else|elif)\b/) {
+               } elsif ($lines[$line] =~ /^.\s*#\s*(?:else|elif)\b/) {
                        $level = $stack[$#stack - 1];
-               } elsif ($rawlines[$line] =~ /^.\s*#\s*endif\b/) {
+               } elsif ($lines[$line] =~ /^.\s*#\s*endif\b/) {
                        $level = pop(@stack);
                }
 
-               foreach my $c (split(//, $rawlines[$line])) {
+               foreach my $c (split(//, $lines[$line])) {
                        ##print "C<$c>L<$level><$open$close>O<$off>\n";
                        if ($off > 0) {
                                $off--;
@@ -843,7 +859,12 @@ sub annotate_values {
                                $av_preprocessor = 0;
                        }
 
-               } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\()/) {
+               } elsif ($cur =~ /^(\(\s*$Type\s*)\)/) {
+                       print "CAST($1)\n" if ($dbg_values > 1);
+                       push(@av_paren_type, $type);
+                       $type = 'C';
+
+               } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\(|\s*$)/) {
                        print "DECLARE($1)\n" if ($dbg_values > 1);
                        $type = 'T';
 
@@ -1308,7 +1329,11 @@ sub process {
                $here = "#$realline: " if ($file);
 
                # extract the filename as it passes
-               if ($line=~/^\+\+\+\s+(\S+)/) {
+               if ($line =~ /^diff --git.*?(\S+)$/) {
+                       $realfile = $1;
+                       $realfile =~ s@^([^/]*)/@@;
+
+               } elsif ($line =~ /^\+\+\+\s+(\S+)/) {
                        $realfile = $1;
                        $realfile =~ s@^([^/]*)/@@;
 
@@ -1332,6 +1357,14 @@ sub process {
 
                $cnt_lines++ if ($realcnt != 0);
 
+# Check for incorrect file permissions
+               if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) {
+                       my $permhere = $here . "FILE: $realfile\n";
+                       if ($realfile =~ /(Makefile|Kconfig|\.c|\.h|\.S|\.tmpl)$/) {
+                               ERROR("do not set execute permissions for source files\n" . $permhere);
+                       }
+               }
+
 #check the patch for a signoff:
                if ($line =~ /^\s*signed-off-by:/i) {
                        # This is a signoff, if ugly, so do not double report.
@@ -1389,21 +1422,38 @@ sub process {
                } elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
                        my $herevet = "$here\n" . cat_vet($rawline) . "\n";
                        ERROR("trailing whitespace\n" . $herevet);
+                       $rpt_cleaners = 1;
                }
 
 # check for Kconfig help text having a real description
+# Only applies when adding the entry originally; after that we do not have
+# sufficient context to determine whether it is indeed long enough.
                if ($realfile =~ /Kconfig/ &&
-                   $line =~ /\+?\s*(---)?help(---)?$/) {
+                   $line =~ /\+\s*(?:---)?help(?:---)?$/) {
                        my $length = 0;
-                       for (my $l = $linenr; defined($lines[$l]); $l++) {
-                               my $f = $lines[$l];
+                       my $cnt = $realcnt;
+                       my $ln = $linenr + 1;
+                       my $f;
+                       my $is_end = 0;
+                       while ($cnt > 0 && defined $lines[$ln - 1]) {
+                               $f = $lines[$ln - 1];
+                               $cnt-- if ($lines[$ln - 1] !~ /^-/);
+                               $is_end = $lines[$ln - 1] =~ /^\+/;
+                               $ln++;
+
+                               next if ($f =~ /^-/);
+                               $f =~ s/^.//;
                                $f =~ s/#.*//;
                                $f =~ s/^\s+//;
                                next if ($f =~ /^$/);
-                               last if ($f =~ /^\s*config\s/);
+                               if ($f =~ /^\s*config\s/) {
+                                       $is_end = 1;
+                                       last;
+                               }
                                $length++;
                        }
-                       WARN("please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($length < 4);
+                       WARN("please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($is_end && $length < 4);
+                       #print "is_end<$is_end> length<$length>\n";
                }
 
 # check we are in a valid source file if not then ignore this hunk
@@ -1450,6 +1500,7 @@ sub process {
                    $rawline =~ /^\+\s*        \s*/) {
                        my $herevet = "$here\n" . cat_vet($rawline) . "\n";
                        ERROR("code indent should use tabs where possible\n" . $herevet);
+                       $rpt_cleaners = 1;
                }
 
 # check for space before tabs.
@@ -1459,10 +1510,13 @@ sub process {
                }
 
 # check for spaces at the beginning of a line.
-               if ($rawline =~ /^\+ / && $rawline !~ /\+ +\*/)  {
+# Exceptions:
+#  1) within comments
+#  2) indented preprocessor commands
+#  3) hanging labels
+               if ($rawline =~ /^\+ / && $line !~ /\+ *(?:$;|#|$Ident:)/)  {
                        my $herevet = "$here\n" . cat_vet($rawline) . "\n";
-                       WARN("please, no space for starting a line, \
-                               excluding comments\n" . $herevet);
+                       WARN("please, no spaces at the start of a line\n" . $herevet);
                }
 
 # check we are in a valid C source file if not then ignore this hunk
@@ -1598,7 +1652,7 @@ sub process {
 
                        if ($ctx !~ /{\s*/ && defined($lines[$ctx_ln -1]) && $lines[$ctx_ln - 1] =~ /^\+\s*{/) {
                                ERROR("that open brace { should be on the previous line\n" .
-                                       "$here\n$ctx\n$lines[$ctx_ln - 1]\n");
+                                       "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
                        }
                        if ($level == 0 && $pre_ctx !~ /}\s*while\s*\($/ &&
                            $ctx =~ /\)\s*\;\s*$/ &&
@@ -1607,7 +1661,7 @@ sub process {
                                my ($nlength, $nindent) = line_stats($lines[$ctx_ln - 1]);
                                if ($nindent > $indent) {
                                        WARN("trailing semicolon indicates no statements, indent implies otherwise\n" .
-                                               "$here\n$ctx\n$lines[$ctx_ln - 1]\n");
+                                               "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
                                }
                        }
                }
@@ -1768,8 +1822,17 @@ sub process {
                    !defined $suppress_export{$realline_next} &&
                    ($lines[$realline_next - 1] =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
                     $lines[$realline_next - 1] =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
+                       # Handle definitions which produce identifiers with
+                       # a prefix:
+                       #   XXX(foo);
+                       #   EXPORT_SYMBOL(something_foo);
                        my $name = $1;
-                       if ($stat !~ /(?:
+                       if ($stat =~ /^.([A-Z_]+)\s*\(\s*($Ident)/ &&
+                           $name =~ /^${Ident}_$2/) {
+#print "FOO C name<$name>\n";
+                               $suppress_export{$realline_next} = 1;
+
+                       } elsif ($stat !~ /(?:
                                \n.}\s*$|
                                ^.DEFINE_$Ident\(\Q$name\E\)|
                                ^.DECLARE_$Ident\(\Q$name\E\)|
@@ -1806,6 +1869,23 @@ sub process {
                                $herecurr);
                }
 
+# check for static const char * arrays.
+               if ($line =~ /\bstatic\s+const\s+char\s*\*\s*(\w+)\s*\[\s*\]\s*=\s*/) {
+                       WARN("static const char * array should probably be static const char * const\n" .
+                               $herecurr);
+               }
+
+# check for static char foo[] = "bar" declarations.
+               if ($line =~ /\bstatic\s+char\s+(\w+)\s*\[\s*\]\s*=\s*"/) {
+                       WARN("static char array declaration should probably be static const char\n" .
+                               $herecurr);
+               }
+
+# check for declarations of struct pci_device_id
+               if ($line =~ /\bstruct\s+pci_device_id\s+\w+\s*\[\s*\]\s*\=\s*\{/) {
+                       WARN("Use DEFINE_PCI_DEVICE_TABLE for struct pci_device_id\n" . $herecurr);
+               }
+
 # check for new typedefs, only function parameters and sparse annotations
 # make sense.
                if ($line =~ /\btypedef\s/ &&
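The three new checks warn on string-pointer arrays, writable string buffers, and open-coded PCI ID tables. Illustrative (hypothetical) declarations that would trip them, with the preferred forms:

	/* flagged */
	static const char *names[] = { "a", "b" };	/* wants "* const" */
	static char version[] = "1.0";			/* wants const */
	static struct pci_device_id ids[] = { { 0, } };

	/* preferred */
	static const char * const names2[] = { "a", "b" };
	static const char version2[] = "1.0";
	static DEFINE_PCI_DEVICE_TABLE(ids2) = { { 0, } };
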
@@ -1899,6 +1979,11 @@ sub process {
                        ERROR("open brace '{' following $1 go on the same line\n" . $hereprev);
                }
 
+# missing space after union, struct or enum definition
+               if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?(?:\s+$Ident)?[=\{]/) {
+                   WARN("missing space after $1 definition\n" . $herecurr);
+               }
+
 # check for spacing round square brackets; allowed:
 #  1. with a type on the left -- int [] a;
 #  2. at the beginning of a line for slice initialisers -- [0...10] = 5,
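An illustrative pair for the new struct/union/enum spacing check:

	struct bad{			/* WARN: missing space after struct definition */
		int x;
	};

	struct good {			/* fine */
		int x;
	};
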
@@ -2176,21 +2261,29 @@ sub process {
                        my $value = $2;
 
                        # Flatten any parentheses
-                       $value =~ s/\)\(/\) \(/g;
+                       $value =~ s/\(/ \(/g;
+                       $value =~ s/\)/\) /g;
                        while ($value =~ s/\[[^\{\}]*\]/1/ ||
                               $value !~ /(?:$Ident|-?$Constant)\s*
                                             $Compare\s*
                                             (?:$Ident|-?$Constant)/x &&
                               $value =~ s/\([^\(\)]*\)/1/) {
                        }
-
-                       if ($value =~ /^(?:$Ident|-?$Constant)$/) {
+#print "value<$value>\n";
+                       if ($value =~ /^\s*(?:$Ident|-?$Constant)\s*$/) {
                                ERROR("return is not a function, parentheses are not required\n" . $herecurr);
 
                        } elsif ($spacing !~ /\s+/) {
                                ERROR("space required before the open parenthesis '('\n" . $herecurr);
                        }
                }
+# Return of what appears to be an errno should normally be -'ve
+               if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) {
+                       my $name = $1;
+                       if ($name ne 'EOF' && $name ne 'ERROR') {
+                               WARN("return of an errno should typically be -ve (return -$1)\n" . $herecurr);
+                       }
+               }
 
 # Need a space before open parenthesis after if, while etc
                if ($line=~/\b(if|while|for|switch)\(/) {
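Two new value checks on return statements: a bare value needs no parentheses, and a bare positive errno is almost always a bug (the names EOF and ERROR are exempted since they are not errnos). Illustrative cases:

	return (ret);		/* ERROR: parentheses are not required */
	return EINVAL;		/* WARN: should typically be return -EINVAL */
	return -EINVAL;		/* fine */
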
@@ -2409,8 +2502,8 @@ sub process {
                                \.$Ident\s*=\s*|
                                ^\"|\"$
                        }x;
-                       #print "REST<$rest> dstat<$dstat>\n";
-                       if ($rest ne '') {
+                       #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
+                       if ($rest ne '' && $rest ne ',') {
                                if ($rest !~ /while\s*\(/ &&
                                    $dstat !~ /$exceptions/)
                                {
@@ -2839,6 +2932,15 @@ sub process {
                print "\n" if ($quiet == 0);
        }
 
+       if ($quiet == 0) {
+               # If there were whitespace errors which cleanpatch can fix
+               # then suggest that.
+               if ($rpt_cleaners) {
+                       print "NOTE: whitespace errors detected, you may wish to use scripts/cleanpatch or\n";
+                       print "      scripts/cleanfile\n\n";
+               }
+       }
+
        if ($clean == 1 && $quiet == 0) {
                print "$vname has no obvious style problems and is ready for submission.\n"
        }
index b2281982f52f1a156f3eb5b321d3ec9a5fcaee0f..d21ec3a89603b0fa8155671587deaebaa52b6163 100755 (executable)
@@ -13,7 +13,7 @@
 use strict;
 
 my $P = $0;
-my $V = '0.24';
+my $V = '0.26-beta6';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -24,15 +24,19 @@ my $email_maintainer = 1;
 my $email_list = 1;
 my $email_subscriber_list = 0;
 my $email_git_penguin_chiefs = 0;
-my $email_git = 1;
+my $email_git = 0;
 my $email_git_all_signature_types = 0;
 my $email_git_blame = 0;
+my $email_git_blame_signatures = 1;
+my $email_git_fallback = 1;
 my $email_git_min_signatures = 1;
 my $email_git_max_maintainers = 5;
 my $email_git_min_percent = 5;
 my $email_git_since = "1-year-ago";
 my $email_hg_since = "-365";
+my $interactive = 0;
 my $email_remove_duplicates = 1;
+my $email_use_mailmap = 1;
 my $output_multiline = 1;
 my $output_separator = ", ";
 my $output_roles = 0;
@@ -49,8 +53,13 @@ my $pattern_depth = 0;
 my $version = 0;
 my $help = 0;
 
+my $vcs_used = 0;
+
 my $exit = 0;
 
+my %commit_author_hash;
+my %commit_signer_hash;
+
 my @penguin_chief = ();
 push(@penguin_chief, "Linus Torvalds:torvalds\@linux-foundation.org");
 #Andrew wants in on most everything - 2009/01/14
@@ -73,7 +82,6 @@ my @signature_tags = ();
 push(@signature_tags, "Signed-off-by:");
 push(@signature_tags, "Reviewed-by:");
 push(@signature_tags, "Acked-by:");
-my $signaturePattern = "\(" . join("|", @signature_tags) . "\)";
 
 # rfc822 email address - preloaded methods go here.
 my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
@@ -86,31 +94,70 @@ my %VCS_cmds;
 my %VCS_cmds_git = (
     "execute_cmd" => \&git_execute_cmd,
     "available" => '(which("git") ne "") && (-d ".git")',
-    "find_signers_cmd" => "git log --no-color --since=\$email_git_since -- \$file",
-    "find_commit_signers_cmd" => "git log --no-color -1 \$commit",
+    "find_signers_cmd" =>
+       "git log --no-color --since=\$email_git_since " .
+           '--format="GitCommit: %H%n' .
+                     'GitAuthor: %an <%ae>%n' .
+                     'GitDate: %aD%n' .
+                     'GitSubject: %s%n' .
+                     '%b%n"' .
+           " -- \$file",
+    "find_commit_signers_cmd" =>
+       "git log --no-color " .
+           '--format="GitCommit: %H%n' .
+                     'GitAuthor: %an <%ae>%n' .
+                     'GitDate: %aD%n' .
+                     'GitSubject: %s%n' .
+                     '%b%n"' .
+           " -1 \$commit",
+    "find_commit_author_cmd" =>
+       "git log --no-color " .
+           '--format="GitCommit: %H%n' .
+                     'GitAuthor: %an <%ae>%n' .
+                     'GitDate: %aD%n' .
+                     'GitSubject: %s%n"' .
+           " -1 \$commit",
     "blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file",
     "blame_file_cmd" => "git blame -l \$file",
-    "commit_pattern" => "^commit [0-9a-f]{40,40}",
-    "blame_commit_pattern" => "^([0-9a-f]+) "
+    "commit_pattern" => "^GitCommit: ([0-9a-f]{40,40})",
+    "blame_commit_pattern" => "^([0-9a-f]+) ",
+    "author_pattern" => "^GitAuthor: (.*)",
+    "subject_pattern" => "^GitSubject: (.*)",
 );
 
 my %VCS_cmds_hg = (
     "execute_cmd" => \&hg_execute_cmd,
     "available" => '(which("hg") ne "") && (-d ".hg")',
     "find_signers_cmd" =>
-       "hg log --date=\$email_hg_since" .
-               " --template='commit {node}\\n{desc}\\n' -- \$file",
-    "find_commit_signers_cmd" => "hg log --template='{desc}\\n' -r \$commit",
+       "hg log --date=\$email_hg_since " .
+           "--template='HgCommit: {node}\\n" .
+                       "HgAuthor: {author}\\n" .
+                       "HgSubject: {desc}\\n'" .
+           " -- \$file",
+    "find_commit_signers_cmd" =>
+       "hg log " .
+           "--template='HgSubject: {desc}\\n'" .
+           " -r \$commit",
+    "find_commit_author_cmd" =>
+       "hg log " .
+           "--template='HgCommit: {node}\\n" .
+                       "HgAuthor: {author}\\n" .
+                       "HgSubject: {desc|firstline}\\n'" .
+           " -r \$commit",
     "blame_range_cmd" => "",           # not supported
-    "blame_file_cmd" => "hg blame -c \$file",
-    "commit_pattern" => "^commit [0-9a-f]{40,40}",
-    "blame_commit_pattern" => "^([0-9a-f]+):"
+    "blame_file_cmd" => "hg blame -n \$file",
+    "commit_pattern" => "^HgCommit: ([0-9a-f]{40,40})",
+    "blame_commit_pattern" => "^([ 0-9a-f]+):",
+    "author_pattern" => "^HgAuthor: (.*)",
+    "subject_pattern" => "^HgSubject: (.*)",
 );
 
-if (-f "${lk_path}.get_maintainer.conf") {
+my $conf = which_conf(".get_maintainer.conf");
+if (-f $conf) {
     my @conf_args;
-    open(my $conffile, '<', "${lk_path}.get_maintainer.conf")
-       or warn "$P: Can't open .get_maintainer.conf: $!\n";
+    open(my $conffile, '<', "$conf")
+       or warn "$P: Can't find a readable .get_maintainer.conf file $!\n";
+
     while (<$conffile>) {
        my $line = $_;
 
@@ -136,13 +183,17 @@ if (!GetOptions(
                'git!' => \$email_git,
                'git-all-signature-types!' => \$email_git_all_signature_types,
                'git-blame!' => \$email_git_blame,
+               'git-blame-signatures!' => \$email_git_blame_signatures,
+               'git-fallback!' => \$email_git_fallback,
                'git-chief-penguins!' => \$email_git_penguin_chiefs,
                'git-min-signatures=i' => \$email_git_min_signatures,
                'git-max-maintainers=i' => \$email_git_max_maintainers,
                'git-min-percent=i' => \$email_git_min_percent,
                'git-since=s' => \$email_git_since,
                'hg-since=s' => \$email_hg_since,
+               'i|interactive!' => \$interactive,
                'remove-duplicates!' => \$email_remove_duplicates,
+               'mailmap!' => \$email_use_mailmap,
                'm!' => \$email_maintainer,
                'n!' => \$email_usename,
                'l!' => \$email_list,
@@ -181,13 +232,9 @@ if (-t STDIN && !@ARGV) {
     die "$P: missing patchfile or -f file - use --help if necessary\n";
 }
 
-if ($output_separator ne ", ") {
-    $output_multiline = 0;
-}
-
-if ($output_rolestats) {
-    $output_roles = 1;
-}
+$output_multiline = 0 if ($output_separator ne ", ");
+$output_rolestats = 1 if ($interactive);
+$output_roles = 1 if ($output_rolestats);
 
 if ($sections) {
     $email = 0;
@@ -197,6 +244,7 @@ if ($sections) {
     $subsystem = 0;
     $web = 0;
     $keywords = 0;
+    $interactive = 0;
 } else {
     my $selections = $email + $scm + $status + $subsystem + $web;
     if ($selections == 0) {
@@ -215,10 +263,6 @@ if (!top_of_kernel_tree($lk_path)) {
        . "a linux kernel source tree.\n";
 }
 
-if ($email_git_all_signature_types) {
-    $signaturePattern = "(.+?)[Bb][Yy]:";
-}
-
 ## Read MAINTAINERS for type/value pairs
 
 my @typevalue = ();
@@ -253,31 +297,82 @@ while (<$maint>) {
 }
 close($maint);
 
-my %mailmap;
 
-if ($email_remove_duplicates) {
-    open(my $mailmap, '<', "${lk_path}.mailmap")
-       or warn "$P: Can't open .mailmap: $!\n";
-    while (<$mailmap>) {
-       my $line = $_;
+#
+# Read mail address map
+#
 
-       next if ($line =~ m/^\s*#/);
-       next if ($line =~ m/^\s*$/);
+my $mailmap;
 
-       my ($name, $address) = parse_email($line);
-       $line = format_email($name, $address, $email_usename);
+read_mailmap();
 
-       next if ($line =~ m/^\s*$/);
+sub read_mailmap {
+    $mailmap = {
+       names => {},
+       addresses => {}
+    };
 
-       if (exists($mailmap{$name})) {
-           my $obj = $mailmap{$name};
-           push(@$obj, $address);
-       } else {
-           my @arr = ($address);
-           $mailmap{$name} = \@arr;
+    return if (!$email_use_mailmap || !(-f "${lk_path}.mailmap"));
+
+    open(my $mailmap_file, '<', "${lk_path}.mailmap")
+       or warn "$P: Can't open .mailmap: $!\n";
+
+    while (<$mailmap_file>) {
+       s/#.*$//; #strip comments
+       s/^\s+|\s+$//g; #trim
+
+       next if (/^\s*$/); #skip empty lines
+       #entries have one of the following formats:
+       # name1 <mail1>
+       # <mail1> <mail2>
+       # name1 <mail1> <mail2>
+       # name1 <mail1> name2 <mail2>
+       # (see man git-shortlog)
+       if (/^(.+)<(.+)>$/) {
+           my $real_name = $1;
+           my $address = $2;
+
+           $real_name =~ s/\s+$//;
+           ($real_name, $address) = parse_email("$real_name <$address>");
+           $mailmap->{names}->{$address} = $real_name;
+
+       } elsif (/^<([^\s]+)>\s*<([^\s]+)>$/) {
+           my $real_address = $1;
+           my $wrong_address = $2;
+
+           $mailmap->{addresses}->{$wrong_address} = $real_address;
+
+       } elsif (/^(.+)<([^\s]+)>\s*<([^\s]+)>$/) {
+           my $real_name = $1;
+           my $real_address = $2;
+           my $wrong_address = $3;
+
+           $real_name =~ s/\s+$//;
+           ($real_name, $real_address) =
+               parse_email("$real_name <$real_address>");
+           $mailmap->{names}->{$wrong_address} = $real_name;
+           $mailmap->{addresses}->{$wrong_address} = $real_address;
+
+       } elsif (/^(.+)<([^\s]+)>\s*([^\s].*)<([^\s]+)>$/) {
+           my $real_name = $1;
+           my $real_address = $2;
+           my $wrong_name = $3;
+           my $wrong_address = $4;
+
+           $real_name =~ s/\s+$//;
+           ($real_name, $real_address) =
+               parse_email("$real_name <$real_address>");
+
+           $wrong_name =~ s/\s+$//;
+           ($wrong_name, $wrong_address) =
+               parse_email("$wrong_name <$wrong_address>");
+
+           my $wrong_email = format_email($wrong_name, $wrong_address, 1);
+           $mailmap->{names}->{$wrong_email} = $real_name;
+           $mailmap->{addresses}->{$wrong_email} = $real_address;
        }
     }
-    close($mailmap);
+    close($mailmap_file);
 }
 
 ## use the filenames on the command line or find the filenames in the patchfiles
@@ -302,7 +397,7 @@ foreach my $file (@ARGV) {
     }
     if ($from_filename) {
        push(@files, $file);
-       if (-f $file && ($keywords || $file_emails)) {
+       if ($file ne "MAINTAINERS" && -f $file && ($keywords || $file_emails)) {
            open(my $f, '<', $file)
                or die "$P: Can't open $file: $!\n";
            my $text = do { local($/) ; <$f> };
@@ -357,67 +452,127 @@ foreach my $file (@ARGV) {
 
 @file_emails = uniq(@file_emails);
 
+my %email_hash_name;
+my %email_hash_address;
 my @email_to = ();
+my %hash_list_to;
 my @list_to = ();
 my @scm = ();
 my @web = ();
 my @subsystem = ();
 my @status = ();
+my %deduplicate_name_hash = ();
+my %deduplicate_address_hash = ();
+my $signature_pattern;
 
-# Find responsible parties
+my @maintainers = get_maintainers();
 
-foreach my $file (@files) {
+if (@maintainers) {
+    @maintainers = merge_email(@maintainers);
+    output(@maintainers);
+}
 
-    my %hash;
-    my $tvi = find_first_section();
-    while ($tvi < @typevalue) {
-       my $start = find_starting_index($tvi);
-       my $end = find_ending_index($tvi);
-       my $exclude = 0;
-       my $i;
-
-       #Do not match excluded file patterns
-
-       for ($i = $start; $i < $end; $i++) {
-           my $line = $typevalue[$i];
-           if ($line =~ m/^(\C):\s*(.*)/) {
-               my $type = $1;
-               my $value = $2;
-               if ($type eq 'X') {
-                   if (file_match_pattern($file, $value)) {
-                       $exclude = 1;
-                       last;
-                   }
-               }
-           }
-       }
+if ($scm) {
+    @scm = uniq(@scm);
+    output(@scm);
+}
+
+if ($status) {
+    @status = uniq(@status);
+    output(@status);
+}
+
+if ($subsystem) {
+    @subsystem = uniq(@subsystem);
+    output(@subsystem);
+}
+
+if ($web) {
+    @web = uniq(@web);
+    output(@web);
+}
+
+exit($exit);
+
+sub get_maintainers {
+    %email_hash_name = ();
+    %email_hash_address = ();
+    %commit_author_hash = ();
+    %commit_signer_hash = ();
+    @email_to = ();
+    %hash_list_to = ();
+    @list_to = ();
+    @scm = ();
+    @web = ();
+    @subsystem = ();
+    @status = ();
+    %deduplicate_name_hash = ();
+    %deduplicate_address_hash = ();
+    if ($email_git_all_signature_types) {
+       $signature_pattern = "(.+?)[Bb][Yy]:";
+    } else {
+       $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
+    }
+
+    # Find responsible parties
+
+    my %exact_pattern_match_hash = ();
+
+    foreach my $file (@files) {
+
+       my %hash;
+       my $tvi = find_first_section();
+       while ($tvi < @typevalue) {
+           my $start = find_starting_index($tvi);
+           my $end = find_ending_index($tvi);
+           my $exclude = 0;
+           my $i;
+
+           #Do not match excluded file patterns
 
-       if (!$exclude) {
            for ($i = $start; $i < $end; $i++) {
                my $line = $typevalue[$i];
                if ($line =~ m/^(\C):\s*(.*)/) {
                    my $type = $1;
                    my $value = $2;
-                   if ($type eq 'F') {
+                   if ($type eq 'X') {
                        if (file_match_pattern($file, $value)) {
-                           my $value_pd = ($value =~ tr@/@@);
-                           my $file_pd = ($file  =~ tr@/@@);
-                           $value_pd++ if (substr($value,-1,1) ne "/");
-                           if ($pattern_depth == 0 ||
-                               (($file_pd - $value_pd) < $pattern_depth)) {
-                               $hash{$tvi} = $value_pd;
+                           $exclude = 1;
+                           last;
+                       }
+                   }
+               }
+           }
+
+           if (!$exclude) {
+               for ($i = $start; $i < $end; $i++) {
+                   my $line = $typevalue[$i];
+                   if ($line =~ m/^(\C):\s*(.*)/) {
+                       my $type = $1;
+                       my $value = $2;
+                       if ($type eq 'F') {
+                           if (file_match_pattern($file, $value)) {
+                               my $value_pd = ($value =~ tr@/@@);
+                               my $file_pd = ($file  =~ tr@/@@);
+                               $value_pd++ if (substr($value,-1,1) ne "/");
+                               $value_pd = -1 if ($value =~ /^\.\*/);
+                               if ($value_pd >= $file_pd) {
+                                   $exact_pattern_match_hash{$file} = 1;
+                               }
+                               if ($pattern_depth == 0 ||
+                                   (($file_pd - $value_pd) < $pattern_depth)) {
+                                   $hash{$tvi} = $value_pd;
+                               }
                            }
                        }
                    }
                }
            }
+           $tvi = $end + 1;
        }
 
-       $tvi = $end + 1;
-    }
-
-    foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
-       add_categories($line);
+       foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
+           add_categories($line);
            if ($sections) {
                my $i;
                my $start = find_starting_index($line);
@@ -435,80 +590,71 @@ foreach my $file (@files) {
                }
                print("\n");
            }
+       }
     }
 
-    if ($email && $email_git) {
-       vcs_file_signoffs($file);
+    if ($keywords) {
+       @keyword_tvi = sort_and_uniq(@keyword_tvi);
+       foreach my $line (@keyword_tvi) {
+           add_categories($line);
+       }
     }
 
-    if ($email && $email_git_blame) {
-       vcs_file_blame($file);
+    foreach my $email (@email_to, @list_to) {
+       $email->[0] = deduplicate_email($email->[0]);
     }
-}
 
-if ($keywords) {
-    @keyword_tvi = sort_and_uniq(@keyword_tvi);
-    foreach my $line (@keyword_tvi) {
-       add_categories($line);
+    foreach my $file (@files) {
+       if ($email &&
+           ($email_git || ($email_git_fallback &&
+                           !$exact_pattern_match_hash{$file}))) {
+           vcs_file_signoffs($file);
+       }
+       if ($email && $email_git_blame) {
+           vcs_file_blame($file);
+       }
     }
-}
 
-if ($email) {
-    foreach my $chief (@penguin_chief) {
-       if ($chief =~ m/^(.*):(.*)/) {
-           my $email_address;
+    if ($email) {
+       foreach my $chief (@penguin_chief) {
+           if ($chief =~ m/^(.*):(.*)/) {
+               my $email_address;
 
-           $email_address = format_email($1, $2, $email_usename);
-           if ($email_git_penguin_chiefs) {
-               push(@email_to, [$email_address, 'chief penguin']);
-           } else {
-               @email_to = grep($_->[0] !~ /${email_address}/, @email_to);
+               $email_address = format_email($1, $2, $email_usename);
+               if ($email_git_penguin_chiefs) {
+                   push(@email_to, [$email_address, 'chief penguin']);
+               } else {
+                   @email_to = grep($_->[0] !~ /${email_address}/, @email_to);
+               }
            }
        }
-    }
 
-    foreach my $email (@file_emails) {
-       my ($name, $address) = parse_email($email);
+       foreach my $email (@file_emails) {
+           my ($name, $address) = parse_email($email);
 
-       my $tmp_email = format_email($name, $address, $email_usename);
-       push_email_address($tmp_email, '');
-       add_role($tmp_email, 'in file');
+           my $tmp_email = format_email($name, $address, $email_usename);
+           push_email_address($tmp_email, '');
+           add_role($tmp_email, 'in file');
+       }
     }
-}
 
-if ($email || $email_list) {
     my @to = ();
-    if ($email) {
-       @to = (@to, @email_to);
-    }
-    if ($email_list) {
-       @to = (@to, @list_to);
+    if ($email || $email_list) {
+       if ($email) {
+           @to = (@to, @email_to);
+       }
+       if ($email_list) {
+           @to = (@to, @list_to);
+       }
     }
-    output(merge_email(@to));
-}
-
-if ($scm) {
-    @scm = uniq(@scm);
-    output(@scm);
-}
-
-if ($status) {
-    @status = uniq(@status);
-    output(@status);
-}
 
-if ($subsystem) {
-    @subsystem = uniq(@subsystem);
-    output(@subsystem);
-}
+    if ($interactive) {
+       @to = interactive_get_maintainers(\@to);
+    }
 
-if ($web) {
-    @web = uniq(@web);
-    output(@web);
+    return @to;
 }
 
-exit($exit);
-
 sub file_match_pattern {
     my ($file, $pattern) = @_;
     if (substr($pattern, -1) eq "/") {
@@ -537,7 +683,8 @@ MAINTAINER field selection options:
   --email => print email address(es) if any
     --git => include recent git \*-by: signers
     --git-all-signature-types => include signers regardless of signature type
-        or use only ${signaturePattern} signers (default: $email_git_all_signature_types)
+        or use only ${signature_pattern} signers (default: $email_git_all_signature_types)
+    --git-fallback => use git when no exact MAINTAINERS pattern (default: $email_git_fallback)
     --git-chief-penguins => include ${penguin_chiefs}
     --git-min-signatures => number of signatures required (default: $email_git_min_signatures)
     --git-max-maintainers => maximum maintainers to add (default: $email_git_max_maintainers)
@@ -545,6 +692,7 @@ MAINTAINER field selection options:
     --git-blame => use git blame to find modified commits for patch or file
     --git-since => git history to use (default: $email_git_since)
     --hg-since => hg history to use (default: $email_hg_since)
+    --interactive => display a menu (mostly useful if used with the --git option)
     --m => include maintainer(s) if any
     --n => include name 'Full Name <addr\@domain.tld>'
     --l => include list(s) if any
@@ -565,8 +713,9 @@ Output type options:
 
 Other options:
   --pattern-depth => Number of pattern directory traversals (default: 0 (all))
-  --keywords => scan patch for keywords (default: 1 (on))
-  --sections => print the entire subsystem sections with pattern matches
+  --keywords => scan patch for keywords (default: $keywords)
+  --sections => print all of the subsystem sections with pattern matches
+  --mailmap => use .mailmap file (default: $email_use_mailmap)
   --version => show version
   --help => show this help information
 
@@ -606,30 +755,30 @@ EOT
 }
 
 sub top_of_kernel_tree {
-       my ($lk_path) = @_;
+    my ($lk_path) = @_;
 
-       if ($lk_path ne "" && substr($lk_path,length($lk_path)-1,1) ne "/") {
-           $lk_path .= "/";
-       }
-       if (   (-f "${lk_path}COPYING")
-           && (-f "${lk_path}CREDITS")
-           && (-f "${lk_path}Kbuild")
-           && (-f "${lk_path}MAINTAINERS")
-           && (-f "${lk_path}Makefile")
-           && (-f "${lk_path}README")
-           && (-d "${lk_path}Documentation")
-           && (-d "${lk_path}arch")
-           && (-d "${lk_path}include")
-           && (-d "${lk_path}drivers")
-           && (-d "${lk_path}fs")
-           && (-d "${lk_path}init")
-           && (-d "${lk_path}ipc")
-           && (-d "${lk_path}kernel")
-           && (-d "${lk_path}lib")
-           && (-d "${lk_path}scripts")) {
-               return 1;
-       }
-       return 0;
+    if ($lk_path ne "" && substr($lk_path,length($lk_path)-1,1) ne "/") {
+       $lk_path .= "/";
+    }
+    if (   (-f "${lk_path}COPYING")
+       && (-f "${lk_path}CREDITS")
+       && (-f "${lk_path}Kbuild")
+       && (-f "${lk_path}MAINTAINERS")
+       && (-f "${lk_path}Makefile")
+       && (-f "${lk_path}README")
+       && (-d "${lk_path}Documentation")
+       && (-d "${lk_path}arch")
+       && (-d "${lk_path}include")
+       && (-d "${lk_path}drivers")
+       && (-d "${lk_path}fs")
+       && (-d "${lk_path}init")
+       && (-d "${lk_path}ipc")
+       && (-d "${lk_path}kernel")
+       && (-d "${lk_path}lib")
+       && (-d "${lk_path}scripts")) {
+       return 1;
+    }
+    return 0;
 }
 
 sub parse_email {
@@ -821,11 +970,19 @@ sub add_categories {
                }
                if ($list_additional =~ m/subscribers-only/) {
                    if ($email_subscriber_list) {
-                       push(@list_to, [$list_address, "subscriber list${list_role}"]);
+                       if (!$hash_list_to{lc($list_address)}) {
+                           $hash_list_to{lc($list_address)} = 1;
+                           push(@list_to, [$list_address,
+                                           "subscriber list${list_role}"]);
+                       }
                    }
                } else {
                    if ($email_list) {
-                       push(@list_to, [$list_address, "open list${list_role}"]);
+                       if (!$hash_list_to{lc($list_address)}) {
+                           $hash_list_to{lc($list_address)} = 1;
+                           push(@list_to, [$list_address,
+                                           "open list${list_role}"]);
+                       }
                    }
                }
            } elsif ($ptype eq "M") {
@@ -856,15 +1013,12 @@ sub add_categories {
     }
 }
 
-my %email_hash_name;
-my %email_hash_address;
-
 sub email_inuse {
     my ($name, $address) = @_;
 
     return 1 if (($name eq "") && ($address eq ""));
-    return 1 if (($name ne "") && exists($email_hash_name{$name}));
-    return 1 if (($address ne "") && exists($email_hash_address{$address}));
+    return 1 if (($name ne "") && exists($email_hash_name{lc($name)}));
+    return 1 if (($address ne "") && exists($email_hash_address{lc($address)}));
 
     return 0;
 }
@@ -882,8 +1036,8 @@ sub push_email_address {
        push(@email_to, [format_email($name, $address, $email_usename), $role]);
     } elsif (!email_inuse($name, $address)) {
        push(@email_to, [format_email($name, $address, $email_usename), $role]);
-       $email_hash_name{$name}++;
-       $email_hash_address{$address}++;
+       $email_hash_name{lc($name)}++ if ($name ne "");
+       $email_hash_address{lc($address)}++;
     }
 
     return 1;
@@ -952,30 +1106,69 @@ sub which {
     return "";
 }
 
-sub mailmap {
-    my (@lines) = @_;
-    my %hash;
+sub which_conf {
+    my ($conf) = @_;
 
-    foreach my $line (@lines) {
-       my ($name, $address) = parse_email($line);
-       if (!exists($hash{$name})) {
-           $hash{$name} = $address;
-       } elsif ($address ne $hash{$name}) {
-           $address = $hash{$name};
-           $line = format_email($name, $address, $email_usename);
+    foreach my $path (split(/:/, ".:$ENV{HOME}:.scripts")) {
+       if (-e "$path/$conf") {
+           return "$path/$conf";
        }
-       if (exists($mailmap{$name})) {
-           my $obj = $mailmap{$name};
-           foreach my $map_address (@$obj) {
-               if (($map_address eq $address) &&
-                   ($map_address ne $hash{$name})) {
-                   $line = format_email($name, $hash{$name}, $email_usename);
-               }
-           }
+    }
+
+    return "";
+}
+
+sub mailmap_email {
+    my ($line) = @_;
+
+    my ($name, $address) = parse_email($line);
+    my $email = format_email($name, $address, 1);
+    my $real_name = $name;
+    my $real_address = $address;
+
+    if (exists $mailmap->{names}->{$email} ||
+       exists $mailmap->{addresses}->{$email}) {
+       if (exists $mailmap->{names}->{$email}) {
+           $real_name = $mailmap->{names}->{$email};
+       }
+       if (exists $mailmap->{addresses}->{$email}) {
+           $real_address = $mailmap->{addresses}->{$email};
+       }
+    } else {
+       if (exists $mailmap->{names}->{$address}) {
+           $real_name = $mailmap->{names}->{$address};
+       }
+       if (exists $mailmap->{addresses}->{$address}) {
+           $real_address = $mailmap->{addresses}->{$address};
        }
     }
+    return format_email($real_name, $real_address, 1);
+}
 
-    return @lines;
+sub mailmap {
+    my (@addresses) = @_;
+
+    my @mapped_emails = ();
+    foreach my $line (@addresses) {
+       push(@mapped_emails, mailmap_email($line));
+    }
+    merge_by_realname(@mapped_emails) if ($email_use_mailmap);
+    return @mapped_emails;
+}
+
+sub merge_by_realname {
+    my %address_map;
+    my (@emails) = @_;
+
+    foreach my $email (@emails) {
+       my ($name, $address) = parse_email($email);
+       if (exists $address_map{$name}) {
+           $address = $address_map{$name};
+           $email = format_email($name, $address, 1);
+       } else {
+           $address_map{$name} = $address;
+       }
+    }
 }
 
 sub git_execute_cmd {
@@ -999,10 +1192,30 @@ sub hg_execute_cmd {
     return @lines;
 }
 
+sub extract_formatted_signatures {
+    my (@signature_lines) = @_;
+
+    my @type = @signature_lines;
+
+    s/\s*(.*):.*/$1/ for (@type);
+
+    # cut -f2- -d":"
+    s/\s*.*:\s*(.+)\s*/$1/ for (@signature_lines);
+
+## Reformat email addresses (with names) to avoid badly written signatures
+
+    foreach my $signer (@signature_lines) {
+       $signer = deduplicate_email($signer);
+    }
+
+    return (\@type, \@signature_lines);
+}
+
 sub vcs_find_signers {
     my ($cmd) = @_;
-    my @lines = ();
     my $commits;
+    my @lines = ();
+    my @signatures = ();
 
     @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
 
@@ -1010,21 +1223,48 @@ sub vcs_find_signers {
 
     $commits = grep(/$pattern/, @lines);       # of commits
 
-    @lines = grep(/^[ \t]*${signaturePattern}.*\@.*$/, @lines);
+    @signatures = grep(/^[ \t]*${signature_pattern}.*\@.*$/, @lines);
+
+    return (0, @signatures) if !@signatures;
+
+    save_commits_by_author(@lines) if ($interactive);
+    save_commits_by_signer(@lines) if ($interactive);
+
+    if (!$email_git_penguin_chiefs) {
+       @signatures = grep(!/${penguin_chiefs}/i, @signatures);
+    }
+
+    my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures);
+
+    return ($commits, @$signers_ref);
+}
+
+sub vcs_find_author {
+    my ($cmd) = @_;
+    my @lines = ();
+
+    @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
     if (!$email_git_penguin_chiefs) {
        @lines = grep(!/${penguin_chiefs}/i, @lines);
     }
-    # cut -f2- -d":"
-    s/.*:\s*(.+)\s*/$1/ for (@lines);
 
-## Reformat email addresses (with names) to avoid badly written signatures
+    return @lines if !@lines;
 
+    my @authors = ();
     foreach my $line (@lines) {
-       my ($name, $address) = parse_email($line);
-       $line = format_email($name, $address, 1);
+       if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
+           my $author = $1;
+           my ($name, $address) = parse_email($author);
+           $author = format_email($name, $address, 1);
+           push(@authors, $author);
+       }
     }
 
-    return ($commits, @lines);
+    save_commits_by_author(@lines) if ($interactive);
+    save_commits_by_signer(@lines) if ($interactive);
+
+    return @authors;
 }
 
 sub vcs_save_commits {
@@ -1084,6 +1324,10 @@ sub vcs_blame {
        @commits = vcs_save_commits($cmd);
     }
 
+    foreach my $commit (@commits) {
+       $commit =~ s/^\^//g;
+    }
+
     return @commits;
 }
 
@@ -1092,7 +1336,7 @@ sub vcs_exists {
     %VCS_cmds = %VCS_cmds_git;
     return 1 if eval $VCS_cmds{"available"};
     %VCS_cmds = %VCS_cmds_hg;
-    return 1 if eval $VCS_cmds{"available"};
+    return 2 if eval $VCS_cmds{"available"};
     %VCS_cmds = ();
     if (!$printed_novcs) {
        warn("$P: No supported VCS found.  Add --nogit to options?\n");
@@ -1104,6 +1348,405 @@ sub vcs_exists {
     return 0;
 }
 
+sub vcs_is_git {
+    vcs_exists();
+    return $vcs_used == 1;
+}
+
+sub vcs_is_hg {
+    return $vcs_used == 2;
+}
+
+sub interactive_get_maintainers {
+    my ($list_ref) = @_;
+    my @list = @$list_ref;
+
+    vcs_exists();
+
+    my %selected;
+    my %authored;
+    my %signed;
+    my $count = 0;
+    my $maintained = 0;
+    foreach my $entry (@list) {
+       $maintained = 1 if ($entry->[1] =~ /^(maintainer|supporter)/i);
+       $selected{$count} = 1;
+       $authored{$count} = 0;
+       $signed{$count} = 0;
+       $count++;
+    }
+
+    #menu loop
+    my $done = 0;
+    my $print_options = 0;
+    my $redraw = 1;
+    while (!$done) {
+       $count = 0;
+       if ($redraw) {
+           printf STDERR "\n%1s %2s %-65s",
+                         "*", "#", "email/list and role:stats";
+           if ($email_git ||
+               ($email_git_fallback && !$maintained) ||
+               $email_git_blame) {
+               print STDERR "auth sign";
+           }
+           print STDERR "\n";
+           foreach my $entry (@list) {
+               my $email = $entry->[0];
+               my $role = $entry->[1];
+               my $sel = "";
+               $sel = "*" if ($selected{$count});
+               my $commit_author = $commit_author_hash{$email};
+               my $commit_signer = $commit_signer_hash{$email};
+               my $authored = 0;
+               my $signed = 0;
+               $authored++ for (@{$commit_author});
+               $signed++ for (@{$commit_signer});
+               printf STDERR "%1s %2d %-65s", $sel, $count + 1, $email;
+               printf STDERR "%4d %4d", $authored, $signed
+                   if ($authored > 0 || $signed > 0);
+               printf STDERR "\n     %s\n", $role;
+               if ($authored{$count}) {
+                   my $commit_author = $commit_author_hash{$email};
+                   foreach my $ref (@{$commit_author}) {
+                       print STDERR "     Author: @{$ref}[1]\n";
+                   }
+               }
+               if ($signed{$count}) {
+                   my $commit_signer = $commit_signer_hash{$email};
+                   foreach my $ref (@{$commit_signer}) {
+                       print STDERR "     @{$ref}[2]: @{$ref}[1]\n";
+                   }
+               }
+
+               $count++;
+           }
+       }
+       my $date_ref = \$email_git_since;
+       $date_ref = \$email_hg_since if (vcs_is_hg());
+       if ($print_options) {
+           $print_options = 0;
+           if (vcs_exists()) {
+               print STDERR <<EOT
+
+Version Control options:
+g  use git history      [$email_git]
+gf use git-fallback     [$email_git_fallback]
+b  use git blame        [$email_git_blame]
+bs use blame signatures [$email_git_blame_signatures]
+c# minimum commits      [$email_git_min_signatures]
+%# min percent          [$email_git_min_percent]
+d# history to use       [$$date_ref]
+x# max maintainers      [$email_git_max_maintainers]
+t  all signature types  [$email_git_all_signature_types]
+m  use .mailmap         [$email_use_mailmap]
+EOT
+           }
+           print STDERR <<EOT
+
+Additional options:
+0  toggle all
+tm toggle maintainers
+tg toggle git entries
+tl toggle open list entries
+ts toggle subscriber list entries
+f  emails in file       [$file_emails]
+k  keywords in file     [$keywords]
+r  remove duplicates    [$email_remove_duplicates]
+p# pattern match depth  [$pattern_depth]
+EOT
+       }
+       print STDERR
+"\n#(toggle), A#(author), S#(signed) *(all), ^(none), O(options), Y(approve): ";
+
+       my $input = <STDIN>;
+       chomp($input);
+
+       $redraw = 1;
+       my $rerun = 0;
+       my @wish = split(/[, ]+/, $input);
+       foreach my $nr (@wish) {
+           $nr = lc($nr);
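+           # each command is a single letter plus an optional argument,
+           # e.g. "c5" sets the minimum commit count to 5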
+           my $sel = substr($nr, 0, 1);
+           my $str = substr($nr, 1);
+           my $val = 0;
+           $val = $1 if $str =~ /^(\d+)$/;
+
+           if ($sel eq "y") {
+               $interactive = 0;
+               $done = 1;
+               $output_rolestats = 0;
+               $output_roles = 0;
+               last;
+           } elsif ($nr =~ /^\d+$/ && $nr > 0 && $nr <= $count) {
+               $selected{$nr - 1} = !$selected{$nr - 1};
+           } elsif ($sel eq "*" || $sel eq '^') {
+               my $toggle = 0;
+               $toggle = 1 if ($sel eq '*');
+               for (my $i = 0; $i < $count; $i++) {
+                   $selected{$i} = $toggle;
+               }
+           } elsif ($sel eq "0") {
+               for (my $i = 0; $i < $count; $i++) {
+                   $selected{$i} = !$selected{$i};
+               }
+           } elsif ($sel eq "t") {
+               if (lc($str) eq "m") {
+                   for (my $i = 0; $i < $count; $i++) {
+                       $selected{$i} = !$selected{$i}
+                           if ($list[$i]->[1] =~ /^(maintainer|supporter)/i);
+                   }
+               } elsif (lc($str) eq "g") {
+                   for (my $i = 0; $i < $count; $i++) {
+                       $selected{$i} = !$selected{$i}
+                           if ($list[$i]->[1] =~ /^(author|commit|signer)/i);
+                   }
+               } elsif (lc($str) eq "l") {
+                   for (my $i = 0; $i < $count; $i++) {
+                       $selected{$i} = !$selected{$i}
+                           if ($list[$i]->[1] =~ /^(open list)/i);
+                   }
+               } elsif (lc($str) eq "s") {
+                   for (my $i = 0; $i < $count; $i++) {
+                       $selected{$i} = !$selected{$i}
+                           if ($list[$i]->[1] =~ /^(subscriber list)/i);
+                   }
+               } elsif ($str eq "") {
+                   # a bare "t" toggles matching of all signature types
+                   bool_invert(\$email_git_all_signature_types);
+                   $rerun = 1;
+               }
+           } elsif ($sel eq "a") {
+               if ($val > 0 && $val <= $count) {
+                   $authored{$val - 1} = !$authored{$val - 1};
+               } elsif ($str eq '*' || $str eq '^') {
+                   my $toggle = 0;
+                   $toggle = 1 if ($str eq '*');
+                   for (my $i = 0; $i < $count; $i++) {
+                       $authored{$i} = $toggle;
+                   }
+               }
+           } elsif ($sel eq "s") {
+               if ($val > 0 && $val <= $count) {
+                   $signed{$val - 1} = !$signed{$val - 1};
+               } elsif ($str eq '*' || $str eq '^') {
+                   my $toggle = 0;
+                   $toggle = 1 if ($str eq '*');
+                   for (my $i = 0; $i < $count; $i++) {
+                       $signed{$i} = $toggle;
+                   }
+               }
+           } elsif ($sel eq "o") {
+               $print_options = 1;
+               $redraw = 1;
+           } elsif ($sel eq "g") {
+               if ($str eq "f") {
+                   bool_invert(\$email_git_fallback);
+               } else {
+                   bool_invert(\$email_git);
+               }
+               $rerun = 1;
+           } elsif ($sel eq "b") {
+               if ($str eq "s") {
+                   bool_invert(\$email_git_blame_signatures);
+               } else {
+                   bool_invert(\$email_git_blame);
+               }
+               $rerun = 1;
+           } elsif ($sel eq "c") {
+               if ($val > 0) {
+                   $email_git_min_signatures = $val;
+                   $rerun = 1;
+               }
+           } elsif ($sel eq "x") {
+               if ($val > 0) {
+                   $email_git_max_maintainers = $val;
+                   $rerun = 1;
+               }
+           } elsif ($sel eq "%") {
+               if ($str ne "" && $val >= 0) {
+                   $email_git_min_percent = $val;
+                   $rerun = 1;
+               }
+           } elsif ($sel eq "d") {
+               if (vcs_is_git()) {
+                   $email_git_since = $str;
+               } elsif (vcs_is_hg()) {
+                   $email_hg_since = $str;
+               }
+               $rerun = 1;
+           } elsif ($sel eq "f") {
+               bool_invert(\$file_emails);
+               $rerun = 1;
+           } elsif ($sel eq "r") {
+               bool_invert(\$email_remove_duplicates);
+               $rerun = 1;
+           } elsif ($sel eq "m") {
+               bool_invert(\$email_use_mailmap);
+               read_mailmap();
+               $rerun = 1;
+           } elsif ($sel eq "k") {
+               bool_invert(\$keywords);
+               $rerun = 1;
+           } elsif ($sel eq "p") {
+               if ($str ne "" && $val >= 0) {
+                   $pattern_depth = $val;
+                   $rerun = 1;
+               }
+           } elsif ($sel eq "h" || $sel eq "?") {
+               print STDERR <<EOT
+
+Interactive mode allows you to select the various maintainers, submitters,
+commit signers and mailing lists that could be CC'd on a patch.
+
+Any *'d entry is selected.
+
+If you have git or hg installed, you can choose to summarize the commit
+history of files in the patch.  Also, each line of the current file can
+be matched to its commit author and that commit's signers with blame.
+
+Various knobs exist to control how far back the commit history search
+goes, the maximum number of commit authors and signers to add, and more.
+
+Enter selections at the prompt until you are satisfied that the selected
+maintainers are appropriate.  You may enter multiple selections separated
+by either commas or spaces.
+
+EOT
+           } else {
+               print STDERR "invalid option: '$nr'\n";
+               $redraw = 0;
+           }
+       }
+       if ($rerun) {
+           print STDERR "git-blame can be very slow, please have patience..."
+               if ($email_git_blame);
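+           # "goto &get_maintainers" re-enters get_maintainers() with the
+           # current @_, recomputing the match under the new option values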
+           goto &get_maintainers;
+       }
+    }
+
+    # drop the entries that were not selected
+    $count = 0;
+    my @new_emailto = ();
+    foreach my $entry (@list) {
+       if ($selected{$count}) {
+           push(@new_emailto, $entry);
+       }
+       $count++;
+    }
+    return @new_emailto;
+}
+
+sub bool_invert {
+    my ($bool_ref) = @_;
+
+    if ($$bool_ref) {
+       $$bool_ref = 0;
+    } else {
+       $$bool_ref = 1;
+    }
+}
+
+sub deduplicate_email {
+    my ($email) = @_;
+
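+    # normalize the name/address and apply .mailmap first, so variant
+    # spellings of the same person collapse to a single entry below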
+    my $matched = 0;
+    my ($name, $address) = parse_email($email);
+    $email = format_email($name, $address, 1);
+    $email = mailmap_email($email);
+
+    return $email if (!$email_remove_duplicates);
+
+    ($name, $address) = parse_email($email);
+
+    if ($name ne "" && $deduplicate_name_hash{lc($name)}) {
+       $name = $deduplicate_name_hash{lc($name)}->[0];
+       $address = $deduplicate_name_hash{lc($name)}->[1];
+       $matched = 1;
+    } elsif ($deduplicate_address_hash{lc($address)}) {
+       $name = $deduplicate_address_hash{lc($address)}->[0];
+       $address = $deduplicate_address_hash{lc($address)}->[1];
+       $matched = 1;
+    }
+    if (!$matched) {
+       $deduplicate_name_hash{lc($name)} = [ $name, $address ];
+       $deduplicate_address_hash{lc($address)} = [ $name, $address ];
+    }
+    $email = format_email($name, $address, 1);
+    $email = mailmap_email($email);
+    return $email;
+}
+
+sub save_commits_by_author {
+    my (@lines) = @_;
+
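+    # the three arrays are kept parallel; this assumes the log output
+    # yields one author/commit/subject triple per commit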
+    my @authors = ();
+    my @commits = ();
+    my @subjects = ();
+
+    foreach my $line (@lines) {
+       if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
+           my $author = $1;
+           $author = deduplicate_email($author);
+           push(@authors, $author);
+       }
+       push(@commits, $1) if ($line =~ m/$VCS_cmds{"commit_pattern"}/);
+       push(@subjects, $1) if ($line =~ m/$VCS_cmds{"subject_pattern"}/);
+    }
+
+    for (my $i = 0; $i < @authors; $i++) {
+       my $exists = 0;
+       foreach my $ref(@{$commit_author_hash{$authors[$i]}}) {
+           if (@{$ref}[0] eq $commits[$i] &&
+               @{$ref}[1] eq $subjects[$i]) {
+               $exists = 1;
+               last;
+           }
+       }
+       if (!$exists) {
+           push(@{$commit_author_hash{$authors[$i]}},
+                [ ($commits[$i], $subjects[$i]) ]);
+       }
+    }
+}
+
+sub save_commits_by_signer {
+    my (@lines) = @_;
+
+    my $commit = "";
+    my $subject = "";
+
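+    # $commit/$subject track the most recently seen commit header; any
+    # signature lines that follow are attributed to that commit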
+    foreach my $line (@lines) {
+       $commit = $1 if ($line =~ m/$VCS_cmds{"commit_pattern"}/);
+       $subject = $1 if ($line =~ m/$VCS_cmds{"subject_pattern"}/);
+       if ($line =~ /^[ \t]*${signature_pattern}.*\@.*$/) {
+           my @signatures = ($line);
+           my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures);
+           my @types = @$types_ref;
+           my @signers = @$signers_ref;
+
+           my $type = $types[0];
+           my $signer = $signers[0];
+
+           $signer = deduplicate_email($signer);
+
+           my $exists = 0;
+           foreach my $ref(@{$commit_signer_hash{$signer}}) {
+               if (@{$ref}[0] eq $commit &&
+                   @{$ref}[1] eq $subject &&
+                   @{$ref}[2] eq $type) {
+                   $exists = 1;
+                   last;
+               }
+           }
+           if (!$exists) {
+               push(@{$commit_signer_hash{$signer}},
+                    [ ($commit, $subject, $type) ]);
+           }
+       }
+    }
+}
+
 sub vcs_assign {
     my ($role, $divisor, @lines) = @_;
 
@@ -1117,9 +1760,9 @@ sub vcs_assign {
        $divisor = 1;
     }
 
-    if ($email_remove_duplicates) {
-       @lines = mailmap(@lines);
-    }
+    @lines = mailmap(@lines);
+
+    return if (@lines <= 0);
 
     @lines = sort(@lines);
 
@@ -1152,12 +1795,18 @@ sub vcs_file_signoffs {
     my @signers = ();
     my $commits;
 
-    return if (!vcs_exists());
+    $vcs_used = vcs_exists();
+    return if (!$vcs_used);
 
     my $cmd = $VCS_cmds{"find_signers_cmd"};
     $cmd =~ s/(\$\w+)/$1/eeg;          # interpolate $cmd
 
     ($commits, @signers) = vcs_find_signers($cmd);
+
+    foreach my $signer (@signers) {
+       $signer = deduplicate_email($signer);
+    }
+
     vcs_assign("commit_signer", $commits, @signers);
 }
 
@@ -1165,29 +1814,114 @@ sub vcs_file_blame {
     my ($file) = @_;
 
     my @signers = ();
+    my @all_commits = ();
     my @commits = ();
     my $total_commits;
+    my $total_lines;
 
-    return if (!vcs_exists());
+    $vcs_used = vcs_exists();
+    return if (!$vcs_used);
 
-    @commits = vcs_blame($file);
-    @commits = uniq(@commits);
+    @all_commits = vcs_blame($file);
+    @commits = uniq(@all_commits);
     $total_commits = @commits;
+    $total_lines = @all_commits;
 
-    foreach my $commit (@commits) {
-       my $commit_count;
-       my @commit_signers = ();
+    if ($email_git_blame_signatures) {
+       if (vcs_is_hg()) {
+           my $commit_count;
+           my @commit_signers = ();
+           my $commit = join(" -r ", @commits);
+           my $cmd;
+
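+           # the command template embeds variable names (e.g. $commit);
+           # the double /ee evaluation replaces each name with its value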
+           $cmd = $VCS_cmds{"find_commit_signers_cmd"};
+           $cmd =~ s/(\$\w+)/$1/eeg;   #substitute variables in $cmd
+
+           ($commit_count, @commit_signers) = vcs_find_signers($cmd);
+
+           push(@signers, @commit_signers);
+       } else {
+           foreach my $commit (@commits) {
+               my $commit_count;
+               my @commit_signers = ();
+               my $cmd;
 
-       my $cmd = $VCS_cmds{"find_commit_signers_cmd"};
-       $cmd =~ s/(\$\w+)/$1/eeg;       #interpolate $cmd
+               $cmd = $VCS_cmds{"find_commit_signers_cmd"};
+               $cmd =~ s/(\$\w+)/$1/eeg;       #substitute variables in $cmd
 
-       ($commit_count, @commit_signers) = vcs_find_signers($cmd);
-       push(@signers, @commit_signers);
+               ($commit_count, @commit_signers) = vcs_find_signers($cmd);
+
+               push(@signers, @commit_signers);
+           }
+       }
     }
 
     if ($from_filename) {
+       if ($output_rolestats) {
+           my @blame_signers;
+           if (vcs_is_hg()) {{         # double braces form a block that "last" can exit
+               my $commit_count;
+               my @commit_signers = ();
+               @commits = uniq(@commits);
+               @commits = sort(@commits);
+               my $commit = join(" -r ", @commits);
+               my $cmd;
+
+               $cmd = $VCS_cmds{"find_commit_author_cmd"};
+               $cmd =~ s/(\$\w+)/$1/eeg;       #substitute variables in $cmd
+
+               my @lines = ();
+
+               @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+               if (!$email_git_penguin_chiefs) {
+                   @lines = grep(!/${penguin_chiefs}/i, @lines);
+               }
+
+               last if !@lines;
+
+               my @authors = ();
+               foreach my $line (@lines) {
+                   if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
+                       my $author = $1;
+                       $author = deduplicate_email($author);
+                       push(@authors, $author);
+                   }
+               }
+
+               save_commits_by_author(@lines) if ($interactive);
+               save_commits_by_signer(@lines) if ($interactive);
+
+               push(@signers, @authors);
+           }}
+           else {
+               foreach my $commit (@commits) {
+                   my $i;
+                   my $cmd = $VCS_cmds{"find_commit_author_cmd"};
+                   $cmd =~ s/(\$\w+)/$1/eeg;   #interpolate $cmd
+                   my @author = vcs_find_author($cmd);
+                   next if !@author;
+
+                   my $formatted_author = deduplicate_email($author[0]);
+
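+                   # weight the author by the number of blamed lines
+                   # that belong to this commit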
+                   my $count = grep(/$commit/, @all_commits);
+                   for ($i = 0; $i < $count ; $i++) {
+                       push(@blame_signers, $formatted_author);
+                   }
+               }
+           }
+           if (@blame_signers) {
+               vcs_assign("authored lines", $total_lines, @blame_signers);
+           }
+       }
+       foreach my $signer (@signers) {
+           $signer = deduplicate_email($signer);
+       }
        vcs_assign("commits", $total_commits, @signers);
     } else {
+       foreach my $signer (@signers) {
+           $signer = deduplicate_email($signer);
+       }
        vcs_assign("modified commits", $total_commits, @signers);
     }
 }
index 82396050f18646ac0519352321637e930c05e367..36cc0cc39e78e135630384e6500f4a789fe7aa2a 100644 (file)
@@ -72,10 +72,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                path_get(&root);
        }
 
-       spin_lock(&dcache_lock);
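+       /* __d_path() now takes dcache_lock internally */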
        tmp = root;
        res = __d_path(path, &tmp, buf, buflen);
-       spin_unlock(&dcache_lock);
 
        *name = res;
        /* handle error conditions - and still allow a partial path to
index 88839866cbcd029a52422b51cf163ec8167a4034..cb8f47c66a58bb7eb11c0a75319f378261393f14 100644 (file)
@@ -61,6 +61,7 @@ static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev)
        struct inode *inode = new_inode(sb);
 
        if (inode) {
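+               /* new_inode() no longer assigns a default i_ino */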
+               inode->i_ino = get_next_ino();
                inode->i_mode = mode;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                switch (mode & S_IFMT) {
index 87e0556bae70ff977ea290b3cdfcc2c308d8edf5..55a755c1a1bd9546a4655e83351cc93bc92cf655 100644 (file)
@@ -978,6 +978,7 @@ static struct inode *sel_make_inode(struct super_block *sb, int mode)
        struct inode *ret = new_inode(sb);
 
        if (ret) {
+               ret->i_ino = get_next_ino();
                ret->i_mode = mode;
                ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
        }
index ed8ccd680102b53d98f9b98cb9a3aa05c8143090..1d0bf8fa1922fdedec280ba5085f8cafe43d1d68 100644 (file)
@@ -127,10 +127,8 @@ char *tomoyo_realpath_from_path(struct path *path)
                /* If we don't have a vfsmount, we can't calculate. */
                if (!path->mnt)
                        break;
-               spin_lock(&dcache_lock);
                /* go to whatever namespace root we are under */
                pos = __d_path(path, &ns_root, buf, buf_len);
-               spin_unlock(&dcache_lock);
                /* Prepend "/proc" prefix if using internal proc vfs mount. */
                if (!IS_ERR(pos) && (path->mnt->mnt_flags & MNT_INTERNAL) &&
                    (path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) {